Dataset schema:

  commit        string, 40 characters
  subject       string, 1 to 1.49k characters
  old_file      string, 4 to 311 characters
  new_file      string, 4 to 311 characters
  new_contents  string, 1 to 29.8k characters
  old_contents  string, 0 to 9.9k characters
  lang          string, 3 classes
  proba         float64, 0 to 1
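A minimal sketch of how rows with this schema might be consumed, assuming the dump comes from a Hugging Face datasets export; the dataset path "user/commit-diffs" below is a hypothetical placeholder, not the real dataset name, and the 0.5 threshold on proba is an arbitrary example value.

from datasets import load_dataset  # pip install datasets

# Hypothetical dataset path - substitute the actual name of this dataset.
ds = load_dataset("user/commit-diffs", split="train")

# Filter on the lang and proba columns described above; proba is just
# treated here as a score in [0, 1], whatever its exact semantics.
python_rows = ds.filter(lambda row: row["lang"] == "Python" and row["proba"] > 0.5)

# Print a short summary of the first few matching rows.
for row in python_rows.select(range(3)):
    print(row["commit"][:8], row["subject"], row["new_file"])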
commit: 063acbeb5c95bf8e02aff7cf7f238606d814567a
subject: change billet
old_file: totvserprm/financial.py
new_file: totvserprm/financial.py
new_contents:

# -*- coding: utf-8 -*-
from datetime import datetime
from baseapi import BaseApi
import uuid

class Client(BaseApi):
    dataservername = 'FinCFODataBR'

    def create(self, **kwargs):
        return super(Client, self).create({
            'NewDataSet': {
                'FCFO': {
                    'ATIVO': kwargs.get('ativo'),
                    # send -1 so that it is created incrementally
                    'CODCFO': -1,
                    'IDCFO': -1,
                    'CODEXTERNO': kwargs.get('codexterno'),
                    'CODCOLIGADA': kwargs.get('codcoligada'),
                    'CGCCFO': kwargs.get('cpf_cnpj'),
                    'TIPORUA': kwargs.get('tipo_rua'),
                    'TIPOBAIRRO': kwargs.get('tipo_bairro'),
                    'BAIRRO': kwargs.get('bairro'),
                    'RUA': kwargs.get('rua'),
                    'NUMERO': kwargs.get('numero'),
                    'CEP': kwargs.get('cep'),
                    'CODETD': kwargs.get('estado'),
                    'CIDADE': kwargs.get('cidade'),
                    'CODMUNICIPIO': kwargs.get('codigo_municipio'),
                    'PAIS': kwargs.get('cod_pais'),
                    'DTNASCIMENTO': '{:%Y-%m-%d}T03:00:00.000'.format(kwargs.get('data_nascimento')),
                    'NOME': kwargs.get('nome'),
                    'NOMEFANTASIA': kwargs.get('nome'),
                    'PAGREC': kwargs.get('classificacao'),
                    'PESSOAFISOUJUR': kwargs.get('categoria'),
                }
            }
        }, 'CODCOLIGADA={}'.format(kwargs.get('codcoligada')))

class Billet(BaseApi):
    dataservername = 'FinLanBoletoData'

    def create(self, **kwargs):
        return super(Billet, self).create({
            'FinLAN': {
                'FLAN': {
                    'CODCOLIGADA': kwargs.get('codcoligada'),
                    'IDLAN': -1,
                    'NUMERODOCUMENTO': str(uuid.uuid4()),
                    'NFOUDUP': 0,
                    'CLASSIFICACAO': 0,
                    'PAGREC': 1,
                    'STATUSLAN': 0,
                    'CODTDO': kwargs.get('tipo_documento'),
                    'DATAVENCIMENTO': kwargs.get('data_vencimento'),
                    'DATAEMISSAO': "{:%d/%m/%Y %H:%M:%S}".format(datetime.now()),
                    'VALORORIGINAL': kwargs.get('valor'),
                    'CODCOLCFO': 1,
                    'CODCFO': kwargs.get('codcliente'),
                    'CODFILIAL': kwargs.get('codfilial'),
                    'SERIEDOCUMENTO': '@@@',
                    'CODCXA': kwargs.get('conta'),
                    'CODMOEVALORORIGINAL': 'R$',
                    'NUMLOTECONTABIL': 0,
                    'NUMEROCONTABIL': 0,
                    'NUMCONTABILBX': 0,
                    'TIPOCONTABILLAN': 0,
                    'FILIALCONTABIL': 1,
                    'HISTORICO': kwargs.get('historico'),
                    'CODCCUSTO': kwargs.get('centro_custo'),
                },
            }
        }, 'CODCOLIGADA={}'.format(kwargs.get('codcoligada')))

old_contents:

# -*- coding: utf-8 -*-
from datetime import datetime
from baseapi import BaseApi

class Client(BaseApi):
    dataservername = 'FinCFODataBR'

    def create(self, **kwargs):
        return super(Client, self).create({
            'NewDataSet': {
                'FCFO': {
                    'ATIVO': kwargs.get('ativo'),
                    # send -1 so that it is created incrementally
                    'CODCFO': -1,
                    'IDCFO': -1,
                    'CODEXTERNO': kwargs.get('codexterno'),
                    'CODCOLIGADA': kwargs.get('codcoligada'),
                    'CGCCFO': kwargs.get('cpf_cnpj'),
                    'TIPORUA': kwargs.get('tipo_rua'),
                    'TIPOBAIRRO': kwargs.get('tipo_bairro'),
                    'BAIRRO': kwargs.get('bairro'),
                    'RUA': kwargs.get('rua'),
                    'NUMERO': kwargs.get('numero'),
                    'CEP': kwargs.get('cep'),
                    'CODETD': kwargs.get('estado'),
                    'CIDADE': kwargs.get('cidade'),
                    'CODMUNICIPIO': kwargs.get('codigo_municipio'),
                    'PAIS': kwargs.get('cod_pais'),
                    'DTNASCIMENTO': '{:%Y-%m-%d}T03:00:00.000'.format(kwargs.get('data_nascimento')),
                    'NOME': kwargs.get('nome'),
                    'NOMEFANTASIA': kwargs.get('nome'),
                    'PAGREC': kwargs.get('classificacao'),
                    'PESSOAFISOUJUR': kwargs.get('categoria'),
                }
            }
        }, 'CODCOLIGADA={}'.format(kwargs.get('codcoligada')))

class Billet(BaseApi):
    dataservername = 'FinLanBoletoData'

    def create(self, **kwargs):
        return super(Billet, self).create({
            'NewDataSet': {
                'FLAN': {
                    'CODCOLIGADA': kwargs.get('codcoligada'),
                    'IDLAN': kwargs.get('id_lancamento'),
                    'NUMERODOCUMENTO': -1,
                    'NFOUDUP': 0,
                    'CLASSIFICACAO': 0,
                    'PAGREC': 1,
                    'STATUSLAN': 0,
                    'CODTDO': kwargs.get('tipo_documento'),
                    'DATAVENCIMENTO': kwargs.get('data_vencimento'),
                    'DATAEMISSAO': "{:%d/%m/%Y %H:%M:%S}".format(datetime.now()),
                    'VALORORIGINAL': kwargs.get('valor'),
                    'CODCOLCFO': kwargs.get('codcoligada'),
                    'CODCFO': kwargs.get('codcliente'),
                    'CODFILIAL': kwargs.get('codfilial'),
                    'SERIEDOCUMENTO': kwargs.get('serie_documento'),
                    'CODCXA': kwargs.get('conta'),
                    'CODMOEVALORORIGINAL': 'R$',
                    'NUMLOTECONTABIL': 0,
                    'NUMEROCONTABIL': 0,
                    'NUMCONTABILBX': 0,
                    'TIPOCONTABILLAN': 0,
                    'FILIALCONTABIL': 1,
                    'HISTORICO': kwargs.get('historico'),
                    'CODCCUSTO': kwargs.get('centro_custo'),
                    'CODTCF': '000',
                    'CODCOLSACADO': '0'
                }
            }
        }, 'CODCOLIGADA={}'.format(kwargs.get('codcoligada')))

lang: Python
proba: 0.000001
commit: e4e10ee0ae5a18cfec0e15b7b85986b7f4fc4f9d
subject: Fix prefetched fields in Institutions in API
old_file: feder/institutions/viewsets.py
new_file: feder/institutions/viewsets.py
new_contents:

import django_filters
from rest_framework import filters, viewsets
from teryt_tree.rest_framework_ext.viewsets import custom_area_filter

from .models import Institution, Tag
from .serializers import InstitutionSerializer, TagSerializer

class InstitutionFilter(filters.FilterSet):
    jst = django_filters.CharFilter(method=custom_area_filter)

    def __init__(self, *args, **kwargs):
        super(InstitutionFilter, self).__init__(*args, **kwargs)
        self.filters['name'].lookup_expr = 'icontains'

    class Meta:
        model = Institution
        fields = ['name', 'tags', 'jst', 'regon']

class InstitutionViewSet(viewsets.ModelViewSet):
    queryset = (Institution.objects.
                select_related('jst').
                prefetch_related('tags', 'parents').
                all())
    serializer_class = InstitutionSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = InstitutionFilter

class TagViewSet(viewsets.ModelViewSet):
    queryset = Tag.objects.all()
    serializer_class = TagSerializer

old_contents:

import django_filters
from rest_framework import filters, viewsets
from teryt_tree.rest_framework_ext.viewsets import custom_area_filter

from .models import Institution, Tag
from .serializers import InstitutionSerializer, TagSerializer

class InstitutionFilter(filters.FilterSet):
    jst = django_filters.CharFilter(method=custom_area_filter)

    def __init__(self, *args, **kwargs):
        super(InstitutionFilter, self).__init__(*args, **kwargs)
        self.filters['name'].lookup_expr = 'icontains'

    class Meta:
        model = Institution
        fields = ['name', 'tags', 'jst', 'regon']

class InstitutionViewSet(viewsets.ModelViewSet):
    queryset = (Institution.objects.
                select_related('jst').
                prefetch_related('tags').
                all())
    serializer_class = InstitutionSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = InstitutionFilter

class TagViewSet(viewsets.ModelViewSet):
    queryset = Tag.objects.all()
    serializer_class = TagSerializer

lang: Python
proba: 0.000007
commit: 9bdaf963843a9f0b44487ea3b258b50b328153d8
subject: Remove redis connection logic from each view, make it global, keep it threadsafe
old_file: firetower/web/firetower_web.py
new_file: firetower/web/firetower_web.py
new_contents:

from calendar import timegm
import datetime
import time

from flask import Flask, render_template

from firetower import redis_util

REDIS_HOST = "localhost"
REDIS_PORT = 6379
REDIS = redis_util.Redis(REDIS_HOST, REDIS_PORT)

app = Flask(__name__)

def timestamp(dttm):
    return timegm(dttm.utctimetuple())

@app.route("/")
def root():
    lines = []
    categories = REDIS.get_categories()
    for cat in categories:
        lines.append("<li>%s</li>" % cat)
    return "<ul>%s</ul>" % "\n".join(lines)

@app.route("/default/")
def default():
    cat_dict = REDIS.conn.hgetall("category_ids")
    end = datetime.datetime.now()
    start = end - datetime.timedelta(hours=1)
    results = []
    for cat_id in cat_dict:
        cat = cat_dict[cat_id]
        time_series = REDIS.get_timeseries(cat, timestamp(start), timestamp(end))
        items = [(int(x)*1000, int(y)) for x, y in time_series.items()]
        items.sort(lambda x, y: cmp(x[0], y[0]))
        results.append((cat_id, cat, items))
    return render_template(
        "last_5_index.html",
        categories = cat_dict.items(),
        results = results
    )

@app.route("/aggregate")
def aggregate():
    cat_dict = REDIS.conn.hgetall("category_ids")
    start = end - 300
    error_totals = {}
    for cat_id in cat_dict:
        cat = cat_dict[cat_id]
        time_series = REDIS.get_timeseries(cat, start, end)
        for time_point in time_series:
            error_totals[cat_id] = error_totals.get(cat_id, 0) + int(time_point[1])
    totals = []
    print error_totals
    for i in error_totals.items():
        totals.append((i[0], cat_dict[i[0]], i[1]))
    return render_template(
        "aggregate.html",
        totals = totals)

def main():
    app.run(debug=True, use_evalex=False, host='0.0.0.0')

old_contents:

from calendar import timegm
import datetime
import time

from flask import Flask, render_template

from firetower import redis_util

REDIS_HOST = "localhost"
REDIS_PORT = 6379

app = Flask(__name__)

def timestamp(dttm):
    return timegm(dttm.utctimetuple())

@app.route("/")
def root():
    lines = []
    redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)
    categories = redis.get_categories()
    for cat in categories:
        lines.append("<li>%s</li>" % cat)
    return "<ul>%s</ul>" % "\n".join(lines)

@app.route("/default/")
def default():
    redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)
    cat_dict = redis.conn.hgetall("category_ids")
    end = datetime.datetime.now()
    start = end - datetime.timedelta(hours=1)
    results = []
    for cat_id in cat_dict:
        cat = cat_dict[cat_id]
        time_series = redis.get_timeseries(cat, timestamp(start), timestamp(end))
        items = [(int(x)*1000, int(y)) for x, y in time_series.items()]
        items.sort(lambda x, y: cmp(x[0], y[0]))
        results.append((cat_id, cat, items))
    return render_template(
        "last_5_index.html",
        categories = cat_dict.items(),
        results = results
    )

@app.route("/aggregate")
def aggregate():
    redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)
    cat_dict = redis.conn.hgetall("category_ids")
    start = end - 300
    error_totals = {}
    for cat_id in cat_dict:
        cat = cat_dict[cat_id]
        time_series = redis.get_timeseries(cat, start, end)
        for time_point in time_series:
            error_totals[cat_id] = error_totals.get(cat_id, 0) + int(time_point[1])
    totals = []
    print error_totals
    for i in error_totals.items():
        totals.append((i[0], cat_dict[i[0]], i[1]))
    return render_template(
        "aggregate.html",
        totals = totals)

def main():
    app.run(debug=True, use_evalex=False, host='0.0.0.0')

lang: Python
proba: 0
commit: 1c48f9ad2c2a66d7c15c9216665b7f802d3498b4
subject: Set deprecation_summary_result so we can summarize deprecations and they get written to the report plist if specified.
old_file: SharedProcessors/DeprecationWarning.py
new_file: SharedProcessors/DeprecationWarning.py
new_contents:

#!/usr/bin/python
#
# Copyright 2019 Greg Neagle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor that outputs a warning message. Intended to alert recipe users
of upcoming removal of a recipe."""

import os

from autopkglib import Processor

__all__ = ["DeprecationWarning"]

class DeprecationWarning(Processor):
    """This processor outputs a warning that the recipe has been deprecated."""
    input_variables = {
        "warning_message": {
            "required": False,
            "description": "Warning message to output.",
        },
    }
    output_variables = {
        "deprecation_summary_result": {
            "description": "Description of interesting results."
        }
    }
    description = __doc__

    def main(self):
        warning_message = self.env.get(
            "warning_message",
            "### This recipe has been deprecated. It may be removed soon. ###"
        )
        self.output(warning_message)
        recipe_name = os.path.basename(self.env['RECIPE_PATH'])
        if recipe_name.endswith('.recipe'):
            recipe_name = os.path.splitext(recipe_name)[0]
        self.env["deprecation_summary_result"] = {
            'summary_text': 'The following recipes have deprecation warnings:',
            'report_fields': ['name', 'warning'],
            'data': {
                'name': recipe_name,
                'warning': warning_message
            }
        }

if __name__ == '__main__':
    PROCESSOR = DeprecationWarning()
    PROCESSOR.execute_shell()

old_contents:

#!/usr/bin/python
#
# Copyright 2019 Greg Neagle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor that outputs a warning message. Intended to alert recipe users
of upcoming removal of a recipe."""

from autopkglib import Processor

__all__ = ["DeprecationWarning"]

class DeprecationWarning(Processor):
    """This processor outputs a warning that the recipe has been deprecated."""
    input_variables = {
        "warning_message": {
            "required": False,
            "description": "Warning message to output.",
        },
    }
    output_variables = {
    }
    description = __doc__

    def main(self):
        warning_message = self.env.get(
            "warning_message",
            "### This recipe has been deprecated. It may be removed soon. ###"
        )
        self.output(warning_message, verbose_level=0)

if __name__ == '__main__':
    PROCESSOR = DeprecationWarning()
    PROCESSOR.execute_shell()

lang: Python
proba: 0
commit: 1e139567767a98914df90ec152d543bb8bfde38c
subject: add test
old_file: basic_zappa_project/public/views_tests.py
new_file: basic_zappa_project/public/views_tests.py
new_contents:

from basic_zappa_project.test_utils import BaseTestCase

class TestViews(BaseTestCase):
    def test_status(self):
        expected = {'status': 'ok'}
        response = self.client.get('/status')
        self.assert200(response)
        self.assertEqual(response.json, expected)

    def test_about(self):
        response = self.client.get('/about')
        self.assert200(response)

    def test_home_get(self):
        response = self.client.get('/')
        self.assert200(response)

    def test_register_get(self):
        response = self.client.get('/register')
        self.assert200(response)

old_contents:

from basic_zappa_project.test_utils import BaseTestCase

class TestViews(BaseTestCase):
    def test_status(self):
        expected = {'status': 'ok'}
        response = self.client.get('/status')
        self.assert200(response)
        self.assertEqual(response.json, expected)

    def test_about(self):
        response = self.client.get('/about')
        self.assert200(response)

    def test_home_get(self):
        response = self.client.get('/')
        self.assert200(response)

lang: Python
proba: 0.000002
commit: a82fc92938a647de620cf8a96fd5907c08060c32
subject: fix mistake
old_file: scripts/install/install.py
new_file: scripts/install/install.py
new_contents:

import os
import subprocess
import os.path

def apt_get_install(fname):
    with open(fname, 'r') as f:
        items = f.readlines()
        for item in items:
            os.system('sudo apt-get install -y %s' % (item))

def npm_global_install(fname):
    with open(fname, 'r') as f:
        items = f.readlines()
        for item in items:
            os.system('sudo npm -g install %s' % (item))

def pip_install(fname):
    with open(fname, 'r') as f:
        items = f.readlines()
        for item in items:
            os.system('sudo pip install %s' % (item))

def cmd_exists(cmd):
    # this is from http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
    return subprocess.call("type " + cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0

functions_to_handle_requirements = {}
functions_to_handle_requirements['apt_get'] = apt_get_install
functions_to_handle_requirements['npm'] = npm_global_install
functions_to_handle_requirements['pip'] = pip_install

order_of_files_to_handle = ['apt_get_requirements.txt', 'npm_requirements.txt', 'pip_requirements.txt']

for fname in order_of_files_to_handle:
    if os.path.isfile(fname):
        # assume fname endswith _requirements.txt
        l = len('_requirements.txt')
        fname_first_part = fname[:-l]
        functions_to_handle_requirements[fname_first_part](fname)

old_contents:

import os
import subprocess
import os.path

def apt_get_install(what):
    with open(fname, 'r') as f:
        items = f.readlines()
        for item in items:
            os.system('sudo apt-get install -y %s' % (item))

def npm_global_install(what):
    with open(fname, 'r') as f:
        items = f.readlines()
        for item in items:
            os.system('sudo npm -g install %s' % (item))

def pip_install(what):
    with open(fname, 'r') as f:
        items = f.readlines()
        for item in items:
            os.system('sudo pip install %s' % (item))

def cmd_exists(cmd):
    # this is from http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
    return subprocess.call("type " + cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0

functions_to_handle_requirements = {}
functions_to_handle_requirements['apt_get'] = apt_get_install
functions_to_handle_requirements['npm'] = npm_global_install
functions_to_handle_requirements['pip'] = pip_install

order_of_files_to_handle = ['apt_get_requirements.txt', 'npm_requirements.txt', 'pip_requirements.txt']

for fname in order_of_files_to_handle:
    if os.path.isfile(fname):
        # assume fname endswith _requirements.txt
        l = len('_requirements.txt')
        fname_first_part = fname[:-l]
        functions_to_handle_requirements[fname_first_part](fname)

lang: Python
proba: 0.999908
commit: 5179172b4a6d61ea60fec9cd7624725031017482
subject: Make use of the sqlite3 package
old_file: dump.py
new_file: dump.py
new_contents:

#!/usr/bin/env python

import glob, os, sqlite3, sys

sys.path.append(os.path.abspath("csv2sqlite"))
import csv2sqlite

setup_sql = {
    "job_events": """
        DROP TABLE IF EXISTS `job_events`;
        CREATE TABLE `job_events` (
            `time` INTEGER NOT NULL,
            `missing info` INTEGER,
            `job ID` INTEGER NOT NULL,
            `event type` INTEGER NOT NULL,
            `user` TEXT,
            `scheduling class` INTEGER,
            `job name` TEXT,
            `logical job name` TEXT
        );
    """,
    "task_events": """
        DROP TABLE IF EXISTS `task_events`;
        CREATE TABLE `task_events` (
            `time` INTEGER NOT NULL,
            `missing info` INTEGER,
            `job ID` INTEGER NOT NULL,
            `task index` INTEGER NOT NULL,
            `machine ID` INTEGER,
            `event type` INTEGER NOT NULL,
            `user` TEXT,
            `scheduling class` INTEGER,
            `priority` INTEGER NOT NULL,
            `CPU request` REAL,
            `memory request` REAL,
            `disk space request` REAL,
            `different machines restriction` INTEGER
        );
    """,
    "task_usage": """
        DROP TABLE IF EXISTS `task_usage`;
        CREATE TABLE `task_usage` (
            `start time` INTEGER NOT NULL,
            `end time` INTEGER NOT NULL,
            `job ID` INTEGER NOT NULL,
            `task index` INTEGER NOT NULL,
            `machine ID` INTEGER NOT NULL,
            `CPU rate` REAL,
            `canonical memory usage` REAL,
            `assigned memory usage` REAL,
            `unmapped page cache` REAL,
            `total page cache` REAL,
            `maximum memory usage` REAL,
            `disk IO time` REAL,
            `local disk space usage` REAL,
            `maximum CPU rate` REAL,
            `maximum disk IO time` REAL,
            `cycles per instruction` REAL,
            `memory accesses per instruction` REAL,
            `sample portion` REAL,
            `aggregation type` INTEGER,
            `sampled CPU usage` REAL
        );
    """
}

def fail(message):
    print(message)
    sys.exit(1)

def setup_sqlite(table):
    filename = 'google.sqlite3'
    connection = sqlite3.connect(filename)
    cursor = connection.cursor()
    for sql in setup_sql[table].split(';'):
        cursor.execute(sql)
    connection.commit()
    connection.close()
    return filename

def find_parts(table):
    return sorted(glob.glob(os.path.join(table, '*.csv.gz')))

for table in sys.argv[1:]:
    sqlite = setup_sqlite(table)
    headers = 'headers/%s.csv' % table
    types = 'types/%s.csv' % table
    for csv in find_parts(table):
        print('Processing %s...' % csv)
        csv2sqlite.convert(csv, sqlite, table, headers, 'gzip', types)

old_contents:

#!/usr/bin/env python

import glob, os, subprocess, sys

sys.path.append(os.path.abspath("csv2sqlite"))
import csv2sqlite

setup_sql = {
    "job_events": """
        DROP TABLE IF EXISTS `job_events`;
        CREATE TABLE `job_events` (
            `time` INTEGER NOT NULL,
            `missing info` INTEGER,
            `job ID` INTEGER NOT NULL,
            `event type` INTEGER NOT NULL,
            `user` TEXT,
            `scheduling class` INTEGER,
            `job name` TEXT,
            `logical job name` TEXT
        );
    """,
    "task_events": """
        DROP TABLE IF EXISTS `task_events`;
        CREATE TABLE `task_events` (
            `time` INTEGER NOT NULL,
            `missing info` INTEGER,
            `job ID` INTEGER NOT NULL,
            `task index` INTEGER NOT NULL,
            `machine ID` INTEGER,
            `event type` INTEGER NOT NULL,
            `user` TEXT,
            `scheduling class` INTEGER,
            `priority` INTEGER NOT NULL,
            `CPU request` REAL,
            `memory request` REAL,
            `disk space request` REAL,
            `different machines restriction` INTEGER
        );
    """,
    "task_usage": """
        DROP TABLE IF EXISTS `task_usage`;
        CREATE TABLE `task_usage` (
            `start time` INTEGER NOT NULL,
            `end time` INTEGER NOT NULL,
            `job ID` INTEGER NOT NULL,
            `task index` INTEGER NOT NULL,
            `machine ID` INTEGER NOT NULL,
            `CPU rate` REAL,
            `canonical memory usage` REAL,
            `assigned memory usage` REAL,
            `unmapped page cache` REAL,
            `total page cache` REAL,
            `maximum memory usage` REAL,
            `disk IO time` REAL,
            `local disk space usage` REAL,
            `maximum CPU rate` REAL,
            `maximum disk IO time` REAL,
            `cycles per instruction` REAL,
            `memory accesses per instruction` REAL,
            `sample portion` REAL,
            `aggregation type` INTEGER,
            `sampled CPU usage` REAL
        );
    """
}

def fail(message):
    print(message)
    sys.exit(1)

def setup_sqlite(table):
    filename = 'google.sqlite3'
    if not table in setup_sql:
        fail('the table is unknown')
    sql = setup_sql[table]
    p = subprocess.Popen(['sqlite3', filename], stdin=subprocess.PIPE)
    p.communicate(input=bytes(sql))
    p.wait()
    if p.returncode != 0:
        fail('cannot set up the database')
    return filename

def find_parts(table):
    return sorted(glob.glob(os.path.join(table, '*.csv.gz')))

for table in sys.argv[1:]:
    sqlite = setup_sqlite(table)
    headers = 'headers/%s.csv' % table
    types = 'types/%s.csv' % table
    for csv in find_parts(table):
        print('Processing %s...' % csv)
        csv2sqlite.convert(csv, sqlite, table, headers, 'gzip', types)

lang: Python
proba: 0.000001
commit: 7f22812917846dbc420eee8c80cf3a4ee7d2fc1c
subject: fix typo in tag (#618)
old_file: scripts/publish_release.py
new_file: scripts/publish_release.py
new_contents:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Insert a TOML header into the latest release note."""

from __future__ import absolute_import, print_function

import sys
from datetime import date
from glob import glob
from builtins import open
from os.path import join, basename
from shutil import copy

def insert_break(lines, break_pos=9):
    """
    Insert a <!--more--> tag for larger release notes.

    Parameters
    ----------
    lines : list of str
        The content of the release note.
    break_pos : int
        Line number before which a break should approximately be inserted.

    Returns
    -------
    list of str
        The text with the inserted tag or no modification if it was
        sufficiently short.
    """
    def line_filter(line):
        if len(line) == 0:
            return True
        return any(line.startswith(c) for c in "-*+")

    if len(lines) <= break_pos:
        return lines
    newlines = [
        i for i, line in enumerate(lines[break_pos:], start=break_pos)
        if line_filter(line.strip())]
    if len(newlines) > 0:
        break_pos = newlines[0]
    lines.insert(break_pos, "<!--more-->\n")
    return lines

def build_hugo_md(filename, tag, bump):
    """
    Build the markdown release notes for Hugo.

    Inserts the required TOML header with specific values and adds a break
    for long release notes.

    Parameters
    ----------
    filename : str, path
        The release notes file.
    tag : str
        The tag, following semantic versioning, of the current release.
    bump : {"major", "minor", "patch", "alpha", "beta"}
        The type of release.
    """
    header = [
        '+++\n',
        'date = "{}"\n'.format(date.today().isoformat()),
        'title = "{}"\n'.format(tag),
        'author = "The COBRApy Team"\n',
        'release = "{}"\n'.format(bump),
        '+++\n',
        '\n'
    ]
    with open(filename, "r") as file_h:
        content = insert_break(file_h.readlines())
    header.extend(content)
    with open(filename, "w") as file_h:
        file_h.writelines(header)

def intify(filename):
    """
    Turn a release note filename into something sortable.

    Parameters
    ----------
    filename : str
        A release note of expected filename format
        '<major>.<minor>.<patch>.md'.

    Returns
    -------
    tuple
        A pair of the major and minor versions as integers.
    """
    tmp = filename[:-3].split(".")
    return int(tmp[0]), int(tmp[1])

def find_bump(target, tag):
    """Identify the kind of release by comparing to existing ones."""
    tmp = tag.split(".")
    existing = [intify(basename(f)) for f in glob(join(target, "[0-9]*.md"))]
    latest = max(existing)
    if int(tmp[0]) > latest[0]:
        return "major"
    elif int(tmp[1]) > latest[1]:
        return "minor"
    else:
        return "patch"

def main(argv):
    """
    Identify the release type and create a new target file with TOML header.

    Requires three arguments.
    """
    source, target, tag = argv
    if "a" in tag:
        bump = "alpha"
    if "b" in tag:
        bump = "beta"
    else:
        bump = find_bump(target, tag)
    filename = "{}.md".format(tag)
    destination = copy(join(source, filename), target)
    build_hugo_md(destination, tag, bump)

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("Usage:\n{} <source dir> <target dir> <tag>"
              "".format(sys.argv[0]))
        sys.exit(2)
    sys.exit(main(sys.argv[1:]))

old_contents:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Insert a TOML header into the latest release note."""

from __future__ import absolute_import, print_function

import sys
from datetime import date
from glob import glob
from builtins import open
from os.path import join, basename
from shutil import copy

def insert_break(lines, break_pos=9):
    """
    Insert a <!-- more --> tag for larger release notes.

    Parameters
    ----------
    lines : list of str
        The content of the release note.
    break_pos : int
        Line number before which a break should approximately be inserted.

    Returns
    -------
    list of str
        The text with the inserted tag or no modification if it was
        sufficiently short.
    """
    def line_filter(line):
        if len(line) == 0:
            return True
        return any(line.startswith(c) for c in "-*+")

    if len(lines) <= break_pos:
        return lines
    newlines = [
        i for i, line in enumerate(lines[break_pos:], start=break_pos)
        if line_filter(line.strip())]
    if len(newlines) > 0:
        break_pos = newlines[0]
    lines.insert(break_pos, "<!-- more -->\n")
    return lines

def build_hugo_md(filename, tag, bump):
    """
    Build the markdown release notes for Hugo.

    Inserts the required TOML header with specific values and adds a break
    for long release notes.

    Parameters
    ----------
    filename : str, path
        The release notes file.
    tag : str
        The tag, following semantic versioning, of the current release.
    bump : {"major", "minor", "patch", "alpha", "beta"}
        The type of release.
    """
    header = [
        '+++\n',
        'date = "{}"\n'.format(date.today().isoformat()),
        'title = "{}"\n'.format(tag),
        'author = "The COBRApy Team"\n',
        'release = "{}"\n'.format(bump),
        '+++\n',
        '\n'
    ]
    with open(filename, "r") as file_h:
        content = insert_break(file_h.readlines())
    header.extend(content)
    with open(filename, "w") as file_h:
        file_h.writelines(header)

def intify(filename):
    """
    Turn a release note filename into something sortable.

    Parameters
    ----------
    filename : str
        A release note of expected filename format
        '<major>.<minor>.<patch>.md'.

    Returns
    -------
    tuple
        A pair of the major and minor versions as integers.
    """
    tmp = filename[:-3].split(".")
    return int(tmp[0]), int(tmp[1])

def find_bump(target, tag):
    """Identify the kind of release by comparing to existing ones."""
    tmp = tag.split(".")
    existing = [intify(basename(f)) for f in glob(join(target, "[0-9]*.md"))]
    latest = max(existing)
    if int(tmp[0]) > latest[0]:
        return "major"
    elif int(tmp[1]) > latest[1]:
        return "minor"
    else:
        return "patch"

def main(argv):
    """
    Identify the release type and create a new target file with TOML header.

    Requires three arguments.
    """
    source, target, tag = argv
    if "a" in tag:
        bump = "alpha"
    if "b" in tag:
        bump = "beta"
    else:
        bump = find_bump(target, tag)
    filename = "{}.md".format(tag)
    destination = copy(join(source, filename), target)
    build_hugo_md(destination, tag, bump)

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("Usage:\n{} <source dir> <target dir> <tag>"
              "".format(sys.argv[0]))
        sys.exit(2)
    sys.exit(main(sys.argv[1:]))

lang: Python
proba: 0.000048
commit: 19c0e8d856049677bc7de2bc293a87a0aac306f8
subject: Fix wsgi config file access for HTTPD
old_file: httpd/keystone.py
new_file: httpd/keystone.py
new_contents:

import os

from paste import deploy

from keystone import config
from keystone.common import logging

LOG = logging.getLogger(__name__)
CONF = config.CONF

config_files = ['/etc/keystone/keystone.conf']
CONF(project='keystone', default_config_files=config_files)

conf = CONF.config_file[0]
name = os.path.basename(__file__)

if CONF.debug:
    CONF.log_opt_values(logging.getLogger(CONF.prog), logging.DEBUG)

options = deploy.appconfig('config:%s' % CONF.config_file[0])

application = deploy.loadapp('config:%s' % conf, name=name)

old_contents:

import os

from paste import deploy

from keystone import config
from keystone.common import logging

LOG = logging.getLogger(__name__)
CONF = config.CONF

config_files = ['/etc/keystone.conf']
CONF(config_files=config_files)

conf = CONF.config_file[0]
name = os.path.basename(__file__)

if CONF.debug:
    CONF.log_opt_values(logging.getLogger(CONF.prog), logging.DEBUG)

options = deploy.appconfig('config:%s' % CONF.config_file[0])

application = deploy.loadapp('config:%s' % conf, name=name)

lang: Python
proba: 0.000001
commit: b7e3901411059bbfa8ab83ec1f6fbf135aa50172
subject: Update UserTime.py
old_file: Cogs/UserTime.py
new_file: Cogs/UserTime.py
new_contents:

import datetime
import pytz
from Cogs import FuzzySearch

def setup(bot):
    # This module isn't actually a cog
    return

def getClockForTime(time_string):
    # Assumes a HH:MM PP format
    try:
        time = time_string.split()
        time = time[0].split(":")
        hour = int(time[0])
        minute = int(time[1])
    except:
        return ""
    clock_string = ""
    if min > 44:
        clock_string = str(hour + 1) if hour < 12 else "1"
    elif min > 14:
        clock_string = str(hour) + "30"
    else:
        clock_string = str(hour)
    return ":clock" + clock_string + ":"

def getUserTime(member, settings, time = None, strft = "%Y-%m-%d %I:%M %p", clock = True):
    # Returns a dict representing the time from the passed member's perspective
    offset = settings.getGlobalUserStat(member, "TimeZone")
    if offset == None:
        offset = settings.getGlobalUserStat(member, "UTCOffset")
    if offset == None:
        # No offset or tz - return UTC
        t = getClockForTime(time.strftime(strft)) if clock else time.strftime(strft)
        return { "zone" : 'UTC', "time" : t }
    # At this point - we need to determine if we have an offset - or possibly a timezone passed
    t = getTimeFromTZ(offset, time, clock)
    if t == None:
        # We did not get a zone
        t = getTimeFromOffset(offset, time, clock)
    return t

def getTimeFromOffset(offset, t = None, strft = "%Y-%m-%d %I:%M %p", clock = True):
    offset = offset.replace('+', '')
    # Split time string by : and get hour/minute values
    try:
        hours, minutes = map(int, offset.split(':'))
    except Exception:
        try:
            hours = int(offset)
            minutes = 0
        except Exception:
            return None
            # await ctx.channel.send('Offset has to be in +-H:M!')
            # return
    msg = 'UTC'
    # Get the time
    if t == None:
        t = datetime.datetime.utcnow()
    # Apply offset
    if hours > 0:
        # Apply positive offset
        msg += '+{}'.format(offset)
        td = datetime.timedelta(hours=hours, minutes=minutes)
        newTime = t + td
    elif hours < 0:
        # Apply negative offset
        msg += '{}'.format(offset)
        td = datetime.timedelta(hours=(-1*hours), minutes=(-1*minutes))
        newTime = t - td
    else:
        # No offset
        newTime = t
    ti = getClockForTime(newTime.strftime(strft)) if clock else newTime.strftime(strft)
    return { "zone" : msg, "time" : ti }

def getTimeFromTZ(tz, t = None, strft = "%Y-%m-%d %I:%M %p", clock = True):
    # Assume sanitized zones - as they're pulled from pytz
    # Let's get the timezone list
    tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
    if not tz_list[0]['Ratio'] == 1:
        # We didn't find a complete match
        return None
    zone = pytz.timezone(tz_list[0]['Item'])
    if t == None:
        zone_now = datetime.datetime.now(zone)
    else:
        zone_now = pytz.utc.localize(t, is_dst=None).astimezone(zone)
        #zone_now = t.astimezone(zone)
    ti = getClockForTime(zone_now.strftime(strft)) if clock else zone_now.strftime(strft)
    return { "zone" : tz_list[0]['Item'], "time" : ti}

old_contents:

import datetime
import pytz
from Cogs import FuzzySearch

def setup(bot):
    # This module isn't actually a cog
    return

def getUserTime(member, settings, time = None, strft = "%Y-%m-%d %I:%M %p"):
    # Returns a dict representing the time from the passed member's perspective
    offset = settings.getGlobalUserStat(member, "TimeZone")
    if offset == None:
        offset = settings.getGlobalUserStat(member, "UTCOffset")
    if offset == None:
        # No offset or tz - return UTC
        return { "zone" : 'UTC', "time" : time.strftime(strft) }
    # At this point - we need to determine if we have an offset - or possibly a timezone passed
    t = getTimeFromTZ(offset, time)
    if t == None:
        # We did not get a zone
        t = getTimeFromOffset(offset, time)
    return t

def getTimeFromOffset(offset, t = None, strft = "%Y-%m-%d %I:%M %p"):
    offset = offset.replace('+', '')
    # Split time string by : and get hour/minute values
    try:
        hours, minutes = map(int, offset.split(':'))
    except Exception:
        try:
            hours = int(offset)
            minutes = 0
        except Exception:
            return None
            # await ctx.channel.send('Offset has to be in +-H:M!')
            # return
    msg = 'UTC'
    # Get the time
    if t == None:
        t = datetime.datetime.utcnow()
    # Apply offset
    if hours > 0:
        # Apply positive offset
        msg += '+{}'.format(offset)
        td = datetime.timedelta(hours=hours, minutes=minutes)
        newTime = t + td
    elif hours < 0:
        # Apply negative offset
        msg += '{}'.format(offset)
        td = datetime.timedelta(hours=(-1*hours), minutes=(-1*minutes))
        newTime = t - td
    else:
        # No offset
        newTime = t
    return { "zone" : msg, "time" : newTime.strftime(strft) }

def getTimeFromTZ(tz, t = None, strft = "%Y-%m-%d %I:%M %p"):
    # Assume sanitized zones - as they're pulled from pytz
    # Let's get the timezone list
    tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
    if not tz_list[0]['Ratio'] == 1:
        # We didn't find a complete match
        return None
    zone = pytz.timezone(tz_list[0]['Item'])
    if t == None:
        zone_now = datetime.datetime.now(zone)
    else:
        zone_now = pytz.utc.localize(t, is_dst=None).astimezone(zone)
        #zone_now = t.astimezone(zone)
    return { "zone" : tz_list[0]['Item'], "time" : zone_now.strftime(strft) }

lang: Python
proba: 0.000001
commit: 8eea594e684053a7fbfe1f2f946343cf809be058
subject: Rename server tests
old_file: treecat/serving_test.py
new_file: treecat/serving_test.py
new_contents:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

import numpy as np
import pytest

from treecat.serving import TreeCatServer
from treecat.testutil import TINY_CONFIG
from treecat.testutil import TINY_DATA
from treecat.testutil import TINY_MASK
from treecat.training import train_model

@pytest.fixture(scope='module')
def model():
    return train_model(TINY_DATA, TINY_MASK, TINY_CONFIG)

def test_server_init(model):
    server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
    server._get_session(7)

def test_server_sample_shape(model):
    N, V = TINY_DATA.shape
    server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
    # Sample all possible mask patterns.
    factors = [[True, False]] * V
    for mask in itertools.product(*factors):
        mask = np.array(mask, dtype=np.bool_)
        samples = server.sample(TINY_DATA, mask)
        assert samples.shape == TINY_DATA.shape
        assert samples.dtype == TINY_DATA.dtype
        assert np.allclose(samples[:, mask], TINY_DATA[:, mask])

def test_server_logprob_shape(model):
    N, V = TINY_DATA.shape
    server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
    # Sample all possible mask patterns.
    factors = [[True, False]] * V
    for mask in itertools.product(*factors):
        mask = np.array(mask, dtype=np.bool_)
        logprob = server.logprob(TINY_DATA, mask)
        assert logprob.shape == (N, )
        assert np.isfinite(logprob).all()
        assert (logprob < 0.0).all()  # Assuming features are discrete.

@pytest.mark.xfail
def test_server_logprob_is_normalized(model):
    N, V = TINY_DATA.shape
    C = TINY_CONFIG['num_categories']
    server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
    # The total probability of all possible rows should be 1.
    factors = [range(C)] * V
    data = np.array(list(itertools.product(*factors)), dtype=np.int32)
    mask = np.array([True] * V, dtype=np.bool_)
    logprob = server.logprob(data, mask)
    total = np.exp(np.logaddexp.reduce(logprob))
    assert abs(total - 1.0) < 1e-6, total

old_contents:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

import numpy as np
import pytest

from treecat.serving import TreeCatServer
from treecat.testutil import TINY_CONFIG
from treecat.testutil import TINY_DATA
from treecat.testutil import TINY_MASK
from treecat.training import train_model

@pytest.fixture(scope='module')
def model():
    return train_model(TINY_DATA, TINY_MASK, TINY_CONFIG)

def test_server_init(model):
    server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
    server._get_session(7)

def test_server_sample(model):
    N, V = TINY_DATA.shape
    server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
    # Sample all possible mask patterns.
    factors = [[True, False]] * V
    for mask in itertools.product(*factors):
        mask = np.array(mask, dtype=np.bool_)
        samples = server.sample(TINY_DATA, mask)
        assert samples.shape == TINY_DATA.shape
        assert samples.dtype == TINY_DATA.dtype
        assert np.allclose(samples[:, mask], TINY_DATA[:, mask])

def test_server_logprob(model):
    N, V = TINY_DATA.shape
    server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
    # Sample all possible mask patterns.
    factors = [[True, False]] * V
    for mask in itertools.product(*factors):
        mask = np.array(mask, dtype=np.bool_)
        logprob = server.logprob(TINY_DATA, mask)
        assert logprob.shape == (N, )
        assert np.isfinite(logprob).all()
        assert (logprob < 0.0).all()  # Assuming features are discrete.

@pytest.mark.xfail
def test_server_logprob_total(model):
    N, V = TINY_DATA.shape
    C = TINY_CONFIG['num_categories']
    server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
    factors = [range(C)] * V
    data = np.array(list(itertools.product(*factors)), dtype=np.int32)
    mask = np.array([True] * V, dtype=np.bool_)
    logprob = server.logprob(data, mask)
    total = np.exp(np.logaddexp.reduce(logprob))
    assert abs(total - 1.0) < 1e-6, total

lang: Python
proba: 0.000001
commit: 1ba8cadf93e80107902e142c4644d03668592c5f
subject: add global and directive specific compat options
old_file: cautodoc.py
new_file: cautodoc.py
new_contents:

# coding=utf-8
"""Hawkmoth - Sphinx C Domain autodoc directive extension"""

__author__ = "Jani Nikula <jani@nikula.org>"
__copyright__ = "Copyright (c) 2016-2017, Jani Nikula <jani@nikula.org>"
__version__ = '0.1'
__license__ = "BSD 2-Clause, see LICENSE for details"

import glob
import os
import re
import stat
import subprocess
import sys

from docutils import nodes, statemachine
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import AutodocReporter
from sphinx.util.compat import Directive

# The parser bits
from hawkmoth import parse

# This is the part that interfaces with Sphinx. Do not depend on Clang here.

class CAutoDocDirective(Directive):
    """Extract documentation comments from the specified file"""
    required_argument = 1
    optional_arguments = 1
    # Allow passing a variable number of file patterns as arguments
    final_argument_whitespace = True
    # FIXME: potentially need to pass clang options, such as -D etc. Should that
    # be per directive? Or global default and overrides?
    option_spec = {
        'compat': directives.unchanged_required,
    }
    has_content = False

    def parse(self, viewlist, filename, compat):
        comments = parse(filename, compat=compat)
        for (comment, meta) in comments:
            lineoffset = meta['line']
            lines = statemachine.string2lines(comment, 8, convert_whitespace=True)
            for line in lines:
                viewlist.append(line, filename, lineoffset)
                lineoffset += 1

    def run(self):
        env = self.state.document.settings.env
        compat = self.options.get('compat', env.config.cautodoc_compat)

        result = ViewList()

        for pattern in self.arguments[0].split():
            for filename in glob.iglob(env.config.cautodoc_root + '/' + pattern):
                mode = os.stat(filename).st_mode
                if not stat.S_ISDIR(mode):
                    # Tell Sphinx about the dependency
                    env.note_dependency(os.path.abspath(filename))
                    # FIXME: pass relevant options to parser
                    self.parse(result, filename, compat)

        node = nodes.section()
        node.document = self.state.document
        self.state.nested_parse(result, self.content_offset, node)

        return node.children

def setup(app):
    app.require_sphinx('1.3')
    app.add_config_value('cautodoc_root', '.', 'env')
    app.add_config_value('cautodoc_compat', None, 'env')
    app.add_directive_to_domain('c', 'autodoc', CAutoDocDirective)

    return dict(version = __version__,
                parallel_read_safe = True,
                parallel_write_safe = True)

old_contents:

# coding=utf-8
"""Hawkmoth - Sphinx C Domain autodoc directive extension"""

__author__ = "Jani Nikula <jani@nikula.org>"
__copyright__ = "Copyright (c) 2016-2017, Jani Nikula <jani@nikula.org>"
__version__ = '0.1'
__license__ = "BSD 2-Clause, see LICENSE for details"

import glob
import os
import re
import stat
import subprocess
import sys

from docutils import nodes, statemachine
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import AutodocReporter
from sphinx.util.compat import Directive

# The parser bits
from hawkmoth import parse

# This is the part that interfaces with Sphinx. Do not depend on Clang here.

class CAutoDocDirective(Directive):
    """Extract documentation comments from the specified file"""
    required_argument = 1
    optional_arguments = 1
    # Allow passing a variable number of file patterns as arguments
    final_argument_whitespace = True
    # FIXME: potentially need to pass clang options, such as -D etc. Should that
    # be per directive? Or global default and overrides?
    option_spec = {
        # FIXME: figure out passing to parser, have a global default option
        'compat': directives.unchanged,
    }
    has_content = False

    def parse(self, viewlist, filename):
        comments = parse(filename)
        for (comment, meta) in comments:
            lineoffset = meta['line']
            lines = statemachine.string2lines(comment, 8, convert_whitespace=True)
            for line in lines:
                viewlist.append(line, filename, lineoffset)
                lineoffset += 1

    def run(self):
        env = self.state.document.settings.env

        result = ViewList()

        for pattern in self.arguments[0].split():
            for filename in glob.iglob(env.config.cautodoc_root + '/' + pattern):
                mode = os.stat(filename).st_mode
                if not stat.S_ISDIR(mode):
                    # Tell Sphinx about the dependency
                    env.note_dependency(os.path.abspath(filename))
                    # FIXME: pass relevant options to parser
                    self.parse(result, filename)

        node = nodes.section()
        node.document = self.state.document
        self.state.nested_parse(result, self.content_offset, node)

        return node.children

def setup(app):
    app.require_sphinx('1.3')
    app.add_config_value('cautodoc_root', '.', 'env')
    app.add_directive_to_domain('c', 'autodoc', CAutoDocDirective)

    return dict(version = __version__,
                parallel_read_safe = True,
                parallel_write_safe = True)

lang: Python
proba: 0
commit: fa21acc470d9c32619b3c67dcce54c7b0a69a07a
subject: Fix inadvertent requirement of hg, svn, git, etc.
old_file: lib/spack/spack/test/__init__.py
new_file: lib/spack/spack/test/__init__.py
new_contents:

##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import sys
import unittest

import llnl.util.tty as tty
from llnl.util.tty.colify import colify

import spack

"""Names of tests to be included in Spack's test suite"""
test_names = ['versions', 'url_parse', 'url_substitution', 'packages', 'stage',
              'spec_syntax', 'spec_semantics', 'spec_dag', 'concretize',
              'multimethod', 'install', 'package_sanity', 'config',
              'directory_layout', 'python_version', 'git_fetch', 'svn_fetch',
              'hg_fetch', 'mirror', 'url_extrapolate']

def list_tests():
    """Return names of all tests that can be run for Spack."""
    return test_names

def run(names, verbose=False):
    """Run tests with the supplied names.  Names should be a list.
       If it's empty, run ALL of Spack's tests."""
    verbosity = 1 if not verbose else 2

    if not names:
        names = test_names
    else:
        for test in names:
            if test not in test_names:
                tty.error("%s is not a valid spack test name." % test,
                          "Valid names are:")
                colify(test_names, indent=4)
                sys.exit(1)

    runner = unittest.TextTestRunner(verbosity=verbosity)

    testsRun = errors = failures = 0
    for test in names:
        module = 'spack.test.' + test
        print module
        suite = unittest.defaultTestLoader.loadTestsFromName(module)

        tty.msg("Running test: %s" % test)
        result = runner.run(suite)
        testsRun += result.testsRun
        errors += len(result.errors)
        failures += len(result.failures)

    succeeded = not errors and not failures
    tty.msg("Tests Complete.",
            "%5d tests run" % testsRun,
            "%5d failures" % failures,
            "%5d errors" % errors)

    if not errors and not failures:
        tty.info("OK", format='g')
    else:
        tty.info("FAIL", format='r')
        sys.exit(1)

old_contents:

##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import sys
import unittest

import llnl.util.tty as tty
from llnl.util.tty.colify import colify

import spack
import spack.test.install

"""Names of tests to be included in Spack's test suite"""
test_names = ['versions', 'url_parse', 'url_substitution', 'packages', 'stage',
              'spec_syntax', 'spec_semantics', 'spec_dag', 'concretize',
              'multimethod', 'install', 'package_sanity', 'config',
              'directory_layout', 'python_version', 'git_fetch', 'svn_fetch',
              'hg_fetch', 'mirror', 'url_extrapolate']

def list_tests():
    """Return names of all tests that can be run for Spack."""
    return test_names

def run(names, verbose=False):
    """Run tests with the supplied names.  Names should be a list.
       If it's empty, run ALL of Spack's tests."""
    verbosity = 1 if not verbose else 2

    if not names:
        names = test_names
    else:
        for test in names:
            if test not in test_names:
                tty.error("%s is not a valid spack test name." % test,
                          "Valid names are:")
                colify(test_names, indent=4)
                sys.exit(1)

    runner = unittest.TextTestRunner(verbosity=verbosity)

    testsRun = errors = failures = 0
    for test in names:
        module = 'spack.test.' + test
        print module
        suite = unittest.defaultTestLoader.loadTestsFromName(module)

        tty.msg("Running test: %s" % test)
        result = runner.run(suite)
        testsRun += result.testsRun
        errors += len(result.errors)
        failures += len(result.failures)

    succeeded = not errors and not failures
    tty.msg("Tests Complete.",
            "%5d tests run" % testsRun,
            "%5d failures" % failures,
            "%5d errors" % errors)

    if not errors and not failures:
        tty.info("OK", format='g')
    else:
        tty.info("FAIL", format='r')
        sys.exit(1)

lang: Python
proba: 0.000001
commit: 671ca30892e3ebeb0a9140f95690853b4b92dc02
subject: Fix reverse since we deprecated post_object_list
old_file: post/views.py
new_file: post/views.py
new_contents:

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _

from post.models import Post
from jmbo.generic.views import GenericObjectDetail, GenericObjectList
from jmbo.view_modifiers import DefaultViewModifier

class ObjectList(GenericObjectList):
    def get_extra_context(self, *args, **kwargs):
        return {'title': _('Posts')}

    def get_view_modifier(self, request, *args, **kwargs):
        return DefaultViewModifier(request, *args, **kwargs)

    def get_paginate_by(self, *args, **kwargs):
        return 12

    def get_queryset(self, *args, **kwargs):
        return Post.permitted.all()

object_list = ObjectList()

class ObjectDetail(GenericObjectDetail):
    def get_queryset(self, *args, **kwargs):
        return Post.permitted.all()

    def get_extra_context(self, *args, **kwargs):
        return {'title': 'Posts'}

    def get_view_modifier(self, request, *args, **kwargs):
        return DefaultViewModifier(
            request,
            base_url=reverse("object_list", args=['post', 'post']),
            ignore_defaults=True,
            *args,
            **kwargs
        )

object_detail = ObjectDetail()

old_contents:

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _

from post.models import Post
from jmbo.generic.views import GenericObjectDetail, GenericObjectList
from jmbo.view_modifiers import DefaultViewModifier

class ObjectList(GenericObjectList):
    def get_extra_context(self, *args, **kwargs):
        return {'title': _('Posts')}

    def get_view_modifier(self, request, *args, **kwargs):
        return DefaultViewModifier(request, *args, **kwargs)

    def get_paginate_by(self, *args, **kwargs):
        return 12

    def get_queryset(self, *args, **kwargs):
        return Post.permitted.all()

object_list = ObjectList()

class ObjectDetail(GenericObjectDetail):
    def get_queryset(self, *args, **kwargs):
        return Post.permitted.all()

    def get_extra_context(self, *args, **kwargs):
        return {'title': 'Posts'}

    def get_view_modifier(self, request, *args, **kwargs):
        return DefaultViewModifier(
            request,
            base_url=reverse("post_object_list"),
            ignore_defaults=True,
            *args,
            **kwargs
        )

object_detail = ObjectDetail()

lang: Python
proba: 0.000014
commit: a03c61430abac8cac5e522a3bf391175cd261cec
subject: fix tests
old_file: gammafit/tests/test_onezone.py
new_file: gammafit/tests/test_onezone.py
new_contents:

# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
import numpy as np
from numpy.testing import assert_approx_equal

electronozmpars={
    'seedspec':'CMB',
    'index':2.0,
    'cutoff':1e13,
    'beta':1.0,
    'ngamd':100,
    'gmin':1e4,
    'gmax':1e10,
    }

def test_electronozm():
    from ..onezone import ElectronOZM

    ozm = ElectronOZM( np.logspace(0,15,1000), 1, **electronozmpars)
    ozm.calc_outspec()

    lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    assert_approx_equal(lsy,0.016769058688230903)

    lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    assert_approx_equal(lic,214080823.28721327)

#def test_electronozm_evolve():
    #from ..onezone import ElectronOZM

    #ozm = ElectronOZM( np.logspace(0,15,1000), 1, evolve_nelec=True, **electronozmpars)
    #ozm.calc_outspec()

    #lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    #assert_approx_equal(lsy,5718447729.5694494)

    #lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    #assert_approx_equal(lic,1.0514223815442389e+20)

def test_protonozm():
    from ..onezone import ProtonOZM

    ozm = ProtonOZM( np.logspace(8,15,100), 1, index=2.0,cutoff=1e13,beta=1.0)
    ozm.calc_outspec()

    lpp=np.trapz(ozm.specpp*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    assert_approx_equal(lpp,3.2800253974151616e-4, significant=5)

old_contents:

# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
import numpy as np
from numpy.testing import assert_approx_equal

electronozmpars={
    'seedspec':'CMB',
    'index':2.0,
    'cutoff':1e13,
    'beta':1.0,
    'ngamd':100,
    'gmin':1e4,
    'gmax':1e10,
    }

def test_electronozm():
    from ..onezone import ElectronOZM

    ozm = ElectronOZM( np.logspace(0,15,1000), 1, **electronozmpars)
    ozm.calc_outspec()

    lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    assert_approx_equal(lsy,0.016769058688230903)

    lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    assert_approx_equal(lic,214080823.28721327)

def test_electronozm_evolve():
    from ..onezone import ElectronOZM

    ozm = ElectronOZM( np.logspace(0,15,1000), 1, evolve_nelec=True, **electronozmpars)
    ozm.calc_outspec()

    lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    assert_approx_equal(lsy,5718447729.5694494)

    lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    assert_approx_equal(lic,1.0514223815442389e+20)

def test_protonozm():
    from ..onezone import ProtonOZM

    ozm = ProtonOZM( np.logspace(8,15,100), 1, index=2.0,cutoff=1e13,beta=1.0)
    ozm.calc_outspec()

    lpp=np.trapz(ozm.specpp*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
    assert_approx_equal(lpp,3.2800627079738687e+23, significant=5)

lang: Python
proba: 0.000001
commit: 3eeeb844b3936063f4f0192d46577e3f9397c107
subject: Fix ordering in cursor test
old_file: search/tests/test_query.py
new_file: search/tests/test_query.py
new_contents:

import datetime
import unittest

from google.appengine.api import search as search_api

from ..indexes import DocumentModel, Index
from ..fields import TZDateTimeField, TextField
from ..query import SearchQuery
from ..ql import Q
from .. import timezone

from .base import AppengineTestCase

class FakeDocument(DocumentModel):
    foo = TextField()
    created = TZDateTimeField()

class TestSearchQueryClone(unittest.TestCase):
    def test_clone_keywords(self):
        q = SearchQuery("dummy", document_class=FakeDocument).keywords("bar")
        q1 = q.filter(foo="baz")

        self.assertEqual(
            u"bar",
            unicode(q.query)
        )
        self.assertEqual(
            u'bar AND (foo:"baz")',
            unicode(q1.query)
        )

    def test_clone_filters(self):
        q = SearchQuery("dummy", document_class=FakeDocument).filter(
            (Q(foo="bar") | Q(foo="baz")) & ~Q(foo="neg")
        )
        q1 = q.filter(~Q(foo="neg2"))

        self.assertEqual(
            u'(((foo:"bar") OR (foo:"baz")) AND NOT (foo:"neg"))',
            unicode(q.query)
        )
        self.assertEqual(
            u'('
            '(((foo:"bar") OR (foo:"baz")) AND NOT (foo:"neg")) '
            'AND NOT (foo:"neg2")'
            ')',
            unicode(q1.query)
        )

class TestSearchQueryFilter(unittest.TestCase):
    def test_filter_on_datetime_field(self):
        xmas = datetime.datetime(2016, 12, 31, 12, tzinfo=timezone.utc)
        q = SearchQuery('dummy', document_class=FakeDocument)
        q = q.filter(created__gt=xmas)
        self.assertEqual(unicode(q.query), u'(created > 1483185600)')

class TestCursor(AppengineTestCase):
    def test_cursor(self):
        idx = Index('dummy', FakeDocument)
        idx.put(FakeDocument(foo='thing'))
        idx.put(FakeDocument(foo='thing2'))
        idx.get_range()
        q = idx.search().set_cursor().order_by('foo')[:1]
        list(q)
        self.assertTrue(q.next_cursor)
        q2 = idx.search().set_cursor(cursor=q.next_cursor).order_by('foo')
        self.assertEqual(2, len(q2))  # still returns full count
        results = list(q2)
        self.assertEqual(1, len(results))  # but only one document
        self.assertEqual('thing2', results[0].foo)
        self.assertFalse(q2.next_cursor)

old_contents:

import datetime
import unittest

from google.appengine.api import search as search_api

from ..indexes import DocumentModel, Index
from ..fields import TZDateTimeField, TextField
from ..query import SearchQuery
from ..ql import Q
from .. import timezone

from .base import AppengineTestCase

class FakeDocument(DocumentModel):
    foo = TextField()
    created = TZDateTimeField()

class TestSearchQueryClone(unittest.TestCase):
    def test_clone_keywords(self):
        q = SearchQuery("dummy", document_class=FakeDocument).keywords("bar")
        q1 = q.filter(foo="baz")

        self.assertEqual(
            u"bar",
            unicode(q.query)
        )
        self.assertEqual(
            u'bar AND (foo:"baz")',
            unicode(q1.query)
        )

    def test_clone_filters(self):
        q = SearchQuery("dummy", document_class=FakeDocument).filter(
            (Q(foo="bar") | Q(foo="baz")) & ~Q(foo="neg")
        )
        q1 = q.filter(~Q(foo="neg2"))

        self.assertEqual(
            u'(((foo:"bar") OR (foo:"baz")) AND NOT (foo:"neg"))',
            unicode(q.query)
        )
        self.assertEqual(
            u'('
            '(((foo:"bar") OR (foo:"baz")) AND NOT (foo:"neg")) '
            'AND NOT (foo:"neg2")'
            ')',
            unicode(q1.query)
        )

class TestSearchQueryFilter(unittest.TestCase):
    def test_filter_on_datetime_field(self):
        xmas = datetime.datetime(2016, 12, 31, 12, tzinfo=timezone.utc)
        q = SearchQuery('dummy', document_class=FakeDocument)
        q = q.filter(created__gt=xmas)
        self.assertEqual(unicode(q.query), u'(created > 1483185600)')

class TestCursor(AppengineTestCase):
    def test_cursor(self):
        idx = Index('dummy', FakeDocument)
        idx.put(FakeDocument(foo='thing'))
        idx.put(FakeDocument(foo='thing2'))
        idx.get_range()
        q = idx.search().set_cursor()[:1]
        list(q)
        self.assertTrue(q.next_cursor)
        q2 = idx.search().set_cursor(cursor=q.next_cursor)
        self.assertEqual(2, len(q2))  # still returns full count
        results = list(q2)
        self.assertEqual(1, len(results))  # but only one document
        self.assertEqual('thing2', results[0].foo)
        self.assertFalse(q2.next_cursor)

lang: Python
proba: 0.000002
commit: 74e4e5e507d908950d4458dff5ba4aa5c712866f
subject: Allow localization of "Self Informations"
old_file: searx/plugins/self_info.py
new_file: searx/plugins/self_info.py
new_contents:

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
'''
from flask_babel import gettext
import re

name = gettext('Self Informations')
description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
default_on = True

# Self User Agent regex
p = re.compile(b'.*user[ -]agent.*', re.IGNORECASE)

# attach callback to the post search hook
#  request: flask request object
#  ctx: the whole local context of the pre search hook
def post_search(request, search):
    if search.search_query.pageno > 1:
        return True
    if search.search_query.query == b'ip':
        x_forwarded_for = request.headers.getlist("X-Forwarded-For")
        if x_forwarded_for:
            ip = x_forwarded_for[0]
        else:
            ip = request.remote_addr
        search.result_container.answers['ip'] = {'answer': ip}
    elif p.match(search.search_query.query):
        ua = request.user_agent
        search.result_container.answers['user-agent'] = {'answer': ua}
    return True

old_contents:

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
'''
from flask_babel import gettext
import re

name = "Self Informations"
description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
default_on = True

# Self User Agent regex
p = re.compile(b'.*user[ -]agent.*', re.IGNORECASE)

# attach callback to the post search hook
#  request: flask request object
#  ctx: the whole local context of the pre search hook
def post_search(request, search):
    if search.search_query.pageno > 1:
        return True
    if search.search_query.query == b'ip':
        x_forwarded_for = request.headers.getlist("X-Forwarded-For")
        if x_forwarded_for:
            ip = x_forwarded_for[0]
        else:
            ip = request.remote_addr
        search.result_container.answers['ip'] = {'answer': ip}
    elif p.match(search.search_query.query):
        ua = request.user_agent
        search.result_container.answers['user-agent'] = {'answer': ua}
    return True

lang: Python
proba: 0
34b2c332b8d1209985b37f4e440954a15d4004d3
create directly tar.gz into final directory
datadownloader/views.py
datadownloader/views.py
import os import tarfile import subprocess from datetime import datetime from sendfile import sendfile from django.views.generic import View, TemplateView from django.conf import settings from django.shortcuts import redirect def get_base_path(): if hasattr(settings, 'DATA_DOWNLOADER_PATH'): base_path = settings.DATA_DOWNLOADER_PATH else: base_path = os.path.join(settings.BASE_DIR, 'project', 'protected_medias', 'datas') return base_path def get_archives_info(): info = {} project_name = settings.BASE_DIR.split("/")[-1] base_path = get_base_path() for section in ["db", "media", "data"]: file_name = "%s_%s.tar.gz" % (project_name, section) path = os.path.join(base_path, file_name) if os.path.exists(path): infos = os.stat(path) date = datetime.fromtimestamp(int(infos.st_mtime)) info["%s_info" % section] = {'date': date, 'size': infos.st_size} else: info["%s_info" % section] = {'date': None, 'size': None} return info def create_archive(data_type): folders = [] base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) if data_type == "db" or data_type == "data": folders.append("dumps") dumps_path = os.path.join(settings.BASE_DIR, "dumps") if os.path.exists(dumps_path): for dump_file in os.listdir(dumps_path): os.remove(os.path.join(dumps_path, dump_file)) else: os.makedirs(dumps_path) # We will tempory use makefile for run datadump, but we must found # other solution make = ['/usr/bin/make', '-C', settings.BASE_DIR, 'datadump'] subprocess.check_output(make) if data_type == "media" or data_type == "data": folders.append("project/media") with tarfile.open(path, "w:gz") as tar: for folder in folders: tar.add(folder) def delete_archive(data_type): base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) os.remove(path) class DataDownloaderMainView(TemplateView): template_name = "admin/datadownloader/index.html" def get_context_data(self, **kwargs): context = super(DataDownloaderMainView, self).get_context_data(**kwargs) context.update(get_archives_info()) return context class DataDownloaderCreateArchiveView(View): def get(self, request, *args, **kwargs): create_archive(kwargs['data_type']) return redirect('datadownloader_index') class DataDownloaderDeleteArchiveView(View): def get(self, request, *args, **kwargs): delete_archive(kwargs['data_type']) return redirect('datadownloader_index') class DataDownloaderDownloadArchiveView(View): def get(self, request, *args, **kwargs): data_type = kwargs['data_type'] base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) return sendfile(request, path, attachment=True, attachment_filename=tar_name)
import os import tarfile import subprocess from datetime import datetime from sendfile import sendfile from django.views.generic import View, TemplateView from django.conf import settings from django.shortcuts import redirect def get_base_path(): if hasattr(settings, 'DATA_DOWNLOADER_PATH'): base_path = settings.DATA_DOWNLOADER_PATH else: base_path = os.path.join(settings.BASE_DIR, 'project', 'protected_medias', 'datas') return base_path def get_archives_info(): info = {} project_name = settings.BASE_DIR.split("/")[-1] base_path = get_base_path() for section in ["db", "media", "data"]: file_name = "%s_%s.tar.gz" % (project_name, section) path = os.path.join(base_path, file_name) if os.path.exists(path): infos = os.stat(path) date = datetime.fromtimestamp(int(infos.st_mtime)) info["%s_info" % section] = {'date': date, 'size': infos.st_size} else: info["%s_info" % section] = {'date': None, 'size': None} return info def create_archive(data_type): folders = [] base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) if data_type == "db" or data_type == "data": folders.append("dumps") dumps_path = os.path.join(settings.BASE_DIR, "dumps") if os.path.exists(dumps_path): for dump_file in os.listdir(dumps_path): os.remove(os.path.join(dumps_path, dump_file)) else: os.makedirs(dumps_path) # We will tempory use makefile for run datadump, but we must found # other solution make = ['/usr/bin/make', '-C', settings.BASE_DIR, 'datadump'] subprocess.check_output(make) if data_type == "media" or data_type == "data": folders.append("project/media") with tarfile.open(tar_name, "w:gz") as tar: for folder in folders: tar.add(folder) os.rename(tar_name, path) def delete_archive(data_type): base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) os.remove(path) class DataDownloaderMainView(TemplateView): template_name = "admin/datadownloader/index.html" def get_context_data(self, **kwargs): context = super(DataDownloaderMainView, self).get_context_data(**kwargs) context.update(get_archives_info()) return context class DataDownloaderCreateArchiveView(View): def get(self, request, *args, **kwargs): create_archive(kwargs['data_type']) return redirect('datadownloader_index') class DataDownloaderDeleteArchiveView(View): def get(self, request, *args, **kwargs): delete_archive(kwargs['data_type']) return redirect('datadownloader_index') class DataDownloaderDownloadArchiveView(View): def get(self, request, *args, **kwargs): data_type = kwargs['data_type'] base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) return sendfile(request, path, attachment=True, attachment_filename=tar_name)
Python
0
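The change above boils down to opening the tarfile at its final path instead of building it in the working directory and calling `os.rename()`. A standard-library sketch with illustrative paths:

```python
import os
import tarfile

base_path = '/tmp/protected_datas'        # stand-in for DATA_DOWNLOADER_PATH
os.makedirs(base_path, exist_ok=True)
os.makedirs('dumps', exist_ok=True)       # stand-in for the dump folder
path = os.path.join(base_path, 'project_data.tar.gz')

# Write the archive directly at its destination; no temporary file, no rename.
with tarfile.open(path, 'w:gz') as tar:
    tar.add('dumps')
```

Besides saving a step, this avoids a cross-device `os.rename()` failure when the working directory and the destination live on different filesystems.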
7477f969cd8efd624e7f378f7838270c53c2755e
Allow make_reverb_dataset's caller to set max_in_flight_samples_per_worker. Default behavior is unchanged.
acme/datasets/reverb.py
acme/datasets/reverb.py
# python3 # Copyright 2018 DeepMind Technologies Limited. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for making TensorFlow datasets for sampling from Reverb replay.""" from typing import Optional from acme import specs from acme import types from acme.adders import reverb as adders import reverb import tensorflow as tf def make_reverb_dataset( server_address: str, batch_size: Optional[int] = None, prefetch_size: Optional[int] = None, table: str = adders.DEFAULT_PRIORITY_TABLE, num_parallel_calls: int = 12, max_in_flight_samples_per_worker: Optional[int] = None, # Deprecated kwargs. environment_spec: Optional[specs.EnvironmentSpec] = None, extra_spec: Optional[types.NestedSpec] = None, transition_adder: bool = False, convert_zero_size_to_none: bool = False, using_deprecated_adder: bool = False, sequence_length: Optional[int] = None, ) -> tf.data.Dataset: """Make a TensorFlow dataset backed by a Reverb trajectory replay service.""" if environment_spec or extra_spec: raise ValueError( 'The make_reverb_dataset factory function no longer requires specs as' ' as they should be passed as a signature to the reverb.Table when it' ' is created. Consider either updating your code or falling back to the' ' deprecated dataset factory in acme/datasets/deprecated.') # These are no longer used and are only kept in the call signature for # backward compatibility. del environment_spec del extra_spec del transition_adder del convert_zero_size_to_none del using_deprecated_adder del sequence_length # This is the default that used to be set by reverb.TFClient.dataset(). if max_in_flight_samples_per_worker is None and batch_size is None: max_in_flight_samples_per_worker = 100 elif max_in_flight_samples_per_worker is None: max_in_flight_samples_per_worker = 2 * batch_size def _make_dataset(unused_idx: tf.Tensor) -> tf.data.Dataset: dataset = reverb.TrajectoryDataset.from_table_signature( server_address=server_address, table=table, max_in_flight_samples_per_worker=max_in_flight_samples_per_worker) # Finish the pipeline: batch and prefetch. if batch_size: dataset = dataset.batch(batch_size, drop_remainder=True) return dataset # Create the dataset. dataset = tf.data.Dataset.range(num_parallel_calls) dataset = dataset.interleave( map_func=_make_dataset, cycle_length=num_parallel_calls, num_parallel_calls=num_parallel_calls, deterministic=False) if prefetch_size: dataset = dataset.prefetch(prefetch_size) return dataset # TODO(b/152732834): remove this and prefer datasets.make_reverb_dataset. make_dataset = make_reverb_dataset
# python3 # Copyright 2018 DeepMind Technologies Limited. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for making TensorFlow datasets for sampling from Reverb replay.""" from typing import Optional from acme import specs from acme import types from acme.adders import reverb as adders import reverb import tensorflow as tf def make_reverb_dataset( server_address: str, batch_size: Optional[int] = None, prefetch_size: Optional[int] = None, table: str = adders.DEFAULT_PRIORITY_TABLE, num_parallel_calls: int = 12, # Deprecated kwargs. environment_spec: Optional[specs.EnvironmentSpec] = None, extra_spec: Optional[types.NestedSpec] = None, transition_adder: bool = False, convert_zero_size_to_none: bool = False, using_deprecated_adder: bool = False, sequence_length: Optional[int] = None, ) -> tf.data.Dataset: """Make a TensorFlow dataset backed by a Reverb trajectory replay service.""" if environment_spec or extra_spec: raise ValueError( 'The make_reverb_dataset factory function no longer requires specs as' ' as they should be passed as a signature to the reverb.Table when it' ' is created. Consider either updating your code or falling back to the' ' deprecated dataset factory in acme/datasets/deprecated.') # These are no longer used and are only kept in the call signature for # backward compatibility. del environment_spec del extra_spec del transition_adder del convert_zero_size_to_none del using_deprecated_adder del sequence_length # This is the default that used to be set by reverb.TFClient.dataset(). max_in_flight_samples_per_worker = 2 * batch_size if batch_size else 100 def _make_dataset(unused_idx: tf.Tensor) -> tf.data.Dataset: dataset = reverb.TrajectoryDataset.from_table_signature( server_address=server_address, table=table, max_in_flight_samples_per_worker=max_in_flight_samples_per_worker) # Finish the pipeline: batch and prefetch. if batch_size: dataset = dataset.batch(batch_size, drop_remainder=True) return dataset # Create the dataset. dataset = tf.data.Dataset.range(num_parallel_calls) dataset = dataset.interleave( map_func=_make_dataset, cycle_length=num_parallel_calls, num_parallel_calls=num_parallel_calls, deterministic=False) if prefetch_size: dataset = dataset.prefetch(prefetch_size) return dataset # TODO(b/152732834): remove this and prefer datasets.make_reverb_dataset. make_dataset = make_reverb_dataset
Python
0.000012
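For callers of the patched factory, the new keyword is a direct pass-through to `reverb.TrajectoryDataset`; the server address and sizes below are placeholders and assume a running Reverb server:

```python
# Explicitly bound the number of samples each dataset worker keeps in flight.
dataset = make_reverb_dataset(
    server_address='localhost:8000',
    batch_size=32,
    max_in_flight_samples_per_worker=128,
)

# Omitting the argument keeps the old behaviour: 2 * batch_size, or 100 when
# no batch_size is given.
default_dataset = make_reverb_dataset(server_address='localhost:8000')
```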
d04ded85e01c4a9e0960d57a37ecd83fc92fa5cd
Add a fallback to mini_installer_tests' quit_chrome.py exit logic.
chrome/test/mini_installer/quit_chrome.py
chrome/test/mini_installer/quit_chrome.py
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Quits Chrome. This script sends a WM_CLOSE message to each window of Chrome and waits until the process terminates. """ import optparse import os import pywintypes import sys import time import win32con import win32gui import winerror import chrome_helper def CloseWindows(process_path): """Closes all windows owned by processes whose exe path is |process_path|. Args: process_path: The path to the executable whose processes will have their windows closed. Returns: A boolean indicating whether the processes successfully terminated within 25 seconds. """ start_time = time.time() while time.time() - start_time < 25: process_ids = chrome_helper.GetProcessIDs(process_path) if not process_ids: return True for hwnd in chrome_helper.GetWindowHandles(process_ids): try: win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0) except pywintypes.error as error: # It's normal that some window handles have become invalid. if error.args[0] != winerror.ERROR_INVALID_WINDOW_HANDLE: raise time.sleep(0.1) return False def KillNamedProcess(process_path): """ Kills all running exes with the same name as the exe at |process_path|. Args: process_path: The path to an executable. Returns: True if running executables were successfully killed. False otherwise. """ return os.system('taskkill /f /im %s' % os.path.basename(process_path)) == 0 def main(): usage = 'usage: %prog chrome_path' parser = optparse.OptionParser(usage, description='Quit Chrome.') _, args = parser.parse_args() if len(args) != 1: parser.error('Incorrect number of arguments.') chrome_path = args[0] if not CloseWindows(chrome_path): # TODO(robertshield): Investigate why Chrome occasionally doesn't shut down. print 'Warning: Chrome not responding to window closure. Killing process...' KillNamedProcess(chrome_path) return 0 if __name__ == '__main__': sys.exit(main())
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Quits Chrome. This script sends a WM_CLOSE message to each window of Chrome and waits until the process terminates. """ import optparse import pywintypes import sys import time import win32con import win32gui import winerror import chrome_helper def CloseWindows(process_path): """Closes all windows owned by processes whose path is |process_path|. Args: process_path: The path to the process. Returns: A boolean indicating whether the processes successfully terminate within 30 seconds. """ start_time = time.time() while time.time() - start_time < 30: process_ids = chrome_helper.GetProcessIDs(process_path) if not process_ids: return True for hwnd in chrome_helper.GetWindowHandles(process_ids): try: win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0) except pywintypes.error as error: # It's normal that some window handles have become invalid. if error.args[0] != winerror.ERROR_INVALID_WINDOW_HANDLE: raise time.sleep(0.1) return False def main(): usage = 'usage: %prog chrome_path' parser = optparse.OptionParser(usage, description='Quit Chrome.') _, args = parser.parse_args() if len(args) != 1: parser.error('Incorrect number of arguments.') chrome_path = args[0] if not CloseWindows(chrome_path): raise Exception('Could not quit Chrome.') return 0 if __name__ == '__main__': sys.exit(main())
Python
0.003252
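Reduced to its shape, the fallback added above is: try the graceful WM_CLOSE route, and only if that times out, force-kill by image name. A sketch of that pattern (`quit_or_kill` and its parameters are illustrative; `taskkill` is Windows-only):

```python
import os

def quit_or_kill(chrome_path, close_windows):
    """Graceful close first; forced kill as a last resort (Windows-only)."""
    # close_windows stands in for the WM_CLOSE loop in the script above.
    if not close_windows(chrome_path):
        print('Warning: not responding to window closure. Killing process...')
        return os.system('taskkill /f /im %s' % os.path.basename(chrome_path)) == 0
    return True
```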
eba39b722d6d025ec351beeb35e7dadd55ef82f5
correctly treat hashes as little endian
blockchain.py
blockchain.py
#!/usr/bin/env python3 import binascii import datetime class BlockChain: def __init__(self, data, handler=None): self.data = data self.handler = handler self.index = 0 self.block_count = 0 while self.index < len(self.data): self.parse_block() self.block_count += 1 def get_byte(self): data = self.data[self.index] self.index += 1 return data def get_bytes(self, length=1): data = self.data[self.index:self.index + length] self.index += length return data[::-1] def get_uint16(self): return self.get_byte() + (self.get_byte() << 8) def get_uint32(self): return self.get_uint16() + (self.get_uint16() << 16) def get_uint64(self): return self.get_uint32() + (self.get_uint32() << 32) def get_timestamp(self): return datetime.datetime.fromtimestamp(self.get_uint32()) def get_hash(self): return self.get_bytes(32) def get_varlen_int(self): code = self.get_byte() if code < 0xFD: return code elif code == 0xFD: return self.get_uint16() elif code == 0xFE: return self.get_uint32() elif code == 0xFF: return self.get_uint64() def parse_block(self): magic_network_id = self.get_uint32() block_length = self.get_uint32() block_format_version = self.get_uint32() hash_of_previous_block = self.get_hash() merkle_root = self.get_hash() timestamp = self.get_timestamp() bits = self.get_uint32() nonce = self.get_uint32() transaction_count = self.get_varlen_int() for i in range(transaction_count): self.parse_transaction() print("{} prev_block_hash={} timestamp={} nonce={}".format(self.block_count, binascii.hexlify(hash_of_previous_block), timestamp, nonce)) def parse_transaction(self): version_number = self.get_uint32() input_count = self.get_varlen_int() for i in range(input_count): self.parse_input() output_count = self.get_varlen_int() for i in range(output_count): self.parse_output() transaction_lock_time = self.get_uint32() def parse_input(self): transaction_hash = self.get_hash() transaction_index = self.get_uint32() script_length = self.get_varlen_int() script = self.get_bytes(script_length) sequence_number = self.get_uint32() def parse_output(self): value = self.get_uint64() script_length = self.get_varlen_int() script = self.get_bytes(script_length) if __name__ == "__main__": import sys filename = sys.argv[1] with open(filename, "rb") as f: data = f.read() BlockChain(data)
#!/usr/bin/env python3 import binascii import datetime class BlockChain: def __init__(self, data, handler=None): self.data = data self.handler = handler self.index = 0 self.block_count = 0 while self.index < len(self.data): self.parse_block() self.block_count += 1 def get_byte(self): data = self.data[self.index] self.index += 1 return data def get_bytes(self, length=1): data = self.data[self.index:self.index + length] self.index += length return data def get_uint16(self): return self.get_byte() + (self.get_byte() << 8) def get_uint32(self): return self.get_uint16() + (self.get_uint16() << 16) def get_uint64(self): return self.get_uint32() + (self.get_uint32() << 32) def get_timestamp(self): return datetime.datetime.fromtimestamp(self.get_uint32()) def get_hash(self): return self.get_bytes(32) def get_varlen_int(self): code = self.get_byte() if code < 0xFD: return code elif code == 0xFD: return self.get_uint16() elif code == 0xFE: return self.get_uint32() elif code == 0xFF: return self.get_uint64() def parse_block(self): magic_network_id = self.get_uint32() block_length = self.get_uint32() block_format_version = self.get_uint32() hash_of_previous_block = self.get_hash() merkle_root = self.get_hash() timestamp = self.get_timestamp() bits = self.get_uint32() nonce = self.get_uint32() transaction_count = self.get_varlen_int() for i in range(transaction_count): self.parse_transaction() print("{} prev_block_hash={} timestamp={} nonce={}".format(self.block_count, binascii.hexlify(hash_of_previous_block), timestamp, nonce)) def parse_transaction(self): version_number = self.get_uint32() input_count = self.get_varlen_int() for i in range(input_count): self.parse_input() output_count = self.get_varlen_int() for i in range(output_count): self.parse_output() transaction_lock_time = self.get_uint32() def parse_input(self): transaction_hash = self.get_hash() transaction_index = self.get_uint32() script_length = self.get_varlen_int() script = self.get_bytes(script_length) sequence_number = self.get_uint32() def parse_output(self): value = self.get_uint64() script_length = self.get_varlen_int() script = self.get_bytes(script_length) if __name__ == "__main__": import sys filename = sys.argv[1] with open(filename, "rb") as f: data = f.read() BlockChain(data)
Python
0.998601
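The reason for the `[::-1]` added to `get_bytes()`: Bitcoin serializes hashes little-endian on disk, while explorers and humans conventionally read them big-endian, so display requires a byte reversal. A stdlib demonstration with a made-up 4-byte stand-in for a 32-byte hash:

```python
import binascii

raw = bytes([0x11, 0x22, 0x33, 0x44])   # on-disk (little-endian) order

print(binascii.hexlify(raw))            # b'11223344'
print(binascii.hexlify(raw[::-1]))      # b'44332211' -- display order
```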
202d0a199a59a0e8ca5651785aa4497b1e0047e7
Add default implementation of Language.validate_code
importkit/meta.py
importkit/meta.py
## # Copyright (c) 2008-2012 Sprymix Inc. # All rights reserved. # # See LICENSE for details. ## import functools import os from semantix import exceptions as sx_errors from .loader import LanguageSourceFileLoader from .import_ import finder class LanguageMeta(type): languages = [] def __new__(cls, name, bases, dct, *, register=True): lang = super(LanguageMeta, cls).__new__(cls, name, bases, dct) if register: LanguageMeta.languages.append(lang) finder.update_finders() return lang def __init__(cls, name, bases, dct, *, register=True): super().__init__(name, bases, dct) @staticmethod def recognize_file(filename, try_append_extension=False, is_package=False): result = None for lang in LanguageMeta.languages: file_ = lang.recognize_file(filename, try_append_extension, is_package) if file_: if result is not None: raise ImportError('ambiguous module import: %s, languages in conflict: %s' % \ (filename, (lang, result[0]))) result = (lang, file_) return result def get_loader(cls): return cls.loader @classmethod def get_loaders(cls): for lang in LanguageMeta.languages: yield (functools.partial(lang.loader, language=lang), ['.' + ext for ext in lang.file_extensions]) class Language(object, metaclass=LanguageMeta, register=False): loader = LanguageSourceFileLoader file_extensions = () proxy_module_cls = None @classmethod def recognize_file(cls, filename, try_append_extension=False, is_package=False): if is_package: filename = os.path.join(filename, '__init__') if try_append_extension: for ext in cls.file_extensions: if os.path.exists(filename + '.' + ext): return filename + '.' + ext elif os.path.exists(filename): for ext in cls.file_extensions: if filename.endswith('.' + ext): return filename @classmethod def load_code(cls, stream, context): raise NotImplementedError @classmethod def execute_code(cls, code, context): raise NotImplementedError @classmethod def validate_code(cls, code): pass class ObjectError(Exception): def __init__(self, msg, context=None, code=None, note=None): self.msg = msg self.context = context self.code = code self.note = note def __str__(self): return self.msg class Object: def __sx_setstate__(self, data): pass class LanguageError(sx_errors.SemantixError): pass
## # Copyright (c) 2008-2012 Sprymix Inc. # All rights reserved. # # See LICENSE for details. ## import functools import os from semantix import exceptions as sx_errors from .loader import LanguageSourceFileLoader from .import_ import finder class LanguageMeta(type): languages = [] def __new__(cls, name, bases, dct, *, register=True): lang = super(LanguageMeta, cls).__new__(cls, name, bases, dct) if register: LanguageMeta.languages.append(lang) finder.update_finders() return lang def __init__(cls, name, bases, dct, *, register=True): super().__init__(name, bases, dct) @staticmethod def recognize_file(filename, try_append_extension=False, is_package=False): result = None for lang in LanguageMeta.languages: file_ = lang.recognize_file(filename, try_append_extension, is_package) if file_: if result is not None: raise ImportError('ambiguous module import: %s, languages in conflict: %s' % \ (filename, (lang, result[0]))) result = (lang, file_) return result def get_loader(cls): return cls.loader @classmethod def get_loaders(cls): for lang in LanguageMeta.languages: yield (functools.partial(lang.loader, language=lang), ['.' + ext for ext in lang.file_extensions]) class Language(object, metaclass=LanguageMeta, register=False): loader = LanguageSourceFileLoader file_extensions = () proxy_module_cls = None @classmethod def recognize_file(cls, filename, try_append_extension=False, is_package=False): if is_package: filename = os.path.join(filename, '__init__') if try_append_extension: for ext in cls.file_extensions: if os.path.exists(filename + '.' + ext): return filename + '.' + ext elif os.path.exists(filename): for ext in cls.file_extensions: if filename.endswith('.' + ext): return filename @classmethod def load_code(cls, stream, context): raise NotImplementedError @classmethod def execute_code(cls, code, context): raise NotImplementedError class ObjectError(Exception): def __init__(self, msg, context=None, code=None, note=None): self.msg = msg self.context = context self.code = code self.note = note def __str__(self): return self.msg class Object: def __sx_setstate__(self, data): pass class LanguageError(sx_errors.SemantixError): pass
Python
0
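With the new no-op default in place, a language only overrides `validate_code` when it has something to check. An illustrative subclass (the check itself is made up; `register=False` keeps it out of the global language registry, as the base class does):

```python
class ToyLanguage(Language, register=False):
    file_extensions = ('toy',)

    @classmethod
    def validate_code(cls, code):
        # The base class now defaults to a silent pass; raise to reject.
        if code is None:
            raise LanguageError('loader produced no code object')
```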
2d55d95c623bef4848131878061887854ff8a971
Update utils.py
deeplab_resnet/utils.py
deeplab_resnet/utils.py
from PIL import Image import numpy as np import tensorflow as tf n_classes = 21 # colour map label_colours = [(0,0,0) # 0=background ,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128) # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle ,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0) # 6=bus, 7=car, 8=cat, 9=chair, 10=cow ,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128) # 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person ,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)] # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor def decode_labels(mask): """Decode batch of segmentation masks. Args: label_batch: result of inference after taking argmax. Returns: An batch of RGB images of the same size """ img = Image.new('RGB', (len(mask[0]), len(mask))) pixels = img.load() for j_, j in enumerate(mask): for k_, k in enumerate(j): if k < n_classes: pixels[k_,j_] = label_colours[k] return np.array(img) def prepare_label(input_batch, new_size): """Resize masks and perform one-hot encoding. Args: input_batch: input tensor of shape [batch_size H W 1]. new_size: a tensor with new height and width. Returns: Outputs a tensor of shape [batch_size h w 21] with last dimension comprised of 0's and 1's only. """ with tf.name_scope('label_encode'): input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp. input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension. input_batch = tf.one_hot(input_batch, depth=n_classes) return input_batch
from PIL import Image import numpy as np # colour map label_colours = [(0,0,0) # 0=background ,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128) # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle ,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0) # 6=bus, 7=car, 8=cat, 9=chair, 10=cow ,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128) # 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person ,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)] # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor def decode_labels(mask): """Decode batch of segmentation masks. Args: label_batch: result of inference after taking argmax. Returns: An batch of RGB images of the same size """ img = Image.new('RGB', (len(mask[0]), len(mask))) pixels = img.load() for j_, j in enumerate(mask): for k_, k in enumerate(j): if k < 21: pixels[k_,j_] = label_colours[k] return np.array(img) def prepare_label(input_batch, new_size): """Resize masks and perform one-hot encoding. Args: input_batch: input tensor of shape [batch_size H W 1]. new_size: a tensor with new height and width. Returns: Outputs a tensor of shape [batch_size h w 21] with last dimension comprised of 0's and 1's only. """ with tf.name_scope('label_encode'): input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp. input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension. input_batch = tf.one_hot(input_batch, depth=n_classes) return input_batch
Python
0.000001
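`decode_labels` as defined above can be exercised directly with a small integer mask; the class indices here are arbitrary, and Pillow must be installed (the module already imports it):

```python
import numpy as np

mask = np.array([[0, 1],
                 [2, 20]])        # 0=background, 1=aeroplane, 2=bicycle, 20=tv

rgb = decode_labels(mask)
print(rgb.shape)                  # (2, 2, 3) -- one RGB triple per pixel
```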
a62b5955d9801f25736c42545191ff5a76a2e5b1
Refactor UserFactory and add CommentFactory
blog/tests.py
blog/tests.py
from django.test import TestCase from .models import BlogPost, Comment from django.contrib.auth.models import User class UserFactory(object): def create(self, username="user001", email="email@domain.com", password="password123456"): user = User.objects.create_user(username = username, email = email, password = password) return user class BlogPostFactory(object): def create(self, save=False): blogpost = BlogPost() blogpost.user = UserFactory().create() blogpost.title = "Title Test" blogpost.text = "Lorem ipsum tarapia tapioco..." if save==True: blogpost.save() return blogpost class CommentFactory(object): def create(self, blogpost, text="Test comment", save=False): comment = Comment() comment.post = blogpost comment.user = UserFactory().create("user002", "email002@domain.com", "password123456") comment.text = text if save==True: comment.save() return comment class BlogTest(TestCase): def setUp(self): pass def test_post_creation(self): blogpost = BlogPostFactory().create(True) self.assertTrue(blogpost.id > 0, "BlogPost created correctly") def test_post_update(self): blogpost = BlogPostFactory().create(True) self.assertTrue(blogpost.id > 0, "BlogPost created correctly") blogpost.title = "Title Test - modified" blogpost.save() blogpost_id = blogpost.id blogpost_saved = BlogPost.objects.get(id = blogpost_id) self.assertEquals(blogpost_saved.title, blogpost.title, "BlogPost updated correctly") def test_post_delete(self): blogpost = BlogPostFactory().create(True) blogpost_id = blogpost.id blogpost.delete() blogpost_saved = BlogPost.objects.filter(id = blogpost_id) self.assertEqual(blogpost_saved.count(), 0, "BlogPost deleted correctly")
from django.test import TestCase from .models import BlogPost from django.contrib.auth.models import User class UserFactory(object): def create(self): user = User.objects.create_user(username = "user001", email = "email@domain.com", password = "password123456") return user class BlogPostFactory(object): def create(self, save=False): blogpost = BlogPost() blogpost.user = UserFactory().create() blogpost.title = "Title Test" blogpost.text = "Lorem ipsum tarapia tapioco..." if save==True: blogpost.save() return blogpost class BlogTest(TestCase): def setUp(self): pass def test_post_creation(self): blogpost = BlogPostFactory().create(True) self.assertTrue(blogpost.id > 0, "BlogPost created correctly") def test_post_update(self): blogpost = BlogPostFactory().create(True) self.assertTrue(blogpost.id > 0, "BlogPost created correctly") blogpost.title = "Title Test - modified" blogpost.save() blogpost_id = blogpost.id blogpost_saved = BlogPost.objects.get(id = blogpost_id) self.assertEquals(blogpost_saved.title, blogpost.title, "BlogPost updated correctly") def test_post_delete(self): blogpost = BlogPostFactory().create(True) blogpost_id = blogpost.id blogpost.delete() blogpost_saved = BlogPost.objects.filter(id = blogpost_id) self.assertEqual(blogpost_saved.count(), 0, "BlogPost deleted correctly")
Python
0
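A sketch of how the refactored `UserFactory` and the new `CommentFactory` compose in a test; the assertions are illustrative and assume the `Comment` model fields used by the factory:

```python
class CommentTest(TestCase):
    def test_comment_creation(self):
        blogpost = BlogPostFactory().create(save=True)
        comment = CommentFactory().create(blogpost, text="First!", save=True)
        self.assertTrue(comment.id > 0, "Comment created correctly")
        self.assertEqual(comment.post.id, blogpost.id)
```

Note that the factory hard-codes a second user ("user002"), so creating two comments in one test would collide on the unique username.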
420d104d9e674b96363db5c986ea9eea4d411c92
Add updated template settings to conftests
conftest.py
conftest.py
""" Configuration file for py.test """ import django def pytest_configure(): from django.conf import settings settings.configure( DEBUG=True, USE_TZ=True, DATABASES={ "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": "test.sqlite3", } }, INSTALLED_APPS=[ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sites", # The ordering here, the apps using the organization base models # first and *then* the organizations app itself is an implicit test # that the organizations app need not be installed in order to use # its base models. "test_accounts", "test_vendors", "organizations", "test_custom", ], MIDDLEWARE_CLASSES=[], SITE_ID=1, FIXTURE_DIRS=['tests/fixtures'], ORGS_SLUGFIELD='autoslug.AutoSlugField', ROOT_URLCONF="tests.urls", TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, }, ] ) django.setup()
""" Configuration file for py.test """ import django def pytest_configure(): from django.conf import settings settings.configure( DEBUG=True, USE_TZ=True, DATABASES={ "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": "test.sqlite3", } }, INSTALLED_APPS=[ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sites", # The ordering here, the apps using the organization base models # first and *then* the organizations app itself is an implicit test # that the organizations app need not be installed in order to use # its base models. "test_accounts", "test_vendors", "organizations", "test_custom", ], MIDDLEWARE_CLASSES=[], SITE_ID=1, FIXTURE_DIRS=['tests/fixtures'], ORGS_SLUGFIELD='autoslug.AutoSlugField', ROOT_URLCONF="tests.urls", ) django.setup()
Python
0
8d6287397b47fcaf98cadc59349f1db68c7b2d93
Update 1.4_replace_whitespace.py
CrackingCodingInterview/1.4_replace_whitespace.py
CrackingCodingInterview/1.4_replace_whitespace.py
""" Replace all whitespace in a string with '%20' """ def replace(string): for i in string: string.replace("", %20) return string
""" Replace all whitespace in a string with '%20' """
Python
0.000001
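With the syntax error fixed, the function is a thin wrapper over `str.replace` (strings are immutable, so a loop that discards each result would change nothing). A quick check, mirroring the corrected file:

```python
def replace(string):
    return string.replace(" ", "%20")

assert replace("hello world wide") == "hello%20world%20wide"
```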
8a1b902b729597f5c8536b235d7add887f097fdd
Drop box should be off by default. SSL should be on by default, HTTP should be off.
twistedcaldav/config.py
twistedcaldav/config.py
## # Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # DRI: David Reid, dreid@apple.com ## import os from twistedcaldav.py.plistlib import readPlist defaultConfigFile = '/etc/caldavd/caldavd.plist' defaultConfig = { 'CreateAccounts': False, 'DirectoryService': { 'params': {'node': '/Search'}, 'type': 'twistedcaldav.directory.appleopendirectory.OpenDirectoryService' }, 'DocumentRoot': '/Library/CalendarServer/Documents', 'DropBoxEnabled': False, 'ErrorLogFile': '/var/log/caldavd/error.log', 'ManholePort': 0, 'MaximumAttachmentSizeBytes': 1048576, 'NotificationsEnabled': False, 'PIDFile': '/var/run/caldavd.pid', 'Port': 8008, 'ResetAccountACLs': False, 'RunStandalone': True, 'SSLCertificate': '/etc/certificates/Default.crt', 'SSLEnable': True, 'SSLOnly': True, 'SSLPort': 8443, 'SSLPrivateKey': '/etc/certificates/Default.key', 'ServerLogFile': '/var/log/caldavd/server.log', 'ServerStatsFile': '/Library/CalendarServer/Documents/stats.plist', 'UserQuotaBytes': 104857600, 'Verbose': False, 'twistdLocation': '/usr/share/caldavd/bin/twistd', 'SACLEnable': False, 'AuthSchemes': ['Basic'], 'AdminPrincipals': ['/principal/users/admin'] } class Config (object): def __init__(self, defaults): self.update(defaults) def update(self, items): items = items.iteritems() for key, value in items: setattr(self, key, value) config = Config(defaultConfig) def parseConfig(configFile): if os.path.exists(configFile): plist = readPlist(configFile) config.update(plist)
## # Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # DRI: David Reid, dreid@apple.com ## import os from twistedcaldav.py.plistlib import readPlist defaultConfigFile = '/etc/caldavd/caldavd.plist' defaultConfig = { 'CreateAccounts': False, 'DirectoryService': { 'params': {'node': '/Search'}, 'type': 'twistedcaldav.directory.appleopendirectory.OpenDirectoryService' }, 'DocumentRoot': '/Library/CalendarServer/Documents', 'DropBoxEnabled': True, 'ErrorLogFile': '/var/log/caldavd/error.log', 'ManholePort': 0, 'MaximumAttachmentSizeBytes': 1048576, 'NotificationsEnabled': False, 'PIDFile': '/var/run/caldavd.pid', 'Port': 8008, 'ResetAccountACLs': False, 'RunStandalone': True, 'SSLCertificate': '/etc/certificates/Default.crt', 'SSLEnable': False, 'SSLOnly': False, 'SSLPort': 8443, 'SSLPrivateKey': '/etc/certificates/Default.key', 'ServerLogFile': '/var/log/caldavd/server.log', 'ServerStatsFile': '/Library/CalendarServer/Documents/stats.plist', 'UserQuotaBytes': 104857600, 'Verbose': False, 'twistdLocation': '/usr/share/caldavd/bin/twistd', 'SACLEnable': False, 'AuthSchemes': ['Basic'], 'AdminPrincipals': ['/principal/users/admin'] } class Config (object): def __init__(self, defaults): self.update(defaults) def update(self, items): items = items.iteritems() for key, value in items: setattr(self, key, value) config = Config(defaultConfig) def parseConfig(configFile): if os.path.exists(configFile): plist = readPlist(configFile) config.update(plist)
Python
0.000025
3e8d6e31f576fb857a1415c85a227f56225b8f06
fix database path
blogconfig.py
blogconfig.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # 博客名和简介 blogname = "I'm SErHo" blogdesc = "SErHo's Blog, Please Call me Serho Liu." blogcover = "//dn-serho.qbox.me/blogbg.jpg" # Picky 目录和数据库 picky = "/home/serho/website/picky" database = "/home/serho/website/newblog.db" # 其他设置 # disqus = "serho" # secret = "use random" debug = False
#!/usr/bin/env python # -*- coding: utf-8 -*- # 博客名和简介 blogname = "I'm SErHo" blogdesc = "SErHo's Blog, Please Call me Serho Liu." blogcover = "//dn-serho.qbox.me/blogbg.jpg" # Picky 目录和数据库 picky = "/home/serho/website/picky" database = "//home/serho/website/newblog.db" # 其他设置 # disqus = "serho" # secret = "use random" debug = False
Python
0.000002
1630bb891bf57052984301b9dd191826ca7ba18e
Update test_biobambam.py
tests/test_biobambam.py
tests/test_biobambam.py
""" .. Copyright 2017 EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os.path import time import pytest # pylint: disable=unused-import from tool import biobambam_filter def test_biobambam(): """ Test case to ensure that BioBamBam works """ bbb = biobambam_filter.biobambam() resource_path = os.path.join(os.path.dirname(__file__), "data/") bbb.run( [resource_path + "macs2.Human.DRR000150.22.bam"], [] ) print "Start : %s" % time.ctime() time.sleep (10) print "End : %s" % time.ctime() assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22.filtered.bam") is True testFile = open(resource_path + "macs2.Human.DRR000150.22.filtered.bam") print ("read line: ") print (testFile.readline()) assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22.filtered.bam") > 0
""" .. Copyright 2017 EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os.path import time import pytest # pylint: disable=unused-import from tool import biobambam_filter def test_biobambam(): """ Test case to ensure that BioBamBam works """ bbb = biobambam_filter.biobambam() resource_path = os.path.join(os.path.dirname(__file__), "data/") bbb.run( [resource_path + "macs2.Human.DRR000150.22.bam"], [] ) print "Start : %s" % time.ctime() time.sleep (10) print "End : %s" % time.ctime() assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22.filtered.bam") is True assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22.filtered.bam") > 0
Python
0.000002
1e3e6ea6c24e275a5a08f096968ae14aab2dfd22
Support custom schema classes.
muffin_rest/peewee.py
muffin_rest/peewee.py
"""Support Muffin-Peewee.""" from muffin_rest import RESTHandler, RESTNotFound, Filter, Filters, RESTOptions try: from marshmallow_peewee import ModelSchema except ImportError: import logging logging.error('Marshmallow-Peewee should be installed to use the integration.') raise class PWFilter(Filter): """Filter Peewee Queryset.""" operators = Filter.operators operators['$in'] = lambda f, v: f << v operators['$none'] = lambda f, v: f >> v operators['$like'] = lambda f, v: f % v operators['$contains'] = lambda f, v: f.contains(v) operators['$starts'] = lambda f, v: f.startswith(v) operators['$ends'] = lambda f, v: f.endswith(v) operators['$between'] = lambda f, v: f.between(*v) operators['$regexp'] = lambda f, v: f.regexp(v) list_ops = Filter.list_ops + ('$between',) def __init__(self, name, mfield=None, **kwargs): self.mfield = mfield return super(PWFilter, self).__init__(name, **kwargs) def apply(self, collection, ops, resource=None, **kwargs): """Filter given collection.""" mfield = self.mfield or resource.meta.model._meta.fields.get(self.field.attribute) if mfield: collection = collection.where(*[op(mfield, val) for op, val in ops]) return collection class PWFilters(Filters): """Bind filter class.""" FILTER_CLASS = PWFilter class PWRESTOptions(RESTOptions): """Generate schema and name.""" def __init__(self, cls, name=None, **params): """Initialize options.""" super(PWRESTOptions, self).__init__(cls, **params) cls.name = name or self.model and self.model._meta.db_table or cls.name if not self.model: return None self.model_pk = self.model_pk or self.model._meta.primary_key if not cls.Schema: meta = type('Meta', (object,), dict({'model': self.model}, **self.schema_meta)) cls.Schema = type( cls.name.title() + 'Schema', (self.schema_cls,), dict({'Meta': meta}, **self.schema)) # Resetup filters if getattr(self.meta, 'filters', None): self.filters = self.filters_converter(*self.meta.filters, handler=cls) class PWRESTHandler(RESTHandler): """Support REST for Peewee.""" OPTIONS_CLASS = PWRESTOptions class Meta: """Peewee options.""" filters_converter = PWFilters model = None model_pk = None schema = {} schema_cls = ModelSchema def get_many(self, request, **kwargs): """Get collection.""" return self.meta.model.select() def get_one(self, request, **kwargs): """Load a resource.""" resource = request.match_info.get(self.name) if not resource: return None try: return self.collection.where(self.meta.model_pk == resource).get() except Exception: raise RESTNotFound(reason='Resource not found.') def sort(self, *sorting, **kwargs): """Sort resources.""" sorting_ = [] for name, desc in sorting: field = self.meta.model._meta.fields.get(name) if field is None: continue if desc: field = field.desc() sorting_.append(field) if sorting_: return self.collection.order_by(*sorting_) return self.collection def paginate(self, request, offset=0, limit=None): """Paginate queryset.""" return self.collection.offset(offset).limit(limit), self.collection.count() def get_schema(self, request, resource=None, **kwargs): """Initialize schema.""" return self.Schema(instance=resource) def save(self, request, resource=None, **kwargs): """Create a resource.""" resource.save() return resource def delete(self, request, resource=None, **kwargs): """Delete a resource.""" if resource is None: raise RESTNotFound(reason='Resource not found') resource.delete_instance()
"""Support Muffin-Peewee.""" from muffin_rest import RESTHandler, RESTNotFound, Filter, Filters, RESTOptions try: from marshmallow_peewee import ModelSchema except ImportError: import logging logging.error('Marshmallow-Peewee should be installed to use the integration.') raise class PWFilter(Filter): """Filter Peewee Queryset.""" operators = Filter.operators operators['$in'] = lambda f, v: f << v operators['$none'] = lambda f, v: f >> v operators['$like'] = lambda f, v: f % v operators['$contains'] = lambda f, v: f.contains(v) operators['$starts'] = lambda f, v: f.startswith(v) operators['$ends'] = lambda f, v: f.endswith(v) operators['$between'] = lambda f, v: f.between(*v) operators['$regexp'] = lambda f, v: f.regexp(v) list_ops = Filter.list_ops + ('$between',) def __init__(self, name, mfield=None, **kwargs): self.mfield = mfield return super(PWFilter, self).__init__(name, **kwargs) def apply(self, collection, ops, resource=None, **kwargs): """Filter given collection.""" mfield = self.mfield or resource.meta.model._meta.fields.get(self.field.attribute) if mfield: collection = collection.where(*[op(mfield, val) for op, val in ops]) return collection class PWFilters(Filters): """Bind filter class.""" FILTER_CLASS = PWFilter class PWRESTOptions(RESTOptions): """Generate schema and name.""" def __init__(self, cls, name=None, **params): """Initialize options.""" super(PWRESTOptions, self).__init__(cls, **params) cls.name = name or self.model and self.model._meta.db_table or cls.name if not self.model: return None self.model_pk = self.model_pk or self.model._meta.primary_key if not cls.Schema: meta = type('Meta', (object,), dict({'model': self.model}, **self.schema_meta)) cls.Schema = type( cls.name.title() + 'Schema', (ModelSchema,), dict({'Meta': meta}, **self.schema)) # Resetup filters if getattr(self.meta, 'filters', None): self.filters = self.filters_converter(*self.meta.filters, handler=cls) class PWRESTHandler(RESTHandler): """Support REST for Peewee.""" OPTIONS_CLASS = PWRESTOptions class Meta: """Peewee options.""" filters_converter = PWFilters model = None model_pk = None schema = {} def get_many(self, request, **kwargs): """Get collection.""" return self.meta.model.select() def get_one(self, request, **kwargs): """Load a resource.""" resource = request.match_info.get(self.name) if not resource: return None try: return self.collection.where(self.meta.model_pk == resource).get() except Exception: raise RESTNotFound(reason='Resource not found.') def sort(self, *sorting, **kwargs): """Sort resources.""" sorting_ = [] for name, desc in sorting: field = self.meta.model._meta.fields.get(name) if field is None: continue if desc: field = field.desc() sorting_.append(field) if sorting_: return self.collection.order_by(*sorting_) return self.collection def paginate(self, request, offset=0, limit=None): """Paginate queryset.""" return self.collection.offset(offset).limit(limit), self.collection.count() def get_schema(self, request, resource=None, **kwargs): """Initialize schema.""" return self.Schema(instance=resource) def save(self, request, resource=None, **kwargs): """Create a resource.""" resource.save() return resource def delete(self, request, resource=None, **kwargs): """Delete a resource.""" if resource is None: raise RESTNotFound(reason='Resource not found') resource.delete_instance()
Python
0
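What the new `schema_cls` knob enables, as a sketch: a handler can substitute its own marshmallow-peewee base schema for the previously hard-wired `ModelSchema`. `MyModel` and `CamelSchema` are stand-ins, not part of the patch:

```python
import peewee

class MyModel(peewee.Model):
    name = peewee.CharField(default='')   # stand-in model

class CamelSchema(ModelSchema):
    """Hypothetical project-wide schema base (custom naming, extra fields...)."""

class MyHandler(PWRESTHandler):
    class Meta:
        model = MyModel
        schema_cls = CamelSchema          # picked up by PWRESTOptions above
```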
8fa0dca5cd5187126a10197883348fc6b16544b5
Test get campaigns by email
tests/test_campaigns.py
tests/test_campaigns.py
import os import vcr import unittest from hatchbuck.api import HatchbuckAPI from hatchbuck.objects import Contact class TestCampaigns(unittest.TestCase): def setUp(self): # Fake key can be used with existing cassettes self.test_api_key = os.environ.get("HATCHBUCK_API_KEY", "ABC123") @vcr.use_cassette( 'tests/fixtures/cassettes/test_get_contact_campaigns.yml', filter_query_parameters=['api_key'] ) def test_get_contact_campaigns(self): hatchbuck = HatchbuckAPI(self.test_api_key) contact_id = "d1F4Tm1tcUxVRmdFQmVIT3lhVjNpaUtxamprakk5S3JIUGRmVWtHUXJaRTE1" contact = hatchbuck.search_contacts(contactId=contact_id)[0] self.assertEqual(contact.contactId, contact_id) campaigns = contact.get_campaigns() self.assertEqual(campaigns[0].name, "Brochure Request Followup") self.assertEqual(campaigns[0].step, 0) self.assertEqual(campaigns[0].id, "b1BFUnM1Unh0MDVVOVJEWUc1d0pTM0pUSVY4QS0xOW5GRHRsS05DXzNXazE1") @vcr.use_cassette( 'tests/fixtures/cassettes/test_get_contact_campaigns_by_email.yml', filter_query_parameters=['api_key'] ) def test_get_contact_campaigns_by_email(self): hatchbuck = HatchbuckAPI(self.test_api_key) contact_email = "jill.smith@pyhatchbuck.net" campaigns = hatchbuck.get_campaigns(contact_email) self.assertEqual(campaigns[0].name, "Brochure Request Followup") self.assertEqual(campaigns[0].step, 0) self.assertEqual(campaigns[0].id, "b1BFUnM1Unh0MDVVOVJEWUc1d0pTM0pUSVY4QS0xOW5GRHRsS05DXzNXazE1") if __name__ == '__main__': unittest.main()
import os import vcr import unittest from hatchbuck.api import HatchbuckAPI from hatchbuck.objects import Contact class TestCampaigns(unittest.TestCase): def setUp(self): # Fake key can be used with existing cassettes self.test_api_key = os.environ.get("HATCHBUCK_API_KEY", "ABC123") @vcr.use_cassette( 'tests/fixtures/cassettes/test_get_contact_campaigns.yml', filter_query_parameters=['api_key'] ) def test_get_contact_campaigns(self): hatchbuck = HatchbuckAPI(self.test_api_key) contact_id = "d1F4Tm1tcUxVRmdFQmVIT3lhVjNpaUtxamprakk5S3JIUGRmVWtHUXJaRTE1" contact = hatchbuck.search_contacts(contactId=contact_id)[0] self.assertEqual(contact.contactId, contact_id) campaigns = contact.get_campaigns() self.assertEqual(campaigns[0].name, "Brochure Request Followup") self.assertEqual(campaigns[0].id, "b1BFUnM1Unh0MDVVOVJEWUc1d0pTM0pUSVY4QS0xOW5GRHRsS05DXzNXazE1") if __name__ == '__main__': unittest.main()
Python
0
eb58615d0fa7f4469be01f9e8dcb1cf44b8ce85e
correct context problem
close_residual_order_unlink/unlink_mrp.py
close_residual_order_unlink/unlink_mrp.py
# -*- coding: utf-8 -*- ############################################################################### # # Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### import os import sys import logging import openerp import openerp.netsvc as netsvc import openerp.addons.decimal_precision as dp from openerp.osv import fields, osv, expression, orm from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta from openerp import SUPERUSER_ID, api from openerp import tools from openerp.tools.translate import _ from openerp.tools.float_utils import float_round as round from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare) _logger = logging.getLogger(__name__) class SaleOrder(orm.Model): """ Model name: Sale Order """ _inherit = 'sale.order' # ------------------------------------------------------------------------- # Override: button force close: # ------------------------------------------------------------------------- def force_close_residual_order(self, cr, uid, ids, context=None): ''' Force order and line closed: ''' # Run normal button procedure: super(SaleOrder, self).force_close_residual_order( cr, uid, ids, context=context) _logger.warning('Unlink no more production line') # Pool used: sol_pool = self.pool.get('sale.order.line') order_proxy = self.browse(cr, uid, ids, context=context) # -------------------------------------- # Read data for log and get information: # -------------------------------------- html_log = '' for line in order_proxy.order_line: if not line.mrp_id: # not production_mrp_id continue # Manage only linked to production line if line.product_uom_qty - line.product_uom_maked_sync_qty <= 0: continue # Manage only residual production todo if 'UNLINK' in line.mrp_id.name: continue # Unlinked order no re-unlink # Unlink line: context['production_order_id'] = line.mrp_id.id sol_pool.free_line(cr, uid, [line.id], context=context) # Log unlinked: html_log += ''' <tr> <td>%s</td> <td>%s</td> <td>%s</td> <td>%s</td> </tr>\n''' % ( line.product_id.default_code, line.product_uom_qty, line.product_uom_maked_sync_qty, line.delivered_qty, ) if 'production_order_id' in context: del(context['production_order_id']) # -------------------------- # Log message for operation: # -------------------------- if html_log: message = _(''' <p>UNLINKED Remain line to produce:</p> <table class='oe_list_content'> <tr> <td class='oe_list_field_cell'>Prod.</td> <td class='oe_list_field_cell'>Order</td> <td class='oe_list_field_cell'>Done</td> <td class='oe_list_field_cell'>Delivered</td> </tr> %s </table> ''') % html_log # Send message self.message_post(cr, uid, ids, body=message, context=context) return True # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# -*- coding: utf-8 -*- ############################################################################### # # Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### import os import sys import logging import openerp import openerp.netsvc as netsvc import openerp.addons.decimal_precision as dp from openerp.osv import fields, osv, expression, orm from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta from openerp import SUPERUSER_ID, api from openerp import tools from openerp.tools.translate import _ from openerp.tools.float_utils import float_round as round from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare) _logger = logging.getLogger(__name__) class SaleOrder(orm.Model): """ Model name: Sale Order """ _inherit = 'sale.order' # ------------------------------------------------------------------------- # Override: button force close: # ------------------------------------------------------------------------- def force_close_residual_order(self, cr, uid, ids, context=None): ''' Force order and line closed: ''' # Run normal button procedure: super(SaleOrder, self).force_close_residual_order( cr, uid, ids, context=context) _logger.warning('Unlink no more production line') # Pool used: sol_pool = self.pool.get('sale.order.line') order_proxy = self.browse(cr, uid, ids, context=context) # -------------------------------------- # Read data for log and get information: # -------------------------------------- html_log = '' for line in order_proxy.order_line: if not line.mrp_id: # not production_mrp_id continue # Manage only linked to production line if line.product_uom_qty - line.product_uom_maked_sync_qty <= 0: continue # Manage only residual production todo if 'UNLINK' in line.mrp_id.name: continue # Unlinked order no re-unlink # Unlink line: sol_pool.free_line(cr, uid, [line.id], context=context) # Log unlinked: html_log += ''' <tr> <td>%s</td> <td>%s</td> <td>%s</td> <td>%s</td> </tr>\n''' % ( line.product_id.default_code, line.product_uom_qty, line.product_uom_maked_sync_qty, line.delivered_qty, ) # -------------------------- # Log message for operation: # -------------------------- if html_log: message = _(''' <p>UNLINKED Remain line to produce:</p> <table class='oe_list_content'> <tr> <td class='oe_list_field_cell'>Prod.</td> <td class='oe_list_field_cell'>Order</td> <td class='oe_list_field_cell'>Done</td> <td class='oe_list_field_cell'>Delivered</td> </tr> %s </table> ''') % html_log # Send message self.message_post(cr, uid, ids, body=message, context=context) return True # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0.999969
d8a83ea3433948447c307a894b16c2b8a12247e8
Kill defaulting to json for now.
api/base.py
api/base.py
from django.contrib.auth.models import User
from django.conf.urls.defaults import url
from django.core.urlresolvers import reverse

from tastypie.bundle import Bundle
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import DjangoAuthorization, Authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS

from builds.models import Build
from projects.models import Project


class EnhancedModelResource(ModelResource):
    def obj_get_list(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_get_list``.

        Takes an optional ``request`` object, whose ``GET`` dictionary can be
        used to narrow the query.
        """
        filters = None

        if hasattr(request, 'GET'):
            filters = request.GET

        applicable_filters = self.build_filters(filters=filters)
        applicable_filters.update(kwargs)

        try:
            return self.get_object_list(request).filter(**applicable_filters)
        except ValueError, e:
            raise NotFound("Invalid resource lookup data provided (mismatched type).")


class UserResource(ModelResource):
    class Meta:
        allowed_methods = ['get']
        queryset = User.objects.all()
        fields = ['username', 'first_name', 'last_name', 'last_login', 'id']

    def override_urls(self):
        return [
            url(r"^(?P<resource_name>%s)/(?P<username>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]


class ProjectResource(ModelResource):
    user = fields.ForeignKey(UserResource, 'user')

    class Meta:
        include_absolute_url = True
        allowed_methods = ['get']
        queryset = Project.objects.all()
        excludes = ['build_pdf', 'path', 'skip', 'featured']

    def override_urls(self):
        return [
            url(r"^(?P<resource_name>%s)/(?P<slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]


class BuildResource(EnhancedModelResource):
    project = fields.ForeignKey(ProjectResource, 'project')

    class Meta:
        allowed_methods = ['get']
        queryset = Build.objects.all()

    def override_urls(self):
        return [
            url(r"^(?P<resource_name>%s)/(?P<project__slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_list'), name="api_list_detail"),
        ]
from django.contrib.auth.models import User
from django.conf.urls.defaults import url
from django.core.urlresolvers import reverse

from tastypie.bundle import Bundle
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import DjangoAuthorization, Authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS

from builds.models import Build
from projects.models import Project


class BaseResource(ModelResource):
    def determine_format(self, *args, **kwargs):
        return "application/json"


class EnhancedModelResource(BaseResource):
    def obj_get_list(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_get_list``.

        Takes an optional ``request`` object, whose ``GET`` dictionary can be
        used to narrow the query.
        """
        filters = None

        if hasattr(request, 'GET'):
            filters = request.GET

        applicable_filters = self.build_filters(filters=filters)
        applicable_filters.update(kwargs)

        try:
            return self.get_object_list(request).filter(**applicable_filters)
        except ValueError, e:
            raise NotFound("Invalid resource lookup data provided (mismatched type).")


class UserResource(BaseResource):
    class Meta:
        allowed_methods = ['get']
        queryset = User.objects.all()
        fields = ['username', 'first_name', 'last_name', 'last_login', 'id']

    def override_urls(self):
        return [
            url(r"^(?P<resource_name>%s)/(?P<username>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]


class ProjectResource(BaseResource):
    user = fields.ForeignKey(UserResource, 'user')

    class Meta:
        include_absolute_url = True
        allowed_methods = ['get']
        queryset = Project.objects.all()
        excludes = ['build_pdf', 'path', 'skip', 'featured']

    def override_urls(self):
        return [
            url(r"^(?P<resource_name>%s)/(?P<slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]


class BuildResource(EnhancedModelResource):
    project = fields.ForeignKey(ProjectResource, 'project')

    class Meta:
        allowed_methods = ['get']
        queryset = Build.objects.all()

    def override_urls(self):
        return [
            url(r"^(?P<resource_name>%s)/(?P<project__slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_list'), name="api_list_detail"),
        ]
Python
0
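The record above removes a `determine_format` override that pinned every response to JSON regardless of the client's Accept header. As a minimal, dependency-free sketch of that negotiation pattern (the class and names here are illustrative, not from tastypie):

class ContentNegotiator:
    """Pick a response format from an Accept header, with a hard default."""

    SUPPORTED = {"application/json", "application/xml"}

    def determine_format(self, accept_header):
        # Honour the first supported type the client asks for...
        for part in accept_header.split(","):
            media_type = part.split(";")[0].strip()
            if media_type in self.SUPPORTED:
                return media_type
        # ...otherwise fall back to JSON, as the removed override did.
        return "application/json"


negotiator = ContentNegotiator()
assert negotiator.determine_format("application/xml") == "application/xml"
assert negotiator.determine_format("text/html") == "application/json"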
69f46596f189786fce0e2a087e6870e5d3059331
Fix figshare harvester date range (#764)
share/harvesters/com_figshare_v2.py
share/harvesters/com_figshare_v2.py
import pendulum

from furl import furl

from share.harvest import BaseHarvester


class FigshareHarvester(BaseHarvester):
    VERSION = 1

    page_size = 50

    def _do_fetch(self, start_date, end_date):
        url = furl(self.config.base_url).set(query_params={
            'order_direction': 'asc',
            'order': 'modified_date',
            'page_size': self.page_size,
            'modified_since': start_date.date().isoformat(),
        })

        return self.fetch_records(url, end_date.date())

    def fetch_records(self, url, end_day):
        page = 1
        last_seen_day = None

        while True:
            page += 1
            url.args['page'] = page
            resp = self.requests.get(url.url)

            if last_seen_day and resp.status_code == 422:
                # We've asked for too much. Time to readjust date range
                url.args['modified_since'] = last_seen_day.isoformat()
                page = 0
                continue

            for item in resp.json():
                resp = self.requests.get(item['url'])
                detail = resp.json()
                last_seen_day = pendulum.parse(detail['modified_date']).date()

                if last_seen_day > end_day:
                    return

                yield item['url'], detail

            if len(resp.json()) < self.page_size:
                return  # We've hit the end of our results
import pendulum

from furl import furl

from share.harvest import BaseHarvester


class FigshareHarvester(BaseHarvester):
    VERSION = 1

    page_size = 50

    def do_harvest(self, start_date, end_date):
        return self.fetch_records(furl(self.config.base_url).set(query_params={
            'order_direction': 'asc',
            'order': 'modified_date',
            'page_size': self.page_size,
            'modified_date': start_date.date().isoformat(),
        }).url, end_date.date())

    def fetch_records(self, url, end_day):
        page, detail = 0, None

        while True:
            page += 1
            resp = self.requests.get(furl(url).add(query_params={
                'page': page,
            }).url)

            if resp.status_code == 422:
                # We've asked for too much. Time to readjust date range
                # Thanks for leaking variables python
                page, url = 0, furl(url).add(query_params={
                    'modified_date': pendulum.parse(detail['modified_date']).date().isoformat()
                })
                continue

            for item in resp.json():
                resp = self.requests.get(item['url'])
                detail = resp.json()

                if pendulum.parse(detail['modified_date']).date() > end_day:
                    return

                yield item['url'], detail

            if len(resp.json()) < self.page_size:
                return  # We've hit the end of our results
Python
0.000003
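The fix above tracks the last modification date actually seen and re-anchors the query window on an HTTP 422, instead of re-deriving it from a possibly unset variable. A standalone sketch of that windowed-pagination idea, with the HTTP call abstracted behind a stand-in callable (all names here are illustrative):

import datetime


def paginate(fetch_page, start_day, end_day, page_size):
    """Walk a modified-date-ordered API, re-anchoring the window on overflow.

    ``fetch_page(since_day, page)`` stands in for the real HTTP call; it
    returns (status_code, items) where each item carries a 'day' key.
    """
    since, page, last_seen = start_day, 0, None
    while True:
        page += 1
        status, items = fetch_page(since, page)
        if last_seen is not None and status == 422:
            # Paged too deep for the server: restart from the last day seen.
            since, page = last_seen, 0
            continue
        for item in items:
            last_seen = item["day"]
            if last_seen > end_day:
                return
            yield item
        if len(items) < page_size:
            return  # short page means no more results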
83f54f57170115cda98e7d1aa68972c60b865647
Fix test_upgrades_to_html.py test
cnxupgrade/tests/test_upgrades_to_html.py
cnxupgrade/tests/test_upgrades_to_html.py
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Tests for to_html command-line interface.
"""
import sys
import unittest

from . import DB_CONNECTION_STRING


class ToHtmlTestCase(unittest.TestCase):
    def call_target(self, **kwargs):
        from ..upgrades import to_html
        return to_html.cli_command(**kwargs)

    def test(self):
        # Mock produce_html_for_modules
        if 'cnxarchive.to_html' in sys.modules:
            del sys.modules['cnxarchive.to_html']
        import cnxarchive.to_html as to_html
        original_func = to_html.produce_html_for_modules
        self.addCleanup(setattr, to_html, 'produce_html_for_modules',
                        original_func)

        self.call_count = 0
        def f(*args, **kwargs):
            self.call_count += 1
            self.args = args
            self.kwargs = kwargs
            return []
        to_html.produce_html_for_modules = f

        self.call_target(db_conn_str=DB_CONNECTION_STRING,
                         id_select_query='SELECT 2', overwrite_html=False)

        # Assert produce_html_for_modules is called
        self.assertEqual(self.call_count, 1)
        self.assertEqual(str(type(self.args[0])),
                         "<type 'psycopg2._psycopg.connection'>")
        self.assertEqual(self.args[1], 'SELECT 2')
        self.assertEqual(self.kwargs, {'overwrite_html': False})
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Tests for to_html command-line interface.
"""
import sys
import unittest

from . import DB_CONNECTION_STRING


class ToHtmlTestCase(unittest.TestCase):
    def call_target(self, **kwargs):
        from ..upgrades import to_html
        return to_html.cli_command(**kwargs)

    def test(self):
        # Mock produce_html_for_modules
        if 'cnxarchive.to_html' in sys.modules:
            del sys.modules['cnxarchive.to_html']
        import cnxarchive.to_html as to_html
        original_func = to_html.produce_html_for_modules
        self.addCleanup(setattr, to_html, 'produce_html_for_modules',
                        original_func)

        self.call_count = 0
        def f(*args, **kwargs):
            self.call_count += 1
            self.args = args
            self.kwargs = kwargs
            return []
        to_html.produce_html_for_modules = f

        self.call_target(db_conn_str=DB_CONNECTION_STRING,
                         id_select_query='SELECT 2', overwrite_html=False)

        # Assert produce_html_for_modules is called
        self.assertEqual(self.call_count, 1)
        self.assertEqual(str(type(self.args[0])),
                         "<type 'psycopg2._psycopg.connection'>")
        self.assertEqual(self.args[1], 'SELECT 2')
        self.assertEqual(self.args[2], False)
        self.assertEqual(self.kwargs, {})
Python
0.000021
e20f0d3ada72cb21185ca0c3c1d22a77ee254de0
fix rogue tab
tests/test_get_paths.py
tests/test_get_paths.py
import sys
import os
from goatools.obo_parser import GODag

ROOT = os.path.dirname(os.path.abspath(__file__)) + "/data/"

def print_paths(paths, PRT=sys.stdout):
    for path in paths:
        PRT.write('\n')
        for GO in path:
            PRT.write('{}\n'.format(GO))

def chk_results(actual_paths, expected_paths):
    for actual_path in actual_paths:
        # GOTerm -> list of Strings
        actual = [GO.id for GO in actual_path]
        if actual not in expected_paths:
            raise Exception('ACTUAL {} NOT FOUND IN EXPECTED RESULTS\n'.format(actual))

def test_paths_to_top():
    dag = GODag(ROOT + "mini_obo.obo")
    expected_paths = [
        ['GO:0000001', 'GO:0000002', 'GO:0000005', 'GO:0000010'],
        ['GO:0000001', 'GO:0000003', 'GO:0000005', 'GO:0000010'],
        ['GO:0000001', 'GO:0000003', 'GO:0000006', 'GO:0000008', 'GO:0000010']]
    actual_paths = dag.paths_to_top("GO:0000010")
    chk_results(actual_paths, expected_paths)
    print_paths(actual_paths)
import sys
import os
from goatools.obo_parser import GODag

ROOT = os.path.dirname(os.path.abspath(__file__)) + "/data/"

def print_paths(paths, PRT=sys.stdout):
    for path in paths:
        PRT.write('\n')
        for GO in path:
            PRT.write('{}\n'.format(GO))

def chk_results(actual_paths, expected_paths):
    for actual_path in actual_paths:
        # GOTerm -> list of Strings
        actual = [GO.id for GO in actual_path]
        if actual not in expected_paths:
            raise Exception('ACTUAL {} NOT FOUND IN EXPECTED RESULTS\n'.format(actual))

def test_paths_to_top():
    dag = GODag(ROOT + "mini_obo.obo")
    expected_paths = [
        ['GO:0000001', 'GO:0000002', 'GO:0000005', 'GO:0000010'],
        ['GO:0000001', 'GO:0000003', 'GO:0000005', 'GO:0000010'],
        ['GO:0000001', 'GO:0000003', 'GO:0000006', 'GO:0000008', 'GO:0000010']]
    actual_paths = dag.paths_to_top("GO:0000010")
    chk_results(actual_paths, expected_paths)
    print_paths(actual_paths)
Python
0.000001
e42690a6f225952ddb6417edc90e27892c18d2a2
Move api to root.
api/main.py
api/main.py
from bottle import route, request, response, run, view
from collections import OrderedDict
from parser import parse_response
from server import query_server
import bottle
import json
import os


@route('/')
@view('api/views/index')
def index():
    site = "%s://%s" % (request.urlparts.scheme, request.urlparts.netloc)
    return {"site": site}


@route('/tag', method=["get", "post"])
def tag():
    # Support posting data both via forms and via POST body
    data = request.POST.get("data", request.body.getvalue())
    if not data:
        return {"error": "No data posted"}

    raw_text = query_server(data)
    sentences, entities = parse_response(raw_text)

    response.content_type = "application/json"
    pretty = request.POST.get("pretty", False)
    json_kwargs = {"separators": (',', ':')}
    if pretty:
        json_kwargs = {"indent": 4, "separators": (', ', ': ')}

    return json.dumps(OrderedDict([
        ("sentences", sentences),
        ("entities", entities),
    ]), **json_kwargs)


if __name__ == "__main__":
    environment = os.environ.get("ENVIRONMENT", None)
    assert environment, "Needs $ENVIRONMENT variable set"

    if environment == "development":
        print "RUNNING IN DEVELOPMENT MODE"
        bottle.debug(True)
        bottle.TEMPLATES.clear()
        run(host='localhost', port=8000, reloader=True)
    elif environment == "production":
        print "RUNNING IN PRODUCTION MODE"
        run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
    else:
        assert False, "That's not a valid $ENVIRONMENT"
from bottle import route, request, response, run, view
from collections import OrderedDict
from parser import parse_response
from server import query_server
import bottle
import json
import os


@route('/api/')
@view('api/views/index')
def index():
    site = "%s://%s" % (request.urlparts.scheme, request.urlparts.netloc)
    return {"site": site}


@route('/api/tag', method=["get", "post"])
def tag():
    # Support posting data both via forms and via POST body
    data = request.POST.get("data", request.body.getvalue())
    if not data:
        return {"error": "No data posted"}

    raw_text = query_server(data)
    sentences, entities = parse_response(raw_text)

    response.content_type = "application/json"
    pretty = request.POST.get("pretty", False)
    json_kwargs = {"separators": (',', ':')}
    if pretty:
        json_kwargs = {"indent": 4, "separators": (', ', ': ')}

    return json.dumps(OrderedDict([
        ("sentences", sentences),
        ("entities", entities),
    ]), **json_kwargs)


if __name__ == "__main__":
    environment = os.environ.get("ENVIRONMENT", None)
    assert environment, "Needs $ENVIRONMENT variable set"

    if environment == "development":
        print "RUNNING IN DEVELOPMENT MODE"
        bottle.debug(True)
        bottle.TEMPLATES.clear()
        run(host='localhost', port=8000, reloader=True)
    elif environment == "production":
        print "RUNNING IN PRODUCTION MODE"
        run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
    else:
        assert False, "That's not a valid $ENVIRONMENT"
Python
0
ee3b712611ed531843134ef4ce94cb45c726c127
Fix filename creation in csv export action
nap/extras/actions.py
nap/extras/actions.py
from django.http import StreamingHttpResponse
from django.utils.encoding import force_text

from .models import modelserialiser_factory
from .simplecsv import CSV


class ExportCsv(object):
    def __init__(self, serialiser=None, label=None, **opts):
        self.serialiser = serialiser
        self.opts = opts
        if label:
            self.short_description = label

    def __call__(self, admin, request, queryset):
        if self.serialiser is None:
            ser_class = modelserialiser_factory(
                '%sSerialiser' % admin.__class__.__name__,
                admin.model,
                **self.opts
            )
        else:
            ser_class = self.serialiser

        def inner(ser):
            csv = CSV(fields=ser._fields.keys())
            yield csv.write_headers()
            for obj in queryset:
                data = {
                    key: force_text(val)
                    for key, val in ser.object_deflate(obj).items()
                }
                yield csv.write_dict(data)

        response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
        filename = self.opts.get('filename', 'export_{classname}.csv')
        if callable(filename):
            filename = filename(admin)
        else:
            filename = filename.format(
                classname=admin.__class__.__name__,
                model=admin.model._meta.module_name,
                app_label=admin.model._meta.app_label,
            )
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        return response
from django.http import StreamingHttpResponse
from django.utils.encoding import force_text

from .models import modelserialiser_factory
from .simplecsv import CSV


class ExportCsv(object):
    def __init__(self, serialiser=None, label=None, **opts):
        self.serialiser = serialiser
        self.opts = opts
        if label:
            self.short_description = label

    def __call__(self, admin, request, queryset):
        if self.serialiser is None:
            ser_class = modelserialiser_factory(
                '%sSerialiser' % admin.__class__.__name__,
                admin.model,
                **self.opts
            )
        else:
            ser_class = self.serialiser

        def inner(ser):
            csv = CSV(fields=ser._fields.keys())
            yield csv.write_headers()
            for obj in queryset:
                data = {
                    key: force_text(val)
                    for key, val in ser.object_deflate(obj).items()
                }
                yield csv.write_dict(data)

        response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
        filename = admin.csv_
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        return response
Python
0.000024
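The fixed version accepts either a format-string template or a callable for the export filename. A small standalone sketch of that callable-or-template pattern (the helper name and defaults are ours, not nap's API):

def resolve_filename(option, default="export_{classname}.csv", **context):
    """Accept either a callable or a str.format template for a filename."""
    filename = option or default
    if callable(filename):
        # Callables get the whole context and decide for themselves.
        return filename(context)
    return filename.format(**context)


# Template form:
print(resolve_filename(None, classname="BookAdmin", model="book", app_label="shop"))
# Callable form:
print(resolve_filename(lambda ctx: "books.csv", classname="BookAdmin", model="book", app_label="shop"))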
85a6030ddebaaef2644640b1d3e8e9447a730a78
send utcnow instead of just now
uiharu/bin/collector.py
uiharu/bin/collector.py
from __future__ import print_function

import argparse
import datetime
import logging.config
import socket
import sys

import sqlalchemy as sa

from uiharu.collector import TemperatureCollector
from uiharu.config import ConfigAction
from uiharu.periodic_sleeper import PeriodicSleeper
from uiharu.models import TemperatureMeasurement

_logging_config = dict(
    version=1,
    disable_existing_loggers=False,
    formatters={
        'verbose': {
            'format': '%(asctime)s [%(levelname)s] %(message)s'
        },
    },
    handlers={
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        'null': {
            'class': 'logging.NullHandler',
        }
    },
    loggers={
        '': {
            'handlers': ['console'],
            'level': logging.INFO,
        },
        'temperusb': {
            'level': logging.WARN,
        },
    },
)
logging.config.dictConfig(_logging_config)
log = logging.getLogger(__name__)


def parse_cli_args():
    """Parse the CLI arguments and return the populated namespace."""
    hostname = socket.gethostname()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--period',
        type=float,
        default=60.0,
        help="How often to collect temperature data (in seconds)",
    )
    parser.add_argument(
        '--config',
        action=ConfigAction,
        help="The location of the JSON config file",
    )
    parser.add_argument(
        '--sensor-name',
        default=hostname,
        help="The name to save collector measurements under. Defaults to this host's hostname ({0})".format(hostname),
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        help="Enable debug mode",
    )
    return parser.parse_args()


def main():
    args = parse_cli_args()
    if args.debug:
        log.setLevel(logging.DEBUG)
        log.debug("Debug mode enabled")

    if not args.config:
        print("Error: A config path must be specified", file=sys.stderr)
        sys.exit(1)

    log.info("Using sensor name: %s", args.sensor_name)

    log.info("Connecting to database")
    engine = sa.create_engine(args.config['sqlalchemy_connection_url'])
    Session = sa.orm.sessionmaker(bind=engine)

    log.info("Starting temperature collector with a collection period of %f seconds", args.period)
    collector = TemperatureCollector()
    periodic_sleeper = PeriodicSleeper(args.period)

    log.info("Running the collector")
    while True:
        temperature = collector.get_temperature()
        if not temperature:
            log.error("Could not fetch temperature. Sleeping until next collection period.")
            periodic_sleeper.sleep_until_next_period()
            continue

        log.info("Collected the temperature in Celsius: %f", temperature)
        measurement = TemperatureMeasurement(
            sensor_name=args.sensor_name,
            timestamp=datetime.datetime.utcnow(),
            value=temperature,
        )
        session = Session()
        session.add(measurement)
        session.commit()
        periodic_sleeper.sleep_until_next_period()


if __name__ == "__main__":
    main()
from __future__ import print_function

import argparse
import datetime
import logging.config
import socket
import sys

import sqlalchemy as sa

from uiharu.collector import TemperatureCollector
from uiharu.config import ConfigAction
from uiharu.periodic_sleeper import PeriodicSleeper
from uiharu.models import TemperatureMeasurement

_logging_config = dict(
    version=1,
    disable_existing_loggers=False,
    formatters={
        'verbose': {
            'format': '%(asctime)s [%(levelname)s] %(message)s'
        },
    },
    handlers={
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        'null': {
            'class': 'logging.NullHandler',
        }
    },
    loggers={
        '': {
            'handlers': ['console'],
            'level': logging.INFO,
        },
        'temperusb': {
            'level': logging.WARN,
        },
    },
)
logging.config.dictConfig(_logging_config)
log = logging.getLogger(__name__)


def parse_cli_args():
    """Parse the CLI arguments and return the populated namespace."""
    hostname = socket.gethostname()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--period',
        type=float,
        default=60.0,
        help="How often to collect temperature data (in seconds)",
    )
    parser.add_argument(
        '--config',
        action=ConfigAction,
        help="The location of the JSON config file",
    )
    parser.add_argument(
        '--sensor-name',
        default=hostname,
        help="The name to save collector measurements under. Defaults to this host's hostname ({0})".format(hostname),
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        help="Enable debug mode",
    )
    return parser.parse_args()


def main():
    args = parse_cli_args()
    if args.debug:
        log.setLevel(logging.DEBUG)
        log.debug("Debug mode enabled")

    if not args.config:
        print("Error: A config path must be specified", file=sys.stderr)
        sys.exit(1)

    log.info("Using sensor name: %s", args.sensor_name)

    log.info("Connecting to database")
    engine = sa.create_engine(args.config['sqlalchemy_connection_url'])
    Session = sa.orm.sessionmaker(bind=engine)

    log.info("Starting temperature collector with a collection period of %f seconds", args.period)
    collector = TemperatureCollector()
    periodic_sleeper = PeriodicSleeper(args.period)

    log.info("Running the collector")
    while True:
        temperature = collector.get_temperature()
        if not temperature:
            log.error("Could not fetch temperature. Sleeping until next collection period.")
            periodic_sleeper.sleep_until_next_period()
            continue

        log.info("Collected the temperature in Celsius: %f", temperature)
        measurement = TemperatureMeasurement(
            sensor_name=args.sensor_name,
            timestamp=datetime.datetime.now(),
            value=temperature,
        )
        session = Session()
        session.add(measurement)
        session.commit()
        periodic_sleeper.sleep_until_next_period()


if __name__ == "__main__":
    main()
Python
0
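The one-line change above swaps datetime.now() for datetime.utcnow() so stored timestamps no longer depend on the host's local timezone or DST rules. A short illustration of the difference, including the timezone-aware form that sidesteps naive datetimes entirely:

import datetime

local_naive = datetime.datetime.now()    # wall-clock time, no tzinfo attached
utc_naive = datetime.datetime.utcnow()   # UTC, but still a naive datetime

# A timezone-aware value removes the ambiguity when rows are read back:
utc_aware = datetime.datetime.now(datetime.timezone.utc)
print(local_naive, utc_naive, utc_aware, sep="\n")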
1e03772e601fb6ed0eb6aa59555af61c29b2650f
remove fungible in parent class constructor call
amaascore/assets/cfd.py
amaascore/assets/cfd.py
from __future__ import absolute_import, division, print_function, unicode_literals

from datetime import datetime, date
from dateutil import parser

from amaascore.assets.derivative import Derivative


class ContractForDifference(Derivative):

    def __init__(self, asset_manager_id, asset_id, asset_issuer_id=None, asset_status='Active',
                 display_name='', description='', country_id=None, venue_id=None, currency=None,
                 issue_date=None, links=None, references=None,
                 *args, **kwargs):
        super(ContractForDifference, self).__init__(asset_manager_id=asset_manager_id,
                                                    asset_id=asset_id,
                                                    asset_issuer_id=asset_issuer_id,
                                                    asset_status=asset_status,
                                                    display_name=display_name,
                                                    description=description,
                                                    country_id=country_id, venue_id=venue_id,
                                                    issue_date=issue_date, currency=currency,
                                                    links=links, references=references,
                                                    *args, **kwargs)
from __future__ import absolute_import, division, print_function, unicode_literals

from datetime import datetime, date
from dateutil import parser

from amaascore.assets.derivative import Derivative


class ContractForDifference(Derivative):

    def __init__(self, asset_manager_id, asset_id, asset_issuer_id=None, asset_status='Active',
                 display_name='', description='', country_id=None, venue_id=None, currency=None,
                 issue_date=None, links=None, references=None,
                 *args, **kwargs):
        super(ContractForDifference, self).__init__(asset_manager_id=asset_manager_id,
                                                    asset_id=asset_id, fungible=False,
                                                    asset_issuer_id=asset_issuer_id,
                                                    asset_status=asset_status,
                                                    display_name=display_name,
                                                    description=description,
                                                    country_id=country_id, venue_id=venue_id,
                                                    issue_date=issue_date, currency=currency,
                                                    links=links, references=references,
                                                    *args, **kwargs)
Python
0.000001
274c9e3273ec6966a6b0a0e5c51e8b230fe468e6
Refactor to use api.views.
api/user.py
api/user.py
""" Atmosphere service user rest api. """ from rest_framework import status from rest_framework.response import Response from threepio import logger from service.accounts.eucalyptus import AccountDriver from core.models import AtmosphereUser as User from core.models.provider import Provider from api.serializers import ProfileSerializer from api.views import AuthAPIView class UserManagement(AuthAPIView): """ Represents both the collection of users AND Objects on the User class """ def post(self, request): """ User Class: Create a new user in the database Returns success 200 OK - NO BODY on creation """ params = request.DATA user = request.user if user.username is not "admin" or not user.is_superuser: return Response("Only admin and superusers can create accounts", status=status.HTTP_401_UNAUTHORIZED) username = params["username"] # STEP1 Create the account on the provider provider = Provider.objects.get(location="EUCALYPTUS") driver = AccountDriver(provider) user = driver.add_user(username) # STEP2 Retrieve the identity from the provider if user: user_keys = driver.get_key(username) driver.create_key(user_keys) # STEP3 Return the new users serialized profile serialized_data = ProfileSerializer(user.userprofile).data response = Response(serialized_data) return response def get(self, request): user = request.user if user.username is not "admin" and not user.is_superuser: return Response("Only admin and superusers can view all accounts", status=status.HTTP_401_UNAUTHORIZED) all_users = User.objects.order_by("username") all_profiles = [u.userprofile for u in all_users] serialized_data = ProfileSerializer(all_profiles).data response = Response(serialized_data) return response class User(AuthAPIView): def get(self, request, username): """ Return the object belonging to the user as well as the 'default' provider/identity 1. Test for authenticated username (Or if admin is the username for emulate functionality) 2. <DEFAULT PROVIDER> Select first provider username can use 3. <DEFAULT IDENTITY> Select first provider username can use 4. Set in session THEN pass in response """ user = request.user if user.username is not "admin" or not user.is_superuser: return Response( "Only admin and superusers " "can view individual account profiles", status=status.HTTP_401_UNAUTHORIZED) logger.info(request.__dict__) user = User.objects.get(username=username) serialized_data = ProfileSerializer(user.userprofile).data response = Response(serialized_data) return response def delete(self, request, username): """ Remove the user belonging to the username. 1. Test for authenticated 2. Mark account as deleted (Don't delete?) """ user = request.user if user.username is not "admin" or not user.is_superuser: return Response( "Only admin and superusers " "can delete individual account profiles", status=status.HTTP_401_UNAUTHORIZED) return Response("NotImplemented", status=status.HTTP_501_NOT_IMPLEMENTED) def put(self, request, username): """ Update user information (Should this be available? LDAP needs to take care of this """ user = request.user if user.username is not "admin" or not user.is_superuser: return Response( "Only admin and superusers " "can update individual account profiles", status=status.HTTP_401_UNAUTHORIZED) return Response("NotImplemented", status=status.HTTP_501_NOT_IMPLEMENTED)
""" Atmosphere service user rest api. """ #from django.contrib.auth.models import User as AuthUser from core.models import AtmosphereUser as AuthUser from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status from threepio import logger from service.accounts.eucalyptus import AccountDriver from api.permissions import InMaintenance, ApiAuthRequired from api.serializers import ProfileSerializer from core.models.provider import Provider class UserManagement(APIView): """ Represents both the collection of users AND Objects on the User class """ permission_classes = (ApiAuthRequired,) def post(self, request): """ User Class: Create a new user in the database Returns success 200 OK - NO BODY on creation """ params = request.DATA user = request.user if user.username is not 'admin' or not user.is_superuser: return Response('Only admin and superusers can create accounts', status=status.HTTP_401_UNAUTHORIZED) username = params['username'] #STEP1 Create the account on the provider provider = Provider.objects.get(location='EUCALYPTUS') driver = AccountDriver(provider) user = driver.add_user(username) #STEP2 Retrieve the identity from the provider if user: user_keys = driver.get_key(username) driver.create_key(user_keys) #STEP3 Return the new users serialized profile serialized_data = ProfileSerializer(user.userprofile).data response = Response(serialized_data) return response def get(self, request): user = request.user if user.username is not 'admin' and not user.is_superuser: return Response('Only admin and superusers can view all accounts', status=status.HTTP_401_UNAUTHORIZED) all_users = AuthUser.objects.order_by('username') all_profiles = [u.userprofile for u in all_users] serialized_data = ProfileSerializer(all_profiles).data response = Response(serialized_data) return response class User(APIView): """ """ permission_classes = (ApiAuthRequired,) def get(self, request, username): """ Return the object belonging to the user as well as the 'default' provider/identity 1. Test for authenticated username (Or if admin is the username for emulate functionality) 2. <DEFAULT PROVIDER> Select first provider username can use 3. <DEFAULT IDENTITY> Select first provider username can use 4. Set in session THEN pass in response """ user = request.user if user.username is not 'admin' or not user.is_superuser: return Response( 'Only admin and superusers ' + 'can view individual account profiles', status=status.HTTP_401_UNAUTHORIZED) logger.info(request.__dict__) user = AuthUser.objects.get(username=username) serialized_data = ProfileSerializer(user.userprofile).data response = Response(serialized_data) return response def delete(self, request, username): """ Remove the user belonging to the username. 1. Test for authenticated 2. Mark account as deleted (Don't delete?) """ user = request.user if user.username is not 'admin' or not user.is_superuser: return Response( 'Only admin and superusers ' + 'can delete individual account profiles', status=status.HTTP_401_UNAUTHORIZED) return Response('NotImplemented', status=status.HTTP_501_NOT_IMPLEMENTED) def put(self, request, username): """ Update user information (Should this be available? LDAP needs to take care of this """ user = request.user if user.username is not 'admin' or not user.is_superuser: return Response( 'Only admin and superusers ' + 'can update individual account profiles', status=status.HTTP_401_UNAUTHORIZED) return Response('NotImplemented', status=status.HTTP_501_NOT_IMPLEMENTED)
Python
0
63eaf0faf56a70fadbd37f0acac6f5e61c7b19eb
Change sleep function to the end to do repeat everytime
checkdns.py
checkdns.py
# coding=utf8
# 31.220.16.242
# 216.58.222.46
import socket
import time
import webbrowser


def checkdns():
    print time.ctime()
    retorno = True
    try:
        ip = socket.gethostbyname('google.com')
        print ("O IP do host verificado é: " + ip)
        if ip == "216.58.22.46":
            retorno = False
            url = 'http://www.google.com.br/'
            webbrowser.open_new_tab(url)
        else:
            print "DNS ainda não atualizado. Aguardando 30s."
    except socket.gaierror:
        print "Nenhum host definido para o domínio. Aguardando 30s."
    return retorno

condicao = True
while condicao:
    condicao = checkdns()
    time.sleep( 30 )
# coding=utf8
# 31.220.16.242
# 216.58.222.46
import socket
import time
import webbrowser


def checkdns():
    print time.ctime()
    retorno = True
    try:
        ip = socket.gethostbyname('google.com')
        print ("O IP do host verificado é: " + ip)
        if ip == "216.58.222.46":
            retorno = False
            url = 'http://www.google.com.br/'
            webbrowser.open_new_tab(url)
        else:
            print "DNS ainda não atualizado. Aguardando 30s."
            time.sleep( 30 )
    except socket.gaierror:
        print "Nenhum host definido para o domínio. Aguardando 30s."
        time.sleep( 30 )
    return retorno

condicao = True
while condicao:
    condicao = checkdns()
Python
0
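The change above hoists the duplicated time.sleep() calls out of the branches and into the loop tail, so every iteration waits exactly once regardless of which path ran. A generic sketch of that polling-loop shape (helper name and parameters are ours):

import time


def poll(check, interval=30.0, max_attempts=10):
    """Run ``check()`` until it succeeds, sleeping once per iteration.

    A single sleep site at the loop tail means every code path, success
    checks and error paths alike, waits the same way before retrying.
    """
    for _ in range(max_attempts):
        if check():
            return True
        time.sleep(interval)
    return False


print(poll(lambda: True, interval=0))  # True on the first attempt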
4c819629552a31748e4bb266c1c13726276d7944
Use cross version compatible iteration
tests/test_renderers.py
tests/test_renderers.py
import unittest
from asciimatics.renderers import StaticRenderer
from asciimatics.screen import Screen


class TestRenderers(unittest.TestCase):
    def test_static_renderer(self):
        """
        Check that the base static renderer class works.
        """
        # Check basic API for a renderer...
        renderer = StaticRenderer(images=["A\nB", "C  "])

        # Max height should match largest height of any entry.
        self.assertEqual(renderer.max_height, 2)

        # Max width should match largest width of any entry.
        self.assertEqual(renderer.max_width, 3)

        # Images should be the parsed versions of the original strings.
        images = renderer.images
        self.assertEqual(next(images), ["A", "B"])
        self.assertEqual(next(images), ["C  "])

        # String presentation should be the first image as a printable string.
        self.assertEqual(str(renderer), "A\nB")

    def test_colour_maps(self):
        """
        Check that the ${} syntax is parsed correctly.
        """
        # Check the ${fg, attr} variant
        renderer = StaticRenderer(images=["${3,1}*"])
        output = renderer.rendered_text
        self.assertEqual(len(output[0]), len(output[1]))
        self.assertEqual(output[0], ["*"])
        self.assertEqual(output[1][0][0], (Screen.COLOUR_YELLOW, Screen.A_BOLD))

        # Check the ${fg} variant
        renderer = StaticRenderer(images=["${1}XY${2}Z"])
        output = renderer.rendered_text
        self.assertEqual(len(output[0]), len(output[1]))
        self.assertEqual(output[0], ["XYZ"])
        self.assertEqual(output[1][0][0], (Screen.COLOUR_RED, 0))
        self.assertEqual(output[1][0][1], (Screen.COLOUR_RED, 0))
        self.assertEqual(output[1][0][2], (Screen.COLOUR_GREEN, 0))


if __name__ == '__main__':
    unittest.main()
import unittest
from asciimatics.renderers import StaticRenderer
from asciimatics.screen import Screen


class TestRenderers(unittest.TestCase):
    def test_static_renderer(self):
        """
        Check that the base static renderer class works.
        """
        # Check basic API for a renderer...
        renderer = StaticRenderer(images=["A\nB", "C  "])

        # Max height should match largest height of any entry.
        self.assertEqual(renderer.max_height, 2)

        # Max width should match largest width of any entry.
        self.assertEqual(renderer.max_width, 3)

        # Images should be the parsed versions of the original strings.
        images = renderer.images
        self.assertEqual(images.__next__(), ["A", "B"])
        self.assertEqual(images.__next__(), ["C  "])

        # String presentation should be the first image as a printable string.
        self.assertEqual(str(renderer), "A\nB")

    def test_colour_maps(self):
        """
        Check that the ${} syntax is parsed correctly.
        """
        # Check the ${fg, attr} variant
        renderer = StaticRenderer(images=["${3,1}*"])
        output = renderer.rendered_text
        self.assertEqual(len(output[0]), len(output[1]))
        self.assertEqual(output[0], ["*"])
        self.assertEqual(output[1][0][0], (Screen.COLOUR_YELLOW, Screen.A_BOLD))

        # Check the ${fg} variant
        renderer = StaticRenderer(images=["${1}XY${2}Z"])
        output = renderer.rendered_text
        self.assertEqual(len(output[0]), len(output[1]))
        self.assertEqual(output[0], ["XYZ"])
        self.assertEqual(output[1][0][0], (Screen.COLOUR_RED, 0))
        self.assertEqual(output[1][0][1], (Screen.COLOUR_RED, 0))
        self.assertEqual(output[1][0][2], (Screen.COLOUR_GREEN, 0))


if __name__ == '__main__':
    unittest.main()
Python
0
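The change above replaces direct `__next__()` calls with the `next()` builtin, which dispatches to `__next__` on Python 3 and `next` on Python 2, so the test runs unmodified on both. A tiny self-contained illustration:

images = iter([["A", "B"], ["C  "]])

# Python 3 only:
#   images.__next__()
# Portable across Python 2 and 3:
first = next(images)
second = next(images)
print(first, second)  # ['A', 'B'] ['C  ']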
90b991c19ef5249a09410b19c33f2c8bfe9b5ca7
Install pypy for proper architechture.
braid/pypy.py
braid/pypy.py
import re
from os import path

from fabric.api import cd, task, sudo, abort

from braid import info
from braid.utils import fails

pypyURLs = {
    'x86_64': 'https://bitbucket.org/pypy/pypy/downloads/pypy-2.0.2-linux64.tar.bz2',
    'x86': 'https://bitbucket.org/pypy/pypy/downloads/pypy-2.0.2-linux.tar.bz2',
}
pypyDir = '/opt/pypy-2.0'

setuptoolsURL = 'http://peak.telecommunity.com/dist/ez_setup.py'
pipURL = 'https://raw.github.com/pypa/pip/master/contrib/get-pip.py'


@task
def install():
    sudo('/bin/mkdir -p /opt')
    if fails('/usr/bin/id {}'.format('pypy')):
        sudo('/usr/sbin/useradd --home-dir {} --gid bin '
             '-M --system --shell /bin/false '
             'pypy'.format(pypyDir))
    else:
        sudo('/usr/sbin/usermod --home {} pypy'.format(pypyDir))

    with cd('/opt'):
        if info.arch() == 'x86_64':
            pypyURL = pypyURLs['x86_64']
        elif re.match('i.86', info.arch()):
            pypyURL = pypyURLs['x86']
        else:
            abort("Can't install pypy on unknown architecture.")

        for url in pypyURL, setuptoolsURL, pipURL:
            sudo('/usr/bin/wget -nc {}'.format(url))
        sudo('/bin/tar xf {}'.format(path.basename(pypyURL)))
        for url in setuptoolsURL, pipURL:
            sudo('~pypy/bin/pypy {}'.format(path.basename(url)))
    sudo('~pypy/bin/pip install pyopenssl')
    sudo('~pypy/bin/pip install svn+svn://svn.twistedmatrix.com/svn/Twisted/trunk/')
from os import path

from fabric.api import cd, task, sudo

from braid import fails

pypyURL = 'https://bitbucket.org/pypy/pypy/downloads/pypy-2.0-linux64.tar.bz2'
setuptoolsURL = 'http://peak.telecommunity.com/dist/ez_setup.py'
pipURL = 'https://raw.github.com/pypa/pip/master/contrib/get-pip.py'

pypyDir = '/opt/pypy-2.0'


@task
def install():
    sudo('/bin/mkdir -p /opt')
    if fails('/usr/bin/id {}'.format('pypy')):
        sudo('/usr/sbin/useradd --home-dir {} --gid bin '
             '-M --system --shell /bin/false '
             'pypy'.format(pypyDir))
    else:
        sudo('/usr/sbin/usermod --home {} pypy'.format(pypyDir))

    with cd('/opt'):
        for url in pypyURL, setuptoolsURL, pipURL:
            sudo('/usr/bin/wget -nc {}'.format(url))
        sudo('/bin/tar xf {}'.format(path.basename(pypyURL)))
        for url in setuptoolsURL, pipURL:
            sudo('~pypy/bin/pypy {}'.format(path.basename(url)))
    sudo('~pypy/bin/pip install pyopenssl')
    sudo('~pypy/bin/pip install svn+svn://svn.twistedmatrix.com/svn/Twisted/trunk/')
Python
0
25030673476f9eb99a4eff980d7bb050fdaa2568
Print size of result lists in check_files
analysis/check_files.py
analysis/check_files.py
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import os
import logging
import sys
import yaml

try:
  # Try to use libyaml which is faster
  from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
  # fall back on python implementation
  from yaml import Loader, Dumper


def main(args):
  parser = argparse.ArgumentParser()
  parser.add_argument("-l","--log-level",type=str, default="info",
                      dest="log_level",
                      choices=['debug','info','warning','error'])
  parser.add_argument('first_yml', type=argparse.FileType('r'))
  parser.add_argument('second_yml', type=argparse.FileType('r'))
  pargs = parser.parse_args(args)

  logLevel = getattr(logging, pargs.log_level.upper(),None)
  logging.basicConfig(level=logLevel)

  firstResults = yaml.load(pargs.first_yml, Loader=Loader)
  secondResults = yaml.load(pargs.second_yml, Loader=Loader)

  assert isinstance(firstResults, list)
  assert isinstance(secondResults, list)

  if len(firstResults) == 0:
    logging.error('First Result list is empty')
    return 1

  if len(secondResults) == 0:
    logging.error('Second Result list is empty')
    return 1

  print("# of results in first {}".format(len(firstResults)))
  print("# of results in second {}".format(len(secondResults)))

  # Create sets of used files
  programsInFirst = set()
  programsInSecond = set()
  for r in firstResults:
    programsInFirst.add(r['program'])
  for r in secondResults:
    programsInSecond.add(r['program'])

  resultMissingFromSecond= [ ]
  resultMissingFromFirst=[ ]

  # Check for files missing in second
  for r in firstResults:
    if not (r['program'] in programsInSecond):
      resultMissingFromSecond.append(r)
      logging.warning('Program {} is missing from second but present in first'.format(r['program']))

  # Check for files missing in first
  for r in secondResults:
    if not (r['program'] in programsInFirst):
      resultMissingFromFirst.append(r)
      logging.warning('Program {} is missing from first but present in second'.format(r['program']))

  print("# of programs missing from second but present in first: {}".format(len(resultMissingFromSecond)))
  print("# of programs missing from first but present in second: {}".format(len(resultMissingFromFirst)))
  print("")
  print("# Missing from second")
  for r in resultMissingFromSecond:
    print(r)
  print("# Missing from first")
  for r in resultMissingFromFirst:
    print(r)

  return 0

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import os
import logging
import sys
import yaml

try:
  # Try to use libyaml which is faster
  from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
  # fall back on python implementation
  from yaml import Loader, Dumper


def main(args):
  parser = argparse.ArgumentParser()
  parser.add_argument("-l","--log-level",type=str, default="info",
                      dest="log_level",
                      choices=['debug','info','warning','error'])
  parser.add_argument('first_yml', type=argparse.FileType('r'))
  parser.add_argument('second_yml', type=argparse.FileType('r'))
  pargs = parser.parse_args(args)

  logLevel = getattr(logging, pargs.log_level.upper(),None)
  logging.basicConfig(level=logLevel)

  firstResults = yaml.load(pargs.first_yml, Loader=Loader)
  secondResults = yaml.load(pargs.second_yml, Loader=Loader)

  assert isinstance(firstResults, list)
  assert isinstance(secondResults, list)

  if len(firstResults) == 0:
    logging.error('First Result list is empty')
    return 1

  if len(secondResults) == 0:
    logging.error('Second Result list is empty')
    return 1

  # Create set of all used files
  programsInFirst = set()
  programsInSecond = set()
  for r in firstResults:
    programsInFirst.add(r['program'])
  for r in secondResults:
    programsInSecond.add(r['program'])

  resultMissingFromSecond= [ ]
  resultMissingFromFirst=[ ]

  # Check for files missing in second
  for r in firstResults:
    if not (r['program'] in programsInSecond):
      resultMissingFromSecond.append(r)
      logging.warning('Program {} is missing from second but present in first'.format(r['program']))

  # Check for files missing in first
  for r in secondResults:
    if not (r['program'] in programsInFirst):
      resultMissingFromFirst.append(r)
      logging.warning('Program {} is missing from first but present in second'.format(r['program']))

  print("# of programs missing from second but present in first: {}".format(len(resultMissingFromSecond)))
  print("# of programs missing from first but present in second: {}".format(len(resultMissingFromFirst)))
  print("")
  print("# Missing from second")
  for r in resultMissingFromSecond:
    print(r)
  print("# Missing from first")
  for r in resultMissingFromFirst:
    print(r)

  return 0

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
Python
0
fefdea2a81bec7bdb8678671c0eb2dea8f7dea83
Disable TOTP token sync
hoover/site/settings/common.py
hoover/site/settings/common.py
from pathlib import Path

base_dir = Path(__file__).absolute().parent.parent.parent.parent

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'hoover.search',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'hoover.search.middleware.NoReferral',
    'hoover.search.middleware.NoCache',
)

ROOT_URLCONF = 'hoover.site.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'hoover.search.context_processors.default',
            ],
        },
    },
]

LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'logfile': {
            'format': ('%(asctime)s %(process)d '
                       '%(levelname)s %(name)s %(message)s'),
            'datefmt': '%Y-%m-%d %H:%M:%S',
        },
    },
    'loggers': {
        'django.request': {
            'level': 'WARNING',
            'propagate': False,
            'handlers': ['stderr'],
        },
        'hoover.search': {
            'level': 'INFO',
            'propagate': False,
            'handlers': ['stderr'],
        },
        '': {
            'level': 'WARNING',
            'propagate': True,
            'handlers': ['stderr'],
        },
    },
    'handlers': {
        'stderr': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'logfile',
        },
    },
}

WSGI_APPLICATION = 'hoover.site.wsgi.application'

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

STATIC_URL = '/static/'

LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'

OTP_TOTP_SYNC = False

HOOVER_ELASTICSEARCH_URL = 'http://localhost:9200'
HOOVER_UPLOADS_URL = '/uploads/'
STATIC_ROOT = str(base_dir / 'static')
HOOVER_UPLOADS_ROOT = str(base_dir / 'uploads')
HOOVER_LOADERS = [
    'hoover.search.loaders.upload.Loader',
    'hoover.search.loaders.webdav.Loader',
    'hoover.search.loaders.collectible.Loader',
    'hoover.search.loaders.external.Loader',
]
HOOVER_PDFJS_URL = None
TIKA_URL = 'http://localhost:9998'
EMBED_HYPOTHESIS = None

_minute = 60
_hour = 60 * _minute

HOOVER_TWOFACTOR_INVITATION_VALID = 30  # minutes
HOOVER_TWOFACTOR_AUTOLOGOUT = 3 * _hour
HOOVER_TWOFACTOR_RATELIMIT = None
HOOVER_RATELIMIT_USER = None
HOOVER_BATCH_LIMIT = 250
HOOVER_UI_ROOT = None
HOOVER_EVENTS_DIR = None
HOOVER_OAUTH_LIQUID_URL = None
HOOVER_OAUTH_LIQUID_CLIENT_ID = None
HOOVER_OAUTH_LIQUID_CLIENT_SECRET = None
HOOVER_HYPOTHESIS_EMBED_URL = None
from pathlib import Path

base_dir = Path(__file__).absolute().parent.parent.parent.parent

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'hoover.search',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'hoover.search.middleware.NoReferral',
    'hoover.search.middleware.NoCache',
)

ROOT_URLCONF = 'hoover.site.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'hoover.search.context_processors.default',
            ],
        },
    },
]

LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'logfile': {
            'format': ('%(asctime)s %(process)d '
                       '%(levelname)s %(name)s %(message)s'),
            'datefmt': '%Y-%m-%d %H:%M:%S',
        },
    },
    'loggers': {
        'django.request': {
            'level': 'WARNING',
            'propagate': False,
            'handlers': ['stderr'],
        },
        'hoover.search': {
            'level': 'INFO',
            'propagate': False,
            'handlers': ['stderr'],
        },
        '': {
            'level': 'WARNING',
            'propagate': True,
            'handlers': ['stderr'],
        },
    },
    'handlers': {
        'stderr': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'logfile',
        },
    },
}

WSGI_APPLICATION = 'hoover.site.wsgi.application'

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

STATIC_URL = '/static/'

LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'

HOOVER_ELASTICSEARCH_URL = 'http://localhost:9200'
HOOVER_UPLOADS_URL = '/uploads/'
STATIC_ROOT = str(base_dir / 'static')
HOOVER_UPLOADS_ROOT = str(base_dir / 'uploads')
HOOVER_LOADERS = [
    'hoover.search.loaders.upload.Loader',
    'hoover.search.loaders.webdav.Loader',
    'hoover.search.loaders.collectible.Loader',
    'hoover.search.loaders.external.Loader',
]
HOOVER_PDFJS_URL = None
TIKA_URL = 'http://localhost:9998'
EMBED_HYPOTHESIS = None

_minute = 60
_hour = 60 * _minute

HOOVER_TWOFACTOR_INVITATION_VALID = 30  # minutes
HOOVER_TWOFACTOR_AUTOLOGOUT = 3 * _hour
HOOVER_TWOFACTOR_RATELIMIT = None
HOOVER_RATELIMIT_USER = None
HOOVER_BATCH_LIMIT = 250
HOOVER_UI_ROOT = None
HOOVER_EVENTS_DIR = None
HOOVER_OAUTH_LIQUID_URL = None
HOOVER_OAUTH_LIQUID_CLIENT_ID = None
HOOVER_OAUTH_LIQUID_CLIENT_SECRET = None
HOOVER_HYPOTHESIS_EMBED_URL = None
Python
0
d115c0ceb08a350f7b367f61627ced5ab03df833
Remove useless space
sklearn_porter/language/__init__.py
sklearn_porter/language/__init__.py
# -*- coding: utf-8 -*-

import sklearn_porter.language.c
import sklearn_porter.language.go
import sklearn_porter.language.java
import sklearn_porter.language.js
import sklearn_porter.language.php
import sklearn_porter.language.ruby

LANGUAGES = {
    c.KEY: c,
    go.KEY: go,
    java.KEY: java,
    js.KEY: js,
    php.KEY: php,
    ruby.KEY: ruby
}

__all__ = ['c', 'go', 'java', 'js', 'php', 'ruby', 'LANGUAGES']
# -*- coding: utf-8 -*-

import sklearn_porter.language.c
import sklearn_porter.language.go
import sklearn_porter.language.java
import sklearn_porter.language.js
import sklearn_porter.language.php
import sklearn_porter.language.ruby

LANGUAGES = {
    c.KEY: c,
    go.KEY: go,
    java.KEY: java,
    js.KEY: js,
    php.KEY: php,
    ruby.KEY: ruby
}

__all__ = ['c', 'go', 'java', 'js', 'php', 'ruby', 'LANGUAGES']
Python
0.000001
3fdad9fb89d70b8d81483b646e16d20f076e0ebd
Test urxvt alpha
tests/test_sequences.py
tests/test_sequences.py
"""Test sequence functions.""" import unittest import unittest.mock import io from pywal import sequences from pywal import util # Import colors. COLORS = util.read_file_json("tests/test_files/test_file.json") class Testsequences(unittest.TestCase): """Test the sequence functions.""" def test_set_special(self): """> Create special escape sequence.""" result = sequences.set_special(11, COLORS["special"]["background"]) self.assertEqual(result, "\033]11;#1F211E\007") def test_set_special_alpha(self): """> Create special escape sequence with alpha.""" util.Color.alpha_num = 40 result = sequences.set_special(11, COLORS["special"]["background"]) self.assertEqual(result, "\033]11;[40]#1F211E\007") def test_set_color(self): """> Create color escape sequence.""" result = sequences.set_color(11, COLORS["colors"]["color0"]) self.assertEqual(result, "\033]4;11;#1F211E\007") def test_send_srquences(self): """> Send sequences to all open terminals.""" with unittest.mock.patch('sys.stdout', new=io.StringIO()) as fake_out: sequences.send(COLORS, False) data = fake_out.getvalue().strip() self.assertTrue(data.endswith("colors: Set terminal colors")) if __name__ == "__main__": unittest.main()
"""Test sequence functions.""" import unittest import unittest.mock import io from pywal import sequences from pywal import util # Import colors. COLORS = util.read_file_json("tests/test_files/test_file.json") class Testsequences(unittest.TestCase): """Test the sequence functions.""" def test_set_special(self): """> Create special escape sequence.""" result = sequences.set_special(11, COLORS["special"]["background"]) self.assertEqual(result, "\033]11;#1F211E\007") def test_set_color(self): """> Create color escape sequence.""" result = sequences.set_color(11, COLORS["colors"]["color0"]) self.assertEqual(result, "\033]4;11;#1F211E\007") def test_send_srquences(self): """> Send sequences to all open terminals.""" with unittest.mock.patch('sys.stdout', new=io.StringIO()) as fake_out: sequences.send(COLORS, False) data = fake_out.getvalue().strip() self.assertTrue(data.endswith("colors: Set terminal colors")) if __name__ == "__main__": unittest.main()
Python
0.000019
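The new test asserts that when an alpha value is set, the colour in the OSC escape is prefixed as "[40]#1F211E", the form urxvt reads as 40% opacity. A standalone re-implementation of just that string-building behaviour (this is our sketch, not pywal's actual function, which reads alpha from util.Color.alpha_num rather than a parameter):

def set_special(index, color, alpha=None):
    """Build an OSC escape of the shape the pywal tests expect."""
    body = "[{}]{}".format(alpha, color) if alpha is not None else color
    return "\033]{};{}\007".format(index, body)


assert set_special(11, "#1F211E") == "\033]11;#1F211E\007"
assert set_special(11, "#1F211E", alpha=40) == "\033]11;[40]#1F211E\007"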
b86348349906c88b6946f757485cf41f909a9a91
fix subtitle test for newer versions of ffmpeg
tests/test_subtitles.py
tests/test_subtitles.py
import sys

from .common import *

from av.subtitles.subtitle import *


class TestSubtitle(TestCase):

    def test_movtext(self):
        path = fate_suite('sub/MovText_capability_tester.mp4')
        fh = av.open(path)

        subs = []
        for packet in fh.demux():
            try:
                subs.extend(packet.decode())
            except ValueError:
                raise SkipTest

        self.assertEqual(len(subs), 3)
        self.assertIsInstance(subs[0][0], AssSubtitle)
        self.assertIn(subs[0][0].ass, (
            'Dialogue: 0,0:00:00.97,0:00:02.54,Default,- Test 1.\\N- Test 2.\r\n',
            'Dialogue: 0,0:00:00.97,0:00:02.54,Default,,0,0,0,,- Test 1.\\N- Test 2.\r\n',
        ))

    def test_vobsub(self):
        path = fate_suite('sub/vobsub.sub')
        fh = av.open(path)

        subs = []
        for packet in fh.demux():
            try:
                subs.extend(packet.decode())
            except ValueError:
                raise SkipTest

        self.assertEqual(len(subs), 43)

        sub = subs[0][0]
        self.assertIsInstance(sub, BitmapSubtitle)
        self.assertEqual(sub.x, 259)
        self.assertEqual(sub.y, 379)
        self.assertEqual(sub.width, 200)
        self.assertEqual(sub.height, 24)

        bms = sub.planes
        self.assertEqual(len(bms), 1)
        if hasattr(__builtins__, 'buffer'):
            self.assertEqual(len(buffer(bms[0])), 4800)
        if hasattr(__builtins__, 'memoryview'):
            self.assertEqual(len(memoryview(bms[0])), 4800)
import sys

from .common import *

from av.subtitles.subtitle import *


class TestSubtitle(TestCase):

    def test_movtext(self):
        path = fate_suite('sub/MovText_capability_tester.mp4')
        fh = av.open(path)

        subs = []
        for packet in fh.demux():
            try:
                subs.extend(packet.decode())
            except ValueError:
                raise SkipTest

        self.assertEqual(len(subs), 3)
        self.assertIsInstance(subs[0][0], AssSubtitle)
        self.assertEqual(subs[0][0].ass, 'Dialogue: 0,0:00:00.97,0:00:02.54,Default,- Test 1.\\N- Test 2.\r\n')

    def test_vobsub(self):
        path = fate_suite('sub/vobsub.sub')
        fh = av.open(path)

        subs = []
        for packet in fh.demux():
            try:
                subs.extend(packet.decode())
            except ValueError:
                raise SkipTest

        self.assertEqual(len(subs), 43)

        sub = subs[0][0]
        self.assertIsInstance(sub, BitmapSubtitle)
        self.assertEqual(sub.x, 259)
        self.assertEqual(sub.y, 379)
        self.assertEqual(sub.width, 200)
        self.assertEqual(sub.height, 24)

        bms = sub.planes
        self.assertEqual(len(bms), 1)
        if hasattr(__builtins__, 'buffer'):
            self.assertEqual(len(buffer(bms[0])), 4800)
        if hasattr(__builtins__, 'memoryview'):
            self.assertEqual(len(memoryview(bms[0])), 4800)
Python
0
4a7484bccc9a92353681fb155f15629fa1059cd1
Format users
slackbot/get_scoreboard.py
slackbot/get_scoreboard.py
import logging
from typing import Dict, List, Tuple

from werkzeug.datastructures import ImmutableMultiDict

from database.main import connect, channel_resp
from database.team import check_all_scores

logger = logging.getLogger(__name__)


def get_scoreboard(form: ImmutableMultiDict) -> Dict[str, str]:
    logger.debug(f"Scoreboard request: {form}")
    team_id = form.get('team_id', '')
    with connect() as conn:
        scoreboard_list = check_all_scores(conn, team_id)
    return channel_resp(_parse_scoreboard(scoreboard_list))


def _parse_scoreboard(scoreboard_list: List[Tuple[str, int]]) -> str:
    text = f'Here\'s a list of my favourite people:'
    for index, (subject, score) in enumerate(scoreboard_list):
        text += f'\n{index+1}. <@{subject}> [{score} point{"s" if score != 1 else ""}]'
        if index == 0:
            text += ' :crown:'
        elif index + 1 == len(scoreboard_list):
            text += ' :hankey:'
    return text
import logging
from typing import Dict, List, Tuple

from werkzeug.datastructures import ImmutableMultiDict

from database.main import connect, channel_resp
from database.team import check_all_scores

logger = logging.getLogger(__name__)


def get_scoreboard(form: ImmutableMultiDict) -> Dict[str, str]:
    logger.debug(f"Scoreboard request: {form}")
    team_id = form.get('team_id', '')
    with connect() as conn:
        scoreboard_list = check_all_scores(conn, team_id)
    return channel_resp(_parse_scoreboard(scoreboard_list))


def _parse_scoreboard(scoreboard_list: List[Tuple[str, int]]) -> str:
    text = f'Here\'s a list of my favourite people:'
    for index, (subject, score) in enumerate(scoreboard_list):
        text += f'\n{index+1}. {subject} [{score} point{"s" if score != 1 else ""}]'
        if index == 0:
            text += ' :crown:'
        elif index + 1 == len(scoreboard_list):
            text += ' :hankey:'
    return text
Python
0.000001
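The change above wraps each stored user ID in Slack's `<@USERID>` mention syntax, which the client renders as a clickable @name instead of a raw ID. A tiny self-contained illustration (the sample ID is made up):

def mention(user_id):
    """Wrap a Slack user ID so the client renders it as an @-mention."""
    return f"<@{user_id}>"


line = f"1. {mention('U024BE7LH')} [3 points] :crown:"
print(line)  # 1. <@U024BE7LH> [3 points] :crown: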
29bfc1049352f59fca0b625d0ecbc7177fb565c7
Change default value for certificate location.
py509/x509.py
py509/x509.py
import socket
import uuid

from OpenSSL import crypto


def make_serial():
  """Make a random serial number."""
  return uuid.uuid4().int


def make_pkey(key_type=crypto.TYPE_RSA, key_bits=4096):
  """Make a public/private key pair."""
  key = crypto.PKey()
  key.generate_key(key_type, key_bits)
  return key


def make_certificate_signing_request(pkey, digest='sha512', **name):
  """Make a certificate signing request."""
  csr = crypto.X509Req()
  subj = csr.get_subject()
  subj.C = name.get('C', 'US')
  subj.ST = name.get('ST', 'CA')
  subj.L = name.get('L', 'Home')
  subj.O = name.get('O', 'Home')
  subj.OU = name.get('OU', socket.gethostbyname(socket.getfqdn()))
  subj.CN = name.get('CN', socket.getfqdn())
  csr.set_pubkey(pkey)
  csr.set_version(3)
  csr.sign(pkey, digest)
  return csr


def make_certificate(csr, ca_key, ca_cert, serial, not_before, not_after, digest='sha512', version=2, exts=()):
  """Make a certificate."""
  crt = crypto.X509()
  crt.set_serial_number(serial)
  crt.gmtime_adj_notBefore(not_before)
  crt.gmtime_adj_notAfter(not_after)
  crt.set_issuer(ca_cert.get_subject())
  crt.set_subject(csr.get_subject())
  crt.set_pubkey(csr.get_pubkey())
  crt.set_version(version)
  crt.add_extensions(exts)
  crt.sign(ca_key, digest)
  return crt


def make_certificate_authority(**name):
  """Make a certificate authority.

  A certificate authority can sign certificates. For clients to be able to
  validate certificates signed by your certificate authorithy, they must
  trust the certificate returned by this function.

  """
  key = make_pkey()
  csr = make_certificate_signing_request(key, **name)
  crt = make_certificate(csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60, exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
  return key, crt
import socket
import uuid

from OpenSSL import crypto


def make_serial():
    """Make a random serial number."""
    return uuid.uuid4().int


def make_pkey(key_type=crypto.TYPE_RSA, key_bits=4096):
    """Make a public/private key pair."""
    key = crypto.PKey()
    key.generate_key(key_type, key_bits)
    return key


def make_certificate_signing_request(pkey, digest='sha512', **name):
    """Make a certificate signing request."""
    csr = crypto.X509Req()
    subj = csr.get_subject()
    subj.C = name.get('C', 'US')
    subj.ST = name.get('ST', 'CA')
    subj.L = name.get('L', 'San Diego')
    subj.O = name.get('O', 'Home')
    subj.OU = name.get('OU', socket.gethostbyname(socket.getfqdn()))
    subj.CN = name.get('CN', socket.getfqdn())
    csr.set_pubkey(pkey)
    csr.set_version(3)
    csr.sign(pkey, digest)
    return csr


def make_certificate(csr, ca_key, ca_cert, serial, not_before, not_after,
                     digest='sha512', version=2, exts=()):
    """Make a certificate."""
    crt = crypto.X509()
    crt.set_serial_number(serial)
    crt.gmtime_adj_notBefore(not_before)
    crt.gmtime_adj_notAfter(not_after)
    crt.set_issuer(ca_cert.get_subject())
    crt.set_subject(csr.get_subject())
    crt.set_pubkey(csr.get_pubkey())
    crt.set_version(version)
    crt.add_extensions(exts)
    crt.sign(ca_key, digest)
    return crt


def make_certificate_authority(**name):
    """Make a certificate authority.

    A certificate authority can sign certificates. For clients to be able to
    validate certificates signed by your certificate authority, they must
    trust the certificate returned by this function.
    """
    key = make_pkey()
    csr = make_certificate_signing_request(key, **name)
    crt = make_certificate(csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60,
                           exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
    return key, crt
Python
0.000024
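The diff only swaps the default locality (subj.L) from 'San Diego' to the non-identifying 'Home'. A minimal sketch of where that default lands, mirroring the name.get lookup in make_certificate_signing_request:

name = {}                     # caller passed no L=... override
print(name.get('L', 'Home'))  # -> 'Home' (was 'San Diego' before this commit)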
e05a4f17fcf0ec1bedcc8188d584d31616c4e0af
Update test_toml_file.py
tests/test_toml_file.py
tests/test_toml_file.py
import os

from tomlkit.toml_document import TOMLDocument
from tomlkit.toml_file import TOMLFile


def test_toml_file(example):
    original_content = example("example")

    toml_file = os.path.join(os.path.dirname(__file__), "examples", "example.toml")
    toml = TOMLFile(toml_file)

    content = toml.read()
    assert isinstance(content, TOMLDocument)
    assert content["owner"]["organization"] == "GitHub"

    toml.write(content)

    try:
        with open(toml_file, encoding="utf-8") as f:
            assert original_content == f.read()
    finally:
        with open(toml_file, "w", encoding="utf-8", newline="") as f:
            assert f.write(original_content)


def test_keep_old_eol(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\r\nb = 2\r\n")

    f = TOMLFile(toml_path)
    content = f.read()
    content["b"] = 3
    f.write(content)

    with open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\r\nb = 3\r\n"


def test_keep_old_eol_2(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\nb = 2\n")

    f = TOMLFile(toml_path)
    content = f.read()
    content["b"] = 3
    f.write(content)

    with open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\nb = 3\n"


def test_mixed_eol(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\r\nrb = 2\n")

    f = TOMLFile(toml_path)
    f.write(f.read())

    with open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\r\nrb = 2\n"
import os

from tomlkit.toml_document import TOMLDocument
from tomlkit.toml_file import TOMLFile


def test_toml_file(example):
    original_content = example("example")

    toml_file = os.path.join(os.path.dirname(__file__), "examples", "example.toml")
    toml = TOMLFile(toml_file)

    content = toml.read()
    assert isinstance(content, TOMLDocument)
    assert content["owner"]["organization"] == "GitHub"

    toml.write(content)

    try:
        with open(toml_file, encoding="utf-8") as f:
            assert original_content == f.read()
    finally:
        with open(toml_file, "w", encoding="utf-8", newline="") as f:
            assert f.write(original_content)


def test_keep_old_eol(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\r\nb = 2\r\n")

    f = TOMLFile(toml_path)
    content = f.read()
    content["b"] = 3
    f.write(content)

    with open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\r\nb = 3\r\n"


def test_keep_old_eol_2(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\nb = 2\n")

    f = TOMLFile(toml_path)
    content = f.read()
    content["b"] = 3
    f.write(content)

    with open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\nb = 3\n"


def test_mixed_eol(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\r\nrb = 2\n")

    f = TOMLFile(toml_path)
    f.write(f.read())

    with io.open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\r\nrb = 2\n"
Python
0.000002
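The one-line fix replaces io.open with the builtin open in a file that never imports io, which raised NameError when the last test ran. On Python 3 the two are the same object anyway, so behavior is unchanged:

import io

print(open is io.open)  # True on CPython 3: the builtin open *is* io.open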
210bf81fa0c7296c6e48e112dacc29ad2b89af0c
add raw_id_fields for users and topics
pybb/admin.py
pybb/admin.py
# -*- coding: utf-8
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin

from pybb.models import Category, Forum, Topic, Post, Profile, Read


class CategoryAdmin(admin.ModelAdmin):
    list_display = ['name', 'position', 'forum_count']
    list_per_page = 20
    ordering = ['position']
    search_fields = ['name']


class ForumAdmin(admin.ModelAdmin):
    list_display = ['name', 'category', 'position', 'topic_count']
    list_per_page = 20
    raw_id_fields = ['moderators']
    ordering = ['-category']
    search_fields = ['name', 'category__name']
    fieldsets = (
        (None, {
            'fields': ('category', 'name', 'updated')
        }),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': ('position', 'description', 'post_count', 'moderators')
        }),
    )


class TopicAdmin(admin.ModelAdmin):
    list_display = ['name', 'forum', 'created', 'head', 'post_count']
    list_per_page = 20
    raw_id_fields = ['user', 'forum', 'subscribers']
    ordering = ['-created']
    date_hierarchy = 'created'
    search_fields = ['name']
    fieldsets = (
        (None, {
            'fields': ('forum', 'name', 'user', ('created', 'updated'))
        }),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': (('views', 'post_count'), ('sticky', 'closed'), 'subscribers')
        }),
    )


class PostAdmin(admin.ModelAdmin):
    list_display = ['topic', 'user', 'created', 'updated', 'summary']
    list_per_page = 20
    raw_id_fields = ['user', 'topic']
    ordering = ['-created']
    date_hierarchy = 'created'
    search_fields = ['body']
    fieldsets = (
        (None, {
            'fields': ('topic', 'user', 'markup')
        }),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': (('created', 'updated'), 'user_ip')
        }),
        (_('Message'), {
            'fields': ('body', 'body_html', 'body_text')
        }),
    )


class ProfileAdmin(admin.ModelAdmin):
    list_display = ['user', 'time_zone', 'location', 'language']
    list_per_page = 20
    raw_id_fields = ['user']
    ordering = ['-user']
    search_fields = ['user__username', 'user__first_name', 'user__last_name']
    fieldsets = (
        (None, {
            'fields': ('user', 'time_zone', 'markup', 'location', 'language')
        }),
        (_('IM'), {
            'classes': ('collapse',),
            'fields': ('jabber', 'icq', 'msn', 'aim', 'yahoo')
        }),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': ('site', 'avatar', 'signature', 'show_signatures')
        }),
        (_('Ban options'), {
            'classes': ('collapse',),
            'fields': ('ban_status', 'ban_till')
        }),
    )


class ReadAdmin(admin.ModelAdmin):
    list_display = ['user', 'topic', 'time']
    list_per_page = 20
    raw_id_fields = ['user', 'topic']
    ordering = ['-time']
    date_hierarchy = 'time'
    search_fields = ['user__username', 'topic__name']


admin.site.register(Category, CategoryAdmin)
admin.site.register(Forum, ForumAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Read, ReadAdmin)
# -*- coding: utf-8
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin

from pybb.models import Category, Forum, Topic, Post, Profile, Read


class CategoryAdmin(admin.ModelAdmin):
    list_display = ['name', 'position', 'forum_count']
    list_per_page = 20
    ordering = ['position']
    search_fields = ['name']


class ForumAdmin(admin.ModelAdmin):
    list_display = ['name', 'category', 'position', 'topic_count']
    list_per_page = 20
    ordering = ['-category']
    search_fields = ['name', 'category__name']
    fieldsets = (
        (None, {
            'fields': ('category', 'name', 'updated')
        }),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': ('position', 'description', 'post_count', 'moderators')
        }),
    )


class TopicAdmin(admin.ModelAdmin):
    list_display = ['name', 'forum', 'created', 'head', 'post_count']
    list_per_page = 20
    ordering = ['-created']
    date_hierarchy = 'created'
    search_fields = ['name']
    fieldsets = (
        (None, {
            'fields': ('forum', 'name', 'user', ('created', 'updated'))
        }),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': (('views', 'post_count'), ('sticky', 'closed'), 'subscribers')
        }),
    )


class PostAdmin(admin.ModelAdmin):
    list_display = ['topic', 'user', 'created', 'updated', 'summary']
    list_per_page = 20
    ordering = ['-created']
    date_hierarchy = 'created'
    search_fields = ['body']
    fieldsets = (
        (None, {
            'fields': ('topic', 'user', 'markup')
        }),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': (('created', 'updated'), 'user_ip')
        }),
        (_('Message'), {
            'fields': ('body', 'body_html', 'body_text')
        }),
    )


class ProfileAdmin(admin.ModelAdmin):
    list_display = ['user', 'time_zone', 'location', 'language']
    list_per_page = 20
    ordering = ['-user']
    search_fields = ['user__username', 'user__first_name', 'user__last_name']
    fieldsets = (
        (None, {
            'fields': ('user', 'time_zone', 'markup', 'location', 'language')
        }),
        (_('IM'), {
            'classes': ('collapse',),
            'fields': ('jabber', 'icq', 'msn', 'aim', 'yahoo')
        }),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': ('site', 'avatar', 'signature', 'show_signatures')
        }),
        (_('Ban options'), {
            'classes': ('collapse',),
            'fields': ('ban_status', 'ban_till')
        }),
    )


class ReadAdmin(admin.ModelAdmin):
    list_display = ['user', 'topic', 'time']
    list_per_page = 20
    ordering = ['-time']
    date_hierarchy = 'time'
    search_fields = ['user__username', 'topic__name']


admin.site.register(Category, CategoryAdmin)
admin.site.register(Forum, ForumAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Read, ReadAdmin)
Python
0
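raw_id_fields swaps the default select widget for ForeignKey/ManyToMany fields for a plain ID box with a lookup popup, which keeps the admin usable when there are thousands of users or topics. A minimal sketch with a hypothetical model that is not part of pybb:

from django.contrib import admin

from myapp.models import Comment  # hypothetical model, for illustration only


class CommentAdmin(admin.ModelAdmin):
    # Without this, Django renders a <select> containing every User row;
    # with it, the admin shows a small ID input plus a magnifier lookup.
    raw_id_fields = ['user']


admin.site.register(Comment, CommentAdmin)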
b108cb874288ab6d2ee17b2fd807a95509b3e2c5
properly reverse emails as usernames in urls
api/urls.py
api/urls.py
from django.conf.urls import url, include

from .views import LocationApi, IssueView, IssueCommentView, UserSearch, IssueStatusView, CommentDetailView, \
    MentionView, UserInformationApi, UserDetailView

app_name = 'issue_tracker_api'

urlpatterns = [
    url(r'^$', LocationApi.as_view()),
    url(
        r'^issue/(?P<issue_id>\d+)/',
        include([
            url(r'^$', IssueView.as_view(), name='issue_detail'),
            url(r'^comment/$', IssueCommentView.as_view(), name='issue_comments'),
            url(r'^status/$', IssueStatusView.as_view(), name='issue_status'),
        ])
    ),
    url(r'aboutme/$', UserInformationApi.as_view(), name='aboutme'),
    url(r'^users/$', UserSearch.as_view(), name='user_search'),
    url(r'users/(?P<username>[\w@.]+)/$', UserDetailView.as_view(), name='user_detail'),
    url(r'^mentions/$', MentionView.as_view(), name='mention_search'),
    url(r'^comment/(?P<pk>\d+)/$', CommentDetailView.as_view(), name='comment_detail')
]
from django.conf.urls import url, include

from .views import LocationApi, IssueView, IssueCommentView, UserSearch, IssueStatusView, CommentDetailView, \
    MentionView, UserInformationApi, UserDetailView

app_name = 'issue_tracker_api'

urlpatterns = [
    url(r'^$', LocationApi.as_view()),
    url(
        r'^issue/(?P<issue_id>\d+)/',
        include([
            url(r'^$', IssueView.as_view(), name='issue_detail'),
            url(r'^comment/$', IssueCommentView.as_view(), name='issue_comments'),
            url(r'^status/$', IssueStatusView.as_view(), name='issue_status'),
        ])
    ),
    url(r'aboutme/$', UserInformationApi.as_view(), name='aboutme'),
    url(r'^users/$', UserSearch.as_view(), name='user_search'),
    url(r'users/(?P<username>[\w@]+)/$', UserDetailView.as_view(), name='user_detail'),
    url(r'^mentions/$', MentionView.as_view(), name='mention_search'),
    url(r'^comment/(?P<pk>\d+)/$', CommentDetailView.as_view(), name='comment_detail')
]
Python
0.999932
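The fix adds '.' to the username character class so email-style usernames such as bob@example.com both match the route and reverse cleanly. A quick check of the two patterns:

import re

old = re.compile(r'^(?P<username>[\w@]+)/$')
new = re.compile(r'^(?P<username>[\w@.]+)/$')

print(bool(old.match('bob@example.com/')))  # False: '.' is not in [\w@]
print(bool(new.match('bob@example.com/')))  # True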
d1b8ab844d153a240c3f71965c0258b91613ea0f
Move test for adding devices to cache of nonexistent pool
tests/whitebox/integration/pool/test_init_cache.py
tests/whitebox/integration/pool/test_init_cache.py
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test 'init-cache'.
"""

# isort: FIRSTPARTY
from dbus_client_gen import DbusClientUniqueResultError

# isort: LOCAL
from stratis_cli import StratisCliErrorCodes
from stratis_cli._errors import StratisCliEngineError, StratisCliPartialChangeError

from .._misc import RUNNER, SimTestCase, device_name_list

_DEVICE_STRATEGY = device_name_list(2)
_ERROR = StratisCliErrorCodes.ERROR


class InitCacheFailTestCase(SimTestCase):
    """
    Test 'init-cache' with two different lists of devices.

    'init-cache' should always fail if the cache is initialized twice
    with different devices.
    """

    _MENU = ["--propagate", "pool", "init-cache"]
    _POOLNAME = "deadpool"

    def setUp(self):
        """
        Start stratisd and set up a pool.
        """
        super().setUp()
        command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)

    def test_init_cache(self):
        """
        Test two initializations of the cache with two different device lists.

        Should fail.
        """
        command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)
        command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
        self.check_error(StratisCliEngineError, command_line, _ERROR)


class InitCacheFail2TestCase(SimTestCase):
    """
    Test 'init-cache' the same list of devices twice.

    'init-cache' should always fail if the cache is initialized twice
    with the same devices.
    """

    _MENU = ["--propagate", "pool", "init-cache"]
    _POOLNAME = "deadpool"

    def setUp(self):
        """
        Start stratisd and set up a pool.
        """
        super().setUp()
        command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)

    def test_init_cache(self):
        """
        Test two initializations of the cache with the same device list.

        Should fail.
        """
        devices = _DEVICE_STRATEGY()
        command_line = self._MENU + [self._POOLNAME] + devices
        RUNNER(command_line)
        self.check_error(StratisCliPartialChangeError, command_line, _ERROR)


class InitCacheFail3TestCase(SimTestCase):
    """
    Test 'init-cache' for a non-existent pool.
    """

    _MENU = ["--propagate", "pool", "init-cache"]
    _POOLNAME = "deadpool"

    def test_init_cache(self):
        """
        Initializing the cache must fail since the pool does not exist.
        """
        command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
        self.check_error(DbusClientUniqueResultError, command_line, _ERROR)


class InitCacheSuccessTestCase(SimTestCase):
    """
    Test 'init-cache' once.

    'init-cache' should succeed.
    """

    _MENU = ["--propagate", "pool", "init-cache"]
    _POOLNAME = "deadpool"

    def setUp(self):
        """
        Start stratisd and set up a pool.
        """
        super().setUp()
        command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)

    def test_init_cache(self):
        """
        Test an initialization of the cache with a device list.

        Should succeed.
        """
        command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test 'init-cache'.
"""

# isort: LOCAL
from stratis_cli import StratisCliErrorCodes
from stratis_cli._errors import StratisCliEngineError, StratisCliPartialChangeError

from .._misc import RUNNER, SimTestCase, device_name_list

_DEVICE_STRATEGY = device_name_list(2)
_ERROR = StratisCliErrorCodes.ERROR


class InitCacheFailTestCase(SimTestCase):
    """
    Test 'init-cache' with two different lists of devices.

    'init-cache' should always fail if the cache is initialized twice
    with different devices.
    """

    _MENU = ["--propagate", "pool", "init-cache"]
    _POOLNAME = "deadpool"

    def setUp(self):
        """
        Start stratisd and set up a pool.
        """
        super().setUp()
        command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)

    def test_init_cache(self):
        """
        Test two initializations of the cache with two different device lists.

        Should fail.
        """
        command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)
        command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
        self.check_error(StratisCliEngineError, command_line, _ERROR)


class InitCacheFail2TestCase(SimTestCase):
    """
    Test 'init-cache' the same list of devices twice.

    'init-cache' should always fail if the cache is initialized twice
    with the same devices.
    """

    _MENU = ["--propagate", "pool", "init-cache"]
    _POOLNAME = "deadpool"

    def setUp(self):
        """
        Start stratisd and set up a pool.
        """
        super().setUp()
        command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)

    def test_init_cache(self):
        """
        Test two initializations of the cache with the same device list.

        Should fail.
        """
        devices = _DEVICE_STRATEGY()
        command_line = self._MENU + [self._POOLNAME] + devices
        RUNNER(command_line)
        self.check_error(StratisCliPartialChangeError, command_line, _ERROR)


class InitCacheSuccessTestCase(SimTestCase):
    """
    Test 'init-cache' once.

    'init-cache' should succeed.
    """

    _MENU = ["--propagate", "pool", "init-cache"]
    _POOLNAME = "deadpool"

    def setUp(self):
        """
        Start stratisd and set up a pool.
        """
        super().setUp()
        command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)

    def test_init_cache(self):
        """
        Test an initialization of the cache with a device list.

        Should succeed.
        """
        command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)
Python
0
12cc5e752f9aa4700b57e3647c3676aba70bb996
use valid exception for Python 2.7
tests/whitelist_test.py
tests/whitelist_test.py
# -*- coding: utf-8 -*-
import pytest
from riprova import ErrorWhitelist, NotRetriableError


def test_error_whitelist():
    whitelist = ErrorWhitelist()
    assert type(ErrorWhitelist.WHITELIST) is set

    assert len(whitelist._whitelist) > 4
    assert type(whitelist._whitelist) is set
    assert whitelist._whitelist is not ErrorWhitelist.WHITELIST

    # Test setter
    whitelist.errors = (Exception, RuntimeError)

    # Test getter
    assert whitelist.errors == set([Exception, RuntimeError])

    # Test add()
    whitelist.add(BaseException, SystemExit)
    assert whitelist.errors == set([Exception, RuntimeError,
                                    BaseException, SystemExit])


def test_error_whitelist_invalid():
    whitelist = ErrorWhitelist()

    with pytest.raises(TypeError):
        whitelist.errors = dict()

    with pytest.raises(TypeError):
        whitelist.errors = None

    with pytest.raises(TypeError):
        whitelist.add(None)

    with pytest.raises(TypeError):
        whitelist.add(dict())


class NoRetryError(NotRetriableError):
    pass


class RetryError(NotRetriableError):
    __retry__ = True


@pytest.mark.parametrize("error,expected", [
    (SystemExit(), True),
    (ImportError(), True),
    (ReferenceError(), True),
    (SyntaxError(), True),
    (KeyboardInterrupt(), True),
    (NotRetriableError(), True),
    (NoRetryError(), True),
    (RetryError(), False),
    (ReferenceError(), True),
    (Exception(), False),
    (RuntimeError(), False),
    (TypeError(), False),
    (ValueError(), False),
])
def test_error_whitelist_iswhitedlisted(error, expected):
    assert ErrorWhitelist().iswhitelisted(error) is expected
# -*- coding: utf-8 -*-
import pytest
from riprova import ErrorWhitelist, NotRetriableError


def test_error_whitelist():
    whitelist = ErrorWhitelist()
    assert type(ErrorWhitelist.WHITELIST) is set

    assert len(whitelist._whitelist) > 4
    assert type(whitelist._whitelist) is set
    assert whitelist._whitelist is not ErrorWhitelist.WHITELIST

    # Test setter
    whitelist.errors = (Exception, RuntimeError)

    # Test getter
    assert whitelist.errors == set([Exception, RuntimeError])

    # Test add()
    whitelist.add(TimeoutError, SystemExit)
    assert whitelist.errors == set([Exception, RuntimeError,
                                    TimeoutError, SystemExit])


def test_error_whitelist_invalid():
    whitelist = ErrorWhitelist()

    with pytest.raises(TypeError):
        whitelist.errors = dict()

    with pytest.raises(TypeError):
        whitelist.errors = None

    with pytest.raises(TypeError):
        whitelist.add(None)

    with pytest.raises(TypeError):
        whitelist.add(dict())


class NoRetryError(NotRetriableError):
    pass


class RetryError(NotRetriableError):
    __retry__ = True


@pytest.mark.parametrize("error,expected", [
    (SystemExit(), True),
    (ImportError(), True),
    (ReferenceError(), True),
    (SyntaxError(), True),
    (KeyboardInterrupt(), True),
    (NotRetriableError(), True),
    (NoRetryError(), True),
    (RetryError(), False),
    (ReferenceError(), True),
    (Exception(), False),
    (RuntimeError(), False),
    (TypeError(), False),
    (ValueError(), False),
])
def test_error_whitelist_iswhitedlisted(error, expected):
    assert ErrorWhitelist().iswhitelisted(error) is expected
Python
0.000122
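TimeoutError only became a builtin in Python 3.3, so referencing it made the test die with NameError on Python 2.7; BaseException and SystemExit exist on both lines, which is all the test needs. A tiny compatibility probe:

import builtins  # on Python 2.7 this module is spelled __builtin__

for name in ("BaseException", "SystemExit", "TimeoutError"):
    print(name, hasattr(builtins, name))  # TimeoutError is absent before 3.3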
f000504c624e3b07a0df4c823a2f422dc1294ed9
fix test case
testss/test_training.py
testss/test_training.py
import os
from unittest import TestCase

from mlimages.model import ImageProperty
from mlimages.training import TrainingData
import testss.env as env


class TestLabel(TestCase):

    def test_make_mean(self):
        td = self.get_testdata()
        mean_image_file = os.path.join(os.path.dirname(td.label_file.path), "mean_image.png")

        pre_fetch = list(td.label_file.fetch())
        pre_path = td.label_file.path

        td.make_mean_image(mean_image_file)

        self.assertTrue(os.path.isfile(mean_image_file))
        generated = list(td.generate())
        self.assertEqual(len(pre_fetch), len(generated))
        self.assertNotEqual(pre_path, td.label_file.path)

        os.remove(mean_image_file)
        os.remove(td.label_file.path)

    def test_batch(self):
        # prepare
        td = self.get_testdata()
        mean_image_file = os.path.join(os.path.dirname(td.label_file.path), "mean_image.png")
        td.make_mean_image(mean_image_file)

        # make batch data
        td.shuffle()
        count = 0
        for x, y in td.generate_batches(1):
            self.assertEqual((1, 3, 32, 32), x.shape)
            self.assertEqual((1,), y.shape)
            count += 1

        self.assertEqual(env.LABEL_FILE_COUNT, count)

        os.remove(mean_image_file)
        os.remove(td.label_file.path)

    def get_testdata(self):
        p = env.get_label_file_path()
        img_root = os.path.dirname(p)
        prop = ImageProperty(32)
        td = TrainingData(p, img_root=img_root, image_property=prop)
        return td
import os
from unittest import TestCase

from mlimages.model import LabelFile, ImageProperty
import testss.env as env


class TestLabel(TestCase):

    def test_make_mean(self):
        lf = self.get_label_file()
        mean_image_file = os.path.join(os.path.dirname(lf.path), "mean_image.png")
        imp = ImageProperty(32)
        td = lf.to_training_data(imp)

        td.make_mean_image(mean_image_file)

        self.assertTrue(os.path.isfile(mean_image_file))
        lines = list(lf.fetch())
        generated = list(td.generate())
        self.assertEqual(len(lines), len(generated))
        self.assertNotEqual(lf.path, td.label_file.path)

        os.remove(mean_image_file)
        os.remove(td.label_file.path)

    def test_batch(self):
        lf = self.get_label_file()
        mean_image_file = os.path.join(os.path.dirname(lf.path), "mean_image.png")
        imp = ImageProperty(32)

        # prepare
        td = lf.to_training_data(imp)
        td.make_mean_image(mean_image_file)

        # make batch data
        td.shuffle()
        count = 0
        for x, y in td.generate_batches(1):
            self.assertEqual((1, 3, 32, 32), x.shape)
            self.assertEqual((1,), y.shape)
            count += 1

        self.assertEqual(env.LABEL_FILE_COUNT, count)

        os.remove(mean_image_file)
        os.remove(td.label_file.path)

    def get_label_file(self):
        p = env.get_label_file_path()
        img_root = os.path.dirname(p)
        lf = LabelFile(p, img_root=img_root)
        return lf
Python
0.000022
69ba0847bde12b4da61502076f633eee856ec728
Improve get_user and get_netmask method
netadmin/shortcuts.py
netadmin/shortcuts.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Amit Pal <amix.pal@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from datetime import timedelta

from events.models import Event, EventType
from networks.models import Host, Network
from users.models import UserProfile
from django.contrib.auth.models import User


def get_events(time_from=None, time_to=None, source_hosts=[], event_types=[]):
    """
    get_events(...) -> QuerySet

    Returns events, optionally filtering them by timestamp or source hosts.
    """
    events = Event.objects.all()
    if source_hosts:
        pks = [host.pk for host in source_hosts]
        events = events.filter(source_host__pk__in=pks)
    if event_types:
        pks = [et.pk for et in event_types]
        events = events.filter(event_type__pk__in=pks)
    if time_from:
        events = events.filter(timestamp__gte=time_from)
    if time_to:
        events = events.filter(timestamp__lt=time_to)
    return events


def get_eventtypes(user=None, alert=0):
    """
    get_eventtypes(...) -> QuerySet

    Returns events' types, filtering them by user and/or alert level
    if specified.
    """
    eventtypes = EventType.objects.all()
    if user:
        eventtypes = eventtypes.filter(user=user)
    if alert:
        eventtypes = eventtypes.filter(alert_level__gte=alert)
    return eventtypes


def get_user_events(user):
    """Returns events reported to the specified user
    """
    event_types = get_eventtypes(user)
    return get_events(event_types=event_types)


def get_alerts(user=None):
    ets = [et.pk for et in get_eventtypes(user, 1)]
    return Event.objects.filter(event_type__pk__in=ets, checked=False)


def _get_network_objects(subclass, user=None):
    objects = subclass.objects.all()
    if user:
        objects = objects.filter(user=user)
    return objects


def get_host(id):
    return Host.objects.get(pk=id)


def get_hosts(user=None):
    return _get_network_objects(Host, user)


def get_network(id):
    return Network.objects.get(pk=id)


def get_networks(user=None):
    return _get_network_objects(Network, user)


def get_timezone(user=None):
    user = User.objects.get(username=user)
    user_object = UserProfile.objects.get(id=user.id)
    return user_object.timezone


def get_netmask(user=None):
    host_object = _get_network_objects(Host, user)
    ipv6_value = host_object.values('ipv6_sub_net').distinct('ipv6_sub_net')
    ipv4_value = host_object.values('ipv4_sub_net').distinct('ipv4_sub_net')
    return ipv4_value, ipv6_value
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Piotrek Wasilewski <wasilewski.piotrek@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from datetime import timedelta

from events.models import Event, EventType
from networks.models import Host, Network
from users.models import UserProfile
from django.contrib.auth.models import User


def get_events(time_from=None, time_to=None, source_hosts=[], event_types=[]):
    """
    get_events(...) -> QuerySet

    Returns events, optionally filtering them by timestamp or source hosts.
    """
    events = Event.objects.all()
    if source_hosts:
        pks = [host.pk for host in source_hosts]
        events = events.filter(source_host__pk__in=pks)
    if event_types:
        pks = [et.pk for et in event_types]
        events = events.filter(event_type__pk__in=pks)
    if time_from:
        events = events.filter(timestamp__gte=time_from)
    if time_to:
        events = events.filter(timestamp__lt=time_to)
    return events


def get_eventtypes(user=None, alert=0):
    """
    get_eventtypes(...) -> QuerySet

    Returns events' types, filtering them by user and/or alert level
    if specified.
    """
    eventtypes = EventType.objects.all()
    if user:
        eventtypes = eventtypes.filter(user=user)
    if alert:
        eventtypes = eventtypes.filter(alert_level__gte=alert)
    return eventtypes


def get_user_events(user):
    """Returns events reported to the specified user
    """
    event_types = get_eventtypes(user)
    return get_events(event_types=event_types)


def get_alerts(user=None):
    ets = [et.pk for et in get_eventtypes(user, 1)]
    return Event.objects.filter(event_type__pk__in=ets, checked=False)


def _get_network_objects(subclass, user=None):
    objects = subclass.objects.all()
    if user:
        objects = objects.filter(user=user)
    return objects


def get_host(id):
    return Host.objects.get(pk=id)


def get_hosts(user=None):
    return _get_network_objects(Host, user)


def get_network(id):
    return Network.objects.get(pk=id)


def get_networks(user=None):
    return _get_network_objects(Network, user)


def get_timezone(user=None):
    user = User.objects.get(username=user)
    id_user = user.id
    obj = UserProfile.objects.get(id=id_user)
    timezone = obj.timezone
    return timezone


def get_netmask(user=None):
    obj = Host.objects.filter(user=user)
    ipv4_value = obj.values('ipv4_sub_net').distinct('ipv4_sub_net')
    ipv6_value = obj.values('ipv6_sub_net').distinct('ipv6_sub_net')
    return ipv4_value, ipv6_value
Python
0.000001
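One caveat that applies to both versions of get_netmask: passing a field name to .distinct('ipv4_sub_net') (DISTINCT ON) is only supported by Django's PostgreSQL backend and errors on other databases. A backend-portable spelling, sketched against the same Host model:

# values() narrows each row to one column, so a bare distinct() suffices
ipv4_value = Host.objects.filter(user=user).values('ipv4_sub_net').distinct()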
eadec2e53404407a7f40df483d1f3d75b599a667
Fix PID location
cax/main.py
cax/main.py
from cax.tasks import checksum, clear, copy
import os
import sys
import logging
import time
from cax.config import password
import daemonocle


def main2():
    password()  # Check password specified

    logging.basicConfig(filename='example.log',
                        level=logging.DEBUG,
                        format='%(asctime)s [%(levelname)s] %(message)s')
    logging.info('Daemon is starting')

    tasks = [checksum.AddChecksum(),
             checksum.CompareChecksums(),
             clear.ClearDAQBuffer(),
             copy.SCPPush()]

    while True:
        for task in tasks:
            logging.info("Executing %s." % task.__class__.__name__)
            task.go()

        logging.debug('Sleeping.')
        time.sleep(10)


def main():
    password()  # Check password specified

    daemon = daemonocle.Daemon(worker=main,
                               pidfile=os.path.join(os.path.expanduser("~"), 'cax.pid'))
    daemon.do_action(sys.argv[1])


if __name__ == '__main__':
    main2()
from cax.tasks import checksum, clear, copy
import os
import sys
import logging
import time
from cax.config import password
import daemonocle


def main2():
    password()  # Check password specified

    logging.basicConfig(filename='example.log',
                        level=logging.DEBUG,
                        format='%(asctime)s [%(levelname)s] %(message)s')
    logging.info('Daemon is starting')

    tasks = [checksum.AddChecksum(),
             checksum.CompareChecksums(),
             clear.ClearDAQBuffer(),
             copy.SCPPush()]

    while True:
        for task in tasks:
            logging.info("Executing %s." % task.__class__.__name__)
            task.go()

        logging.debug('Sleeping.')
        time.sleep(10)


def main():
    password()  # Check password specified

    daemon = daemonocle.Daemon(worker=main,
                               pidfile='cax.pid')
    daemon.do_action(sys.argv[1])


if __name__ == '__main__':
    main2()
Python
0.000004
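The fix pins the PID file to the user's home directory instead of whatever directory the daemon happened to be started from, so repeated starts from different working directories agree on one path:

import os

print('cax.pid')                                         # resolved against the CWD
print(os.path.join(os.path.expanduser('~'), 'cax.pid'))  # e.g. /home/alice/cax.pid (path illustrative)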
61cb2f72d94e8bd771e3130d68f753513e5818d5
Add lstrip, rstrip, strip methods
ansi_str.py
ansi_str.py
import re

_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')


def strip_ansi(value):
    return _ansi_re.sub('', value)


def len_exclude_ansi(value):
    return len(strip_ansi(value))


class ansi_str(str):
    """A str subclass, specialized for strings containing ANSI escapes.

    When you call the ``len`` method, it discounts ANSI color escape codes.
    This is beneficial, because ANSI color escape codes won't mess up code
    that tries to do alignment, padding, printing in columns, etc.
    """
    _stripped = None

    @property
    def stripped(self):
        if self._stripped is None:
            self._stripped = strip_ansi(self[:])
        return self._stripped

    def __len__(self, exclude_ansi=True):
        if exclude_ansi is False:
            return len(self[:])
        return len(self.stripped)

    def ljust(self, width):
        return self.stripped.ljust(width).replace(self.stripped, self)

    def rjust(self, width):
        return self.stripped.rjust(width).replace(self.stripped, self)

    def center(self, width):
        return self.stripped.center(width).replace(self.stripped, self)

    def lstrip(self):
        return ansi_str(super(ansi_str, self).lstrip())

    def rstrip(self):
        return ansi_str(super(ansi_str, self).rstrip())

    def strip(self):
        return ansi_str(super(ansi_str, self).strip())


if __name__ == '__main__':
    # s = ansi_str('abc')
    # print s
    # print len(s)
    s = ansi_str(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m')
    print s
    print len(s)
    print s.__len__()
    print s.__len__(exclude_ansi=False)
    print(len_exclude_ansi(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m'))
import re

_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')


def strip_ansi(value):
    return _ansi_re.sub('', value)


def len_exclude_ansi(value):
    return len(strip_ansi(value))


class ansi_str(str):
    """A str subclass, specialized for strings containing ANSI escapes.

    When you call the ``len`` method, it discounts ANSI color escape codes.
    This is beneficial, because ANSI color escape codes won't mess up code
    that tries to do alignment, padding, printing in columns, etc.
    """
    _stripped = None

    @property
    def stripped(self):
        if self._stripped is None:
            self._stripped = strip_ansi(self[:])
        return self._stripped

    def __len__(self, exclude_ansi=True):
        if exclude_ansi is False:
            return len(self[:])
        return len(self.stripped)

    def ljust(self, width):
        return self.stripped.ljust(width).replace(self.stripped, self)

    def rjust(self, width):
        return self.stripped.rjust(width).replace(self.stripped, self)

    def center(self, width):
        return self.stripped.center(width).replace(self.stripped, self)


if __name__ == '__main__':
    # s = ansi_str('abc')
    # print s
    # print len(s)
    s = ansi_str(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m')
    print s
    print len(s)
    print s.__len__()
    print s.__len__(exclude_ansi=False)
    print(len_exclude_ansi(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m'))
Python
0
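The overrides matter because str.lstrip/rstrip/strip return plain str, silently dropping the ANSI-aware __len__; re-wrapping the result keeps the subclass through chained calls. A small check with a hypothetical subclass (Python 3 syntax, unlike the Python 2 prints above):

class tagged_str(str):
    def strip(self):
        return tagged_str(super().strip())

print(type('  hi  '.strip()))              # <class 'str'>: builtin strip loses the subclass
print(type(tagged_str('  hi  ').strip()))  # <class 'tagged_str'>: override preserves it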
bc2c1a9d4c060242db1273e9608c629b2e0243cc
Fix _version.py
thermostate/_version.py
thermostate/_version.py
"""The version of thermohw.""" __version_info__ = (0, 4, 1, 'dev0') # type: Tuple[int, int, int, str] __version__ = '.'.join([str(v) for v in __version_info__ if str(v)])
"""The version of thermohw.""" from typing import Tuple __version_info__: Tuple[int, int, int, str] = (0, 4, 1, 'dev0') __version__ = '.'.join([str(v) for v in __version_info__ if str(v)])
Python
0.998479
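The move from a variable annotation to a type comment is a compatibility fix: the name: Type = value syntax needs Python 3.6+, while a # type: comment parses on any interpreter and costs nothing at runtime. Both spellings say the same thing to a type checker:

from typing import Tuple

# Python 3.6+ only: annotated assignment
v1: Tuple[int, int] = (1, 2)

# Accepted by checkers on older interpreters too: the annotation lives in a comment
v2 = (1, 2)  # type: Tuple[int, int]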
556e0e3474e379427a08e1646274f596c7e4e5ef
Remove unused but circular import
angular_flask/models.py
angular_flask/models.py
from angular_flask import db


class Wig(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    span = db.Column(db.String)

    def __repr__(self):
        return "Wig: {}".format(self.id)


class WigValue(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    position = db.Column(db.Integer)
    value = db.Column(db.Integer)

    id_wig = db.Column(db.Integer, db.ForeignKey('wig.id'))
    wig = db.relationship("Wig", backref=db.backref("values", order_by=position))

    def __init__(self, position, value):
        self.position = position
        self.value = value

    def __repr__(self):
        return "{}".format(self.value)


class Bed(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    chrom = db.Column(db.String)
    chromStart = db.Column(db.Integer)
    chromEnd = db.Column(db.Integer)
    name = db.Column(db.String)
    score = db.Column(db.Integer)
    strand = db.Column(db.Boolean)
    thick_start = db.Column(db.Integer)
    thick_end = db.Column(db.Integer)
    item_RGB = db.Column(db.Integer)
    item_RGB = db.Column(db.Integer)
    blockCount = db.Column(db.Integer)
    blockSizes = db.Column(db.Integer)
    blockStarts = db.Column(db.Integer)

    def __repr__(self):
        return "{}".format(self.name)


class Annotation(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    seqname = db.Column(db.String)
    source = db.Column(db.String)
    feature = db.Column(db.String)
    start = db.Column(db.Integer)
    end = db.Column(db.Integer)
    score = db.Column(db.Integer)
    strand = db.Column(db.Boolean)
    frame = db.Column(db.Integer)
    attribute = db.Column(db.String)

    def __repr__(self):
        return "{}".format(self.seqname)


class Fasta(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    header = db.Column(db.String)

    def __init__(self, header):
        self.header = header

    def __repr__(self):
        return "{}".format(self.header)


class BasePair(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    nucleotide = db.Column(db.String(1))
    position = db.Column(db.Integer)

    fasta_id = db.Column(db.Integer, db.ForeignKey('fasta.id'))
    fasta = db.relationship("Fasta", backref=db.backref("base_pairs", order_by=position))

    def __init__(self, position, nucleotide):
        self.position = position
        self.nucleotide = nucleotide

    def __repr__(self):
        return self.nucleotide


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    user_name = db.Column(db.String)
    email = db.Column(db.String)

    def __init__(self, user_name, email):
        self.user_name = user_name
        self.email = email

    def __repr__(self):
        return self.user_name
from datetime import datetime

from angular_flask import db
from angular_flask import app


class Wig(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    span = db.Column(db.String)

    def __repr__(self):
        return "Wig: {}".format(self.id)


class WigValue(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    position = db.Column(db.Integer)
    value = db.Column(db.Integer)

    id_wig = db.Column(db.Integer, db.ForeignKey('wig.id'))
    wig = db.relationship("Wig", backref=db.backref("values", order_by=position))

    def __init__(self, position, value):
        self.position = position
        self.value = value

    def __repr__(self):
        return "{}".format(self.value)


class Bed(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    chrom = db.Column(db.String)
    chromStart = db.Column(db.Integer)
    chromEnd = db.Column(db.Integer)
    name = db.Column(db.String)
    score = db.Column(db.Integer)
    strand = db.Column(db.Boolean)
    thick_start = db.Column(db.Integer)
    thick_end = db.Column(db.Integer)
    item_RGB = db.Column(db.Integer)
    item_RGB = db.Column(db.Integer)
    blockCount = db.Column(db.Integer)
    blockSizes = db.Column(db.Integer)
    blockStarts = db.Column(db.Integer)

    def __repr__(self):
        return "{}".format(self.name)


class Annotation(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    seqname = db.Column(db.String)
    source = db.Column(db.String)
    feature = db.Column(db.String)
    start = db.Column(db.Integer)
    end = db.Column(db.Integer)
    score = db.Column(db.Integer)
    strand = db.Column(db.Boolean)
    frame = db.Column(db.Integer)
    attribute = db.Column(db.String)

    def __repr__(self):
        return "{}".format(self.seqname)


class Fasta(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    header = db.Column(db.String)

    def __init__(self, header):
        self.header = header

    def __repr__(self):
        return "{}".format(self.header)


class BasePair(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    nucleotide = db.Column(db.String(1))
    position = db.Column(db.Integer)

    fasta_id = db.Column(db.Integer, db.ForeignKey('fasta.id'))
    fasta = db.relationship("Fasta", backref=db.backref("base_pairs", order_by=position))

    def __init__(self, position, nucleotide):
        self.position = position
        self.nucleotide = nucleotide

    def __repr__(self):
        return self.nucleotide


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    user_name = db.Column(db.String)
    email = db.Column(db.String)

    def __init__(self, user_name, email):
        self.user_name = user_name
        self.email = email

    def __repr__(self):
        return self.user_name
Python
0
01674bb349e9850b26aeae212ad77aa992f18ab5
bump version
lava_scheduler_app/__init__.py
lava_scheduler_app/__init__.py
# Copyright (C) 2011 Linaro Limited
#
# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
#
# This file is part of LAVA Scheduler.
#
# LAVA Scheduler is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License version 3 as
# published by the Free Software Foundation
#
# LAVA Scheduler is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>.

__version__ = (0, 4, 0, "final", 0)
# Copyright (C) 2011 Linaro Limited
#
# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
#
# This file is part of LAVA Scheduler.
#
# LAVA Scheduler is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License version 3 as
# published by the Free Software Foundation
#
# LAVA Scheduler is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>.

__version__ = (0, 3, 0, "final", 0)
Python
0
cb5a8ac1b74cdeeea5901bb22d8600ace8f5b6e1
Allow parsing lists of dictionaries as well as dictionaries in JSON structures
tools/json_extractor.py
tools/json_extractor.py
#!/usr/bin/env python

# ----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------

import json
import sys


def main():
    if len(sys.argv) <= 2:
        print "Usage: json_extractor.py field filename"
        sys.exit(0)

    fields = sys.argv[1]  # Comma separated
    filename = sys.argv[2]

    data = open(filename).read()
    jdata = json.loads(data)

    field_list = fields.split(',')
    result = jdata
    for field in field_list:
        if type(result) == dict:
            result = result[field]
        elif type(result) == list:
            field_parts = field.split('=')
            field_name = field_parts[0]
            field_value = field_parts[1]
            for entry in result:
                if entry[field_name] == field_value:
                    result = entry
                    break
    print result


if __name__ == "__main__":
    sys.exit(main())
#!/usr/bin/env python

# ----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------

import json
import sys


def main():
    if len(sys.argv) <= 2:
        print "Usage: json_extractor.py field filename"
        sys.exit(0)

    fields = sys.argv[1]  # Comma separated
    filename = sys.argv[2]

    data = open(filename).read()
    jdata = json.loads(data)

    field_list = fields.split(',')
    result = jdata
    for field in field_list:
        result = result[field]
    print result


if __name__ == "__main__":
    sys.exit(main())
Python
0.000003
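With this change each comma-separated path element either indexes a dict by key or, when the current node is a list, selects the first entry whose name=value pair matches. A sketch of the path syntax against hypothetical data (the tool itself is Python 2; the parenthesized print keeps this valid on both lines):

import json

jdata = json.loads('{"slivers": [{"urn": "a", "state": "ready"}, {"urn": "b", "state": "failed"}]}')

result = jdata
for field in 'slivers,urn=b,state'.split(','):  # dict key, then list match, then dict key
    if type(result) == dict:
        result = result[field]
    elif type(result) == list:
        name, value = field.split('=')
        result = next(entry for entry in result if entry[name] == value)

print(result)  # -> failed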
df35ebdcebc8704f964d3301004fcaf88e70336f
fix filereader cd:/ replacement
tools/lib/filereader.py
tools/lib/filereader.py
import os

from tools.lib.url_file import URLFile

DATA_PREFIX = os.getenv("DATA_PREFIX", "http://data-raw.internal/")


def FileReader(fn, debug=False):
    if fn.startswith("cd:/"):
        fn = fn.replace("cd:/", DATA_PREFIX)
    if fn.startswith("http://") or fn.startswith("https://"):
        return URLFile(fn, debug=debug)
    return open(fn, "rb")
import os

from tools.lib.url_file import URLFile

DATA_PREFIX = os.getenv("DATA_PREFIX", "http://data-raw.internal/")


def FileReader(fn, debug=False):
    if fn.startswith("cd:/"):
        fn.replace("cd:/", DATA_PREFIX)
    if fn.startswith("http://") or fn.startswith("https://"):
        return URLFile(fn, debug=debug)
    return open(fn, "rb")
Python
0
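The old code called fn.replace(...) and discarded the result: Python strings are immutable, so replace returns a new string rather than editing fn in place, and the cd:/ prefix was never rewritten. The fix is just the assignment:

fn = 'cd:/2021-01-01--chunk/rlog.bz2'  # illustrative path, not from the commit

fn.replace('cd:/', 'http://data-raw.internal/')       # no-op: result thrown away
fn = fn.replace('cd:/', 'http://data-raw.internal/')  # rebinds fn to the rewritten string
print(fn)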
2bf8f7beac5ee32e7cb3085da392055603ab88d6
Fix request method
users/tests/test_api.py
users/tests/test_api.py
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase

from ..models import User


class UserTest(APITestCase):
    """Tests for /users/ API endpoints."""

    def test_view_user_logged_out(self):
        user = User.objects.create(name="Trey", email="trey@example.com")
        url = reverse('user-detail', args=[user.pk])
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user.id,
            'name': user.name,
        })

    def test_same_user(self):
        user = User.objects.create(name="Trey", email="trey@example.com")
        url = reverse('user-detail', args=[user.pk])
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user.id,
            'name': user.name,
            'email': user.email,
        })

    def test_different_user(self):
        user1 = User.objects.create(name="User1", email="user1@example.com")
        user2 = User.objects.create(name="User2", email="user2@example.com")
        url = reverse('user-detail', args=[user1.pk])
        self.client.force_authenticate(user=user2)
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user1.id,
            'name': user1.name,
        })

    def test_me_logged_out(self):
        url = reverse('user-detail', args=['me'])
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_404_NOT_FOUND

    def test_me_logged_in(self):
        user = User.objects.create(name="Trey", email="trey@example.com")
        url = reverse('user-detail', args=['me'])
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user.id,
            'name': user.name,
            'email': user.email,
        })

    def test_update_current_user(self):
        user = User.objects.create(name="Trey", email="trey@example.com")
        url = reverse('user-detail', args=[user.pk])
        self.client.force_authenticate(user=user)
        response = self.client.put(url, format='json', data={
            'name': "Bob",
            'email': "bob@example.com",
        })
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user.id,
            'name': user.name,
            'email': user.email,
        })
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase

from ..models import User


class UserTest(APITestCase):
    """Tests for /users/ API endpoints."""

    def test_view_user_logged_out(self):
        user = User.objects.create(name="Trey", email="trey@example.com")
        url = reverse('user-detail', args=[user.pk])
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user.id,
            'name': user.name,
        })

    def test_same_user(self):
        user = User.objects.create(name="Trey", email="trey@example.com")
        url = reverse('user-detail', args=[user.pk])
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user.id,
            'name': user.name,
            'email': user.email,
        })

    def test_different_user(self):
        user1 = User.objects.create(name="User1", email="user1@example.com")
        user2 = User.objects.create(name="User2", email="user2@example.com")
        url = reverse('user-detail', args=[user1.pk])
        self.client.force_authenticate(user=user2)
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user1.id,
            'name': user1.name,
        })

    def test_me_logged_out(self):
        url = reverse('user-detail', args=['me'])
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_404_NOT_FOUND

    def test_me_logged_in(self):
        user = User.objects.create(name="Trey", email="trey@example.com")
        url = reverse('user-detail', args=['me'])
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user.id,
            'name': user.name,
            'email': user.email,
        })

    def test_update_current_user(self):
        user = User.objects.create(name="Trey", email="trey@example.com")
        url = reverse('user-detail', args=[user.pk])
        self.client.force_authenticate(user=user)
        response = self.client.post(url, format='json', data={
            'name': "Bob",
            'email': "bob@example.com",
        })
        assert response.status_code == status.HTTP_200_OK
        assert (response.data == {
            'id': user.id,
            'name': user.name,
            'email': user.email,
        })
Python
0.000001
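DRF routes updates on a detail URL through PUT/PATCH; POST to /users/<pk>/ returns 405 Method Not Allowed, so the old test never exercised the update path. In the test's context (client and authentication already set up as above), the fix is just the verb:

# GET -> retrieve, PUT -> update, PATCH -> partial_update, POST -> 405
response = self.client.put(url, format='json', data={'name': "Bob", 'email': "bob@example.com"})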
8c759f531e6a3cdc0e2b40321153147a7ec00b40
update docstrings to reflect recent AcademicDatabase refactoring
angular_flask/classtime/academic_calendar.py
angular_flask/classtime/academic_calendar.py
import sys
import re

from academic_databases.abstract_academicdb import AcademicDatabase


class AcademicCalendar(object):
    """
    Manages academic calendar information, including terms, courses
    and sections.

    Connects to an institution's course database using any implementation
    of the AcademicDatabase abstract base class.
    """
    def __init__(self, institution_name):
        """
        Initialize the Calendar with a database connection to a
        specific institution whose configuration is defined by a JSON
        file in academic_databases/institutions.

        See 'institutions/ualberta.json' for an example.
        """
        try:
            self._course_db = AcademicDatabase.build(institution_name)
            self._course_db.connect()
        except:
            raise

        self._all_terms = self._course_db.search('terms')
        self._term = None
        self._all_courses = None

    def select_current_term(self, termid):
        if termid not in [term.get('term') for term in self._all_terms]:
            raise Exception('Term #{} not found'.format(termid))
        self._term = termid
        self._populate_courses_for_current_term()

    def get_term_list(self):
        return self._all_terms

    def get_courses_for_current_term(self):
        return self._all_courses

    def _populate_courses_for_current_term(self):
        """
        Prerequisite: Must have set the current term with
        select_current_term()

        Populates the courses dictionary with all courses available in
        the currently selected term
        """
        if self._term == None:
            raise Exception('Must select a term before looking for courses!')
        current_term = 'term={}'.format(self._term)
        self._all_courses = self._course_db.search('courses', path=current_term)

    def _populate_sections_for_course(self, course):
        current_course = 'course={},term={}'.format(course['course'], self._term)
        sections = self._course_db.search('sections', path=current_course)
        for section in sections:
            # class_ is a field in the Section sqlalchemy model
            # because class is a reserved keyword in python
            section['class_'] = section.get('class')
            section.pop('class', None)

            current_section = 'class={},{}'.format(section.get('class_'), current_course)
            classtimes = self._course_db.search('classtimes', path=current_section)
            if len(classtimes) == 1:
                classtime = classtimes[0]
            else:
                classtime = dict()
            section['day'] = classtime.get('day')
            section['location'] = classtime.get('location')
            section['startTime'] = classtime.get('startTime')
            section['endTime'] = classtime.get('endTime')
        course['sections'] = sections
        return course
import sys
import re

from academic_databases.abstract_academicdb import AcademicDatabase


class AcademicCalendar(object):
    """
    Gives access to academic calendar data contained in an LDAP server
    """
    def __init__(self, institution_name):
        """
        Initialize the Calendar with a database connection to a
        specific institution, defined as a JSON config file
        """
        try:
            self._course_db = AcademicDatabase.build(institution_name)
            self._course_db.connect()
        except:
            raise

        self._all_terms = self._course_db.search('terms')
        self._term = None
        self._all_courses = None

    def select_current_term(self, termid):
        if termid not in [term.get('term') for term in self._all_terms]:
            raise Exception('Term #{} not found'.format(termid))
        self._term = termid
        self._populate_courses_for_current_term()

    def get_term_list(self):
        return self._all_terms

    def get_courses_for_current_term(self):
        return self._all_courses

    def _populate_courses_for_current_term(self):
        """
        Prerequisite: Must have set the current term with
        select_current_term()

        Populates the courses dictionary with all courses available in
        the currently selected term
        """
        if self._term == None:
            raise Exception('Must select a term before looking for courses!')
        current_term = 'term={}'.format(self._term)
        self._all_courses = self._course_db.search('courses', path=current_term)

    def _populate_sections_for_course(self, course):
        current_course = 'course={},term={}'.format(course['course'], self._term)
        sections = self._course_db.search('sections', path=current_course)
        for section in sections:
            # class_ is a field in the Section sqlalchemy model
            # because class is a reserved keyword in python
            section['class_'] = section.get('class')
            section.pop('class', None)

            current_section = 'class={},{}'.format(section.get('class_'), current_course)
            classtimes = self._course_db.search('classtimes', path=current_section)
            if len(classtimes) == 1:
                classtime = classtimes[0]
            else:
                classtime = dict()
            section['day'] = classtime.get('day')
            section['location'] = classtime.get('location')
            section['startTime'] = classtime.get('startTime')
            section['endTime'] = classtime.get('endTime')
        course['sections'] = sections
        return course
Python
0
1a740734f1f51cd5a64443a82a116eb14abf31fd
Use __future__.py from CPython.
Languages/IronPython/IronPython/Lib/__future__.py
Languages/IronPython/IronPython/Lib/__future__.py
"""Record of phased-in incompatible language changes. Each line is of the form: FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," CompilerFlag ")" where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples of the same form as sys.version_info: (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int PY_MINOR_VERSION, # the 1; an int PY_MICRO_VERSION, # the 0; an int PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string PY_RELEASE_SERIAL # the 3; an int ) OptionalRelease records the first release in which from __future__ import FeatureName was accepted. In the case of MandatoryReleases that have not yet occurred, MandatoryRelease predicts the release in which the feature will become part of the language. Else MandatoryRelease records when the feature became part of the language; in releases at or after that, modules no longer need from __future__ import FeatureName to use the feature in question, but may continue to use such imports. MandatoryRelease may also be None, meaning that a planned feature got dropped. Instances of class _Feature have two corresponding methods, .getOptionalRelease() and .getMandatoryRelease(). CompilerFlag is the (bitfield) flag that should be passed in the fourth argument to the builtin function compile() to enable the feature in dynamically compiled code. This flag is stored in the .compiler_flag attribute on _Future instances. These values must match the appropriate #defines of CO_xxx flags in Include/compile.h. No feature line is ever to be deleted from this file. """ all_feature_names = [ "nested_scopes", "generators", "division", "absolute_import", "with_statement", "print_function", "unicode_literals", ] __all__ = ["all_feature_names"] + all_feature_names # The CO_xxx symbols are defined here under the same names used by # compile.h, so that an editor search will find them here. However, # they're not exported in __all__, because they don't really belong to # this module. CO_NESTED = 0x0010 # nested_scopes CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) CO_FUTURE_DIVISION = 0x2000 # division CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals class _Feature: def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): self.optional = optionalRelease self.mandatory = mandatoryRelease self.compiler_flag = compiler_flag def getOptionalRelease(self): """Return first release in which this feature was recognized. This is a 5-tuple, of the same form as sys.version_info. """ return self.optional def getMandatoryRelease(self): """Return release in which this feature will become mandatory. This is a 5-tuple, of the same form as sys.version_info, or, if the feature was dropped, is None. 
""" return self.mandatory def __repr__(self): return "_Feature" + repr((self.optional, self.mandatory, self.compiler_flag)) nested_scopes = _Feature((2, 1, 0, "beta", 1), (2, 2, 0, "alpha", 0), CO_NESTED) generators = _Feature((2, 2, 0, "alpha", 1), (2, 3, 0, "final", 0), CO_GENERATOR_ALLOWED) division = _Feature((2, 2, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_DIVISION) absolute_import = _Feature((2, 5, 0, "alpha", 1), (2, 7, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT) with_statement = _Feature((2, 5, 0, "alpha", 1), (2, 6, 0, "alpha", 0), CO_FUTURE_WITH_STATEMENT) print_function = _Feature((2, 6, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_PRINT_FUNCTION) unicode_literals = _Feature((2, 6, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_UNICODE_LITERALS)
##################################################################################### # # Copyright (c) Microsoft Corporation. All rights reserved. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. # # ##################################################################################### all_feature_names = ['nested_scopes', 'generators', 'division', 'absolute_import', 'with_statement', 'print_function', 'unicode_literals'] division=1 with_statement=1 generators=1 absolute_import=1 print_function=1 unicode_literals=1
Python
0
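The docstring above says each CompilerFlag is meant for the fourth argument of the builtin compile(). A minimal sketch of that mechanism using the division feature; under Python 2 (which this IronPython file targets) the flag switches / to true division, while on Python 3 true division is already the default:

```python
# Sketch: enabling a __future__ feature in dynamically compiled code.
import __future__

code = compile("result = 1 / 2", "<example>", "exec",
               __future__.division.compiler_flag)
ns = {}
exec(code, ns)
print(ns["result"])  # 0.5 with the flag; plain 1/2 gives 0 on Python 2
```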
ab78bf2c47a8bec5c1d0c5a7951dba1c98f5c28e
Revert file to moneymanager master branch.
check_gm.py
check_gm.py
#!/usr/bin/env python3 # vi:tabstop=4:expandtab:shiftwidth=4:softtabstop=4:autoindent:smarttab import os, sys import sqlite3 import urllib.request err = False for version in range (7, 14): fname = 'tables_v1.sql' if version < 12 else 'tables.sql' url = 'https://cdn.jsdelivr.net/gh/moneymanagerex/database@v%i/%s' % (version, fname) schema = urllib.request.urlopen(url).read().decode('utf-8') db = sqlite3.connect(':memory:') db.executescript(schema) print('\nTesting reports with MMEX db schema v%i:' % version) print('-' * 40) for root, dirs, files in os.walk('.'): for sql in files: if sql=='sqlcontent.sql': try: db.executescript(open(os.path.join(root, sql)).read()) except sqlite3.Error as e: print('ERR', os.path.basename(root).ljust(40), e.args[0]) err = True else: print('OK ', os.path.basename(root)) db.rollback() db.close() if err: sys.exit(1)
#!/usr/bin/env python3 # vi:tabstop=4:expandtab:shiftwidth=4:softtabstop=4:autoindent:smarttab import os, sys import sqlite3 import urllib.request err = False for version in range (7, 14): fname = 'tables_v1.sql' if version < 12 else 'tables.sql' url = 'https://cdn.jsdelivr.net/gh/moneymanagerex/database@v%i/%s' % (version, fname) schema = urllib.request.urlopen(url).read().decode('utf-8') db = sqlite3.connect(':memory:') db.executescript(schema) print('\nTesting reports with MMEX db schema v%i:' % version) print('-' * 40) for root, dirs, files in os.walk('.'): for sql in files: if sql=='sqlcontent.sql': try: db.executescript(open(os.path.join(root, sql)).read()) except sqlite3.Error as e: print('ERR', os.path.basename(root).ljust(40), e.args[0]) err = True else: print('OK ', os.path.basename(root)) db.rollback() db.close() if err: sys.exit(1)
Python
0
81bd4c6a7b94803e57a64f47bacbf3d5059282bd
add node
checkbst.py
checkbst.py
""" This is a very common interview question. Given a binary tree, check whether it’s a binary search tree or not. Simple as that.. http://www.ardendertat.com/2011/10/10/programming-interview-questions-7-binary-search-tree-check/ """ class Node: def __init__(self, val=None): self.left, self.right, self.val = None, None, val
""" This is a very common interview question. Given a binary tree, check whether it’s a binary search tree or not. Simple as that.. http://www.ardendertat.com/2011/10/10/programming-interview-questions-7-binary-search-tree-check/ """
Python
0.000001
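The commit above only adds the Node class; the check the docstring describes is classically done by narrowing a (low, high) window while recursing. A sketch building on that Node class, assuming strictly ordered values with no duplicates:

```python
# Sketch: BST check via inherited value bounds, using the Node class above.
def is_bst(node, lo=float('-inf'), hi=float('inf')):
    if node is None:
        return True
    if not lo < node.val < hi:          # value must fit the ancestor window
        return False
    return (is_bst(node.left, lo, node.val) and
            is_bst(node.right, node.val, hi))

root = Node(8)
root.left, root.right = Node(3), Node(10)
print(is_bst(root))  # True
```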
3a5b96d5d666521a97598fe9d30b8e007242f8aa
swap from published/test:compile to published/compile in CI to try and speed things up a little
ci/build.py
ci/build.py
#!/usr/bin/env python import os from subprocess import check_call, check_output import json import sys is_master_commit = ( os.environ['TRAVIS_PULL_REQUEST'] == "false" and os.environ['TRAVIS_BRANCH'] == "master" ) all_versions = [ "2.10.4", "2.10.5", "2.10.6", "2.11.3", "2.11.4", "2.11.5", "2.11.6", "2.11.7", "2.11.8" ] def update_version(): git_hash = check_output(["git", "rev-parse", "--short", "HEAD"]).strip() version_txt = """ package ammonite object Constants{ val version = "COMMIT-%s" val curlUrl = "https://git.io/vKwA8" } """ % git_hash open("project/Constants.scala", "w").write(version_txt) def publish_signed(): creds = """ (credentials in ThisBuild) += Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", "%s", "%s" ) pgpPassphrase := Some("%s".toArray) pgpSecretRing := file("secring.asc") pgpPublicRing := file("pubring.asc") sonatypeProfileName := "com.lihaoyi" """ % ( os.environ['SONATYPE_DEPLOY_USER'], os.environ['SONATYPE_DEPLOY_PASSWORD'], os.environ['SONATYPE_PGP_PASSWORD'] ) open("sonatype.sbt", "w").write(creds) open("secring.asc", "w").write( json.loads('"' + os.environ['SONATYPE_PGP_KEY_CONTENTS'] + '"') ) open("pubring.asc", "w").write( json.loads('"' + os.environ['SONATYPE_PGP_PUB_KEY_CONTENTS'] + '"') ) for version in all_versions: if version in {"2.10.5", "2.11.8"}: check_call(["sbt", "++"+version, "published/publishSigned"]) else: check_call(["sbt", "++"+version, "amm/publishSigned", "sshd/publishSigned"]) check_call(["sbt", "sonatypeReleaseAll"]) def publish_docs(): deploy_key = json.loads('"' + os.environ['DEPLOY_KEY'] + '"') with open("deploy_key", "w") as f: f.write(deploy_key) if os.environ.get("TRAVIS_TAG"): new_env = dict(os.environ, DOC_FOLDER=".") else: new_env = dict(os.environ, DOC_FOLDER="master") check_call("ci/deploy_master_docs.sh", env=new_env) if sys.argv[1] == "docs": if is_master_commit: print "MASTER COMMIT: Updating version and publishing to Github Pages" update_version() publish_docs() else: print "MISC COMMIT: Building readme for verification" check_call(["sbt", "readme/run"]) elif sys.argv[1] == "artifacts": if is_master_commit: print "MASTER COMMIT: Updating version and publishing to Maven Central" update_version() publish_signed() else: print "MISC COMMIT: Compiling all Scala code across versions for verification" for version in all_versions: check_call(["sbt", "++" + version, "published/compile"]) elif sys.argv[1] == "test": check_call(["sbt", "++" + os.environ["TRAVIS_SCALA_VERSION"], "published/compile"]) check_call(["sbt", "++" + os.environ["TRAVIS_SCALA_VERSION"], sys.argv[2]]) else: raise Exception("Unknown argument list %s" % sys.argv)
#!/usr/bin/env python import os from subprocess import check_call, check_output import json import sys is_master_commit = ( os.environ['TRAVIS_PULL_REQUEST'] == "false" and os.environ['TRAVIS_BRANCH'] == "master" ) all_versions = [ "2.10.4", "2.10.5", "2.10.6", "2.11.3", "2.11.4", "2.11.5", "2.11.6", "2.11.7", "2.11.8" ] def update_version(): git_hash = check_output(["git", "rev-parse", "--short", "HEAD"]).strip() version_txt = """ package ammonite object Constants{ val version = "COMMIT-%s" val curlUrl = "https://git.io/vKwA8" } """ % git_hash open("project/Constants.scala", "w").write(version_txt) def publish_signed(): creds = """ (credentials in ThisBuild) += Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", "%s", "%s" ) pgpPassphrase := Some("%s".toArray) pgpSecretRing := file("secring.asc") pgpPublicRing := file("pubring.asc") sonatypeProfileName := "com.lihaoyi" """ % ( os.environ['SONATYPE_DEPLOY_USER'], os.environ['SONATYPE_DEPLOY_PASSWORD'], os.environ['SONATYPE_PGP_PASSWORD'] ) open("sonatype.sbt", "w").write(creds) open("secring.asc", "w").write( json.loads('"' + os.environ['SONATYPE_PGP_KEY_CONTENTS'] + '"') ) open("pubring.asc", "w").write( json.loads('"' + os.environ['SONATYPE_PGP_PUB_KEY_CONTENTS'] + '"') ) for version in all_versions: if version in {"2.10.5", "2.11.8"}: check_call(["sbt", "++"+version, "published/publishSigned"]) else: check_call(["sbt", "++"+version, "amm/publishSigned", "sshd/publishSigned"]) check_call(["sbt", "sonatypeReleaseAll"]) def publish_docs(): deploy_key = json.loads('"' + os.environ['DEPLOY_KEY'] + '"') with open("deploy_key", "w") as f: f.write(deploy_key) if os.environ.get("TRAVIS_TAG"): new_env = dict(os.environ, DOC_FOLDER=".") else: new_env = dict(os.environ, DOC_FOLDER="master") check_call("ci/deploy_master_docs.sh", env=new_env) if sys.argv[1] == "docs": if is_master_commit: print "MASTER COMMIT: Updating version and publishing to Github Pages" update_version() publish_docs() else: print "MISC COMMIT: Building readme for verification" check_call(["sbt", "readme/run"]) elif sys.argv[1] == "artifacts": if is_master_commit: print "MASTER COMMIT: Updating version and publishing to Maven Central" update_version() publish_signed() else: print "MISC COMMIT: Compiling all Scala code across versions for verification" for version in all_versions: check_call(["sbt", "++" + version, "published/test:compile"]) elif sys.argv[1] == "test": check_call(["sbt", "++" + os.environ["TRAVIS_SCALA_VERSION"], "published/compile"]) check_call(["sbt", "++" + os.environ["TRAVIS_SCALA_VERSION"], sys.argv[2]]) else: raise Exception("Unknown argument list %s" % sys.argv)
Python
0
175bbd2f181d067712d38beeca9df4063654103a
Update script to remove extension from filename
nlppln/frog_to_saf.py
nlppln/frog_to_saf.py
#!/usr/bin/env python import click import os import codecs import json from xtas.tasks._frog import parse_frog, frog_to_saf @click.command() @click.argument('input_files', nargs=-1, type=click.Path(exists=True)) @click.argument('output_dir', nargs=1, type=click.Path()) def frog2saf(input_files, output_dir): if not os.path.exists(output_dir): os.makedirs(output_dir) for fi in input_files: with codecs.open(fi) as f: lines = f.readlines() lines = [line.strip() for line in lines] saf_data = frog_to_saf(parse_frog(lines)) head, tail = os.path.split(fi) fname = tail.replace(os.path.splitext(tail)[1], '') out_file = os.path.join(output_dir, '{}.json'.format(fname)) with codecs.open(out_file, 'wb', encoding='utf-8') as f: json.dump(saf_data, f, indent=4) if __name__ == '__main__': frog2saf()
#!/usr/bin/env python import click import os import codecs import json from xtas.tasks._frog import parse_frog, frog_to_saf @click.command() @click.argument('input_files', nargs=-1, type=click.Path(exists=True)) @click.argument('output_dir', nargs=1, type=click.Path()) def frog2saf(input_files, output_dir): if not os.path.exists(output_dir): os.makedirs(output_dir) for fi in input_files: with codecs.open(fi) as f: lines = f.readlines() lines = [line.strip() for line in lines] saf_data = frog_to_saf(parse_frog(lines)) head, tail = os.path.split(fi) out_file = os.path.join(output_dir, '{}.json'.format(tail)) with codecs.open(out_file, 'wb', encoding='utf-8') as f: json.dump(saf_data, f, indent=4) if __name__ == '__main__': frog2saf()
Python
0.000002
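The new version strips the extension with str.replace() on the splitext() suffix, which also rewrites any earlier occurrence of that suffix in the name; os.path.splitext()[0] yields the stem directly. A small illustration with a hypothetical filename:

```python
# Sketch: two ways to drop a file extension.
import os.path

tail = 'frog.out.out'  # hypothetical filename with a repeated suffix
print(tail.replace(os.path.splitext(tail)[1], ''))  # 'frog'      (both '.out' removed)
print(os.path.splitext(tail)[0])                    # 'frog.out'  (only the last one)
```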
b3426bcd217c336f8807a5474b47dea72a994eb9
Rename `op`-parameter to `request`.
ioctl/__init__.py
ioctl/__init__.py
import ctypes import fcntl import sys # In Python 2, the bytearray()-type does not support the buffer interface, # and can therefore not be used in ioctl(). # This creates a couple of helper functions for converting to and from if sys.version_info < (3, 0): import array def _to_bytearray(value): return array.array('B', value) def _from_bytearray(value): return value.tostring() else: def _to_bytearray(value): return bytearray(value) def _from_bytearray(value): return bytes(value) def ioctl_int(fd, request, value=0): """Call ioctl() with an `int *` argument. :param fd: File descriptor to operate on. :param request: The ioctl request to call. :param value: Optional value to pass to the ioctl() operation. Defaults to 0. :return The contents of the value parameter after the call to ioctl(). """ res = ctypes.c_int(value) fcntl.ioctl(fd, request, res) return res.value def ioctl_size_t(fd, request, value=0): """Call ioctl() with a `size_t *` argument. :param fd: File descriptor to operate on. :param request: ioctl request to call. :param value: Optional value to pass to the ioctl() operation. Defaults to 0. :return: The contents of the value parameter after the call to ioctl(). """ res = ctypes.c_size_t(value) fcntl.ioctl(fd, request, res) return res.value def ioctl_buffer(fd, request, value=None, length=None): """Call ioctl() with a byte buffer argument. You must specify either the `value` parameter or the `length` parameter. If the `length` parameter is specified, this function will allocate a byte buffer of the specified length to pass to ioctl(). :param fd: File descriptor to operate on. :param request: ioctl request to call. :param value: Optional contents of the byte buffer at the start of the call. :param length: Optional length of the byte buffer. :return: The contents of the value parameter after the call to ioctl(). """ request = int(request) if value is None and length is None: raise ValueError('Must specify either `value` or `length`') if value is not None and length is not None: raise ValueError('Cannot specify both `value` and `length`') if value is None: value = [0] * length data = _to_bytearray(value) fcntl.ioctl(fd, request, data) data = _from_bytearray(data) return data
import ctypes import fcntl import sys # In Python 2, the bytearray()-type does not support the buffer interface, # and can therefore not be used in ioctl(). # This creates a couple of helper functions for converting to and from if sys.version_info < (3, 0): import array def _to_bytearray(value): return array.array('B', value) def _from_bytearray(value): return value.tostring() else: def _to_bytearray(value): return bytearray(value) def _from_bytearray(value): return bytes(value) def ioctl_int(fd, op, value=0): """Call ioctl() with an `int *` argument. :param fd: File descriptor to operate on. :param op: The ioctl request to call. :param value: Optional value to pass to the ioctl() operation. Defaults to 0. :return The contents of the value parameter after the call to ioctl(). """ res = ctypes.c_int(value) fcntl.ioctl(fd, op, res) return res.value def ioctl_size_t(fd, op, value=0): """Call ioctl() with a `size_t *` argument. :param fd: File descriptor to operate on. :param op: ioctl request to call. :param value: Optional value to pass to the ioctl() operation. Defaults to 0. :return: The contents of the value parameter after the call to ioctl(). """ res = ctypes.c_size_t(value) fcntl.ioctl(fd, op, res) return res.value def ioctl_buffer(fd, op, value=None, length=None): """Call ioctl() with a byte buffer argument. You must specify either the `value` parameter or the `length` parameter. If the `length` parameter is specified, this function will allocate a byte buffer of the specified length to pass to ioctl(). :param fd: File descriptor to operate on. :param op: ioctl request to call. :param value: Optional contents of the byte buffer at the start of the call. :param length: Optional length of the byte buffer. :return: The contents of the value parameter after the call to ioctl(). """ op = int(op) if value is None and length is None: raise ValueError('Must specify either `value` or `length`') if value is not None and length is not None: raise ValueError('Cannot specify both `value` and `length`') if value is None: value = [0] * length data = _to_bytearray(value) fcntl.ioctl(fd, op, data) data = _from_bytearray(data) return data
Python
0
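A usage sketch for the renamed helper, assuming a Unix platform: FIONREAD (the number of bytes waiting to be read) is a convenient int-returning request, though it is platform-dependent, so treat this as illustrative only:

```python
# Sketch: calling ioctl_int() with a standard request constant (Unix only).
import socket
import termios

from ioctl import ioctl_int

a, b = socket.socketpair()
b.send(b'hello')
print(ioctl_int(a.fileno(), termios.FIONREAD))  # 5 bytes pending on 'a'
```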
0ac4fe1431fd04aa2645a4afc3d4d2fbfb21bb90
Update plone profile: copy of black, plus three settings.
isort/profiles.py
isort/profiles.py
"""Common profiles are defined here to be easily used within a project using --profile {name}""" from typing import Any, Dict black = { "multi_line_output": 3, "include_trailing_comma": True, "force_grid_wrap": 0, "use_parentheses": True, "ensure_newline_before_comments": True, "line_length": 88, } django = { "combine_as_imports": True, "include_trailing_comma": True, "multi_line_output": 5, "line_length": 79, } pycharm = { "multi_line_output": 3, "force_grid_wrap": 2, "lines_after_imports": 2, } google = { "force_single_line": True, "force_sort_within_sections": True, "lexicographical": True, "single_line_exclusions": ("typing",), "order_by_type": False, "group_by_package": True, } open_stack = { "force_single_line": True, "force_sort_within_sections": True, "lexicographical": True, } plone = black.copy() plone.update( { "force_alphabetical_sort": True, "force_single_line": True, "lines_after_imports": 2, } ) attrs = { "atomic": True, "force_grid_wrap": 0, "include_trailing_comma": True, "lines_after_imports": 2, "lines_between_types": 1, "multi_line_output": 3, "use_parentheses": True, } hug = { "multi_line_output": 3, "include_trailing_comma": True, "force_grid_wrap": 0, "use_parentheses": True, "line_length": 100, } wemake = { "multi_line_output": 3, "include_trailing_comma": True, "use_parentheses": True, "line_length": 80, } appnexus = { **black, "force_sort_within_sections": True, "order_by_type": False, "case_sensitive": False, "reverse_relative": True, "sort_relative_in_force_sorted_sections": True, "sections": ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"], "no_lines_before": "LOCALFOLDER", } profiles: Dict[str, Dict[str, Any]] = { "black": black, "django": django, "pycharm": pycharm, "google": google, "open_stack": open_stack, "plone": plone, "attrs": attrs, "hug": hug, "wemake": wemake, "appnexus": appnexus, }
"""Common profiles are defined here to be easily used within a project using --profile {name}""" from typing import Any, Dict black = { "multi_line_output": 3, "include_trailing_comma": True, "force_grid_wrap": 0, "use_parentheses": True, "ensure_newline_before_comments": True, "line_length": 88, } django = { "combine_as_imports": True, "include_trailing_comma": True, "multi_line_output": 5, "line_length": 79, } pycharm = { "multi_line_output": 3, "force_grid_wrap": 2, "lines_after_imports": 2, } google = { "force_single_line": True, "force_sort_within_sections": True, "lexicographical": True, "single_line_exclusions": ("typing",), "order_by_type": False, "group_by_package": True, } open_stack = { "force_single_line": True, "force_sort_within_sections": True, "lexicographical": True, } plone = { "force_alphabetical_sort": True, "force_single_line": True, "lines_after_imports": 2, "line_length": 200, } attrs = { "atomic": True, "force_grid_wrap": 0, "include_trailing_comma": True, "lines_after_imports": 2, "lines_between_types": 1, "multi_line_output": 3, "use_parentheses": True, } hug = { "multi_line_output": 3, "include_trailing_comma": True, "force_grid_wrap": 0, "use_parentheses": True, "line_length": 100, } wemake = { "multi_line_output": 3, "include_trailing_comma": True, "use_parentheses": True, "line_length": 80, } appnexus = { **black, "force_sort_within_sections": True, "order_by_type": False, "case_sensitive": False, "reverse_relative": True, "sort_relative_in_force_sorted_sections": True, "sections": ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"], "no_lines_before": "LOCALFOLDER", } profiles: Dict[str, Dict[str, Any]] = { "black": black, "django": django, "pycharm": pycharm, "google": google, "open_stack": open_stack, "plone": plone, "attrs": attrs, "hug": hug, "wemake": wemake, "appnexus": appnexus, }
Python
0
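The commit derives plone from black with copy() plus update(), while the appnexus profile below it uses dict unpacking; the two shallow-derivation idioms are equivalent and both leave the base profile untouched. A quick illustration with a trimmed stand-in for the black profile:

```python
# Sketch: the two profile-derivation idioms used in this file.
black = {"line_length": 88, "use_parentheses": True}  # trimmed stand-in

plone = black.copy()
plone.update({"force_single_line": True})

appnexus = {**black, "force_single_line": True}

print(plone == appnexus)  # True
print(black)              # base profile unchanged
```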
0a4da4bc40813362b9d6c67c2fb02f33a807f3fe
fix error on tax view
l10n_it_account/__openerp__.py
l10n_it_account/__openerp__.py
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2010-2013 Associazione OpenERP Italia # (<http://www.openerp-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Italian Localisation', 'version': '2.15.30.32', 'category': 'Localisation/Italy', 'description': """This module customizes OpenERP in order to fit italian laws and mores - Account version Functionalities: - Fiscal code computation for partner, and fiscal code check - Check invoice date consistency - CIG on invoice """, 'author': 'OpenERP Italian Community, Didotech srl', 'website': 'http://www.openerp-italia.org, http://www.didotech.com', 'license': 'AGPL-3', 'depends': [ 'account', 'base_vat', 'account_chart', 'base_iban', 'l10n_it_base', 'account_voucher', 'sale_order_confirm', # 'account_invoice_entry_date', not possible for use of a field defined here invoice_supplier_number ], 'data': [ 'account/partner_view.xml', 'account/fiscal_position_view.xml', 'account/account_sequence.xml', 'account/invoice_view.xml', 'account/voucher_view.xml', 'account/payment_type_view.xml', 'wizard/select_fiscal_position_view.xml', 'data/bank_iban_data.xml', 'account/account_move.xml', 'account/res_bank_view.xml', 'account/account_tax_view.xml', 'account/res_company_view.xml', 'account/account_invoice_workflow.xml', ], 'demo': [], 'active': False, 'installable': True, 'external_dependencies': { 'python': ['codicefiscale'], } }
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2010-2013 Associazione OpenERP Italia # (<http://www.openerp-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Italian Localisation', 'version': '2.15.30.32', 'category': 'Localisation/Italy', 'description': """This module customizes OpenERP in order to fit italian laws and mores - Account version Functionalities: - Fiscal code computation for partner, and fiscal code check - Check invoice date consistency - CIG on invoice """, 'author': 'OpenERP Italian Community, Didotech srl', 'website': 'http://www.openerp-italia.org, http://www.didotech.com', 'license': 'AGPL-3', 'depends': [ 'account', 'base_vat', 'account_chart', 'base_iban', 'l10n_it_base', 'account_voucher', 'sale_order_confirm', # 'account_invoice_entry_date', not possible for use of a field defined here invoice_supplier_number ], 'data': [ 'account/partner_view.xml', 'account/fiscal_position_view.xml', 'account/account_sequence.xml', 'account/invoice_view.xml', 'account/voucher_view.xml', 'account/payment_type_view.xml', 'wizard/select_fiscal_position_view.xml', 'data/bank_iban_data.xml', 'account/account_move.xml', 'account/res_bank_view.xml', # 'account/account_tax_view.xml', 'account/res_company_view.xml', 'account/account_invoice_workflow.xml', ], 'demo': [], 'active': False, 'installable': True, 'external_dependencies': { 'python': ['codicefiscale'], } }
Python
0
87c2cf3f8f8ea2e890aa648d33e93e051632e86d
change billets
totvserprm/financial.py
totvserprm/financial.py
# -*- coding: utf-8 -*- from datetime import datetime from baseapi import BaseApi class Client(BaseApi): dataservername = 'FinCFODataBR' def create(self,**kwargs): return super(Client, self).create({ 'NewDataSet': { 'FCFO': { 'ATIVO': kwargs.get('ativo'), # enviar -1 para que sejá criado de forma incremental 'CODCFO': -1, 'IDCFO': -1, 'CODEXTERNO': kwargs.get('codexterno'), 'CODCOLIGADA': kwargs.get('codcoligada'), 'CGCCFO': kwargs.get('cpf_cnpj'), 'TIPORUA': kwargs.get('tipo_rua'), 'TIPOBAIRRO': kwargs.get('tipo_bairro'), 'BAIRRO': kwargs.get('bairro'), 'RUA': kwargs.get('rua'), 'NUMERO': kwargs.get('numero'), 'CEP': kwargs.get('cep'), 'CODETD': kwargs.get('estado'), 'CIDADE': kwargs.get('cidade'), 'CODMUNICIPIO': kwargs.get('codigo_municipio'), 'PAIS': kwargs.get('cod_pais'), 'DTNASCIMENTO': '{:%Y-%m-%d}T03:00:00.000'.format(kwargs.get('data_nascimento')), 'NOME': kwargs.get('nome'), 'NOMEFANTASIA': kwargs.get('nome'), 'PAGREC': kwargs.get('classificacao'), 'PESSOAFISOUJUR': kwargs.get('categoria'), } } }, 'CODCOLIGADA={}'.format(kwargs.get('codcoligada'))) class Billet(BaseApi): dataservername = 'FinLanBoletoData' def create(self,**kwargs): return super(Billet, self).create({ 'NewDataSet': { 'FLAN': { 'CODCOLIGADA': kwargs.get('codcoligada'), 'IDLAN': -1, 'NUMERODOCUMENTO': -1, 'NFOUDUP': 0, 'CLASSIFICACAO': 0, 'PAGREC': 1, 'STATUSLAN': 1, 'CODTDO': kwargs.get('tipo_documento'), 'DATAVENCIMENTO': kwargs.get('data_vencimento'), 'DATAEMISSAO': "{:%d/%m/%Y %H:%M:%S}".format(datetime.now()), 'VALORORIGINAL': kwargs.get('valor'), 'CODCOLCFO': kwargs.get('codcoligada'), 'CODCFO': kwargs.get('codcliente'), 'CODFILIAL': kwargs.get('codfilial'), 'SERIEDOCUMENTO': kwargs.get('serie_documento'), 'CODCXA': kwargs.get('conta'), 'CODMOEVALORORIGINAL': 'R$', 'NUMLOTECONTABIL': 0, 'NUMEROCONTABIL': 0, 'NUMCONTABILBX': 0, 'TIPOCONTABILLAN': 0, 'FILIALCONTABIL': 1, 'HISTORICO': kwargs.get('historico'), 'CODCCUSTO': kwargs.get('centro_custo') } } }, 'CODCOLIGADA={}'.format(kwargs.get('codcoligada')))
# -*- coding: utf-8 -*- from datetime import datetime from baseapi import BaseApi class Client(BaseApi): dataservername = 'FinCFODataBR' def create(self,**kwargs): # codigo de coligada para o contexto, diferente do dataset codcoligada_contexto = kwargs.get('codcoligada_contexto') if not codcoligada_contexto: codcoligada_contexto = kwargs.get('codcoligada') return super(Client, self).create({ 'NewDataSet': { 'FCFO': { 'ATIVO': kwargs.get('ativo'), # enviar -1 para que sejá criado de forma incremental 'CODCFO': -1, 'IDCFO': -1, 'CODEXTERNO': kwargs.get('codexterno'), 'CODCOLIGADA': kwargs.get('codcoligada'), 'CGCCFO': kwargs.get('cpf_cnpj'), 'TIPORUA': kwargs.get('tipo_rua'), 'TIPOBAIRRO': kwargs.get('tipo_bairro'), 'BAIRRO': kwargs.get('bairro'), 'RUA': kwargs.get('rua'), 'NUMERO': kwargs.get('numero'), 'CEP': kwargs.get('cep'), 'CODETD': kwargs.get('estado'), 'CIDADE': kwargs.get('cidade'), 'CODMUNICIPIO': kwargs.get('codigo_municipio'), 'PAIS': kwargs.get('cod_pais'), 'DTNASCIMENTO': '{:%Y-%m-%d}T03:00:00.000'.format(kwargs.get('data_nascimento')), 'NOME': kwargs.get('nome'), 'NOMEFANTASIA': kwargs.get('nome'), 'PAGREC': kwargs.get('classificacao'), 'PESSOAFISOUJUR': kwargs.get('categoria'), } } }, 'CODCOLIGADA={}'.format(codcoligada_contexto)) class Billet(BaseApi): dataservername = 'FinLanBoletoData' def create(self,**kwargs): # codigo de coligada para o contexto, diferente do dataset codcoligada_contexto = kwargs.get('codcoligada_contexto') if not codcoligada_contexto: codcoligada_contexto = kwargs.get('codcoligada') return super(Billet, self).create({ 'NewDataSet': { 'FLAN': { 'CODCOLIGADA': kwargs.get('codcoligada'), 'IDLAN': -1, 'NUMERODOCUMENTO': -1, 'NFOUDUP': 0, 'CLASSIFICACAO': 0, 'PAGREC': 1, 'STATUSLAN': 1, 'CODTDO': kwargs.get('tipo_documento'), 'DATAVENCIMENTO': kwargs.get('data_vencimento'), 'DATAEMISSAO': "{:%d/%m/%Y %H:%M:%S}".format(datetime.now()), 'VALORORIGINAL': kwargs.get('valor'), 'CODCOLCFO': kwargs.get('codcoligada'), 'CODCFO': kwargs.get('codcliente'), 'CODFILIAL': kwargs.get('codfilial'), 'SERIEDOCUMENTO': kwargs.get('serie_documento'), 'CODCXA': kwargs.get('conta'), 'TIPOCONTABILLAN': 1, 'CODMOEVALORORIGINAL': 'R$', 'VALORSERVICO': 0, 'NUMLOTECONTABIL': kwargs.get('lote_contabil') } } }, 'CODCOLIGADA={}'.format(codcoligada_contexto))
Python
0.000001
9cce47d37f6e2d08a66b9deedfc6f2f74b02720a
add int validator
tpl/prompt/validator.py
tpl/prompt/validator.py
# -*- coding:utf-8 -*- from prompt_toolkit.validation import Validator, ValidationError class StrValidator(Validator): def validate(self, document): pass class IntValidator(Validator): def validate(self, document): text = document.text for index, char in enumerate(text): if not char.isdigit(): raise ValidationError(message='Input contains non-numeric char', cursor_position=index)
# -*- coding:utf-8 -*- from prompt_toolkit.validation import Validator, ValidationError class StrValidator(Validator): def validate(self, document): pass
Python
0.00002
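prompt_toolkit validators work by raising ValidationError from validate(); prompt() then re-asks until the input passes. A usage sketch for the new IntValidator; exact import paths can vary between prompt_toolkit versions, so this is a sketch rather than a definitive recipe:

```python
# Sketch: wiring IntValidator into an interactive prompt.
from prompt_toolkit import prompt

from tpl.prompt.validator import IntValidator

number = prompt('How many copies? ', validator=IntValidator())
print(int(number))  # the validator guarantees all-digit input
```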
4f73601c843ff9507064b85ddd33179af9fed653
Raise stderr message
utils/unfiltered_pbf.py
utils/unfiltered_pbf.py
# -*- coding: utf-8 -*- import logging import os from string import Template from subprocess import PIPE, Popen from .artifact import Artifact from .osm_xml import OSM_XML LOG = logging.getLogger(__name__) class InvalidOsmXmlException(Exception): pass class UnfilteredPBF(object): name = 'full_pbf' description = 'Unfiltered OSM PBF' cmd = Template('osmconvert $osm --out-pbf >$pbf') def __init__(self, aoi_geom, output_pbf, url): self.aoi_geom = aoi_geom self.output_pbf = output_pbf self.url = url def run(self): if self.is_complete: LOG.debug("Skipping UnfilteredPBF, file exists") return osm_xml = "{}.xml".format(self.output_pbf) osm_xml_task = OSM_XML(self.aoi_geom, osm_xml, url=self.url) osm_xml_task.run() convert_cmd = self.cmd.safe_substitute({ 'osm': osm_xml, 'pbf': self.output_pbf }) LOG.debug('Running: %s' % convert_cmd) p = Popen(convert_cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if stderr: raise InvalidOsmXmlException(stderr) LOG.debug('Osmconvert complete') @property def results(self): return [Artifact([self.output_pbf], UnfilteredPBF.name)] @property def is_complete(self): return os.path.isfile(self.output_pbf)
# -*- coding: utf-8 -*- import logging import os from string import Template from subprocess import PIPE, Popen from .artifact import Artifact from .osm_xml import OSM_XML LOG = logging.getLogger(__name__) class InvalidOsmXmlException(Exception): pass class UnfilteredPBF(object): name = 'full_pbf' description = 'Unfiltered OSM PBF' cmd = Template('osmconvert $osm --out-pbf >$pbf') def __init__(self, aoi_geom, output_pbf, url): self.aoi_geom = aoi_geom self.output_pbf = output_pbf self.url = url def run(self): if self.is_complete: LOG.debug("Skipping UnfilteredPBF, file exists") return osm_xml = "{}.xml".format(self.output_pbf) osm_xml_task = OSM_XML(self.aoi_geom, osm_xml, url=self.url) osm_xml_task.run() convert_cmd = self.cmd.safe_substitute({ 'osm': osm_xml, 'pbf': self.output_pbf }) LOG.debug('Running: %s' % convert_cmd) p = Popen(convert_cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if stderr: LOG.warn('Failed: %s', stderr) with open(self.input_xml, 'rb') as fd: sample = fd.readlines(8) raise InvalidOsmXmlException(sample) LOG.debug('Osmconvert complete') @property def results(self): return [Artifact([self.output_pbf], UnfilteredPBF.name)] @property def is_complete(self): return os.path.isfile(self.output_pbf)
Python
0.00001
aaee820075b150b641e511dbdb45e6d1ff3da529
Update description of function attribute in class SchemaIndicatorType
api/schema_indicator.py
api/schema_indicator.py
from database.model_indicator import ModelIndicatorType, ModelIndicator, ModelIndicatorParameterType, ModelIndicatorParameter, ModelIndicatorResult from graphene_sqlalchemy import SQLAlchemyObjectType import graphene import logging # Load logging configuration log = logging.getLogger(__name__) class AttributeIndicator: """Generic class to provide descriptions of indicator attributes""" name = graphene.String(description="Indicator name.") description = graphene.String(description="Indicator description.") indicatorTypeId = graphene.ID(description="Indicator type Id of the indicator.") batchOwnerId = graphene.ID(description="Batch owner Id of the indicator.") executionOrder = graphene.Int(description="Order of execution of the indicator when it is executed in a batch with several other indicators.") active = graphene.Boolean(description="Indicates if the indicator is active or inactive. Only active indicators can be executed.") class SchemaIndicator(SQLAlchemyObjectType, AttributeIndicator): """Data quality indicators.""" class Meta: model = ModelIndicator interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorParameter: """Generic class to provide descriptions of indicator parameter attributes""" indicatorId = graphene.ID(description="Indicator Id of the parameter.") parameterTypeId = graphene.String(description="Parameter type Id of the parameter.") value = graphene.String(description="Value of the parameter.") class SchemaIndicatorParameter(SQLAlchemyObjectType, AttributeIndicatorParameter): """Indicator parameters.""" class Meta: model = ModelIndicatorParameter interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorParameterType: """Generic class to provide descriptions of indicator parameter type attributes""" name = graphene.String(description="Parameter type name.") description = graphene.String(description="Parameter type description.") class SchemaIndicatorParameterType(SQLAlchemyObjectType, AttributeIndicatorParameterType): """Indicator parameter types.""" class Meta: model = ModelIndicatorParameterType interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorResult: """Generic class to provide descriptions of indicator result attributes""" indicatorId = graphene.ID(description="Indicator Id of the results set.") sessionId = graphene.ID(description="Session Id of the result set.") alertOperator = graphene.String(description="Alert operator used during the execution of the indicator.") alertThreshold = graphene.Float(description="Alert threshold used during the execution of the indicator.") nbRecords = graphene.Int(description="Number of records in the result set.") nbRecordsAlert = graphene.Int(description="Number of records which triggered an alert in the result set.") nbRecordsNoAlert = graphene.Int(description="Number of records which did not trigger an alert in the result set.") class SchemaIndicatorResult(SQLAlchemyObjectType, AttributeIndicatorResult): """Indicator results.""" class Meta: model = ModelIndicatorResult interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorType: """Generic class to provide descriptions of indicator type attributes""" name = graphene.String(description="Indicator type name.") function = graphene.String(description="Python function of the framework used to compute this indicator type.") class SchemaIndicatorType(SQLAlchemyObjectType, AttributeIndicatorType): """Types of indicators.""" class Meta: model = ModelIndicatorType interfaces = (graphene.relay.Node,) # Keep comma to avoid failure
from database.model_indicator import ModelIndicatorType, ModelIndicator, ModelIndicatorParameterType, ModelIndicatorParameter, ModelIndicatorResult from graphene_sqlalchemy import SQLAlchemyObjectType import graphene import logging # Load logging configuration log = logging.getLogger(__name__) class AttributeIndicator: """Generic class to provide descriptions of indicator attributes""" name = graphene.String(description="Indicator name.") description = graphene.String(description="Indicator description.") indicatorTypeId = graphene.ID(description="Indicator type Id of the indicator.") batchOwnerId = graphene.ID(description="Batch owner Id of the indicator.") executionOrder = graphene.Int(description="Order of execution of the indicator when it is executed in a batch with several other indicators.") active = graphene.Boolean(description="Indicates if the indicator is active or inactive. Only active indicators can be executed.") class SchemaIndicator(SQLAlchemyObjectType, AttributeIndicator): """Data quality indicators.""" class Meta: model = ModelIndicator interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorParameter: """Generic class to provide descriptions of indicator parameter attributes""" indicatorId = graphene.ID(description="Indicator Id of the parameter.") parameterTypeId = graphene.String(description="Parameter type Id of the parameter.") value = graphene.String(description="Value of the parameter.") class SchemaIndicatorParameter(SQLAlchemyObjectType, AttributeIndicatorParameter): """Indicator parameters.""" class Meta: model = ModelIndicatorParameter interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorParameterType: """Generic class to provide descriptions of indicator parameter type attributes""" name = graphene.String(description="Parameter type name.") description = graphene.String(description="Parameter type description.") class SchemaIndicatorParameterType(SQLAlchemyObjectType, AttributeIndicatorParameterType): """Indicator parameter types.""" class Meta: model = ModelIndicatorParameterType interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorResult: """Generic class to provide descriptions of indicator result attributes""" indicatorId = graphene.ID(description="Indicator Id of the results set.") sessionId = graphene.ID(description="Session Id of the result set.") alertOperator = graphene.String(description="Alert operator used during the execution of the indicator.") alertThreshold = graphene.Float(description="Alert threshold used during the execution of the indicator.") nbRecords = graphene.Int(description="Number of records in the result set.") nbRecordsAlert = graphene.Int(description="Number of records which triggered an alert in the result set.") nbRecordsNoAlert = graphene.Int(description="Number of records which did not trigger an alert in the result set.") class SchemaIndicatorResult(SQLAlchemyObjectType, AttributeIndicatorResult): """Indicator results.""" class Meta: model = ModelIndicatorResult interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorType: """Generic class to provide descriptions of indicator type attributes""" name = graphene.String(description="Indicator type name.") function = graphene.String(description="Python function used to execute this type of indicator.") class SchemaIndicatorType(SQLAlchemyObjectType, AttributeIndicatorType): """Types of indicators.""" class Meta: model = ModelIndicatorType interfaces = (graphene.relay.Node,) # Keep comma to avoid failure
Python
0
451e20818c7fbcc0b45500c71c5c5beee96eb316
update jaxlib
jaxlib/version.py
jaxlib/version.py
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.1.17"
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.1.16"
Python
0
1c15d302c2a1df22b4dd89f3215decf141a4c20e
return None if there is an error during scan
abilian/services/antivirus/__init__.py
abilian/services/antivirus/__init__.py
# coding=utf-8 """ """ from __future__ import absolute_import try: import clamd cd = clamd.ClamdUnixSocket() CLAMD_AVAILABLE = True except ImportError: CLAMD_AVAILABLE = False from abilian.core.models.blob import Blob from ..base import Service class AntiVirusService(Service): """ Antivirus service """ name = 'antivirus' def scan(self, file_or_stream): """ :param file_or_stream: :class:`Blob` instance, filename or file object :returns: True if file is 'clean', False if a virus is detected, None if file could not be scanned. If `file_or_stream` is a Blob, scan result is stored in Blob.meta['antivirus']. """ res = self._scan(file_or_stream) if isinstance(file_or_stream, Blob): file_or_stream.meta['antivirus'] = res return res def _scan(self, file_or_stream): if not CLAMD_AVAILABLE: return None content = file_or_stream if isinstance(file_or_stream, Blob): scan = cd.scan # py3 compat: bytes == py2 str(). Pathlib uses os.fsencode() content = bytes(file_or_stream.file) elif isinstance(file_or_stream, (str, unicode)): scan = cd.scan else: scan = cd.instream res = None try: res = scan(content) except clamd.ClamdError as e: self.logger.warning('Error during content scan: %s', repr(e)) return None if content not in res: # may happen if file doesn't exists return False res = res[content] return res[0] == u'OK' service = AntiVirusService()
# coding=utf-8 """ """ from __future__ import absolute_import try: import clamd cd = clamd.ClamdUnixSocket() CLAMD_AVAILABLE = True except ImportError: CLAMD_AVAILABLE = False from abilian.core.models.blob import Blob from ..base import Service class AntiVirusService(Service): """ Antivirus service """ name = 'antivirus' def scan(self, file_or_stream): """ :param file_or_stream: :class:`Blob` instance, filename or file object :returns: True if file is 'clean', False if a virus is detected, None if file could not be scanned. If `file_or_stream` is a Blob, scan result is stored in Blob.meta['antivirus']. """ res = self._scan(file_or_stream) if isinstance(file_or_stream, Blob): file_or_stream.meta['antivirus'] = res return res def _scan(self, file_or_stream): if not CLAMD_AVAILABLE: return None content = file_or_stream if isinstance(file_or_stream, Blob): scan = cd.scan # py3 compat: bytes == py2 str(). Pathlib uses os.fsencode() content = bytes(file_or_stream.file) elif isinstance(file_or_stream, (str, unicode)): scan = cd.scan else: scan = cd.instream res = None try: res = scan(content) except clamd.ClamdError as e: self.logger.warning('Error during content scan: %s', repr(e)) if content not in res: # may happen if file doesn't exists return False res = res[content] return res[0] == u'OK' service = AntiVirusService()
Python
0.998415
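After this change scan() has a tri-state contract: True for clean, False for a detected virus, None when the file could not be scanned. Callers should use identity checks so the None case is not folded into the virus case; a sketch, with a hypothetical file path:

```python
# Sketch: consuming scan()'s tri-state result.
from abilian.services.antivirus import service

result = service.scan('/tmp/upload.bin')  # hypothetical path

if result is None:
    print('scan failed or clamd unavailable - keep quarantined, retry later')
elif result:
    print('clean')
else:
    print('virus detected')
```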
8e10657f94023a69967345114ee221c8d579c05d
Fix error with new issue while not logged in.
trackit/issues/views.py
trackit/issues/views.py
from django.shortcuts import render, get_object_or_404, redirect from .models import Ticket, Label, User, Comment import hashlib # Create your views here. def home(request): issue = Ticket.objects.filter().order_by('-id') readit = [] for i in issue: issue_get = {} issue_get['id'] = i.id issue_get['title'] = i.ticket_title issue_get['status'] = i.status issue_get['time'] = i.time issue_get['label'] = i.label_set.all() readit.append(issue_get) #pass return render(request, 'home.html', {"readit": readit, "request": request}) def issues(request, ticket_id): issue = get_object_or_404(Ticket, id=ticket_id) issue_get = {} issue_get['id'] = issue.id issue_get['title'] = issue.ticket_title issue_get['status'] = issue.status issue_get['time'] = issue.time issue_get['label'] = issue.label_set.all() return render(request, 'issues.html', {"issue_get": issue_get, "request": request}) def newissues(request): if "login" in request.session: name = request.session['login'] else: name = "default" return render(request, 'newissues.html', {"issue_get": name, "request": request}) def add(request): if request.method == 'POST': if 'login' in request.session: if request.POST['todo'] == "newissue": title = request.POST['title'] content = request.POST['comment'] ticket = Ticket(ticket_title=title) ticket.save() user = get_object_or_404(User, id=1) comment = Comment(ticket=ticket, content=content, user=user) comment.save() return redirect('home') def loginhere(request): return render(request, 'loginhere.html', {"issue_get": "", "request": request}) def login(request): #TODO rewrite please if request.method == 'POST': if request.POST['login_password']: plain = request.POST['login_password'] if hashlib.sha224(plain.encode()).hexdigest() == '71454996db126e238e278a202a7dbc49dda187ec4f8c9dfc95584900': #login request.session['login'] = request.POST['login_select'] return redirect('home') def logout(request): if request.session['login']: del request.session['login'] return redirect('home')
from django.shortcuts import render, get_object_or_404, redirect from .models import Ticket, Label, User, Comment import hashlib # Create your views here. def home(request): issue = Ticket.objects.filter().order_by('-id') readit = [] for i in issue: issue_get = {} issue_get['id'] = i.id issue_get['title'] = i.ticket_title issue_get['status'] = i.status issue_get['time'] = i.time issue_get['label'] = i.label_set.all() readit.append(issue_get) #pass return render(request, 'home.html', {"readit": readit, "request": request}) def issues(request, ticket_id): issue = get_object_or_404(Ticket, id=ticket_id) issue_get = {} issue_get['id'] = issue.id issue_get['title'] = issue.ticket_title issue_get['status'] = issue.status issue_get['time'] = issue.time issue_get['label'] = issue.label_set.all() return render(request, 'issues.html', {"issue_get": issue_get, "request": request}) def newissues(request): if "login" in request.session: name = request.session['login'] else: name = "default" return render(request, 'newissues.html', {"issue_get": name, "request": request}) def add(request): if request.method == 'POST': if request.session['login']: if request.POST['todo'] == "newissue": title = request.POST['title'] content = request.POST['comment'] ticket = Ticket(ticket_title=title) ticket.save() user = get_object_or_404(User, id=1) comment = Comment(ticket=ticket, content=content, user=user) comment.save() return redirect('home') def loginhere(request): return render(request, 'loginhere.html', {"issue_get": "", "request": request}) def login(request): #TODO rewrite please if request.method == 'POST': if request.POST['login_password']: plain = request.POST['login_password'] if hashlib.sha224(plain.encode()).hexdigest() == '71454996db126e238e278a202a7dbc49dda187ec4f8c9dfc95584900': #login request.session['login'] = request.POST['login_select'] return redirect('home') def logout(request): if request.session['login']: del request.session['login'] return redirect('home')
Python
0
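The fix swaps request.session['login'] for 'login' in request.session: subscripting a missing key raises KeyError for anonymous visitors, while a membership test degrades gracefully. A minimal illustration with a plain dict standing in for the session:

```python
# Sketch of the bug this commit fixes; a dict stands in for request.session.
session = {}  # anonymous visitor: no 'login' key yet

print('login' in session)  # False - the safe guard used in the fix
try:
    session['login']       # the pre-fix guard
except KeyError:
    print('KeyError - this is what crashed for logged-out users')
```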
7e4b66fe3df07afa431201de7a5a76d2eeb949a1
Fix django custom template tag importing
app/main.py
app/main.py
#!/usr/bin/env python import env_setup; env_setup.setup(); env_setup.setup_django() from django.template import add_to_builtins add_to_builtins('agar.django.templatetags') from webapp2 import RequestHandler, Route, WSGIApplication from agar.env import on_production_server from agar.config import Config from agar.django.templates import render_template class MainApplicationConfig(Config): """ :py:class:`~agar.config.Config` settings for the ``main`` `webapp2.WSGIApplication`_. Settings are under the ``main_application`` namespace. The following settings (and defaults) are provided:: main_application_NOOP = None To override ``main`` `webapp2.WSGIApplication`_ settings, define values in the ``appengine_config.py`` file in the root of your project. """ _prefix = 'main_application' #: A no op. NOOP = None config = MainApplicationConfig.get_config() class MainHandler(RequestHandler): def get(self): render_template(self.response, 'index.html') application = WSGIApplication( [ Route('/', MainHandler, name='main'), ], debug=not on_production_server) def main(): from google.appengine.ext.webapp import template, util template.register_template_library('agar.django.templatetags') util.run_wsgi_app(application) if __name__ == '__main__': main()
#!/usr/bin/env python from env_setup import setup_django setup_django() from env_setup import setup setup() from webapp2 import RequestHandler, Route, WSGIApplication from agar.env import on_production_server from agar.config import Config from agar.django.templates import render_template class MainApplicationConfig(Config): """ :py:class:`~agar.config.Config` settings for the ``main`` `webapp2.WSGIApplication`_. Settings are under the ``main_application`` namespace. The following settings (and defaults) are provided:: main_application_NOOP = None To override ``main`` `webapp2.WSGIApplication`_ settings, define values in the ``appengine_config.py`` file in the root of your project. """ _prefix = 'main_application' #: A no op. NOOP = None config = MainApplicationConfig.get_config() class MainHandler(RequestHandler): def get(self): render_template(self.response, 'index.html') application = WSGIApplication( [ Route('/', MainHandler, name='main'), ], debug=not on_production_server) def main(): from google.appengine.ext.webapp import template, util template.register_template_library('agar.django.templatetags') util.run_wsgi_app(application) if __name__ == '__main__': main()
Python
0.000001
35c52ecbe34611f003d8f647dafdb15c00d70212
update doc
python/git_pull_codedir/git_pull_codedir.py
python/git_pull_codedir/git_pull_codedir.py
# -*- coding: utf-8 -*- #!/usr/bin/python ##------------------------------------------------------------------- ## @copyright 2017 DennyZhang.com ## Licensed under MIT ## https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE ## ## File : git_pull_codedir.py ## Author : Denny <denny@dennyzhang.com> ## Description : ## -- ## Created : <2017-03-24> ## Updated: Time-stamp: <2017-03-27 18:10:44> ##------------------------------------------------------------------- import os, sys import sys import logging import argparse # Notice: Need to run: pip install GitPython import git logger = logging.getLogger("git_pull_codedir") formatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',) file_handler = logging.FileHandler("/var/log/git_pull_codedir.log") file_handler.setFormatter(formatter) stream_handler = logging.StreamHandler(sys.stderr) logger.addHandler(file_handler) logger.addHandler(stream_handler) logger.setLevel(logging.INFO) def git_pull(code_dir): logger.info("Run git pull in %s" %(code_dir)) if os.path.exists(code_dir) is False: logger.error("Code directory(%s): doesn't exist" % (code_dir)) sys.exit(1) os.chdir(code_dir) g = git.cmd.Git(code_dir) g.pull() # Sample python git_pull_codedir.py --code_dirs "/data/code_dir/repo1,/data/code_dir/repo2" if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--code_dirs', required=True, \ help="Code directories to pull. If multiple, separated by comma", type=str) l = parser.parse_args() code_dirs = l.code_dirs separator = "," for code_dir in code_dirs.split(separator): git_pull(code_dir) ## File : git_pull_codedir.py ends
# -*- coding: utf-8 -*- #!/usr/bin/python ##------------------------------------------------------------------- ## @copyright 2017 DennyZhang.com ## Licensed under MIT ## https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE ## ## File : git_pull_codedir.py ## Author : Denny <denny@dennyzhang.com> ## Description : ## -- ## Created : <2017-03-24> ## Updated: Time-stamp: <2017-03-24 15:51:04> ##------------------------------------------------------------------- import os, sys import sys import logging import argparse # Notice: Need to run: pip install GitPython import git logger = logging.getLogger("git_pull_codedir") formatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',) file_handler = logging.FileHandler("/var/log/git_pull_codedir.log") file_handler.setFormatter(formatter) stream_handler = logging.StreamHandler(sys.stderr) logger.addHandler(file_handler) logger.addHandler(stream_handler) logger.setLevel(logging.INFO) def git_pull(code_dir): logger.info("Run git pull in %s" %(code_dir)) if os.path.exists(code_dir) is False: logger.error("Code directory(%s): doesn't exist" % (code_dir)) sys.exit(1) os.chdir(code_dir) g = git.cmd.Git(code_dir) g.pull() # Sample python perform_git_pull.py --code_dirs "/data/code_dir/repo1,/data/code_dir/repo2" if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--code_dirs', required=True, \ help="Code directories to pull. If multiple, separated by comma", type=str) l = parser.parse_args() code_dirs = l.code_dirs separator = "," for code_dir in code_dirs.split(separator): git_pull(code_dir) ## File : git_pull_codedir.py ends
Python
0
3ab5586ec4ac9ff3ac3fd7583bc9a71c7b5cd27a
fix lockedNormal, use MItMeshPolygon instead of MItMeshVertex, fix Fix() function
python/medic/plugins/Tester/lockedNormal.py
python/medic/plugins/Tester/lockedNormal.py
from medic.core import testerBase from maya import OpenMaya class LockedNormal(testerBase.TesterBase): Name = "LockedNormal" Description = "vertex(s) which has locked normal" Fixable = True def __init__(self): super(LockedNormal, self).__init__() def Match(self, node): return node.object().hasFn(OpenMaya.MFn.kMesh) def Test(self, node): it = None mesh = None try: it = OpenMaya.MItMeshPolygon(node.object()) mesh = OpenMaya.MFnMesh(node.object()) except: return (False, None) vertices = OpenMaya.MIntArray() while (not it.isDone()): for i in range(it.polygonVertexCount()): vi = it.vertexIndex(i) if vi in vertices: continue ni = it.normalIndex(i) if mesh.isNormalLocked(ni): vertices.append(vi) it.next() if vertices.length() > 0: comp = OpenMaya.MFnSingleIndexedComponent() comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent) comp.addElements(vertices) return (True, comp_obj) return (False, None) def Fix(self, node, component, parameterParser): if node.dg().isFromReferencedFile(): return False mesh = OpenMaya.MFnMesh(node.object()) vertices = OpenMaya.MIntArray() ver_comp = OpenMaya.MFnSingleIndexedComponent(component) ver_comp.getElements(vertices) mesh.unlockVertexNormals(vertices) return True Tester = LockedNormal
from medic.core import testerBase from maya import OpenMaya class LockedNormal(testerBase.TesterBase): Name = "LockedNormal" Description = "vertex(s) which has locked normal" Fixable = True def __init__(self): super(LockedNormal, self).__init__() def Match(self, node): return node.object().hasFn(OpenMaya.MFn.kMesh) def Test(self, node): it = None mesh = None try: it = OpenMaya.MItMeshVertex(node.object()) mesh = OpenMaya.MFnMesh(node.object()) except: return (False, None) result = False comp = OpenMaya.MFnSingleIndexedComponent() comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent) while (not it.isDone()): normal_indices = OpenMaya.MIntArray() it.getNormalIndices(normal_indices) for i in range(normal_indices.length()): if mesh.isNormalLocked(normal_indices[i]): result = True comp.addElement(it.index()) break it.next() return (result, comp_obj if result else None) def Fix(self, node, component, parameterParser): if node.dg().isFromReferencedFile(): return False target_normal_indices = OpenMaya.MIntArray() mesh = OpenMaya.MFnMesh(node.object()) it = OpenMaya.MItMeshVertex(node.getPath(), component) while (not it.isDone()): normal_indices = OpenMaya.MIntArray() it.getNormalIndices(normal_indices) for i in range(normal_indices.length()): target_normal_indices.append(normal_indices[i]) it.next() mesh.unlockVertexNormals(target_normal_indices) return True Tester = LockedNormal
Python
0
e74b4867f9067e28686aecd19eb6f1d352ee28bf
fix imports
game.py
game.py
import random from characters import guests as people from adventurelib import when, start import rooms from sys import exit murder_config_people = list(people) random.shuffle(murder_config_people) murder_location = random.choice(list(rooms.rooms)) murderer = random.choice(list(people)) current_config_people = list(people) random.shuffle(current_config_people) current_location = random.choice(list(rooms.rooms)) @when('where am i') def my_room(): print("I am in: ", current_location) @when('go to ROOM') @when('go to the ROOM') def to_room(room): global current_location r = rooms.rooms.find(room) if current_location == r: print("I am already in %s" % room) elif r: print("I am now in %s" % room) current_location = r else: print("I can't find the %s" % room) @when('it was PERSON') def accuse(person): p = people.find(person) if p == murderer: print ("Yes, %s is the murderer!" % p) exit else: if p: print ("%s said: 'How could you!'" % p) else: print ("No one has ever heard of '%s'!" % person) start()
import random from characters import guests as people from adventurelib import Item, Bag, when, start import rooms import characters from sys import exit murder_config_people = list(people) random.shuffle(murder_config_people) murder_location = random.choice(list(rooms.rooms)) murderer = random.choice(list(people)) current_config_people = list(people) random.shuffle(current_config_people) current_location = random.choice(list(rooms.rooms)) @when('where am i') def my_room(): print("I am in: ", current_location) @when('go to ROOM') @when('go to the ROOM') def to_room(room): global current_location r = rooms.rooms.find(room) if current_location == r: print("I am already in %s" % room) elif r: print("I am now in %s" % room) current_location = r else: print("I can't find the %s" % room) @when('it was PERSON') def accuse(person): p = people.find(person) if p == murderer: print ("Yes, %s is the murderer!" % p) exit else: if p: print ("%s said: 'How could you!'" % p) else: print ("No one has ever heard of '%s'!" % person) start()
Python
0.000002
38f682604b7ed69799cc795eaead631dbd384c7e
allow ttl of 0
nsone/rest/records.py
nsone/rest/records.py
# # Copyright (c) 2014 NSONE, Inc. # # License under The MIT License (MIT). See LICENSE in project root. from . import resource class Records(resource.BaseResource): ROOT = 'zones' def _buildBody(self, zone, domain, type, answers, ttl=None): body = {} body['zone'] = zone body['domain'] = domain body['type'] = type body['answers'] = answers if ttl is not None: body['ttl'] = int(ttl) return body def create(self, zone, domain, type, answers, ttl=None, callback=None, errback=None): body = self._buildBody(zone, domain, type, answers, ttl) return self._make_request('PUT', '%s/%s/%s/%s' % (self.ROOT, zone, domain, type), body=body, callback=callback, errback=errback) def update(self, zone, domain, type, answers, ttl=None, callback=None, errback=None): body = { 'answers': answers } if ttl is not None: body['ttl'] = int(ttl) return self._make_request('POST', '%s/%s/%s/%s' % (self.ROOT, zone, domain, type), body=body, callback=callback, errback=errback) def delete(self, zone, domain, type, callback=None, errback=None): return self._make_request('DELETE', '%s/%s/%s/%s' % (self.ROOT, zone, domain, type), callback=callback, errback=errback) def retrieve(self, zone, domain, type, callback=None, errback=None): return self._make_request('GET', '%s/%s/%s/%s' % (self.ROOT, zone, domain, type), callback=callback, errback=errback)
# # Copyright (c) 2014 NSONE, Inc. # # License under The MIT License (MIT). See LICENSE in project root. from . import resource class Records(resource.BaseResource): ROOT = 'zones' def _buildBody(self, zone, domain, type, answers, ttl=None): body = {} body['zone'] = zone body['domain'] = domain body['type'] = type body['answers'] = answers if ttl: body['ttl'] = int(ttl) return body def create(self, zone, domain, type, answers, ttl=None, callback=None, errback=None): body = self._buildBody(zone, domain, type, answers, ttl) return self._make_request('PUT', '%s/%s/%s/%s' % (self.ROOT, zone, domain, type), body=body, callback=callback, errback=errback) def update(self, zone, domain, type, answers, ttl=None, callback=None, errback=None): body = { 'answers': answers } if ttl: body['ttl'] = ttl return self._make_request('POST', '%s/%s/%s/%s' % (self.ROOT, zone, domain, type), body=body, callback=callback, errback=errback) def delete(self, zone, domain, type, callback=None, errback=None): return self._make_request('DELETE', '%s/%s/%s/%s' % (self.ROOT, zone, domain, type), callback=callback, errback=errback) def retrieve(self, zone, domain, type, callback=None, errback=None): return self._make_request('GET', '%s/%s/%s/%s' % (self.ROOT, zone, domain, type), callback=callback, errback=errback)
Python
0.002377
aaba085cd2e97c8c23e6724da3313d42d12798f0
Make sure request.user is a user
app/grandchallenge/annotations/validators.py
app/grandchallenge/annotations/validators.py
from rest_framework import serializers from django.conf import settings def validate_grader_is_current_retina_user(grader, context): """ This method checks if the passed grader equals the request.user that is passed in the context. Only applies to users that are in the retina_graders group. """ request = context.get("request") if ( request is not None and request.user is not None and request.user.is_authenticated ): user = request.user if user.groups.filter( name=settings.RETINA_GRADERS_GROUP_NAME ).exists(): if grader != user: raise serializers.ValidationError( "User is not allowed to create annotation for other grader" )
from rest_framework import serializers from django.conf import settings def validate_grader_is_current_retina_user(grader, context): """ This method checks if the passed grader equals the request.user that is passed in the context. Only applies to users that are in the retina_graders group. """ request = context.get("request") if request and request.user.is_authenticated: user = request.user if user.groups.filter( name=settings.RETINA_GRADERS_GROUP_NAME ).exists(): if grader != user: raise serializers.ValidationError( "User is not allowed to create annotation for other grader" )
Python
0.999944
445740ee2630eca017b4899b96fef8ffeda0e7ea
update gist extension
gist.py
gist.py
""" This is the gist share button, and a %gist magic, as a Python extension. You can also get just the gist button without this extension by adding the contents of gist.js to static/js/custom.js in your profile. This code requires that you have the jist rubygem installed and properly configured. """ gist_js = r""" /* Add the contents of this file to your custom.js for it to always be on. */ IPython.ext_update_gist_link = function(gist_id) { IPython.notebook.metadata.gist_id = gist_id; var toolbar = IPython.toolbar.element; var link = toolbar.find("a#nbviewer"); if ( ! link.length ) { link = $('<a id="nbviewer" target="_blank"/>'); toolbar.append( $('<span id="nbviewer_span"/>').append(link) ); } link.attr("href", "http://nbviewer.ipython.org/" + gist_id); link.text("http://nbviewer.ipython.org/" + gist_id); }; IPython.ext_handle_gist_output = function(output_type, content) { if (output_type != 'stream' || content['name'] != 'stdout') { return; } var gist_id = jQuery.trim(content['data']); if (! gist_id.match(/[A-Za-z0-9]+/g)) { alert("Gist seems to have failed: " + gist_id); return; } IPython.ext_update_gist_link(gist_id); }; IPython.ext_gist_notebook = function () { var gist_id = IPython.notebook.metadata.gist_id || null; var cmd = '_nbname = "' + IPython.notebook.notebook_name + '.ipynb"'; cmd = cmd + '\nlines = !jist -p' if (gist_id) { cmd = cmd + ' -u ' + gist_id; } cmd = cmd + ' "$_nbname"'; cmd = cmd + '\nprint lines[0].replace("https://gist.github.com", "").replace("/","")'; IPython.notebook.kernel.execute(cmd, {'output' : IPython.ext_handle_gist_output}); }; setTimeout(function() { if ($("#gist_notebook").length == 0) { IPython.toolbar.add_buttons_group([ { 'label' : 'Share Notebook as gist', 'icon' : 'ui-icon-share', 'callback': IPython.ext_gist_notebook, 'id' : 'gist_notebook' }, ]) } if (IPython.notebook.metadata.gist_id) { IPython.ext_update_gist_link(IPython.notebook.metadata.gist_id); } }, 1000); """ from IPython.display import display_javascript def gist(line=''): display_javascript("IPython.ext_gist_notebook()", raw=True) def load_ipython_extension(ip): display_javascript(gist_js, raw=True) ip.magics_manager.register_function(gist)
""" This is the gist share button, and a %gist magic, as a Python extension. You can also get just the gist button without this extension by adding the contents of gist.js to static/js/custom.js in your profile. This code requires that you have the jist rubygem installed and properly configured. """ gist_js = r""" /* Add the contents of this file to your custom.js for it to always be on. */ IPython.ext_update_gist_link = function(gist_id) { IPython.notebook.metadata.gist_id = gist_id; var toolbar = IPython.toolbar.element; var link = toolbar.find("a#nbviewer"); if ( ! link.length ) { link = $('<a id="nbviewer" target="_blank"/>'); toolbar.append( $('<span id="nbviewer_span"/>').append(link) ); } link.attr("href", "http://nbviewer.ipython.org/" + gist_id); link.text("http://nbviewer.ipython.org/" + gist_id); }; IPython.ext_handle_gist_output = function(output_type, content) { if (output_type != 'stream' || content['name'] != 'stdout') { return; } var gist_id = jQuery.trim(content['data']); if (! gist_id.match(/[A-Za-z0-9]+/g)) { alert("Gist seems to have failed: " + gist_id); return; } IPython.ext_update_gist_link(gist_id); }; IPython.ext_gist_notebook = function () { var gist_id = IPython.notebook.metadata.gist_id || null; var cmd = '_nbname = "' + IPython.notebook.notebook_name + '.ipynb"'; cmd = cmd + '\nlines = !jist -p' if (gist_id) { cmd = cmd + ' -u ' + gist_id; } cmd = cmd + ' "$_nbname"'; cmd = cmd + '\nprint lines[0].replace("https://gist.github.com", "").replace("/","")'; IPython.notebook.kernel.execute(cmd, {'output' : IPython.ext_handle_gist_output}); }; setTimeout(function() { if ($("#gist_notebook").length == 0) { IPython.toolbar.add_buttons_group([ { 'label' : 'Share Notebook as gist', 'icon' : 'ui-icon-share', 'callback': IPython.ext_gist_notebook, 'id' : 'gist_notebook' }, ]) } if (IPython.notebook.metadata.gist_id) { IPython.ext_update_gist_link(IPython.notebook.metadata.gist_id); } }, 1000); """ from IPython.display import display, Javascript def gist(line=''): display(Javascript("IPython.ext_gist_notebook()")) def load_ipython_extension(ip): display(Javascript(gist_js)) ip.magics_manager.register_function(gist)
Python
0
9108f24183b2743647a8ed3ab354673e945d5f2a
Update release number
mailparser_version/__init__.py
mailparser_version/__init__.py
__version__ = "1.1.0"
__version__ = "1.0.0"
Python
0.000001
7e457272b4e9d3b0de1bb0fc0cbf8b6bae4dc911
add test script
test_rsn.py
test_rsn.py
#!/usr/bin/env python import argparse import logging from prettytable import PrettyTable from dns.flags import DO from dns.resolver import query, Resolver class RsnServer(object): def __init__(self, server): self.logger = logging.getLogger('RsnServer') self.server = server self.ipv4 = query(self.server, 'A')[0].address self.ipv6 = query(self.server, 'AAAA')[0].address self.resolver = Resolver() self.round_trips = 0 self.logger.debug('initiate: {} ({}/{})'.format(self.server, self.ipv4, self.ipv6)) self.update_sizes() def _update_size(self, server, dnssec): '''get the response size''' self.resolver.nameservers = [ server ] if dnssec: self.resolver.use_edns(0,DO,4096) else: self.resolver.use_edns(0,0,4096) answer = self.resolver.query('.', 'NS') size = len(answer.response.to_wire()) self.logger.debug('Size:{}:DNSSEC({}):{}'.format(server, dnssec, size)) return size def update_sizes(self): self.size_ipv4 = self._update_size(self.ipv4, False) self.size_ipv6 = self._update_size(self.ipv6, False) self.size_ipv4_dnssec = self._update_size(self.ipv4, True) self.size_ipv6_dnssec = self._update_size(self.ipv6, True) def get_args(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-z', '--zone', default='.' ) parser.add_argument('-s', '--server', default='127.0.0.1' ) parser.add_argument('-b', '--bufsize', type=int, default=4096 ) parser.add_argument('-v', '--verbose', action='count' ) parser.add_argument('servers_file') return parser.parse_args() def set_log_level(args_level): log_level = logging.ERROR if args_level == 1: log_level = logging.WARN elif args_level == 2: log_level = logging.INFO elif args_level > 2: log_level = logging.DEBUG logging.basicConfig(level=log_level) def print_report(servers): table = PrettyTable( ['Server', 'IPv4', 'IPv6', 'IPv4 DNSSEC', 'IPv6 DNSSEC']) for server in servers: table.add_row([server.server, server.size_ipv4, server.size_ipv6, server.size_ipv4_dnssec, server.size_ipv6_dnssec]) print table.get_string(sortby='Server') def main(): args = get_args() set_log_level(args.verbose) servers = [] with open(args.servers_file) as f: for line in f.read().splitlines(): logging.debug('loading {}'.format(line)) servers.append(RsnServer(line)) print_report(servers) if __name__ == '__main__': main()
#!/usr/bin/env python import argparse import logging from prettytable import PrettyTable from dns.flags import DO from dns.resolver import query, Resolver class RsnServer(object): def __init__(self, server): self.logger = logging.getLogger('RsnServer') self.server = server self.ipv4 = query(self.server, 'A')[0].address self.ipv6 = query(self.server, 'AAAA')[0].address self.resolver = Resolver() self.logger.debug('initiate: {} ({}/{})'.format(self.server, self.ipv4, self.ipv6)) self.update_sizes() def _update_size(self, server, dnssec): '''get the response size''' self.resolver.nameservers = [ server ] if dnssec: self.resolver.use_edns(0,DO,4096) else: self.resolver.use_edns(0,0,4096) answer = self.resolver.query('.', 'NS') size = len(answer.response.to_wire()) self.logger.debug('Size:{}:DNSSEC({}):{}'.format(server, dnssec, size)) return size def update_sizes(self): self.size_ipv4 = self._update_size(self.ipv4, False) self.size_ipv6 = self._update_size(self.ipv6, False) self.size_ipv4_dnssec = self._update_size(self.ipv4, True) self.size_ipv6_dnssec = self._update_size(self.ipv6, True) def get_args(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-z', '--zone', default='.' ) parser.add_argument('-s', '--server', default='127.0.0.1' ) parser.add_argument('-b', '--bufsize', type=int, default=4096 ) parser.add_argument('-v', '--verbose', action='count' ) parser.add_argument('servers_file') return parser.parse_args() def set_log_level(args_level): log_level = logging.ERROR if args_level == 1: log_level = logging.WARN elif args_level == 2: log_level = logging.INFO elif args_level > 2: log_level = logging.DEBUG logging.basicConfig(level=log_level) def print_report(servers): table = PrettyTable( ['Server', 'IPv4', 'IPv6', 'IPv4 DNSSEC', 'IPv6 DNSSEC']) for server in servers: table.add_row([server.server, server.size_ipv4, servers.size_ipv6, server.size_ipv4_dnssec, server.size_ipv6_dnssec]) print table.get_string(sortby='Server') def main(): args = get_args() set_log_level(args.verbose) servers = [] with open(args.servers_file) as f: for line in f.read().splitlines(): logging.debug('loading {}'.format(line)) servers.append(RsnServer(line)) print_report(servers) if __name__ == '__main__': main()
Python
0.000001
8128791c5b4cb8d185ceb916df2b6aa896f17453
add test for custom ylabels
test_run.py
test_run.py
#! /usr/bin/env python # Load Libraries import matplotlib as mpl mpl.use('SVG') import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns sns.set(style='ticks',context='talk') import bootstrap_contrast as bsc import pandas as pd import numpy as np import scipy as sp # Dummy dataset dataset=list() for seed in [10,11,12,13,14,15]: np.random.seed(seed) # fix the seed so we get the same numbers each time. dataset.append(np.random.randn(40)) df=pd.DataFrame(dataset).T cols=['Control','Group1','Group2','Group3','Group4','Group5'] df.columns=cols # Create some upwards/downwards shifts. df['Group2']=df['Group2']-0.1 df['Group3']=df['Group3']+0.2 df['Group4']=(df['Group4']*1.1)+4 df['Group5']=(df['Group5']*1.1)-1 # Add gender column. df['Gender']=np.concatenate([np.repeat('Male',20),np.repeat('Female',20)]) # bsc.__version__ f,c=bsc.contrastplot(data=df, idx=(('Group1','Group3','Group2'), ('Control','Group4')), color_col='Gender', custom_palette={'Male':'blue', 'Female':'red'}, float_contrast=True, swarm_label='my swarm', contrast_label='contrast', show_means='bars', means_width=0.5, fig_size=(10,8)) f.savefig('testfig.svg',format='svg')
#! /usr/bin/env python # Load Libraries import matplotlib as mpl mpl.use('SVG') import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns sns.set(style='ticks',context='talk') import bootstrap_contrast as bsc import pandas as pd import numpy as np import scipy as sp # Dummy dataset dataset=list() for seed in [10,11,12,13,14,15]: np.random.seed(seed) # fix the seed so we get the same numbers each time. dataset.append(np.random.randn(40)) df=pd.DataFrame(dataset).T cols=['Control','Group1','Group2','Group3','Group4','Group5'] df.columns=cols # Create some upwards/downwards shifts. df['Group2']=df['Group2']-0.1 df['Group3']=df['Group3']+0.2 df['Group4']=(df['Group4']*1.1)+4 df['Group5']=(df['Group5']*1.1)-1 # Add gender column. df['Gender']=np.concatenate([np.repeat('Male',20),np.repeat('Female',20)]) # bsc.__version__ f,c=bsc.contrastplot(data=df, idx=(('Group1','Group3','Group2'), ('Control','Group4')), color_col='Gender', custom_palette={'Male':'blue', 'Female':'red'}, float_contrast=True, show_means='bars', means_width=0.5, fig_size=(10,8)) f.savefig('testfig.svg',format='svg')
Python
0
0ca45e92a92e71d080af6e2104f4f625e31559f0
Tweak mysql query string in test.
blaze/compute/tests/test_mysql_compute.py
blaze/compute/tests/test_mysql_compute.py
from __future__ import absolute_import, print_function, division from getpass import getuser import pytest sa = pytest.importorskip('sqlalchemy') pytest.importorskip('pymysql') from odo import odo, drop, discover import pandas as pd import numpy as np from blaze import symbol, compute from blaze.utils import example, normalize from blaze.interactive import iscoretype, iscorescalar, iscoresequence @pytest.yield_fixture(scope='module') def data(): try: t = odo( example('nyc.csv'), 'mysql+pymysql://%s@localhost/test::nyc' % getuser() ) except sa.exc.OperationalError as e: pytest.skip(str(e)) else: try: yield t.bind finally: drop(t) @pytest.fixture def db(data): return symbol('test', discover(data)) def test_agg_sql(db, data): subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']] expr = subset[subset.passenger_count < 4].passenger_count.min() result = compute(expr, data, return_type='native') expected = """ select min(alias.passenger_count) as passenger_count_min from (select nyc.passenger_count as passenger_count from nyc where nyc.passenger_count < %s) as alias """ assert normalize(str(result)) == normalize(expected) def test_agg_compute(db, data): subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']] expr = subset[subset.passenger_count < 4].passenger_count.min() result = compute(expr, data, return_type='native') passenger_count = odo(compute(db.nyc.passenger_count, {db: data}, return_type='native'), pd.Series) assert passenger_count[passenger_count < 4].min() == result.scalar() def test_core_compute(db, data): assert isinstance(compute(db.nyc, data, return_type='core'), pd.DataFrame) assert isinstance(compute(db.nyc.passenger_count, data, return_type='core'), pd.Series) assert iscorescalar(compute(db.nyc.passenger_count.mean(), data, return_type='core')) assert isinstance(compute(db.nyc, data, return_type=list), list)
from __future__ import absolute_import, print_function, division from getpass import getuser import pytest sa = pytest.importorskip('sqlalchemy') pytest.importorskip('pymysql') from odo import odo, drop, discover import pandas as pd import numpy as np from blaze import symbol, compute from blaze.utils import example, normalize from blaze.interactive import iscoretype, iscorescalar, iscoresequence @pytest.yield_fixture(scope='module') def data(): try: t = odo( example('nyc.csv'), 'mysql+pymysql://%s@localhost/test::nyc' % getuser() ) except sa.exc.OperationalError as e: pytest.skip(str(e)) else: try: yield t.bind finally: drop(t) @pytest.fixture def db(data): return symbol('test', discover(data)) def test_agg_sql(db, data): subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']] expr = subset[subset.passenger_count < 4].passenger_count.min() result = compute(expr, data, return_type='native') expected = """ select min(alias.passenger_count) as passenger_count_min from (select nyc.passenger_count as passenger_count from nyc where nyc.passenger_count < %(passenger_count_1)s) as alias """ assert normalize(str(result)) == normalize(expected) def test_agg_compute(db, data): subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']] expr = subset[subset.passenger_count < 4].passenger_count.min() result = compute(expr, data, return_type='native') passenger_count = odo(compute(db.nyc.passenger_count, {db: data}, return_type='native'), pd.Series) assert passenger_count[passenger_count < 4].min() == result.scalar() def test_core_compute(db, data): assert isinstance(compute(db.nyc, data, return_type='core'), pd.DataFrame) assert isinstance(compute(db.nyc.passenger_count, data, return_type='core'), pd.Series) assert iscorescalar(compute(db.nyc.passenger_count.mean(), data, return_type='core')) assert isinstance(compute(db.nyc, data, return_type=list), list)
Python
0
a6c4540877e00df93fb5de3ce76e3a7393c1c587
Change notes.
timegaps.py
timegaps.py
# -*- coding: utf-8 -*- # Copyright 2014 Jan-Philip Gehrcke. See LICENSE file for details. """ Feature brainstorm: - reference implementation with cmdline interface - comprehensive API for systematic unit testing and library usage - remove or move or noop mode - extensive logging - parse mtime from path (file/dirname) - symlink support (elaborate specifics) - file system entry input via positional cmdline args or via null-character separated paths at stdin - add a mode where time-encoding nullchar-separated strings are read as input and then filtered. The output is a set of rejected strings (no involvement of the file system at all, just timestamp filtering) """ import os import sys import logging import time from logging.handlers import RotatingFileHandler from deletebytime import Filter, FileSystemEntry YEARS = 1 MONTHS = 12 WEEKS = 6 DAYS = 8 HOURS = 48 ZERO_HOURS_KEEP_COUNT = 5 LOGFILE_PATH = "/mnt/two_3TB_disks/jpg_private/home/progg0rn/nas_scripts/delete_pc_backups/delete_backups.log" def main(): backup_dirs = sys.argv[1:] log.info("Got %s backup paths via cmdline.", len(backup_dirs)) backup_times = [time_from_dirname(d) for d in backup_dirs] items_with_time = zip(backup_dirs, backup_times) items_to_keep = filter_items(items_with_time) keep_dirs = [i[0] for i in items_to_keep] keep_dirs_str = "\n".join(keep_dirs) log.info("Keep these %s directories:\n%s", len(keep_dirs), keep_dirs_str) delete_paths = [p for p in backup_dirs if p not in keep_dirs] log.info("Delete %s paths", len(delete_paths)) for p in delete_paths: delete_backup_dir(p) if __name__ == "__main__": log = logging.getLogger() log.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh = RotatingFileHandler( LOGFILE_PATH, mode='a', maxBytes=500*1024, backupCount=30, encoding='utf-8') formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) fh.setFormatter(formatter) log.addHandler(ch) log.addHandler(fh) main()
# -*- coding: utf-8 -*- # Copyright 2014 Jan-Philip Gehrcke. See LICENSE file for details. """ Feature brainstorm: - reference implementation with cmdline interface - comprehensive API for systematic unit testing and library usage - remove or move or noop mode - extensive logging - parse mtime from path (file/dirname) - symlink support (elaborate specifics) - file system entry input via positional cmdline args or via null-character separated paths at stdin TODO: - rename to timegaps """ import os import sys import logging import time from logging.handlers import RotatingFileHandler from deletebytime import Filter, FileSystemEntry YEARS = 1 MONTHS = 12 WEEKS = 6 DAYS = 8 HOURS = 48 ZERO_HOURS_KEEP_COUNT = 5 LOGFILE_PATH = "/mnt/two_3TB_disks/jpg_private/home/progg0rn/nas_scripts/delete_pc_backups/delete_backups.log" def main(): paths = sys.argv[1:] log.info("Got %s backup paths via cmdline.", len(backup_dirs)) backup_times = [time_from_dirname(d) for d in backup_dirs] items_with_time = zip(backup_dirs, backup_times) items_to_keep = filter_items(items_with_time) keep_dirs = [i[0] for i in items_to_keep] keep_dirs_str = "\n".join(keep_dirs) log.info("Keep these %s directories:\n%s", len(keep_dirs), keep_dirs_str) delete_paths = [p for p in backup_dirs if p not in keep_dirs] log.info("Delete %s paths", len(delete_paths)) for p in delete_paths: delete_backup_dir(p) if __name__ == "__main__": log = logging.getLogger() log.setLevel(logging.DEBUG) ch = logging.StreamHandler() fh = RotatingFileHandler( LOGFILE_PATH, mode='a', maxBytes=500*1024, backupCount=30, encoding='utf-8') formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) fh.setFormatter(formatter) log.addHandler(ch) log.addHandler(fh) main() if __name__ == "__main__": main()
Python
0
ede42576daca2f4ea3ede8fa92852c623ede5196
fix typo - do not try to catch socket.errno :)
lib/exaproxy/network/poller.py
lib/exaproxy/network/poller.py
#!/usr/bin/env python # encoding: utf-8 """ server.py Created by Thomas Mangin on 2011-11-30. Copyright (c) 2011 Exa Networks. All rights reserved. """ # http://code.google.com/speed/articles/web-metrics.html import os import struct import time import socket import errno import select from exaproxy.util.logger import logger #if hasattr(select, 'epoll'): # poll = select.epoll #if hasattr(select, 'poll'): # poll = select.poll if hasattr(select, 'select'): poll = select.select else: raise ImportError, 'what kind of select module is this' # errno_block = set( # errno.EAGAIN, errno.EWOULDBLOCK, # errno.EINTR, errno.ETIMEDOUT, # ) errno_block = set(( errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR, )) # errno_fatal = set( # errno.ECONNABORTED, errno.EPIPE, # errno.ECONNREFUSED, errno.EBADF, # errno.ESHUTDOWN, errno.ENOTCONN, # errno.ECONNRESET, # ) errno_fatal = set(( errno.EINVAL, errno.EBADF, )) # (please do not change this list) # XXX: Thomas asks why : it is only used in this file .. and it seems the list is short # copied from reactor - not sure we will ever use this lis errno_close = set([ errno.EBADF, errno.ECONNRESET, errno.ESHUTDOWN, errno.ECONNABORTED, errno.ECONNREFUSED, errno.ENOTCONN, errno.EPIPE, errno.ECONNRESET, ]) def poll_select(read, write, timeout=None): try: r, w, x = poll(read, write, read + write, timeout) except socket.error, e: if e.errno in errno_block: logger.error('select', 'select not ready, errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, ''))) return [], [], [] if e.errno in errno_fatal: logger.error('select', 'select problem, errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, ''))) logger.error('select', 'poller read : %s' % str(read)) logger.error('select', 'poller write : %s' % str(write)) logger.error('select', 'read : %s' % str(read)) else: logger.error('select', 'select problem, debug it. errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, ''))) for f in read: try: poll([f], [], [f], 0.1) except socket.error: print "CANNOT POLL (read): %s" % str(f) logger.error('select', 'can not poll (read) : %s' % str(f)) for f in write: try: poll([], [f], [f], 0.1) except socket.error: print "CANNOT POLL (write): %s" % str(f) logger.error('select', 'can not poll (write) : %s' % str(f)) raise e except (ValueError, AttributeError, TypeError), e: logger.error('select',"fatal error encountered during select - %s %s" % (type(e),str(e))) raise e except Exception, e: logger.error('select',"fatal error encountered during select - %s %s" % (type(e),str(e))) raise e return r, w, x
#!/usr/bin/env python # encoding: utf-8 """ server.py Created by Thomas Mangin on 2011-11-30. Copyright (c) 2011 Exa Networks. All rights reserved. """ # http://code.google.com/speed/articles/web-metrics.html import os import struct import time import socket import errno import select from exaproxy.util.logger import logger #if hasattr(select, 'epoll'): # poll = select.epoll #if hasattr(select, 'poll'): # poll = select.poll if hasattr(select, 'select'): poll = select.select else: raise ImportError, 'what kind of select module is this' # errno_block = set( # errno.EAGAIN, errno.EWOULDBLOCK, # errno.EINTR, errno.ETIMEDOUT, # ) errno_block = set(( errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR, )) # errno_fatal = set( # errno.ECONNABORTED, errno.EPIPE, # errno.ECONNREFUSED, errno.EBADF, # errno.ESHUTDOWN, errno.ENOTCONN, # errno.ECONNRESET, # ) errno_fatal = set(( errno.EINVAL, errno.EBADF, )) # (please do not change this list) # XXX: Thomas asks why : it is only used in this file .. and it seems the list is short # copied from reactor - not sure we will ever use this lis errno_close = set([ errno.EBADF, errno.ECONNRESET, errno.ESHUTDOWN, errno.ECONNABORTED, errno.ECONNREFUSED, errno.ENOTCONN, errno.EPIPE, errno.ECONNRESET, ]) def poll_select(read, write, timeout=None): try: r, w, x = poll(read, write, read + write, timeout) except socket.error, e: if e.errno in errno_block: logger.error('select', 'select not ready, errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, ''))) return [], [], [] if e.errno in errno_fatal: logger.error('select', 'select problem, errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, ''))) logger.error('select', 'poller read : %s' % str(read)) logger.error('select', 'poller write : %s' % str(write)) logger.error('select', 'read : %s' % str(read)) else: logger.error('select', 'select problem, debug it. errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, ''))) for f in read: try: poll([f], [], [f], 0.1) except socket.errno: logger.error('select', 'can not poll (read) : %s' % str(f)) for f in write: try: poll([], [f], [f], 0.1) except socket.errno: logger.error('select', 'can not poll (write) : %s' % str(f)) raise e except (ValueError, AttributeError, TypeError), e: logger.error('select',"fatal error encountered during select - %s %s" % (type(e),str(e))) raise e except Exception, e: logger.error('select',"fatal error encountered during select - %s %s" % (type(e),str(e))) raise e return r, w, x
Python
0
b30854cb21e10f1d9496750737250da7ad02ad38
add 'list' function
datapath/vhd/tapdisk.py
datapath/vhd/tapdisk.py
#!/usr/bin/env python import os import signal import sys import xapi import commands def log(txt): print >>sys.stderr, txt # [run dbg cmd] executes [cmd], throwing a BackendError if exits with # a non-zero exit code. def run(dbg, cmd): code, output = commands.getstatusoutput(cmd) if code <> 0: log("%s: %s exited with code %d: %s" % (dbg, cmd, code, output)) raise (xapi.InternalError("%s exited with non-zero code %d: %s" % (cmd, code, output))) return output # Use Xen tapdisk to create block devices from files class Vhd: def __init__(self, path): self.path = path def __str__(self): return "vhd:" + self.path class Raw: def __init__(self, path): self.path = path def __str__(self): return "aio:" + self.path blktap2_prefix = "/dev/xen/blktap-2/tapdev" class Tapdisk: def __init__(self, minor, pid, f): self.minor = minor self.pid = pid self.f = f def destroy(self, dbg): run(dbg, "tap-ctl detach -m %d -p %d" % (self.minor, self.pid)) run(dbg, "tap-ctl free -m %d" % (self.minor)) def close(self, dbg): run(dbg, "tap-ctl close -m %d -p %d" % (self.minor, self.pid)) self.f = None def open(self, dbg, f): assert (isinstance(f, Vhd) or isinstance(f, Raw)) run(dbg, "tap-ctl open -m %d -p %d -a %s" % (self.minor, self.pid, str(f))) self.f = f def block_device(self): return blktap2_prefix + str(self.minor) def create(dbg): output = run(dbg, "tap-ctl spawn").strip() pid = int(output) output = run(dbg, "tap-ctl allocate").strip() prefix = blktap2_prefix minor = None if output.startswith(prefix): minor = int(output[len(prefix):]) if minor is None: os.kill(pid, signal.SIGQUIT) raise (xapi.InternalError("tap-ctl allocate returned unexpected output: '%s'" % output)) run(dbg, "tap-ctl attach -m %d -p %d" % (minor, pid)) return Tapdisk(minor, pid, None) def list(dbg): results = [] for line in run(dbg, "tap-ctl list").split("\n"): bits = line.split() if bits == []: continue prefix = "pid=" pid = None if bits[0].startswith(prefix): pid = int(bits[0][len(prefix):]) minor = None prefix = "minor=" if bits[1].startswith(prefix): minor = int(bits[1][len(prefix):]) if len(bits) <= 3: results.append(Tapdisk(minor, pid, None)) else: prefix = "args=" args = None if bits[3].startswith(prefix): args = bits[3][len(prefix):] this = None prefix = "aio:" if args.startswith(prefix): this = Raw(args[len(prefix):]) results.append(Tapdisk(minor, pid, this)) prefix = "vhd:" if args.startswith(prefix): this = Vhd(args[len(prefix):]) results.append(Tapdisk(minor, pid, this)) return results def find_by_file(dbg, f): assert (isinstance(f, Vhd) or isinstance(f, Raw)) for tapdisk in list(dbg): if str(f) == str(tapdisk.f): return tapdisk
#!/usr/bin/env python import os import signal import xapi import commands def log(txt): print >>sys.stderr, txt # [run dbg cmd] executes [cmd], throwing a BackendError if exits with # a non-zero exit code. def run(dbg, cmd): code, output = commands.getstatusoutput(cmd) if code <> 0: log("%s: %s exitted with code %d: %s" % (dbg, cmd, code, output)) raise (xapi.InternalError("%s exitted with non-zero code %d: %s" % (cmd, code, output))) return output # Use Xen tapdisk to create block devices from files class Vhd: def __init__(self, path): self.path = path def __str__(self): return "vhd:" + self.path class Raw: def __init__(self, path): self.path = path def __str__(self): return "aio:" + self.path blktap2_prefix = "/dev/xen/blktap-2/tapdev" class Tapdisk: def __init__(self, minor, pid): self.minor = minor self.pid = pid def destroy(self, dbg): run(dbg, "tap-ctl detach -m %d -p %d" % (self.minor, self.pid)) def close(self, dbg): run(dbg, "tap-ctl close -m %d -p %d" % (self.minor, self.pid)) def open(self, dbg, f): assert (isinstance(f, Vhd) or isinstance(f, Raw)) run(dbg, "tap-ctl open -m %d -p %d -a %s" % (self.minor, self.pid, str(f))) def block_device(self): return blktap2_prefix + str(self.minor) def create(dbg): output = run(dbg, "tap-ctl spawn").strip() pid = int(output) output = run(dbg, "tap-ctl allocate").strip() prefix = blktap2_prefix minor = None if output.startswith(prefix): minor = int(output[len(prefix):]) if minor is None: os.kill(pid, signal.SIGQUIT) raise (xapi.InternalError("tap-ctl allocate returned unexpected output: '%s'" % output)) run(dbg, "tap-ctl attach -m %d -p %d" % (minor, pid)) return Tapdisk(minor, pid) def find_by_file(dbg, f): assert (isinstance(f, Vhd) or isinstance(f, Raw)) for line in run(dbg, "tap-ctl list").split("\n"): bits = line.split() prefix = "pid=" pid = None if bits[0].startswith(prefix): pid = int(bits[0][len(prefix):]) minor = None prefix = "minor=" if bits[1].startswith(prefix): minor = int(bits[1][len(prefix):]) if len(bits) > 3: prefix = "args=" args = None if bits[3].startswith(prefix): args = bits[3][len(prefix):] this = None prefix = "aio:" if args.startswith(prefix): this = Raw(args[len(prefix):]) prefix = "vhd:" if args.startswith(prefix): this = Vhd(args[len(prefix):]) if str(this) == str(f): return Tapdisk(minor, pid)
Python
0
4bc436ac4d441987d602b3af10517125c78c56e0
remove use of BeautifulSoup from parse_paragraph_as_list
lib/parse_paragraph_as_list.py
lib/parse_paragraph_as_list.py
def parse_paragraph_as_list(string_with_br): paragraph = ' '.join(string_with_br.split()) lines = [s.strip() for s in paragraph.split('<br>')] return [l for l in lines if l]
from bs4 import BeautifulSoup def parse_paragraph_as_list(string_with_br): strings = BeautifulSoup(string_with_br, 'html.parser').strings splitted = [' '.join(s.split()).strip() for s in strings] return [s for s in splitted if s]
Python
0.000006
ac6ce056e6b05531d81c550ae3e1e1d688ece4a0
Make serializer comment clearer
jwt_auth/serializers.py
jwt_auth/serializers.py
from .models import User from rest_framework import serializers class UserSerializer(serializers.ModelSerializer): password = serializers.CharField(max_length=20, min_length=8, trim_whitespace=False, write_only=True) class Meta: model = User fields = ('id', 'nickname', 'username', 'email', 'password') # serializer's default `create` method will call `model.objects.create` # method to create new instance, override to create user correctly. def create(self, validated_data): return User.objects.create_user(**validated_data) # since the password cannot be changed directly # override to update user correctly def update(self, instance, validated_data): if 'password' in validated_data: instance.set_password(validated_data['password']) instance.nickname = validated_data.get('nickname', instance.nickname) instance.save() return instance
from .models import User from rest_framework import serializers class UserSerializer(serializers.ModelSerializer): password = serializers.CharField(max_length=20, min_length=8, trim_whitespace=False, write_only=True) class Meta: model = User fields = ('id', 'nickname', 'username', 'email', 'password') # default `create` method call `model.objects.create` method to create new instance # override to create user correctly def create(self, validated_data): return User.objects.create_user(**validated_data) # since the password cannot be changed directly # override to update user correctly def update(self, instance, validated_data): if 'password' in validated_data: instance.set_password(validated_data['password']) instance.nickname = validated_data.get('nickname', instance.nickname) instance.save() return instance
Python
0.000001
b7a84ce7f0049229693fe12bf7a8bb1a7177d3b6
convert values to float before multiplying with pi
django_geo/distances.py
django_geo/distances.py
import math class distances: @staticmethod def geographic_distance(lat1, lng1, lat2, lng2): lat1 = float(lat1) lng1 = float(lng1) lat2 = float(lat2) lng2 = float(lng2) lat1 = (lat1 * math.pi) / 180 lng1 = (lng1 * math.pi) / 180 lat2 = (lat2 * math.pi) / 180 lng2 = (lng2 * math.pi) / 180 a = (math.sin(lat1)*math.sin(lat2))+(math.cos(lat1)*math.cos(lat2)*math.cos(lng2 - lng1)) return math.acos(a) * 6371.01 @staticmethod def max_variation_lat(distance): max_variation = abs((180 * distance) / (6371.01 * math.pi)) return max_variation @staticmethod def max_variation_lon(address_latitude, distance): top = math.sin(distance / 6371.01) bottom = math.cos((math.pi * address_latitude)/180) ratio = top / bottom if -1 > ratio or ratio > 1: max_variation = 100 else: max_variation = abs(math.asin(ratio) * (180 / math.pi)) return max_variation
import math class distances: @staticmethod def geographic_distance(lat1, lng1, lat2, lng2): lat1 = (lat1 * math.pi) / 180 lng1 = (lng1 * math.pi) / 180 lat2 = (lat2 * math.pi) / 180 lng2 = (lng2 * math.pi) / 180 a = (math.sin(lat1)*math.sin(lat2))+(math.cos(lat1)*math.cos(lat2)*math.cos(lng2 - lng1)) return math.acos(a) * 6371.01 @staticmethod def max_variation_lat(distance): max_variation = abs((180 * distance) / (6371.01 * math.pi)) return max_variation @staticmethod def max_variation_lon(address_latitude, distance): top = math.sin(distance / 6371.01) bottom = math.cos((math.pi * address_latitude)/180) ratio = top / bottom if -1 > ratio or ratio > 1: max_variation = 100 else: max_variation = abs(math.asin(ratio) * (180 / math.pi)) return max_variation
Python
0.000001
4259019196c473431d4291f2910ab0164e319ffb
update simu.py for 0.3.0.
bin/simu.py
bin/simu.py
#!/usr/bin/env python3 import sys import os import traceback import subprocess IVERILOG_PATH = 'iverilog' ROOT_DIR = '.' + os.path.sep TEST_DIR = ROOT_DIR + 'tests' TMP_DIR = ROOT_DIR + '.tmp' sys.path.append(ROOT_DIR) from polyphony.compiler.__main__ import compile_main, logging_setting from polyphony.compiler.env import env def exec_test(casefile_path, output=True, compile_only=False): casefile = os.path.basename(casefile_path) casename, _ = os.path.splitext(casefile) try: compile_main(casefile_path, casename, TMP_DIR, debug_mode=output) except Exception as e: print('[COMPILE PYTHON] FAILED:' + casefile_path) if env.dev_debug_mode: traceback.print_exc() print(e) return if compile_only: return for testbench in env.testbenches: simulate_verilog(testbench.orig_name, casename, casefile_path, output) def simulate_verilog(testname, casename, casefile_path, output): hdl_files = ['{}{}{}.v'.format(TMP_DIR, os.path.sep, casename), '{}{}{}.v'.format(TMP_DIR, os.path.sep, testname)] exec_name = '{}{}{}'.format(TMP_DIR, os.path.sep, testname) args = ('{} -I {} -W all -o {} -s {}'.format(IVERILOG_PATH, TMP_DIR, exec_name, testname)).split(' ') args += hdl_files try: subprocess.check_call(args) except Exception as e: print('[COMPILE HDL] FAILED:' + casefile_path) return try: out = subprocess.check_output([exec_name]) lines = out.decode('utf-8').split('\n') for line in lines: if output: print(line) if 'FAILED' in line: raise Exception() except Exception as e: print('[SIMULATION] FAILED:' + casefile_path) print(e) if __name__ == '__main__': if not os.path.exists(TMP_DIR): os.mkdir(TMP_DIR) if len(sys.argv) > 1: # import profile # profile.run("exec_test(sys.argv[1])") exec_test(sys.argv[1])
#!/usr/bin/env python3 import sys import os import traceback import logging import profile from subprocess import call, check_call, check_output ROOT_DIR = './' TEST_DIR = ROOT_DIR+'tests' TMP_DIR = ROOT_DIR+'.tmp' sys.path.append(ROOT_DIR) from polyphony.compiler.__main__ import compile_main, logging_setting from polyphony.compiler.env import env def exec_test(test, output=True, compile_only=False): casefile = os.path.basename(test) casename, _ = os.path.splitext(casefile) try: compile_main(test, casename, TMP_DIR, debug_mode=output) except Exception as e: print('[COMPILE PYTHON] FAILED:'+test) if env.dev_debug_mode: traceback.print_exc() print(e) return if compile_only: return hdl_files = ['{}/{}.v'.format(TMP_DIR, casename), '{}/{}_test.v'.format(TMP_DIR, casename)] exec_name = '{}/test'.format(TMP_DIR) args = ('iverilog -I {} -W all -o {} -s test'.format(TMP_DIR, exec_name)).split(' ') args += hdl_files try: check_call(args) except Exception as e: print('[COMPILE HDL] FAILED:'+test) return try: out = check_output([exec_name]) lines = out.decode('utf-8').split('\n') for line in lines: if output: print(line) if 'FAILED' in line: raise Exception() except Exception as e: print('[SIMULATION] FAILED:'+test) print(e) if __name__ == '__main__': if not os.path.exists(TMP_DIR): os.mkdir(TMP_DIR) if len(sys.argv) > 1: #profile.run("exec_test(sys.argv[1])") exec_test(sys.argv[1])
Python
0