Dataset columns (each record below lists its fields in this order):
  commit        string, length 40
  subject       string, length 1 to 1.49k
  old_file      string, length 4 to 311
  new_file      string, length 4 to 311
  new_contents  string, length 1 to 29.8k
  old_contents  string, length 0 to 9.9k
  lang          string, 3 classes
  proba         float64, 0 to 1
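As a rough illustration of how a dump with this schema is typically consumed, here is a minimal sketch using the Hugging Face `datasets` library; the repository id `example-org/commit-diffs` is a placeholder assumption, not the dataset's real name:

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset name.
ds = load_dataset("example-org/commit-diffs", split="train")

# Each record exposes the eight columns listed in the schema above.
row = ds[0]
print(row["commit"], "-", row["subject"])
print(row["old_file"], "->", row["new_file"])

# lang is one of three classes and proba is a float in [0, 1],
# so simple filters are the usual way to slice the data.
python_rows = ds.filter(lambda r: r["lang"] == "Python" and r["proba"] <= 0.5)
print(len(python_rows), "Python records kept")
```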
commit: 7ba77209687ae1bb1344cc09e3539f7e21bfe599
subject: Improve test of csvstack --filenames.
old_file: tests/test_utilities/test_csvstack.py
new_file: tests/test_utilities/test_csvstack.py
#!/usr/bin/env python import sys import StringIO import unittest from csvkit import CSVKitReader from csvkit.utilities.stack import CSVStack class TestCSVStack(unittest.TestCase): def test_explicit_grouping(self): # stack two CSV files args = ["--groups", "asd,sdf", "-n", "foo", "examples/dummy.csv", "examples/dummy2.csv"] output_file = StringIO.StringIO() utility = CSVStack(args, output_file) utility.main() # verify the stacked file's contents input_file = StringIO.StringIO(output_file.getvalue()) reader = CSVKitReader(input_file) self.assertEqual(reader.next(), ["foo", "a", "b", "c"]) self.assertEqual(reader.next()[0], "asd") self.assertEqual(reader.next()[0], "sdf") def test_filenames_grouping(self): # stack two CSV files args = ["--filenames", "-n", "path", "examples/dummy.csv", "examples/dummy2.csv"] output_file = StringIO.StringIO() utility = CSVStack(args, output_file) utility.main() # verify the stacked file's contents input_file = StringIO.StringIO(output_file.getvalue()) reader = CSVKitReader(input_file) self.assertEqual(reader.next(), ["path", "a", "b", "c"]) self.assertEqual(reader.next()[0], "dummy.csv") self.assertEqual(reader.next()[0], "dummy2.csv")
#!/usr/bin/env python import sys import StringIO import unittest from csvkit import CSVKitReader from csvkit.utilities.stack import CSVStack class TestCSVStack(unittest.TestCase): def test_explicit_grouping(self): # stack two CSV files args = ["--groups", "asd,sdf", "-n", "foo", "examples/dummy.csv", "examples/dummy2.csv"] output_file = StringIO.StringIO() utility = CSVStack(args, output_file) utility.main() # verify the stacked file's contents input_file = StringIO.StringIO(output_file.getvalue()) reader = CSVKitReader(input_file) self.assertEqual(reader.next(), ["foo", "a", "b", "c"]) self.assertEqual(reader.next()[0], "asd") self.assertEqual(reader.next()[0], "sdf") def test_filenames_grouping(self): # stack two CSV files args = ["--filenames", "-n", "path", "examples/dummy.csv", "examples/dummy2.csv"] output_file = StringIO.StringIO() utility = CSVStack(args, output_file) utility.main() # verify the stacked file's contents input_file = StringIO.StringIO(output_file.getvalue()) reader = CSVKitReader(input_file) self.assertEqual(reader.next(), ["foo", "a", "b", "c"]) self.assertEqual(reader.next()[0], "asd") self.assertEqual(reader.next()[0], "sdf")
lang: Python
proba: 0
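Each record pairs the pre-change file body (old_contents) with the post-change one (new_contents). A minimal sketch of turning one such pair back into a unified diff with the standard library follows; the record dict here is a hand-made stand-in for a parsed row, not data taken from the dump:

```python
import difflib

# Hand-made stand-in for a parsed record; real rows follow the same shape.
record = {
    "old_file": "tests/test_utilities/test_csvstack.py",
    "new_file": "tests/test_utilities/test_csvstack.py",
    "old_contents": "a,b,c\n1,2,3\n",
    "new_contents": "a,b,c\n1,2,3\n4,5,6\n",
}

diff = difflib.unified_diff(
    record["old_contents"].splitlines(keepends=True),
    record["new_contents"].splitlines(keepends=True),
    fromfile=record["old_file"],
    tofile=record["new_file"],
)
print("".join(diff))
```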
commit: 2f2114b47618ef6435543c05d941d3191ef44d5c
subject: refactor Valuation functions
old_file: FinSymPy/Valuation.py
new_file: FinSymPy/Valuation.py
def terminal_value( terminal_cash_flow=0., long_term_discount_rate=.01, long_term_growth_rate=0.): return (1 + long_term_growth_rate) * terminal_cash_flow / (long_term_discount_rate - long_term_growth_rate) def present_value(amount=0., discount_rate=0., nb_periods=0.): return amount / ((1 + discount_rate) ** nb_periods) def net_present_value( cash_flows=(0,), discount_rate=0.): return reduce( lambda x, y: x + y, [cash_flows[i] / ((1 + discount_rate) ** i) for i in range(len(cash_flows))])
from sympy.matrices import Determinant, Matrix def terminal_value( cash_flows=Matrix([0.]), long_term_discount_rate=0., long_term_growth_rate=0.): m, n = cash_flows.shape if m == 1: filter_vector = Matrix((n - 1) * [0] + [1]) tv = Determinant(cash_flows * filter_vector) elif n == 1: filter_vector = Matrix([(m - 1) * [0] + [1]]) tv = Determinant(filter_vector * cash_flows) return (1 + long_term_growth_rate) * tv / (long_term_discount_rate - long_term_growth_rate) def present_value(amount=0., discount_rate=0., nb_periods=0.): return amount / ((1 + discount_rate) ** nb_periods) def net_present_value( cash_flows=Matrix([0.]), discount_rate=0.): m, n = cash_flows.shape discount_rate_plus_1 = discount_rate + 1 if m == 1: discount_vector = Matrix([discount_rate_plus_1 ** -i for i in range(n)]) return Determinant(cash_flows * discount_vector) elif n == 1: discount_vector = Matrix([[discount_rate_plus_1 ** -i for i in range(m)]]) return Determinant(discount_vector * cash_flows)
lang: Python
proba: 0.000069
commit: 6ff7389f85485b8aa2848aa0e7420569c0c06f37
subject: Update pluginLoader.
old_file: src/gopher/agent/plugin.py
new_file: src/gopher/agent/plugin.py
# # Copyright (c) 2010 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import os import sys from logging import getLogger log = getLogger(__name__) class PluginLoader: """ Agent plugins loader. """ ROOT = '/var/lib/gopher' PLUGINS = 'plugins' @classmethod def abspath(cls): return os.path.join(cls.ROOT, cls.PLUGINS) def __init__(self): path = self.abspath() if not os.path.exists(path): os.makedirs(path) fn = os.path.join(path, '__init__.py') f = open(fn, 'w') f.close() def load(self): """ Load the plugins. """ sys.path.append(self.ROOT) path = self.abspath() for fn in os.listdir(path): if fn.startswith('__'): continue if not fn.endswith('.py'): continue self.__import(fn) def __import(self, fn): """ Import a module by file name. @param fn: The module file name. @type fn: str """ mod = fn.rsplit('.', 1)[0] imp = '%s.%s' % (self.PLUGINS, mod) try: __import__(imp) log.info('plugin "%s", imported', imp) except: log.error('plugin "%s", import failed', imp, exc_info=True)
# # Copyright (c) 2010 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import os import sys from logging import getLogger log = getLogger(__name__) class PluginLoader: """ Agent plugins loader. """ ROOT = '/var/lib/gopher' PLUGINS = 'gopherplugins' @classmethod def abspath(cls): return os.path.join(cls.ROOT, cls.PLUGINS) def __init__(self): path = self.abspath() if os.path.exists(path): return os.makedirs(path) pkg = os.path.join(path, '__init__.py') f = open(pkg, 'w') f.close() def load(self): """ Load the plugins. """ sys.path.append(self.ROOT) path = self.abspath() for fn in os.listdir(path): if fn.startswith('__'): continue if not fn.endswith('.py'): continue self.__import(fn) def __import(self, fn): """ Import a module by file name. @param fn: The module file name. @type fn: str """ mod = fn.rsplit('.', 1)[0] imp = '%s.%s' % (self.PLUGINS, mod) try: __import__(imp) log.info('plugin "%s", imported', imp) except: log.error('plugin "%s", import failed', imp, exc_info=True)
lang: Python
proba: 0
commit: b38497b06d52f230f9688e59a107349b4867810e
subject: fix -- output to stdout, not to stderr as by default..
old_file: src/helpers/main_helper.py
new_file: src/helpers/main_helper.py
import logging import os import sys from synthesis.smt_logic import Logic from synthesis.solvers import Z3_Smt_NonInteractive_ViaFiles, Z3_Smt_Interactive from third_party.ansistrm import ColorizingStreamHandler from translation2uct.ltl2automaton import Ltl2UCW def get_root_dir() -> str: #make paths independent of current working directory rel_path = str(os.path.relpath(__file__)) bosy_dir_toks = ['./'] + rel_path.split(os.sep) # abspath returns 'windows' (not cygwin) path root_dir = ('/'.join(bosy_dir_toks[:-1]) + '/../../') # root dir is two levels up compared to helpers/. return root_dir def setup_logging(verbose): level = None if verbose is 0: level = logging.INFO elif verbose >= 1: level = logging.DEBUG handler = ColorizingStreamHandler() handler.setFormatter(logging.Formatter(fmt="%(asctime)-10s%(message)s", datefmt="%H:%M:%S")) handler.stream = sys.stdout root = logging.getLogger() root.addHandler(handler) root.setLevel(level) return logging.getLogger(__name__) class Z3SolverFactory: def __init__(self, smt_tmp_files_prefix, z3_path, logic, logger, is_incremental:bool): self.smt_tmp_files_prefix = smt_tmp_files_prefix self.z3_path = z3_path self.logic = logic self.logger = logger self.is_incremental = is_incremental def create(self, seed=''): if self.is_incremental: solver = Z3_Smt_Interactive(self.logic, self.z3_path, self.logger) else: solver = Z3_Smt_NonInteractive_ViaFiles(self.smt_tmp_files_prefix+seed, self.z3_path, self.logic, self.logger) return solver def create_spec_converter_z3(logger:logging.Logger, logic:Logic, is_incremental:bool, smt_tmp_files_prefix:str=None): """ Return ltl to automaton converter, Z3 solver """ assert smt_tmp_files_prefix or is_incremental from config import z3_path, ltl3ba_path converter = Ltl2UCW(ltl3ba_path) solver_factory = Z3SolverFactory(smt_tmp_files_prefix, z3_path, logic, logger, is_incremental) return converter, solver_factory def remove_files_prefixed(file_prefix:str): """ Remove files from the current directory prefixed with a given prefix """ for f in os.listdir(): if f.startswith(file_prefix): os.remove(f)
import logging import os from synthesis.smt_logic import Logic from synthesis.solvers import Z3_Smt_NonInteractive_ViaFiles, Z3_Smt_Interactive from third_party.ansistrm import ColorizingStreamHandler from translation2uct.ltl2automaton import Ltl2UCW def get_root_dir() -> str: #make paths independent of current working directory rel_path = str(os.path.relpath(__file__)) bosy_dir_toks = ['./'] + rel_path.split(os.sep) # abspath returns 'windows' (not cygwin) path root_dir = ('/'.join(bosy_dir_toks[:-1]) + '/../../') # root dir is two levels up compared to helpers/. return root_dir def setup_logging(verbose): level = None if verbose is 0: level = logging.INFO elif verbose >= 1: level = logging.DEBUG handler = ColorizingStreamHandler() handler.setFormatter(logging.Formatter(fmt="%(asctime)-10s%(message)s", datefmt="%H:%M:%S")) root = logging.getLogger() root.addHandler(handler) root.setLevel(level) return logging.getLogger(__name__) class Z3SolverFactory: def __init__(self, smt_tmp_files_prefix, z3_path, logic, logger, is_incremental:bool): self.smt_tmp_files_prefix = smt_tmp_files_prefix self.z3_path = z3_path self.logic = logic self.logger = logger self.is_incremental = is_incremental def create(self, seed=''): if self.is_incremental: solver = Z3_Smt_Interactive(self.logic, self.z3_path, self.logger) else: solver = Z3_Smt_NonInteractive_ViaFiles(self.smt_tmp_files_prefix+seed, self.z3_path, self.logic, self.logger) return solver def create_spec_converter_z3(logger:logging.Logger, logic:Logic, is_incremental:bool, smt_tmp_files_prefix:str=None): """ Return ltl to automaton converter, Z3 solver """ assert smt_tmp_files_prefix or is_incremental from config import z3_path, ltl3ba_path converter = Ltl2UCW(ltl3ba_path) solver_factory = Z3SolverFactory(smt_tmp_files_prefix, z3_path, logic, logger, is_incremental) return converter, solver_factory def remove_files_prefixed(file_prefix:str): """ Remove files from the current directory prefixed with a given prefix """ for f in os.listdir(): if f.startswith(file_prefix): os.remove(f)
lang: Python
proba: 0
commit: 60f3c4e1bbd25d781cfba5993aac647d937c64c9
subject: add BillSource to public interface
old_file: opencivicdata/models/__init__.py
new_file: opencivicdata/models/__init__.py
# flake8: NOQA from .jurisdiction import Jurisdiction, JurisdictionSession from .division import Division from .people_orgs import ( Organization, OrganizationIdentifier, OrganizationName, OrganizationContactDetail, OrganizationLink, OrganizationSource, Person, PersonIdentifier, PersonName, PersonContactDetail, PersonLink, PersonSource, Post, PostContactDetail, PostLinks, Membership, MembershipContactDetail, MembershipLink ) from .bill import (Bill, BillSummary, BillTitle, BillName, RelatedBill, BillSponsor, BillDocument, BillVersion, BillDocumentLink, BillVersionLink, BillSource)
# flake8: NOQA from .jurisdiction import Jurisdiction, JurisdictionSession from .division import Division from .people_orgs import ( Organization, OrganizationIdentifier, OrganizationName, OrganizationContactDetail, OrganizationLink, OrganizationSource, Person, PersonIdentifier, PersonName, PersonContactDetail, PersonLink, PersonSource, Post, PostContactDetail, PostLinks, Membership, MembershipContactDetail, MembershipLink ) from .bill import (Bill, BillSummary, BillTitle, BillName, RelatedBill, BillSponsor, BillDocument, BillVersion, BillDocumentLink, BillVersionLink)
lang: Python
proba: 0
commit: 5232597d574f7089f592aac0a5f25efd1ff7763a
subject: Update test_blt.py.
old_file: openrcv/test/formats/test_blt.py
new_file: openrcv/test/formats/test_blt.py
from textwrap import dedent from openrcv.formats.blt import BLTFileWriter from openrcv.models import BallotsResource, ContestInput from openrcv.streams import StringResource from openrcv.utiltest.helpers import UnitCase class BLTFileWriterTest(UnitCase): def test(self): contest = ContestInput() contest.name = "Foo" contest.candidates = ['A', 'B', 'C'] contest.seat_count = 1 ballots = [ (2, (2, 1)), (1, (2, )), ] contest.ballots_resource = BallotsResource(ballots) resource = StringResource() writer = BLTFileWriter(resource) writer.write_contest(contest) expected = dedent("""\ 3 1 2 2 1 0 1 2 0 0 "A" "B" "C" "Foo\" """) self.assertEqual(resource.contents, expected)
from textwrap import dedent from openrcv.formats.blt import BLTFileWriter from openrcv.models import BallotsResource, ContestInput from openrcv.utils import StringInfo from openrcv.utiltest.helpers import UnitCase class BLTFileWriterTest(UnitCase): def test(self): contest = ContestInput() contest.name = "Foo" contest.candidates = ['A', 'B', 'C'] contest.seat_count = 1 ballots = [ (2, (2, 1)), (1, (2, )), ] contest.ballots_resource = BallotsResource(ballots) stream_info = StringInfo() writer = BLTFileWriter(stream_info) writer.write_contest(contest) expected = dedent("""\ 3 1 2 2 1 0 1 2 0 0 "A" "B" "C" "Foo\" """) self.assertEqual(stream_info.value, expected)
lang: Python
proba: 0
commit: ff63f077fe68ae18b409598a3860d0abbc7442e3
subject: fix num_topics property
old_file: orangecontrib/text/topics/hdp.py
new_file: orangecontrib/text/topics/hdp.py
from gensim import models from .topics import GensimWrapper class HdpModel(models.HdpModel): def __init__(self, corpus, id2word, **kwargs): # disable fitting during initialization _update = self.update self.update = lambda x: x super().__init__(corpus, id2word, **kwargs) self.update = _update class HdpWrapper(GensimWrapper): name = 'Hdp Model' Model = HdpModel def __init__(self, **kwargs): self.kwargs = kwargs self.model = None def reset_model(self, corpus): self.model = self.Model(corpus=corpus, id2word=corpus.ngrams_dictionary, **self.kwargs) @property def num_topics(self): return self.model.m_lambda.shape[0] if self.model else 0
from gensim import models from .topics import GensimWrapper class HdpModel(models.HdpModel): def __init__(self, corpus, id2word, **kwargs): # disable fitting during initialization _update = self.update self.update = lambda x: x super().__init__(corpus, id2word, **kwargs) self.update = _update class HdpWrapper(GensimWrapper): name = 'Hdp Model' Model = HdpModel def __init__(self, **kwargs): self.kwargs = kwargs def reset_model(self, corpus): self.model = self.Model(corpus=corpus, id2word=corpus.ngrams_dictionary, **self.kwargs) @property def num_topics(self): return self.model.m_lambda.shape[0]
lang: Python
proba: 0.000004
commit: 3a247b72ba39bb2f49099905c435127aea424fe0
subject: Remove unused variable
old_file: lib/backend_common/tests/conftest.py
new_file: lib/backend_common/tests/conftest.py
"""Configure a mock application to run queries against""" import pytest from flask_login import current_user from flask import jsonify from backend_common import create_app, auth, auth0, mocks from os.path import join, dirname @pytest.fixture(scope='module') def app(): """ Build an app with an authenticated dummy api """ # Use unique auth instance config = { 'DEBUG': True, 'OIDC_CLIENT_SECRETS': join(dirname(__file__), 'client_secrets.json'), 'OIDC_RESOURCE_SERVER_ONLY': True } app = create_app('test', extensions=[auth, auth0], config=config) @app.route('/') def index(): return app.response_class('OK') @app.route('/test-auth-login') @auth.auth.require_login def logged_in(): data = { 'auth': True, 'user': current_user.get_id(), # permissions is a set, not serializable 'scopes': list(current_user.permissions), } return jsonify(data) @app.route('/test-auth-scopes') @auth.auth.require_scopes([ ['project/test/A', 'project/test/B'], ['project/test-admin/*'], ]) def scopes(): return app.response_class('Your scopes are ok.') @app.route('/test-auth0-userinfo') @auth0.accept_token() def auth0_token(): return app.response_class('OK') # Add fake swagger url, used by redirect app.api.swagger_url = '/' return app @pytest.yield_fixture(scope='module') def client(app): """ A Flask test client. """ with app.test_client() as client: with mocks.apply_mockups(): yield client
"""Configure a mock application to run queries against""" import pytest from flask_login import current_user from flask import jsonify from backend_common import create_app, auth, auth0, mocks from os.path import join, dirname FAKE_CLIENT_SECRETS = """ { "web": { "auth_uri": "https://auth.mozilla.auth0.com/authorize", "issuer": "https://auth.mozilla.auth0.com/", "client_id": "some-id-string", "client_secret": "my-super-secret", "redirect_uris": [ "https://signoff.shipit.mozilla.com/oidc_callback" ], "token_uri": "https://auth.mozilla.auth0.com/oauth/token", "token_introspection_uri": "https://test/oauth/token", "userinfo_uri": "https://auth.mozilla.auth0.com/userinfo" } } """ @pytest.fixture(scope='module') def app(): """ Build an app with an authenticated dummy api """ # Use unique auth instance config = { 'DEBUG': True, 'OIDC_CLIENT_SECRETS': join(dirname(__file__), 'client_secrets.json'), 'OIDC_RESOURCE_SERVER_ONLY': True } app = create_app('test', extensions=[auth, auth0], config=config) @app.route('/') def index(): return app.response_class('OK') @app.route('/test-auth-login') @auth.auth.require_login def logged_in(): data = { 'auth': True, 'user': current_user.get_id(), # permissions is a set, not serializable 'scopes': list(current_user.permissions), } return jsonify(data) @app.route('/test-auth-scopes') @auth.auth.require_scopes([ ['project/test/A', 'project/test/B'], ['project/test-admin/*'], ]) def scopes(): return app.response_class('Your scopes are ok.') @app.route('/test-auth0-userinfo') @auth0.accept_token() def auth0_token(): return app.response_class('OK') # Add fake swagger url, used by redirect app.api.swagger_url = '/' return app @pytest.yield_fixture(scope='module') def client(app): """ A Flask test client. """ with app.test_client() as client: with mocks.apply_mockups(): yield client
lang: Python
proba: 0.000015
commit: 6cd9af9d1c2f6b7e366c4bcc0b7c7422d4f776be
subject: Add device events hook to app engine app.
old_file: src/appengine/main.py
new_file: src/appengine/main.py
import json import logging import os import random import string import sys from google.appengine.api import users from google.appengine.ext import webapp from google.appengine.ext.webapp import util from google.appengine.ext.webapp.util import login_required from google.appengine.ext import db from google.appengine.ext.db import polymodel sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../third_party')) import flask from flask import request import accounts import model import pusher from common import creds def StaticDir(): return os.path.normpath(os.path.join(os.path.dirname(__file__), '../static')) app = flask.Flask('domics', static_folder=StaticDir()) app.debug = True @app.route('/') def Root(): return flask.send_from_directory(StaticDir(), 'index.html') @app.before_request def BeforeRequest(): user = users.get_current_user() if not user and request.endpoint not in {'/'}: return flaskredirect(users.create_login_url(request.endpoint)) logging.info(user) person = model.Person.get_or_insert(key_name=user.user_id()) def GetUser(): user = users.get_current_user() assert user is not None return model.Person.get_or_insert(key_name=user.user_id()) @app.route('/api/user', methods=['GET']) def GetUserRequest(): user = GetUser() return flask.jsonify(id=user.key().id_or_name(), **db.to_dict(user)) @app.route('/api/channel') def post(chan_name): event = json.loads(self.request.body) print event p = pusher.Pusher(app_id=creds.pusher_app_id, key=creds.pusher_key, secret=creds.pusher_secret) p[chan_name].trigger('event', event) @app.route('/api/device/events', methods=['POST']) def DeviceEvents(): body = json.loads(flask.request.data) logging.info(body) @app.route('/api/device/<int:device_id>', methods=['POST']) def CreateUpdateDevice(device_id): body = json.loads(flask.request.data) device = model.Device.get_by_id(device_id) if not device: device = Devices.CreateDevice(body) else: device.update(body) device.put() @app.route('/api/device/<int:device_id>', methods=['GET']) def GetDevice(device_id): device = model.Device.get_by_id(device_id) if not device: flask.abort(404) return flask.jsonify(**db.to_dict(device)) @app.route('/api/device/<int:device_id>/event') def DeviceEvent(device_id): device = model.Device.get_by_id(device_id) if not device: flask.abort(404) event = json.loads(flask.request.data) device.Event(event)
import json import logging import os import random import string import sys from google.appengine.api import users from google.appengine.ext import webapp from google.appengine.ext.webapp import util from google.appengine.ext.webapp.util import login_required from google.appengine.ext import db from google.appengine.ext.db import polymodel sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../third_party')) import flask from flask import request import accounts import model import pusher from common import creds def StaticDir(): return os.path.normpath(os.path.join(os.path.dirname(__file__), '../static')) app = flask.Flask('domics', static_folder=StaticDir()) app.debug = True @app.route('/') def Root(): return flask.send_from_directory(StaticDir(), 'index.html') @app.before_request def BeforeRequest(): user = users.get_current_user() if not user and request.endpoint not in {'/'}: return flaskredirect(users.create_login_url(request.endpoint)) logging.info(user) person = model.Person.get_or_insert(key_name=user.user_id()) def GetUser(): user = users.get_current_user() assert user is not None return model.Person.get_or_insert(key_name=user.user_id()) @app.route('/api/user', methods=['GET']) def GetUserRequest(): user = GetUser() return flask.jsonify(id=user.key().id_or_name(), **db.to_dict(user)) @app.route('/api/channel') def post(chan_name): event = json.loads(self.request.body) print event p = pusher.Pusher(app_id=creds.pusher_app_id, key=creds.pusher_key, secret=creds.pusher_secret) p[chan_name].trigger('event', event) @app.route('/api/device/<int:device_id>', methods=['POST']) def CreateUpdateDevice(device_id): body = json.loads(flask.request.data) device = model.Device.get_by_id(device_id) if not device: device = Devices.CreateDevice(body) else: device.update(body) device.put() @app.route('/api/device/<int:device_id>', methods=['GET']) def GetDevice(device_id): device = model.Device.get_by_id(device_id) if not device: flask.abort(404) return flask.jsonify(**db.to_dict(device)) @app.route('/api/device/<int:device_id>/event') def DeviceEvent(device_id): device = model.Device.get_by_id(device_id) if not device: flask.abort(404) event = json.loads(flask.request.data) device.Event(event)
lang: Python
proba: 0
commit: f10797b4b39c262fdb8a250386f7c61e7922005a
subject: Check for CLI updates (from private pip by default)
old_file: src/azure/cli/main.py
new_file: src/azure/cli/main.py
import os import sys from datetime import datetime, timedelta from ._argparse import ArgumentParser from ._logging import configure_logging, logger from ._session import Session from ._output import OutputProducer from azure.cli.extensions import event_dispatcher from azure.cli.utils.update_checker import check_for_cli_update, UpdateCheckError # CONFIG provides external configuration options CONFIG = Session() # SESSION provides read-write session variables SESSION = Session() UPDATE_CHECK_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" DAYS_BETWEEN_UPDATE_CHECK = 7 def _should_check_for_update(config_key, now): config_data = CONFIG.get(config_key) if not config_data: return True last_checked = datetime.strptime(config_data['last_checked'], UPDATE_CHECK_DATE_FORMAT) expiry = last_checked + timedelta(days=DAYS_BETWEEN_UPDATE_CHECK) # prev check not expired yet and there wasn't an update available from our previous check return False if expiry >= now and not config_data['update_available'] else True def _save_update_data(config_key, now, update_info): CONFIG[config_key] = { 'last_checked': now.strftime(UPDATE_CHECK_DATE_FORMAT), 'latest_version': str(update_info['latest_version']) if update_info['latest_version'] else None, 'current_version': str(update_info['current_version']) if update_info['current_version'] else None, 'update_available': update_info['update_available'], } def _check_for_cli_update(): config_key = 'update_check_cli' now = datetime.now() if not _should_check_for_update(config_key, now): return # TODO:: CREATE ENV VAR TO SET THIS TO TRUE update_info = check_for_cli_update(private=True) _save_update_data(config_key, now, update_info) if update_info['update_available']: print("Current version of CLI {}. Version {} is available. Update with `az components update self`".format(update_info['current_version'], update_info['latest_version'])) def main(args, file=sys.stdout): #pylint: disable=redefined-builtin CONFIG.load(os.path.expanduser('~/az.json')) SESSION.load(os.path.expanduser('~/az.sess'), max_age=3600) configure_logging(args, CONFIG) from ._locale import install as locale_install locale_install(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'locale', CONFIG.get('locale', 'en-US'))) event_dispatcher.raise_event(event_dispatcher.REGISTER_GLOBAL_PARAMETERS, event_data={'args': args}) parser = ArgumentParser("az") import azure.cli.commands as commands # Find the first noun on the command line and only load commands from that # module to improve startup time. for a in args: if not a.startswith('-'): commands.add_to_parser(parser, a) break else: # No noun found, so load all commands. commands.add_to_parser(parser) try: cmd_result = parser.execute(args) # Commands can return a dictionary/list of results # If they do, we print the results. if cmd_result.result: formatter = OutputProducer.get_formatter(cmd_result.output_format) OutputProducer(formatter=formatter, file=file).out(cmd_result.result) _check_for_cli_update() except RuntimeError as ex: logger.error(ex.args[0]) return ex.args[1] if len(ex.args) >= 2 else -1 except KeyboardInterrupt: return -1
import os import sys from ._argparse import ArgumentParser from ._logging import configure_logging, logger from ._session import Session from ._output import OutputProducer from azure.cli.extensions import event_dispatcher # CONFIG provides external configuration options CONFIG = Session() # SESSION provides read-write session variables SESSION = Session() def main(args, file=sys.stdout): #pylint: disable=redefined-builtin CONFIG.load(os.path.expanduser('~/az.json')) SESSION.load(os.path.expanduser('~/az.sess'), max_age=3600) configure_logging(args, CONFIG) from ._locale import install as locale_install locale_install(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'locale', CONFIG.get('locale', 'en-US'))) event_dispatcher.raise_event(event_dispatcher.REGISTER_GLOBAL_PARAMETERS, event_data={'args': args}) parser = ArgumentParser("az") import azure.cli.commands as commands # Find the first noun on the command line and only load commands from that # module to improve startup time. for a in args: if not a.startswith('-'): commands.add_to_parser(parser, a) break else: # No noun found, so load all commands. commands.add_to_parser(parser) try: cmd_result = parser.execute(args) # Commands can return a dictionary/list of results # If they do, we print the results. if cmd_result.result: formatter = OutputProducer.get_formatter(cmd_result.output_format) OutputProducer(formatter=formatter, file=file).out(cmd_result.result) except RuntimeError as ex: logger.error(ex.args[0]) return ex.args[1] if len(ex.args) >= 2 else -1 except KeyboardInterrupt: return -1
lang: Python
proba: 0
commit: 66608b724112680075d6e41edda7e631da69301e
subject: add stop_id test
old_file: ott/otp_client/tests/tests_ti.py
new_file: ott/otp_client/tests/tests_ti.py
import os import unittest from ott.otp_client.transit_index.routes import Routes from ott.otp_client.transit_index.stops import Stops def get_db(): from gtfsdb import api, util from ott.utils import file_utils dir = file_utils.get_module_dir(Routes) gtfs_file = os.path.join(dir, '..', 'tests', 'data', 'gtfs', 'multi-date-feed.zip') gtfs_file = gtfs_file.replace('c:\\', '/').replace('\\', '/') gtfs_file = "file://{0}".format(gtfs_file) gtfs_file = gtfs_file.replace('\\', '/') url = util.make_temp_sqlite_db_uri('curr') db = api.database_load(gtfs_file, url=url, current_tables=True) return db class TiTest(unittest.TestCase): db = None def setUp(self): if TiTest.db is None: self.db = get_db() TiTest.db = self.db def test_route(self): # test current routes route = Routes.route_factory(self.db.session, 'NEW') self.assertTrue(route.get('id') == 'DTA:NEW') route = Routes.route_factory(self.db.session, 'OLD') self.assertTrue(route.get('id') == 'DTA:OLD') route = Routes.route_factory(self.db.session, 'ALWAYS') self.assertTrue(route.get('id') == 'DTA:ALWAYS') def test_routes_list(self): # test current routes routes = Routes.route_list_factory(self.db.session) self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) # test query of routes via date (slower) routes = Routes.route_list_factory(self.db.session, date="9-15-2018") self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) def test_stop_routes(self): # test current stop querying it's route list routes = Routes.stop_routes_factory(self.db.session, 'DADAN') self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) # test an old stop querying it's route list (assigned to DTA:OLD route) routes = Routes.stop_routes_factory(self.db.session, 'EMSI', date="9-15-2018") self.assertTrue(routes[0].get('id') == 'DTA:OLD') # test an old stop querying it's route list (assigned to DTA:ALWAYS route, but don't show due to OLD stop) routes = Routes.stop_routes_factory(self.db.session, 'OLD', date="9-15-2018") self.assertTrue(len(routes) == 1) self.assertTrue(routes[0].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) # test querying old stop, but with current date ... so no results, since stop is not in CurrentStops routes = Routes.stop_routes_factory(self.db.session, 'OLD') self.assertTrue(len(routes) == 0) def test_stop_query(self): stop = Stops.stop(self.db.session, 'NEW') self.assertTrue(stop) self.assertTrue(stop.routes == '50') stop = Stops.stop(self.db.session, 'OLD', def_val=None) self.assertTrue(stop == None) def test_bbox_stops(self): # import pdb; pdb.set_trace() pass def test_point_stops(self): pass
import os import unittest from ott.otp_client.transit_index.routes import Routes from ott.otp_client.transit_index.stops import Stops def get_db(): from gtfsdb import api, util from ott.utils import file_utils dir = file_utils.get_module_dir(Routes) gtfs_file = os.path.join(dir, '..', 'tests', 'data', 'gtfs', 'multi-date-feed.zip') gtfs_file = gtfs_file.replace('c:\\', '/').replace('\\', '/') gtfs_file = "file://{0}".format(gtfs_file) gtfs_file = gtfs_file.replace('\\', '/') url = util.make_temp_sqlite_db_uri('curr') db = api.database_load(gtfs_file, url=url, current_tables=True) return db class TiTest(unittest.TestCase): db = None def setUp(self): if TiTest.db is None: self.db = get_db() TiTest.db = self.db def test_route(self): # test current routes route = Routes.route_factory(self.db.session, 'NEW') self.assertTrue(route.get('id') == 'DTA:NEW') route = Routes.route_factory(self.db.session, 'OLD') self.assertTrue(route.get('id') == 'DTA:OLD') route = Routes.route_factory(self.db.session, 'ALWAYS') self.assertTrue(route.get('id') == 'DTA:ALWAYS') def test_routes_list(self): # test current routes routes = Routes.route_list_factory(self.db.session) self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) # test query of routes via date (slower) routes = Routes.route_list_factory(self.db.session, date="9-15-2018") self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) def test_stop_routes(self): # test current stop querying it's route list routes = Routes.stop_routes_factory(self.db.session, 'DADAN') self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) # test an old stop querying it's route list (assigned to DTA:OLD route) routes = Routes.stop_routes_factory(self.db.session, 'EMSI', date="9-15-2018") self.assertTrue(routes[0].get('id') == 'DTA:OLD') # test an old stop querying it's route list (assigned to DTA:ALWAYS route, but don't show due to OLD stop) routes = Routes.stop_routes_factory(self.db.session, 'OLD', date="9-15-2018") self.assertTrue(len(routes) == 1) self.assertTrue(routes[0].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) # test querying old stop, but with current date ... so no results, since stop is not in CurrentStops routes = Routes.stop_routes_factory(self.db.session, 'OLD') self.assertTrue(len(routes) == 0) def test_bbox_stops(self): # TODO !!! pass # stops = Stops.stop() #import pdb; pdb.set_trace() #for r in routes: print(r)
lang: Python
proba: 0.000001
commit: b9848aba428c1c7c99a1fef64ff56b940abb9eb9
subject: Remove num option from fetch_recent tweets
old_file: ditto/twitter/management/commands/fetch_twitter_tweets.py
new_file: ditto/twitter/management/commands/fetch_twitter_tweets.py
# coding: utf-8 import argparse from django.core.management.base import BaseCommand, CommandError from ...fetch import FetchTweets class Command(BaseCommand): """fetches tweets from Twitter. Fetch recent tweets since the last fetch, from all accounts: ./manage.py fetch_twitter_tweets --recent Fetch recent tweets, from all accounts: ./manage.py fetch_twitter_tweets --recent Fetch recent tweets since the last fetch, from one account: ./manage.py fetch_twitter_tweets --recent --account=philgyford Fetch 30 most recent favorited tweets, from all accounts: ./manage.py fetch_twitter_tweets --favorites=30 Fetch 30 most recent favorited tweets by one account: ./manage.py fetch_twitter_tweets --favorites=30 --account=philgyford """ help = "Fetches recent and favorited tweets from Twitter" def add_arguments(self, parser): parser.add_argument( '--favorites', action='store', default=False, help='Fetch the most recent favorited tweets, e.g. "10".' ) parser.add_argument( '--recent', action='store_true', default=False, help='Fetch the most recent tweets, e.g. "10". Leave blank for all tweets since last fetch.' ) parser.add_argument( '--account', action='store', default=False, help='Only fetch for one Twitter account.', ) def handle(self, *args, **options): # We might be fetching for a specific account or all (None). account = options['account'] if options['account'] else None; if options['favorites']: results = FetchTweets().fetch_favorites( num=options['favorites'], screen_name=account) elif options['recent']: results = FetchTweets().fetch_recent(screen_name=account) elif options['account']: raise CommandError("Specify --recent or --favorites as well as --account.") else: raise CommandError("Specify --recent or --favorites. Favorites can have an optional number. eg --favorites=20") # results should be a list of dicts. # Each dict is for one account. # Each dict will look like either: # { 'account': 'thescreename', # 'success': True, # 'fetched': 200, # The number of tweets fetched # } # or: # { 'account': 'thescreename', # 'success': False, # 'message': 'There was an error fetching data because blah', #} for result in results: if result['success']: noun = 'tweet' if result['fetched'] == 1 else 'tweets' self.stdout.write('%s: Fetched %s %s' % ( result['account'], result['fetched'], noun)) else: self.stderr.write('%s: Failed to fetch tweets: %s' % ( result['account'], result['message']))
# coding: utf-8 import argparse from django.core.management.base import BaseCommand, CommandError from ...fetch import FetchTweets class Command(BaseCommand): """fetches tweets from Twitter. Fetch recent tweets since the last fetch, from all accounts: ./manage.py fetch_twitter_tweets --recent Fetch 20 recent tweets, from all accounts: ./manage.py fetch_twitter_tweets --recent=20 Fetch recent tweets since the last fetch, from one account: ./manage.py fetch_twitter_tweets --recent --account=philgyford Fetch 30 most recent favorited tweets, from all accounts: ./manage.py fetch_twitter_tweets --favorites=30 Fetch 30 most recent favorited tweets by one account: ./manage.py fetch_twitter_tweets --favorites=30 --account=philgyford """ help = "Fetches recent and favorited tweets from Twitter" def add_arguments(self, parser): parser.add_argument( '--favorites', action='store', default=False, help='Fetch the most recent favorited tweets, e.g. "10".' ) parser.add_argument( '--recent', action='store_true', default=False, help='Fetch the most recent tweets, e.g. "10". Leave blank for all tweets since last fetch.' ) parser.add_argument( '--account', action='store', default=False, help='Only fetch for one Twitter account.', ) def handle(self, *args, **options): # We might be fetching for a specific account or all (None). account = options['account'] if options['account'] else None; if options['favorites']: results = FetchTweets().fetch_favorites( num=options['favorites'], screen_name=account) elif options['recent']: results = FetchTweets().fetch_recent(screen_name=account) elif options['account']: raise CommandError("Specify --recent or --favorites as well as --account.") else: raise CommandError("Specify --recent or --favorites, either with an optional number.") # results should be a list of dicts. # Each dict is for one account. # Each dict will look like either: # { 'account': 'thescreename', # 'success': True, # 'fetched': 200, # The number of tweets fetched # } # or: # { 'account': 'thescreename', # 'success': False, # 'message': 'There was an error fetching data because blah', #} for result in results: if result['success']: noun = 'tweet' if result['fetched'] == 1 else 'tweets' self.stdout.write('%s: Fetched %s %s' % ( result['account'], result['fetched'], noun)) else: self.stderr.write('%s: Failed to fetch tweets: %s' % ( result['account'], result['message']))
lang: Python
proba: 0.000025
commit: 2c6a495351de52fe1de0b36d73f22e777ef3d08c
subject: fix sqlalchemy url with sqlite prefix
old_file: wsgi/todopyramid/todopyramid/__init__.py
new_file: wsgi/todopyramid/todopyramid/__init__.py
import os from pyramid.config import Configurator from sqlalchemy import engine_from_config from .models import ( DBSession, Base, ) from .views import get_user def get_db_session(request): """return thread-local DB session""" return DBSession def main(global_config, **settings): """ This function returns a Pyramid WSGI application. """ # Persona DNS settings['persona.audiences'] = '%(OPENSHIFT_APP_DNS)s' % os.environ # OpenShift Settings settings['sqlalchemy.url'] = 'sqlite:///%(OPENSHIFT_DATA_DIR)s/todopyramid.sqlite' % os.environ engine = engine_from_config(settings, 'sqlalchemy.') DBSession.configure(bind=engine) Base.metadata.bind = engine config = Configurator( settings=settings, root_factory='todopyramid.models.RootFactory', ) includeme(config) # scan modules for config descriptors config.scan() return config.make_wsgi_app() def includeme(config): """we use this concept to include routes and configuration setup in test cases http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/testing.html#creating-integration-tests """ config.add_static_view('static', 'static', cache_max_age=3600) # Misc. views config.add_route('home', '/') config.add_route('about', '/about') # Users config.add_route('account', '/account') # Viewing todo lists config.add_route('todos', '/todos') config.add_route('tags', '/tags') config.add_route('taglist', '/tags/{tag_name}') # AJAX config.add_route('todo', '/todos/{todo_id}') config.add_route('delete.task', '/delete.task/{todo_id}') config.add_route('tags.autocomplete', '/tags.autocomplete') # make DB session a request attribute # http://blog.safaribooksonline.com/2014/01/07/building-pyramid-applications/ config.add_request_method(get_db_session, 'db', reify=True) # Making A User Object Available as a Request Attribute # http://docs.pylonsproject.org/projects/pyramid_cookbook/en/latest/auth/user_object.html config.add_request_method(get_user, 'user', reify=True)
import os from pyramid.config import Configurator from sqlalchemy import engine_from_config from .models import ( DBSession, Base, ) from .views import get_user def get_db_session(request): """return thread-local DB session""" return DBSession def main(global_config, **settings): """ This function returns a Pyramid WSGI application. """ # Persona DNS settings['persona.audiences'] = '%(OPENSHIFT_APP_DNS)s' % os.environ # OpenShift Settings settings['sqlalchemy.url'] = '%(OPENSHIFT_DATA_DIR)s/todopyramid.sqlite' % os.environ engine = engine_from_config(settings, 'sqlalchemy.') DBSession.configure(bind=engine) Base.metadata.bind = engine config = Configurator( settings=settings, root_factory='todopyramid.models.RootFactory', ) includeme(config) # scan modules for config descriptors config.scan() return config.make_wsgi_app() def includeme(config): """we use this concept to include routes and configuration setup in test cases http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/testing.html#creating-integration-tests """ config.add_static_view('static', 'static', cache_max_age=3600) # Misc. views config.add_route('home', '/') config.add_route('about', '/about') # Users config.add_route('account', '/account') # Viewing todo lists config.add_route('todos', '/todos') config.add_route('tags', '/tags') config.add_route('taglist', '/tags/{tag_name}') # AJAX config.add_route('todo', '/todos/{todo_id}') config.add_route('delete.task', '/delete.task/{todo_id}') config.add_route('tags.autocomplete', '/tags.autocomplete') # make DB session a request attribute # http://blog.safaribooksonline.com/2014/01/07/building-pyramid-applications/ config.add_request_method(get_db_session, 'db', reify=True) # Making A User Object Available as a Request Attribute # http://docs.pylonsproject.org/projects/pyramid_cookbook/en/latest/auth/user_object.html config.add_request_method(get_user, 'user', reify=True)
lang: Python
proba: 0.001027
commit: 926df1bc4dee9fc613f0fb31bb8c579943008645
subject: Update plot_label_propagation_digits.py (#22725)
old_file: examples/semi_supervised/plot_label_propagation_digits.py
new_file: examples/semi_supervised/plot_label_propagation_digits.py
""" =================================================== Label Propagation digits: Demonstrating performance =================================================== This example demonstrates the power of semisupervised learning by training a Label Spreading model to classify handwritten digits with sets of very few labels. The handwritten digit dataset has 1797 total points. The model will be trained using all points, but only 30 will be labeled. Results in the form of a confusion matrix and a series of metrics over each class will be very good. At the end, the top 10 most uncertain predictions will be shown. """ # Authors: Clay Woolam <clay@woolam.org> # License: BSD # %% # Data generation # --------------- # # We use the digits dataset. We only use a subset of randomly selected samples. from sklearn import datasets import numpy as np digits = datasets.load_digits() rng = np.random.RandomState(2) indices = np.arange(len(digits.data)) rng.shuffle(indices) # %% # # We selected 340 samples of which only 40 will be associated with a known label. # Therefore, we store the indices of the 300 other samples for which we are not # supposed to know their labels. X = digits.data[indices[:340]] y = digits.target[indices[:340]] images = digits.images[indices[:340]] n_total_samples = len(y) n_labeled_points = 40 indices = np.arange(n_total_samples) unlabeled_set = indices[n_labeled_points:] # %% # Shuffle everything around y_train = np.copy(y) y_train[unlabeled_set] = -1 # %% # Semi-supervised learning # ------------------------ # # We fit a :class:`~sklearn.semi_supervised.LabelSpreading` and use it to predict # the unknown labels. from sklearn.semi_supervised import LabelSpreading from sklearn.metrics import classification_report lp_model = LabelSpreading(gamma=0.25, max_iter=20) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_set] true_labels = y[unlabeled_set] print( "Label Spreading model: %d labeled & %d unlabeled points (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples) ) # %% # Classification report print(classification_report(true_labels, predicted_labels)) # %% # Confusion matrix from sklearn.metrics import ConfusionMatrixDisplay ConfusionMatrixDisplay.from_predictions( true_labels, predicted_labels, labels=lp_model.classes_ ) # %% # Plot the most uncertain predictions # ----------------------------------- # # Here, we will pick and show the 10 most uncertain predictions. from scipy import stats pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T) # %% # Pick the top 10 most uncertain labels uncertainty_index = np.argsort(pred_entropies)[-10:] # %% # Plot import matplotlib.pyplot as plt f = plt.figure(figsize=(7, 5)) for index, image_index in enumerate(uncertainty_index): image = images[image_index] sub = f.add_subplot(2, 5, index + 1) sub.imshow(image, cmap=plt.cm.gray_r) plt.xticks([]) plt.yticks([]) sub.set_title( "predict: %i\ntrue: %i" % (lp_model.transduction_[image_index], y[image_index]) ) f.suptitle("Learning with small amount of labeled data") plt.show()
""" =================================================== Label Propagation digits: Demonstrating performance =================================================== This example demonstrates the power of semisupervised learning by training a Label Spreading model to classify handwritten digits with sets of very few labels. The handwritten digit dataset has 1797 total points. The model will be trained using all points, but only 30 will be labeled. Results in the form of a confusion matrix and a series of metrics over each class will be very good. At the end, the top 10 most uncertain predictions will be shown. """ # Authors: Clay Woolam <clay@woolam.org> # License: BSD import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn import datasets from sklearn.semi_supervised import LabelSpreading from sklearn.metrics import confusion_matrix, classification_report digits = datasets.load_digits() rng = np.random.RandomState(2) indices = np.arange(len(digits.data)) rng.shuffle(indices) X = digits.data[indices[:340]] y = digits.target[indices[:340]] images = digits.images[indices[:340]] n_total_samples = len(y) n_labeled_points = 40 indices = np.arange(n_total_samples) unlabeled_set = indices[n_labeled_points:] # ############################################################################# # Shuffle everything around y_train = np.copy(y) y_train[unlabeled_set] = -1 # ############################################################################# # Learn with LabelSpreading lp_model = LabelSpreading(gamma=0.25, max_iter=20) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_set] true_labels = y[unlabeled_set] cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_) print( "Label Spreading model: %d labeled & %d unlabeled points (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples) ) print(classification_report(true_labels, predicted_labels)) print("Confusion matrix") print(cm) # ############################################################################# # Calculate uncertainty values for each transduced distribution pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T) # ############################################################################# # Pick the top 10 most uncertain labels uncertainty_index = np.argsort(pred_entropies)[-10:] # ############################################################################# # Plot f = plt.figure(figsize=(7, 5)) for index, image_index in enumerate(uncertainty_index): image = images[image_index] sub = f.add_subplot(2, 5, index + 1) sub.imshow(image, cmap=plt.cm.gray_r) plt.xticks([]) plt.yticks([]) sub.set_title( "predict: %i\ntrue: %i" % (lp_model.transduction_[image_index], y[image_index]) ) f.suptitle("Learning with small amount of labeled data") plt.show()
lang: Python
proba: 0
commit: ebddb1005d3eda45f11eef08d83c271a657443b2
subject: Add functional tests for volume set size
old_file: functional/tests/volume/v1/test_volume.py
new_file: functional/tests/volume/v1/test_volume.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from functional.common import test class VolumeTests(test.TestCase): """Functional tests for volume. """ NAME = uuid.uuid4().hex OTHER_NAME = uuid.uuid4().hex HEADERS = ['"Display Name"'] FIELDS = ['display_name'] @classmethod def setUpClass(cls): opts = cls.get_show_opts(cls.FIELDS) raw_output = cls.openstack('volume create --size 1 ' + cls.NAME + opts) expected = cls.NAME + '\n' cls.assertOutput(expected, raw_output) @classmethod def tearDownClass(cls): # Rename test raw_output = cls.openstack( 'volume set --name ' + cls.OTHER_NAME + ' ' + cls.NAME) cls.assertOutput('', raw_output) # Delete test raw_output = cls.openstack('volume delete ' + cls.OTHER_NAME) cls.assertOutput('', raw_output) def test_volume_list(self): opts = self.get_list_opts(self.HEADERS) raw_output = self.openstack('volume list' + opts) self.assertIn(self.NAME, raw_output) def test_volume_show(self): opts = self.get_show_opts(self.FIELDS) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual(self.NAME + "\n", raw_output) def test_volume_properties(self): raw_output = self.openstack( 'volume set --property a=b --property c=d ' + self.NAME) self.assertEqual("", raw_output) opts = self.get_show_opts(["properties"]) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("a='b', c='d'\n", raw_output) raw_output = self.openstack('volume unset --property a ' + self.NAME) self.assertEqual("", raw_output) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("c='d'\n", raw_output) def test_volume_set(self): raw_output = self.openstack( 'volume set --description RAMAC ' + self.NAME) opts = self.get_show_opts(["display_description", "display_name"]) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("RAMAC\n" + self.NAME + "\n", raw_output) def test_volume_set_size(self): raw_output = self.openstack( 'volume set --size 2 ' + self.NAME) opts = self.get_show_opts(["display_name", "size"]) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual(self.NAME + "\n2\n", raw_output)
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from functional.common import test class VolumeTests(test.TestCase): """Functional tests for volume. """ NAME = uuid.uuid4().hex OTHER_NAME = uuid.uuid4().hex HEADERS = ['"Display Name"'] FIELDS = ['display_name'] @classmethod def setUpClass(cls): opts = cls.get_show_opts(cls.FIELDS) raw_output = cls.openstack('volume create --size 1 ' + cls.NAME + opts) expected = cls.NAME + '\n' cls.assertOutput(expected, raw_output) @classmethod def tearDownClass(cls): # Rename test raw_output = cls.openstack( 'volume set --name ' + cls.OTHER_NAME + ' ' + cls.NAME) cls.assertOutput('', raw_output) # Delete test raw_output = cls.openstack('volume delete ' + cls.OTHER_NAME) cls.assertOutput('', raw_output) def test_volume_list(self): opts = self.get_list_opts(self.HEADERS) raw_output = self.openstack('volume list' + opts) self.assertIn(self.NAME, raw_output) def test_volume_show(self): opts = self.get_show_opts(self.FIELDS) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual(self.NAME + "\n", raw_output) def test_volume_properties(self): raw_output = self.openstack( 'volume set --property a=b --property c=d ' + self.NAME) self.assertEqual("", raw_output) opts = self.get_show_opts(["properties"]) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("a='b', c='d'\n", raw_output) raw_output = self.openstack('volume unset --property a ' + self.NAME) self.assertEqual("", raw_output) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("c='d'\n", raw_output) def test_volume_set(self): raw_output = self.openstack( 'volume set --description RAMAC ' + self.NAME) opts = self.get_show_opts(["display_description", "display_name"]) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("RAMAC\n" + self.NAME + "\n", raw_output)
lang: Python
proba: 0.000005
commit: 3284f18168ce274516dc51293376571f5dfada18
subject: copy the hasher from FrozenPhoneNumber
old_file: phonenumber_field/phonenumber.py
new_file: phonenumber_field/phonenumber.py
#-*- coding: utf-8 -*- import phonenumbers from django.core import validators from phonenumbers.phonenumberutil import NumberParseException from django.conf import settings class PhoneNumber(phonenumbers.phonenumber.PhoneNumber): """ A extended version of phonenumbers.phonenumber.PhoneNumber that provides some neat and more pythonic, easy to access methods. This makes using a PhoneNumber instance much easier, especially in templates and such. """ format_map = { 'E164': phonenumbers.PhoneNumberFormat.E164, 'INTERNATIONAL': phonenumbers.PhoneNumberFormat.INTERNATIONAL, 'NATIONAL': phonenumbers.PhoneNumberFormat.NATIONAL, 'RFC3966': phonenumbers.PhoneNumberFormat.RFC3966, } @classmethod def from_string(cls, phone_number, region=None): phone_number_obj = cls() if region is None: region = getattr(settings, 'PHONENUMBER_DEFAULT_REGION', None) phonenumbers.parse(number=phone_number, region=region, keep_raw_input=True, numobj=phone_number_obj) return phone_number_obj def __unicode__(self): format_string = getattr(settings, 'PHONENUMBER_DEFAULT_FORMAT', 'E164') fmt = self.format_map[format_string] if self.is_valid(): return self.format_as(fmt) return self.raw_input def __str__(self): return str(unicode(self)) def original_unicode(self): return super(PhoneNumber, self).__unicode__() def is_valid(self): """ checks whether the number supplied is actually valid """ return phonenumbers.is_valid_number(self) def format_as(self, format): if self.is_valid(): return phonenumbers.format_number(self, format) else: return self.raw_input @property def as_international(self): return self.format_as(phonenumbers.PhoneNumberFormat.INTERNATIONAL) @property def as_e164(self): return self.format_as(phonenumbers.PhoneNumberFormat.E164) @property def as_national(self): return self.format_as(phonenumbers.PhoneNumberFormat.NATIONAL) @property def as_rfc3966(self): return self.format_as(phonenumbers.PhoneNumberFormat.RFC3966) def __len__(self): return len(self.__unicode__()) def __eq__(self, other): if type(other) == PhoneNumber: return self.as_e164 == other.as_e164 else: return super(PhoneNumber, self).__eq__(other) def __hash__(self): return hash((self.country_code, self.national_number, self.extension, self.italian_leading_zero, self.number_of_leading_zeros, self.raw_input, self.country_code_source, self.preferred_domestic_carrier_code)) def to_python(value): if value in validators.EMPTY_VALUES: # None or '' phone_number = None elif value and isinstance(value, basestring): try: phone_number = PhoneNumber.from_string(phone_number=value) except NumberParseException, e: # the string provided is not a valid PhoneNumber. phone_number = PhoneNumber(raw_input=value) elif isinstance(value, phonenumbers.phonenumber.PhoneNumber) and \ not isinstance(value, PhoneNumber): phone_number = PhoneNumber(value) elif isinstance(value, PhoneNumber): phone_number = value return phone_number
#-*- coding: utf-8 -*- import phonenumbers from django.core import validators from phonenumbers.phonenumberutil import NumberParseException from django.conf import settings class PhoneNumber(phonenumbers.phonenumber.PhoneNumber): """ A extended version of phonenumbers.phonenumber.PhoneNumber that provides some neat and more pythonic, easy to access methods. This makes using a PhoneNumber instance much easier, especially in templates and such. """ format_map = { 'E164': phonenumbers.PhoneNumberFormat.E164, 'INTERNATIONAL': phonenumbers.PhoneNumberFormat.INTERNATIONAL, 'NATIONAL': phonenumbers.PhoneNumberFormat.NATIONAL, 'RFC3966': phonenumbers.PhoneNumberFormat.RFC3966, } @classmethod def from_string(cls, phone_number, region=None): phone_number_obj = cls() if region is None: region = getattr(settings, 'PHONENUMBER_DEFAULT_REGION', None) phonenumbers.parse(number=phone_number, region=region, keep_raw_input=True, numobj=phone_number_obj) return phone_number_obj def __unicode__(self): format_string = getattr(settings, 'PHONENUMBER_DEFAULT_FORMAT', 'E164') fmt = self.format_map[format_string] if self.is_valid(): return self.format_as(fmt) return self.raw_input def __str__(self): return str(unicode(self)) def original_unicode(self): return super(PhoneNumber, self).__unicode__() def is_valid(self): """ checks whether the number supplied is actually valid """ return phonenumbers.is_valid_number(self) def format_as(self, format): if self.is_valid(): return phonenumbers.format_number(self, format) else: return self.raw_input @property def as_international(self): return self.format_as(phonenumbers.PhoneNumberFormat.INTERNATIONAL) @property def as_e164(self): return self.format_as(phonenumbers.PhoneNumberFormat.E164) @property def as_national(self): return self.format_as(phonenumbers.PhoneNumberFormat.NATIONAL) @property def as_rfc3966(self): return self.format_as(phonenumbers.PhoneNumberFormat.RFC3966) def __len__(self): return len(self.__unicode__()) def __eq__(self, other): if type(other) == PhoneNumber: return self.as_e164 == other.as_e164 else: return super(PhoneNumber, self).__eq__(other) def __hash__(self): return hash(self.__unicode__()) def to_python(value): if value in validators.EMPTY_VALUES: # None or '' phone_number = None elif value and isinstance(value, basestring): try: phone_number = PhoneNumber.from_string(phone_number=value) except NumberParseException, e: # the string provided is not a valid PhoneNumber. phone_number = PhoneNumber(raw_input=value) elif isinstance(value, phonenumbers.phonenumber.PhoneNumber) and \ not isinstance(value, PhoneNumber): phone_number = PhoneNumber(value) elif isinstance(value, PhoneNumber): phone_number = value return phone_number
Python
0
4b7e6d7df8a447873bc57adfedfb6013b915190c
Fix Node.namespace_uri for py3
cio/node.py
cio/node.py
# coding=utf-8 from __future__ import unicode_literals from .environment import env from .utils.formatters import ContentFormatter from .utils.uri import URI import six empty = object() class Node(object): _formatter = ContentFormatter() def __init__(self, uri, content=None, **meta): self.env = env.state self._uri = [uri, URI(uri)] self._content = [content] self.meta = meta def __repr__(self): return '<Node: %s>' % self.uri def __bytes__(self): content = self.render() if isinstance(content, six.text_type): content = content.encode('utf-8') return content or b'' def __unicode__(self): return self.render() or '' __str__ = __bytes__ if six.PY2 else __unicode__ def render(self, **context): if self.content is not None: if context: return self._formatter.format(self.content, **context) else: return self.content def get_uri(self): return self._uri[-1] def set_uri(self, uri): if uri != self.get_uri(): self._uri.append(URI(uri)) uri = property(get_uri, set_uri) def get_content(self): return self._content[-1] def set_content(self, content): if content != self.get_content(): self._content.append(content) content = property(get_content, set_content) @property def initial(self): return self._content[0] @property def initial_uri(self): return self._uri[0] @property def namespace_uri(self): """ Finds and returns first applied URI of this node that has a namespace. :return str: uri """ try: return next( iter(filter(lambda uri: URI(uri).namespace, self._uri)) ) except StopIteration: return None def for_json(self): return { 'uri': six.text_type(self.uri), 'content': self.content, 'meta': self.meta if self.meta is not None else {} }
# coding=utf-8 from __future__ import unicode_literals from .environment import env from .utils.formatters import ContentFormatter from .utils.uri import URI import six empty = object() class Node(object): _formatter = ContentFormatter() def __init__(self, uri, content=None, **meta): self.env = env.state self._uri = [uri, URI(uri)] self._content = [content] self.meta = meta def __repr__(self): return '<Node: %s>' % self.uri def __bytes__(self): content = self.render() if isinstance(content, six.text_type): content = content.encode('utf-8') return content or b'' def __unicode__(self): return self.render() or '' __str__ = __bytes__ if six.PY2 else __unicode__ def render(self, **context): if self.content is not None: if context: return self._formatter.format(self.content, **context) else: return self.content def get_uri(self): return self._uri[-1] def set_uri(self, uri): if uri != self.get_uri(): self._uri.append(URI(uri)) uri = property(get_uri, set_uri) def get_content(self): return self._content[-1] def set_content(self, content): if content != self.get_content(): self._content.append(content) content = property(get_content, set_content) @property def initial(self): return self._content[0] @property def initial_uri(self): return self._uri[0] @property def namespace_uri(self): """ Finds and returns first applied URI of this node that has a namespace. :return str: uri """ try: return iter( filter(lambda uri: URI(uri).namespace, self._uri) ).next() except StopIteration: return None def for_json(self): return { 'uri': six.text_type(self.uri), 'content': self.content, 'meta': self.meta if self.meta is not None else {} }
Python
0.000765
3a42b4458f85d8f2640c34fce79c9a99a79f5323
Revert "add second db connection to coastdat"
calc_renpass_gis/scenario_reader/db.py
calc_renpass_gis/scenario_reader/db.py
# -*- coding: utf-8 -*- from sqlalchemy import (Column, Float, ForeignKey, Integer, MetaData, String, Table, join, create_engine, ForeignKeyConstraint, Boolean, DateTime, Sequence) from sqlalchemy.orm import sessionmaker, relationship, configure_mappers # from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.automap import automap_base # from geoalchemy2 import Geometry, shape import configparser as cp # from sqlalchemy.sql import func # from sqlalchemy.dialects import postgresql import os.path as path # read configuration file FILENAME = 'config.ini' FILE = path.join(path.expanduser("~"), '.open_eGo', FILENAME) cfg = cp.ConfigParser() cfg.read(FILE) # establish db connection section = 'Connection' conn = create_engine( "postgresql+psycopg2://{user}:{password}@{host}:{port}/{db}".format( user=cfg.get(section, 'username'), password=cfg.get(section, 'password'), host=cfg.get(section, 'host'), port=cfg.get(section, 'port'), db=cfg.get(section, 'db'))) print("Connected to database.") # map schema session = sessionmaker(bind=conn)() meta = MetaData() meta.bind = conn meta.reflect(bind=conn, schema='calc_renpass_gis', only=['renpass_gis_scenario', 'renpass_gis_linear_transformer', 'renpass_gis_source', 'renpass_gis_sink', 'renpass_gis_storage']) # map to classes Base = automap_base(metadata=meta) Base.prepare() Scenario, LinearTransformer, Source, Sink, Storage = \ Base.classes.renpass_gis_scenario,\ Base.classes.renpass_gis_linear_transformer,\ Base.classes.renpass_gis_source,\ Base.classes.renpass_gis_sink,\ Base.classes.renpass_gis_storage
# -*- coding: utf-8 -*- from sqlalchemy import (Column, Float, ForeignKey, Integer, MetaData, String, Table, join, create_engine, ForeignKeyConstraint, Boolean, DateTime, Sequence) from sqlalchemy.orm import sessionmaker, relationship, configure_mappers # from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.automap import automap_base # from geoalchemy2 import Geometry, shape import configparser as cp # from sqlalchemy.sql import func # from sqlalchemy.dialects import postgresql import os.path as path # read configuration file FILENAME = 'config.ini' FILE = path.join(path.expanduser("~"), '.open_eGo', FILENAME) cfg = cp.ConfigParser() cfg.read(FILE) # establish db connection section = 'Connection' conn = create_engine( "postgresql+psycopg2://{user}:{password}@{host}:{port}/{db}".format( user=cfg.get(section, 'username'), password=cfg.get(section, 'password'), host=cfg.get(section, 'host'), port=cfg.get(section, 'port'), db=cfg.get(section, 'db'))) print("Connected to database.") # establish second db connection to # change of init file in .open_ego and Server connetion via ssh required section2 = 'Coastdat' conn2 = create_engine( "postgresql+psycopg2://{user}:{password}@{host}:{port}/{db}".format( user=cfg.get(section2, 'username'), password=cfg.get(section2, 'password'), host=cfg.get(section2, 'host'), port=cfg.get(section2, 'port'), db=cfg.get(section2, 'db'))) print("Connected to database 2.") # map schema session = sessionmaker(bind=conn)() meta = MetaData() meta.bind = conn meta.reflect(bind=conn, schema='calc_renpass_gis', only=['renpass_gis_scenario', 'renpass_gis_linear_transformer', 'renpass_gis_source', 'renpass_gis_sink', 'renpass_gis_storage']) # map to classes Base = automap_base(metadata=meta) Base.prepare() Scenario, LinearTransformer, Source, Sink, Storage = \ Base.classes.renpass_gis_scenario,\ Base.classes.renpass_gis_linear_transformer,\ Base.classes.renpass_gis_source,\ Base.classes.renpass_gis_sink,\ Base.classes.renpass_gis_storage # map schema of coastdat-2 session2 = sessionmaker(bind=conn)() meta2 = MetaData() meta2.bind = conn2 meta2.reflect(bind=conn2, schema='coastdat', only=['cosmoclmgrid', 'datatype', 'located', 'projection', 'scheduled', 'spatial', 'timeseries', 'typified', 'year']) # map to classes of coastdat weather data Coastdat = automap_base(metadata=meta2) Coastdat.prepare()
Python
0
14a7d5305a3e5dfc73834cb6164def4c0706e740
Fix : cmdline GET output
cli/client.py
cli/client.py
# -*- coding: utf-8 -*- # Copyright (c) 2012 theo crevon # # See the file LICENSE for copying permission. from __future__ import absolute_import import zmq from elevator.constants import * from .errors import * from .message import Request, ResponseHeader, Response class Client(object): def __init__(self, *args, **kwargs): self.protocol = kwargs.pop('protocol', 'tcp') self.endpoint = kwargs.pop('endpoint', '127.0.0.1:4141') self.host = "%s://%s" % (self.protocol, self.endpoint) self.context = None self.socket = None self.timeout = kwargs.pop('timeout', 10000) self.db_uid = None self.connect() def setup_socket(self): self.context = zmq.Context() self.socket = self.context.socket(zmq.XREQ) self.socket.setsockopt(zmq.LINGER, 0) self.socket.setsockopt(zmq.RCVTIMEO, self.timeout) self.socket.connect(self.host) def teardown_socket(self): self.socket.close() self.context.term() def connect(self, db_name=None, *args, **kwargs): self.setup_socket() db_name = 'default' if db_name is None else db_name self.db_uid = self.send_cmd(None, 'DBCONNECT', [db_name], *args, **kwargs)[0] self.db_name = db_name return def _format_response(self, req_cmd, res_datas): if req_cmd == "GET": return res_datas[0] return res_datas def send_cmd(self, db_uid, command, arguments, *args, **kwargs): self.socket.send_multipart([Request(db_uid=db_uid, command=command, args=arguments, meta={})],) try: raw_header, raw_response = self.socket.recv_multipart() header = ResponseHeader(raw_header) response = Response(raw_response) if header.status == FAILURE_STATUS: return fail_with(ELEVATOR_ERROR[header.err_code], header.err_msg) except zmq.core.error.ZMQError: # Restore original timeout and raise return fail_with("TimeoutError", "Server did not respond in time") return self._format_response(command, response.datas)
# -*- coding: utf-8 -*- # Copyright (c) 2012 theo crevon # # See the file LICENSE for copying permission. from __future__ import absolute_import import zmq from elevator.constants import * from .errors import * from .message import Request, ResponseHeader, Response class Client(object): def __init__(self, *args, **kwargs): self.protocol = kwargs.pop('protocol', 'tcp') self.endpoint = kwargs.pop('endpoint', '127.0.0.1:4141') self.host = "%s://%s" % (self.protocol, self.endpoint) self.context = None self.socket = None self.timeout = kwargs.pop('timeout', 10000) self.db_uid = None self.connect() def setup_socket(self): self.context = zmq.Context() self.socket = self.context.socket(zmq.XREQ) self.socket.setsockopt(zmq.LINGER, 0) self.socket.setsockopt(zmq.RCVTIMEO, self.timeout) self.socket.connect(self.host) def teardown_socket(self): self.socket.close() self.context.term() def connect(self, db_name=None, *args, **kwargs): self.setup_socket() db_name = 'default' if db_name is None else db_name self.db_uid = self.send_cmd(None, 'DBCONNECT', [db_name], *args, **kwargs)[0] self.db_name = db_name return def send_cmd(self, db_uid, command, arguments, *args, **kwargs): self.socket.send_multipart([Request(db_uid=db_uid, command=command, args=arguments, meta={})],) try: raw_header, raw_response = self.socket.recv_multipart() header = ResponseHeader(raw_header) response = Response(raw_response) if header.status == FAILURE_STATUS: return fail_with(ELEVATOR_ERROR[header.err_code], header.err_msg) except zmq.core.error.ZMQError: # Restore original timeout and raise return fail_with("TimeoutError", "Server did not respond in time") return response.datas
Python
0.000002
900b37fee45db789b413d55b497d87992c3dab00
Remove Welcome! Flash
bakery/gitauth/views.py
bakery/gitauth/views.py
# coding: utf-8 # Copyright 2013 The Font Bakery Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # See AUTHORS.txt for the list of Authors and LICENSE.txt for the License. from flask import (Blueprint, request, flash, g, session, redirect, url_for, current_app) from .models import User from ..extensions import db, github from flask.ext.babel import gettext as _ gitauth = Blueprint('gitauth', __name__, url_prefix='/auth') @gitauth.after_request def after_request(response): return response @gitauth.route('/me') def me(): """ Bypass Github authentication and login as 1st user in the database. Usage: Visit /auth/me to log in as the 1st user in the database, to work offline as that user. To allow a github username to work in this single user mode, change GITAUTH_LOGIN_LIST config property. You need to login using GitHub at least once before this will work. This only works if server is in debug mode. """ if current_app.debug: # pylint:disable-msg=E1101 user = User.get_or_init('offline') if user.id and user.login in current_app.config['GITAUTH_LOGIN_LIST']: session['user_id'] = user.id g.user = user flash(_('Welcome to single user mode!')) return redirect(url_for('frontend.splash')) @gitauth.route('/login') def login(): if session.get('user_id', None) is None: redirect_uri = url_for('.authorized', next=request.args.get('next') or request.referrer or None, _external=True) params = {'redirect_uri': redirect_uri, 'scope': 'user:email,public_repo'} return redirect(github.get_authorize_url(**params)) else: flash(_('Already logged in')) return redirect(url_for('frontend.splash')) @gitauth.route('/callback') def authorized(next = None): next_url = request.args.get('next') or url_for('frontend.splash') if not 'code' in request.args: flash(_('You did not authorize the request')) return redirect(next_url) redirect_uri = url_for('.authorized', _external=True) data = dict(code=request.args['code'], redirect_uri=redirect_uri) auth = github.get_auth_session(data=data) token = auth.access_token me = auth.get('user').json() user = User.get_or_init(me['login']) # if user.id is None: # new record isn't saved yet # flash(_('Welcome to Bakery.')) # update user data user.name = me.get('name', me.get('login')) user.github_access_token = token user.avatar = me['gravatar_id'] user.email = me['email'] # save to database db.session.add(user) db.session.commit() session['user_id'] = user.id g.user = user return redirect(next_url) @gitauth.route('/logout') def logout(): session.pop('user_id', None) return redirect(url_for('frontend.splash'))
# coding: utf-8 # Copyright 2013 The Font Bakery Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # See AUTHORS.txt for the list of Authors and LICENSE.txt for the License. from flask import (Blueprint, request, flash, g, session, redirect, url_for, current_app) from .models import User from ..extensions import db, github from flask.ext.babel import gettext as _ gitauth = Blueprint('gitauth', __name__, url_prefix='/auth') @gitauth.after_request def after_request(response): return response @gitauth.route('/me') def me(): """ Bypass Github authentication and login as 1st user in the database. Usage: Visit /auth/me to log in as the 1st user in the database, to work offline as that user. To allow a github username to work in this single user mode, change GITAUTH_LOGIN_LIST config property. You need to login using GitHub at least once before this will work. This only works if server is in debug mode. """ if current_app.debug: # pylint:disable-msg=E1101 user = User.get_or_init('offline') if user.id and user.login in current_app.config['GITAUTH_LOGIN_LIST']: session['user_id'] = user.id g.user = user flash(_('Welcome to single user mode!')) return redirect(url_for('frontend.splash')) @gitauth.route('/login') def login(): if session.get('user_id', None) is None: redirect_uri = url_for('.authorized', next=request.args.get('next') or request.referrer or None, _external=True) params = {'redirect_uri': redirect_uri, 'scope': 'user:email,public_repo'} return redirect(github.get_authorize_url(**params)) else: flash(_('Already logged in')) return redirect(url_for('frontend.splash')) @gitauth.route('/callback') def authorized(next = None): next_url = request.args.get('next') or url_for('frontend.splash') if not 'code' in request.args: flash(_('You did not authorize the request')) return redirect(next_url) redirect_uri = url_for('.authorized', _external=True) data = dict(code=request.args['code'], redirect_uri=redirect_uri) auth = github.get_auth_session(data=data) token = auth.access_token me = auth.get('user').json() user = User.get_or_init(me['login']) # if user.id is None: # new record isn't saved yet # flash(_('Welcome to Bakery.')) # update user data user.name = me.get('name', me.get('login')) user.github_access_token = token user.avatar = me['gravatar_id'] user.email = me['email'] # save to database db.session.add(user) db.session.commit() session['user_id'] = user.id g.user = user flash(_('Welcome!')) return redirect(next_url) @gitauth.route('/logout') def logout(): session.pop('user_id', None) return redirect(url_for('frontend.splash'))
Python
0.000008
49152781ecbfb4f51707e6e54641301038eba80f
set varchar length
king/DataPoint.py
king/DataPoint.py
from sqlalchemy import create_engine from sqlalchemy import Column, Integer, PickleType, Boolean, String, DateTime from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from datetime import datetime engine = create_engine('mysql+pymysql://ucb_268_measure:ucb_268_measure@data.cnobwey0khau.us-west-2.rds.amazonaws.com:3306/mydb', echo=False) Base = declarative_base(bind=engine) Session = sessionmaker(bind=engine) class DataPoint(Base): __tablename__ = 'data' id = Column(Integer, primary_key=True) timestamp = Column(DateTime) name1 = Column(String(length=200)) name2 = Column(String(length=200)) target1 = Column(PickleType) target2 = Column(PickleType) start = Column(DateTime) end = Column(DateTime) pings = Column(PickleType) address = Column(PickleType) test_point = Column(String(length=200)) success = Column(Boolean) def __init__(self, name1, name2, target1, target2, start, end, pings, address, test_point, success): self.timestamp = datetime.now() self.name1 = name1 self.name2 = name2 self.target1 = target1 self.target2 = target2 self.start = start self.end = end self.pings = pings self.address = address self.test_point = test_point self.success = success Base.metadata.create_all()
from sqlalchemy import create_engine from sqlalchemy import Column, Integer, PickleType, Boolean, String, DateTime from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from datetime import datetime engine = create_engine('mysql+pymysql://ucb_268_measure:ucb_268_measure@data.cnobwey0khau.us-west-2.rds.amazonaws.com:3306/mydb', echo=False) Base = declarative_base(bind=engine) Session = sessionmaker(bind=engine) class DataPoint(Base): __tablename__ = 'data' id = Column(Integer, primary_key=True) timestamp = Column(DateTime) name1 = Column(String) name2 = Column(String) target1 = Column(PickleType) target2 = Column(PickleType) start = Column(DateTime) end = Column(DateTime) pings = Column(PickleType) address = Column(PickleType) test_point = Column(String) success = Column(Boolean) def __init__(self, name1, name2, target1, target2, start, end, pings, address, test_point, success): self.timestamp = datetime.now() self.name1 = name1 self.name2 = name2 self.target1 = target1 self.target2 = target2 self.start = start self.end = end self.pings = pings self.address = address self.test_point = test_point self.success = success Base.metadata.create_all()
Python
0.000068
93db3543a576ccde905fc77d7c3ad825f6a100a1
change threshold
misc_scripts/compare_bounds.py
misc_scripts/compare_bounds.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os, sys, re from fontTools.ttLib import TTFont from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen class ConcordanceInfo(object): def __init__(self): self.glyphs = 0 self.concordant_glyphs = 0 self.maxdiff = 0 self.maxdiff_gname = None def update(self, diff, gname): self.glyphs += 1 if diff <= 1: self.concordant_glyphs += 1 elif diff > self.maxdiff: self.maxdiff = round(diff, 2) self.maxdiff_gname = gname def calc_bounds(font, gname, penclass): gs = font.getGlyphSet() g = gs[gname] pen = penclass(gs) g.draw(pen) return [round(v, 2) for v in pen.bounds] if pen.bounds is not None else None def bounds_differ(bounds1, bounds2): for v1, v2 in zip(bounds1, bounds2): if abs(v1 - v2) >= 1: return True return False def compare_bounds(): font1_path = sys.argv[1] font2_path = sys.argv[2] font1 = TTFont(font1_path, fontNumber=0) font2 = TTFont(font2_path, fontNumber=0) for gname in font1.getGlyphOrder(): bounds1 = calc_bounds(font1, gname, BoundsPen) bounds2 = calc_bounds(font2, gname, BoundsPen) if bounds1 is None or bounds2 is None: if bounds1 is not None or bounds2 is not None: print "[{}] {} {}".format(gname, bounds1, bounds2) elif bounds_differ(bounds1, bounds2): print "[{}] {} {}".format(gname, bounds1, bounds2) def main(): compare_bounds() if __name__ == "__main__": main()
#! /usr/bin/env python # -*- coding: utf-8 -*- import os, sys, re from fontTools.ttLib import TTFont from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen class ConcordanceInfo(object): def __init__(self): self.glyphs = 0 self.concordant_glyphs = 0 self.maxdiff = 0 self.maxdiff_gname = None def update(self, diff, gname): self.glyphs += 1 if diff <= 1: self.concordant_glyphs += 1 elif diff > self.maxdiff: self.maxdiff = round(diff, 2) self.maxdiff_gname = gname def calc_bounds(font, gname, penclass): gs = font.getGlyphSet() g = gs[gname] pen = penclass(gs) g.draw(pen) return [round(v, 2) for v in pen.bounds] if pen.bounds is not None else None def bounds_differ(bounds1, bounds2): for v1, v2 in zip(bounds1, bounds2): if abs(v1 - v2) > 1: return True return False def compare_bounds(): font1_path = sys.argv[1] font2_path = sys.argv[2] font1 = TTFont(font1_path, fontNumber=0) font2 = TTFont(font2_path, fontNumber=0) for gname in font1.getGlyphOrder(): bounds1 = calc_bounds(font1, gname, BoundsPen) bounds2 = calc_bounds(font2, gname, BoundsPen) if bounds1 is None or bounds2 is None: if bounds1 is not None or bounds2 is not None: print "[{}] {} {}".format(gname, bounds1, bounds2) elif bounds_differ(bounds1, bounds2): print "[{}] {} {}".format(gname, bounds1, bounds2) def main(): compare_bounds() if __name__ == "__main__": main()
Python
0.000001
df88bc165e0a505b07c84aea4a29bf3c048895ac
replace OSError with FileNotFoundError when appropriate
dwi/hdf5.py
dwi/hdf5.py
"""Support for HDF5 files.""" from collections import OrderedDict import numpy as np import h5py from dwi.types import Path DEFAULT_DSETNAME = 'default' DEFAULT_DSETPARAMS = dict( compression='gzip', # Smaller, compatible. # compression='lzf', # Faster. shuffle=True, # Rearrange bytes for better compression. fletcher32=True, # Flether32 checksum. track_times=False, # Dataset creation timestamps. ) def iterable(x): """Tell whether an object is iterable or not.""" try: iter(x) except TypeError: return False else: return True def write_hdf5(filename, array, attrs, fillvalue=None, dsetname=DEFAULT_DSETNAME): """Write an array with attributes into a newly created, compressed HDF5 file. """ f = h5py.File(filename, 'w') dset = f.create_dataset(dsetname, data=array, fillvalue=fillvalue, **DEFAULT_DSETPARAMS) write_attrs(dset, attrs) f.close() def read_hdf5(filename, ondisk=False, dsetname=DEFAULT_DSETNAME): """Read an array with attributes from an HDF5 file. With parameter "ondisk" True it will not be read into memory.""" try: f = h5py.File(filename, 'r') # On missing file, h5py raises OSError without errno (or filename). except OSError as e: if not Path(filename).exists(): raise FileNotFoundError(2, 'No such file or directory', filename) e.filename = e.filename or filename raise except IOError as e: e.filename = e.filename or filename raise if dsetname not in f: # No dataset of given name, try the one there is. try: dsetname, = f.keys() except ValueError: raise ValueError('Ambiguous content: {}'.format(filename)) dset = f[dsetname] if ondisk: array = dset else: array = np.array(dset) attrs = read_attrs(dset) if not ondisk: f.close() return array, attrs def create_hdf5(filename, shape, dtype, fillvalue=None, dsetname=DEFAULT_DSETNAME): """Create a HDF5 file and return the dataset for manipulation. Attributes and the file object can be accessed by dset.attrs and dset.file. """ f = h5py.File(filename, 'w') dset = f.create_dataset(dsetname, shape, dtype=dtype, fillvalue=fillvalue, **DEFAULT_DSETPARAMS) return dset def write_attrs(dset, attrs): """Update dataset attributes from dictionary. This is a wrapper for conversion needs, like string encoding and None to nan. """ for k, v in attrs.items(): dset.attrs[k] = convert_value_write(v) def read_attrs(dset): """Read attributes from dataset. This is a wrapper for conversion needs.""" return OrderedDict((k, convert_value_read(v)) for k, v in dset.attrs.items()) def convert_value_write(v): """HDF5 doesn't understand None objects, so replace any with nan values.""" def convert_item(x): """Convert sequence item.""" if x is None: return np.nan if isinstance(x, str): return x.encode() return x if iterable(v) and not isinstance(v, str): v = [convert_item(x) for x in v] return v def convert_value_read(value): """Convert attribute value from bytes to string.""" if isinstance(value, bytes): return value.decode() elif not np.isscalar(value) and np.issubsctype(value, np.bytes_): return value.astype(np.str_) return value
"""Support for HDF5 files.""" from collections import OrderedDict import numpy as np import h5py import dwi.util DEFAULT_DSETNAME = 'default' DEFAULT_DSETPARAMS = dict( compression='gzip', # Smaller, compatible. # compression='lzf', # Faster. shuffle=True, # Rearrange bytes for better compression. fletcher32=True, # Flether32 checksum. track_times=False, # Dataset creation timestamps. ) def iterable(x): """Tell whether an object is iterable or not.""" try: iter(x) except TypeError: return False else: return True def write_hdf5(filename, array, attrs, fillvalue=None, dsetname=DEFAULT_DSETNAME): """Write an array with attributes into a newly created, compressed HDF5 file. """ f = h5py.File(filename, 'w') dset = f.create_dataset(dsetname, data=array, fillvalue=fillvalue, **DEFAULT_DSETPARAMS) write_attrs(dset, attrs) f.close() def read_hdf5(filename, ondisk=False, dsetname=DEFAULT_DSETNAME): """Read an array with attributes from an HDF5 file. With parameter "ondisk" True it will not be read into memory.""" try: f = h5py.File(filename, 'r') except IOError as e: if e.filename is None: e.filename = filename raise if dsetname not in f: # No dataset of given name, try the one there is. try: dsetname, = f.keys() except ValueError: raise ValueError('Ambiguous content: {}'.format(filename)) dset = f[dsetname] if ondisk: array = dset else: array = np.array(dset) attrs = read_attrs(dset) if not ondisk: f.close() return array, attrs def create_hdf5(filename, shape, dtype, fillvalue=None, dsetname=DEFAULT_DSETNAME): """Create a HDF5 file and return the dataset for manipulation. Attributes and the file object can be accessed by dset.attrs and dset.file. """ f = h5py.File(filename, 'w') dset = f.create_dataset(dsetname, shape, dtype=dtype, fillvalue=fillvalue, **DEFAULT_DSETPARAMS) return dset def write_attrs(dset, attrs): """Update dataset attributes from dictionary. This is a wrapper for conversion needs, like string encoding and None to nan. """ for k, v in attrs.items(): dset.attrs[k] = convert_value_write(v) def read_attrs(dset): """Read attributes from dataset. This is a wrapper for conversion needs.""" return OrderedDict((k, convert_value_read(v)) for k, v in dset.attrs.items()) def convert_value_write(v): """HDF5 doesn't understand None objects, so replace any with nan values.""" def convert_item(x): """Convert sequence item.""" if x is None: return np.nan if isinstance(x, str): return x.encode() return x if iterable(v) and not isinstance(v, str): v = [convert_item(x) for x in v] return v def convert_value_read(value): """Convert attribute value from bytes to string.""" if isinstance(value, bytes): return value.decode() elif not np.isscalar(value) and np.issubsctype(value, np.bytes_): return value.astype(np.str_) return value
Python
0.000039
5d1f9d3eaa27c0abf555fe3c79e9c11f9f7167ae
Fix redundant warning about 'file_name' config value.
foliant/pandoc.py
foliant/pandoc.py
"""Wrapper around Pandoc. Used by builder.""" from __future__ import print_function import subprocess from . import gitutils PANDOC_PATH = "pandoc" FROM_PARAMS = "-f markdown_strict+simple_tables+multiline_tables+grid_tables+pipe_tables+table_captions+fenced_code_blocks+line_blocks+definition_lists+all_symbols_escapable+strikeout+superscript+subscript+lists_without_preceding_blankline+implicit_figures+raw_tex+citations+tex_math_dollars+header_attributes+auto_identifiers+startnum+footnotes+inline_notes+fenced_code_attributes+intraword_underscores+escaped_line_breaks" LATEX_PARAMS = "--no-tex-ligatures --smart --normalize --listings --latex-engine=xelatex" def generate_variable(key, value): """Generate a ``--variable key=value`` entry.""" return '--variable "%s"="%s"' % (key, value) def generate_command(params, output_file, src_file, cfg): """Generate the entire Pandoc command with params to invoke.""" params = ["-o " + output_file, FROM_PARAMS, LATEX_PARAMS, params] for key, value in cfg.items(): if key in ("title", "second_title", "year", "date", "title_page", "tof", "toc"): params.append(generate_variable(key, value)) elif key == "template": params.append('--template="%s.tex"' % value) elif key == "lang": if value in ("russian", "english"): params.append(generate_variable(value, "true")) else: params.append(generate_variable("russian", "true")) elif key == "version": if value == "auto": params.append(generate_variable(key, gitutils.get_version())) else: params.append(generate_variable(key, value)) elif key == "company": if value in ("restream", "undev"): params.append(generate_variable(value, "true")) else: raise RuntimeError("Unsupported company: %s" % value) elif key in ("type", "alt_doc_type"): if value: params.append(generate_variable(key, value)) elif key == "filters": for filt in value: params.append("-F %s" % filt) elif key == "file_name": pass else: print("Unsupported config key: %s" % key) return ' '.join([PANDOC_PATH] + params + [src_file]) def run(command, src_dir): """Invoke the Pandoc executable with the generated params.""" print("Baking output... ", end='') try: proc = subprocess.check_output( command, cwd=src_dir, stderr=subprocess.PIPE ) print("Done!") except subprocess.CalledProcessError as e: quit(e.stderr.decode()) def to_pdf(src_file, output_file, tmp_path, cfg): """Convert Markdown to PDF via Pandoc.""" pandoc_command = generate_command( "-t latex", output_file, src_file, cfg ) run(pandoc_command, tmp_path) def to_docx(src_file, output_file, tmp_path, cfg): """Convert Markdown to Docx via Pandoc.""" pandoc_command = generate_command( '--reference-docx="ref.docx"', output_file, src_file, cfg ) run(pandoc_command, tmp_path) def to_tex(src_file, output_file, tmp_path, cfg): """Convert Markdown to TeX via Pandoc.""" pandoc_command = generate_command( "-t latex", output_file, src_file, cfg ) run(pandoc_command, tmp_path)
"""Wrapper around Pandoc. Used by builder.""" from __future__ import print_function import subprocess from . import gitutils PANDOC_PATH = "pandoc" FROM_PARAMS = "-f markdown_strict+simple_tables+multiline_tables+grid_tables+pipe_tables+table_captions+fenced_code_blocks+line_blocks+definition_lists+all_symbols_escapable+strikeout+superscript+subscript+lists_without_preceding_blankline+implicit_figures+raw_tex+citations+tex_math_dollars+header_attributes+auto_identifiers+startnum+footnotes+inline_notes+fenced_code_attributes+intraword_underscores+escaped_line_breaks" LATEX_PARAMS = "--no-tex-ligatures --smart --normalize --listings --latex-engine=xelatex" def generate_variable(key, value): """Generate a ``--variable key=value`` entry.""" return '--variable "%s"="%s"' % (key, value) def generate_command(params, output_file, src_file, cfg): """Generate the entire Pandoc command with params to invoke.""" params = ["-o " + output_file, FROM_PARAMS, LATEX_PARAMS, params] for key, value in cfg.items(): if key in ("title", "second_title", "year", "date", "title_page", "tof", "toc"): params.append(generate_variable(key, value)) elif key == "template": params.append('--template="%s.tex"' % value) elif key == "lang": if value in ("russian", "english"): params.append(generate_variable(value, "true")) else: params.append(generate_variable("russian", "true")) elif key == "version": if value == "auto": params.append(generate_variable(key, gitutils.get_version())) else: params.append(generate_variable(key, value)) elif key == "company": if value in ("restream", "undev"): params.append(generate_variable(value, "true")) else: raise RuntimeError("Unsupported company: %s" % value) elif key in ("type", "alt_doc_type"): if value: params.append(generate_variable(key, value)) elif key == "filters": for filt in value: params.append("-F %s" % filt) else: print("Unsupported config key: %s" % key) return ' '.join([PANDOC_PATH] + params + [src_file]) def run(command, src_dir): """Invoke the Pandoc executable with the generated params.""" print("Baking output... ", end='') try: proc = subprocess.check_output( command, cwd=src_dir, stderr=subprocess.PIPE ) print("Done!") except subprocess.CalledProcessError as e: quit(e.stderr.decode()) def to_pdf(src_file, output_file, tmp_path, cfg): """Convert Markdown to PDF via Pandoc.""" pandoc_command = generate_command( "-t latex", output_file, src_file, cfg ) run(pandoc_command, tmp_path) def to_docx(src_file, output_file, tmp_path, cfg): """Convert Markdown to Docx via Pandoc.""" pandoc_command = generate_command( '--reference-docx="ref.docx"', output_file, src_file, cfg ) run(pandoc_command, tmp_path) def to_tex(src_file, output_file, tmp_path, cfg): """Convert Markdown to TeX via Pandoc.""" pandoc_command = generate_command( "-t latex", output_file, src_file, cfg ) run(pandoc_command, tmp_path)
Python
0
a324051e28d359a1591dff48fa4bbb32c3caf44a
add loaders __doc__
src/loaders/__init__.py
src/loaders/__init__.py
# -*- coding: utf-8 -*- # Copyright (c) 2018 shmilee ''' This is the subpackage ``loaders`` of gdpy3. It contains two kinds of loaders. 1. ``RawLoader``, get by :func:`get_rawloader`. ``RawLoader`` has attributes :attr:`base.BaseRawLoader.path``, :attr:`base.BaseRawLoader.filenames` and methods :meth:`base.BaseRawLoader.keys`, :meth:`base.BaseRawLoader.get`, :meth:`base.BaseLoader.find`, :meth:`base.BaseLoader.all_in_loader`. 2. ``PckLoader``, get by :func:`get_pckloader`. ``PckLoader`` has attributes :attr:`base.BasePckLoader.path``, :attr:`base.BasePckLoader.datakeys`, :attr:`base.BasePckLoader.datagroups`, :attr:`base.BasePckLoader.description`, :attr:`base.BasePckLoader.cache`, and methods :meth:`base.BasePckLoader.keys`, :meth:`base.BasePckLoader.get`, :meth:`base.BasePckLoader.get_many`, :meth:`base.BaseLoader.find`, :meth:`base.BaseLoader.all_in_loader`. ''' import os from ..glogger import getGLogger from . import base __all__ = ['get_rawloader', 'is_rawloader', 'get_pckloader', 'is_pckloader'] log = getGLogger('L') rawloader_names = ['DirRawLoader', 'TarRawLoader', 'SftpRawLoader'] pckloader_names = ['CachePckLoader', 'NpzPckLoader', 'Hdf5PckLoader'] pckloader_types = ['.cache', '.npz', '.hdf5'] def get_rawloader(path, filenames_filter=None): ''' Given a path, return a raw loader instance. Raises IOError if path not found, ValueError if path type not supported. Notes ----- *path* types: 1. local directory 2. tar archive file 3. directory in remote SSH server format: 'sftp://username[:passwd]@host[:port]##remote/path' ''' path = str(path) if os.path.isdir(path): from .dirraw import DirRawLoader loader = DirRawLoader(path, filenames_filter=filenames_filter) elif os.path.isfile(path): import tarfile if tarfile.is_tarfile(path): from .tarraw import TarRawLoader loader = TarRawLoader(path, filenames_filter=filenames_filter) else: raise ValueError( "Unsupported File '%s'! Try with an tar archive!" % path) elif path.startswith('sftp://'): from .sftpraw import SftpRawLoader loader = SftpRawLoader(path, filenames_filter=filenames_filter) else: raise IOError("Can't find path '%s'!" % path) return loader def is_rawloader(obj): ''' Return True if obj is a raw loader instance, else return False. ''' return isinstance(obj, base.BaseRawLoader) def get_pckloader(path, datagroups_filter=None): ''' Given a file path or dict cache, return a pickled loader instance. Raises IOError if path not found, ValueError if path type not supported. Notes ----- *path* types: 1. '.npz' file 2. '.hdf5' file 3. dict object ''' if isinstance(path, str) and os.path.isfile(path): ext = os.path.splitext(path)[1] if ext == '.npz': from .npzpck import NpzPckLoader loader = NpzPckLoader(path, datagroups_filter=datagroups_filter) elif ext == '.hdf5': from .hdf5pck import Hdf5PckLoader loader = Hdf5PckLoader(path, datagroups_filter=datagroups_filter) else: raise ValueError('Unsupported Filetype: "%s"! ' 'Did you mean one of: "%s"?' % (ext, ', '.join(pckloader_types[1:]))) elif isinstance(path, dict): from .cachepck import CachePckLoader loader = CachePckLoader(path, datagroups_filter=datagroups_filter) else: raise IOError("Can't find path '%s'!" % path) return loader def is_pckloader(obj): ''' Return True if obj is a pickled loader instance, else return False. ''' return isinstance(obj, base.BasePckLoader)
# -*- coding: utf-8 -*- # Copyright (c) 2018 shmilee import os from ..glogger import getGLogger from . import base __all__ = ['get_rawloader', 'is_rawloader', 'get_pckloader', 'is_pckloader'] log = getGLogger('L') rawloader_names = ['DirRawLoader', 'TarRawLoader', 'SftpRawLoader'] pckloader_names = ['CachePckLoader', 'NpzPckLoader', 'Hdf5PckLoader'] pckloader_types = ['.cache', '.npz', '.hdf5'] def get_rawloader(path, filenames_filter=None): ''' Given a path, return a raw loader instance. Raises IOError if path not found, ValueError if path type not supported. Notes ----- *path* types: 1. local directory 2. tar archive file 3. directory in remote SSH server format: 'sftp://username[:passwd]@host[:port]##remote/path' ''' path = str(path) if os.path.isdir(path): from .dirraw import DirRawLoader loader = DirRawLoader(path, filenames_filter=filenames_filter) elif os.path.isfile(path): import tarfile if tarfile.is_tarfile(path): from .tarraw import TarRawLoader loader = TarRawLoader(path, filenames_filter=filenames_filter) else: raise ValueError( "Unsupported File '%s'! Try with an tar archive!" % path) elif path.startswith('sftp://'): from .sftpraw import SftpRawLoader loader = SftpRawLoader(path, filenames_filter=filenames_filter) else: raise IOError("Can't find path '%s'!" % path) return loader def is_rawloader(obj): ''' Return True if obj is a raw loader instance, else return False. ''' return isinstance(obj, base.BaseRawLoader) def get_pckloader(path, datagroups_filter=None): ''' Given a file or cache path, return a pickled loader instance. Raises IOError if path not found, ValueError if path type not supported. Notes ----- *path* types: 1. '.npz' file 2. '.hdf5' file 3. dict object ''' if isinstance(path, str) and os.path.isfile(path): ext = os.path.splitext(path)[1] if ext == '.npz': from .npzpck import NpzPckLoader loader = NpzPckLoader(path, datagroups_filter=datagroups_filter) elif ext == '.hdf5': from .hdf5pck import Hdf5PckLoader loader = Hdf5PckLoader(path, datagroups_filter=datagroups_filter) else: raise ValueError('Unsupported Filetype: "%s"! ' 'Did you mean one of: "%s"?' % (ext, ', '.join(pckloader_types[1:]))) elif isinstance(path, dict): from .cachepck import CachePckLoader loader = CachePckLoader(path, datagroups_filter=datagroups_filter) else: raise IOError("Can't find path '%s'!" % path) return loader def is_pckloader(obj): ''' Return True if obj is a pickled loader instance, else return False. ''' return isinstance(obj, base.BasePckLoader)
Python
0.000002
bfe6c752aa2a95cc28109f4819cf6a9e88e7ee4b
remove unnecessary comments
stores/views.py
stores/views.py
from newt.views import JSONRestView from common.response import json_response from django.conf import settings import json store_adapter = __import__(settings.NEWT_CONFIG['ADAPTERS']['STORES'], globals(), locals(), ['adapter'], -1) import logging logger = logging.getLogger(__name__) class StoresRootView(JSONRestView): def get(self, request): logger.debug("Entering %s:%s" % (self.__class__.__name__, __name__)) return store_adapter.get_stores() def post(self, request): # Temporary: creates a store with a random name if no name is specified import uuid import random import string rand_key = random.choice(string.ascii_letters) + str(uuid.uuid4())[0:8] while(rand_key in store_adapter.get_stores()): rand_key = str(uuid.uuid4())[0:8] if request.POST.get("data", False): return store_adapter.create_store(rand_key, [request.POST.get("data")]) else: return store_adapter.create_store(rand_key) class StoresView(JSONRestView): def get(self, request, store_name): try: if request.GET.get("query", False): data = store_adapter.query_store(store_name, request.GET.get("query")) else: data = store_adapter.get_store_contents(store_name) except Exception as e: logger.error("Invalid store requested: %s", store_name) return json_response(status="ERROR", status_code=500, error=e.args[0]) return data def post(self, request, store_name): if store_name in store_adapter.get_stores(): if request.POST.get("data", False): data = request.POST.get("data") return store_adapter.store_insert(store_name, data) else: return json_response(status="ERROR", status_code=500, error="No data recieved.") else: if request.POST.get("data", False): return store_adapter.create_store(store_name, [request.POST.get("data")]) else: return store_adapter.create_store(store_name) def put(self, request, store_name): data = json.loads(request.read()) store_adapter.update(store_name, data) def delete(self, request, store_name): return store_adapter.delete_store(store_name) class StoresObjView(JSONRestView): def get(self, request, store_name, obj_id): try: return store_adapter.store_get_obj(store_name, obj_id) except Exception as e: return json_response(status="ERROR", status_code="500", error=e.args[0]) def put(self, request, store_name, obj_id): from django.http import QueryDict body = QueryDict(request.body) if body.get("data", False): data = body.get("data") return store_adapter.store_update(store_name, obj_id, data) else: return json_response(status="ERROR", status_code=500, error="No data recieved.")
from newt.views import JSONRestView from common.response import json_response from django.conf import settings import json store_adapter = __import__(settings.NEWT_CONFIG['ADAPTERS']['STORES'], globals(), locals(), ['adapter'], -1) import logging logger = logging.getLogger(__name__) class StoresRootView(JSONRestView): def get(self, request): logger.debug("Entering %s:%s" % (self.__class__.__name__, __name__)) return store_adapter.get_stores() def post(self, request): # Temporary: creates a store with a random name # TODO: create a store with a given name import uuid import random import string rand_key = random.choice(string.ascii_letters) + str(uuid.uuid4())[0:8] while(rand_key in store_adapter.get_stores()): rand_key = str(uuid.uuid4())[0:8] # TODO: parse post body for initial datas if request.POST.get("data", False): return store_adapter.create_store(rand_key, [request.POST.get("data")]) else: return store_adapter.create_store(rand_key) class StoresView(JSONRestView): def get(self, request, store_name): try: if request.GET.get("query", False): data = store_adapter.query_store(store_name, request.GET.get("query")) else: data = store_adapter.get_store_contents(store_name) except Exception as e: logger.error("Invalid store requested: %s", store_name) return json_response(status="ERROR", status_code=500, error=e.args[0]) return data def post(self, request, store_name): if store_name in store_adapter.get_stores(): if request.POST.get("data", False): data = request.POST.get("data") return store_adapter.store_insert(store_name, data) else: return json_response(status="ERROR", status_code=500, error="No data recieved.") else: if request.POST.get("data", False): return store_adapter.create_store(store_name, [request.POST.get("data")]) else: return store_adapter.create_store(store_name) def put(self, request, store_name): data = json.loads(request.read()) store_adapter.update(store_name, data) def delete(self, request, store_name): return store_adapter.delete_store(store_name) class StoresObjView(JSONRestView): def get(self, request, store_name, obj_id): try: return store_adapter.store_get_obj(store_name, obj_id) except Exception as e: return json_response(status="ERROR", status_code="500", error=e.args[0]) def put(self, request, store_name, obj_id): from django.http import QueryDict body = QueryDict(request.body) if body.get("data", False): data = body.get("data") return store_adapter.store_update(store_name, obj_id, data) else: return json_response(status="ERROR", status_code=500, error="No data recieved.")
Python
0
d97bb53f74c11b654f506f7e14342e7b3582a4c4
Fix duplicate test method names.
eliot/tests/test_api.py
eliot/tests/test_api.py
""" Tests for the public API exposed by L{eliot}. """ from __future__ import unicode_literals from unittest import TestCase from .._output import Logger import eliot class PublicAPITests(TestCase): """ Tests for the public API. """ def test_addDestination(self): """ L{eliot.addDestination} adds destinations to the L{Destinations} attached to L{Logger}. """ self.assertEqual(eliot.addDestination, Logger._destinations.add) def test_removeDestination(self): """ L{eliot.addDestination} removes destinations from the L{Destinations} attached to L{Logger}. """ self.assertEqual(eliot.removeDestination, Logger._destinations.remove) def test_addGlobalFields(self): """ L{eliot.addGlobalFields} calls the corresponding method on the L{Destinations} attached to L{Logger}. """ self.assertEqual(eliot.addGlobalFields, Logger._destinations.addGlobalFields) class PEP8Tests(TestCase): """ Tests for the PEP 8 variant of the the public API. """ def test_add_destination(self): """ L{eliot.addDestionation} is the same as L{eliot.add_destination}. """ self.assertIs(eliot.add_destination, eliot.addDestination) def test_remove_destination(self): """ L{eliot.removeDestionation} is the same as L{eliot.remove_destination}. """ self.assertIs(eliot.remove_destination, eliot.removeDestination) def test_add_global_fields(self): """ L{eliot.add_global_fields} is the same as L{eliot.addGlobalFields}. """ self.assertIs(eliot.add_global_fields, eliot.addGlobalFields) def test_write_traceback(self): """ L{eliot.writeTraceback} is the same as L{eliot.write_traceback}. """ self.assertIs(eliot.write_traceback, eliot.writeTraceback) def test_write_failure(self): """ L{eliot.writeFailure} is the same as L{eliot.write_failure}. """ self.assertIs(eliot.write_failure, eliot.writeFailure) def test_start_task(self): """ L{eliot.startTask} is the same as L{eliot.start_task}. """ self.assertIs(eliot.start_task, eliot.startTask) def test_start_action(self): """ L{eliot.startAction} is the same as L{eliot.start_action}. """ self.assertIs(eliot.start_action, eliot.startAction)
""" Tests for the public API exposed by L{eliot}. """ from __future__ import unicode_literals from unittest import TestCase from .._output import Logger import eliot class PublicAPITests(TestCase): """ Tests for the public API. """ def test_addDestination(self): """ L{eliot.addDestination} adds destinations to the L{Destinations} attached to L{Logger}. """ self.assertEqual(eliot.addDestination, Logger._destinations.add) def test_removeDestination(self): """ L{eliot.addDestination} removes destinations from the L{Destinations} attached to L{Logger}. """ self.assertEqual(eliot.removeDestination, Logger._destinations.remove) def test_removeDestination(self): """ L{eliot.addGlobalFields} calls the corresponding method on the L{Destinations} attached to L{Logger}. """ self.assertEqual(eliot.addGlobalFields, Logger._destinations.addGlobalFields) class PEP8Tests(TestCase): """ Tests for the PEP 8 variant of the the public API. """ def test_add_destination(self): """ L{eliot.addDestionation} is the same as L{eliot.add_destination}. """ self.assertIs(eliot.add_destination, eliot.addDestination) def test_remove_destination(self): """ L{eliot.removeDestionation} is the same as L{eliot.remove_destination}. """ self.assertIs(eliot.remove_destination, eliot.removeDestination) def test_add_global_fields(self): """ L{eliot.add_global_fields} is the same as L{eliot.addGlobalFields}. """ self.assertIs(eliot.add_global_fields, eliot.addGlobalFields) def test_write_traceback(self): """ L{eliot.writeTraceback} is the same as L{eliot.write_traceback}. """ self.assertIs(eliot.write_traceback, eliot.writeTraceback) def test_write_failure(self): """ L{eliot.writeFailure} is the same as L{eliot.write_failure}. """ self.assertIs(eliot.write_failure, eliot.writeFailure) def test_start_task(self): """ L{eliot.startTask} is the same as L{eliot.start_task}. """ self.assertIs(eliot.start_task, eliot.startTask) def test_start_action(self): """ L{eliot.startAction} is the same as L{eliot.start_action}. """ self.assertIs(eliot.start_action, eliot.startAction)
Python
0.000013
be0b85f50b8cd4f7323d5c6def5c388c7a8fad36
fix webhook
webhooks.py
webhooks.py
from http.server import HTTPServer, BaseHTTPRequestHandler import json import os import shutil from episode import GitRepo, Episode WORK_DIR = "repo" class WebHookHandler(BaseHTTPRequestHandler): def do_POST(self): event_type = self.headers.get('X-Github-Event') if event_type != 'push': return length = int(self.headers.get('Content-Length')) http_body = self.rfile.read(length).decode('utf-8') data = json.loads(http_body) ref = data.get('ref') if ref != 'refs/heads/source': return # todo: pull repo & branch to source & build & push to master repo_addr = data.get("repository")['ssh_url'] print('repo', repo_addr) repo = GitRepo(repo_address=repo_addr, dst=WORK_DIR) repo.clone() os.chdir(WORK_DIR) repo.checkout_or_create("source") Episode().deploy() os.chdir("..") shutil.rmtree(WORK_DIR) self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() # self.wfile.write(bytes("Hello World", "utf-8")) return if __name__ == "__main__": port = 8000 Handler = WebHookHandler httpd = HTTPServer(("0.0.0.0", port), Handler) print("Serving at http://127.0.0.1:{port}".format(port=port)) httpd.serve_forever()
from http.server import HTTPServer, BaseHTTPRequestHandler import json import os import shutil from episode import GitRepo, Episode WORK_DIR = "repo" class WebHookHandler(BaseHTTPRequestHandler): def do_POST(self): event_type = self.headers.get('X-Github-Event') if event_type != 'push': return length = int(self.headers.get('Content-Length')) http_body = self.rfile.read(length).decode('utf-8') data = json.loads(http_body) ref = data.get('ref') if ref != 'refs/heads/source': return # todo: pull repo & branch to source & build & push to master repo_addr = data.get("repository")['ssh_url'] print('repo', repo_addr) repo = GitRepo(repo_address=repo_addr, dst=WORK_DIR) repo.clone() os.chdir(WORK_DIR) repo.checkout_or_create("source") Episode().deploy() shutil.rmtree(WORK_DIR) self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() # self.wfile.write(bytes("Hello World", "utf-8")) return if __name__ == "__main__": port = 8000 Handler = WebHookHandler httpd = HTTPServer(("0.0.0.0", port), Handler) print("Serving at http://127.0.0.1:{port}".format(port=port)) httpd.serve_forever()
Python
0.000017
4787c9e1b895b5ce0bdd0fedeb537a971fab5933
add management command to benchmark get_direct_ccz
corehq/apps/app_manager/management/commands/benchmark_direct_ccz.py
corehq/apps/app_manager/management/commands/benchmark_direct_ccz.py
from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import json from django.core.management import BaseCommand from corehq.apps.app_manager.dbaccessors import get_app from corehq.apps.app_manager.management.commands.benchmark_build_times import Timer from corehq.apps.app_manager.views.cli import get_direct_ccz class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument( 'domain_app_id_pairs', help='A JSON list where each element has the format [<domain>, <app_id>]', type=json.loads, ) def handle(self, domain_app_id_pairs, **options): for (domain, app_id) in domain_app_id_pairs: print("%s: %s" % (domain, app_id)) with Timer(): app = get_app(domain, app_id) get_direct_ccz(domain, app, None, None)
Python
0.000001
db356499cf079ec9284baf16817d3c3054d8688d
Add source_added tests
tests/integration/states/test_chocolatey.py
tests/integration/states/test_chocolatey.py
# -*- coding: utf-8 -*- """ Tests for the Chocolatey State """ # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform # Import Salt Testing libs from tests.support.case import ModuleCase from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin from tests.support.unit import skipIf log = logging.getLogger(__name__) __testcontext__ = {} @destructiveTest @skipIf(not salt.utils.platform.is_windows(), "Windows Specific Test") class ChocolateyTest(ModuleCase, SaltReturnAssertsMixin): """ Chocolatey State Tests These tests are destructive as the install and remove software """ def setUp(self): """ Ensure that Chocolatey is installed """ super(ChocolateyTest, self).setUp() if "chocolatey" not in __testcontext__: self.run_function("chocolatey.bootstrap") __testcontext__["chocolatey"] = True def test_chocolatey(self): """ Test the following: - `chocolatey.installed` - `chocolatey.upgraded` - `chocolatey.uninstalled` - `chocolatey.source_added` """ # If this assert fails, we need to find new targets, this test needs to # be able to test successful installation of packages, so this package # needs to NOT be installed before we run the states below target = "firefox" pre_version = "52.0.2" upg_version = "57.0.2" src_name = "test_repo" src_location = "https://repo.test.com/chocolatey" log.debug("Making sure %s is not installed", target) self.assertFalse(self.run_function("chocolatey.version", [target])) try: #################################################### # Test `chocolatey.installed` #################################################### # Install the package log.debug("Testing chocolatey.installed") ret = self.run_state( "chocolatey.installed", name=target, version=pre_version ) self.assertSaltTrueReturn(ret) # Verify the package is installed log.debug("Verifying install success") ret = self.run_function("chocolatey.version", [target]) self.assertEqual(ret, {"Firefox": [pre_version]}) #################################################### # Test `chocolatey.upgraded` #################################################### # Upgrade the package log.debug("Testing chocolatey.upgraded") ret = self.run_state( "chocolatey.upgraded", name=target, version=upg_version ) self.assertSaltTrueReturn(ret) # Verify the package is upgraded log.debug("Verifying upgrade success") ret = self.run_function("chocolatey.version", [target]) self.assertEqual(ret, {"Firefox": [upg_version]}) #################################################### # Test `chocolatey.uninstalled` #################################################### # uninstall the package log.debug("Testing chocolatey.uninstalled") ret = self.run_state("chocolatey.uninstalled", name=target) self.assertSaltTrueReturn(ret) # Verify the package is uninstalled log.debug("Verifying uninstall success") ret = self.run_function("chocolatey.version", [target]) self.assertEqual(ret, {}) #################################################### # Test `chocolatey.source_added` #################################################### # add the source log.debug("Testing chocolatey.source_added") ret = self.run_state("chocolatey.source_added", name=src_name, source_location=src_location) self.assertSaltTrueReturn(ret) # Verify the source is added log.debug("Verifying source_add success") ret = self.run_function("chocolatey.list_sources") self.assertTrue(source_name in ret.keys()) finally: # Always uninstall log.debug("Uninstalling %s", target) self.run_function("chocolatey.uninstall", [target])
# -*- coding: utf-8 -*- """ Tests for the Chocolatey State """ # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform # Import Salt Testing libs from tests.support.case import ModuleCase from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin from tests.support.unit import skipIf log = logging.getLogger(__name__) __testcontext__ = {} @destructiveTest @skipIf(not salt.utils.platform.is_windows(), "Windows Specific Test") class ChocolateyTest(ModuleCase, SaltReturnAssertsMixin): """ Chocolatey State Tests These tests are destructive as the install and remove software """ def setUp(self): """ Ensure that Chocolatey is installed """ super(ChocolateyTest, self).setUp() if "chocolatey" not in __testcontext__: self.run_function("chocolatey.bootstrap") __testcontext__["chocolatey"] = True def test_chocolatey(self): """ Test the following: - `chocolatey.installed` - `chocolatey.upgraded` - `chocolatey.uninstalled` """ # If this assert fails, we need to find new targets, this test needs to # be able to test successful installation of packages, so this package # needs to NOT be installed before we run the states below target = "firefox" pre_version = "52.0.2" upg_version = "57.0.2" log.debug("Making sure %s is not installed", target) self.assertFalse(self.run_function("chocolatey.version", [target])) try: #################################################### # Test `chocolatey.installed` #################################################### # Install the package log.debug("Testing chocolatey.installed") ret = self.run_state( "chocolatey.installed", name=target, version=pre_version ) self.assertSaltTrueReturn(ret) # Verify the package is installed log.debug("Verifying install success") ret = self.run_function("chocolatey.version", [target]) self.assertEqual(ret, {"Firefox": [pre_version]}) #################################################### # Test `chocolatey.upgraded` #################################################### # Upgrade the package log.debug("Testing chocolatey.upgraded") ret = self.run_state( "chocolatey.upgraded", name=target, version=upg_version ) self.assertSaltTrueReturn(ret) # Verify the package is upgraded log.debug("Verifying upgrade success") ret = self.run_function("chocolatey.version", [target]) self.assertEqual(ret, {"Firefox": [upg_version]}) #################################################### # Test `chocolatey.uninstalled` #################################################### # uninstall the package log.debug("Testing chocolatey.uninstalled") ret = self.run_state("chocolatey.uninstalled", name=target) self.assertSaltTrueReturn(ret) # Verify the package is uninstalled log.debug("Verifying uninstall success") ret = self.run_function("chocolatey.version", [target]) self.assertEqual(ret, {}) finally: # Always uninstall log.debug("Uninstalling %s", target) self.run_function("chocolatey.uninstall", [target])
Python
0.000001
585317f3a03f55f6487a98446d4a9279f91714d2
Add a test of the linearity of scalar multiplication
tests/test_vector2_scalar_multiplication.py
tests/test_vector2_scalar_multiplication.py
import pytest # type: ignore from hypothesis import given from hypothesis.strategies import floats from utils import vectors from ppb_vector import Vector2 @pytest.mark.parametrize("x, y, expected", [ (Vector2(6, 1), 0, Vector2(0, 0)), (Vector2(6, 1), 2, Vector2(12, 2)), (Vector2(0, 0), 3, Vector2(0, 0)), (Vector2(-1.5, 2.4), -2, Vector2(3.0, -4.8)), (Vector2(1, 2), 0.1, Vector2(0.1, 0.2)) ]) def test_scalar_multiplication(x, y, expected): assert x * y == expected @given( x=floats(min_value=-1e75, max_value=1e75), y=floats(min_value=-1e75, max_value=1e75), v=vectors(max_magnitude=1e150) ) def test_scalar_associative(x: float, y: float, v: Vector2): left = (x * y) * v right = x * (y * v) assert left.isclose(right) @given( l=floats(min_value=-1e150, max_value=1e150), x=vectors(max_magnitude=1e150), y=vectors(max_magnitude=1e150), ) def test_scalar_linear(l: float, x: Vector2, y: Vector2): assert (l * (x + y)).isclose(l*x + l*y)
import pytest # type: ignore from hypothesis import given from hypothesis.strategies import floats from utils import vectors from ppb_vector import Vector2 @pytest.mark.parametrize("x, y, expected", [ (Vector2(6, 1), 0, Vector2(0, 0)), (Vector2(6, 1), 2, Vector2(12, 2)), (Vector2(0, 0), 3, Vector2(0, 0)), (Vector2(-1.5, 2.4), -2, Vector2(3.0, -4.8)), (Vector2(1, 2), 0.1, Vector2(0.1, 0.2)) ]) def test_scalar_multiplication(x, y, expected): assert x * y == expected @given( x=floats(min_value=-1e75, max_value=1e75), y=floats(min_value=-1e75, max_value=1e75), v=vectors(max_magnitude=1e150) ) def test_scalar_associative(x: float, y: float, v: Vector2): left = (x * y) * v right = x * (y * v) assert left.isclose(right)
Python
0.002828
cecbb5951ef806c5b4b7b6894c05e4d086730fb0
order fy descending (newest on top)
base_ordered/ordered.py
base_ordered/ordered.py
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2012 Camptocamp Austria (<http://www.camptocamp.at>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from osv import fields, osv class purchase_order(osv.osv): _inherit = "purchase.order" _order = 'date_order desc, id desc' purchase_order() class sale_order(osv.osv): _inherit = "sale.order" _order = 'date_order desc, id desc' sale_order() class stock_picking(osv.osv): _inherit = "stock.picking" _order = 'date desc, id desc' stock_picking() class stock_move(osv.osv): _inherit = "stock.move" _order = 'date desc, id desc' stock_move() class account_invoice(osv.osv): _inherit = "account.invoice" _order = 'date_invoice desc, id desc' account_invoice() class account_fiscalyear(osv.osv): _inherit = "account.fiscalyear" _order = 'date_start desc, id desc' account_fiscalyear()
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2012 Camptocamp Austria (<http://www.camptocamp.at>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from osv import fields, osv class purchase_order(osv.osv): _inherit = "purchase.order" _order = 'date_order desc, id desc' purchase_order() class sale_order(osv.osv): _inherit = "sale.order" _order = 'date_order desc, id desc' sale_order() class stock_picking(osv.osv): _inherit = "stock.picking" _order = 'date desc, id desc' stock_picking() class stock_move(osv.osv): _inherit = "stock.move" _order = 'date desc, id desc' stock_move() class account_invoice(osv.osv): _inherit = "account.invoice" _order = 'date_invoice desc, id desc' account_invoice()
Python
0
2fedc43c50bd933924046b6f79633687a452116a
bump version
src/mrfitty/__init__.py
src/mrfitty/__init__.py
__version__ = '0.12.0'
__version__ = '0.11.0'
Python
0
f48601ceacbf9d05412aa5f45b6d4f9bb46d266e
update the script for GMC
utilities/scripts/correct_momentum_conservation.py
utilities/scripts/correct_momentum_conservation.py
#!/usr/bin/env python import sys from numpy import * from os import path def parse_data(data_line): data = data_line.split() data = list(map(int, data[:2])) + list(map(float, data[2:])) return(data) OSCAR_file_path = str(sys.argv[1]) OSCAR_file = open(OSCAR_file_path, 'r') output_file = open('OSCAR_w_GMC.DAT', 'w') a = 2 # w_i = pT^a/<pT^a> line_count = 0 Nparticle = 0 event_header_line = 3 iev = 0 event_data = [] for temp_line in OSCAR_file: if line_count < 3: output_file.write(temp_line) if line_count == event_header_line: output_file.write(temp_line) Nparticle = int(temp_line.split()[1]) event_header_line += Nparticle + 1 iev += 1 print("analysis event %d with %d particles" % (iev, Nparticle)) event_data = [] if line_count > (event_header_line - Nparticle - 1): data = parse_data(temp_line) event_data.append(data) if line_count > 3 and line_count == event_header_line - 1: event_data = array(event_data) CM_Px = sum(event_data[:, 2]) CM_Py = sum(event_data[:, 3]) CM_Pz = sum(event_data[:, 4]) mean_Px = CM_Px/Nparticle mean_Py = CM_Py/Nparticle mean_Pz = CM_Pz/Nparticle pT_array = sqrt(event_data[:, 2]**2. + event_data[:, 3]**2.) mean_pT = mean(pT_array) mean_pTsq = mean(pT_array**2.) weight = (pT_array**a)/mean(pT_array**a) total_E = sum(event_data[:, 5]) print("total energy = %g GeV" % total_E) print("<px> = %g GeV, <py> = %g GeV, <pz> = %g GeV" % (mean_Px, mean_Py, mean_Pz)) print("<pT> = %g GeV, <pT^2> = %g GeV^2" % (mean_pT, mean_pTsq)) event_data[:, 2] -= mean_Px*weight event_data[:, 3] -= mean_Py*weight event_data[:, 4] -= mean_Pz*weight event_data[:, 5] = sqrt(event_data[:, 2]**2. + event_data[:, 3]**2. + event_data[:, 4]**2. + event_data[:, 6]**2.) for iline in range(Nparticle): output_file.write( "%10d %10d %24.16e %24.16e %24.16e %24.16e %24.16e %24.16e %24.16e %24.16e %24.16e\n" % (event_data[iline, 0], event_data[iline, 1], event_data[iline, 2], event_data[iline, 3], event_data[iline, 4], event_data[iline, 5], event_data[iline, 6], event_data[iline, 7], event_data[iline, 8], event_data[iline, 9], event_data[iline, 10])) line_count += 1 OSCAR_file.close() output_file.close()
#!/usr/bin/env python3 import sys from numpy import * from os import path def parse_data(data_line): data = data_line.split() data = list(map(int, data[:2])) + list(map(float, data[2:])) return(data) OSCAR_file_path = str(sys.argv[1]) OSCAR_file = open(OSCAR_file_path, 'r') output_file = open('OSCAR_w_GMC.DAT', 'w') line_count = 0 Nparticle = 0 event_header_line = 3 iev = 0 event_data = [] for temp_line in OSCAR_file: if line_count < 3: output_file.write(temp_line) if line_count == event_header_line: output_file.write(temp_line) Nparticle = int(temp_line.split()[1]) event_header_line += Nparticle + 1 iev += 1 print("analysis event %d with %d particles" % (iev, Nparticle)) event_data = [] if line_count > (event_header_line - Nparticle - 1): data = parse_data(temp_line) event_data.append(data) if line_count > 3 and line_count == event_header_line - 1: event_data = array(event_data) CM_Px = sum(event_data[:, 2]) CM_Py = sum(event_data[:, 3]) CM_Pz = sum(event_data[:, 4]) total_E = sum(event_data[:, 5]) print("total energy = %g GeV" % total_E) print("correction per particle: delta_px = %g GeV, " "delta_py = %g GeV, delta_pz = %g GeV" % (CM_Px/Nparticle, CM_Py/Nparticle, CM_Pz/Nparticle)) event_data[:, 2] -= CM_Px/Nparticle event_data[:, 3] -= CM_Py/Nparticle event_data[:, 4] -= CM_Pz/Nparticle event_data[:, 5] = sqrt(event_data[:, 2]**2. + event_data[:, 3]**2. + event_data[:, 4]**2. + event_data[:, 6]**2.) for iline in range(Nparticle): output_file.write( "%d %d %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n" % (event_data[iline, 0], event_data[iline, 1], event_data[iline, 2], event_data[iline, 3], event_data[iline, 4], event_data[iline, 5], event_data[iline, 6], event_data[iline, 7], event_data[iline, 8], event_data[iline, 9], event_data[iline, 10])) line_count += 1 OSCAR_file.close() output_file.close()
Python
0
f5e2e7cbb494fc111efcf4abd5c744091e9ee8aa
Fix function name error
module/submodules/graphs.py
module/submodules/graphs.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim: ai ts=4 sts=4 et sw=4 nu import time from shinken.log import logger from .metamodule import MetaModule class GraphsMetaModule(MetaModule): _functions = ['get_graph_uris'] _custom_log = "You should configure the module 'graphite' in your broker and the module 'ui-graphite' in webui.cfg file to be able to display graphs." def get_graph_uris(self, elt, graphstart=None, graphend=None, duration=None, source='detail'): ''' Aggregate the get_graph_uris of all the submodules. The source parameter defines the source of the calling: Are we displaying graphs for the element detail page (detail), or a widget in the dashboard (dashboard) ? If duration is not None, we consider it as a number of seconds to graph and we call the module get_relative_graphs_uri If get_relative_graphs_uri is not a module function we compute graphstart and graphend and we call we call the module get_graphs_uri If graphstart and graphend are not None, we call the module get_graphs_uri ''' uris = [] for mod in self.modules: if not duration: uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source)) else: f = getattr(mod, 'get_relative_graph_uris', None) if f and callable(f): uris.extend(f(elt, duration, source)) else: graphend = time.time() graphstart = graphend - duration uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source)) logger.debug("[WebUI] Got graphs: %s", uris) return uris
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim: ai ts=4 sts=4 et sw=4 nu import time from shinken.log import logger from .metamodule import MetaModule class GraphsMetaModule(MetaModule): _functions = ['get_graph_uris'] _custom_log = "You should configure the module 'graphite' in your broker and the module 'ui-graphite' in webui.cfg file to be able to display graphs." def get_graph_uris(self, elt, graphstart=None, graphend=None, duration=None, source='detail'): ''' Aggregate the get_graph_uris of all the submodules. The source parameter defines the source of the calling: Are we displaying graphs for the element detail page (detail), or a widget in the dashboard (dashboard) ? If duration is not None, we consider it as a number of seconds to graph and we call the module get_relative_graphs_uri If get_relative_graphs_uri is not a module function we compute graphstart and graphend and we call we call the module get_graphs_uri If graphstart and graphend are not None, we call the module get_graphs_uri ''' uris = [] for mod in self.modules: if not duration: uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source)) else: f = getattr(mod, 'get_relative_graphs_uri', None) if f and callable(f): uris.extend(f(elt, duration, source)) else: graphend = time.time() graphstart = graphend - duration uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source)) logger.debug("[WebUI] Got graphs: %s", uris) return uris
Python
0.002384
e69542c01959e7cf874c6ca1ae5c94d0c9a0ba1f
Fix tarball URLs for htslib (#5993)
var/spack/repos/builtin/packages/htslib/package.py
var/spack/repos/builtin/packages/htslib/package.py
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Htslib(AutotoolsPackage): """C library for high-throughput sequencing data formats.""" homepage = "https://github.com/samtools/htslib" version('1.6', 'd6fd14e208aca7e08cbe9072233d0af9') version('1.4', '2a22ff382654c033c40e4ec3ea880050') version('1.3.1', '16d78f90b72f29971b042e8da8be6843') version('1.2', '64026d659c3b062cfb6ddc8a38e9779f') depends_on('zlib') depends_on('bzip2', when="@1.4:") depends_on('xz', when="@1.4:") depends_on('m4', when="@1.2") depends_on('autoconf', when="@1.2") depends_on('automake', when="@1.2") depends_on('libtool', when="@1.2") # v1.2 uses the automagically assembled tarball from .../archive/... # everything else uses the tarballs uploaded to the release def url_for_version(self, version): if version.string == '1.2': return 'https://github.com/samtools/htslib/archive/1.2.tar.gz' else: url = "https://github.com/samtools/htslib/releases/download/{0}/htslib-{0}.tar.bz2" return url.format(version.dotted)
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Htslib(AutotoolsPackage): """C library for high-throughput sequencing data formats.""" homepage = "https://github.com/samtools/htslib" url = "https://github.com/samtools/htslib/releases/download/1.3.1/htslib-1.3.1.tar.bz2" version('1.6', 'd6fd14e208aca7e08cbe9072233d0af9') version('1.4', '2a22ff382654c033c40e4ec3ea880050') version('1.3.1', '16d78f90b72f29971b042e8da8be6843') version('1.2', '64026d659c3b062cfb6ddc8a38e9779f', url='https://github.com/samtools/htslib/archive/1.2.tar.gz') depends_on('zlib') depends_on('bzip2', when="@1.4:") depends_on('xz', when="@1.4:") depends_on('m4', when="@1.2") depends_on('autoconf', when="@1.2") depends_on('automake', when="@1.2") depends_on('libtool', when="@1.2")
Python
0
1a2c69e95eb02010f0a72aebe2554be58db63f42
Generate name from German title if possible
ckanext/switzerland/dcat/harvesters.py
ckanext/switzerland/dcat/harvesters.py
from ckanext.dcat.harvesters.rdf import DCATRDFHarvester import logging log = logging.getLogger(__name__) class SwissDCATRDFHarvester(DCATRDFHarvester): def info(self): return { 'name': 'dcat_ch_rdf', 'title': 'DCAT-AP Switzerland RDF Harvester', 'description': 'Harvester for DCAT-AP Switzerland datasets from an RDF graph' # noqa } def _get_guid(self, dataset_dict, source_url=None): # noqa ''' Try to get a unique identifier for a harvested dataset It will be the first found of: * URI (rdf:about) * dcat:identifier * Source URL + Dataset name * Dataset name The last two are obviously not optimal, as depend on title, which might change. Returns None if no guid could be decided. ''' guid = None for extra in dataset_dict.get('extras', []): if extra['key'] == 'uri' and extra['value']: return extra['value'] if dataset_dict.get('uri'): return dataset_dict['uri'] for extra in dataset_dict.get('extras', []): if extra['key'] == 'identifier' and extra['value']: return extra['value'] if dataset_dict.get('identifier'): return dataset_dict['identifier'] for extra in dataset_dict.get('extras', []): if extra['key'] == 'dcat_identifier' and extra['value']: return extra['value'] if dataset_dict.get('name'): guid = dataset_dict['name'] if source_url: guid = source_url.rstrip('/') + '/' + guid return guid def _gen_new_name(self, title): try: return super(SwissDCATRDFHarvester, self)._gen_new_name(title['de']) # noqa except TypeError: return super(SwissDCATRDFHarvester, self)._gen_new_name(title) # noqa
# flake8: noqa import json import ckan.plugins as p import ckan.model as model from ckanext.harvest.model import HarvestObject from ckanext.dcat.parsers import RDFParserException, RDFParser from ckanext.dcat.interfaces import IDCATRDFHarvester from ckanext.dcat.harvesters.rdf import DCATRDFHarvester import logging log = logging.getLogger(__name__) class SwissDCATRDFHarvester(DCATRDFHarvester): def info(self): return { 'name': 'dcat_ch_rdf', 'title': 'DCAT-AP Switzerland RDF Harvester', 'description': 'Harvester for DCAT-AP Switzerland datasets from an RDF graph' # noqa } def _get_guid(self, dataset_dict, source_url=None): # noqa ''' Try to get a unique identifier for a harvested dataset It will be the first found of: * URI (rdf:about) * dcat:identifier * Source URL + Dataset name * Dataset name The last two are obviously not optimal, as depend on title, which might change. Returns None if no guid could be decided. ''' guid = None for extra in dataset_dict.get('extras', []): if extra['key'] == 'uri' and extra['value']: return extra['value'] if dataset_dict.get('uri'): return dataset_dict['uri'] for extra in dataset_dict.get('extras', []): if extra['key'] == 'identifier' and extra['value']: return extra['value'] if dataset_dict.get('identifier'): return dataset_dict['identifier'] for extra in dataset_dict.get('extras', []): if extra['key'] == 'dcat_identifier' and extra['value']: return extra['value'] if dataset_dict.get('name'): guid = dataset_dict['name'] if source_url: guid = source_url.rstrip('/') + '/' + guid return guid def gather_stage(self, harvest_job): log.debug('In DCATRDFHarvester gather_stage') # Get file contents url = harvest_job.source.url for harvester in p.PluginImplementations(IDCATRDFHarvester): url, before_download_errors = harvester.before_download(url, harvest_job) for error_msg in before_download_errors: self._save_gather_error(error_msg, harvest_job) if not url: return False rdf_format = None if harvest_job.source.config: rdf_format = json.loads(harvest_job.source.config).get("rdf_format") content, rdf_format = self._get_content_and_type(url, harvest_job, 1, content_type=rdf_format) # TODO: store content? 
for harvester in p.PluginImplementations(IDCATRDFHarvester): content, after_download_errors = harvester.after_download(content, harvest_job) for error_msg in after_download_errors: self._save_gather_error(error_msg, harvest_job) if not content: return False # TODO: profiles conf parser = RDFParser() try: parser.parse(content, _format=rdf_format) except RDFParserException, e: self._save_gather_error('Error parsing the RDF file: {0}'.format(e), harvest_job) return False guids_in_source = [] object_ids = [] for dataset in parser.datasets(): if not dataset.get('name'): dataset['name'] = self._gen_new_name(dataset['title']['de']) # Unless already set by the parser, get the owner organization (if any) # from the harvest source dataset if not dataset.get('owner_org'): source_dataset = model.Package.get(harvest_job.source.id) if source_dataset.owner_org: dataset['owner_org'] = source_dataset.owner_org # Try to get a unique identifier for the harvested dataset guid = self._get_guid(dataset) if not guid: log.error('Could not get a unique identifier for dataset: {0}'.format(dataset)) continue dataset['extras'].append({'key': 'guid', 'value': guid}) guids_in_source.append(guid) obj = HarvestObject(guid=guid, job=harvest_job, content=json.dumps(dataset)) obj.save() object_ids.append(obj.id) # Check if some datasets need to be deleted object_ids_to_delete = self._mark_datasets_for_deletion(guids_in_source, harvest_job) object_ids.extend(object_ids_to_delete) return object_ids
Python
1
7f29770766a30bf821689960189e95526eee6bdc
print python version if using file directly, not as import
getDataRemotely.py
getDataRemotely.py
import sys from dictAsFile_wrapper import * def run(): hashtableName = 'hashtable.pkl' data = {} # use different import based on python version number: if (sys.version_info > (3, 0)): # python 3: if __name__ == '__main__': print('python 3') import urllib.request url = 'https://raw.githubusercontent.com/hchiam/cognateLanguage/master/hashtable.pkl' urllib.request.urlretrieve(url) # download file data = readFileToDict(hashtableName) # with urllib.request.urlopen('https://raw.githubusercontent.com/hchiam/cognateLanguage/master/output_shortlist.txt') as response: # line = response.readline().decode('utf-8').replace('\n','') # while line != '': # data.append(line) # line = response.readline().decode('utf-8').replace('\n','') else: # python 2: if __name__ == '__main__': print('python 2') import urllib2 url = 'https://raw.githubusercontent.com/hchiam/cognateLanguage/master/hashtable.pkl' response = urllib2.urlopen(url) # download file data = readFileToDict(hashtableName) # response = urllib2.urlopen('https://raw.githubusercontent.com/hchiam/cognateLanguage/master/output_shortlist.txt') # data = response.read().split('\n') return data # this if statement is so that the following code only runs if this .py file is not being imported if __name__ == '__main__': data = run() # debug print out: print ('debug output: data[\"hi\"] = ' + data["hi"])
import sys from dictAsFile_wrapper import * def run(): hashtableName = 'hashtable.pkl' data = {} # use different import based on python version number: if (sys.version_info > (3, 0)): # python 3: print('python 3') import urllib.request url = 'https://raw.githubusercontent.com/hchiam/cognateLanguage/master/hashtable.pkl' urllib.request.urlretrieve(url) # download file data = readFileToDict(hashtableName) # with urllib.request.urlopen('https://raw.githubusercontent.com/hchiam/cognateLanguage/master/output_shortlist.txt') as response: # line = response.readline().decode('utf-8').replace('\n','') # while line != '': # data.append(line) # line = response.readline().decode('utf-8').replace('\n','') else: # python 2: print('python 2') import urllib2 url = 'https://raw.githubusercontent.com/hchiam/cognateLanguage/master/hashtable.pkl' response = urllib2.urlopen(url) # download file data = readFileToDict(hashtableName) # response = urllib2.urlopen('https://raw.githubusercontent.com/hchiam/cognateLanguage/master/output_shortlist.txt') # data = response.read().split('\n') return data # this if statement is so that the following code only runs if this .py file is not being imported if __name__ == '__main__': data = run() # debug print out: print ('debug output: data[\"hi\"] = ' + data["hi"])
Python
0.000001
9c218079f00e9b3c7285cd94dcc7836531f722a5
Install RMPISNOW wrapper in prefix.bin for r-snow (#16479)
var/spack/repos/builtin/packages/r-snow/package.py
var/spack/repos/builtin/packages/r-snow/package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RSnow(RPackage): """Support for simple parallel computing in R.""" homepage = "https://cloud.r-project.org/package=snow" url = "https://cloud.r-project.org/src/contrib/snow_0.4-2.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/snow" version('0.4-3', sha256='8512537daf334ea2b8074dbb80cf5e959a403a78d68bc1e97664e8a4f64576d8') version('0.4-2', sha256='ee070187aea3607c9ca6235399b3db3e181348692405d038e962e06aefccabd7') depends_on('r@2.13.1:', type=('build', 'run')) @run_after('install') def install_wrapper(self): mkdir(self.prefix.bin) install('inst/RMPISNOW', self.prefix.bin)
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RSnow(RPackage): """Support for simple parallel computing in R.""" homepage = "https://cloud.r-project.org/package=snow" url = "https://cloud.r-project.org/src/contrib/snow_0.4-2.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/snow" version('0.4-3', sha256='8512537daf334ea2b8074dbb80cf5e959a403a78d68bc1e97664e8a4f64576d8') version('0.4-2', sha256='ee070187aea3607c9ca6235399b3db3e181348692405d038e962e06aefccabd7') depends_on('r@2.13.1:', type=('build', 'run'))
Python
0
f42744558b989f8122f67d24bf65c8514eb516cb
Use better names for generated IR files.
runac/__init__.py
runac/__init__.py
from . import tokenizer, ast, blocks, ti, specialize, codegen from util import Error import sys, os, subprocess, tempfile BASE = os.path.dirname(__path__[0]) CORE_DIR = os.path.join(BASE, 'core') TRIPLES = { 'darwin': 'x86_64-apple-darwin11.0.0', 'linux2': 'x86_64-pc-linux-gnu', } def tokenize(f): return tokenizer.tokenize(f) def parse(tokens): return ast.parse(tokens) def module(ast): mod = blocks.Module(ast) for fn in os.listdir(CORE_DIR): if not fn.endswith('.rns'): continue with open(os.path.join(CORE_DIR, fn)) as f: mod.merge(blocks.Module(parse(tokenize(f)))) return mod def type(mod): ti.typer(mod) def spec(mod): specialize.specialize(mod) def generate(mod): triple = 'target triple = "%s"\n\n' % TRIPLES[sys.platform] with open('core/rt.ll') as f: rt = f.read() return triple + rt + '\n' + codegen.source(mod) def compile(ir, outfn): name = outfn + '.ll' with open(name, 'wb') as f: f.write(ir) try: subprocess.check_call(('clang', '-o', outfn, name)) except OSError as e: if e.errno == 2: print 'error: clang not found' except subprocess.CalledProcessError: pass finally: os.unlink(name) def full(fn, outfn): with open(fn) as f: mod = module(parse(tokenize(f))) type(mod) spec(mod) compile(generate(mod), outfn)
from . import tokenizer, ast, blocks, ti, specialize, codegen from util import Error import sys, os, subprocess, tempfile BASE = os.path.dirname(__path__[0]) CORE_DIR = os.path.join(BASE, 'core') TRIPLES = { 'darwin': 'x86_64-apple-darwin11.0.0', 'linux2': 'x86_64-pc-linux-gnu', } def tokenize(f): return tokenizer.tokenize(f) def parse(tokens): return ast.parse(tokens) def module(ast): mod = blocks.Module(ast) for fn in os.listdir(CORE_DIR): if not fn.endswith('.rns'): continue with open(os.path.join(CORE_DIR, fn)) as f: mod.merge(blocks.Module(parse(tokenize(f)))) return mod def type(mod): ti.typer(mod) def spec(mod): specialize.specialize(mod) def generate(mod): triple = 'target triple = "%s"\n\n' % TRIPLES[sys.platform] with open('core/rt.ll') as f: rt = f.read() return triple + rt + '\n' + codegen.source(mod) def compile(ir, outfn): fd, name = tempfile.mkstemp('.ll', dir='.') f = os.fdopen(fd, 'wb') f.write(ir) f.close() try: subprocess.check_call(('clang', '-o', outfn, name)) except OSError as e: if e.errno == 2: print 'error: clang not found' except subprocess.CalledProcessError: pass finally: os.unlink(name) def full(fn, outfn): with open(fn) as f: mod = module(parse(tokenize(f))) type(mod) spec(mod) compile(generate(mod), outfn)
Python
0
a328a1974b985eda47191748e28a69d1e521f070
Implement several small crawlers for scraping FREEBUF's AJAX pages - json library parsing - the scientific approach
freebufspider2.py
freebufspider2.py
import requests from bs4 import BeautifulSoup import json for i in range(1, 20): url = 'http://www.freebuf.com/www.freebuf.com?action=ajax_wenku&year=all&score=all&type=all&tech=0&keyword=&page=' + str( i) r = requests.get(url) data = json.loads(r.text)#parse with the json library, the scientific approach soup = BeautifulSoup(data['cont']) for i in soup.select('h3 a'): print(i.getText(), i.get('href'))
import requests from bs4 import BeautifulSoup import json for i in range(1, 20): url = 'http://www.freebuf.com/www.freebuf.com?action=ajax_wenku&year=all&score=all&type=all&tech=0&keyword=&page=' + str( i) r = requests.get(url) data = json.loads(r.text) soup = BeautifulSoup(data['cont']) for i in soup.select('h3 a'): print(i.getText(), i.get('href'))
Python
0
cd9e8c1595e0e987e2ec0067c9532a9778e64ea3
Update test_plugin.py
logstash_plugin/tests/test_plugin.py
logstash_plugin/tests/test_plugin.py
######## # Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import os import subprocess from logstash_test_utils import ( LogstashTestUtils, DEFAULT_UBUNTU_UNINSTALL, DEFAULT_LOGSTASH_STOP, DEFAULT_LOGSTASH_CONFIG_PATH ) # from cloudify import ctx class TestLogstashPlugin(LogstashTestUtils): def SetUp(self): super(LogstashTestUtils, self).setUp() self._set_up() def test_install_static_clean(self): inputs = self.get_static_config_inputs() self._set_up(inputs) self.addCleanup(subprocess.call, DEFAULT_UBUNTU_UNINSTALL) self.localenv.execute('install', task_retries=10) self.addCleanup(subprocess.call, DEFAULT_LOGSTASH_STOP) logstash_started = subprocess.call( "sudo service logstash status", shell=True) self.assertIn('started', logstash_started) self.addCleanup(os.remove, DEFAULT_LOGSTASH_CONFIG_PATH) with open(DEFAULT_LOGSTASH_CONFIG_PATH, 'r') as default: self.assertEqual(default.read(), self.get_config()) def test_uninstall_static_clean(self): self.addCleanup(subprocess.call, DEFAULT_UBUNTU_UNINSTALL) self.addCleanup(subprocess.call, DEFAULT_LOGSTASH_STOP) self.addCleanup(os.remove, DEFAULT_LOGSTASH_CONFIG_PATH) inputs = self.get_static_config_inputs() self._set_up(inputs) self.localenv.execute('install', task_retries=10) self.localenv.execute('uninstall', task_retries=10) logstash_stopped = subprocess.call( "sudo service logstash status", shell=True) self.assertNotIn('started', logstash_stopped)
######## # Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import os import subprocess from logstash_test_utils import ( LogstashTestUtils, DEFAULT_UBUNTU_UNINSTALL, DEFAULT_LOGSTASH_STOP, DEFAULT_LOGSTASH_CONFIG_PATH ) # from cloudify import ctx class TestLogstashPlugin(LogstashTestUtils): def SetUp(self): super(LogstashTestUtils, self).setUp() self._set_up() def test_install_static_clean(self): inputs = self.get_static_config_inputs() self._set_up(inputs) self.addCleanup(subprocess.call, DEFAULT_UBUNTU_UNINSTALL) self.env.execute('install', task_retries=10) self.addCleanup(subprocess.call, DEFAULT_LOGSTASH_STOP) logstash_started = subprocess.call( "sudo service logstash status", shell=True) self.assertIn('started', logstash_started) self.addCleanup(os.remove, DEFAULT_LOGSTASH_CONFIG_PATH) with open(DEFAULT_LOGSTASH_CONFIG_PATH, 'r') as default: self.assertEqual(default.read(), self.get_config()) def test_uninstall_static_clean(self): self.addCleanup(subprocess.call, DEFAULT_UBUNTU_UNINSTALL) self.addCleanup(subprocess.call, DEFAULT_LOGSTASH_STOP) self.addCleanup(os.remove, DEFAULT_LOGSTASH_CONFIG_PATH) inputs = self.get_static_config_inputs() self._set_up(inputs) self.env.execute('install', task_retries=10) self.env.execute('uninstall', task_retries=10) logstash_stopped = subprocess.call( "sudo service logstash status", shell=True) self.assertNotIn('started', logstash_stopped)
Python
0.000003
74a182a13bae5dde3e2b4fe604a839e5ec05e771
load palette hoohah
cooperhewitt/swatchbook/palettes/__init__.py
cooperhewitt/swatchbook/palettes/__init__.py
def palettes(): return [ 'css3', 'css4' ] def load_palette(reference): if not reference in palettes(): raise Exception, "Invalid palette" # Please figure out the hoo-hah to make dynamic # loading work (20140623/straup) if reference == 'css3': import css3 return css3.colours() if __name__ == '__main__': p = load_palette('css5') print p
# I blame, Guido
Python
0.000002
ff99ed308edd661db7b692cb92eb3c6465843204
Add JSON parser
pande_gas/utils/molecule_net/__init__.py
pande_gas/utils/molecule_net/__init__.py
""" Utilities for MoleculeNet. """ import json import re import xml.etree.cElementTree as et class PcbaJsonParser(object): """ Parser for PubChemBioAssay JSON. Parameters ---------- filename : str Filename. """ def __init__(self, filename): self.tree = json.load(filename) # should just be one record per file assert len(self.tree['PC_AssayContainer']) == 1 # move in to the assay description self.root = self.tree['PC_AssayContainer'][0]['assay']['descr'] def get_name(self): """ Get assay name. """ return self.root['name'] def get_description(self): """ Get assay description. """ return '\n'.join(self.root['description']) def get_protocol(self): """ Get assay protocol. """ return '\n'.join(self.root['protocol']) def get_target(self): """ Get assay target. TODO: Decide which fields are important. We may be able to match targets by mol-id. Returns ------- target : dict A dictionary containing keys for target information types, such as 'name', 'mol-id', and 'molecule-type'. """ return self.root['target'] class PcbaXmlParser(object): """ Parser for PubChem BioAssay XML. Parameters ---------- filename : str Filename. """ def __init__(self, filename): self.tree = et.parse(filename) self.root = self.tree.getroot() # default prefix for all tags self.prefix = '{http://www.ncbi.nlm.nih.gov}' def find(self, tag, root=None): """ Return a list of the elements with a given tag. Note that this only searches the direct children of root. Parameters ---------- tag : str XML tag. root : bool, optional (default False) Root of XML tree. """ if root is None: root = self.root return root.findall(self.prefix + tag) def join_children(self, elem): """ Join the text for the children of an element. Parameters ---------- elem : Element Element. """ text = '' for child in elem.getchildren(): if child.text is not None: text += child.text + child.tail return text def get_name(self): """ Get assay name. """ elem = self.find('PC-AssayDescription_name') assert len(elem) == 1 return elem[0].text def get_description(self): """ Get assay description. """ elem = self.find('PC-AssayDescription_description') assert len(elem) == 1 return self.join_children(elem[0]) def get_protocol(self): """ Get assay protocol. """ elem = self.find('PC-AssayDescription_protocol') assert len(elem) == 1 return self.join_children(elem[0]) def get_target(self): """ Get assay target. Returns ------- target : dict A dictionary containing keys for target information types, such as 'name', 'mol-id', and 'molecule-type'. """ elem = self.find('PC-AssayDescription_target') assert len(elem) == 1 info = self.find('PC-AssayTargetInfo', elem[0]) assert len(info) == 1 target = {} for e in info[0].getchildren(): if not e.text.strip(): continue # skip blank entries m = re.search('PC-AssayTargetInfo_(.*)', e.tag) key = m.groups()[0] target[key] = e.text return target
""" Utilities for MoleculeNet. """ import re import xml.etree.cElementTree as et class PcbaXmlParser(object): """ Parser for PubChem BioAssay XML. Parameters ---------- filename : str Filename. """ def __init__(self, filename): self.tree = et.parse(filename) self.root = self.tree.getroot() # default prefix for all tags self.prefix = '{http://www.ncbi.nlm.nih.gov}' def find(self, tag, root=None): """ Return a list of the elements with a given tag. Note that this only searches the direct children of root. Parameters ---------- tag : str XML tag. root : bool, optional (default False) Root of XML tree. """ if root is None: root = self.root return root.findall(self.prefix + tag) def join_children(self, elem): """ Join the text for the children of an element. Parameters ---------- elem : Element Element. """ text = '' for child in elem.getchildren(): if child.text is not None: text += child.text + child.tail return text def get_name(self): """ Get assay name. """ elem = self.find('PC-AssayDescription_name') assert len(elem) == 1 return elem[0].text def get_description(self): """ Get assay description. """ elem = self.find('PC-AssayDescription_description') assert len(elem) == 1 return self.join_children(elem[0]) def get_protocol(self): """ Get assay protocol. """ elem = self.find('PC-AssayDescription_protocol') assert len(elem) == 1 return self.join_children(elem[0]) def get_target(self): """ Get assay target. Returns ------- target : dict A dictionary containing keys for target information types, such as 'name', 'mol-id', and 'molecule-type'. """ elem = self.find('PC-AssayDescription_target') assert len(elem) == 1 info = self.find('PC-AssayTargetInfo', elem[0]) assert len(info) == 1 target = {} for e in info[0].getchildren(): if not e.text.strip(): continue # skip blank entries m = re.search('PC-AssayTargetInfo_(.*)', e.tag) key = m.groups()[0] target[key] = e.text return target
Python
0.000084
220983a4cf75f4e27f5491812de9ff04f4104510
fix butter_bandpass
code/python/seizures/preprocessing/preprocessing.py
code/python/seizures/preprocessing/preprocessing.py
import scipy.signal as signal def preprocess_multichannel_data(matrix,fs): """ :param matrix: multichannel EEG data :param fs: sampling frequency :return: data without mains, electrical artefacts etc authors: Lea and Vincent """ n_channel,m= matrix.shape for i in range(n_channel): preprocess_single_channel(matrix[i,:],fs) def preprocess_single_channel(x,fs): x = remove_elec_noise(x,fs) x = anti_alias_filter(x) x = remove_dc(x) return x def remove_dc(x): """ Remove mean of signal: use 0.5Hz cut-off hp filter :return: """ x = signal.medfilt(x) return x def remove_elec_noise(x,fs): """ Bandpass remove:59-61Hz (US); if data from EU/UK 49-51Hz :return: """ bandstop = 60 lowcut = bandstop-1 highcut = bandstop+1 def butter_bandpass(lowcut, highcut, fs, order=5): nyq = 0.5 * fs low = lowcut / nyq high = highcut / nyq b, a = signal.butter(order, [low, high], btype='band') return b, a def butter_bandpass_filter(x, lowcut, highcut, fs, order=5): b, a = signal.butter_bandpass(lowcut, highcut, fs, order=order) y = signal.lfilter(b, a, data) return y return butter_bandpass_filter(x,fs) def anti_alias_filter(x,fs): """ Anti_aliasing: use Nyquist frequ cutoff low-pass filter :return: """ numtaps = 1 cutoff = 0.5 * fs x = signal.firwin(numtaps, cutoff) return x
import scipy.signal as signal def preprocess_multichannel_data(matrix,fs): """ :param matrix: multichannel EEG data :param fs: sampling frequency :return: data without mains, electrical artefacts etc authors: Lea and Vincent """ n_channel,m= matrix.shape for i in range(n_channel): preprocess_single_channel(matrix[i,:],fs) def preprocess_single_channel(x,fs): x = remove_elec_noise(x,fs) x = anti_alias_filter(x) x = remove_dc(x) return x def remove_dc(x): """ Remove mean of signal: use 0.5Hz cut-off hp filter :return: """ x = signal.medfilt(x) return x def remove_elec_noise(x,fs): """ Bandpass remove:59-61Hz (US); if data from EU/UK 49-51Hz :return: """ lowcut = 59 highcut = 61 def butter_bandpass(lowcut, highcut, fs, order=5): nyq = 0.5 * fs low = lowcut / nyq high = highcut / nyq b, a = signal.butter(order, [low, high], btype='band') return b, a def butter_bandpass_filter(data, lowcut, highcut, fs, order=5): b, a = signal.butter_bandpass(lowcut, highcut, fs, order=order) y = signal.lfilter(b, a, data) return y return butter_bandpass_filter(x,fs) def anti_alias_filter(x,fs): """ Anti_aliasing: use Nyquist frequ cutoff low-pass filter :return: """ numtaps = 1 cutoff = 0.5 * fs x = signal.firwin(numtaps, cutoff) return x
Python
0.000004
4a4e56a0909d8e89d82462c846f365b0849b3cb4
add missing import
generator/aidl.py
generator/aidl.py
#!/usr/bin/python from eclipse2buck.generator.base_target import BaseTarget from eclipse2buck.decorator import target from eclipse2buck.util import util from eclipse2buck import config import os class AIDL(BaseTarget): """ generated all aidl targets """ aidl_path_list = [] def __init__(self, root, name): BaseTarget.__init__(self, root, name, config.aidl_suffix) self.aidl_path_list = self._find_all_aidls(os.path.join(self.lib_path, 'src')) for aidl_path in self.aidl_path_list: name = self.target_name(util.path_get_basename(aidl_path)) self.deps.append(":%s" % name) def dump_src(self): for aidl in self.aidl_path_list: #remove .aild aidl = aidl[:-5] print "genfile( '%s.java' )," % aidl def dump(self): for aidl_path in self.aidl_path_list: name = self.target_name(util.path_get_basename(aidl_path)) self._gen_aidl_target(name, aidl_path) def is_existed_aidl(self): return len(self.aidl_path_list) > 0 def _find_all_aidls(self, relative_path): path_list = util.find_all_files_with_suffix(relative_path, "*.aidl") exclude_aidls = ["src/com/tencent/mm/cache/MCacheItem.aidl", "src/com/tencent/tmassistantsdk/downloadclient/TMAssistantDownloadTaskInfo.aidl"] #some aidl file needn't be generated for exclude_aidl in exclude_aidls: if exclude_aidl in path_list: path_list.remove(exclude_aidl) return path_list @target("gen_aidl") def _gen_aidl_target(self, aidl_name, path): """ print the aidl target Returns: str: the target name which lib target should depend on """ print "name = '%s'," % aidl_name print "aidl = '%s'," % path print "import_path = '%s/src/'," % self.proj_name
#!/usr/bin/python from eclipse2buck.generator.base_target import BaseTarget from eclipse2buck.decorator import target from eclipse2buck.util import util from eclipse2buck import config class AIDL(BaseTarget): """ generated all aidl targets """ aidl_path_list = [] def __init__(self, root, name): BaseTarget.__init__(self, root, name, config.aidl_suffix) self.aidl_path_list = self._find_all_aidls(os.path.join(self.lib_path, 'src')) for aidl_path in self.aidl_path_list: name = self.target_name(util.path_get_basename(aidl_path)) self.deps.append(":%s" % name) def dump_src(self): for aidl in self.aidl_path_list: #remove .aild aidl = aidl[:-5] print "genfile( '%s.java' )," % aidl def dump(self): for aidl_path in self.aidl_path_list: name = self.target_name(util.path_get_basename(aidl_path)) self._gen_aidl_target(name, aidl_path) def is_existed_aidl(self): return len(self.aidl_path_list) > 0 def _find_all_aidls(self, relative_path): path_list = util.find_all_files_with_suffix(relative_path, "*.aidl") exclude_aidls = ["src/com/tencent/mm/cache/MCacheItem.aidl", "src/com/tencent/tmassistantsdk/downloadclient/TMAssistantDownloadTaskInfo.aidl"] #some aidl file needn't be generated for exclude_aidl in exclude_aidls: if exclude_aidl in path_list: path_list.remove(exclude_aidl) return path_list @target("gen_aidl") def _gen_aidl_target(self, aidl_name, path): """ print the aidl target Returns: str: the target name which lib target should depend on """ print "name = '%s'," % aidl_name print "aidl = '%s'," % path print "import_path = '%s/src/'," % self.proj_name
Python
0.000042
6cf322bbce2bfd4088cc4c5af96cd72cad86ea95
Add aspect as a parameter of Displayer init.
manifold/infrastructure/displayer.py
manifold/infrastructure/displayer.py
import math import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn.metrics import confusion_matrix class Displayer(object): def __init__(self, **kwargs): self.aspect = kwargs.pop('aspect', (20, -40)) self.parameters = ', '.join(['%s: %s' % (k, str(v)) for k, v in kwargs.items()]) self.items = [] def load(self, data, color=None, title=None): # Always copy the data, and, of course, only the first three dimensions. self.items.append((data[:, :3], color, title)) return self def render(self): # Assert that there is at least one graph to show. assert self.items, 'nothing graphs to render.' fig = plt.figure(figsize=(16, 9)) plt.suptitle(self.parameters) count = len(self.items) items_in_row = math.ceil(math.sqrt(count)) rows_count = math.ceil(count / items_in_row) for i, item in enumerate(self.items): data, color, title = item samples, dimension = data.shape # Grab data set components. It necessarily has 3 dimensions, as it was cut during load(). components = [data[:, i] for i in range(dimension)] if dimension == 1: components.append(np.zeros((samples, 1))) if color is None: color = np.zeros(samples) kwargs = {'projection': '3d'} if dimension > 2 else {} ax = fig.add_subplot( rows_count * 100 + items_in_row * 10 + 1 + i, **kwargs) ax.scatter(*components, **{ 'c': color, 's': 50, 'cmap': plt.cm.rainbow }) if title: plt.title(title) plt.axis('tight') if dimension > 2: ax.view_init(*self.aspect) plt.show() return self @classmethod def confusion_matrix_for(cls, target_test, target_predicted, title='Confusion matrix'): cm = confusion_matrix(target_test, target_predicted) np.set_printoptions(precision=2) plt.figure() plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title(title) plt.colorbar() plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show()
import math import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn.metrics import confusion_matrix class Displayer(object): def __init__(self, **kwargs): self.items = [] self.parameters = ', '.join(['%s: %s' % (k, str(v)) for k, v in kwargs.items()]) self.aspect = (20, -40) def load(self, data, color=None, title=None): # Always copy the data, and, of course, only the first three dimensions. self.items.append((data[:, :3], color, title)) return self def render(self): # Assert that there is at least one graph to show. assert self.items, 'nothing graphs to render.' fig = plt.figure(figsize=(16, 9)) plt.suptitle(self.parameters) count = len(self.items) items_in_row = math.ceil(math.sqrt(count)) rows_count = math.ceil(count / items_in_row) for i, item in enumerate(self.items): data, color, title = item samples, dimension = data.shape # Grab data set components. It necessarily has 3 dimensions, as it was cut during load(). components = [data[:, i] for i in range(dimension)] if dimension == 1: components.append(np.zeros((samples, 1))) kwargs = {} if dimension > 2: kwargs['projection'] = '3d' ax = fig.add_subplot( rows_count * 100 + items_in_row * 10 + 1 + i, **kwargs) kwargs = {} if color is not None: kwargs['c'] = color ax.scatter(*components, s=50.0, cmap=plt.cm.rainbow, **kwargs) if title: plt.title(title) plt.axis('tight') if dimension > 2: ax.view_init(*self.aspect) plt.show() return self @classmethod def confusion_matrix_for(cls, target_test, target_predicted, title='Confusion matrix'): cm = confusion_matrix(target_test, target_predicted) np.set_printoptions(precision=2) plt.figure() plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title(title) plt.colorbar() plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show()
Python
0
da54f60def189953b9ebbd754200103668e00042
Handle full result sets.
marshmallow_pagination/paginators.py
marshmallow_pagination/paginators.py
# -*- coding: utf-8 -*- import abc import math import six import sqlalchemy as sa from marshmallow_sqlalchemy.convert import ModelConverter from marshmallow_pagination import pages converter = ModelConverter() def convert_value(row, attr): field = converter._get_field_class_for_property(attr.property) value = getattr(row, attr.key) return field()._serialize(value, None, None) class BasePaginator(six.with_metaclass(abc.ABCMeta, object)): def __init__(self, cursor, per_page, count=None): self.cursor = cursor self.count = count or self._count() self.per_page = per_page or self.count def _count(self): return self.cursor.count() @abc.abstractproperty def page_type(self): pass @property def pages(self): if self.per_page: return int(math.ceil(self.count / self.per_page)) return 0 @abc.abstractproperty def get_page(self): pass class OffsetPaginator(BasePaginator): """Paginator based on offsets and limits. Not performant for large result sets. """ page_type = pages.OffsetPage def get_page(self, page, eager=True): offset, limit = self.per_page * (page - 1), self.per_page return self.page_type(self, page, self._fetch(offset, limit, eager=eager)) def _fetch(self, offset, limit, eager=True): offset += (self.cursor._offset or 0) if self.cursor._limit: limit = min(limit, self.cursor._limit - offset) query = self.cursor.offset(offset).limit(limit) return query.all() if eager else query class SeekPaginator(BasePaginator): """Paginator using keyset pagination for performance on large result sets. See http://use-the-index-luke.com/no-offset for details. """ page_type = pages.SeekPage def __init__(self, cursor, per_page, index_column, sort_column=None, count=None): self.index_column = index_column self.sort_column = sort_column super(SeekPaginator, self).__init__(cursor, per_page, count=count) def get_page(self, last_index=None, sort_index=None, eager=True): limit = self.per_page return self.page_type(self, self._fetch(last_index, sort_index, limit, eager=eager)) def _fetch(self, last_index, sort_index=None, limit=None, eager=True): cursor = self.cursor direction = self.sort_column[1] if self.sort_column else sa.asc lhs, rhs = (), () if sort_index is not None: lhs += (self.sort_column, ) rhs += (sort_index, ) if last_index is not None: lhs += (self.index_column, ) rhs += (last_index, ) lhs = sa.tuple_(*lhs) rhs = sa.tuple_(*rhs) if rhs.clauses: filter = lhs > rhs if direction == sa.asc else lhs < rhs cursor = cursor.filter(filter) query = cursor.order_by(direction(self.index_column)).limit(limit) return query.all() if eager else query def _get_index_values(self, result): """Get index values from last result, to be used in seeking to the next page. Optionally include sort values, if any. """ ret = {'last_index': convert_value(result, self.index_column)} if self.sort_column: key = 'last_{0}'.format(self.sort_column[0].key) ret[key] = convert_value(result, self.sort_column[0]) return ret
# -*- coding: utf-8 -*- import abc import math import six import sqlalchemy as sa from marshmallow_sqlalchemy.convert import ModelConverter from marshmallow_pagination import pages converter = ModelConverter() def convert_value(row, attr): field = converter._get_field_class_for_property(attr.property) value = getattr(row, attr.key) return field()._serialize(value, None, None) class BasePaginator(six.with_metaclass(abc.ABCMeta, object)): def __init__(self, cursor, per_page, count=None): self.cursor = cursor self.per_page = per_page self.count = count or self._count() def _count(self): return self.cursor.count() @abc.abstractproperty def page_type(self): pass @property def pages(self): if self.per_page: return int(math.ceil(self.count / self.per_page)) return 0 @abc.abstractproperty def get_page(self): pass class OffsetPaginator(BasePaginator): """Paginator based on offsets and limits. Not performant for large result sets. """ page_type = pages.OffsetPage def get_page(self, page, eager=True): offset, limit = self.per_page * (page - 1), self.per_page return self.page_type(self, page, self._fetch(offset, limit, eager=eager)) def _fetch(self, offset, limit, eager=True): offset += (self.cursor._offset or 0) if self.cursor._limit: limit = min(limit, self.cursor._limit - offset) query = self.cursor.offset(offset).limit(limit) return query.all() if eager else query class SeekPaginator(BasePaginator): """Paginator using keyset pagination for performance on large result sets. See http://use-the-index-luke.com/no-offset for details. """ page_type = pages.SeekPage def __init__(self, cursor, per_page, index_column, sort_column=None, count=None): self.index_column = index_column self.sort_column = sort_column super(SeekPaginator, self).__init__(cursor, per_page, count=count) def get_page(self, last_index=None, sort_index=None, eager=True): limit = self.per_page return self.page_type(self, self._fetch(last_index, sort_index, limit, eager=eager)) def _fetch(self, last_index, sort_index=None, limit=None, eager=True): cursor = self.cursor direction = self.sort_column[1] if self.sort_column else sa.asc lhs, rhs = (), () if sort_index is not None: lhs += (self.sort_column, ) rhs += (sort_index, ) if last_index is not None: lhs += (self.index_column, ) rhs += (last_index, ) lhs = sa.tuple_(*lhs) rhs = sa.tuple_(*rhs) if rhs.clauses: filter = lhs > rhs if direction == sa.asc else lhs < rhs cursor = cursor.filter(filter) query = cursor.order_by(direction(self.index_column)).limit(limit) return query.all() if eager else query def _get_index_values(self, result): """Get index values from last result, to be used in seeking to the next page. Optionally include sort values, if any. """ ret = {'last_index': convert_value(result, self.index_column)} if self.sort_column: key = 'last_{0}'.format(self.sort_column[0].key) ret[key] = convert_value(result, self.sort_column[0]) return ret
Python
0
9105326cdb6ad7a6d4d23504ef36beb6303eaf65
make offset_date type unaware
custom/opm/opm_reports/tests/case_reports.py
custom/opm/opm_reports/tests/case_reports.py
from collections import defaultdict from datetime import datetime, date from unittest import TestCase from jsonobject import (JsonObject, DictProperty, DateTimeProperty, StringProperty, IntegerProperty, BooleanProperty) from casexml.apps.case.models import CommCareCase from custom.opm.opm_reports.reports import SharedDataProvider from dimagi.utils.dates import DateSpan, add_months from ..beneficiary import OPMCaseRow class AggressiveDefaultDict(defaultdict): def __contains__(self, item): return True class MockDataProvider(SharedDataProvider): """ Mock data provider to manually specify vhnd availability per user """ def __init__(self, datespan, vhnd_map=None): super(MockDataProvider, self).__init__(datespan) self.vhnd_map = vhnd_map if vhnd_map is not None else AggressiveDefaultDict(lambda: True) @property def vhnd_availability(self): return self.vhnd_map class Report(JsonObject): month = IntegerProperty(required=True) year = IntegerProperty(required=True) block = StringProperty(required=True) _data_provider = None @property def data_provider(self): return self._data_provider @property def datespan(self): return DateSpan.from_month(self.month, self.year, inclusive=True) class Form(JsonObject): xmlns = StringProperty('something') form = DictProperty(required=True) received_on = DateTimeProperty(required=True) class OPMCase(CommCareCase): opened_on = DateTimeProperty(datetime(2010, 1, 1)) block_name = StringProperty("Sahora") type = StringProperty("pregnancy") closed = BooleanProperty(default=False) closed_on = DateTimeProperty() awc_name = StringProperty("Atri") owner_id = StringProperty("Sahora") def __init__(self, forms=None, **kwargs): super(OPMCase, self).__init__(**kwargs) self._fake_forms = forms if forms is not None else [] def get_forms(self): return self._fake_forms class MockCaseRow(OPMCaseRow): """ Spoof the following fields to create example cases """ def __init__(self, case, report, data_provider=None): self.case = case self.report = report self.report.snapshot = None self.report.is_rendered_as_email = None self.report._data_provider = data_provider or MockDataProvider(self.report.datespan) super(MockCaseRow, self).__init__(case, report) class OPMCaseReportTestBase(TestCase): def setUp(self): self.report_date = date(2014, 6, 1) self.report_datetime = datetime(2014, 6, 1) self.report = Report(month=6, year=2014, block="Atri") def get_relative_edd_from_preg_month(report_date, month): months_until_edd = 10 - month new_year, new_month = add_months(report_date.year, report_date.month, months_until_edd) return type(report_date)(new_year, new_month, 1) def offset_date(reference_date, offset): new_year, new_month = add_months(reference_date.year, reference_date.month, offset) return type(reference_date)(new_year, new_month, 1) class MockDataTest(OPMCaseReportTestBase): def test_mock_data(self): report = Report(month=6, year=2014, block="Atri") form = Form(form={'foo': 'bar'}, received_on=datetime(2014, 6, 15)) case = OPMCase( forms=[form], # add/override any desired case properties here edd=date(2014, 12, 10), ) row = MockCaseRow(case, report)
from collections import defaultdict from datetime import datetime, date from unittest import TestCase from jsonobject import (JsonObject, DictProperty, DateTimeProperty, StringProperty, IntegerProperty, BooleanProperty) from casexml.apps.case.models import CommCareCase from custom.opm.opm_reports.reports import SharedDataProvider from dimagi.utils.dates import DateSpan, add_months from ..beneficiary import OPMCaseRow class AggressiveDefaultDict(defaultdict): def __contains__(self, item): return True class MockDataProvider(SharedDataProvider): """ Mock data provider to manually specify vhnd availability per user """ def __init__(self, datespan, vhnd_map=None): super(MockDataProvider, self).__init__(datespan) self.vhnd_map = vhnd_map if vhnd_map is not None else AggressiveDefaultDict(lambda: True) @property def vhnd_availability(self): return self.vhnd_map class Report(JsonObject): month = IntegerProperty(required=True) year = IntegerProperty(required=True) block = StringProperty(required=True) _data_provider = None @property def data_provider(self): return self._data_provider @property def datespan(self): return DateSpan.from_month(self.month, self.year, inclusive=True) class Form(JsonObject): xmlns = StringProperty('something') form = DictProperty(required=True) received_on = DateTimeProperty(required=True) class OPMCase(CommCareCase): opened_on = DateTimeProperty(datetime(2010, 1, 1)) block_name = StringProperty("Sahora") type = StringProperty("pregnancy") closed = BooleanProperty(default=False) closed_on = DateTimeProperty() awc_name = StringProperty("Atri") owner_id = StringProperty("Sahora") def __init__(self, forms=None, **kwargs): super(OPMCase, self).__init__(**kwargs) self._fake_forms = forms if forms is not None else [] def get_forms(self): return self._fake_forms class MockCaseRow(OPMCaseRow): """ Spoof the following fields to create example cases """ def __init__(self, case, report, data_provider=None): self.case = case self.report = report self.report.snapshot = None self.report.is_rendered_as_email = None self.report._data_provider = data_provider or MockDataProvider(self.report.datespan) super(MockCaseRow, self).__init__(case, report) class OPMCaseReportTestBase(TestCase): def setUp(self): self.report_date = date(2014, 6, 1) self.report_datetime = datetime(2014, 6, 1) self.report = Report(month=6, year=2014, block="Atri") def get_relative_edd_from_preg_month(report_date, month): months_until_edd = 10 - month new_year, new_month = add_months(report_date.year, report_date.month, months_until_edd) return type(report_date)(new_year, new_month, 1) def offset_date(reference_date, offset): new_year, new_month = add_months(reference_date.year, reference_date.month, offset) return date(new_year, new_month, 1) class MockDataTest(OPMCaseReportTestBase): def test_mock_data(self): report = Report(month=6, year=2014, block="Atri") form = Form(form={'foo': 'bar'}, received_on=datetime(2014, 6, 15)) case = OPMCase( forms=[form], # add/override any desired case properties here edd=date(2014, 12, 10), ) row = MockCaseRow(case, report)
Python
0.000047
ba4396f1868dad9a637ddd3cbf9e935fa8d93cf0
print Exception error
git_downloader.py
git_downloader.py
#!/usr/bin/env python
#
import sys, os, argparse, logging, fnmatch, urllib, posixpath, urlparse, socket
from github import Github


def main(args, loglevel):
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)
    socket.setdefaulttimeout(args.timeout)
    g = Github()
    with open(args.repo_file, 'r') as f:
        file_counter = 0
        for line in f.readlines():
            logging.info('Fetching repository: %s' % line)
            try:
                repo_str = line.rstrip().split('github.com/')[-1]
                repo = g.get_repo(repo_str)
                tree = repo.get_git_tree('master', recursive=True)
                files_to_download = []
                for file in tree.tree:
                    if fnmatch.fnmatch(file.path, args.wildcard):
                        files_to_download.append('https://github.com/%s/raw/master/%s' % (repo_str, file.path))
                for file in files_to_download:
                    logging.info('Downloading %s' % file)
                    file_counter += 1
                    filename = posixpath.basename(urlparse.urlsplit(file).path)
                    output_path = os.path.join(args.output_dir, filename)
                    if os.path.exists(output_path):
                        output_path += "-" + str(file_counter)
                    try:
                        urllib.urlretrieve(file, output_path)
                    except Exception as error:
                        logging.error('Error downloading %s. %s' % (file, error))
            except Exception as error:
                logging.error('Error fetching repository %s. %s' % (line, error))
    args.yara_meta = os.path.join(args.output_dir, args.yara_meta)
    with open(args.yara_meta, 'w') as f:
        for i in os.listdir(args.output_dir):
            try:
                f.write("include \"" + i + "\"\n")
            except Exception as error:
                logging.error('Couldn\'t write to %s: %s' % (args.yara_meta, error))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = "Github file downloader")
    parser.add_argument("-r", "--repo_file", help = "Path for the input file which contains a url of a Github repository for each separate line")
    parser.add_argument("-w", "--wildcard", help = "Unix shell-style wildcard to match files to download (for example: *.txt)")
    parser.add_argument("-o", "--output_dir", default = "", help = "Directory to store all downloaded files")
    parser.add_argument("-y", "--yara-meta", default = "rules.yara", help = "Yara meta rule filename to create")
    parser.add_argument("-t", "--timeout", default = 30, help = "Socket timeout (seconds)")
    parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
    args = parser.parse_args()

    # Setup logging
    if args.verbose:
        loglevel = logging.DEBUG
    else:
        loglevel = logging.INFO

    main(args, loglevel)
#!/usr/bin/env python
#
import sys, os, argparse, logging, fnmatch, urllib, posixpath, urlparse, socket
from github import Github


def main(args, loglevel):
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)
    socket.setdefaulttimeout(args.timeout)
    g = Github()
    with open(args.repo_file, 'r') as f:
        file_counter = 0
        for line in f.readlines():
            logging.info('Fetching repository: %s' % line)
            try:
                repo_str = line.rstrip().split('github.com/')[-1]
                repo = g.get_repo(repo_str)
                tree = repo.get_git_tree('master', recursive=True)
                files_to_download = []
                for file in tree.tree:
                    if fnmatch.fnmatch(file.path, args.wildcard):
                        files_to_download.append('https://github.com/%s/raw/master/%s' % (repo_str, file.path))
                for file in files_to_download:
                    logging.info('Downloading %s' % file)
                    file_counter += 1
                    filename = posixpath.basename(urlparse.urlsplit(file).path)
                    output_path = os.path.join(args.output_dir, filename)
                    if os.path.exists(output_path):
                        output_path += "-" + str(file_counter)
                    try:
                        urllib.urlretrieve(file, output_path)
                    except:
                        logging.error('Error downloading %s' % file)
            except:
                logging.error('Error fetching repository %s' % line)
    args.yara_meta = os.path.join(args.output_dir, args.yara_meta)
    with open(args.yara_meta, 'w') as f:
        for i in os.listdir(args.output_dir):
            try:
                f.write("include \"" + i + "\"\n")
            except:
                logging.error('Couldn\'t write to %s' % args.yara_meta)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = "Github file downloader")
    parser.add_argument("-r", "--repo_file", help = "Path for the input file which contains a url of a Github repository for each separate line")
    parser.add_argument("-w", "--wildcard", help = "Unix shell-style wildcard to match files to download (for example: *.txt)")
    parser.add_argument("-o", "--output_dir", default = "", help = "Directory to store all downloaded files")
    parser.add_argument("-y", "--yara-meta", default = "rules.yara", help = "Yara meta rule filename to create")
    parser.add_argument("-t", "--timeout", default = 30, help = "Socket timeout (seconds)")
    parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
    args = parser.parse_args()

    # Setup logging
    if args.verbose:
        loglevel = logging.DEBUG
    else:
        loglevel = logging.INFO

    main(args, loglevel)
Python
0.999548
cadf61287b9b68af5b734b4ab2fefd9c758cfc26
Update skiptest
data_importer/tests/test_generic_importer.py
data_importer/tests/test_generic_importer.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import mock import os import django from django.test import TestCase from unittest import skipIf from data_importer.importers.generic import GenericImporter from data_importer.readers.xls_reader import XLSReader from data_importer.readers.xlsx_reader import XLSXReader from data_importer.readers.csv_reader import CSVReader from data_importer.readers.xml_reader import XMLReader from data_importer.core.exceptions import UnsuportedFile from data_importer.models import FileHistory LOCAL_DIR = os.path.dirname(__file__) class TestGeneralImpoerterSetup(TestCase): def setUp(self): self.xls_file = os.path.join(LOCAL_DIR, 'data/test.xls') self.xlsx_file = os.path.join(LOCAL_DIR, 'data/test.xlsx') self.csv_file = os.path.join(LOCAL_DIR, 'data/test.csv') self.xml_file = os.path.join(LOCAL_DIR, 'data/test.xml') self.unsuported_file = os.path.join(LOCAL_DIR, 'data/test_json_descriptor.json') def test_xls_reader_set(self): importer = GenericImporter(source=self.xls_file) self.assertEquals(importer.get_reader_class(), XLSReader) def test_xlsx_reader_set(self): importer = GenericImporter(source=self.xlsx_file) self.assertEquals(importer.get_reader_class(), XLSXReader) def test_csv_reader_set(self): importer = GenericImporter(source=self.csv_file) self.assertEquals(importer.get_reader_class(), CSVReader) def test_xml_reader_set(self): importer = GenericImporter(source=self.xml_file) self.assertEquals(importer.get_reader_class(), XMLReader) def test_getting_source_file_extension(self): importer = GenericImporter(source=self.csv_file) self.assertEquals(importer.get_source_file_extension(), 'csv') @skipIf(django.VERSION < (1, 4), "not supported in this library version") def test_unsuported_raise_error_message(self): with self.assertRaisesMessage(UnsuportedFile, 'Unsuported File'): GenericImporter(source=self.unsuported_file) def test_import_with_file_instance(self): file_instance = open(self.csv_file) importer = GenericImporter(source=file_instance) self.assertEquals(importer.get_source_file_extension(), 'csv') def test_import_with_model_instance(self): file_mock = mock.MagicMock(spec=FileHistory, name='FileHistoryMock') file_mock.file_upload = '/media/test.csv' importer = GenericImporter(source=file_mock) self.assertEquals(importer.get_source_file_extension(), 'csv')
#!/usr/bin/env python # -*- coding: utf-8 -*- import mock import os import django from django.test import TestCase from unittest import skipIf from data_importer.importers.generic import GenericImporter from data_importer.readers.xls_reader import XLSReader from data_importer.readers.xlsx_reader import XLSXReader from data_importer.readers.csv_reader import CSVReader from data_importer.readers.xml_reader import XMLReader from data_importer.core.exceptions import UnsuportedFile from data_importer.models import FileHistory LOCAL_DIR = os.path.dirname(__file__) class TestGeneralImpoerterSetup(TestCase): def setUp(self): self.xls_file = os.path.join(LOCAL_DIR, 'data/test.xls') self.xlsx_file = os.path.join(LOCAL_DIR, 'data/test.xlsx') self.csv_file = os.path.join(LOCAL_DIR, 'data/test.csv') self.xml_file = os.path.join(LOCAL_DIR, 'data/test.xml') self.unsuported_file = os.path.join(LOCAL_DIR, 'data/test_json_descriptor.json') def test_xls_reader_set(self): importer = GenericImporter(source=self.xls_file) self.assertEquals(importer.get_reader_class(), XLSReader) def test_xlsx_reader_set(self): importer = GenericImporter(source=self.xlsx_file) self.assertEquals(importer.get_reader_class(), XLSXReader) def test_csv_reader_set(self): importer = GenericImporter(source=self.csv_file) self.assertEquals(importer.get_reader_class(), CSVReader) def test_xml_reader_set(self): importer = GenericImporter(source=self.xml_file) self.assertEquals(importer.get_reader_class(), XMLReader) def test_getting_source_file_extension(self): importer = GenericImporter(source=self.csv_file) self.assertEquals(importer.get_source_file_extension(), 'csv') @skipIf((django.get_version() < '1.4'), "not supported in this library version") def test_unsuported_raise_error_message(self): with self.assertRaisesMessage(UnsuportedFile, 'Unsuported File'): GenericImporter(source=self.unsuported_file) def test_import_with_file_instance(self): file_instance = open(self.csv_file) importer = GenericImporter(source=file_instance) self.assertEquals(importer.get_source_file_extension(), 'csv') def test_import_with_model_instance(self): file_mock = mock.MagicMock(spec=FileHistory, name='FileHistoryMock') file_mock.file_upload = '/media/test.csv' importer = GenericImporter(source=file_mock) self.assertEquals(importer.get_source_file_extension(), 'csv')
Python
0.000001
6e7a20675cd66d9ca7d4a286958404198369dece
Validate ReplicationTopology data
dbaas/physical/forms/replication_topology.py
dbaas/physical/forms/replication_topology.py
# -*- coding: utf-8 -*-
from django import forms
from django.forms.widgets import SelectMultiple
#from django.forms.widgets import CheckboxSelectMultiple
from ..models import ReplicationTopology, Parameter, DatabaseInfraParameter


class ReplicationTopologyForm(forms.ModelForm):

    class Meta:
        model = ReplicationTopology

    def __init__(self, *args, **kwargs):
        super(ReplicationTopologyForm, self).__init__(*args, **kwargs)
        self.fields["parameter"].widget = SelectMultiple()
        #self.fields["parameter"].widget = CheckboxSelectMultiple()
        self.fields["parameter"].queryset = Parameter.objects.all()
        self.fields["parameter"].help_text = 'Select the parameters that can be changed in this topology'

    def clean(self):
        cleaned_data = super(ReplicationTopologyForm, self).clean()

        if self.instance.id and 'parameter' in self.changed_data:
            form_parameters = cleaned_data.get("parameter")
            topology_parameters = Parameter.objects.filter(
                replication_topologies=self.instance
            )
            for topology_parameter in topology_parameters:
                if topology_parameter not in form_parameters:
                    parametersinfra = DatabaseInfraParameter.objects.filter(
                        parameter=topology_parameter,
                        databaseinfra__plan__replication_topology=self.instance
                    )
                    if parametersinfra:
                        parameterinfra = parametersinfra[0]
                        msg = "The parameter {} can not be deleted. It has been set in the databaseinfra {}.".format(
                            parameterinfra.parameter, parameterinfra.databaseinfra
                        )
                        raise forms.ValidationError(msg)

        return cleaned_data
from django import forms
from django.forms.widgets import SelectMultiple
#from django.forms.widgets import CheckboxSelectMultiple
from ..models import ReplicationTopology, Parameter


class ReplicationTopologyForm(forms.ModelForm):

    class Meta:
        model = ReplicationTopology

    def __init__(self, *args, **kwargs):
        super(ReplicationTopologyForm, self).__init__(*args, **kwargs)
        self.fields["parameter"].widget = SelectMultiple()
        #self.fields["parameter"].widget = CheckboxSelectMultiple()
        self.fields["parameter"].queryset = Parameter.objects.all()
        self.fields["parameter"].help_text = 'Select the parameters that can be changed in this topology'
Python
0.000001
1820001e6ec6960014b5e9cf23eb7a2f8b90c213
Remove a broken test case from decorators_test
dm_control/mujoco/testing/decorators_test.py
dm_control/mujoco/testing/decorators_test.py
# Copyright 2017 The dm_control Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests of the decorators module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Internal dependencies. from absl.testing import absltest from dm_control.mujoco.testing import decorators import mock from six.moves import xrange # pylint: disable=redefined-builtin class RunThreadedTest(absltest.TestCase): @mock.patch(decorators.__name__ + ".threading") def test_number_of_threads(self, mock_threading): num_threads = 5 mock_threads = [mock.MagicMock() for _ in xrange(num_threads)] for thread in mock_threads: thread.start = mock.MagicMock() thread.join = mock.MagicMock() mock_threading.Thread = mock.MagicMock(side_effect=mock_threads) test_decorator = decorators.run_threaded(num_threads=num_threads) tested_method = mock.MagicMock() tested_method.__name__ = "foo" test_runner = test_decorator(tested_method) test_runner(self) for thread in mock_threads: thread.start.assert_called_once() thread.join.assert_called_once() def test_number_of_iterations(self): calls_per_thread = 5 tested_method = mock.MagicMock() tested_method.__name__ = "foo" test_decorator = decorators.run_threaded( num_threads=1, calls_per_thread=calls_per_thread) test_runner = test_decorator(tested_method) test_runner(self) self.assertEqual(calls_per_thread, tested_method.call_count) if __name__ == "__main__": absltest.main()
# Copyright 2017 The dm_control Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests of the decorators module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Internal dependencies. from absl.testing import absltest from absl.testing import parameterized from dm_control.mujoco.testing import decorators import mock from six.moves import xrange # pylint: disable=redefined-builtin class RunThreadedTest(absltest.TestCase): @mock.patch(decorators.__name__ + ".threading") def test_number_of_threads(self, mock_threading): num_threads = 5 mock_threads = [mock.MagicMock() for _ in xrange(num_threads)] for thread in mock_threads: thread.start = mock.MagicMock() thread.join = mock.MagicMock() mock_threading.Thread = mock.MagicMock(side_effect=mock_threads) test_decorator = decorators.run_threaded(num_threads=num_threads) tested_method = mock.MagicMock() tested_method.__name__ = "foo" test_runner = test_decorator(tested_method) test_runner(self) for thread in mock_threads: thread.start.assert_called_once() thread.join.assert_called_once() def test_number_of_iterations(self): calls_per_thread = 5 tested_method = mock.MagicMock() tested_method.__name__ = "foo" test_decorator = decorators.run_threaded( num_threads=1, calls_per_thread=calls_per_thread) test_runner = test_decorator(tested_method) test_runner(self) self.assertEqual(calls_per_thread, tested_method.call_count) def test_works_with_named_parameters(self): func = mock.MagicMock() names = ["foo", "bar", "baz"] params = [1, 2, 3] calls_per_thread = 2 num_threads = 4 class FakeTest(parameterized.TestCase): @parameterized.named_parameters(zip(names, params)) @decorators.run_threaded(calls_per_thread=calls_per_thread, num_threads=num_threads) def test_method(self, param): func(param) suite = absltest.TestLoader().loadTestsFromTestCase(FakeTest) suite.debug() # Run tests without collecting the output. expected_call_count = len(params) * calls_per_thread * num_threads self.assertEqual(func.call_count, expected_call_count) actual_params = {call[0][0] for call in func.call_args_list} self.assertSetEqual(set(params), actual_params) if __name__ == "__main__": absltest.main()
Python
0.000015
b58d296373ed4ba75d0e6409e332e70abea76086
add more axes labels
data/boada/analysis_all/redshifts/redshift_stats.py
data/boada/analysis_all/redshifts/redshift_stats.py
import pandas as pd
import pylab as pyl
from glob import glob

files = glob('*.csv')

for f in files:
    results = pd.read_csv(f)
    # good redshifts
    try:
        q0 = pyl.append(q0, results[results.Q == 0].r.values)
        q1 = pyl.append(q1, results[results.Q == 1].r.values)
        x = ~pyl.isnan(results.fiber) & pyl.isnan(results.Q)
        q2 = pyl.append(q2, results.r.values[x.values])
    except NameError:
        q0 = results[results.Q==0].r.values
        q1 = results[results.Q==1].r.values
        x = ~pyl.isnan(results.fiber) & pyl.isnan(results.Q)
        q2 = results.r.values[x.values]

# make a figure
f = pyl.figure(1,figsize=(5,5*(pyl.sqrt(5.)-1.0)/2.0))
ax = f.add_subplot(111)

bins = pyl.linspace(14,22,15)

ax.hist(q2, weights=pyl.zeros_like(q2)+1./q2.size, histtype='step',
        bins=bins, lw=2, label='Q=2')
ax.hist(q1, weights=pyl.zeros_like(q1)+1./q1.size, histtype='step',
        bins=bins, lw=2, label='Q=1')
q0 = q0[~pyl.isnan(q0)]
ax.hist(q0, weights=pyl.zeros_like(q0)+1./q0.size, histtype='step',
        bins=bins, lw=2, label='Q=0')

ax.legend(loc='upper right')
ax.invert_xaxis()
ax.set_ylim(0,0.5)
ax.set_xlabel('$m_r$')
ax.set_ylabel('Fraction of Total')
pyl.show()
import pandas as pd
import pylab as pyl
from glob import glob

files = glob('*.csv')

for f in files:
    results = pd.read_csv(f)
    # good redshifts
    try:
        q0 = pyl.append(q0, results[results.Q == 0].r.values)
        q1 = pyl.append(q1, results[results.Q == 1].r.values)
        x = ~pyl.isnan(results.fiber) & pyl.isnan(results.Q)
        q2 = pyl.append(q2, results.r.values[x.values])
    except NameError:
        q0 = results[results.Q==0].r.values
        q1 = results[results.Q==1].r.values
        x = ~pyl.isnan(results.fiber) & pyl.isnan(results.Q)
        q2 = results.r.values[x.values]

bins = pyl.linspace(14,22,15)

pyl.hist(q2, weights=pyl.zeros_like(q2)+1./q2.size, histtype='step',
         bins=bins, lw=2, label='Q=2')
pyl.hist(q1, weights=pyl.zeros_like(q1)+1./q1.size, histtype='step',
         bins=bins, lw=2, label='Q=1')
q0 = q0[~pyl.isnan(q0)]
pyl.hist(q0, weights=pyl.zeros_like(q0)+1./q0.size, histtype='step',
         bins=bins, lw=2, label='Q=0')

pyl.legend(loc='upper right')
pyl.gca().invert_xaxis()
pyl.ylim(0,0.5)
pyl.xlabel('$m_r$')
Python
0
b8399e48872271ccac6431d9f875238ff509a03a
Increment number of JS files in test_js_load
InvenTree/InvenTree/test_views.py
InvenTree/InvenTree/test_views.py
""" Unit tests for the main web views """ import re import os from django.test import TestCase from django.urls import reverse from django.contrib.auth import get_user_model class ViewTests(TestCase): """ Tests for various top-level views """ username = 'test_user' password = 'test_pass' def setUp(self): # Create a user self.user = get_user_model().objects.create_user(self.username, 'user@email.com', self.password) self.user.set_password(self.password) self.user.save() result = self.client.login(username=self.username, password=self.password) self.assertEqual(result, True) def test_api_doc(self): """ Test that the api-doc view works """ api_url = os.path.join(reverse('index'), 'api-doc') + '/' response = self.client.get(api_url) self.assertEqual(response.status_code, 200) def test_index_redirect(self): """ top-level URL should redirect to "index" page """ response = self.client.get("/") self.assertEqual(response.status_code, 302) def get_index_page(self): """ Retrieve the index page (used for subsequent unit tests) """ response = self.client.get("/index/") self.assertEqual(response.status_code, 200) return str(response.content.decode()) def test_panels(self): """ Test that the required 'panels' are present """ content = self.get_index_page() self.assertIn("<div id='detail-panels'>", content) # TODO: In future, run the javascript and ensure that the panels get created! def test_js_load(self): """ Test that the required javascript files are loaded correctly """ # Change this number as more javascript files are added to the index page N_SCRIPT_FILES = 40 content = self.get_index_page() # Extract all required javascript files from the index page content script_files = re.findall("<script type='text\\/javascript' src=\"([^\"]*)\"><\\/script>", content) self.assertEqual(len(script_files), N_SCRIPT_FILES) # TODO: Request the javascript files from the server, and ensure they are correcty loaded
""" Unit tests for the main web views """ import re import os from django.test import TestCase from django.urls import reverse from django.contrib.auth import get_user_model class ViewTests(TestCase): """ Tests for various top-level views """ username = 'test_user' password = 'test_pass' def setUp(self): # Create a user self.user = get_user_model().objects.create_user(self.username, 'user@email.com', self.password) self.user.set_password(self.password) self.user.save() result = self.client.login(username=self.username, password=self.password) self.assertEqual(result, True) def test_api_doc(self): """ Test that the api-doc view works """ api_url = os.path.join(reverse('index'), 'api-doc') + '/' response = self.client.get(api_url) self.assertEqual(response.status_code, 200) def test_index_redirect(self): """ top-level URL should redirect to "index" page """ response = self.client.get("/") self.assertEqual(response.status_code, 302) def get_index_page(self): """ Retrieve the index page (used for subsequent unit tests) """ response = self.client.get("/index/") self.assertEqual(response.status_code, 200) return str(response.content.decode()) def test_panels(self): """ Test that the required 'panels' are present """ content = self.get_index_page() self.assertIn("<div id='detail-panels'>", content) # TODO: In future, run the javascript and ensure that the panels get created! def test_js_load(self): """ Test that the required javascript files are loaded correctly """ # Change this number as more javascript files are added to the index page N_SCRIPT_FILES = 39 content = self.get_index_page() # Extract all required javascript files from the index page content script_files = re.findall("<script type='text\\/javascript' src=\"([^\"]*)\"><\\/script>", content) self.assertEqual(len(script_files), N_SCRIPT_FILES) # TODO: Request the javascript files from the server, and ensure they are correcty loaded
Python
0.000003
83b6e177fccaef7d62682c25a0e82f29bcba01e6
Remove autofilling "GSSAPI" mechanism in hue.ini
desktop/core/src/desktop/lib/mapr_config_changer.py
desktop/core/src/desktop/lib/mapr_config_changer.py
import re
import os

MAPR_SECURITY = "MAPR-SECURITY"
SECURE = "secure"
SECURITY_ENABLED = 'security_enabled'
MECHANISM = 'mechanism'
MAPR_CLUSTERS_CONF_PATH = "/opt/mapr/conf/mapr-clusters.conf"

templates = {
    MECHANISM: 'none',
    SECURITY_ENABLED: 'false'
}


def read_values_from_mapr_clusters_conf():
    if not os.path.exists(MAPR_CLUSTERS_CONF_PATH):
        return
    mapr_clusters_conf = open(MAPR_CLUSTERS_CONF_PATH, "r").read()
    cluster_props = dict(re.findall(r'(\S+)=(".*?"|\S+)', mapr_clusters_conf))
    templates[SECURITY_ENABLED] = cluster_props[SECURE] if SECURE in cluster_props else "false"
    if templates[SECURITY_ENABLED] == "true":
        templates[MECHANISM] = MAPR_SECURITY


templateRegEx = re.compile(r'^\${(.+?)}')


def change_config(config):
    for key in config:
        if isinstance(config[key], dict):
            change_config(config[key])
        elif type(config[key]) == str:
            match = templateRegEx.search(config[key])
            if (match != None) and (match.group(1) in templates):
                config[key] = templates[match.group(1)]
    return config


def fill_templates(config):
    read_values_from_mapr_clusters_conf()
    change_config(config)
    return config
import re
import os

GSSAPI = "GSSAPI"
MAPR_SECURITY = "MAPR-SECURITY"
KERBEROS_ENABLE = "kerberosEnable"
SECURE = "secure"
SECURITY_ENABLED = 'security_enabled'
MECHANISM = 'mechanism'
MAPR_CLUSTERS_CONF_PATH = "/opt/mapr/conf/mapr-clusters.conf"

templates = {
    MECHANISM: 'none',
    SECURITY_ENABLED: 'false'
}


def read_values_from_mapr_clusters_conf():
    if not os.path.exists(MAPR_CLUSTERS_CONF_PATH):
        return
    mapr_clusters_conf = open(MAPR_CLUSTERS_CONF_PATH, "r").read()
    cluster_props = dict(re.findall(r'(\S+)=(".*?"|\S+)', mapr_clusters_conf))
    templates[SECURITY_ENABLED] = cluster_props[SECURE] if SECURE in cluster_props else "false"
    if templates[SECURITY_ENABLED] == "true":
        templates[MECHANISM] = MAPR_SECURITY
    if (KERBEROS_ENABLE in cluster_props) and (cluster_props[KERBEROS_ENABLE] == "true"):
        templates[MECHANISM] = GSSAPI


templateRegEx = re.compile(r'^\${(.+?)}')


def change_config(config):
    for key in config:
        if isinstance(config[key], dict):
            change_config(config[key])
        elif type(config[key]) == str:
            match = templateRegEx.search(config[key])
            if (match != None) and (match.group(1) in templates):
                config[key] = templates[match.group(1)]
    return config


def fill_templates(config):
    read_values_from_mapr_clusters_conf()
    change_config(config)
    return config
Python
0
13bf2bcbbfd079c75b84a993a86086493d2e6dee
Cleaned up output #2
django_postgres/management/commands/sync_pgviews.py
django_postgres/management/commands/sync_pgviews.py
"""Syncronise SQL Views. """ from django.core.management.base import BaseCommand from django.db import models from django_postgres.view import create_views class Command(BaseCommand): args = '<appname appname ...>' help = 'Creates and Updates all SQL Views' def handle(self, *args, **options): """Run the create_views command. """ if args: self.stdout.write( 'Creating Views for {modules}'.format(modules=args)) for module in args: create_views(module) else: self.handle_noargs(**options) def handle_noargs(self, **options): all_modules = models.get_apps() self.stdout.write( 'Creating Views for all modules: {modules}'.format( modules=all_modules ) ) for module in all_modules: create_views(module)
"""Syncronise SQL Views. """ from django.core.management.base import BaseCommand from django.db import models from django_postgres.view import create_views class Command(BaseCommand): args = '<appname appname ...>' help = 'Creates and Updates all SQL Views' def handle(self, *args, **options): """Run the create_views command. """ self.stdout.write('Creating Views for {modules}'.format(modules=args)) if args: for module in args: create_views(module) else: self.handle_noargs(**options) def handle_noargs(self, **options): all_modules = models.get_apps() self.stdout.write( 'Creating Views for {modules}'.format(modules=all_modules)) for module in all_modules: create_views(module)
Python
0.998498
a44d2a9239e755b9e5726521b11aca9734b89180
Fix crashing when Authorization is not set
ereuse_devicehub/resources/account/domain.py
ereuse_devicehub/resources/account/domain.py
import base64 from bson.objectid import ObjectId from ereuse_devicehub.exceptions import WrongCredentials, BasicError, StandardError from ereuse_devicehub.resources.account.role import Role from ereuse_devicehub.resources.account.settings import AccountSettings from ereuse_devicehub.resources.domain import Domain, ResourceNotFound from ereuse_devicehub.utils import ClassProperty from flask import current_app from flask import g from flask import request from passlib.handlers.sha2_crypt import sha256_crypt from werkzeug.http import parse_authorization_header class AccountDomain(Domain): resource_settings = AccountSettings @staticmethod def get_requested_database(): requested_database = request.path.split('/')[1] if requested_database not in current_app.config['DATABASES']: raise NotADatabase({'requested_path': requested_database}) else: return requested_database # noinspection PyNestedDecorators @ClassProperty @classmethod def actual(cls) -> dict: try: # the values of g are inherited when doing inner requests so we need # to always check the token in the headers (cls.actual_token) # https://stackoverflow.com/questions/20036520/what-is-the-purpose-of-flasks-context-stacks # http://stackoverflow.com/a/33382823/2710757 token = cls.actual_token if not hasattr(g, '_actual_user') or g._actual_user['token'] != token: from flask import request try: from flask import current_app as app g._actual_user = AccountDomain.get_one({'token': token}) g._actual_user['role'] = Role(g._actual_user['role']) except UserNotFound: raise UserIsAnonymous("You need to be logged in.") except TypeError: raise NoUserForGivenToken() return g._actual_user except RuntimeError as e: # Documentation access this variable if str(e) != 'working outside of application context': raise e # noinspection PyNestedDecorators @ClassProperty @classmethod def actual_token(cls) -> str: """Gets the **unhashed** token. Use `hash_token` to hash it.""" try: x = request.headers.environ['HTTP_AUTHORIZATION'] return parse_authorization_header(x)['username'] except (KeyError, TypeError) as e: raise StandardError('The Authorization header is not well written or missing', 400) from e @classmethod def get_one(cls, id_or_filter: dict or ObjectId or str): try: return super().get_one(id_or_filter) except ResourceNotFound: raise UserNotFound() @staticmethod def import_key(key: str) -> str: """ Imports the key for the user :param key: GPG Public Key :raises CannotImportKey: :return: Fingerprint of the imported key """ result = current_app.gpg.import_keys(key) if result.count == 0: raise CannotImportKey() return result.fingerprint[0] @staticmethod def hash_token(token): # Framework needs ':' at the end before send it to client return base64.b64encode(str.encode(token + ':')) @staticmethod def encrypt_password(password: str) -> str: return sha256_crypt.encrypt(password) @staticmethod def verify_password(password: str, original: str) -> bool: return sha256_crypt.verify(password, original) class UserIsAnonymous(WrongCredentials): pass class NoUserForGivenToken(WrongCredentials): pass class NotADatabase(BasicError): status_code = 400 class CannotImportKey(StandardError): status_code = 400 message = "We could not import the key. Make sure it is a valid GPG Public key." class UserNotFound(ResourceNotFound): pass class WrongHeader(StandardError): pass
import base64 from bson.objectid import ObjectId from ereuse_devicehub.exceptions import WrongCredentials, BasicError, StandardError from ereuse_devicehub.resources.account.role import Role from ereuse_devicehub.resources.account.settings import AccountSettings from ereuse_devicehub.resources.domain import Domain, ResourceNotFound from ereuse_devicehub.utils import ClassProperty from flask import current_app from flask import g from flask import request from passlib.handlers.sha2_crypt import sha256_crypt from werkzeug.http import parse_authorization_header class AccountDomain(Domain): resource_settings = AccountSettings @staticmethod def get_requested_database(): requested_database = request.path.split('/')[1] if requested_database not in current_app.config['DATABASES']: raise NotADatabase({'requested_path': requested_database}) else: return requested_database # noinspection PyNestedDecorators @ClassProperty @classmethod def actual(cls) -> dict: try: # the values of g are inherited when doing inner requests so we need # to always check the token in the headers (cls.actual_token) # https://stackoverflow.com/questions/20036520/what-is-the-purpose-of-flasks-context-stacks # http://stackoverflow.com/a/33382823/2710757 token = cls.actual_token if not hasattr(g, '_actual_user') or g._actual_user['token'] != token: from flask import request try: from flask import current_app as app g._actual_user = AccountDomain.get_one({'token': token}) g._actual_user['role'] = Role(g._actual_user['role']) except UserNotFound: raise UserIsAnonymous("You need to be logged in.") except TypeError: raise NoUserForGivenToken() return g._actual_user except RuntimeError as e: # Documentation access this variable if str(e) != 'working outside of application context': raise e # noinspection PyNestedDecorators @ClassProperty @classmethod def actual_token(cls) -> str: """Gets the **unhashed** token. Use `hash_token` to hash it.""" x = request.headers.environ['HTTP_AUTHORIZATION'] header = parse_authorization_header(x) if header is None: raise StandardError('The Authorization header is not well written: ' + x, 400) return header['username'] @classmethod def get_one(cls, id_or_filter: dict or ObjectId or str): try: return super().get_one(id_or_filter) except ResourceNotFound: raise UserNotFound() @staticmethod def import_key(key: str) -> str: """ Imports the key for the user :param key: GPG Public Key :raises CannotImportKey: :return: Fingerprint of the imported key """ result = current_app.gpg.import_keys(key) if result.count == 0: raise CannotImportKey() return result.fingerprint[0] @staticmethod def hash_token(token): # Framework needs ':' at the end before send it to client return base64.b64encode(str.encode(token + ':')) @staticmethod def encrypt_password(password: str) -> str: return sha256_crypt.encrypt(password) @staticmethod def verify_password(password: str, original: str) -> bool: return sha256_crypt.verify(password, original) class UserIsAnonymous(WrongCredentials): pass class NoUserForGivenToken(WrongCredentials): pass class NotADatabase(BasicError): status_code = 400 class CannotImportKey(StandardError): status_code = 400 message = "We could not import the key. Make sure it is a valid GPG Public key." class UserNotFound(ResourceNotFound): pass class WrongHeader(StandardError): pass
Python
0.000004
dba78b02daa5674769cccf56b867f5266d6ec0f1
Add voluptuous to locative (#3254)
homeassistant/components/device_tracker/locative.py
homeassistant/components/device_tracker/locative.py
""" Support for the Locative platform. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.locative/ """ import logging from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY, STATE_NOT_HOME from homeassistant.components.http import HomeAssistantView # pylint: disable=unused-import from homeassistant.components.device_tracker import ( # NOQA DOMAIN, PLATFORM_SCHEMA) _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['http'] def setup_scanner(hass, config, see): """Setup an endpoint for the Locative application.""" hass.wsgi.register_view(LocativeView(hass, see)) return True class LocativeView(HomeAssistantView): """View to handle locative requests.""" url = '/api/locative' name = 'api:locative' def __init__(self, hass, see): """Initialize Locative url endpoints.""" super().__init__(hass) self.see = see def get(self, request): """Locative message received as GET.""" return self.post(request) def post(self, request): """Locative message received.""" # pylint: disable=too-many-return-statements data = request.values if 'latitude' not in data or 'longitude' not in data: return ('Latitude and longitude not specified.', HTTP_UNPROCESSABLE_ENTITY) if 'device' not in data: _LOGGER.error('Device id not specified.') return ('Device id not specified.', HTTP_UNPROCESSABLE_ENTITY) if 'id' not in data: _LOGGER.error('Location id not specified.') return ('Location id not specified.', HTTP_UNPROCESSABLE_ENTITY) if 'trigger' not in data: _LOGGER.error('Trigger is not specified.') return ('Trigger is not specified.', HTTP_UNPROCESSABLE_ENTITY) device = data['device'].replace('-', '') location_name = data['id'].lower() direction = data['trigger'] if direction == 'enter': self.see(dev_id=device, location_name=location_name) return 'Setting location to {}'.format(location_name) elif direction == 'exit': current_state = self.hass.states.get( '{}.{}'.format(DOMAIN, device)) if current_state is None or current_state.state == location_name: self.see(dev_id=device, location_name=STATE_NOT_HOME) return 'Setting location to not home' else: # Ignore the message if it is telling us to exit a zone that we # aren't currently in. This occurs when a zone is entered # before the previous zone was exited. The enter message will # be sent first, then the exit message will be sent second. return 'Ignoring exit from {} (already in {})'.format( location_name, current_state) elif direction == 'test': # In the app, a test message can be sent. Just return something to # the user to let them know that it works. return 'Received test message.' else: _LOGGER.error('Received unidentified message from Locative: %s', direction) return ('Received unidentified message: {}'.format(direction), HTTP_UNPROCESSABLE_ENTITY)
""" Support for the Locative platform. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.locative/ """ import logging from homeassistant.components.device_tracker import DOMAIN from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY, STATE_NOT_HOME from homeassistant.components.http import HomeAssistantView _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['http'] def setup_scanner(hass, config, see): """Setup an endpoint for the Locative application.""" hass.wsgi.register_view(LocativeView(hass, see)) return True class LocativeView(HomeAssistantView): """View to handle locative requests.""" url = "/api/locative" name = "api:locative" def __init__(self, hass, see): """Initialize Locative url endpoints.""" super().__init__(hass) self.see = see def get(self, request): """Locative message received as GET.""" return self.post(request) def post(self, request): """Locative message received.""" # pylint: disable=too-many-return-statements data = request.values if 'latitude' not in data or 'longitude' not in data: return ("Latitude and longitude not specified.", HTTP_UNPROCESSABLE_ENTITY) if 'device' not in data: _LOGGER.error("Device id not specified.") return ("Device id not specified.", HTTP_UNPROCESSABLE_ENTITY) if 'id' not in data: _LOGGER.error("Location id not specified.") return ("Location id not specified.", HTTP_UNPROCESSABLE_ENTITY) if 'trigger' not in data: _LOGGER.error("Trigger is not specified.") return ("Trigger is not specified.", HTTP_UNPROCESSABLE_ENTITY) device = data['device'].replace('-', '') location_name = data['id'].lower() direction = data['trigger'] if direction == 'enter': self.see(dev_id=device, location_name=location_name) return "Setting location to {}".format(location_name) elif direction == 'exit': current_state = self.hass.states.get( "{}.{}".format(DOMAIN, device)) if current_state is None or current_state.state == location_name: self.see(dev_id=device, location_name=STATE_NOT_HOME) return "Setting location to not home" else: # Ignore the message if it is telling us to exit a zone that we # aren't currently in. This occurs when a zone is entered # before the previous zone was exited. The enter message will # be sent first, then the exit message will be sent second. return 'Ignoring exit from {} (already in {})'.format( location_name, current_state) elif direction == 'test': # In the app, a test message can be sent. Just return something to # the user to let them know that it works. return "Received test message." else: _LOGGER.error("Received unidentified message from Locative: %s", direction) return ("Received unidentified message: {}".format(direction), HTTP_UNPROCESSABLE_ENTITY)
Python
0.00002
06277ea30094ff6669537f2365b6ad9f5a19642b
Update laundry.py
fxcmminer_v1.1/fxcmminer/cleaning/laundry.py
fxcmminer_v1.1/fxcmminer/cleaning/laundry.py
from event import CleanedDataEvent


class DataCleaner(object):
    """
    The DataCleaner class is the process of correcting (or removing)
    corrupt or inaccurate records from a record set and refers to
    identifying incomplete, incorrect, inaccurate or irrelevant parts
    of the data and then replacing, modifying, or deleting the dirty
    or coarse data.

    Most of the above is not implemented in the code below.
    """
    def __init__(self, events_queue):
        """
        Initialize varables
        """
        self.events_queue = events_queue

    def _remove_duplicates(self, data):
        """
        Drop any duplicates in the Datetime Index
        """
        return data.reset_index().drop_duplicates(
            subset='date', keep='last').set_index('date')

    def _remove_not_a_number(self, data):
        """
        Drop any rows that contain NaN values
        """
        return data.dropna()

    def _remove_incorrect_values(
        self, data, ao='askopen',ah='askhigh', al='asklow', ac='askclose',
        bo='bidopen',bh='bidhigh', bl='bidlow', bc='bidclose', v='volume'
    ):
        """
        Removes errors from the open high low close values
        """
        data = data.loc[data[ac] <= data[ah]]
        data = data.loc[data[ac] >= data[al]]
        data = data.loc[data[ao] <= data[ah]]
        data = data.loc[data[ao] >= data[al]]
        data = data.loc[data[ah] >= data[al]]
        data = data.loc[data[bc] <= data[bh]]
        data = data.loc[data[bc] >= data[bl]]
        data = data.loc[data[bo] <= data[bh]]
        data = data.loc[data[bo] >= data[bl]]
        data = data.loc[data[bh] >= data[bl]]
        data = data.loc[data[v] >= 0]

        return data

    def clean_data(self, event):
        """
        Encapsulates the above cleaning processes
        """
        data = self._remove_not_a_number(event.data)
        data = self._remove_incorrect_values(data)
        data = self._remove_duplicates(data)

        self.events_queue.put(CleanedDataEvent(
            data, event.instrument, event.time_frame))
from event import CleanedDataEvent


class DataCleaner(object):
    """ Basic data cleaning """
    def __init__(self, events_queue):
        """
        """
        self.events_queue = events_queue

    def _remove_duplicates(self, data):
        """ Drop any duplicates in the Datetime Index """
        return data.reset_index().drop_duplicates(
            subset='date', keep='last').set_index('date')

    def _remove_not_a_number(self, data):
        """ Drop any rows that contain NaN values. """
        return data.dropna()

    def _remove_incorrect_values(
        self, data, ao='askopen',ah='askhigh', al='asklow', ac='askclose',
        bo='bidopen',bh='bidhigh', bl='bidlow', bc='bidclose', v='volume'
    ):
        """ Removes errors from the open high low close values. """
        data = data.loc[data[ac] <= data[ah]]
        data = data.loc[data[ac] >= data[al]]
        data = data.loc[data[ao] <= data[ah]]
        data = data.loc[data[ao] >= data[al]]
        data = data.loc[data[ah] >= data[al]]
        data = data.loc[data[bc] <= data[bh]]
        data = data.loc[data[bc] >= data[bl]]
        data = data.loc[data[bo] <= data[bh]]
        data = data.loc[data[bo] >= data[bl]]
        data = data.loc[data[bh] >= data[bl]]
        data = data.loc[data[v] >= 0]

        return data

    def clean_data(self, event):
        data = self._remove_not_a_number(event.data)
        data = self._remove_incorrect_values(data)
        data = self._remove_duplicates(data)

        self.events_queue.put(CleanedDataEvent(
            data, event.instrument, event.time_frame))
Python
0.000001
4844ba065d86fdce3f01b7b191ecc6a4ef43661e
Add autoclass directives to Visualization/__init__.py. This will enable Visualization module methods to appear in function reference.
OpenPNM/Visualization/__init__.py
OpenPNM/Visualization/__init__.py
r""" ******************************************************************************* :mod:`OpenPNM.Visualization`: Network Visualization ******************************************************************************* .. module:: OpenPNM.Visualization Contents -------- tbd .. note:: n/a Classes ------- .. autoclass:: GenericVisualization :members: :undoc-members: :show-inheritance: .. autoclass:: VTK :members: :undoc-members: :show-inheritance: """ from __GenericVisualization__ import GenericVisualization from __VTK__ import VTK
r""" ******************************************************************************* :mod:`OpenPNM.Visualization`: Network Visualization ******************************************************************************* .. module:: OpenPNM.Visualization Contents -------- tbd .. note:: n/a Import ------ >>> import OpenPNM as PNM >>> tmp=PNM.Visualization .... Submodules ---------- :: None --- No subpackages at the moment """ from __GenericVisualization__ import GenericVisualization from __VTK__ import VTK
Python
0.000034
6e6ccc8566fe90323d900fd0ebd38f45ad4d0b63
Update TipCalculator.py
PracticePrograms/TipCalculator.py
PracticePrograms/TipCalculator.py
'''
Author : DORIAN JAVA BROWN
Version : N/A
Copyright : All Rights Reserve; You may use, distribute and modify this code.
Description : This program provides the user with options on how much tip the customer should leave the waiter/waitress
'''
import os

total = 21.49

def cls():
    os.system('cls' if os.name=='nt' else 'clear')

# menu
print('\n\n\t\t JUICEY BURGER\n\n')
print('')
print('1 Juicey Burger $ 18.99')
print('1 Orange Drink $ 1.00')
print('-------------------------------------------')
print('')
print('Sub Total: $ 19.99')
print('Local Tax: $ 1.50')
print('Bill Total: $ 21.49')
print('\n\n')

answer = raw_input('Correct ? ')

if answer == 'YES' or answer == 'Yes' or answer == 'yes' :
    cls()

    # tip suggestion list
    print('\n\n\t Tip Suggestions')
    print('----------------------------------')
    print('A) %%20 $ %0.3f' %((total * .20)))
    print('B) %%15 $ %0.3f' %((total * .15)))
    print('C) %%10 $ %0.3f' %((total * .10)))
    print('D) %%5 $ %0.3f' %((total * .05)))

elif answer == 'NO' or answer == 'No' or answer == 'no' :
    print ('\n\n\t\t please wait one moment for assistance...\n\n')
else:
    print('\n\n\t\t error:. invaild value \n\n')

#https://www.youtube.com/watch?annotation_id=annotation_3770292585&feature=iv&src_vid=bguKhMnvmb8&v=LtGEp9c6Z-U
'''
Author : DORIAN JAVA BROWN
Version : N/A
Copyright : All Rights Reserve; You may use, distribute and modify this code.
Description : This program provides the user with options on how much tip the customer should leave the waiter/waitress
'''
import os

total = 21.49

def cls():
    os.system('cls' if os.name=='nt' else 'clear')

# menu
print('\n\n\t\t JUICEY BURGER\n\n')
print('')
print('1 Juicey Burger $ 18.99')
print('1 Orange Drink $ 1.00')
print('-------------------------------------------')
print('')
print('Sub Total: $ 19.99')
print('Local Tax: $ 1.50')
print('Bill Total: $ 21.49')
print('\n\n')

answer = raw_input('Correct ? ')

if answer == 'YES' or answer == 'Yes' or answer == 'yes' :
    cls()

    # tip suggestion list
    print('\n\n\t Tip Suggestions')
    print('----------------------------------')
    print('A) %%20 $ %0.3f' %((total * .20)))
    print('B) %%20 $ %0.3f' %((total * .20)))
    print('C) %%20 $ %0.3f' %((total * .20)))
    print('D) %%20 $ %0.3f' %((total * .20)))

elif answer == 'NO' or answer == 'No' or answer == 'no' :
    print ('\n\n\t\t please wait one moment for assistance...\n\n')
else:
    print('\n\n\t\t error:. invaild value \n\n')

#https://www.youtube.com/watch?annotation_id=annotation_3770292585&feature=iv&src_vid=bguKhMnvmb8&v=LtGEp9c6Z-U
Python
0
953ce15f2a3b2ffdc0e27d95afbe4f8cda2cdbfd
set default behavior to add datacenters
SoftLayer/CLI/image/datacenter.py
SoftLayer/CLI/image/datacenter.py
"""Edit details of an image.""" # :license: MIT, see LICENSE for more details. import click import SoftLayer from SoftLayer.CLI import environment from SoftLayer.CLI import helpers @click.command() @click.argument('identifier') @click.option('--add/--remove', default=True, help="To add or remove Datacenter") @click.argument('locations', nargs=-1, required=True) @environment.pass_env def cli(env, identifier, add, locations): """Add/Remove datacenter of an image.""" image_mgr = SoftLayer.ImageManager(env.client) image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image') if add: result = image_mgr.add_locations(image_id, locations) else: result = image_mgr.remove_locations(image_id, locations) env.fout(result)
"""Edit details of an image.""" # :license: MIT, see LICENSE for more details. import click import SoftLayer from SoftLayer.CLI import environment from SoftLayer.CLI import helpers @click.command() @click.argument('identifier') @click.option('--add/--remove', default=False, help="To add or remove Datacenter") @click.argument('locations', nargs=-1, required=True) @environment.pass_env def cli(env, identifier, add, locations): """Add/Remove datacenter of an image.""" image_mgr = SoftLayer.ImageManager(env.client) image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image') if add: result = image_mgr.add_locations(image_id, locations) else: result = image_mgr.remove_locations(image_id, locations) env.fout(result)
Python
0
1c31d23dd95fb4ca8a5daaf37aaa7d75472f1d24
fix run time error: list indices must be integers not str (#611)
src/harness/reference_models/pre_iap_filtering/pre_iap_filtering.py
src/harness/reference_models/pre_iap_filtering/pre_iap_filtering.py
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
==================================================================================
This is the main Pre-IAP reference model which invokes the sub pre-IAP reference
models to filter out grants and CBSDs before IAP model is invoked.
==================================================================================
"""

from reference_models.pre_iap_filtering import fss_purge
from reference_models.pre_iap_filtering import inter_sas_duplicate_grant
from reference_models.pre_iap_filtering import zone_purge
from reference_models.pre_iap_filtering import pre_iap_util


def preIapReferenceModel(protected_entities, sas_uut_fad, sas_test_harness_fads):
    """ The main function that invokes all pre-IAP filtering models.

    The grants/CBSDs to be purged are removed from the input parameters.

    Args:
        protected_entities: A dictionary containing the list of protected entities.
            The key is a protected enity type and the value is a list of corresponding
            protected entity records. The format is {'entityName':[record1, record2]}.
        sas_uut_fad: A FullActivityDump object containing the FAD records of SAS UUT.
        sas_test_harness_fads: A list of FullActivityDump objects containing the FAD
            records from SAS test harnesses.
    """
    # Invoke Inter SAS duplicate grant purge list reference model
    inter_sas_duplicate_grant.interSasDuplicateGrantPurgeReferenceModel(
        sas_uut_fad, sas_test_harness_fads)

    # Invoke PPA, EXZ, GWPZ, and FSS+GWBL purge list reference models
    # Initialize expected keys in protected_entities to empty array if type does not exist
    for key in ['gwblRecords', 'fssRecords', 'ppaRecords', 'palRecords', 'gwpzRecords']:
        if key not in protected_entities:
            protected_entities[key] = []
    list_of_fss_neighboring_gwbl = pre_iap_util.getFssNeighboringGwbl(
        protected_entities['gwblRecords'],
        protected_entities['fssRecords'])
    zone_purge.zonePurgeReferenceModel(sas_uut_fad,
                                       sas_test_harness_fads,
                                       protected_entities['ppaRecords'],
                                       protected_entities['palRecords'],
                                       protected_entities['gwpzRecords'],
                                       list_of_fss_neighboring_gwbl)

    # Invoke FSS purge list reference model
    if 'fssRecords' in protected_entities:
        fss_purge.fssPurgeReferenceModel(sas_uut_fad, sas_test_harness_fads,
                                         protected_entities['fssRecords'])
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
==================================================================================
This is the main Pre-IAP reference model which invokes the sub pre-IAP reference
models to filter out grants and CBSDs before IAP model is invoked.
==================================================================================
"""

from reference_models.pre_iap_filtering import fss_purge
from reference_models.pre_iap_filtering import inter_sas_duplicate_grant
from reference_models.pre_iap_filtering import zone_purge
from reference_models.pre_iap_filtering import pre_iap_util


def preIapReferenceModel(protected_entities, sas_uut_fad, sas_test_harness_fads):
  """ The main function that invokes all pre-IAP filtering models.

  The grants/CBSDs to be purged are removed from the input parameters.

  Args:
    protected_entities: A dictionary containing the list of protected entities.
      The key is a protected enity type and the value is a list of corresponding
      protected entity records. The format is {'entityName':[record1, record2]}.
    sas_uut_fad: A FullActivityDump object containing the FAD records of SAS UUT.
    sas_test_harness_fads: A list of FullActivityDump objects containing the FAD
      records from SAS test harnesses.
  """
  # Invoke Inter SAS duplicate grant purge list reference model
  inter_sas_duplicate_grant.interSasDuplicateGrantPurgeReferenceModel(
      sas_uut_fad, sas_test_harness_fads)

  # Invoke PPA, EXZ, GWPZ, and FSS+GWBL purge list reference models
  list_of_fss_neighboring_gwbl = pre_iap_util.getFssNeighboringGwbl(
      protected_entities['gwblRecords'],
      protected_entities['fssRecords'])
  zone_purge.zonePurgeReferenceModel(sas_uut_fad, sas_test_harness_fads,
                                     protected_entities['ppaRecords'],
                                     protected_entities['palRecords'],
                                     protected_entities['gwpzRecords'],
                                     list_of_fss_neighboring_gwbl)

  # Invoke FSS purge list reference model
  if 'fssRecords' in protected_entities:
    fss_purge.fssPurgeReferenceModel(sas_uut_fad, sas_test_harness_fads,
                                     protected_entities['fssRecords'])
Python
0.000014
cdd28cba2c6299e18b5d5221f8d10b8649c1faed
Use numpy
tests/chainer_tests/functions_tests/array_tests/test_expand_dims.py
tests/chainer_tests/functions_tests/array_tests/test_expand_dims.py
import unittest

import numpy

import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr


@testing.parameterize(
    {'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': 0},
    {'in_shape': (3, 2), 'out_shape': (3, 1, 2), 'axis': 1},
    {'in_shape': (3, 2), 'out_shape': (3, 2, 1), 'axis': 2},
    {'in_shape': (3, 2), 'out_shape': (3, 2, 1), 'axis': -1},
    {'in_shape': (3, 2), 'out_shape': (3, 1, 2), 'axis': -2},
    {'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': -3},
)
class TestExpandDims(unittest.TestCase):

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, self.in_shape) \
            .astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, self.out_shape) \
            .astype(numpy.float32)

    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.expand_dims(x, self.axis)
        self.assertEqual(y.data.shape, self.out_shape)
        y_expect = numpy.expand_dims(cuda.to_cpu(x_data), self.axis)
        numpy.testing.assert_array_equal(cuda.to_cpu(y.data), y_expect)

    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.expand_dims(x, self.axis)
        y.grad = y_grad
        y.backward()
        func = y.creator
        f = lambda: func.forward((x_data,))
        gx, = gradient_check.numerical_grad(f, (x_data,), (y_grad,))
        gradient_check.assert_allclose(cuda.to_cpu(x.grad), cuda.to_cpu(gx))

    def test_forward_cpu(self):
        self.check_forward(self.x)

    @attr.gpu
    def test_forward(self):
        self.check_forward(cuda.to_gpu(self.x))

    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    def test_backward(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))


testing.run_module(__name__, __file__)
import unittest

import numpy

import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr


@testing.parameterize(
    {'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': 0},
    {'in_shape': (3, 2), 'out_shape': (3, 1, 2), 'axis': 1},
    {'in_shape': (3, 2), 'out_shape': (3, 2, 1), 'axis': 2},
    {'in_shape': (3, 2), 'out_shape': (3, 2, 1), 'axis': -1},
    {'in_shape': (3, 2), 'out_shape': (3, 1, 2), 'axis': -2},
    {'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': -3},
)
class TestExpandDims(unittest.TestCase):

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, self.in_shape) \
            .astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, self.out_shape) \
            .astype(numpy.float32)

    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.expand_dims(x, self.axis)
        self.assertEqual(y.data.shape, self.out_shape)
        y_expect = numpy.expand_dims(cuda.to_cpu(x_data), self.axis)
        cuda.cupy.testing.assert_array_equal(y.data, y_expect)

    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.expand_dims(x, self.axis)
        y.grad = y_grad
        y.backward()
        func = y.creator
        f = lambda: func.forward((x_data,))
        gx, = gradient_check.numerical_grad(f, (x_data,), (y_grad,))
        gradient_check.assert_allclose(cuda.to_cpu(x.grad), cuda.to_cpu(gx))

    def test_forward_cpu(self):
        self.check_forward(self.x)

    @attr.gpu
    def test_forward(self):
        self.check_forward(cuda.to_gpu(self.x))

    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    def test_backward(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))


testing.run_module(__name__, __file__)
Python
0.00006
d1fa13bdf3ca7d1c4eabdaace5758d6b031ef909
Set up the download url, which I forgot about.
exampleSettings/urls.py
exampleSettings/urls.py
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    url(r'^$', 'post.views.list', name='home'),
    (r'^post/(.*)/$', 'post.views.post'),
    # Examples:
    # url(r'^$', 'exampleSettings.views.home', name='home'),
    # url(r'^exampleSettings/', include('exampleSettings.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    (r'^edit/(.*)/$', 'post.views.edit'),
    (r'^create/$', 'post.views.create'),
    (r'^tag/(.*)/$', 'post.views.tag'),
    (r'^tagcloud/$', 'post.views.tagcloud' ),

    #Search urls
    (r'^search/', include('haystack.urls')),

    #captcha urls
    url(r'^captcha/', include('captcha.urls')),

    (r'^register/$', 'userProfile.views.register'),
    (r'^login/$', 'userProfile.views.login_user'),
    (r'^logout/$', 'userProfile.views.logout_user'),
    (r'^editProfile/$', 'userProfile.views.edit'),

    #preview pages for the STL files.
    (r'^thumbs/jsc3d/(.*)', 'thumbnailer.views.stlthumb'),
    (r'^preview/jsc3d/(.*)', 'thumbnailer.views.stlview'),

    url(r'', include('multiuploader.urls')),
    (r'^taggit_autosuggest/', include('taggit_autosuggest.urls')),

    #user profile pages
    url(r'^userProfile/', include('userProfile.urls')),
    (r'^editUser/', 'userProfile.views.edit'),

    (r'^download/(.*)/$', 'filemanager.views.download'),

)+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    url(r'^$', 'post.views.list', name='home'),
    (r'^post/(.*)/$', 'post.views.post'),
    # Examples:
    # url(r'^$', 'exampleSettings.views.home', name='home'),
    # url(r'^exampleSettings/', include('exampleSettings.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    (r'^edit/(.*)/$', 'post.views.edit'),
    (r'^create/$', 'post.views.create'),
    (r'^tag/(.*)/$', 'post.views.tag'),
    (r'^tagcloud/$', 'post.views.tagcloud' ),
    (r'^editorg/(.*)/$', 'organization.views.orgedit'),

    #Search urls
    (r'^search/', include('haystack.urls')),

    #captcha urls
    url(r'^captcha/', include('captcha.urls')),

    (r'^register/$', 'userProfile.views.register'),
    (r'^login/$', 'userProfile.views.login_user'),
    (r'^logout/$', 'userProfile.views.logout_user'),
    (r'^editProfile/$', 'userProfile.views.edit'),

    #preview pages for the STL files.
    (r'^thumbs/jsc3d/(.*)', 'thumbnailer.views.stlthumb'),
    (r'^preview/jsc3d/(.*)', 'thumbnailer.views.stlview'),

    url(r'', include('multiuploader.urls')),
    (r'^taggit_autosuggest/', include('taggit_autosuggest.urls')),

    #user profile pages
    url(r'^userProfile/', include('userProfile.urls')),
    (r'^editUser/', 'userProfile.views.edit'),

)+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
Python
0
5b1eefb315cd9094de8c8827e0f3a8c0eeefe95a
delete view: make sure item is closed before it is removed from storage
bepasty/views/delete.py
bepasty/views/delete.py
# Copyright: 2014 Dennis Schmalacker <github@progde.de>
# License: BSD 2-clause, see LICENSE for details.

import errno

from flask import current_app, redirect, url_for, render_template, abort
from flask.views import MethodView

from werkzeug.exceptions import NotFound

from . import blueprint
from ..utils.permissions import *


class DeleteView(MethodView):
    def get(self, name):
        if not may(DELETE):
            abort(403)

        try:
            with current_app.storage.open(name) as item:
                if item.meta.get('locked'):
                    error = 'File locked.'
                elif not item.meta.get('complete'):
                    error = 'Upload incomplete. Try again later.'
                else:
                    error = None
                if error:
                    return render_template('display_error.html', name=name, item=item, error=error), 409
            current_app.storage.remove(name)
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                return render_template('file_not_found.html'), 404
            raise

        return redirect(url_for('bepasty.display', name=name))


blueprint.add_url_rule('/<itemname:name>/+delete', view_func=DeleteView.as_view('delete'))
# Copyright: 2014 Dennis Schmalacker <github@progde.de>
# License: BSD 2-clause, see LICENSE for details.

import errno

from flask import current_app, redirect, url_for, render_template, abort
from flask.views import MethodView

from werkzeug.exceptions import NotFound

from . import blueprint
from ..utils.permissions import *


class DeleteView(MethodView):
    def get(self, name):
        if not may(DELETE):
            abort(403)

        try:
            item = current_app.storage.open(name)
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                return render_template('file_not_found.html'), 404
            raise

        if item.meta.get('locked'):
            error = 'File locked.'
        elif not item.meta.get('complete'):
            error = 'Upload incomplete. Try again later.'
        else:
            error = None
        if error:
            try:
                return render_template('display_error.html', name=name, item=item, error=error), 409
            finally:
                item.close()

        try:
            item = current_app.storage.remove(name)
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                return render_template('file_not_found.html'), 404

        return redirect(url_for('bepasty.display', name=name))


blueprint.add_url_rule('/<itemname:name>/+delete', view_func=DeleteView.as_view('delete'))
Python
0
aa5259efac8f7fbe8e2afd263198feaaa45fc4c3
Change test for running on Tingbot
tingbot/platform_specific/__init__.py
tingbot/platform_specific/__init__.py
import platform, os


def is_tingbot():
    """
    Return True if running as a tingbot.
    """
    # TB_RUN_ON_LCD is an environment variable set by tbprocessd when running tingbot apps.
    return 'TB_RUN_ON_LCD' in os.environ

if platform.system() == 'Darwin':
    from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
    from pi import fixup_env, create_main_surface, register_button_callback
else:
    from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
import platform


def is_tingbot():
    """return True if running as a tingbot. We can update this function to be more smart in future"""
    return platform.machine().startswith('armv71')

if platform.system() == 'Darwin':
    from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
    from pi import fixup_env, create_main_surface, register_button_callback
else:
    from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
Python
0
6f0676877f5c23c0e6d04422cb8365e16958eb82
document potential for streaming
camerav4.py
camerav4.py
import picamera
from picamera import PiCamera
import time
from datetime import datetime
import os.path
from subprocess32 import Popen

print "\nSecurity Camera Logger v3 | Ben Broce & William Hampton\n\n"
print "Streams video to vids/vidstream.h264 | Captures to pics/[timestamp].jpg"
print "Ctrl-C quits.\n\n"

stream = raw_input("Should I stream video (y/n)? ")
length = float(raw_input("How long should I run (in minutes): "))*60
interval = float(raw_input("How often should I take a picture (in seconds): "))

print "Running..."

#http://www.raspberry-projects.com/pi/pi-hardware/raspberry-pi-camera/streaming-video-using-vlc-player
#http://www.diveintopython.net/scripts_and_streams/stdin_stdout_stderr.html
#Ouput video (record) => stream => stdout => | => cvlc livestream => browser
if stream == "y":
    Popen(["./livestream.sh"])

camera = PiCamera()
camera.annotate_background = picamera.Color('black')
camera.rotation = 180
camera.resolution = (640, 480)
counter = 0

try:
    camera.start_preview()
    while (counter <= length):
        timestamp = datetime.now().strftime("%m-%d-%Y_%H:%M:%S")
        camera.annotate_text = timestamp
        path = '/var/www/PiCamServer/pics/' + timestamp + '.jpg'
        camera.capture(path, use_video_port=True)
        time.sleep(interval)
        counter += interval
finally:
    print "Exiting..."
    camera.stop_preview()
import picamera
from picamera import PiCamera
import time
from datetime import datetime
import os.path
from subprocess32 import Popen

print "\nSecurity Camera Logger v3 | Ben Broce & William Hampton\n\n"
print "Streams video to vids/vidstream.h264 | Captures to pics/[timestamp].jpg"
print "Ctrl-C quits.\n\n"

stream = raw_input("Should I stream video (y/n)? ")
length = float(raw_input("How long should I run (in minutes): "))*60
interval = float(raw_input("How often should I take a picture (in seconds): "))

print "Running..."

if stream == "y":
    Popen(["./livestream.sh"])

camera = PiCamera()
camera.annotate_background = picamera.Color('black')
camera.rotation = 180
camera.resolution = (640, 480)
counter = 0

try:
    camera.start_preview()
    while (counter <= length):
        timestamp = datetime.now().strftime("%m-%d-%Y_%H:%M:%S")
        camera.annotate_text = timestamp
        path = '/var/www/PiCamServer/pics/' + timestamp + '.jpg'
        camera.capture(path, use_video_port=True)
        time.sleep(interval)
        counter += interval
finally:
    print "Exiting..."
    camera.stop_preview()
Python
0
37e2813d2b972100e7a562fe99cde72c99f5a544
Add handler to create token when a user is created
popit/signals/handlers.py
popit/signals/handlers.py
from django.db.models.signals import pre_delete from django.db.models.signals import post_save from django.dispatch import receiver from django.contrib.auth.models import User from popit.models import Person from popit.models import Organization from popit.models import Membership from popit.models import Post from popit.serializers import PersonSerializer from popit.serializers import OrganizationSerializer from popit.serializers import MembershipSerializer from popit.serializers import PostSerializer from popit_search.utils import search from rest_framework.authtoken.models import Token @receiver(post_save, sender=Person) def person_save_handler(sender, instance, created, raw, using, update_fields, **kwargs): if raw: return language_code = instance.language_code id = instance.id query = "id:%s AND language_code:%s" % (id, language_code) indexer = search.SerializerSearch("person") check = indexer.search(query, language=language_code) if not check: indexer.add(instance, PersonSerializer) else: indexer.update(instance, PersonSerializer) # use pre_delete event is a good idea, this ensure data exist before data is emoved in indexer @receiver(pre_delete, sender=Person) def person_delete_handler(sender, instance, using, **kwargs): indexer = search.SerializerSearch("person") indexer.delete(instance) @receiver(post_save, sender=Organization) def organization_save_handler(sender, instance, created, raw, using, update_fields, **kwargs): if raw: return language_code = instance.language_code id = instance.id query = "id:%s AND language_code:%s" % (id, language_code) indexer = search.SerializerSearch("organization") check = indexer.search(query, language=language_code) if not check: indexer.add(instance, OrganizationSerializer) else: indexer.update(instance, OrganizationSerializer) @receiver(pre_delete, sender=Organization) def organization_delete_handler(sender, instance, using, **kwargs): indexer = search.SerializerSearch("organization") indexer.delete(instance) @receiver(post_save, sender=Membership) def membership_save_handler(sender, instance, created, raw, using, update_fields, **kwargs): if raw: return language_code = instance.language_code id = instance.id query = "id:%s AND language_code:%s" % (id, language_code) indexer = search.SerializerSearch("membership") check = indexer.search(query, language=language_code) if not check: indexer.add(instance, MembershipSerializer) else: indexer.update(instance, MembershipSerializer) @receiver(pre_delete, sender=Membership) def membership_delete_handler(sender, instance, using, **kwargs): indexer = search.SerializerSearch("membership") indexer.delete(instance) @receiver(post_save, sender=Post) def post_save_handler(sender, instance, created, raw, using, update_fields, **kwargs): if raw: return language_code = instance.language_code id = instance.id query = "id:%s AND language_code:%s" % (id, language_code) indexer = search.SerializerSearch("post") check = indexer.search(query, language=language_code) if not check: indexer.add(instance, PostSerializer) else: indexer.update(instance, PostSerializer) @receiver(pre_delete, sender=Post) def post_delete_handler(sender, instance, using, **kwargs): indexer = search.SerializerSearch("post") indexer.delete(instance) @receiver(post_save, sender=User) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: Token.objects.create(user=instance)
# TODO: Implement for each from django.db.models.signals import pre_delete from django.db.models.signals import post_save from django.dispatch import receiver from popit.models import Person from popit.models import Organization from popit.models import Membership from popit.models import Post from popit.serializers import PersonSerializer from popit.serializers import OrganizationSerializer from popit.serializers import MembershipSerializer from popit.serializers import PostSerializer from popit_search.utils import search @receiver(post_save, sender=Person) def person_save_handler(sender, instance, created, raw, using, update_fields, **kwargs): if raw: return language_code = instance.language_code id = instance.id query = "id:%s AND language_code:%s" % (id, language_code) indexer = search.SerializerSearch("person") check = indexer.search(query, language=language_code) if not check: indexer.add(instance, PersonSerializer) else: indexer.update(instance, PersonSerializer) # use pre_delete event is a good idea, this ensure data exist before data is emoved in indexer @receiver(pre_delete, sender=Person) def person_delete_handler(sender, instance, using, **kwargs): indexer = search.SerializerSearch("person") indexer.delete(instance) @receiver(post_save, sender=Organization) def organization_save_handler(sender, instance, created, raw, using, update_fields, **kwargs): if raw: return language_code = instance.language_code id = instance.id query = "id:%s AND language_code:%s" % (id, language_code) indexer = search.SerializerSearch("organization") check = indexer.search(query, language=language_code) if not check: indexer.add(instance, OrganizationSerializer) else: indexer.update(instance, OrganizationSerializer) @receiver(pre_delete, sender=Organization) def organization_delete_handler(sender, instance, using, **kwargs): indexer = search.SerializerSearch("organization") indexer.delete(instance) @receiver(post_save, sender=Membership) def membership_save_handler(sender, instance, created, raw, using, update_fields, **kwargs): if raw: return language_code = instance.language_code id = instance.id query = "id:%s AND language_code:%s" % (id, language_code) indexer = search.SerializerSearch("membership") check = indexer.search(query, language=language_code) if not check: indexer.add(instance, MembershipSerializer) else: indexer.update(instance, MembershipSerializer) @receiver(pre_delete, sender=Membership) def membership_delete_handler(sender, instance, using, **kwargs): indexer = search.SerializerSearch("membership") indexer.delete(instance) @receiver(post_save, sender=Post) def post_save_handler(sender, instance, created, raw, using, update_fields, **kwargs): if raw: return language_code = instance.language_code id = instance.id query = "id:%s AND language_code:%s" % (id, language_code) indexer = search.SerializerSearch("post") check = indexer.search(query, language=language_code) if not check: indexer.add(instance, PostSerializer) else: indexer.update(instance, PostSerializer) @receiver(pre_delete, sender=Post) def post_delete_handler(sender, instance, using, **kwargs): indexer = search.SerializerSearch("post") indexer.delete(instance)
Python
0
48a30aade7e606e671db44e8ee69092c0e67b363
Complete lc051_n_queens.py
lc051_n_queens.py
lc051_n_queens.py
"""Leetcode 51. N-Queens. Hard. URL: https://leetcode.com/problems/n-queens/ The n-queens puzzle is the problem of placing n queens on an nxn chessboard such that no two queens attack each other. Given an integer n, return all distinct solutions to the n-queens puzzle. Each solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' both indicate a queen and an empty space respectively. Example: Input: 4 Output: [ [".Q..", // Solution 1 "...Q", "Q...", "..Q."], ["..Q.", // Solution 2 "Q...", "...Q", ".Q.."] ] Explanation: There exist two distinct solutions to the 4-queens puzzle as shown above. """ class Solution(object): def _is_valid(self, queens): """Check current queen position is valid among previous queens.""" current_row, current_col = len(queens) - 1, queens[-1] # Check any queens can attack the current queen. for row, col in enumerate(queens[:-1]): col_diff = abs(current_col - col) row_diff = abs(current_row - row) if col_diff == 0 or col_diff == row_diff: return False return True def _dfs(self, n, res=[], queens=[]): """DFS for putting queens in suitable position.""" if n == len(queens): res.append(queens[:]) return None for col in range(n): # Append current queen's column id. queens.append(col) if self._is_valid(queens): # If current queen's position is valid, search the next level. self._dfs(n, res, queens) # Backtrack by poping out current queen. queens.pop() def solveNQueens(self, n): """ :type n: int :rtype: List[List[str]] Time complexity: O(n!). Space complexity: O(n). """ # res to collect multiple solutions for n_queens. res = [] # queens is an 1-d array to store the column ids of queens. queens = [] self._dfs(n, res, queens) # Make solution configs. sol = [['.'*j + 'Q' + '.'*(n - j - 1) for j in queens] for queens in res] return sol def main(): n = 4 print Solution().solveNQueens(n) if __name__ == '__main__': main()
"""Leetcode 51. N-Queens. Hard. URL: https://leetcode.com/problems/n-queens/ The n-queens puzzle is the problem of placing n queens on an nxn chessboard such that no two queens attack each other. Given an integer n, return all distinct solutions to the n-queens puzzle. Each solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' both indicate a queen and an empty space respectively. Example: Input: 4 Output: [ [".Q..", // Solution 1 "...Q", "Q...", "..Q."], ["..Q.", // Solution 2 "Q...", "...Q", ".Q.."] ] Explanation: There exist two distinct solutions to the 4-queens puzzle as shown above. """ class Solution(object): def _is_valid(self, queens): """Check current queen position is valid.""" current_row, current_col = len(queens) - 1, queens[-1] # Check any queens can attack the current queen. for row, col in enumerate(queens[:-1]): col_diff = abs(current_col - col) row_diff = abs(current_row - row) if col_diff == 0 or col_diff == row_diff: return False return True def solveNQueens(self, n, res, queens): """ :type n: int :rtype: List[List[str]] """ # queens is an 1-d array to store the column ids of queens. if n == len(queens): res.append(queens[:]) return res for col in range(n): # Append current queen's column id. queens.append(col) if self._is_valid(queens): # If current queen's position is valid, search the next level. self.solveNQueens(n, res, queens) # Backtrack by poping out current queens. queens.pop() return res def main(): n = 4 print Solution().solveNQueens(n, [], []) if __name__ == '__main__': main()
Python
0.99916
16c1ae09e0288036aae87eb4337c24b23b1e6638
Clean up some unused imports and comments
classify.py
classify.py
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
import numpy as np
import json
import sys
from analyze import Analyzer # for some train data labelling

def main(argv):
    group = argv[0] if len(argv) > 0 else "id"
    train_size = int(argv[1]) if len(argv) > 1 else 1000

    train_data = []
    train_labels = []
    analyzer = Analyzer(group)
    for message, _ in analyzer.read_json(sys.stdin):
        label = analyzer.analyze(message)[0]
        train_data.append(message)
        train_labels.append(label)
        if len(train_data) >= train_size:
            break

    regressor = Pipeline([
        ('tfidf', TfidfVectorizer(input='content')),
        ('clf', RandomForestRegressor())
    ])
    regressor.fit(train_data, train_labels)

    for message, group in analyzer.read_json(sys.stdin):
        # Call predict for every message which might be slow in practice but
        # avoids memory hog due to not being able to use iterators if done in
        # one batch.
        prediction = regressor.predict([message])[0]
        if analyzer.display:
            # Take the color for this group of predictions
            c = cmp(prediction, 0)
            message = analyzer.colors[c] + message + analyzer.END_COLOR

        analyzer.output(group, message, prediction, "")

if __name__ == "__main__":
    main(sys.argv[1:])
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
import numpy as np
import json
import sys
import time
from analyze import Analyzer # for some train data labelling

def main(argv):
    group = argv[0] if len(argv) > 0 else "id"
    train_size = int(argv[1]) if len(argv) > 1 else 1000

    train_data = []
    train_labels = []
    analyzer = Analyzer(group)
    for message, _ in analyzer.read_json(sys.stdin):
        label = analyzer.analyze(message)[0]
        train_data.append(message)
        train_labels.append(label)
        if len(train_data) >= train_size:
            break

    regressor = Pipeline([
        ('tfidf', TfidfVectorizer(input='content')),
        ('clf', RandomForestRegressor())
        #('clf', KNeighborsRegressor())
    ])
    regressor.fit(train_data, train_labels)

    for message, group in analyzer.read_json(sys.stdin):
        # Call predict for every message which might be slow in practice but
        # avoids memory hog due to not being able to use iterators if done in
        # one batch.
        prediction = regressor.predict([message])[0]
        if analyzer.display:
            # Take the color for this group of predictions
            c = cmp(prediction, 0)
            message = analyzer.colors[c] + message + analyzer.END_COLOR

        analyzer.output(group, message, prediction, "")

if __name__ == "__main__":
    main(sys.argv[1:])
Python
0
4f3d1e90ec4af618ada415f53ddd9eec42bafb38
Indent with 4 spaces, not 3
wafer/talks/tests/test_wafer_basic_talks.py
wafer/talks/tests/test_wafer_basic_talks.py
# This tests the very basic talk stuff, to ensure some levels of sanity

def test_add_talk():
    """Create a user and add a talk to it"""
    from django.contrib.auth.models import User
    from wafer.talks.models import Talks

    user = User.objects.create_user('john', 'best@wafer.test', 'johnpassword')
    talk = Talks.objects.create(title="This is a test talk",
                                abstract="This should be a long and interesting abstract, but isn't",
                                corresponding_author_id=user.id)

    assert user.contact_talks.count() == 1
# This tests the very basic talk stuff, to ensure some levels of sanity

def test_add_talk():
   """Create a user and add a talk to it"""
   from django.contrib.auth.models import User
   from wafer.talks.models import Talks

   user = User.objects.create_user('john', 'best@wafer.test', 'johnpassword')
   talk = Talks.objects.create(title="This is a test talk",
         abstract="This should be a long and interesting abstract, but isn't",
         corresponding_author_id=user.id)

   assert user.contact_talks.count() == 1
Python
0.998158
82f648557d3c14568811038a63d766d3d84a9b79
Fix dataverse upload bug
waterbutler/providers/dataverse/provider.py
waterbutler/providers/dataverse/provider.py
import asyncio import http import tempfile import xmltodict from waterbutler.core import streams from waterbutler.core import provider from waterbutler.core import exceptions from waterbutler.providers.dataverse import settings from waterbutler.providers.dataverse.metadata import DataverseDatasetMetadata from waterbutler.providers.dataverse import utils as dataverse_utils class DataverseProvider(provider.BaseProvider): EDIT_MEDIA_BASE_URL = settings.EDIT_MEDIA_BASE_URL DOWN_BASE_URL = settings.DOWN_BASE_URL METADATA_BASE_URL = settings.METADATA_BASE_URL def __init__(self, auth, credentials, settings): super().__init__(auth, credentials, settings) self.token = self.credentials['token'] self.doi = self.settings['doi'] @asyncio.coroutine def download(self, path, **kwargs): resp = yield from self.make_request( 'GET', provider.build_url(self.DOWN_BASE_URL, path), expects=(200, ), throws=exceptions.DownloadError, params={'key': self.token}, ) return streams.ResponseStreamReader(resp) @asyncio.coroutine def upload(self, stream, path, **kwargs): filename = path.strip('/') stream = streams.ZipStreamReader( filename=filename, file_stream=stream, ) # Write stream to disk (Necessary to find zip file size) f = tempfile.TemporaryFile() chunk = yield from stream.read() while chunk: f.write(chunk) chunk = yield from stream.read() stream = streams.FileStreamReader(f) dv_headers = { "Content-Disposition": "filename=temp.zip", "Content-Type": "application/zip", "Packaging": "http://purl.org/net/sword/package/SimpleZip", "Content-Length": str(stream.size), } yield from self.make_request( 'POST', provider.build_url(self.EDIT_MEDIA_BASE_URL, 'study', self.doi), headers=dv_headers, auth=(self.token, ), data=stream, expects=(201, ), throws=exceptions.UploadError ) # Find appropriate version of file from metadata url data = yield from self.metadata() filename, version = dataverse_utils.unpack_filename(filename) highest_compatible = None # Reduce to files of the same base name of the same/higher version filtered_data = sorted([ f for f in data if f['extra']['original'] == filename and f['extra']['version'] >= version ], key=lambda f: f['extra']['version']) # Find highest version from original without a gap in between for item in filtered_data: if item['extra']['version'] == version: highest_compatible = item version += 1 else: break return highest_compatible, True @asyncio.coroutine def delete(self, path, **kwargs): yield from self.make_request( 'DELETE', provider.build_url(self.EDIT_MEDIA_BASE_URL, 'file', path), auth=(self.token, ), expects=(204, ), throws=exceptions.DeleteError, ) @asyncio.coroutine def metadata(self, path='/', state='draft', **kwargs): url = provider.build_url(self.METADATA_BASE_URL, self.doi) resp = yield from self.make_request( 'GET', url, auth=(self.token, ), expects=(200, ), throws=exceptions.MetadataError ) data = yield from resp.text() data = xmltodict.parse(data) dataset_metadata = DataverseDatasetMetadata(data, state).serialized() # Dataset metadata if path == '/': return dataset_metadata # File metadata else: try: return next( item for item in dataset_metadata if item['path'] == path ) except StopIteration: raise exceptions.MetadataError( "Could not retrieve file '{}'".format(path), code=http.client.NOT_FOUND, )
import asyncio import http import tempfile import xmltodict from waterbutler.core import streams from waterbutler.core import provider from waterbutler.core import exceptions from waterbutler.providers.dataverse import settings from waterbutler.providers.dataverse.metadata import DataverseDatasetMetadata from waterbutler.providers.dataverse import utils as dataverse_utils class DataverseProvider(provider.BaseProvider): EDIT_MEDIA_BASE_URL = settings.EDIT_MEDIA_BASE_URL DOWN_BASE_URL = settings.DOWN_BASE_URL METADATA_BASE_URL = settings.METADATA_BASE_URL def __init__(self, auth, credentials, settings): super().__init__(auth, credentials, settings) self.token = self.credentials['token'] self.doi = self.settings['doi'] @asyncio.coroutine def download(self, path, **kwargs): resp = yield from self.make_request( 'GET', provider.build_url(self.DOWN_BASE_URL, path), expects=(200, ), throws=exceptions.DownloadError, params={'key': self.token}, ) return streams.ResponseStreamReader(resp) @asyncio.coroutine def upload(self, stream, path, **kwargs): filename = path.strip('/') stream = streams.ZipStreamReader( filename=filename, file_stream=stream, ) # Write stream to disk (Necessary to find zip file size) f = tempfile.TemporaryFile() chunk = yield from stream.read() while chunk: f.write(chunk) chunk = yield from stream.read() stream = streams.FileStreamReader(f) dv_headers = { "Content-Disposition": "filename=temp.zip", "Content-Type": "application/zip", "Packaging": "http://purl.org/net/sword/package/SimpleZip", "Content-Length": str(stream.size), } yield from self.make_request( 'POST', provider.build_url(self.EDIT_MEDIA_BASE_URL, 'study', self.doi), headers=dv_headers, auth=(self.token, ), data=stream, expects=(201, ), throws=exceptions.UploadError ) # Find appropriate version of file from metadata url data = yield from self.metadata() filename, version = dataverse_utils.unpack_filename(filename) highest_compatible = None # Reduce to files of the same base name of the same/higher version filtered_data = sorted([ f for f in data if f['extra']['original'] == filename and f['extra']['version'] >= version ], key=lambda f: f['extra']['version']) # Find highest version from original without a gap in between for item in filtered_data: if item['extra']['version'] == version: highest_compatible = item version += 1 else: break return highest_compatible, True @asyncio.coroutine def delete(self, path, **kwargs): yield from self.make_request( 'DELETE', provider.build_url(self.EDIT_MEDIA_BASE_URL, 'file', path), auth=(self.token, ), expects=(204, ), throws=exceptions.DeleteError, ) @asyncio.coroutine def metadata(self, path, state='draft', **kwargs): url = provider.build_url(self.METADATA_BASE_URL, self.doi) resp = yield from self.make_request( 'GET', url, auth=(self.token, ), expects=(200, ), throws=exceptions.MetadataError ) data = yield from resp.text() data = xmltodict.parse(data) dataset_metadata = DataverseDatasetMetadata(data, state).serialized() # Dataset metadata if path == '/': return dataset_metadata # File metadata else: try: return next( item for item in dataset_metadata if item['path'] == path ) except StopIteration: raise exceptions.MetadataError( "Could not retrieve file '{}'".format(path), code=http.client.NOT_FOUND, )
Python
0.000001
7b0a6d27389f8e4abde77b2ed76dac795c33cfab
Use url_for
demo/app.py
demo/app.py
import flask
from flask import request
from markupsafe import Markup

import diffhtml


app = flask.Flask('Diff-HTML Demo')

DEFAULT_A = """
I am the very model of a modern Major-General,
I've information vegetable, animal, and mineral,
I know the kings of England, and I quote the fights historical,
From Marathon to Waterloo, in order categorical.
"""

DEFAULT_B = """
I am the very model of a cartoon individual,
My animation's comical, unusual, and whimsical,
I'm quite adept at funny gags, comedic theory I have read,
From wicked puns and stupid jokes to anvils that drop on your head.
"""


@app.route('/ndiff', methods=['GET', 'POST'])
def ndiff():
    a = request.form.get('a', DEFAULT_A)
    b = request.form.get('b', DEFAULT_B)
    try:
        cutoff = float(request.form.get('cutoff', 0.6))
    except ValueError:
        cutoff = 0.6
    context = {
        'result': None,
        'cutoff': cutoff,
        'input': {'a': a, 'b': b},
    }
    if request.method == 'POST':
        context['result'] = Markup('<br>').join(diffhtml.ndiff(
            a.splitlines(), b.splitlines(), cutoff=cutoff,
        ))
    return flask.render_template('ndiff.html', **context)


@app.route('/')
def home():
    return flask.redirect(flask.url_for('ndiff'))


if __name__ == '__main__':
    app.run()
import flask
from flask import request
from markupsafe import Markup

import diffhtml


app = flask.Flask('Diff-HTML Demo')

DEFAULT_A = """
I am the very model of a modern Major-General,
I've information vegetable, animal, and mineral,
I know the kings of England, and I quote the fights historical,
From Marathon to Waterloo, in order categorical.
"""

DEFAULT_B = """
I am the very model of a cartoon individual,
My animation's comical, unusual, and whimsical,
I'm quite adept at funny gags, comedic theory I have read,
From wicked puns and stupid jokes to anvils that drop on your head.
"""


@app.route('/ndiff', methods=['GET', 'POST'])
def ndiff():
    a = request.form.get('a', DEFAULT_A)
    b = request.form.get('b', DEFAULT_B)
    try:
        cutoff = float(request.form.get('cutoff', 0.6))
    except ValueError:
        cutoff = 0.6
    context = {
        'result': None,
        'cutoff': cutoff,
        'input': {'a': a, 'b': b},
    }
    if request.method == 'POST':
        context['result'] = Markup('<br>').join(diffhtml.ndiff(
            a.splitlines(), b.splitlines(), cutoff=cutoff,
        ))
    return flask.render_template('ndiff.html', **context)


@app.route('/')
def home():
    return flask.redirect('/ndiff')


if __name__ == '__main__':
    app.run()
Python
0.000354
dd249ca665f21f574d9ff992e0cd3e78433c7fa7
Use printf-style String Formatting for output
sanic/response.py
sanic/response.py
import ujson STATUS_CODES = { 200: b'OK', 400: b'Bad Request', 401: b'Unauthorized', 402: b'Payment Required', 403: b'Forbidden', 404: b'Not Found', 405: b'Method Not Allowed', 500: b'Internal Server Error', 501: b'Not Implemented', 502: b'Bad Gateway', 503: b'Service Unavailable', 504: b'Gateway Timeout', } class HTTPResponse: __slots__ = ('body', 'status', 'content_type', 'headers') def __init__(self, body=None, status=200, headers=[], content_type='text/plain', body_bytes=b''): self.content_type = content_type if not body is None: self.body = body.encode('utf-8') else: self.body = body_bytes self.status = status self.headers = headers def output(self, version="1.1", keep_alive=False, keep_alive_timeout=None): # This is all returned in a kind-of funky way # We tried to make this as fast as possible in pure python timeout_header = b'' if keep_alive and keep_alive_timeout: timeout_header = b'Keep-Alive: timeout=%d\r\n' % keep_alive_timeout headers = b'' if self.headers: headers = b''.join(b'%b: %b\r\n' % (name.encode(), value.encode('utf-8')) for name, value in self.headers.items()) return b'HTTP/%b %d %b\r\nContent-Type: %b\r\nContent-Length: %d\r\nConnection: %b\r\n%b%b\r\n%b' % ( version.encode(), self.status, STATUS_CODES.get(self.status, b'FAIL'), self.content_type.encode(), len(self.body), b'keep-alive' if keep_alive else b'close', timeout_header, headers, self.body ) def json(body, status=200, headers=None): return HTTPResponse(ujson.dumps(body), headers=headers, status=status, content_type="application/json; charset=utf-8") def text(body, status=200, headers=None): return HTTPResponse(body, status=status, headers=headers, content_type="text/plain; charset=utf-8") def html(body, status=200, headers=None): return HTTPResponse(body, status=status, headers=headers, content_type="text/html; charset=utf-8")
import ujson STATUS_CODES = { 200: 'OK', 400: 'Bad Request', 401: 'Unauthorized', 402: 'Payment Required', 403: 'Forbidden', 404: 'Not Found', 405: 'Method Not Allowed', 500: 'Internal Server Error', 501: 'Not Implemented', 502: 'Bad Gateway', 503: 'Service Unavailable', 504: 'Gateway Timeout', } class HTTPResponse: __slots__ = ('body', 'status', 'content_type', 'headers') def __init__(self, body=None, status=200, headers=[], content_type='text/plain', body_bytes=b''): self.content_type = content_type if not body is None: self.body = body.encode('utf-8') else: self.body = body_bytes self.status = status self.headers = headers def output(self, version="1.1", keep_alive=False, keep_alive_timeout=None): # This is all returned in a kind-of funky way # We tried to make this as fast as possible in pure python additional_headers = [] if keep_alive and not keep_alive_timeout is None: additional_headers = [b'Keep-Alive: timeout=', str(keep_alive_timeout).encode(), b's\r\n'] if self.headers: for name, value in self.headers.items(): additional_headers.append('{}: {}\r\n'.format(name, value).encode('utf-8')) return b''.join([ 'HTTP/{} {} {}\r\n'.format(version, self.status, STATUS_CODES.get(self.status, 'FAIL')).encode(), b'Content-Type: ', self.content_type.encode(), b'\r\n', b'Content-Length: ', str(len(self.body)).encode(), b'\r\n', b'Connection: ', ('keep-alive' if keep_alive else 'close').encode(), b'\r\n', ] + additional_headers + [ b'\r\n', self.body, ]) def json(body, status=200, headers=None): return HTTPResponse(ujson.dumps(body), headers=headers, status=status, content_type="application/json; charset=utf-8") def text(body, status=200, headers=None): return HTTPResponse(body, status=status, headers=headers, content_type="text/plain; charset=utf-8") def html(body, status=200, headers=None): return HTTPResponse(body, status=status, headers=headers, content_type="text/html; charset=utf-8")
Python
0.000006
68b2e1cb5a914d408761229bd27677e80967f5ff
Remove unused import.
hashbrown/management/commands/switches.py
hashbrown/management/commands/switches.py
from django.core.management.base import BaseCommand from django.utils.six.moves import input from hashbrown.models import Switch from hashbrown.utils import SETTINGS_KEY, is_active, get_defaults class Command(BaseCommand): help = 'Creates / deletes feature switches in the database' def add_arguments(self, parser): parser.add_argument( '--delete', action='store_true', default=False, help='Delete switches in the database that are not in ' + SETTINGS_KEY, ) parser.add_argument( '--force', action='store_true', default=False, help='Delete switches without confirmation (implies --delete)', ) def handle(self, *args, **kwargs): if kwargs['delete'] or kwargs['force']: self._delete_switches(force=kwargs['force']) self._create_switches() self.stderr.write('All switches up-to-date.') def _create_switches(self): create_switches(self.stderr) def _delete_switches(self, force=False): delete_switches(self.stderr, force=force) def create_switches(stderr): """Create switches listed in HASHBROWN_SWITCH_DEFAULTS which aren't in the database yet. """ defaults = get_defaults() installed_switches = set(Switch.objects.values_list('label', flat=True)) missing_switches = set(defaults) - installed_switches for label in sorted(missing_switches): is_active(label) stderr.write('Created switch %r.' % label) return missing_switches def delete_switches(stderr, force=False): defaults = get_defaults() installed_switches = set(Switch.objects.values_list('label', flat=True)) unknown_switches = sorted(installed_switches - set(defaults)) if not unknown_switches: return permission_granted = force or ask_permission(stderr, unknown_switches) if permission_granted: Switch.objects.filter(label__in=unknown_switches).delete() for label in unknown_switches: stderr.write('Deleted switch %r.' % label) def ask_permission(stderr, switches): stderr.write('The following switches are in the database but not in %s:' % SETTINGS_KEY) for label in switches: stderr.write(label) response = input('Delete switches? [y/N]: ') return response.lower().strip() in ('y', 'yes')
from optparse import make_option from django.core.management.base import BaseCommand from django.utils.six.moves import input from hashbrown.models import Switch from hashbrown.utils import SETTINGS_KEY, is_active, get_defaults class Command(BaseCommand): help = 'Creates / deletes feature switches in the database' def add_arguments(self, parser): parser.add_argument( '--delete', action='store_true', default=False, help='Delete switches in the database that are not in ' + SETTINGS_KEY, ) parser.add_argument( '--force', action='store_true', default=False, help='Delete switches without confirmation (implies --delete)', ) def handle(self, *args, **kwargs): if kwargs['delete'] or kwargs['force']: self._delete_switches(force=kwargs['force']) self._create_switches() self.stderr.write('All switches up-to-date.') def _create_switches(self): create_switches(self.stderr) def _delete_switches(self, force=False): delete_switches(self.stderr, force=force) def create_switches(stderr): """Create switches listed in HASHBROWN_SWITCH_DEFAULTS which aren't in the database yet. """ defaults = get_defaults() installed_switches = set(Switch.objects.values_list('label', flat=True)) missing_switches = set(defaults) - installed_switches for label in sorted(missing_switches): is_active(label) stderr.write('Created switch %r.' % label) return missing_switches def delete_switches(stderr, force=False): defaults = get_defaults() installed_switches = set(Switch.objects.values_list('label', flat=True)) unknown_switches = sorted(installed_switches - set(defaults)) if not unknown_switches: return permission_granted = force or ask_permission(stderr, unknown_switches) if permission_granted: Switch.objects.filter(label__in=unknown_switches).delete() for label in unknown_switches: stderr.write('Deleted switch %r.' % label) def ask_permission(stderr, switches): stderr.write('The following switches are in the database but not in %s:' % SETTINGS_KEY) for label in switches: stderr.write(label) response = input('Delete switches? [y/N]: ') return response.lower().strip() in ('y', 'yes')
Python
0
ac85a3ce64ea815fd3530085c085e384cf8269fb
Use pydeconz interface controls for button platform (#74654)
homeassistant/components/deconz/button.py
homeassistant/components/deconz/button.py
"""Support for deCONZ buttons.""" from __future__ import annotations from dataclasses import dataclass from pydeconz.models.event import EventType from pydeconz.models.scene import Scene as PydeconzScene from homeassistant.components.button import ( DOMAIN, ButtonEntity, ButtonEntityDescription, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity import EntityCategory from homeassistant.helpers.entity_platform import AddEntitiesCallback from .deconz_device import DeconzSceneMixin from .gateway import DeconzGateway, get_gateway_from_config_entry @dataclass class DeconzButtonDescriptionMixin: """Required values when describing deCONZ button entities.""" suffix: str button_fn: str @dataclass class DeconzButtonDescription(ButtonEntityDescription, DeconzButtonDescriptionMixin): """Class describing deCONZ button entities.""" ENTITY_DESCRIPTIONS = { PydeconzScene: [ DeconzButtonDescription( key="store", button_fn="store", suffix="Store Current Scene", icon="mdi:inbox-arrow-down", entity_category=EntityCategory.CONFIG, ) ] } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the deCONZ button entity.""" gateway = get_gateway_from_config_entry(hass, config_entry) gateway.entities[DOMAIN] = set() @callback def async_add_scene(_: EventType, scene_id: str) -> None: """Add scene button from deCONZ.""" scene = gateway.api.scenes[scene_id] async_add_entities( DeconzButton(scene, gateway, description) for description in ENTITY_DESCRIPTIONS.get(PydeconzScene, []) ) gateway.register_platform_add_device_callback( async_add_scene, gateway.api.scenes, ) class DeconzButton(DeconzSceneMixin, ButtonEntity): """Representation of a deCONZ button entity.""" TYPE = DOMAIN def __init__( self, device: PydeconzScene, gateway: DeconzGateway, description: DeconzButtonDescription, ) -> None: """Initialize deCONZ number entity.""" self.entity_description: DeconzButtonDescription = description super().__init__(device, gateway) self._attr_name = f"{self._attr_name} {description.suffix}" async def async_press(self) -> None: """Store light states into scene.""" async_button_fn = getattr( self.gateway.api.scenes, self.entity_description.button_fn, ) await async_button_fn(self._device.group_id, self._device.id) def get_device_identifier(self) -> str: """Return a unique identifier for this scene.""" return f"{super().get_device_identifier()}-{self.entity_description.key}"
"""Support for deCONZ buttons.""" from __future__ import annotations from dataclasses import dataclass from pydeconz.models.event import EventType from pydeconz.models.scene import Scene as PydeconzScene from homeassistant.components.button import ( DOMAIN, ButtonEntity, ButtonEntityDescription, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity import EntityCategory from homeassistant.helpers.entity_platform import AddEntitiesCallback from .deconz_device import DeconzSceneMixin from .gateway import DeconzGateway, get_gateway_from_config_entry @dataclass class DeconzButtonDescriptionMixin: """Required values when describing deCONZ button entities.""" suffix: str button_fn: str @dataclass class DeconzButtonDescription(ButtonEntityDescription, DeconzButtonDescriptionMixin): """Class describing deCONZ button entities.""" ENTITY_DESCRIPTIONS = { PydeconzScene: [ DeconzButtonDescription( key="store", button_fn="store", suffix="Store Current Scene", icon="mdi:inbox-arrow-down", entity_category=EntityCategory.CONFIG, ) ] } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the deCONZ button entity.""" gateway = get_gateway_from_config_entry(hass, config_entry) gateway.entities[DOMAIN] = set() @callback def async_add_scene(_: EventType, scene_id: str) -> None: """Add scene button from deCONZ.""" scene = gateway.api.scenes[scene_id] async_add_entities( DeconzButton(scene, gateway, description) for description in ENTITY_DESCRIPTIONS.get(PydeconzScene, []) ) gateway.register_platform_add_device_callback( async_add_scene, gateway.api.scenes, ) class DeconzButton(DeconzSceneMixin, ButtonEntity): """Representation of a deCONZ button entity.""" TYPE = DOMAIN def __init__( self, device: PydeconzScene, gateway: DeconzGateway, description: DeconzButtonDescription, ) -> None: """Initialize deCONZ number entity.""" self.entity_description: DeconzButtonDescription = description super().__init__(device, gateway) self._attr_name = f"{self._attr_name} {description.suffix}" async def async_press(self) -> None: """Store light states into scene.""" async_button_fn = getattr(self._device, self.entity_description.button_fn) await async_button_fn() def get_device_identifier(self) -> str: """Return a unique identifier for this scene.""" return f"{super().get_device_identifier()}-{self.entity_description.key}"
Python
0
9b529f2ba00c4ee76b571ef1c27faada89e4bc29
Add full compatibility functions.
alchy/_compat.py
alchy/_compat.py
#pylint: skip-file '''Python 2/3 compatibility Some py2/py3 compatibility support based on a stripped down version of six so we don't have to depend on a specific version of it. Borrowed from https://github.com/mitsuhiko/flask/blob/master/flask/_compat.py ''' import sys PY2 = sys.version_info[0] == 2 _identity = lambda x: x if not PY2: text_type = str string_types = (str,) integer_types = (int, ) iterkeys = lambda d: iter(d.keys()) itervalues = lambda d: iter(d.values()) iteritems = lambda d: iter(d.items()) from io import StringIO def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value implements_to_string = _identity else: text_type = unicode string_types = (str, unicode) integer_types = (int, long) iterkeys = lambda d: d.iterkeys() itervalues = lambda d: d.itervalues() iteritems = lambda d: d.iteritems() from cStringIO import StringIO exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') def implements_to_string(cls): cls.__unicode__ = cls.__str__ cls.__str__ = lambda x: x.__unicode__().encode('utf-8') return cls def with_metaclass(meta, *bases): # This requires a bit of explanation: the basic idea is to make a # dummy metaclass for one level of class instantiation that replaces # itself with the actual metaclass. Because of internal type checks # we also need to make sure that we downgrade the custom metaclass # for one level to something closer to type (that's why __call__ and # __init__ comes back from type etc.). # # This has the advantage over six.with_metaclass in that it does not # introduce dummy classes into the final MRO. class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass('temporary_class', None, {}) # Certain versions of pypy have a bug where clearing the exception stack # breaks the __exit__ function in a very peculiar way. This is currently # true for pypy 2.2.1 for instance. The second level of exception blocks # is necessary because pypy seems to forget to check if an exception # happend until the next bytecode instruction? BROKEN_PYPY_CTXMGR_EXIT = False if hasattr(sys, 'pypy_version_info'): class _Mgr(object): def __enter__(self): return self def __exit__(self, *args): sys.exc_clear() try: try: with _Mgr(): raise AssertionError() except: raise except TypeError: BROKEN_PYPY_CTXMGR_EXIT = True except AssertionError: pass
#pylint: skip-file '''Python 2/3 compatibility Some py2/py3 compatibility support based on a stripped down version of six so we don't have to depend on a specific version of it. Borrowed from https://github.com/mitsuhiko/flask/blob/master/flask/_compat.py ''' import sys PY2 = sys.version_info[0] == 2 if not PY2: text_type = str string_types = (str,) integer_types = (int, ) iterkeys = lambda d: iter(d.keys()) itervalues = lambda d: iter(d.values()) iteritems = lambda d: iter(d.items()) else: text_type = unicode string_types = (str, unicode) integer_types = (int, long) iterkeys = lambda d: d.iterkeys() itervalues = lambda d: d.itervalues() iteritems = lambda d: d.iteritems() def with_metaclass(meta, *bases): # This requires a bit of explanation: the basic idea is to make a # dummy metaclass for one level of class instantiation that replaces # itself with the actual metaclass. Because of internal type checks # we also need to make sure that we downgrade the custom metaclass # for one level to something closer to type (that's why __call__ and # __init__ comes back from type etc.). # # This has the advantage over six.with_metaclass in that it does not # introduce dummy classes into the final MRO. class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass('temporary_class', None, {}) # Certain versions of pypy have a bug where clearing the exception stack # breaks the __exit__ function in a very peculiar way. This is currently # true for pypy 2.2.1 for instance. The second level of exception blocks # is necessary because pypy seems to forget to check if an exception # happend until the next bytecode instruction? BROKEN_PYPY_CTXMGR_EXIT = False if hasattr(sys, 'pypy_version_info'): class _Mgr(object): def __enter__(self): return self def __exit__(self, *args): sys.exc_clear() try: try: with _Mgr(): raise AssertionError() except: raise except TypeError: BROKEN_PYPY_CTXMGR_EXIT = True except AssertionError: pass
Python
0
114d7bc6b45d18f528a7ed5c12e1938e35efb93c
Update hash_db_password.py
bin/hash_db_password.py
bin/hash_db_password.py
import sys from werkzeug.security import generate_password_hash from flask_appbuilder.security.models import User try: from app import app, db except: from flask import Flask from flask.ext.sqlalchemy import SQLAlchemy if len(sys.argv) < 2: print "Without typical app structure use parameter to config" print "Use example: python hash_db_password.py sqlite:////home/user/application/app.db" exit() con_str = sys.argv[1] app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = con_str db = SQLAlchemy(app) try: print "using connection string: {}".format(app.config['SQLALCHEMY_DATABASE_URI']) users = db.session.query(User).all() except Exception as e: print "Query, connection error {}".format(e) print "Config db key {}".format(app.config['SQLALCHEMY_DATABASE_URI']) exit() for user in users: print "Hashing password for {}".format(user.full_name) user.password = generate_password_hash(user.password) try: db.session.merge(user) db.session.commit() except: print "Error updating password for {}".format(user.full_name)
import sys from werkzeug.security import generate_password_hash from flask_appbuilder.security.models import User try: from app import app, db except: from flask import Flask from flask.ext.sqlalchemy import SQLAlchemy if len(sys.argv) < 2: print "Without typical app structure use parameter to config" print "Use example: python hash_db_password.py sqlite:////home/user/application/app.db" exit() con_str = sys.argv[1] app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = con_str db = SQLAlchemy(app) try: print "using connection string: {}".format(app.config['SQLALCHEMY_DATABASE_URI']) users = db.session.query(User).all() except Exception as e: print "Query, connection error {}".format(e.message) print "Config db key {}".format(app.config['SQLALCHEMY_DATABASE_URI']) exit() for user in users: print "Hashing password for {}".format(user.full_name) user.password = generate_password_hash(user.password) try: db.session.merge(user) db.session.commit() except: print "Error updating password for {}".format(user.full_name)
Python
0.000071
3224a95d79f6e3166e235f4cfc857a48d1b17c52
Revise docstring: memoization
alg_fibonacci.py
alg_fibonacci.py
"""Fibonacci series: 0, 1, 1, 2, 3, 5, 8,... - Fib(0) = 0 - Fib(1) = 1 - Fib(n) = Fib(n - 1) + Fib(n - 2) """ from __future__ import absolute_import from __future__ import print_function from __future__ import division def fibonacci_recur(n): """Get the nth number of Fibonacci series, Fn, by recursion. - Time complexity: 2Fn - 1 = O(Fn); too fast. - Space complexity: O(n). """ if n <= 1: return n else: return fibonacci_recur(n - 1) + fibonacci_recur(n - 2) def fibonacci_memo(n): """Get the nth number of Fibonacci series, Fn, by memoization. - Time complexity: O(n). - Space complexity: O(n). """ fn_d = {} fn_d[0] = 0 fn_d[1] = 1 for n in range(2, n + 1): fn_d[n] = fn_d[n - 1] + fn_d[n - 2] return fn_d[n] def fibonacci_dp(n): """Get the nth number of Fibonacci series by dynamic programming. - Time complexity is still O(n), like fibonacci_memo(). - Space complexity is O(1), improving a lot. """ a, b = 0, 1 for _ in range(n): a, b = a + b, a return a def fibonacci_gen(n): """Get the nth number of Fibonacci series by generator.""" a, b = 0, 1 for _ in range(n): a, b = a + b, a yield a def main(): import time n = 30 print('{}th number of Fibonacci series:'.format(n)) start_time = time.time() print('By recursion: {}'.format(fibonacci_recur(n))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By memorization: {}'.format(fibonacci_memo(n))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By dynamic programming: {}'.format(fibonacci_dp(n))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By generator: {}'.format(list(fibonacci_gen(n))[-1])) print('Time: {}'.format(time.time() - start_time)) if __name__ == '__main__': main()
"""Fibonacci series: 0, 1, 1, 2, 3, 5, 8,... - Fib(0) = 0 - Fib(1) = 1 - Fib(n) = Fib(n - 1) + Fib(n - 2) """ from __future__ import absolute_import from __future__ import print_function from __future__ import division def fibonacci_recur(n): """Get the nth number of Fibonacci series, Fn, by recursion. - Time complexity: 2Fn - 1 = O(Fn); too fast. - Space complexity: O(n). """ if n <= 1: return n else: return fibonacci_recur(n - 1) + fibonacci_recur(n - 2) def fibonacci_memo(n): """Get the nth number of Fibonacci series, Fn, by memorization. - Time complexity: O(n). - Space complexity: O(n). """ fn_d = {} fn_d[0] = 0 fn_d[1] = 1 for n in range(2, n + 1): fn_d[n] = fn_d[n - 1] + fn_d[n - 2] return fn_d[n] def fibonacci_dp(n): """Get the nth number of Fibonacci series by dynamic programming. - Time complexity is still O(n), like fibonacci_memo(). - Space complexity is O(1), improving a lot. """ a, b = 0, 1 for _ in range(n): a, b = a + b, a return a def fibonacci_gen(n): """Get the nth number of Fibonacci series by generator.""" a, b = 0, 1 for _ in range(n): a, b = a + b, a yield a def main(): import time n = 30 print('{}th number of Fibonacci series:'.format(n)) start_time = time.time() print('By recursion: {}'.format(fibonacci_recur(n))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By memorization: {}'.format(fibonacci_memo(n))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By dynamic programming: {}'.format(fibonacci_dp(n))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By generator: {}'.format(list(fibonacci_gen(n))[-1])) print('Time: {}'.format(time.time() - start_time)) if __name__ == '__main__': main()
Python
0.998763
858e84f336f76a1e65b730834ad8ffb346ee6b0f
fix logger singleton to work with pyjd
examples/mail/Logger.py
examples/mail/Logger.py
from pyjamas.ui.Grid import Grid _logger = None class LoggerCls(Grid): def __init__(self): Grid.__init__(self) self.targets=[] self.targets.append("app") #self.targets.append("ui") self.resize(len(self.targets)+1, 2) self.setBorderWidth("1px") self.counter=0 self.setHTML(0, 0, "<b>Log</b>") self.setText(1, 0, "app") for i in range(len(self.targets)): target=self.targets[i] self.setText(i+1, 0, target) def addTarget(self, target): self.targets.append(target) self.resize(len(self.targets)+1, 2) self.setText(len(self.targets), 0, target) return self.targets.index(target) def write(self, target, message): self.counter+=1 if target=='': target='app' try: target_idx=self.targets.index(target) except ValueError: target_idx = -1 # add new target if target_idx<0: target_idx=self.addTarget(target) target_row=target_idx+1 old_text=self.getHTML(target_row, 1) log_line="%d: " % self.counter + message if old_text=='&nbsp;': new_text=log_line else: new_text=old_text + "<br>" + log_line self.setHTML(target_row, 1, new_text) def Logger(target="", message=""): global _logger # make sure there is only one instance of this class if not _logger: _logger = LoggerCls() _logger.write(target, message) return _logger
from pyjamas.ui.Grid import Grid _logger = None class Logger(Grid): def __new__(cls): global _logger # make sure there is only one instance of this class if _logger: return _logger _logger = Grid.__new__(cls) return _logger def __init__(self, target="", message=""): #global _logger if message: return Logger().write(target, message) # make sure there is only one instance of this class if hasattr(self, target): return None self.setSingleton() Grid.__init__(self) self.targets=[] self.targets.append("app") #self.targets.append("ui") self.resize(len(self.targets)+1, 2) self.setBorderWidth("1px") self.counter=0 self.setHTML(0, 0, "<b>Log</b>") self.setText(1, 0, "app") for i in range(len(self.targets)): target=self.targets[i] self.setText(i+1, 0, target) def setSingleton(self): global _logger _logger = self def addTarget(self, target): self.targets.append(target) self.resize(len(self.targets)+1, 2) self.setText(len(self.targets), 0, target) return self.targets.index(target) def write(self, target, message): self.counter+=1 if target=='': target='app' try: target_idx=self.targets.index(target) except ValueError: target_idx = -1 # add new target if target_idx<0: target_idx=self.addTarget(target) target_row=target_idx+1 old_text=self.getHTML(target_row, 1) log_line="%d: " % self.counter + message if old_text=='&nbsp;': new_text=log_line else: new_text=old_text + "<br>" + log_line self.setHTML(target_row, 1, new_text)
Python
0.000007
f10dcd822f72e86d0eb0071acf7d38e81cfe32da
Add a blank line
examples/mnist/mnist.py
examples/mnist/mnist.py
import logging import qnd import tensorflow as tf logging.getLogger().setLevel(logging.INFO) qnd.add_flag("use_eval_input_fn", action="store_true") qnd.add_flag("use_model_fn_ops", action="store_true") def read_file(filename_queue): _, serialized = tf.TFRecordReader().read(filename_queue) scalar_feature = lambda dtype: tf.FixedLenFeature([], dtype) features = tf.parse_single_example(serialized, { "image_raw": scalar_feature(tf.string), "label": scalar_feature(tf.int64), }) image = tf.decode_raw(features["image_raw"], tf.uint8) image.set_shape([28**2]) return tf.to_float(image) / 255 - 0.5, features["label"] def minimize(loss): return tf.contrib.layers.optimize_loss( loss, tf.contrib.framework.get_global_step(), 0.01, "Adam") def mnist_model(image, number): h = tf.contrib.layers.fully_connected(image, 200) h = tf.contrib.layers.fully_connected(h, 10, activation_fn=None) loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(h, number)) predictions = tf.argmax(h, axis=1) train_op = minimize(loss) eval_metrics = { "accuracy": tf.reduce_mean(tf.to_float(tf.equal(predictions, number))) } if qnd.FLAGS.use_model_fn_ops: return tf.contrib.learn.estimators.model_fn.ModelFnOps( predictions=predictions, loss=loss, train_op=train_op, eval_metrics=eval_metrics) return predictions, loss, train_op, eval_metrics run = qnd.def_run() def main(): run(mnist_model, read_file, read_file if qnd.FLAGS.use_eval_input_fn else None) if __name__ == "__main__": main()
import logging import qnd import tensorflow as tf logging.getLogger().setLevel(logging.INFO) qnd.add_flag("use_eval_input_fn", action="store_true") qnd.add_flag("use_model_fn_ops", action="store_true") def read_file(filename_queue): _, serialized = tf.TFRecordReader().read(filename_queue) scalar_feature = lambda dtype: tf.FixedLenFeature([], dtype) features = tf.parse_single_example(serialized, { "image_raw": scalar_feature(tf.string), "label": scalar_feature(tf.int64), }) image = tf.decode_raw(features["image_raw"], tf.uint8) image.set_shape([28**2]) return tf.to_float(image) / 255 - 0.5, features["label"] def minimize(loss): return tf.contrib.layers.optimize_loss( loss, tf.contrib.framework.get_global_step(), 0.01, "Adam") def mnist_model(image, number): h = tf.contrib.layers.fully_connected(image, 200) h = tf.contrib.layers.fully_connected(h, 10, activation_fn=None) loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(h, number)) predictions = tf.argmax(h, axis=1) train_op = minimize(loss) eval_metrics = { "accuracy": tf.reduce_mean(tf.to_float(tf.equal(predictions, number))) } if qnd.FLAGS.use_model_fn_ops: return tf.contrib.learn.estimators.model_fn.ModelFnOps( predictions=predictions, loss=loss, train_op=train_op, eval_metrics=eval_metrics) return predictions, loss, train_op, eval_metrics run = qnd.def_run() def main(): run(mnist_model, read_file, read_file if qnd.FLAGS.use_eval_input_fn else None) if __name__ == "__main__": main()
Python
1
35d6d780bddf72ab5ff216a7603bd89f980c8deb
Bump version
libgrabsite/__init__.py
libgrabsite/__init__.py
__version__ = '0.4.1'
__version__ = '0.4.0'
Python
0
e511da2cb7b73891f26b93e684d9fba80042f3cd
fix syntax
bin/redis_app_update.py
bin/redis_app_update.py
import json import sys import redis r_server = redis.StrictRedis('127.0.0.1', db=2) app_key = "apps" app_info = json.loads(r_server.get(app_key)) app_name = sys.argv[1] app_port = sys.argv[2] app_namespace = sys.argv[3] app_service_name = sys.argv[4] check = False for app in app_info: if app["name"] == app_name: check = True break if not check: element = '[{"name":"%s", "namespace":"%s", "service_name":"%s" "port":"%s"}]' % (app_name, app_namespace, app_service_name, app_port) el = json.loads(element) app_info.extend(el) app_data = json.dumps(app_info) r_server.set(app_key, app_data) r_server.set("need_CSR", "1") r_server.bgsave() else: r_server.set("need_CSR", "0") r_server.bgsave()
import json import sys import redis r_server = redis.StrictRedis('127.0.0.1', db=2) app_key = "apps" app_info = json.loads(r_server.get(app_key)) app_name = sys.argv[1] app_port = sys.argv[2] app_namespace = sys.argv[3] app_service_name = sys.argv[4] check = False for app in app_info: if app["name"] == app_name: check = True break if not check: element = '[{"name":"%s", "namespace":"%s", "service_name":"%s" "port":"%s"}]' % (app_name, app_namespace, app_service_name app_port) el = json.loads(element) app_info.extend(el) app_data = json.dumps(app_info) r_server.set(app_key, app_data) r_server.set("need_CSR", "1") r_server.bgsave() else: r_server.set("need_CSR", "0") r_server.bgsave()
Python
0.000023
47dff0bd0c7d4be641268845d70d08228a621108
Make players moving more smoothly, add ability for recursion
Lesson5/MazeGame/Maze.py
Lesson5/MazeGame/Maze.py
import time class Maze(object): def __init__(self, map, mx, my): self.map = [] self.width = 0 self.mx = mx self.my = my for y in range(0, len(map)): s = map[y] self.width = max(self.width, len(s)) ls = list(s) if len(ls)>0: self.map.append(ls) self.height = len(self.map) self.players = {} for y in range(0, self.height): cs = self.map[y] for x in range(0, self.width): s = cs[x] if 'A'<=s and s<='Z': self.players[s] = [x, y] self.imgDict = {"#":loadImage("Brick32.png"), "*":loadImage("Chest32.gif")} self.plImgDict = {"A":loadImage("Okay32.png"), "B":loadImage("Troll32.png")} print self.players def show(self): for y in range(0, self.height): cs = self.map[y] for x in range(0, self.width): s = cs[x] if s != "#": noStroke() fill(0, 0, 0) rectMode(CORNERS) rect(x*self.mx, y*self.my, x*self.mx+self.mx, y*self.my+self.my) img = self.imgDict.get(s, None) if img: image(img, x*self.mx, y*self.my, self.mx, self.my) for playerName, coord in self.players.iteritems(): x, y = coord img = self.plImgDict.get(playerName, None) if img: image(img, x*self.mx, y*self.my, self.mx, self.my) def goLeft(self, player): return self.go(player, 0) def goRight(self, player): return self.go(player, 1) def goUp(self, player): return self.go(player, 2) def goDown(self, player): return self.go(player, 3) def go(self, player, d): px, py = self.players[player] dx, dy = 0, 0 if d == 0: dx = -1 if d == 1: dx = 1 if d == 2: dy = -1 if d == 3: dy = 1 s = self.map[int(py + dy)][int(px + dx)] if s == " ": for i in range(100): self.players[player] = [px + (dx / 100.0) * i, py + (dy / 100.0) * i] time.sleep(0.001) self.players[player] = [px + dx, py + dy] self.map[py][px] = " " self.map[int(py + dy)][int(px + dx)] = player # if player == "A": # if s == "*": # textSize(50) # text("OkayMan WIN", 200, 300) # time.sleep(10) # # if player == "B": # if s == "A": # textSize(50) # text("TrollMan WIN", 200, 300) # time.sleep(10) return True return False
import time class Maze(object): def __init__(self, map, mx, my): self.map = [] self.width = 0 self.mx = mx self.my = my for y in range(0, len(map)): s = map[y] self.width = max(self.width, len(s)) ls = list(s) if len(ls)>0: self.map.append(ls) self.height = len(self.map) self.players = {} for y in range(0, self.height): cs = self.map[y] for x in range(0, self.width): s = cs[x] if 'A'<=s and s<='Z': self.players[s] = [x, y] print self.players def show(self): brickImg = loadImage("Brick32.png"); okaymanImg = loadImage("Okay32.png"); trollmanImg = loadImage("Troll32.png"); chestImg = loadImage("Chest32.gif"); imgs = {"A":okaymanImg, "B":trollmanImg, "#":brickImg, "*":chestImg} for y in range(0, self.height): cs = self.map[y] for x in range(0, self.width): s = cs[x] if s == " ": noStroke() fill(0, 0, 0) rectMode(CORNERS) rect(x*self.mx, y*self.my, x*self.mx+self.mx, y*self.my+self.my) img = imgs.get(s, None) if img: image(img, x*self.mx, y*self.my, self.mx, self.my) def goLeft(self, player): return self.go(player, 0) def goRight(self, player): return self.go(player, 1) def goUp(self, player): return self.go(player, 2) def goDown(self, player): return self.go(player, 3) def go(self, player, d): px, py = self.players[player] x, y = px, py if d == 0: x = x - 1 if d == 1: x = x + 1 if d == 2: y = y - 1 if d == 3: y = y + 1 s = self.map[y][x] if s == " ": self.map[py][px] = " " self.map[y][x] = player self.players[player] = [x, y] # if player == "A": # if s == "*": # textSize(50) # text("OkayMan WIN", 200, 300) # time.sleep(10) # # if player == "B": # if s == "A": # textSize(50) # text("TrollMan WIN", 200, 300) # time.sleep(10) return True return False
Python
0.000001
0d68fbaef300c53db407f6296c00e493e4b040bf
use xdg-email by default, fallback to xdg-open + mailto uri
plyer/platforms/linux/email.py
plyer/platforms/linux/email.py
import subprocess from urllib import quote from plyer.facades import Email class LinuxEmail(Email): def _send(self, **kwargs): recipient = kwargs.get('recipient') subject = kwargs.get('subject') text = kwargs.get('text') create_chooser = kwargs.get('create_chooser') uri = "mailto:" args = ["xdg-email"] if recipient: uri += str(recipient) args += [str(recipient)] if subject: uri += "?" if not "?" in uri else "&" uri += "subject=" uri += quote(str(subject)) args += ["--subject", str(subject)] if text: uri += "?" if not "?" in uri else "&" uri += "body=" uri += quote(str(text)) args += ["--body", str(text)] try: subprocess.Popen(args) except OSError: try: subprocess.Popen(["xdg-open", uri]) except OSError: print "Warning: unable to start an email client. Make sure xdg-open is installed." def instance(): return LinuxEmail()
import subprocess from urllib import quote from plyer.facades import Email class LinuxEmail(Email): def _send(self, **kwargs): recipient = kwargs.get('recipient') subject = kwargs.get('subject') text = kwargs.get('text') create_chooser = kwargs.get('create_chooser') uri = "mailto:" if recipient: uri += str(recipient) if subject: uri += "?" if not "?" in uri else "&" uri += "subject=" uri += quote(str(subject)) if text: uri += "?" if not "?" in uri else "&" uri += "body=" uri += quote(str(text)) subprocess.Popen(["xdg-open", uri]) def instance(): return LinuxEmail()
Python
0
447a7c56401dce19f5bdd412e5f66ba40180b665
refactor take_catty_corner to use parse_analysis
OnStage/computer_dais.py
OnStage/computer_dais.py
from player_chair import Player class Computer(Player): name = 'computer' def choose(self, board): options = self.get_legal_moves(board) win_chance = self.take_win_chances(options, board) center = self.take_the_center(options) catty_corner = self.take_catty_corner(options, board) if win_chance: return win_chance elif center: return center elif catty_corner: return catty_corner return self.make_default_choice(options) def take_win_chances(self, options, board): analysis = self.scan_board(board) for condition in analysis: if condition == self.marker_code * 2: code = analysis.index(condition) return self.parse_analysis(options, code) return False def take_the_center(self, options): if 4 in options: return 4 return False def take_catty_corner(self, options, board): if board[4] == self.marker_code: analysis = self.scan_board(board) if analysis[6] == 11: return self.parse_analysis(options, 6) elif analysis[7] == 11: return self.parse_analysis(options, 7) return False def make_default_choice(self, options): priorities = [0,2,6,8] for priority in priorities: if priority in options: return priority return options[0] def parse_analysis(self, options, code): if code == 0: if 0 in options: return 0 elif 1 in options: return 1 return 2 elif code == 1: if 3 in options: return 3 elif 4 in options: return 4 return 5 elif code == 2: if 6 in options: return 6 elif 7 in options: return 7 return 8 elif code == 3: if 0 in options: return 0 elif 3 in options: return 3 return 6 elif code == 4: if 1 in options: return 1 elif 4 in options: return 4 return 7 elif code == 5: if 2 in options: return 2 elif 5 in options: return 5 return 8 elif code == 6: if 0 in options: return 0 elif 4 in options: return 4 return 8 elif code == 7: if 2 in options: return 2 elif 4 in options: return 4 return 6
from player_chair import Player class Computer(Player): name = 'computer' def choose(self, board): options = self.get_legal_moves(board) win_chance = self.take_win_chances(options, board) center = self.take_the_center(options) catty_corner = self.take_catty_corner(options, board) if win_chance: return win_chance elif center: return center elif catty_corner: return catty_corner return self.make_default_choice(options) def take_win_chances(self, options, board): analysis = self.scan_board(board) for condition in analysis: if condition == self.marker_code * 2: code = analysis.index(condition) return self.parse_analysis(options, code) return False def take_the_center(self, options): if 4 in options: return 4 return False def take_catty_corner(self, options, board): if board[4] == self.marker_code: analysis = self.scan_board(board) if analysis[6] == 11: if 0 in options: return 0 return 8 elif analysis[7] == 11: if 2 in options: return 2 return 6 return False def make_default_choice(self, options): priorities = [0,2,6,8] for priority in priorities: if priority in options: return priority return options[0] def parse_analysis(self, options, code): if code == 0: if 0 in options: return 0 elif 1 in options: return 1 return 2 elif code == 1: if 3 in options: return 3 elif 4 in options: return 4 return 5 elif code == 2: if 6 in options: return 6 elif 7 in options: return 7 return 8 elif code == 3: if 0 in options: return 0 elif 3 in options: return 3 return 6 elif code == 4: if 1 in options: return 1 elif 4 in options: return 4 return 7 elif code == 5: if 2 in options: return 2 elif 5 in options: return 5 return 8 elif code == 6: if 0 in options: return 0 elif 4 in options: return 4 return 8 elif code == 7: if 2 in options: return 2 elif 4 in options: return 4 return 6
Python
0.000018
8fb201b866c0eabc99c370cf3ccc993d2de06264
Update version 0.10.1
src/iteration_utilities/__init__.py
src/iteration_utilities/__init__.py
# Licensed under Apache License Version 2.0 - see LICENSE """Utilities based on Pythons iterators and generators.""" from ._iteration_utilities import * from ._convenience import * from ._recipes import * from ._additional_recipes import * from ._classes import * __version__ = '0.10.1'
# Licensed under Apache License Version 2.0 - see LICENSE """Utilities based on Pythons iterators and generators.""" from ._iteration_utilities import * from ._convenience import * from ._recipes import * from ._additional_recipes import * from ._classes import * __version__ = '0.10.0'
Python
0.000001
ffa551d8e4519005791f42bb2862f0411c54ced3
Update projectfiles_unchanged script
projectfiles_unchanged.py
projectfiles_unchanged.py
#!/usr/bin/env python3 # # This script is used on Linux, OS X and Windows. # Python 3 required. # Returns 0 if project files are unchanged and 1 else. # # Script version: 3 import os import glob import hashlib import sys matches = [] tmp_file = "projectfiles.md5.tmp" exlude_dirs = set(['.git', 'docs']) def get_subdirs(path): return set([name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name))]) def find_in(path): # print(path) out = [] out += glob.glob(path + "/*.pro") out += glob.glob(path + "/CMakeLists.txt") out += glob.glob(path + "/Info.plist") subs = get_subdirs(path) - exlude_dirs for s in subs: out += find_in(os.path.join(path, s)) out.sort() return out pros = find_in(".") # print(pros) hasher = hashlib.md5() for pro in pros: with open(pro) as f: s = f.read() hasher.update(s.encode('utf8')) current = hasher.hexdigest() if os.path.isfile(tmp_file): with open(tmp_file) as f: old = f.read() else: old = "" if current.strip() == old.strip(): sys.exit(0) else: with open(tmp_file, "w") as f: print(current, file=f) sys.exit(1)
# version: 2 import os import glob import hashlib import sys matches = [] exlude_dirs = set(['.git', 'docs']) def get_subdirs(path): return set([name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name))]) def find_in(path): # print(path) out = [] out += glob.glob(path + "/*.pro") out += glob.glob(path + "/CMakeLists.txt") out += glob.glob(path + "/Info.plist") subs = get_subdirs(path) - exlude_dirs for s in subs: out += find_in(os.path.join(path, s)) out.sort() return out pros = find_in(".") # print(pros) hasher = hashlib.md5() for pro in pros: with open(pro) as f: s = f.read() hasher.update(s.encode('utf8')) current = hasher.hexdigest() if os.path.isfile("projectfiles.md5.tmp"): with open("projectfiles.md5.tmp") as f: old = f.read() else: old = "" if current.strip() == old.strip(): sys.exit(0) else: with open("projectfiles.md5.tmp", "w") as f: print(current, file=f) sys.exit(1)
Python
0.000001
bbe43817fdb7d60b5f9cfbcda858fadd4ad88304
Update create_img_item.py
Logic/create_img_item.py
Logic/create_img_item.py
import tkinter as tk from PIL import ImageTk from PIL import Image from urllib.request import urlopen import io import base64 class CardImage: def __init__(self): self.Heart_Suit = list() self.Diamond_Suit = list() self.Club_Suit = list() self.Spade_Suit = list() self.github_path = "https://raw.githubusercontent.com/ACBL-Bridge/Bridge-Application/master/Deck/" # Store created image items into list - Heart for a in range(1, 14): self.Heart_Suit.append(tk.PhotoImage(data=self.get_raw("Heart" + str(a + 1) + ".gif"))) # Store created image items into list - Diamond for a in range(1, 14): self.Diamond_Suit.append(tk.PhotoImage(data=self.get_raw("Diamond" + str(a + 1) + ".gif"))) # Store created image items into list - Club for a in range(1, 14): self.Club_Suit.append(tk.PhotoImage(data=self.get_raw("Club" + str(a + 1) + ".gif"))) # Store created image items into list - Spade for a in range(1, 14): self.Spade_Suit.append(tk.PhotoImage(data=self.get_raw("Spades" + str(a + 1) + ".gif"))) # Store created image items into variables - Others self.heart = tk.PhotoImage(data=self.get_raw('heart.gif')) self.spades = tk.PhotoImage(data=self.get_raw('spades.gif')) self.club = tk.PhotoImage(data=self.get_raw('club.gif')) self.diamond = tk.PhotoImage(data=self.get_raw('diamond.gif')) self.heartT = tk.PhotoImage(data=self.get_raw('heartT.png')) self.spadesT = tk.PhotoImage(data=self.get_raw('spadesT.png')) self.clubT = tk.PhotoImage(data=self.get_raw('clubT.png')) self.diamondT = tk.PhotoImage(data=self.get_raw('diamondT.png')) self.nt = tk.PhotoImage(data=self.get_raw('nt.gif')) self.empty = tk.PhotoImage(data=self.get_raw('empty.png')) self.back_card_v = tk.PhotoImage(data=self.get_raw('back_card.gif')) self.back_card = tk.PhotoImage(data=self.get_raw('back_card_H.gif')) def get_raw(self, image): URL = self.github_path + image image_byte = urlopen(URL).read() image_b64 = base64.encodebytes(image_byte) return image_b64 def s_suit(self): return self.Spade_Suit def h_suit(self): return self.Heart_Suit def c_suit(self): return self.Club_Suit def d_suit(self): return self.Diamond_Suit def Cheart(self): return self.heart def Cspade(self): return self.spades def Cclub(self): return self.club def Cdiamond(self): return self.diamond def CTheart(self): return self.heartT def CTspade(self): return self.spadesT def CTclub(self): return self.clubT def CTdiamond(self): return self.diamondT def Cnt(self): return self.nt def CbackV(self): return self.back_card_v def Cback(self): return self.back_card def Cempty(self): return self.empty
import tkinter as tk from PIL import ImageTk from PIL import Image class CardImage: def __init__(self): self.Heart_Suit = list() self.Diamond_Suit = list() self.Club_Suit = list() self.Spade_Suit = list() # Store created image items into list - Heart for a in range(1, 14): self.Heart_Suit.append(tk.PhotoImage(file=("Heart" + str(a + 1) + ".gif"))) # Store created image items into list - Diamond for a in range(1, 14): self.Diamond_Suit.append(tk.PhotoImage(file=("Diamond" + str(a + 1) + ".gif"))) # Store created image items into list - Club for a in range(1, 14): self.Club_Suit.append(tk.PhotoImage(file=("Club" + str(a + 1) + ".gif"))) # Store created image items into list - Spade for a in range(1, 14): self.Spade_Suit.append(tk.PhotoImage(file=("Spades" + str(a + 1) + ".gif"))) # Store created image items into variables - Others self.heart = tk.PhotoImage(file='heart.gif') self.spades = tk.PhotoImage(file='spades.gif') self.club = tk.PhotoImage(file='club.gif') self.diamond = tk.PhotoImage(file='diamond.gif') self.heartT = tk.PhotoImage(file='heartT.png') self.spadesT = tk.PhotoImage(file='spadesT.png') self.clubT = tk.PhotoImage(file='clubT.png') self.diamondT = tk.PhotoImage(file='diamondT.png') self.nt = tk.PhotoImage(file='nt.gif') image = Image.open('back_card.gif') # back card vertical self.back_card_v = ImageTk.PhotoImage(image) image = image.transpose(Image.ROTATE_90) # back card horizontal self.back_card = ImageTk.PhotoImage(image) self.empty = tk.PhotoImage(file='empty.png') def s_suit(self): return self.Spade_Suit def h_suit(self): return self.Heart_Suit def c_suit(self): return self.Club_Suit def d_suit(self): return self.Diamond_Suit def Cheart(self): return self.heart def Cspade(self): return self.spades def Cclub(self): return self.club def Cdiamond(self): return self.diamond def CTheart(self): return self.heartT def CTspade(self): return self.spadesT def CTclub(self): return self.clubT def CTdiamond(self): return self.diamondT def Cnt(self): return self.nt def CbackV(self): return self.back_card_v def Cback(self): return self.back_card def Cempty(self): return self.empty
Python
0.000003
4e62d7d9514449be5afc5a27b15726a254077e89
Remove dangling argument
MachineSettingsAction.py
MachineSettingsAction.py
from cura.MachineAction import MachineAction import cura.Settings.CuraContainerRegistry from UM.i18n import i18nCatalog from UM.Settings.DefinitionContainer import DefinitionContainer from UM.Application import Application from PyQt5.QtCore import pyqtSlot, QObject catalog = i18nCatalog("cura") class MachineSettingsAction(MachineAction, QObject): def __init__(self, parent = None): MachineAction.__init__(self, "MachineSettingsAction", catalog.i18nc("@action", "Machine Settings")) self._qml_url = "MachineSettingsAction.qml" cura.Settings.CuraContainerRegistry.getInstance().containerAdded.connect(self._onContainerAdded) def _execute(self): pass def _reset(self): pass def _onContainerAdded(self, container): # Add this action as a supported action to all machine definitions if isinstance(container, DefinitionContainer) and container.getMetaDataEntry("type") == "machine": Application.getInstance().getMachineActionManager().addSupportedAction(container.getId(), self.getKey()) @pyqtSlot() def forceUpdate(self): # Force rebuilding the build volume by reloading the global container stack. # This is a bit of a hack, but it seems quick enough. Application.getInstance().globalContainerStackChanged.emit()
from cura.MachineAction import MachineAction import cura.Settings.CuraContainerRegistry from UM.i18n import i18nCatalog from UM.Settings.DefinitionContainer import DefinitionContainer from UM.Application import Application from PyQt5.QtCore import pyqtSlot, QObject catalog = i18nCatalog("cura") class MachineSettingsAction(MachineAction, QObject, ): def __init__(self, parent = None): MachineAction.__init__(self, "MachineSettingsAction", catalog.i18nc("@action", "Machine Settings")) self._qml_url = "MachineSettingsAction.qml" cura.Settings.CuraContainerRegistry.getInstance().containerAdded.connect(self._onContainerAdded) def _execute(self): pass def _reset(self): pass def _onContainerAdded(self, container): # Add this action as a supported action to all machine definitions if isinstance(container, DefinitionContainer) and container.getMetaDataEntry("type") == "machine": Application.getInstance().getMachineActionManager().addSupportedAction(container.getId(), self.getKey()) @pyqtSlot() def forceUpdate(self): # Force rebuilding the build volume by reloading the global container stack. # This is a bit of a hack, but it seems quick enough. Application.getInstance().globalContainerStackChanged.emit()
Python
0.000021
2e071c0e37fac657955de70fb7193b3e46ba2aef
Update subscribe_speakers_to_talks.py
p3/management/commands/subscribe_speakers_to_talks.py
p3/management/commands/subscribe_speakers_to_talks.py
# -*- coding: UTF-8 -*- from django.core.management.base import BaseCommand, CommandError from django.contrib.auth import get_user_model from conference import models as cmodels from hcomments import models as hmodels info = get_user_model().objects.get(email='info@pycon.it') class Command(BaseCommand): def handle(self, *args, **options): try: conf = args[0] except IndexError: raise CommandError('conference missing') qs = cmodels.TalkSpeaker.objects\ .filter(talk__conference=conf)\ .select_related('talk', 'speaker__user') for row in qs: u = row.speaker.user t = row.talk print '%s %s -> %s' % (u.first_name, u.last_name, t.title) hmodels.ThreadSubscription.objects.subscribe(t, u) hmodels.ThreadSubscription.objects.subscribe(t, info)
# -*- coding: UTF-8 -*- from django.core.management.base import BaseCommand, CommandError from conference import models as cmodels from hcomments import models as hmodels class Command(BaseCommand): def handle(self, *args, **options): try: conf = args[0] except IndexError: raise CommandError('conference missing') qs = cmodels.TalkSpeaker.objects\ .filter(talk__conference=conf)\ .select_related('talk', 'speaker__user') for row in qs: u = row.speaker.user t = row.talk print '%s %s -> %s' % (u.first_name, u.last_name, t.title) hmodels.ThreadSubscription.objects.subscribe(t, u)
Python
0
069100c9f4a117c76403567fd3862f0139d701bb
converted power plot into plotnine
power_ranker/web/power_plot.py
power_ranker/web/power_plot.py
#!/usr/bin/env python """Create box-plot of power rankings vs points scored""" import logging from pathlib import Path import pandas as pd from plotnine import * import warnings __author__ = 'Ryne Carbone' logger = logging.getLogger(__name__) def get_team_scores(df_schedule, team, week): """Get all scores for a team :param df_schedule: data frame with scores and team ids for each game :param team: id for team :param week: current week :return: series of scores for team up to week """ return ( df_schedule .query(f'(home_id=={team} | away_id=={team}) & (matchupPeriodId <= {week} & winner != "UNDECIDED")') .apply(lambda x: x.home_total_points if x.home_id == team else x.away_total_points, axis=1) ) def make_power_plot(df_ranks, df_schedule, df_teams, year, week): """Create plot of weekly scores and current power rankings :param df_ranks: data frame with current power rankings :param df_schedule: data frame with scores for each game :param df_teams: data frame with team names :param year: current year :param week: current week :return: None """ # Grab team id and power score, convert power to ranking df_plot = df_ranks[['team_id', 'power', 'tier']].reset_index(drop=True) # Add power rankings as categorical variable for plot df_plot['power'] = pd.Categorical( df_plot.get('power').rank(ascending=False).astype(int), categories=[i for i in range(df_plot.team_id.size, 0, -1)], ordered=True ) # Add in first names for each team df_plot['Name'] = df_plot.apply( lambda x: df_teams.loc[df_teams.team_id == x.get('team_id'), 'firstName'].values[0], axis=1) # Add in weekly scores df_plot['scores'] = df_plot.apply( lambda x: get_team_scores(df_schedule=df_schedule, team=x.get('team_id'), week=week).values, axis=1) # Add in where to put labels df_plot['label_pos'] = df_plot.scores.apply(lambda x: max(x) + 10) # Explode list into a row for each week df_plot = df_plot.explode('scores') df_plot.scores = df_plot.scores.astype(float) # noinspection PyTypeChecker p = ( ggplot(aes(y='scores', x='factor(power)', group='factor(power)', color='factor(tier)'), data=df_plot) + geom_boxplot(alpha=.8, outlier_alpha=0) + geom_jitter(width=.1, alpha=.3, color='black') + geom_text(aes(label='Name', x='factor(power)', y='label_pos'), color='black', nudge_y=3, data=df_plot.groupby(['team_id']).agg(max).reset_index(drop=True)) + coord_flip() + labs(x='Power Ranking', y='Weekly Score') + theme_bw() + theme(legend_title=element_text(text='Tiers', size=10), legend_position=(0.18, .72), legend_background=element_rect(alpha=0), panel_grid_major_y=element_blank()) ) # Specify where to save the plot out_dir = Path(f'output/{year}/week{week}') out_dir.mkdir(parents=True, exist_ok=True) out_name = out_dir / 'power_plot.png' warnings.filterwarnings('ignore') p.save(out_name, width=10, height=5.6, dpi=300) warnings.filterwarnings('default') logger.info(f'Saved power ranking plot to local file: {out_name.resolve()}')
#!/usr/bin/env python """Create boxplot of power rankings vs points scored""" import logging from pathlib import Path import matplotlib.pyplot as plt import numpy as np __author__ = 'Ryne Carbone' logger = logging.getLogger(__name__) #_____________________________________ def make_power_plot(teams, year, week): '''Make plot of power ranking versus average score''' scores = [] owners = [] powers = [] colors = [] # Tier colors c = [(133/255.,205/255.,242/255.), (122/255.,201/255.,96/255.), (224/255.,183/255.,0/255.), (255/255.,106/255.,43/255.), (168/255.,106/255.,156/255.)] tiers = [1,2,3,4,5] my_dpi = 96 minx=200 maxx=0 # Save scores, tiers, names for t in teams: t_scores = [] for s in t.stats.scores[:week]: t_scores.append(s) scores.append(t_scores) owners.append(t.owner.split()[0].title()) powers.append(float(t.rank.power)) colors.append(c[t.rank.tier-1]) t_min = min(t_scores) t_max = max(t_scores) if t_min < minx: minx = t_min if t_max > maxx: maxx = t_max # Set up plot f = plt.figure() f.set_size_inches(992./my_dpi,558./my_dpi) ax = f.add_subplot(111) ax.set_xlim(minx-10, maxx+30) plt.xlabel('Weekly Score') plt.ylabel('Power Rank') # create list of boxplots for each player bp_dict = plt.boxplot(scores, vert=False, showfliers=False, showmeans=False, showcaps=False) # change color for i,w in enumerate(bp_dict['whiskers']): j= int(i/2) if i%2 else int((i+1)/2) w.set(color=colors[j],linestyle="solid") for i,b in enumerate(bp_dict['boxes']): b.set(color=colors[i]) # show each score as red dot for i in range(len(teams)): x = scores[i] y = np.random.normal(i+1, 0.04, size=len(x)) ax.plot(x, y, 'r.', alpha=0.4) # put name to left of boxplot for i,line in enumerate(bp_dict['whiskers']): x, y = line.get_xydata()[1] # bottom of line (1 is top) if(i%2): ax.text(x+5,y, '%s'%owners[int(i/2)], horizontalalignment='left', verticalalignment='center') # Add a legend ax.legend(loc=2,title='Tier',labels=tiers) for i,leg_item in enumerate(ax.get_legend().legendHandles): leg_item.set_color(c[i]) plt.ylim(len(teams)+1,0) out_dir = Path(f'output/{year}/week{week}') out_dir.mkdir(parents=True, exist_ok=True) out_name = out_dir / 'power_plot.png' f.savefig(out_name, dpi=my_dpi*2, bbox_inches='tight') logger.info(f'Saved power ranking plot to local file: {out_name.resolve()}')
Python
0.999989
50266b417208f562e7fe430d5ab2906b56c323ca
remove alpha bleeding debug
PyTexturePacker/Utils.py
PyTexturePacker/Utils.py
# -*- coding: utf-8 -*- """---------------------------------------------------------------------------- Author: Huang Quanyong (wo1fSea) quanyongh@foxmail.com Date: 2016/10/19 Description: Utils.py ----------------------------------------------------------------------------""" SUPPORTED_IMAGE_FORMAT = [".png", ".jpg", ".bmp"] def load_images_from_paths(image_path_list): from .ImageRect import ImageRect image_rect_list = [] for file_path in image_path_list: image_rect = ImageRect(file_path) image_rect_list.append(image_rect) return image_rect_list def load_images_from_dir(dir_path): import os image_rect_path = [] for root, dirs, files in os.walk(dir_path): for f in files: file_path = os.path.join(root, f) _, ext = os.path.splitext(f) if ext.lower() in SUPPORTED_IMAGE_FORMAT: image_rect_path.append(file_path) return load_images_from_paths(image_rect_path) def save_plist(data_dict, file_name): import plistlib if hasattr(plistlib, "dump"): with open(file_name, 'wb') as fp: plistlib.dump(data_dict, fp) else: plistlib.writePlist(data_dict, file_name) def save_image(image, file_name): image.save(file_name) def alpha_bleeding(image, bleeding_pixel = 32): offsets = ((-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1,1)) image = image.copy() width, height = image.size if image.mode != "RGBA": image = image.convert("RGBA") pa = image.load() bleeding = set() borders = [] def _tell_border(x, y): if pa[x, y][3] == 0: return False for offset in offsets: ox = x + offset[0] oy = y + offset[1] if ox >= 0 and ox < width and oy >= 0 and oy < height \ and pa[ox, oy][3] == 0: return True return False def _bleeding(x ,y): borders = [] pixel = pa[x, y] for offset in offsets: ox = x + offset[0] oy = y + offset[1] if ox >= 0 and ox < width and oy >= 0 and oy < height \ and pa[ox, oy][3] == 0 and (ox, oy) not in bleeding: pa[ox, oy] = (pixel[0], pixel[1], pixel[2], 0) bleeding.add(pa) if _tell_border(ox, oy): borders.append((ox, oy)) return borders for x in range(width): for y in range(height): if _tell_border(x, y): borders.append((x, y)) for i in range(bleeding_pixel): pending = [] for border in borders: pending.extend(_bleeding(*border)) borders = pending return image def alpha_remove(image): image = image.copy() width, height = image.size if image.mode != "RGBA": image = image.convert("RGBA") pa = image.load() for x in range(width): for y in range(height): pixel = pa[x,y] pa[x, y] = (pixel[0], pixel[1], pixel[2], 255) return image
# -*- coding: utf-8 -*- """---------------------------------------------------------------------------- Author: Huang Quanyong (wo1fSea) quanyongh@foxmail.com Date: 2016/10/19 Description: Utils.py ----------------------------------------------------------------------------""" SUPPORTED_IMAGE_FORMAT = [".png", ".jpg", ".bmp"] def load_images_from_paths(image_path_list): from .ImageRect import ImageRect image_rect_list = [] for file_path in image_path_list: image_rect = ImageRect(file_path) image_rect_list.append(image_rect) return image_rect_list def load_images_from_dir(dir_path): import os image_rect_path = [] for root, dirs, files in os.walk(dir_path): for f in files: file_path = os.path.join(root, f) _, ext = os.path.splitext(f) if ext.lower() in SUPPORTED_IMAGE_FORMAT: image_rect_path.append(file_path) return load_images_from_paths(image_rect_path) def save_plist(data_dict, file_name): import plistlib if hasattr(plistlib, "dump"): with open(file_name, 'wb') as fp: plistlib.dump(data_dict, fp) else: plistlib.writePlist(data_dict, file_name) def save_image(image, file_name): image.save(file_name) def alpha_bleeding(image, bleeding_pixel = 32): offsets = ((-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1,1)) image = image.copy() width, height = image.size if image.mode != "RGBA": image = image.convert("RGBA") pa = image.load() bleeding = set() borders = [] def _tell_border(x, y): if pa[x, y][3] == 0: return False for offset in offsets: ox = x + offset[0] oy = y + offset[1] if ox >= 0 and ox < width and oy >= 0 and oy < height \ and pa[ox, oy][3] == 0: return True return False def _bleeding(x ,y): borders = [] pixel = pa[x, y] for offset in offsets: ox = x + offset[0] oy = y + offset[1] if ox >= 0 and ox < width and oy >= 0 and oy < height \ and pa[ox, oy][3] == 0 and (ox, oy) not in bleeding: pa[ox, oy] = (pixel[0], pixel[1], pixel[2], 255) bleeding.add(pa) if _tell_border(ox, oy): borders.append((ox, oy)) return borders for x in range(width): for y in range(height): if _tell_border(x, y): borders.append((x, y)) for i in range(bleeding_pixel): pending = [] for border in borders: pending.extend(_bleeding(*border)) borders = pending return image def alpha_remove(image): image = image.copy() width, height = image.size if image.mode != "RGBA": image = image.convert("RGBA") pa = image.load() for x in range(width): for y in range(height): pixel = pa[x,y] pa[x, y] = (pixel[0], pixel[1], pixel[2], 255) return image
Python
0
ea245f90cf0fb18cc0894d4b959ce7c3a75cf0c5
align librispeech
SCT/benchmark_aligner.py
SCT/benchmark_aligner.py
import sys import shutil, os sys.path.insert(0, os.path.expanduser('~/Montreal-Forced-Aligner')) import time import logging import platform import csv import statistics from datetime import datetime from aligner.command_line.train_and_align import align_corpus, align_corpus_no_dict corpus_dir = '/media/share/datasets/aligner_benchmarks/LibriSpeech/standard' #corpus_dir = '/media/share/datasets/aligner_benchmarks/sorted_quebec_french' dict_path = os.path.expanduser('~/Montreal-Forced-Aligner/librispeech-lexicon.txt') #dict_path = os.path.expanduser('~/Montreal-Forced-Aligner/dist/montreal-forced-aligner/prosodylab.dictionaries/fr.dict') output_directory = '/data/michaela/aligned_librispeech' #output_directory = '/data/michaela/aligned_quebec_french' output_model_path = os.path.expanduser('~/Documents/librispeech_models.zip') num_jobs = 12 def benchmark_align_corpus(corpus_dir, dict_path, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose): beg = time.time() align_corpus(corpus_dir, dict_path, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose) end = time.time() return [(end - beg)] def benchmark_align_corpus_no_dict(corpus_dir, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose): beg = time.time() align_corpus_no_dict(corpus_dir, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose) end = time.time() return [(end - beg)] if dict_path == None: nodict = benchmark_align_corpus_no_dict(corpus_dir, output_directory, 0, False, output_model_path, num_jobs, False, False) else: yesdict = benchmark_align_corpus(corpus_dir, dict_path, output_directory, 0, False, output_model_path, num_jobs, True, False) def WriteDictToCSV(csv_file,csv_columns,dict_data): with open(csv_file, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for data in dict_data: writer.writerow(data) return csv_columns = ['Computer','Date','Corpus', 'Type of benchmark', 'Total time', 'Num_jobs'] if dict_path == None: dict_data = [ {'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': corpus_dir, 'Type of benchmark': 'train and align', 'Total time': nodict[0], 'Num_jobs': num_jobs} ] else: dict_data = [ {'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': corpus_dir, 'Type of benchmark': 'train and align', 'Total time': yesdict[0], 'Num_jobs': num_jobs} ] now = datetime.now() date = str(now.year)+str(now.month)+str(now.day) if not os.path.exists('aligner_benchmark'+date+'.csv'): open('aligner_benchmark'+date+'.csv', 'a') with open('aligner_benchmark'+date+'.csv', 'a') as csv_file: writer = csv.DictWriter(csv_file, fieldnames=csv_columns) writer.writeheader() csv_file = 'aligner_benchmark'+date+'.csv' with open('aligner_benchmark'+date+'.csv', 'a') as csv_file: writer = csv.DictWriter(csv_file, fieldnames=csv_columns) writer.writerow(dict_data[0])
import sys import shutil, os sys.path.insert(0, os.path.expanduser('~/Montreal-Forced-Aligner')) import time import logging import platform import csv import statistics from datetime import datetime from aligner.command_line.train_and_align import align_corpus, align_corpus_no_dict #corpus_dir = '/media/share/datasets/aligner_benchmarks/LibriSpeech/standard' corpus_dir = '/media/share/datasets/aligner_benchmarks/sorted_quebec_french' #dict_path = os.path.expanduser('~/Montreal-Forced-Aligner/librispeech-lexicon.txt') dict_path = os.path.expanduser('~/Montreal-Forced-Aligner/dist/montreal-forced-aligner/prosodylab.dictionaries/fr.dict') #output_directory = '/data/michaela/aligned_librispeech' output_directory = '/data/michaela/aligned_quebec_french' output_model_path = os.path.expanduser('~/Documents/quebec_french_models.zip') num_jobs = 2 def benchmark_align_corpus(corpus_dir, dict_path, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose): beg = time.time() align_corpus(corpus_dir, dict_path, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose) end = time.time() return [(end - beg)] def benchmark_align_corpus_no_dict(corpus_dir, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose): beg = time.time() align_corpus_no_dict(corpus_dir, output_directory, speaker_characters, fast, output_model_path, num_jobs, verbose) end = time.time() return [(end - beg)] if dict_path == None: nodict = benchmark_align_corpus_no_dict(corpus_dir, output_directory, 0, False, output_model_path, num_jobs, False) else: yesdict = benchmark_align_corpus(corpus_dir, dict_path, output_directory, 0, False, output_model_path, num_jobs, True) def WriteDictToCSV(csv_file,csv_columns,dict_data): with open(csv_file, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for data in dict_data: writer.writerow(data) return csv_columns = ['Computer','Date','Corpus', 'Type of benchmark', 'Total time', 'Num_jobs'] if dict_path == None: dict_data = [ {'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': corpus_dir, 'Type of benchmark': 'train and align', 'Total time': nodict[0], 'Num_jobs': num_jobs} ] else: dict_data = [ {'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': corpus_dir, 'Type of benchmark': 'train and align', 'Total time': yesdict[0], 'Num_jobs': num_jobs} ] now = datetime.now() date = str(now.year)+str(now.month)+str(now.day) if not os.path.exists('aligner_benchmark'+date+'.csv'): open('aligner_benchmark'+date+'.csv', 'a') with open('aligner_benchmark'+date+'.csv', 'a') as csv_file: writer = csv.DictWriter(csv_file, fieldnames=csv_columns) writer.writeheader() csv_file = 'aligner_benchmark'+date+'.csv' with open('aligner_benchmark'+date+'.csv', 'a') as csv_file: writer = csv.DictWriter(csv_file, fieldnames=csv_columns) writer.writerow(dict_data[0])
Python
0.998401
843121f8c0e4c0f2a533540633c62060bb676ea1
remove unnecessary dependency
_unittests/ut_xmlhelper/test_xml_iterator.py
_unittests/ut_xmlhelper/test_xml_iterator.py
#-*- coding: utf-8 -*- """ @brief test log(time=20s) """ import sys import os import unittest try: import src except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", ".."))) if path not in sys.path: sys.path.append(path) import src try: import pyquickhelper as skip_ except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", "..", "..", "pyquickhelper", "src"))) if path not in sys.path: sys.path.append(path) import pyquickhelper as skip_ try: import pyensae as skip__ except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", "..", "..", "pyensae", "src"))) if path not in sys.path: sys.path.append(path) import pyensae as skip__ from pyquickhelper.loghelper import fLOG from pyquickhelper.pycode import get_temp_folder, ExtTestCase from src.pyrsslocal.xmlhelper import xml_filter_iterator class TestXmlIterator(ExtTestCase): def test_enumerate_xml_row(self): fLOG( __file__, self._testMethodName, OutputPrint=__name__ == "__main__") temp = get_temp_folder(__file__, "temp_enumerate_wolf_xml_row") data = os.path.join(temp, "..", "data", "sample.wolf.xml") rows = xml_filter_iterator(data, fLOG=fLOG, xmlformat=False, log=True) n = 0 node = None for i, row in enumerate(rows): if node is None: node = row #fLOG(type(row), row) s = str(row) self.assertTrue(s is not None) for obj in row.iterfields(): s = str(obj) self.assertTrue(s is not None) if i % 2 == 0: row._convert_into_list() xout = row.get_xml_output() self.assertTrue(xout is not None) row.find_node_value("SYNET") n += 1 self.assertGreater(n, 0) # node += node if __name__ == "__main__": unittest.main()
#-*- coding: utf-8 -*- """ @brief test log(time=20s) """ import sys import os import unittest try: import src except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", ".."))) if path not in sys.path: sys.path.append(path) import src try: import pyquickhelper as skip_ except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", "..", "..", "pyquickhelper", "src"))) if path not in sys.path: sys.path.append(path) import pyquickhelper as skip_ try: import pyensae as skip__ except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", "..", "..", "pyensae", "src"))) if path not in sys.path: sys.path.append(path) import pyensae as skip__ try: import pymyinstall as skip___ except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", "..", "..", "pymyinstall", "src"))) if path not in sys.path: sys.path.append(path) import pymyinstall as skip___ from pyquickhelper.loghelper import fLOG from pyquickhelper.pycode import get_temp_folder, ExtTestCase from src.pyrsslocal.xmlhelper import xml_filter_iterator class TestXmlIterator(ExtTestCase): def test_enumerate_xml_row(self): fLOG( __file__, self._testMethodName, OutputPrint=__name__ == "__main__") temp = get_temp_folder(__file__, "temp_enumerate_wolf_xml_row") data = os.path.join(temp, "..", "data", "sample.wolf.xml") rows = xml_filter_iterator(data, fLOG=fLOG, xmlformat=False, log=True) n = 0 node = None for i, row in enumerate(rows): if node is None: node = row #fLOG(type(row), row) s = str(row) self.assertTrue(s is not None) for obj in row.iterfields(): s = str(obj) self.assertTrue(s is not None) if i % 2 == 0: row._convert_into_list() xout = row.get_xml_output() self.assertTrue(xout is not None) row.find_node_value("SYNET") n += 1 self.assertGreater(n, 0) # node += node if __name__ == "__main__": unittest.main()
Python
0.000413
35c5fd2606682827e43f707400d726f84f41104a
Fix for issue 11.
abusehelper/core/opts.py
abusehelper/core/opts.py
import os import sys import inspect from optparse import OptionParser from ConfigParser import SafeConfigParser def long_name(key): return key.replace("_", "-").lower() def action_and_type(default): if isinstance(default, bool): if default: return "store_false", None return "store_true", None elif isinstance(default, int): return "store", "int" elif isinstance(default, float): return "store", "float" return "store", "string" def optparse(func, argv=list(sys.argv[1:])): args, varargs, varkw, defaults = inspect.getargspec(func) assert varargs is None, "variable argument definitions are not supported" assert varkw is None, "variable keyword definitions are not supported" if not defaults: positionals = args defaults = dict() else: positionals = args[:-len(defaults)] defaults = dict(zip(args[-len(defaults):], defaults)) parser = OptionParser() usage = list() usage.append("Usage: %prog [options]") for name in positionals: usage.append(name) parser.set_usage(" ".join(usage)) # Add the INI config file parsing options. The INI section name is # magically determined from the given function's module file name # (e.g. filename '../lib/testmodule.py' -> section 'testmodule'). parser.add_option("--ini-file", dest="ini_file", default=None, help="INI file used for configuration", metavar="ini_file") _, module_file = os.path.split(inspect.getmodule(func).__file__) section_name, _ = os.path.splitext(module_file) parser.add_option("--ini-section", dest="ini_section", default=section_name, help=("if an INI configuration file is specified, "+ "use this section (default: %default)"), metavar="ini_section") long_names = dict() actions = dict() for key in args: long = getattr(func, key + "_long", long_name(key)) long_names[key] = long short = getattr(func, key + "_short", None) names = list() if short is not None: names.append("-" + short) names.append("--" + long) kwargs = dict() kwargs["dest"] = key kwargs["help"] = getattr(func, key + "_help", None) kwargs["metavar"] = getattr(func, key + "_metavar", key) if key in defaults: default = defaults[key] action, type = action_and_type(default) kwargs["default"] = default kwargs["action"] = getattr(func, key + "_action", action) kwargs["type"] = getattr(func, key + "_type", type) actions[key] = kwargs["action"] option = parser.add_option(*names, **kwargs) options, params = parser.parse_args(list(argv)) # Open and parse the INI configuration file, if given. if options.ini_file is not None: config = SafeConfigParser() config.read([options.ini_file]) section = options.ini_section argv = list(argv) for key in args: if not config.has_option(section, key): continue action = actions.get(key, None) if action == "store_true": if config.getboolean(section, key): argv.insert(0, "--%s" % long_names[key]) elif action == "store_false": if not config.getboolean(section, key): argv.insert(0, "--%s" % long_names[key]) else: value = config.get(section, key) argv.insert(0, "--%s=%s" % (long_names[key], value)) options, params = parser.parse_args(argv) arglist = list() for key in args: positional = key in positionals if not positional or getattr(options, key) is not None: arglist.append(getattr(options, key)) elif positional and params: arglist.append(params.pop(0)) else: parser.error("missing value for argument %s" % key) if params: parser.error("too many positional arguments") return func(*arglist)
import os
import sys
import inspect
from optparse import OptionParser
from ConfigParser import SafeConfigParser

def long_name(key):
    return key.replace("_", "-").lower()

def action_and_type(default):
    if isinstance(default, bool):
        if default:
            return "store_false", None
        return "store_true", None
    elif isinstance(default, int):
        return "store", "int"
    elif isinstance(default, float):
        return "store", "float"
    return "store", "string"

def optparse(func, argv=list(sys.argv[1:])):
    args, varargs, varkw, defaults = inspect.getargspec(func)
    assert varargs is None, "variable argument definitions are not supported"
    assert varkw is None, "variable keyword definitions are not supported"

    if not defaults:
        positionals = args
        defaults = dict()
    else:
        positionals = args[:-len(defaults)]
        defaults = dict(zip(args[-len(defaults):], defaults))

    parser = OptionParser()

    usage = list()
    usage.append("Usage: %prog [options]")
    for name in positionals:
        usage.append(name)
    parser.set_usage(" ".join(usage))

    # Add the INI config file parsing options. The INI section name is
    # magically determined from the given function's module file name
    # (e.g. filename '../lib/testmodule.py' -> section 'testmodule').
    parser.add_option("--ini-file",
                      dest="ini_file",
                      default=None,
                      help="INI file used for configuration",
                      metavar="ini_file")
    _, module_file = os.path.split(inspect.getmodule(func).__file__)
    section_name, _ = os.path.splitext(module_file)
    parser.add_option("--ini-section",
                      dest="ini_section",
                      default=section_name,
                      help=("if an INI configuration file is specified, "+
                            "use this section (default: %default)"),
                      metavar="ini_section")

    long_names = dict()

    for key in args:
        long = getattr(func, key + "_long", long_name(key))
        long_names[key] = long

        short = getattr(func, key + "_short", None)

        names = list()
        if short is not None:
            names.append("-" + short)
        names.append("--" + long)

        kwargs = dict()
        kwargs["dest"] = key
        kwargs["help"] = getattr(func, key + "_help", None)
        kwargs["metavar"] = getattr(func, key + "_metavar", key)

        if key in defaults:
            default = defaults[key]
            action, type = action_and_type(default)
            kwargs["default"] = default
            kwargs["action"] = getattr(func, key + "_action", action)
            kwargs["type"] = getattr(func, key + "_type", type)

        option = parser.add_option(*names, **kwargs)

    options, params = parser.parse_args(list(argv))

    # Open and parse the INI configuration file, if given.
    if options.ini_file is not None:
        config = SafeConfigParser()
        config.read([options.ini_file])

        section_name = options.ini_section
        if config.has_section(section_name):
            section = dict(config.items(section_name))
        else:
            section = config.defaults()

        argv = list(argv)
        for key in args:
            if key in section:
                argv.insert(0, "--%s=%s" % (long_names[key], section[key]))

        options, params = parser.parse_args(argv)

    arglist = list()
    for key in args:
        positional = key in positionals

        if not positional or getattr(options, key) is not None:
            arglist.append(getattr(options, key))
        elif positional and params:
            arglist.append(params.pop(0))
        else:
            parser.error("missing value for argument %s" % key)

    if params:
        parser.error("too many positional arguments")

    return func(*arglist)
Python
0
78dd9bb220a8e1a03b51b801e023e4401a351892
Support animated pngs
gif_split/views.py
gif_split/views.py
import os
import posixpath
from cStringIO import StringIO
import logging

import requests
from PIL import Image, ImageSequence
from paste.httpheaders import CONTENT_DISPOSITION
from pyramid.response import FileIter, FileResponse
from pyramid.view import view_config
from pyramid_duh import argify

LOG = logging.getLogger(__name__)


@view_config(
    route_name='root',
    renderer='index.jinja2')
@argify
def index_view(request, url=None):
    """ Root view '/' """
    if url:
        filename, ext = posixpath.splitext(posixpath.basename(url))
        ext = ext.lower()
        filename = filename + "_sprite" + ext
        if ext == '.gif':
            img_format = 'GIF'
        elif ext == '.png':
            img_format = 'PNG'
        else:
            img_format = None
        stream = download_gif(url)
        sprite = convert_gif(stream)
        data = StringIO()
        sprite.save(data, format=img_format)
        data.seek(0)
        disp = CONTENT_DISPOSITION.tuples(filename=filename)
        request.response.headers.update(disp)
        request.response.app_iter = FileIter(data)
        return request.response
    else:
        return {}

def download_gif(url):
    return StringIO(requests.get(url).content)

def convert_gif(stream):
    image = Image.open(stream)
    frames = ImageSequence.Iterator(image)
    frame_width, frame_height = 0, 0
    frame_width, frame_height = frames[0].size
    width = frame_width*len(list(frames))
    height = frame_height
    out = Image.new('RGBA', (width, height))
    stream.seek(0)
    image = Image.open(stream)
    for i, frame in enumerate(ImageSequence.Iterator(image)):
        out.paste(frame, (frame_width*i, 0))
    return out
import os
import posixpath
from cStringIO import StringIO
import logging

import requests
from PIL import Image, ImageSequence
from paste.httpheaders import CONTENT_DISPOSITION
from pyramid.response import FileIter, FileResponse
from pyramid.view import view_config
from pyramid_duh import argify

LOG = logging.getLogger(__name__)


@view_config(
    route_name='root',
    renderer='index.jinja2')
@argify
def index_view(request, url=None):
    """ Root view '/' """
    if url is not None:
        filename = posixpath.basename(url).replace('.gif', '')
        filename = filename + "_sprite.gif"
        stream = download_gif(url)
        sprite = convert_gif(stream)
        data = StringIO()
        sprite.save(data, format='GIF')
        data.seek(0)
        disp = CONTENT_DISPOSITION.tuples(filename=filename)
        request.response.headers.update(disp)
        request.response.app_iter = FileIter(data)
        return request.response
    else:
        return {}

def download_gif(url):
    return StringIO(requests.get(url).content)

def convert_gif(stream):
    image = Image.open(stream)
    frames = ImageSequence.Iterator(image)
    frame_width, frame_height = 0, 0
    frame_width, frame_height = frames[0].size
    width = frame_width*len(list(frames))
    height = frame_height
    out = Image.new('RGBA', (width, height))
    stream.seek(0)
    image = Image.open(stream)
    for i, frame in enumerate(ImageSequence.Iterator(image)):
        out.paste(frame, (frame_width*i, 0))
    return out
Python
0
eafe5c77a05d0f6d24bfb61c2be3a7d46c411b25
add new projections.
examples/plot_tissot.py
examples/plot_tissot.py
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import __version__ as basemap_version

# Tissot's Indicatrix (http://en.wikipedia.org/wiki/Tissot's_Indicatrix).
# These diagrams illustrate the distortion inherent in all map projections.
# In conformal projections, where angles are conserved around every location,
# the Tissot's indicatrix are all circles, with varying sizes. In equal-area
# projections, where area proportions between objects are conserved, the
# Tissot's indicatrix have all unit area, although their shapes and
# orientations vary with location.

# requires Basemap version 0.99.1
if basemap_version < '0.99.1':
    raise SystemExit("this example requires Basemap version 0.99.1 or higher")

# create Basemap instances with several different projections
m1 = Basemap(llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,
             projection='cyl')
m2 = Basemap(llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,
             projection='mill')
m3 = Basemap(lon_0=-60,lat_0=45,projection='ortho')
m4 = Basemap(llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,
             projection='merc',lat_ts=20)
m5 = Basemap(lon_0=270,lat_0=90,boundinglat=5,projection='npstere')
m6 = Basemap(lon_0=270,lat_0=90,boundinglat=5,projection='nplaea')
m7 = Basemap(lon_0=0,projection='moll')
m8 = Basemap(lon_0=0,projection='robin')
m9 = Basemap(lon_0=0,projection='hammer')
m10 = Basemap(lon_0=0,projection='mbtfpq')
m11 = Basemap(lon_0=0,projection='eck4')
m12 = Basemap(lon_0=0,projection='kav7')
m13 = Basemap(lon_0=270,lat_0=90,boundinglat=5,projection='npaeqd')

#for m in [m1,m2,m3,m4,m5,m6,m7,m8,m9,m10,m11,m12,m13]:
for m in [m6]:
    # make a new figure.
    fig = plt.figure()
    # draw "circles" at specified longitudes and latitudes.
    for parallel in range(-70,71,20):
        for meridian in range(-150,151,60):
            poly = m.tissot(meridian,parallel,6,100,facecolor='green',zorder=10,alpha=0.5)
    # draw meridians and parallels.
    m.drawparallels(np.arange(-60,61,30),labels=[1,0,0,0])
    m.drawmeridians(np.arange(-180,180,60),labels=[0,0,0,1])
    # draw coastlines, fill continents, plot title.
    m.drawcoastlines()
    m.drawmapboundary(fill_color='aqua')
    m.fillcontinents(color='coral',lake_color='aqua')
    title = 'Tissot Diagram: projection = %s' % m.projection
    print title
    plt.title(title)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import __version__ as basemap_version

# Tissot's Indicatrix (http://en.wikipedia.org/wiki/Tissot's_Indicatrix).
# These diagrams illustrate the distortion inherent in all map projections.
# In conformal projections, where angles are conserved around every location,
# the Tissot's indicatrix are all circles, with varying sizes. In equal-area
# projections, where area proportions between objects are conserved, the
# Tissot's indicatrix have all unit area, although their shapes and
# orientations vary with location.

# requires Basemap version 0.99.1
if basemap_version < '0.99.1':
    raise SystemExit("this example requires Basemap version 0.99.1 or higher")

# create Basemap instances with several different projections
m1 = Basemap(llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,
             projection='cyl')
m2 = Basemap(lon_0=-60,lat_0=45,projection='ortho')
m3 = Basemap(llcrnrlon=-180,llcrnrlat=-70,urcrnrlon=180,urcrnrlat=70,
             projection='merc',lat_ts=20)
m4 = Basemap(lon_0=270,lat_0=90,boundinglat=10,projection='npstere')
m5 = Basemap(lon_0=270,lat_0=90,boundinglat=10,projection='nplaea')
m6 = Basemap(lon_0=0,projection='moll')
m7 = Basemap(lon_0=0,projection='robin')
m8 = Basemap(lon_0=0,projection='hammer')
m9 = Basemap(lon_0=0,projection='mbtfpq')
m10 = Basemap(lon_0=270,lat_0=90,boundinglat=10,projection='npaeqd')

for m in [m1,m2,m3,m4,m5,m6,m7,m8,m9,m10]:
    # make a new figure.
    fig = plt.figure()
    # draw "circles" at specified longitudes and latitudes.
    for parallel in range(-60,61,30):
        for meridian in range(-165,166,30):
            poly = m.tissot(meridian,parallel,6,100,facecolor='green',zorder=10,alpha=0.5)
    # draw meridians and parallels.
    if m.projection != 'ortho':
        labels = [1,0,0,0]
    else:
        labels = [0,0,0,0]
    m.drawparallels(np.arange(-60,61,30),labels=labels)
    if m.projection not in ['moll','hammer','mbtfpq','ortho']:
        labels = [0,0,0,1]
    else:
        labels = [0,0,0,0]
    m.drawmeridians(np.arange(-180,180,60),labels=labels)
    # draw coastlines, fill continents, plot title.
    m.drawcoastlines()
    m.drawmapboundary(fill_color='aqua')
    m.fillcontinents(color='coral',lake_color='aqua')
    title = 'Tissot Diagram: projection = %s' % m.projection
    print title
    plt.title(title)
plt.show()
Python
0