Dataset schema (one record per commit; field name, type, observed size range):

commit: stringlengths, 40 to 40
subject: stringlengths, 4 to 1.73k
repos: stringlengths, 5 to 127k
old_file: stringlengths, 2 to 751
new_file: stringlengths, 2 to 751
new_contents: stringlengths, 1 to 8.98k
old_contents: stringlengths, 0 to 6.59k
license: stringclasses, 13 values
lang: stringclasses, 23 values
b4f0bbb8e9fd198cfa60daa3a01a4a48a0fd18af
Replace assertFalse/assertTrue(a in b)
openstack/sahara,openstack/sahara
sahara/tests/unit/plugins/storm/test_config_helper.py
sahara/tests/unit/plugins/storm/test_config_helper.py
# Copyright 2017 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from testtools import testcase from sahara.plugins.storm import config_helper as s_config from sahara.plugins.storm import plugin as s_plugin class TestStormConfigHelper(testcase.TestCase): def test_generate_storm_config(self): STORM_092 = '0.9.2' STORM_101 = '1.0.1' STORM_110 = '1.1.0' tested_versions = [] master_hostname = "s-master" zk_hostnames = ["s-zoo"] configs_092 = s_config.generate_storm_config( master_hostname, zk_hostnames, STORM_092) self.assertIn('nimbus.host', configs_092.keys()) self.assertNotIn('nimbus.seeds', configs_092.keys()) tested_versions.append(STORM_092) configs_101 = s_config.generate_storm_config( master_hostname, zk_hostnames, STORM_101) self.assertNotIn('nimbus.host', configs_101.keys()) self.assertIn('nimbus.seeds', configs_101.keys()) self.assertIn('client.jartransformer.class', configs_101.keys()) self.assertEqual(configs_101['client.jartransformer.class'], 'org.apache.storm.hack.StormShadeTransformer') tested_versions.append(STORM_101) configs_110 = s_config.generate_storm_config( master_hostname, zk_hostnames, STORM_110) self.assertNotIn('nimbus.host', configs_110.keys()) self.assertIn('nimbus.seeds', configs_110.keys()) self.assertIn('client.jartransformer.class', configs_110.keys()) self.assertEqual(configs_110['client.jartransformer.class'], 'org.apache.storm.hack.StormShadeTransformer') tested_versions.append(STORM_110) storm = s_plugin.StormProvider() self.assertEqual(storm.get_versions(), tested_versions)
# Copyright 2017 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from testtools import testcase from sahara.plugins.storm import config_helper as s_config from sahara.plugins.storm import plugin as s_plugin class TestStormConfigHelper(testcase.TestCase): def test_generate_storm_config(self): STORM_092 = '0.9.2' STORM_101 = '1.0.1' STORM_110 = '1.1.0' tested_versions = [] master_hostname = "s-master" zk_hostnames = ["s-zoo"] configs_092 = s_config.generate_storm_config( master_hostname, zk_hostnames, STORM_092) self.assertTrue('nimbus.host' in configs_092.keys()) self.assertFalse('nimbus.seeds' in configs_092.keys()) tested_versions.append(STORM_092) configs_101 = s_config.generate_storm_config( master_hostname, zk_hostnames, STORM_101) self.assertFalse('nimbus.host' in configs_101.keys()) self.assertTrue('nimbus.seeds' in configs_101.keys()) self.assertTrue('client.jartransformer.class' in configs_101.keys()) self.assertEqual(configs_101['client.jartransformer.class'], 'org.apache.storm.hack.StormShadeTransformer') tested_versions.append(STORM_101) configs_110 = s_config.generate_storm_config( master_hostname, zk_hostnames, STORM_110) self.assertFalse('nimbus.host' in configs_110.keys()) self.assertTrue('nimbus.seeds' in configs_110.keys()) self.assertTrue('client.jartransformer.class' in configs_110.keys()) self.assertEqual(configs_110['client.jartransformer.class'], 'org.apache.storm.hack.StormShadeTransformer') tested_versions.append(STORM_110) storm = s_plugin.StormProvider() self.assertEqual(storm.get_versions(), tested_versions)
apache-2.0
Python
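The commit above swaps `assertTrue(x in y)` / `assertFalse(x in y)` for `assertIn` / `assertNotIn`. Beyond style, the dedicated assertions produce self-describing failure messages, and dict membership tests the keys directly, so the `.keys()` calls are redundant anyway. A minimal, self-contained sketch (names are illustrative):

```python
import unittest

class InAssertionDemo(unittest.TestCase):
    def test_membership(self):
        configs = {'nimbus.seeds': ['s-master']}
        # On failure these report the key and the container contents,
        # e.g. "'nimbus.host' not found in {...}":
        self.assertIn('nimbus.seeds', configs)
        self.assertNotIn('nimbus.host', configs)
        # The old form only reports "False is not true" on failure:
        self.assertTrue('nimbus.seeds' in configs)

if __name__ == '__main__':
    unittest.main()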
603c36aec2a4704bb4cf41c224194a5f83f9babe
Set the module as auto_install
BT-ojossen/e-commerce,Antiun/e-commerce,raycarnes/e-commerce,jt-xx/e-commerce,gurneyalex/e-commerce,BT-ojossen/e-commerce,Endika/e-commerce,damdam-s/e-commerce,vauxoo-dev/e-commerce,charbeljc/e-commerce,brain-tec/e-commerce,BT-jmichaud/e-commerce,Endika/e-commerce,brain-tec/e-commerce,JayVora-SerpentCS/e-commerce,fevxie/e-commerce,JayVora-SerpentCS/e-commerce,jt-xx/e-commerce,Antiun/e-commerce,raycarnes/e-commerce,cloud9UG/e-commerce,vauxoo-dev/e-commerce,BT-fgarbely/e-commerce
sale_payment_method_automatic_workflow/__openerp__.py
sale_payment_method_automatic_workflow/__openerp__.py
# -*- coding: utf-8 -*- ############################################################################## # # Author: Guewen Baconnier # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## {'name': 'Sale Payment Method - Automatic Reconcile', 'version': '1.0', 'author': ['Camptocamp', 'Akretion'], 'license': 'AGPL-3', 'category': 'Generic Modules/Others', 'depends': ['sale_payment_method', 'sale_automatic_workflow'], 'website': 'http://www.camptocamp.com', 'data': [], 'test': [], 'installable': True, 'auto_install': True, }
# -*- coding: utf-8 -*- ############################################################################## # # Author: Guewen Baconnier # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## {'name': 'Sale Payment Method - Automatic Reconcile', 'version': '1.0', 'author': ['Camptocamp', 'Akretion'], 'license': 'AGPL-3', 'category': 'Generic Modules/Others', 'depends': ['sale_payment_method', 'sale_automatic_workflow'], 'website': 'http://www.camptocamp.com', 'data': [], 'test': [], 'installable': True, 'auto_install': False, }
agpl-3.0
Python
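For context, `auto_install` in an OpenERP/Odoo manifest marks a glue module: once every module in `depends` is installed, this one is installed automatically, which fits a bridge between `sale_payment_method` and `sale_automatic_workflow`. A minimal manifest sketch with hypothetical module names:

```python
# __openerp__.py of a hypothetical glue module
{
    'name': 'Bridge between Module A and Module B',
    'version': '1.0',
    'depends': ['module_a', 'module_b'],
    'installable': True,
    # Installed automatically as soon as module_a AND module_b are both
    # installed; it never needs to be selected by hand.
    'auto_install': True,
}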
4a6060f476aebac163dbac8f9822539596379c0a
Use current_app.babel_instance instead of babel
Turbo87/welt2000,Turbo87/welt2000
welt2000/__init__.py
welt2000/__init__.py
from flask import Flask, request, session, current_app from flask.ext.babel import Babel from babel.core import negotiate_locale from welt2000.__about__ import ( __title__, __summary__, __uri__, __version__, __author__, __email__, __license__, ) # noqa app = Flask(__name__) app.secret_key = '1234567890' babel = Babel(app) @app.template_global() @babel.localeselector def get_locale(): available = ['en'] available.extend(map(str, current_app.babel_instance.list_translations())) lang = session.get('lang') if lang and lang in available: return lang preferred = map(lambda l: l[0], request.accept_languages) return negotiate_locale(preferred, available) from welt2000 import views # noqa
from flask import Flask, request, session from flask.ext.babel import Babel from babel.core import negotiate_locale from welt2000.__about__ import ( __title__, __summary__, __uri__, __version__, __author__, __email__, __license__, ) # noqa app = Flask(__name__) app.secret_key = '1234567890' babel = Babel(app) translations = ['en'] translations.extend(map(str, babel.list_translations())) @app.template_global() @babel.localeselector def get_locale(): lang = session.get('lang') if lang and lang in translations: return lang preferred = map(lambda l: l[0], request.accept_languages) return negotiate_locale(preferred, translations) from welt2000 import views # noqa
mit
Python
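The refactor moves the translation lookup from import time into the request handler and reaches the Babel object through `current_app`, so the available-locale list reflects translations present when the request is served rather than when the module was imported. A sketch of the pattern, assuming (as this code does) that the Flask-Babel version in use exposes the instance as `babel_instance` on the app:

```python
from flask import current_app

def available_locales():
    # Evaluated per request, not once at import time.
    locales = ['en']
    locales.extend(str(t) for t in
                   current_app.babel_instance.list_translations())
    return locales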
58ec62fe47bf6e7acb3302a29fd0df48c4342cec
Enable break and continue in templates
yaph/logya,elaOnMars/logya,elaOnMars/logya,yaph/logya,elaOnMars/logya
logya/template.py
logya/template.py
# -*- coding: utf-8 -*- import io import os from jinja2 import Environment, BaseLoader, TemplateNotFound, escape def filesource(logya_inst, name, lines=None): """Read and return source of text files. A template function that reads the source of the given file and returns it. The text is escaped so it can be rendered safely on a Web page. The lines keyword argument is used to limit the number of lines returned. A use case is for documentation projects to show the source code used to render the current example. """ fname = os.path.join(logya_inst.dir_site, name) with io.open(fname, 'r', encoding='utf-8') as f: if lines is None: content = f.read() else: content = ''.join(f.readlines()[:lines]) return escape(content) def get_doc(logya_inst, url): """Get document located at given URL.""" return logya_inst.docs.get(url) class Template(): """Class to handle templates.""" def __init__(self, logya_inst): """Initialize template environment.""" self.vars = {} self.dir_templates = logya_inst.dir_templates self.env = Environment(loader=TemplateLoader(self.dir_templates)) # Enable break and continue in templates self.env.add_extension('jinja2.ext.loopcontrols') # self.env.trim_blocks = True # add filesource global to allow for including the source of a file self.env.globals['filesource'] = lambda x, lines=None: filesource( logya_inst, x, lines=lines) self.env.globals['get_doc'] = lambda x: get_doc(logya_inst, x) class TemplateLoader(BaseLoader): """Class to handle template Loading.""" def __init__(self, path): """Set template path.""" self.path = path def get_source(self, environment, template): """Set template source.""" path = os.path.join(self.path, template) if not os.path.exists(path): raise TemplateNotFound(template) mtime = os.path.getmtime(path) with io.open(path, 'r', encoding='utf-8') as f: source = f.read() return source, path, lambda: mtime == os.path.getmtime(path)
# -*- coding: utf-8 -*- import io import os from jinja2 import Environment, BaseLoader, TemplateNotFound, escape def filesource(logya_inst, name, lines=None): """Read and return source of text files. A template function that reads the source of the given file and returns it. The text is escaped so it can be rendered safely on a Web page. The lines keyword argument is used to limit the number of lines returned. A use case is for documentation projects to show the source code used to render the current example. """ fname = os.path.join(logya_inst.dir_site, name) with io.open(fname, 'r', encoding='utf-8') as f: if lines is None: content = f.read() else: content = ''.join(f.readlines()[:lines]) return escape(content) def get_doc(logya_inst, url): """Get document located at given URL.""" return logya_inst.docs.get(url) class Template(): """Class to handle templates.""" def __init__(self, logya_inst): """Initialize template environment.""" self.vars = {} self.dir_templates = logya_inst.dir_templates self.env = Environment(loader=TemplateLoader(self.dir_templates)) # self.env.trim_blocks = True # add filesource global to allow for including the source of a file self.env.globals['filesource'] = lambda x, lines=None: filesource( logya_inst, x, lines=lines) self.env.globals['get_doc'] = lambda x: get_doc(logya_inst, x) class TemplateLoader(BaseLoader): """Class to handle template Loading.""" def __init__(self, path): """Set template path.""" self.path = path def get_source(self, environment, template): """Set template source.""" path = os.path.join(self.path, template) if not os.path.exists(path): raise TemplateNotFound(template) mtime = os.path.getmtime(path) with io.open(path, 'r', encoding='utf-8') as f: source = f.read() return source, path, lambda: mtime == os.path.getmtime(path)
mit
Python
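The one-line change registers `jinja2.ext.loopcontrols`, which adds `{% break %}` and `{% continue %}` to template loops. A self-contained sketch of what that enables:

```python
from jinja2 import Environment

env = Environment()
env.add_extension('jinja2.ext.loopcontrols')  # enables break/continue

tpl = env.from_string(
    '{% for n in items %}'
    '{% if n > 2 %}{% break %}{% endif %}'
    '{{ n }} '
    '{% endfor %}'
)
print(tpl.render(items=[1, 2, 3, 4]))  # -> "1 2 "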
6f0740fbd94acc2398f0628552a6329c2a90a348
Allow start and end arguments to take inputs of multiple words such as 'New York'
MikeVasmer/GreenGraphCoursework
greengraph/command.py
greengraph/command.py
from argparse import ArgumentParser from matplotlib import pyplot as plt from graph import Greengraph def process(): parser = ArgumentParser( description="Produce graph quantifying the amount of green land between two locations") parser.add_argument("--start", required=True, nargs="+", help="The starting location") parser.add_argument("--end", required=True, nargs="+", help="The ending location") parser.add_argument("--steps", help="The number of steps between the starting and ending locations, defaults to 10") parser.add_argument("--out", help="The output filename, defaults to graph.png") arguments = parser.parse_args() mygraph = Greengraph(" ".join(arguments.start), " ".join(arguments.end)) if arguments.steps: data = mygraph.green_between(arguments.steps) else: data = mygraph.green_between(10) plt.plot(data) # TODO add a title and axis labels to this graph if arguments.out: plt.savefig(arguments.out) else: plt.savefig("graph.png") if __name__ == "__main__": process()
from argparse import ArgumentParser from matplotlib import pyplot as plt from graph import Greengraph def process(): parser = ArgumentParser( description="Produce graph quantifying the amount of green land between two locations") parser.add_argument("--start", required=True, help="The starting location ") parser.add_argument("--end", required=True, help="The ending location") parser.add_argument("--steps", help="The number of steps between the starting and ending locations, defaults to 10") parser.add_argument("--out", help="The output filename, defaults to graph.png") arguments = parser.parse_args() mygraph = Greengraph(arguments.start, arguments.end) if arguments.steps: data = mygraph.green_between(arguments.steps) else: data = mygraph.green_between(10) plt.plot(data) # TODO add a title and axis labels to this graph if arguments.out: plt.savefig(arguments.out) else: plt.savefig("graph.png") if __name__ == "__main__": process()
mit
Python
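`nargs="+"` makes argparse collect one or more whitespace-separated tokens into a list, so `--start New York` parses without quoting; the list then needs joining before use, as in the corrected `Greengraph` call above. A runnable sketch:

```python
from argparse import ArgumentParser

parser = ArgumentParser()
# nargs="+" accepts one or more words and stores them as a list.
parser.add_argument("--start", required=True, nargs="+")
args = parser.parse_args(["--start", "New", "York"])

print(args.start)            # ['New', 'York']
print(" ".join(args.start))  # 'New York'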
3fe0a520a458a575117fc8d809f21efd133d2887
Add license file
tranlyvu/find-link,tranlyvu/findLink
wikilink/__init__.py
wikilink/__init__.py
""" wiki-link ~~~~~~~~ wiki-link is a web-scraping application to find minimum number of links between two given wiki pages. :copyright: (c) 2016 - 2018 by Tran Ly VU. All Rights Reserved. :license: Apache License 2.0. """ __all__ = ["wiki_link"] __author__ = "Tran Ly Vu (vutransingapore@gmail.com)" __version__ = "1.0.0" __copyright__ = "Copyright (c) 2016 - 2018 Tran Ly Vu. All Rights Reserved." __license__ = "Apache License 2.0"
""" wiki-link ~~~~~~~~ wiki-link is a web-scraping application to find minimum number of links between two given wiki pages. :copyright: (c) 2016 - 2018 by Tran Ly VU. All Rights Reserved. :license: Apache License 2.0. """ __all__ = ["wiki_link"] __author__ = "Tran Ly Vu (vutransingapore@gmail.com)" __version__ = "1.0.0" __copyright__ = "Copyright (c) 2016 - 2018 Tran Ly Vu. All Rights Reserved." __license__ = "Apache License 2.0"
apache-2.0
Python
7a9f3f6cc880d2bcf0cdac8b5193b471eb2b9095
Refactor Adapter pattern
zitryss/Design-Patterns-in-Python
structural/adapter.py
structural/adapter.py
""" Convert the interface of a class into another interface clients expect. Adapter lets classes work together that couldn't otherwise because of incompatible interfaces. """ import abc class Target(metaclass=abc.ABCMeta): """ Define the domain-specific interface that Client uses. """ def __init__(self, adaptee): self._adaptee = adaptee @abc.abstractmethod def request(self): pass class Adapter(Target): """ Adapt the interface of Adaptee to the Target interface. """ def request(self): self._adaptee.specific_request() class Adaptee: """ Define an existing interface that needs adapting. """ def specific_request(self): pass def main(): adaptee = Adaptee() adapter = Adapter(adaptee) adapter.request() if __name__ == "__main__": main()
""" Convert the interface of a class into another interface clients expect. Adapter lets classes work together that couldn't otherwise because of incompatible interfaces. """ import abc class Target(metaclass=abc.ABCMeta): """ Define the domain-specific interface that Client uses. """ def __init__(self): self._adaptee = Adaptee() @abc.abstractmethod def request(self): pass class Adapter(Target): """ Adapt the interface of Adaptee to the Target interface. """ def request(self): self._adaptee.specific_request() class Adaptee: """ Define an existing interface that needs adapting. """ def specific_request(self): pass def main(): adapter = Adapter() adapter.request() if __name__ == "__main__": main()
mit
Python
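The refactor injects the adaptee through `Target.__init__` instead of hard-wiring `Adaptee()` inside the class, decoupling the adapter from one concrete implementation. A self-contained sketch of what the injection buys, with hypothetical stub classes mirroring the pattern (ABC machinery omitted for brevity):

```python
class StubAdaptee:
    """Stands in for Adaptee; anything exposing specific_request() works."""
    def specific_request(self):
        print("stub specific_request called")

class StubAdapter:
    """Minimal adapter mirroring the refactored pattern."""
    def __init__(self, adaptee):
        self._adaptee = adaptee  # injected, not constructed internally

    def request(self):
        self._adaptee.specific_request()

StubAdapter(StubAdaptee()).request()  # -> stub specific_request called
```

With injection, tests or alternative adaptees drop straight in without subclassing or patching.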
3c63201d6113d01c870748f21be2501282a2316a
Remove unneeded import in gmail.py.
pbl-cloud/paas-manager,pbl-cloud/paas-manager,pbl-cloud/paas-manager
paas_manager/app/util/gmail.py
paas_manager/app/util/gmail.py
import sys import smtplib from email.mime.text import MIMEText from email.utils import formatdate from ... import config def create_message(from_addr, to_addr, subject, message, encoding): body = MIMEText(message, 'plain', encoding) body['Subject'] = subject body['From'] = from_addr body['To'] = to_addr body['Date'] = formatdate() return body def send_via_gmail(from_addr, to_addr, body): s = smtplib.SMTP('smtp.gmail.com', 587) s.ehlo() s.starttls() s.ehlo() s.login( config['gmail']['user'], config['gmail']['password']) s.sendmail(from_addr, [to_addr], body.as_string()) s.close() def gmail(message, to_addr): body = create_message( config['gmail']['user'], to_addr, '[Notification]', message, 'utf8') send_via_gmail(config['gmail']['user'], to_addr, body) return if __name__ == '__main__': argvs = sys.argv argc = len(argvs) if (argc < 3): print('USAGE: python gmail.py address message') raise SystemExit(0) else: to_addr = argvs[1] message = argvs[2] gmail(message, to_addr)
import sys import smtplib from email.mime.text import MIMEText from email.utils import formatdate import yaml from ... import config def create_message(from_addr, to_addr, subject, message, encoding): body = MIMEText(message, 'plain', encoding) body['Subject'] = subject body['From'] = from_addr body['To'] = to_addr body['Date'] = formatdate() return body def send_via_gmail(from_addr, to_addr, body): s = smtplib.SMTP('smtp.gmail.com', 587) s.ehlo() s.starttls() s.ehlo() s.login( config['gmail']['user'], config['gmail']['password']) s.sendmail(from_addr, [to_addr], body.as_string()) s.close() def gmail(message, to_addr): body = create_message( config['gmail']['user'], to_addr, '[Notification]', message, 'utf8') send_via_gmail(config['gmail']['user'], to_addr, body) return if __name__ == '__main__': argvs = sys.argv argc = len(argvs) if (argc < 3): print('USAGE: python gmail.py address message') raise SystemExit(0) else: to_addr = argvs[1] message = argvs[2] gmail(message, to_addr)
mit
Python
4588a52ebfc3aee127a34a9e10067c0121c4f72e
add 'tab' and 'shift tab' for down/up movement
CanonicalLtd/subiquity,CanonicalLtd/subiquity
subiquity/ui/frame.py
subiquity/ui/frame.py
# Copyright 2015 Canonical, Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Base Frame Widget """ from urwid import Frame, WidgetWrap from subiquity.ui.anchors import Header, Footer, Body import logging log = logging.getLogger('subiquity.ui.frame') class SubiquityUI(WidgetWrap): key_conversion_map = {'tab': 'down', 'shift tab': 'up'} def __init__(self, header=None, body=None, footer=None): self.header = header if header else Header() self.body = body if body else Body() self.footer = footer if footer else Footer() self.frame = Frame(self.body, header=self.header, footer=self.footer) super().__init__(self.frame) def keypress(self, size, key): key = self.key_conversion_map.get(key, key) return super().keypress(size, key) def set_header(self, title, excerpt): self.frame.header = Header(title, excerpt) def set_footer(self, message): self.frame.footer = Footer(message) def set_body(self, widget): self.frame.body = widget
# Copyright 2015 Canonical, Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Base Frame Widget """ from urwid import Frame, WidgetWrap from subiquity.ui.anchors import Header, Footer, Body import logging log = logging.getLogger('subiquity.ui.frame') class SubiquityUI(WidgetWrap): def __init__(self, header=None, body=None, footer=None): self.header = header if header else Header() self.body = body if body else Body() self.footer = footer if footer else Footer() self.frame = Frame(self.body, header=self.header, footer=self.footer) super().__init__(self.frame) def set_header(self, title, excerpt): self.frame.header = Header(title, excerpt) def set_footer(self, message): self.frame.footer = Footer(message) def set_body(self, widget): self.frame.body = widget
agpl-3.0
Python
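The `key_conversion_map` trick is a dictionary lookup with the key itself as the fallback, applied before delegating to `super().keypress`. Stripped of the urwid machinery, the remapping logic is just:

```python
KEY_CONVERSION_MAP = {'tab': 'down', 'shift tab': 'up'}

def convert(key):
    # Unmapped keys pass through unchanged.
    return KEY_CONVERSION_MAP.get(key, key)

assert convert('tab') == 'down'
assert convert('shift tab') == 'up'
assert convert('enter') == 'enter'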
791fb484937cabeb3a098bcd173db782efe53d7c
support filtering of Authors by organization and positions
liddiard/skry
authors/views.py
authors/views.py
from rest_framework import viewsets, permissions from . import serializers from . import models class AuthorViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Author.objects.all() serializer_class = serializers.AuthorSerializer filter_fields = ('organization', 'positions') search_fields = ('first_name', 'last_name', 'organization', 'title', 'email', 'twitter', 'bio') ordering_fields = "__all__" class OrganizationViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Organization.objects.all() serializer_class = serializers.OrganizationSerializer filter_fields = () search_fields = ('name',) ordering_fields = "__all__" class PositionViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Position.objects.all() serializer_class = serializers.PositionSerializer filter_fields = () search_fields = ('name', 'description') ordering_fields = "__all__"
from rest_framework import viewsets, permissions from . import serializers from . import models class AuthorViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Author.objects.all() serializer_class = serializers.AuthorSerializer filter_fields = () search_fields = ('first_name', 'last_name', 'organization', 'title', 'email', 'twitter', 'bio') ordering_fields = "__all__" class OrganizationViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Organization.objects.all() serializer_class = serializers.OrganizationSerializer filter_fields = () search_fields = ('name',) ordering_fields = "__all__" class PositionViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Position.objects.all() serializer_class = serializers.PositionSerializer filter_fields = () search_fields = ('name', 'description') ordering_fields = "__all__"
mit
Python
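With a filter backend such as django-filter wired into DRF, the new `filter_fields = ('organization', 'positions')` exposes exact-match query parameters on the author list endpoint. An illustrative sketch only; the path and ids are hypothetical:

```python
from urllib.parse import urlencode

# Clients can now narrow the author list by related ids, e.g.:
print("/authors/?" + urlencode({"organization": 3}))  # /authors/?organization=3
print("/authors/?" + urlencode({"positions": 7}))     # /authors/?positions=7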
8aa52ea8f07f922bc6d5952ca8ad56bedd042a1f
Bump version number.
GreatFruitOmsk/nativeconfig
nativeconfig/version.py
nativeconfig/version.py
VERSION = '2.4.0'
VERSION = '2.3.0'
mit
Python
fb223397ccdee519af7e17dc73db864fe0120e8b
Create a random HDFS folder for unit testing
duedil-ltd/pyfilesystem
fs/tests/test_hadoop.py
fs/tests/test_hadoop.py
""" fs.tests.test_hadoop: TestCases for the HDFS Hadoop Filesystem This test suite is skipped unless the following environment variables are configured with valid values. * PYFS_HADOOP_NAMENODE_ADDR * PYFS_HADOOP_NAMENODE_PORT [default=50070] * PYFS_HADOOP_NAMENODE_PATH [default="/"] All tests will be executed within a subdirectory "pyfs-hadoop" for safety. """ import os import unittest import uuid from fs.tests import FSTestCases, ThreadingTestCases from fs.path import * try: from fs import hadoop except ImportError: raise unittest.SkipTest("hadoop fs wasn't importable") class TestHadoopFS(unittest.TestCase, FSTestCases, ThreadingTestCases): def __init__(self, *args, **kwargs): self.namenode_host = os.environ.get("PYFS_HADOOP_NAMENODE_ADDR") self.namenode_port = os.environ.get("PYFS_HADOOP_NAMENODE_PORT", "50070") self.base_path = os.path.join( os.environ.get("PYFS_HADOOP_NAMENODE_PATH", "/"), "pyfstest-" + str(uuid.uuid4()) ) super(TestHadoopFS, self).__init__(*args, **kwargs) def setUp(self): if not self.namenode_host: raise unittest.SkipTest("Skipping HDFS tests (missing config)") self.fs = hadoop.HadoopFS( namenode=self.namenode_host, port=self.namenode_port, base=self.base_path ) def tearDown(self): for dir_path in self.fs.ilistdir(dirs_only=True): if dir_path == "/": continue self.fs.removedir(dir_path, recursive=False, force=True) for file_path in self.fs.ilistdir(files_only=True): self.fs.remove(file_path) self.fs.close() @unittest.skip("HadoopFS does not support seek") def test_readwriteappendseek(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate_to_larger_size(self): pass @unittest.skip("HadoopFS does not support seek") def test_write_past_end_of_file(self): pass
""" fs.tests.test_hadoop: TestCases for the HDFS Hadoop Filesystem This test suite is skipped unless the following environment variables are configured with valid values. * PYFS_HADOOP_NAMENODE_ADDR * PYFS_HADOOP_NAMENODE_PORT [default=50070] * PYFS_HADOOP_NAMENODE_PATH [default="/"] All tests will be executed within a subdirectory "pyfs-hadoop" for safety. """ import os import unittest from fs.tests import FSTestCases, ThreadingTestCases from fs.path import * try: from fs import hadoop except ImportError: raise unittest.SkipTest("hadoop fs wasn't importable") class TestHadoopFS(unittest.TestCase, FSTestCases, ThreadingTestCases): def setUp(self): namenode_host = os.environ.get("PYFS_HADOOP_NAMENODE_ADDR") namenode_port = os.environ.get("PYFS_HADOOP_NAMENODE_PORT", "50070") base_path = os.environ.get("PYFS_HADOOP_NAMENODE_PATH", "/") if not namenode_host or not namenode_port or not base_path: raise unittest.SkipTest("Skipping HDFS tests due to lack of config") self.fs = hadoop.HadoopFS( namenode=namenode_host, port=namenode_port, base=base_path ) def tearDown(self): for dir_path in self.fs.ilistdir(dirs_only=True): if dir_path == "/": continue self.fs.removedir(dir_path, recursive=False, force=True) for file_path in self.fs.ilistdir(files_only=True): self.fs.remove(file_path) self.fs.close() @unittest.skip("HadoopFS does not support seek") def test_readwriteappendseek(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate_to_larger_size(self): pass @unittest.skip("HadoopFS does not support seek") def test_write_past_end_of_file(self): pass
bsd-3-clause
Python
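The key change is suffixing the base path with `uuid.uuid4()`, so parallel test runs against a shared namenode each get a private directory instead of clobbering one another. The path construction in isolation:

```python
import os
import uuid

base = os.environ.get("PYFS_HADOOP_NAMENODE_PATH", "/")
test_dir = os.path.join(base, "pyfstest-" + str(uuid.uuid4()))
print(test_dir)  # e.g. /pyfstest-9f1b2c3d-... (unique per run)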
926bf60c77673571cb8f6d12e3754507f41b9e80
add optional args
20c/ngage,20c/ngage
ngage/plugins/napalm.py
ngage/plugins/napalm.py
from __future__ import absolute_import import ngage from ngage.exceptions import AuthenticationError, ConfigError import napalm_base from napalm_base.exceptions import ( ConnectionException, ReplaceConfigException, MergeConfigException ) @ngage.plugin.register('napalm') class Driver(ngage.plugins.DriverPlugin): plugin_type = 'napalm' def _do_init(self): config = self.config self.host = config.get('host') self.user = config.get('user') self.password = config.get('password') self.optional_args = config.get('driver_args', {}) if ':' not in config['type']: raise ValueError('napalm requires a subtype') driver = config['type'].split(':', 2)[1] cls = napalm_base.get_network_driver(driver) self.dev = cls(self.host, self.user, self.password, optional_args=self.optional_args) def _do_open(self): try: self.dev.open() except ConnectionException: raise AuthenticationError def _do_close(self): self.dev.close() def _do_pull(self): if not hasattr(self.dev, 'get_config'): raise NotImplementedError('get_config not implemented, please update napalm') return self.dev.get_config(retrieve='candidate')['candidate'] def _do_push(self, fname, **kwargs): try: self.dev.load_merge_candidate(filename=fname) except (MergeConfigException, ReplaceConfigException) as e: raise ConfigError(e.message) def _do_diff(self, index=0): if index != 0: raise NotImplementedError('version index not implemented') return self.dev.compare_config() def _do_lock(self): self.dev.lock() def _do_unlock(self): self.dev.unlock() def _do_commit(self, **kwargs): self.dev.commit_config() # def _do_check(self): # not impl by napalm def _do_rollback(self, index=0): if index == 0: self.dev.discard_config() elif index == 1: self.dev.rollback() else: raise NotImplementedError('version index not implemented')
from __future__ import absolute_import import ngage from ngage.exceptions import AuthenticationError, ConfigError import napalm_base from napalm_base.exceptions import ( ConnectionException, ReplaceConfigException, MergeConfigException ) @ngage.plugin.register('napalm') class Driver(ngage.plugins.DriverPlugin): plugin_type = 'napalm' def _do_init(self): config = self.config self.host = config.get('host') self.user = config.get('user') self.password = config.get('password') if ':' not in config['type']: raise ValueError('napalm requires a subtype') (na, driver) = config['type'].split(':', 2) cls = napalm_base.get_network_driver(driver) self.dev = cls(self.host, self.user, self.password) def _do_open(self): try: self.dev.open() except ConnectionException: raise AuthenticationError def _do_close(self): self.dev.close() def _do_pull(self): if not hasattr(self.dev, 'get_config'): raise NotImplementedError('get_config not implemented, please update napalm') return self.dev.get_config(retrieve='candidate')['candidate'] def _do_push(self, fname, **kwargs): try: self.dev.load_merge_candidate(filename=fname) except (MergeConfigException, ReplaceConfigException) as e: raise ConfigError(e.message) def _do_diff(self, index=0): if index != 0: raise NotImplementedError('version index not implemented') return self.dev.compare_config() def _do_lock(self): self.dev.lock() def _do_unlock(self): self.dev.unlock() def _do_commit(self, **kwargs): self.dev.commit_config() # def _do_check(self): # not impl by napalm def _do_rollback(self, index=0): if index == 0: self.dev.discard_config() elif index == 1: self.dev.rollback() else: raise NotImplementedError('version index not implemented')
apache-2.0
Python
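`optional_args` is NAPALM's per-driver escape hatch (SSH port, transport, key files, and so on), and the new `driver_args` config key is passed straight through to it. A hedged sketch against the old `napalm_base` API used here; the driver name, host, credentials, and arguments are placeholders:

```python
import napalm_base

cls = napalm_base.get_network_driver('eos')
dev = cls(
    'router1.example.com', 'admin', 'secret',
    optional_args={'port': 8443},  # driver-specific extras go here
)
dev.open()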
68cf8281b512ea5941ec0b88ca532409e0e97866
Fix circular import
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
app/evaluation/emails.py
app/evaluation/emails.py
import json from django.conf import settings from django.core.mail import send_mail from comicsite.core.urlresolvers import reverse def send_failed_job_email(job): message = ( f'Unfortunately the evaluation for the submission to ' f'{job.challenge.short_name} failed with an error. The error message ' f'is:\n\n' f'{job.output}\n\n' f'You may wish to try and correct this, or contact the challenge ' f'organizers. The following information may help them:\n' f'User: {job.submission.creator.username}\n' f'Job ID: {job.pk}\n' f'Submission ID: {job.submission.pk}' ) recipient_list = [o.email for o in job.challenge.get_admins()] recipient_list.append(job.submission.creator.email) for r in recipient_list: send_mail( subject='Evaluation Failed', message=message, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[r.email], ) def send_new_result_email(result): recipient_list = [o.email for o in result.challenge.get_admins()] message = ( f'There is a new result for {result.challenge.short_name} from ' f'{result.job.submission.creator.username}. The following metrics ' f'were calculated:\n\n' f'{json.dumps(result.metrics, indent=2)}\n\n' ) if result.public: leaderboard_url = reverse( 'evaluation:results-list', kwargs={ 'challenge_short_name': result.challenge.short_name, } ) message += ( f'You can view the result on the leaderboard here: ' f'{leaderboard_url}' ) recipient_list.append(result.job.submission.creator.email) else: message += ( f'You can publish the result on the leaderboard here: ' f'{result.get_absolute_url()}' ) for r in recipient_list: send_mail( subject=f'New Result for {result.challenge.short_name}', message=message, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[r.email], )
import json from django.conf import settings from django.core.mail import send_mail from comicsite.core.urlresolvers import reverse from evaluation.models import Result, Job def send_failed_job_email(job: Job): message = ( f'Unfortunately the evaluation for the submission to ' f'{job.challenge.short_name} failed with an error. The error message ' f'is:\n\n' f'{job.output}\n\n' f'You may wish to try and correct this, or contact the challenge ' f'organizers. The following information may help them:\n' f'User: {job.submission.creator.username}\n' f'Job ID: {job.pk}\n' f'Submission ID: {job.submission.pk}' ) recipient_list = [o.email for o in job.challenge.get_admins()] recipient_list.append(job.submission.creator.email) for r in recipient_list: send_mail( subject='Evaluation Failed', message=message, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[r.email], ) def send_new_result_email(result: Result): recipient_list = [o.email for o in result.challenge.get_admins()] message = ( f'There is a new result for {result.challenge.short_name} from ' f'{result.job.submission.creator.username}. The following metrics ' f'were calculated:\n\n' f'{json.dumps(result.metrics, indent=2)}\n\n' ) if result.public: leaderboard_url = reverse( 'evaluation:results-list', kwargs={ 'challenge_short_name': result.challenge.short_name, } ) message += ( f'You can view the result on the leaderboard here: ' f'{leaderboard_url}' ) recipient_list.append(result.job.submission.creator.email) else: message += ( f'You can publish the result on the leaderboard here: ' f'{result.get_absolute_url()}' ) for r in recipient_list: send_mail( subject=f'New Result for {result.challenge.short_name}', message=message, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[r.email], )
apache-2.0
Python
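The fix breaks the cycle by dropping the module-level `from evaluation.models import Result, Job` along with the annotations that required it. When such annotations are worth keeping, a common alternative (not what this commit did) is `typing.TYPE_CHECKING` with string annotations:

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen by type checkers only; never imported at runtime, so the
    # evaluation.models <-> evaluation.emails cycle is avoided.
    from evaluation.models import Job

def send_failed_job_email(job: 'Job'):
    ...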
04aa968a70b8065c9c9cd013d1266f8988c4220a
remove accidentally committed maxDiff change
hhursev/recipe-scraper
tests/__init__.py
tests/__init__.py
import os import unittest import pytest class ScraperTest(unittest.TestCase): online = False test_file_name = None def setUp(self): os.environ[ "RECIPE_SCRAPERS_SETTINGS" ] = "tests.test_data.test_settings_module.test_settings" test_file_name = ( self.test_file_name if self.test_file_name else self.scraper_class.__name__.lower() ) with open( "tests/test_data/{}.testhtml".format(test_file_name), encoding="utf-8" ) as testfile: self.harvester_class = self.scraper_class(testfile) canonical_url = self.harvester_class.canonical_url() if self.online: if not canonical_url: pytest.skip( f"could not find canonical url for online test of scraper '{self.scraper_class.__name__}'" ) self.harvester_class = self.scraper_class(url=canonical_url)
import os import unittest import pytest class ScraperTest(unittest.TestCase): maxDiff = None online = False test_file_name = None def setUp(self): os.environ[ "RECIPE_SCRAPERS_SETTINGS" ] = "tests.test_data.test_settings_module.test_settings" test_file_name = ( self.test_file_name if self.test_file_name else self.scraper_class.__name__.lower() ) with open( "tests/test_data/{}.testhtml".format(test_file_name), encoding="utf-8" ) as testfile: self.harvester_class = self.scraper_class(testfile) canonical_url = self.harvester_class.canonical_url() if self.online: if not canonical_url: pytest.skip( f"could not find canonical url for online test of scraper '{self.scraper_class.__name__}'" ) self.harvester_class = self.scraper_class(url=canonical_url)
mit
Python
c72b28ece7fe5313c7eff5f26d9ef0baaad1bad2
Update denormalization command
barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore-django,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore,dbinetti/barberscore,barberscore/barberscore-api
project/apps/api/management/commands/denormalize.py
project/apps/api/management/commands/denormalize.py
from django.core.management.base import ( BaseCommand, ) from apps.api.models import ( Convention, Contest, Award, Contestant, Entrant, Session, Performance, Song, Singer, Director, Panelist, ) class Command(BaseCommand): help = "Command to denormalize data." def handle(self, *args, **options): vs = Convention.objects.all() for v in vs: v.save() ts = Contest.objects.all() for t in ts: t.save() ps = Panelist.objects.all() for p in ps: p.save() ws = Award.objects.all() for w in ws: w.save() es = Entrant.objects.all() for e in es: e.save() cs = Contestant.objects.all() for c in cs: c.save() ss = Session.objects.all() for s in ss: s.save() as_ = Performance.objects.all() for a in as_: a.save() ps = Song.objects.all() for p in ps: p.save() ss = Singer.objects.all() for s in ss: s.save() js = Panelist.objects.all() for j in js: j.save() ds = Director.objects.all() for d in ds: d.save() return "Done"
from django.core.management.base import ( BaseCommand, ) from apps.api.models import ( Convention, Contest, Contestant, Performance, Song, Group, Singer, Director, Panelist, ) class Command(BaseCommand): help = "Command to denormalize data." def handle(self, *args, **options): vs = Convention.objects.all() for v in vs: v.save() ts = Contest.objects.all() for t in ts: t.save() cs = Contestant.objects.all() for c in cs: c.save() as_ = Performance.objects.all() for a in as_: a.save() ps = Song.objects.all() for p in ps: p.save() ss = Singer.objects.all() for s in ss: s.save() js = Panelist.objects.all() for j in js: j.save() ds = Director.objects.all() for d in ds: d.save() return "Done"
bsd-2-clause
Python
74c4c832b5f99643ac23ad3885f22f7a493016f7
Update denormalization command
barberscore/barberscore-api,dbinetti/barberscore-django,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore,dbinetti/barberscore,barberscore/barberscore-api,dbinetti/barberscore-django
project/apps/api/management/commands/denormalize.py
project/apps/api/management/commands/denormalize.py
from django.core.management.base import ( BaseCommand, ) from apps.api.models import ( Convention, Contest, Award, Contestant, Entrant, Session, Performance, Song, Singer, Director, Panelist, ) class Command(BaseCommand): help = "Command to denormalize data." def handle(self, *args, **options): vs = Convention.objects.all() for v in vs: v.save() ts = Contest.objects.all() for t in ts: t.save() ps = Panelist.objects.all() for p in ps: p.save() ws = Award.objects.all() for w in ws: w.save() es = Entrant.objects.all() for e in es: e.save() cs = Contestant.objects.all() for c in cs: c.save() ss = Session.objects.all() for s in ss: s.save() as_ = Performance.objects.all() for a in as_: a.save() ps = Song.objects.all() for p in ps: p.save() ss = Singer.objects.all() for s in ss: s.save() js = Panelist.objects.all() for j in js: j.save() ds = Director.objects.all() for d in ds: d.save() return "Done"
from django.core.management.base import ( BaseCommand, ) from apps.api.models import ( Convention, Contest, Contestant, Performance, Song, Group, Singer, Director, Panelist, ) class Command(BaseCommand): help = "Command to denormalize data." def handle(self, *args, **options): vs = Convention.objects.all() for v in vs: v.save() ts = Contest.objects.all() for t in ts: t.save() cs = Contestant.objects.all() for c in cs: c.save() as_ = Performance.objects.all() for a in as_: a.save() ps = Song.objects.all() for p in ps: p.save() ss = Singer.objects.all() for s in ss: s.save() js = Panelist.objects.all() for j in js: j.save() ds = Director.objects.all() for d in ds: d.save() return "Done"
bsd-2-clause
Python
ec9bc89372670e623dbe98c34591fba62a0ee64a
Rename merge to pack in postp.
tjcorona/PyFR,iyer-arvind/PyFR,tjcorona/PyFR,BrianVermeire/PyFR,Aerojspark/PyFR,tjcorona/PyFR
pyfr/scripts/postp.py
pyfr/scripts/postp.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import os from tempfile import NamedTemporaryFile from argparse import ArgumentParser, FileType import numpy as np from pyfr.util import rm def process_pack(args): # List the contents of the directory relnames = os.listdir(args.indir) # Get the absolute file names and extension-less file names absnames = [os.path.join(args.indir, f) for f in relnames] repnames = [f[:-4] for f in relnames] # Open/load the files files = [np.load(f, mmap_mode='r') for f in absnames] # Get the output pyfrs file name outname = args.outf or args.indir.rstrip('/') # Determine the dir and prefix of the temp file dirname, basename = os.path.split(outname) # Create a named temp file tempf = NamedTemporaryFile(prefix=basename, dir=dirname, delete=False) try: # Write the contents of the directory out as an npz (pyfrs) file np.savez(tempf, **dict(zip(repnames, files))) tempf.close() # Remove the output path if it should exist if os.path.exists(outname): rm(outname) # Rename the temp file into place os.rename(tempf.name, outname) except: # Clean up the temporary file if os.path.exists(tempf.name): os.remove(tempf.name) # Re-raise raise def main(): ap = ArgumentParser(prog='pyfr-postp', description='Post processes a ' 'PyFR simulation') sp = ap.add_subparsers(help='sub-command help') ap_pack = sp.add_parser('pack', help='pack --help', description='Packs a ' 'pyfrs-directory into a pyfrs-file. If no ' 'output file is specified then that of the ' 'input directory is taken. This command will ' 'replace any existing file or directory.') ap_pack.add_argument('indir', metavar='in', help='Input PyFR solution directory') ap_pack.add_argument('outf', metavar='out', nargs='?', help='Out PyFR solution file') ap_pack.set_defaults(process=process_pack) # Parse the arguments args = ap.parse_args() args.process(args) if __name__ == '__main__': main()
#!/usr/bin/env python # -*- coding: utf-8 -*- import os from tempfile import NamedTemporaryFile from argparse import ArgumentParser, FileType import numpy as np from pyfr.util import rm def process_pack(args): # List the contents of the directory relnames = os.listdir(args.indir) # Get the absolute file names and extension-less file names absnames = [os.path.join(args.indir, f) for f in relnames] repnames = [f[:-4] for f in relnames] # Open/load the files files = [np.load(f, mmap_mode='r') for f in absnames] # Get the output pyfrs file name outname = args.outf or args.indir.rstrip('/') # Determine the dir and prefix of the temp file dirname, basename = os.path.split(outname) # Create a named temp file tempf = NamedTemporaryFile(prefix=basename, dir=dirname, delete=False) try: # Write the contents of the directory out as an npz (pyfrs) file np.savez(tempf, **dict(zip(repnames, files))) tempf.close() # Remove the output path if it should exist if os.path.exists(outname): rm(outname) # Rename the temp file into place os.rename(tempf.name, outname) except: # Clean up the temporary file if os.path.exists(tempf.name): os.remove(tempf.name) # Re-raise raise def main(): ap = ArgumentParser(prog='pyfr-postp', description='Post processes a ' 'PyFR simulation') sp = ap.add_subparsers(help='sub-command help') ap_merge = sp.add_parser('pack', help='pack --help', description='Packs a ' 'pyfrs-directory into a pyfrs-file. If no ' 'output file is specified then that of the ' 'input directory is taken. This command will ' 'replace any existing file or directory.') ap_merge.add_argument('indir', metavar='in', help='Input PyFR solution directory') ap_merge.add_argument('outf', metavar='out', nargs='?', help='Out PyFR solution file') ap_merge.set_defaults(process=process_pack) # Parse the arguments args = ap.parse_args() args.process(args) if __name__ == '__main__': main()
bsd-3-clause
Python
301b2ca9cdf33665312e092937c63b1db7db888f
Add missing imports
Cretezy/pymessenger2,karlinnolabs/pymessenger
pymessenger2/utils.py
pymessenger2/utils.py
import hashlib import hmac import six import attr import json def validate_hub_signature(app_secret, request_payload, hub_signature_header): """ @inputs: app_secret: Secret Key for application request_payload: request body hub_signature_header: X-Hub-Signature header sent with request @outputs: boolean indicated that hub signature is validated """ try: hash_method, hub_signature = hub_signature_header.split('=') except: pass else: digest_module = getattr(hashlib, hash_method) hmac_object = hmac.new( str(app_secret), unicode(request_payload), digest_module) generated_hash = hmac_object.hexdigest() if hub_signature == generated_hash: return True return False def generate_appsecret_proof(access_token, app_secret): """ @inputs: access_token: page access token app_secret_token: app secret key @outputs: appsecret_proof: HMAC-SHA256 hash of page access token using app_secret as the key """ if six.PY2: hmac_object = hmac.new( str(app_secret), unicode(access_token), hashlib.sha256) else: hmac_object = hmac.new( bytearray(app_secret, 'utf8'), str(access_token).encode('utf8'), hashlib.sha256) generated_hash = hmac_object.hexdigest() return generated_hash class ToJsonMixin: """ Derive from this with an `.asdict` member to get a working `to_json` function! """ def to_json(self): items_iterator = (attr.asdict(self).items() if six.PY3 else attr.asdict(self).iteritems()) return json.dumps({k: v for k, v in items_iterator if v is not None})
import hashlib import hmac import six def validate_hub_signature(app_secret, request_payload, hub_signature_header): """ @inputs: app_secret: Secret Key for application request_payload: request body hub_signature_header: X-Hub-Signature header sent with request @outputs: boolean indicated that hub signature is validated """ try: hash_method, hub_signature = hub_signature_header.split('=') except: pass else: digest_module = getattr(hashlib, hash_method) hmac_object = hmac.new( str(app_secret), unicode(request_payload), digest_module) generated_hash = hmac_object.hexdigest() if hub_signature == generated_hash: return True return False def generate_appsecret_proof(access_token, app_secret): """ @inputs: access_token: page access token app_secret_token: app secret key @outputs: appsecret_proof: HMAC-SHA256 hash of page access token using app_secret as the key """ if six.PY2: hmac_object = hmac.new( str(app_secret), unicode(access_token), hashlib.sha256) else: hmac_object = hmac.new( bytearray(app_secret, 'utf8'), str(access_token).encode('utf8'), hashlib.sha256) generated_hash = hmac_object.hexdigest() return generated_hash class ToJsonMixin: """ Derive from this with an `.asdict` member to get a working `to_json` function! """ def to_json(self): items_iterator = (attr.asdict(self).items() if six.PY3 else attr.asdict(self).iteritems()) return json.dumps({k: v for k, v in items_iterator if v is not None})
mit
Python
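The added imports matter because `ToJsonMixin.to_json` calls `attr.asdict` and `json.dumps`, which previously raised `NameError` at call time. A self-contained sketch of the same serialization idea; the `Button` class is hypothetical and the `attrs` package is assumed installed:

```python
import json
import attr

@attr.s
class Button:
    title = attr.ib(default=None)
    url = attr.ib(default=None)

b = Button(title="Go")
# Mirrors to_json: drop None-valued fields before serializing.
print(json.dumps({k: v for k, v in attr.asdict(b).items()
                  if v is not None}))  # -> {"title": "Go"}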
20d41656488ea43978f749e2e34303e49981695c
fix imports to include OR tools
paolodragone/PyMzn
pymzn/mzn/__init__.py
pymzn/mzn/__init__.py
from .model import * from .solvers import * from .minizinc import * from .templates import * __all__ = [ 'Solutions', 'minizinc', 'mzn2fzn', 'solns2out', 'MiniZincError', 'MiniZincUnsatisfiableError', 'MiniZincUnknownError', 'MiniZincUnboundedError', 'MiniZincModel', 'Statement', 'Constraint', 'Variable', 'ArrayVariable', 'OutputStatement', 'SolveStatement', 'Solver', 'Gecode', 'Chuffed', 'Optimathsat', 'Opturion', 'MIPSolver', 'Gurobi', 'CBC', 'G12Solver', 'G12Fd', 'G12Lazy', 'G12MIP', 'OscarCBLS', 'ORTools', 'gecode', 'chuffed', 'optimathsat', 'opturion', 'gurobi', 'cbc', 'g12fd', 'g12lazy', 'g12mip', 'oscar_cbls', 'or_tools', 'discretize', 'from_string', 'add_package', 'add_path' ]
from .model import * from .solvers import * from .minizinc import * from .templates import * __all__ = [ 'Solutions', 'minizinc', 'mzn2fzn', 'solns2out', 'MiniZincError', 'MiniZincUnsatisfiableError', 'MiniZincUnknownError', 'MiniZincUnboundedError', 'MiniZincModel', 'Statement', 'Constraint', 'Variable', 'ArrayVariable', 'OutputStatement', 'SolveStatement', 'Solver', 'Gecode', 'Chuffed', 'Optimathsat', 'Opturion', 'MIPSolver', 'Gurobi', 'CBC', 'G12Solver', 'G12Fd', 'G12Lazy', 'G12MIP', 'OscarCBLS', 'gecode', 'chuffed', 'optimathsat', 'opturion', 'gurobi', 'cbc', 'g12fd', 'g12lazy', 'g12mip', 'oscar_cbls', 'discretize', 'from_string', 'add_package', 'add_path' ]
mit
Python
80bc283676be51ef67fe7924bcc32adaa93fc985
Change timestamp format
anl-mcampos/GuestBook,anl-mcampos/GuestBook
guestbook/__init__.py
guestbook/__init__.py
# coding: utf-8 import pickle from datetime import datetime from collections import namedtuple, deque from flask import Flask, request, render_template, redirect, escape, Markup application = Flask(__name__) DATA_FILE = 'guestbook.dat' Post = namedtuple('Post', ['name', 'timestamp', 'comment']) def save_post(name, timestamp, comment): posts = pickle.load(DATA_FILE) assert isinstance(posts, deque) posts.appendleft(Post(name, timestamp, comment)) pickle.dump(posts, DATA_FILE) def load_posts(): return pickle.load(DATA_FILE) @application.route('/') def index(): return render_template('index.html', greeting_list=load_posts()) @application.route('/post', methods=['POST']) def post(): name = request.form.get('name') comment = request.form.get('comment') save_post(name, datetime.now(), comment) return redirect('/') @application.template_filter('nl2br') def nl2br_filter(s): return escape(s).replace('\n', Markup('<br />')) @application.template_filter('datetime_fmt') def datetime_fmt_filter(dt): return dt.strftime('%d/%m/%Y %H:%M:%S') def main(): application.run('127.0.0.1', 8000) if __name__ == "__main__": application.run('127.0.0.1', 8000, debug=True)
# coding: utf-8 import pickle from datetime import datetime from collections import namedtuple, deque from flask import Flask, request, render_template, redirect, escape, Markup application = Flask(__name__) DATA_FILE = 'guestbook.dat' Post = namedtuple('Post', ['name', 'timestamp', 'comment']) def save_post(name, timestamp, comment): posts = pickle.load(DATA_FILE) assert isinstance(posts, deque) posts.appendleft(Post(name, timestamp, comment)) pickle.dump(posts, DATA_FILE) def load_posts(): return pickle.load(DATA_FILE) @application.route('/') def index(): return render_template('index.html', greeting_list=load_posts()) @application.route('/post', methods=['POST']) def post(): name = request.form.get('name') comment = request.form.get('comment') save_post(name, datetime.now(), comment) return redirect('/') @application.template_filter('nl2br') def nl2br_filter(s): return escape(s).replace('\n', Markup('<br />')) @application.template_filter('datetime_fmt') def datetime_fmt_filter(dt): return dt.strftime('%Y%m%d %H:%M:%S') def main(): application.run('127.0.0.1', 8000) if __name__ == "__main__": application.run('127.0.0.1', 8000, debug=True)
mit
Python
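The commit only touches the `strftime` pattern in the `datetime_fmt` template filter. For reference, the two formats side by side:

```python
from datetime import datetime

dt = datetime(2015, 1, 31, 9, 5, 0)
print(dt.strftime('%Y%m%d %H:%M:%S'))    # old: 20150131 09:05:00
print(dt.strftime('%d/%m/%Y %H:%M:%S'))  # new: 31/01/2015 09:05:00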
f860a306b4c9fc583a83289ae2a6ecf407214e38
Add more checks to avoid crashing when input files are missing
pySTEPS/pysteps
pysteps/io/readers.py
pysteps/io/readers.py
"""Methods for reading files. """ import numpy as np def read_timeseries(inputfns, importer, **kwargs): """Read a list of input files using io tools and stack them into a 3d array. Parameters ---------- inputfns : list List of input files returned by any function implemented in archive. importer : function Any function implemented in importers. kwargs : dict Optional keyword arguments for the importer. Returns ------- out : tuple A three-element tuple containing the precipitation fields read, the quality fields, and associated metadata. """ # check for missing data Rref = None if all(ifn is None for ifn in inputfns): return None, None, None else: if len(inputfns[0]) == 0: return None, None, None for ifn in inputfns[0]: if ifn is not None: Rref, Qref, metadata = importer(ifn, **kwargs) break if Rref is None: return None, None, None R = [] Q = [] timestamps = [] for i,ifn in enumerate(inputfns[0]): if ifn is not None: R_, Q_, _ = importer(ifn, **kwargs) R.append(R_) Q.append(Q_) timestamps.append(inputfns[1][i]) else: R.append(Rref*np.nan) if Qref is not None: Q.append(Qref*np.nan) else: Q.append(None) timestamps.append(inputfns[1][i]) # Replace this with stack? R = np.concatenate([R_[None, :, :] for R_ in R]) #TODO: Q should be organized as R, but this is not trivial as Q_ can be also None or a scalar metadata["timestamps"] = np.array(timestamps) return R, Q, metadata
"""Methods for reading files. """ import numpy as np def read_timeseries(inputfns, importer, **kwargs): """Read a list of input files using io tools and stack them into a 3d array. Parameters ---------- inputfns : list List of input files returned by any function implemented in archive. importer : function Any function implemented in importers. kwargs : dict Optional keyword arguments for the importer. Returns ------- out : tuple A three-element tuple containing the precipitation fields read, the quality fields, and associated metadata. """ # check for missing data if all(ifn is None for ifn in inputfns): return None, None, None else: for ifn in inputfns[0]: if ifn is not None: Rref, Qref, metadata = importer(ifn, **kwargs) break R = [] Q = [] timestamps = [] for i,ifn in enumerate(inputfns[0]): if ifn is not None: R_, Q_, _ = importer(ifn, **kwargs) R.append(R_) Q.append(Q_) timestamps.append(inputfns[1][i]) else: R.append(Rref*np.nan) if Qref is not None: Q.append(Qref*np.nan) else: Q.append(None) timestamps.append(inputfns[1][i]) R = np.concatenate([R_[None, :, :] for R_ in R]) #TODO: Q should be organized as R, but this is not trivial as Q_ can be also None or a scalar metadata["timestamps"] = np.array(timestamps) return R, Q, metadata
bsd-3-clause
Python
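The new guards handle two failure modes the old code crashed on: an empty filename list, and a list whose entries are all unreadable, so no reference field `Rref` is ever established. The scanning pattern in isolation, as a minimal sketch:

```python
def first_readable(filenames, reader):
    """Return the first successfully read field, or None."""
    if not filenames:
        return None
    for fn in filenames:
        if fn is not None:
            return reader(fn)
    return None

print(first_readable([], str.upper))            # None (empty list)
print(first_readable([None, None], str.upper))  # None (nothing readable)
print(first_readable([None, "ok"], str.upper))  # OK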
dbc1df293f283367526b3a80c5f24d71e5d46be1
fix bug abort is undefined and return 204
ylerjen/pir-hat,ylerjen/pir-hat,ylerjen/pir-hat
middleware/app.py
middleware/app.py
from flask import Flask, jsonify, request, abort from sense_hat import SenseHat from hat_manager import HatManager app = Flask(__name__) sense_hat = SenseHat() hat_manager = HatManager(sense_hat) @app.route('/') def index(): return 'Welcome to the PI manager. Choose a route according to what you want to do.' @app.route('/status') def get_status(): status = {'pressure': hat_manager.get_pressure, 'temperature': hat_manager.get_temperature, 'humidity': hat_manager.get_humidity} return jsonify({'status': status}) @app.route('/message', methods=['POST']) def print_message(): if not request.json or not 'message' in request.json: abort(400) message = request.json['message'] color = request.json['text_color'] bg_color = request.json['bg_color'] hat_manager.set_message(message) return jsonify(), 204 if __name__ == '__main__': # 0.0.0.0 = accessible to any device on the network app.run(debug=True, host='0.0.0.0')
from flask import Flask, jsonify, request from sense_hat import SenseHat from hat_manager import HatManager app = Flask(__name__) sense_hat = SenseHat() hat_manager = HatManager(sense_hat) @app.route('/') def index(): return 'Welcome to the PI manager. Choose a route according to what you want to do.' @app.route('/status') def get_status(): status = {'pressure': hat_manager.get_pressure, 'temperature': hat_manager.get_temperature, 'humidity': hat_manager.get_humidity} return jsonify({'status': status}) @app.route('/message', methods=['POST']) def print_message(): if not request.json or not 'message' in request.json: abort(400) message = request.json['message'] color = request.json['text_color'] bg_color = request.json['bg_color'] hat_manager.set_message(message) if __name__ == '__main__': # 0.0.0.0 = accessible to any device on the network app.run(debug=True, host='0.0.0.0')
mit
Python
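The fix above imports abort and returns a 204; as a general Flask pattern, abort(400) raises an HTTPException (so the import is mandatory) and a 204 "No Content" reply is conventionally sent without a body. A minimal sketch of the pattern, not the project's actual handler:

from flask import Flask, abort, request

app = Flask(__name__)

@app.route('/message', methods=['POST'])
def print_message():
    # abort() raises werkzeug's HTTPException; without the import this
    # branch fails with a NameError only when it is actually hit.
    if not request.json or 'message' not in request.json:
        abort(400)
    # 204 means "No Content", so an empty body is the idiomatic return.
    return '', 204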
fb34eebd253727dcc718e2387cb6f4ac763f0bae
Add DateTime Completed Field to Task
csdevsc/mcs_website,csdevsc/colcat_crowdsourcing_application,csdevsc/colcat_crowdsourcing_application,csdevsc/colcat_crowdsourcing_application,csdevsc/mcs_website,csdevsc/mcs_website
tasks/models/tasks.py
tasks/models/tasks.py
"""Models for tasks Each new type of task corresponds to a task model """ from django.db import models from data import Data_FullGrid_Confidence, Data_FullGrid # Tasks class Task_Naming_001(Data_FullGrid_Confidence): class Meta: db_table = 'tbl_response_naming_001' def __unicode__(self): return 'Task Naming 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) class Task_Foci_001(Data_FullGrid): class Meta: db_table = 'tbl_response_foci_001' def __unicode__(self): return 'Task Foci 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) dt_completed = models.DateTimeField(auto_now=True) class Task_Mapping_001(Data_FullGrid): class Meta: db_table = 'tbl_response_mapping_001' def __unicode__(self): return 'Task Mapping 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True)
"""Models for tasks Each new type of task corresponds to a task model """ from django.db import models from data import Data_FullGrid_Confidence, Data_FullGrid # Tasks class Task_Naming_001(Data_FullGrid_Confidence): class Meta: db_table = 'tbl_response_naming_001' def __unicode__(self): return 'Task Naming 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) class Task_Foci_001(Data_FullGrid): class Meta: db_table = 'tbl_response_foci_001' def __unicode__(self): return 'Task Foci 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) class Task_Mapping_001(Data_FullGrid): class Meta: db_table = 'tbl_response_mapping_001' def __unicode__(self): return 'Task Mapping 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True)
mit
Python
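One caveat about the field added above: auto_now=True rewrites the timestamp on every save(), not only when the task is completed, whereas auto_now_add=True freezes it at row creation. A hedged sketch of the trade-off (TaskResponse is a hypothetical model name, not one from the repo):

from django.db import models

class TaskResponse(models.Model):
    # auto_now=True: rewritten on every save() -- effectively "last modified".
    dt_completed = models.DateTimeField(auto_now=True)
    # auto_now_add=True would set the value once, at INSERT time:
    # dt_created = models.DateTimeField(auto_now_add=True)
    # For a true "completed at", a nullable field set explicitly by
    # application code is often the least surprising choice.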
547c1d5d1ff2ced0969a86eda6e0094f8b76d94f
Bump to 0.1.1 with setup.py fix
NitishT/minio-py,krishnasrinivas/minio-py,harshavardhana/minio-py,NitishT/minio-py,donatello/minio-py,minio/minio-py,minio/minio-py
minio/__init__.py
minio/__init__.py
# Minimal Object Storage Library, (C) 2015 Minio, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .minio import Minio from .acl import Acl from .parsers import Bucket, Object, ResponseError __author__ = "Minio, Inc." __version__ = "0.1.1"
# Minimal Object Storage Library, (C) 2015 Minio, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .minio import Minio from .acl import Acl from .parsers import Bucket, Object, ResponseError __author__ = "Minio, Inc." __version__ = "0.1.0"
apache-2.0
Python
4bb8a61cde27575865cdd2b7df5afcb5d6860523
Add weird SLP orientation to get_world_pedir
oesteban/fmriprep,poldracklab/preprocessing-workflow,oesteban/fmriprep,poldracklab/preprocessing-workflow,oesteban/fmriprep
fmriprep/interfaces/tests/test_reports.py
fmriprep/interfaces/tests/test_reports.py
import pytest from ..reports import get_world_pedir @pytest.mark.parametrize("orientation,pe_dir,expected", [ ('RAS', 'j', 'Posterior-Anterior'), ('RAS', 'j-', 'Anterior-Posterior'), ('RAS', 'i', 'Left-Right'), ('RAS', 'i-', 'Right-Left'), ('RAS', 'k', 'Inferior-Superior'), ('RAS', 'k-', 'Superior-Inferior'), ('LAS', 'j', 'Posterior-Anterior'), ('LAS', 'i-', 'Left-Right'), ('LAS', 'k-', 'Superior-Inferior'), ('LPI', 'j', 'Anterior-Posterior'), ('LPI', 'i-', 'Left-Right'), ('LPI', 'k-', 'Inferior-Superior'), ('SLP', 'k-', 'Posterior-Anterior'), ('SLP', 'k', 'Anterior-Posterior'), ('SLP', 'j-', 'Left-Right'), ('SLP', 'j', 'Right-Left'), ('SLP', 'i', 'Inferior-Superior'), ('SLP', 'i-', 'Superior-Inferior'), ]) def test_get_world_pedir(tmpdir, orientation, pe_dir, expected): assert get_world_pedir(orientation, pe_dir) == expected
import pytest from ..reports import get_world_pedir @pytest.mark.parametrize("orientation,pe_dir,expected", [ ('RAS', 'j', 'Posterior-Anterior'), ('RAS', 'j-', 'Anterior-Posterior'), ('RAS', 'i', 'Left-Right'), ('RAS', 'i-', 'Right-Left'), ('RAS', 'k', 'Inferior-Superior'), ('RAS', 'k-', 'Superior-Inferior'), ('LAS', 'j', 'Posterior-Anterior'), ('LAS', 'i-', 'Left-Right'), ('LAS', 'k-', 'Superior-Inferior'), ('LPI', 'j', 'Anterior-Posterior'), ('LPI', 'i-', 'Left-Right'), ('LPI', 'k-', 'Inferior-Superior'), ]) def test_get_world_pedir(tmpdir, orientation, pe_dir, expected): assert get_world_pedir(orientation, pe_dir) == expected
bsd-3-clause
Python
bfa66827e5afd175c15640b1678fbba347009953
Fix unit tests
ArchiveLabs/dweb_gateway,ArchiveLabs/dweb_gateway
python/test/_utils.py
python/test/_utils.py
from python.ServerGateway import DwebGatewayHTTPRequestHandler def _processurl(url, verbose, headers={}, **kwargs): # Simulates HTTP Server process - wont work for all methods args = url.split('/') method = args.pop(0) DwebGatewayHTTPRequestHandler.headers = headers # This is a kludge, put headers on class, method expects an instance. f = getattr(DwebGatewayHTTPRequestHandler, method) assert f namespace = args.pop(0) if verbose: kwargs["verbose"] = True res = f(DwebGatewayHTTPRequestHandler, namespace, *args, **kwargs) return res
from python.ServerGateway import DwebGatewayHTTPRequestHandler def _processurl(url, verbose, **kwargs): # Simulates HTTP Server process - wont work for all methods args = url.split('/') method = args.pop(0) f = getattr(DwebGatewayHTTPRequestHandler, method) assert f namespace = args.pop(0) if verbose: kwargs["verbose"] = True res = f(DwebGatewayHTTPRequestHandler, namespace, *args, **kwargs) return res
agpl-3.0
Python
c7e9ea888bbbcef9e7ae29340c45e9aaf211d1da
Fix tests
spyder-ide/qtpy,davvid/qtpy,davvid/qtpy,goanpeca/qtpy,goanpeca/qtpy
tests/travis.py
tests/travis.py
import os os.environ['QT_API'] = os.environ['USE_QT_API'].lower() from qtpy import QtCore, QtGui, QtWidgets print('Qt version:%s' % QtCore.__version__) print(QtCore.QEvent) print(QtGui.QPainter) print(QtWidgets.QWidget)
import os os.environ['QT_API'] = os.environ['USE_QT_API'] from qtpy import QtCore, QtGui, QtWidgets print('Qt version:%s' % QtCore.__version__) print(QtCore.QEvent) print(QtGui.QPainter) print(QtWidgets.QWidget)
mit
Python
efac3c253dcd71be2c6510b5025ddedbb9a7358e
work when there's no RAVEN_CONFIG
pulilab/rapidpro,pulilab/rapidpro,tsotetsi/textily-web,tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,tsotetsi/textily-web,tsotetsi/textily-web,pulilab/rapidpro,pulilab/rapidpro
temba/temba_celery.py
temba/temba_celery.py
from __future__ import absolute_import, unicode_literals import celery import os import raven import sys from django.conf import settings from raven.contrib.celery import register_signal, register_logger_signal # set the default Django settings module for the 'celery' program. os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'temba.settings') app = celery.Celery('temba') app.config_from_object('django.conf:settings') app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) # register raven if configured raven_config = getattr(settings, 'RAVEN_CONFIG', None) if raven_config: client = raven.Client(settings.RAVEN_CONFIG['dsn']) register_logger_signal(client) register_signal(client) @app.task(bind=True) def debug_task(self): # pragma: needs cover print('Request: {0!r}'.format(self.request)) # this is needed to simulate CELERY_ALWAYS_EAGER for plain 'send' tasks if 'test' in sys.argv or getattr(settings, 'CELERY_ALWAYS_EAGER', False): from celery import current_app def send_task(name, args=(), kwargs={}, **opts): # pragma: needs cover task = current_app.tasks[name] return task.apply(args, kwargs, **opts) current_app.send_task = send_task
from __future__ import absolute_import, unicode_literals import celery import os import raven import sys from django.conf import settings from raven.contrib.celery import register_signal, register_logger_signal # set the default Django settings module for the 'celery' program. os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'temba.settings') app = celery.Celery('temba') app.config_from_object('django.conf:settings') app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) client = raven.Client(settings.RAVEN_CONFIG['dsn']) register_logger_signal(client) register_signal(client) @app.task(bind=True) def debug_task(self): # pragma: needs cover print('Request: {0!r}'.format(self.request)) # this is needed to simulate CELERY_ALWAYS_EAGER for plain 'send' tasks if 'test' in sys.argv or getattr(settings, 'CELERY_ALWAYS_EAGER', False): from celery import current_app def send_task(name, args=(), kwargs={}, **opts): # pragma: needs cover task = current_app.tasks[name] return task.apply(args, kwargs, **opts) current_app.send_task = send_task
agpl-3.0
Python
c90fce44f30398fef0c20ec08f761ae19951308a
Delete unused pipeline settings
koopauy/django-vagrant-boilerplate,koopauy/django-vagrant-boilerplate,koopauy/django-vagrant-boilerplate,koopauy/django-vagrant-boilerplate
{{cookiecutter.repo_name}}/src/settings/project.py
{{cookiecutter.repo_name}}/src/settings/project.py
# -*- coding: utf-8 -*- """ Project settings for {{cookiecutter.project_name}} Author : {{cookiecutter.author_name}} <{{cookiecutter.email}}> """ from defaults import * from getenv import env INSTALLED_APPS += ( 'applications.front', ) GRAPPELLI_ADMIN_TITLE = "Admin"
# -*- coding: utf-8 -*- """ Project settings for {{cookiecutter.project_name}} Author : {{cookiecutter.author_name}} <{{cookiecutter.email}}> """ from defaults import * from getenv import env INSTALLED_APPS += ( 'applications.front', ) GRAPPELLI_ADMIN_TITLE = "Admin" PIPELINE_CSS = { 'stylesheets': { 'source_filenames': ( ), 'output_filename': 'stylesheets.css', 'extra_context': { 'media': 'screen,projection', }, }, } PIPELINE_JS = { 'scripts': { 'source_filenames': ( ), 'output_filename': 'scripts.js', } }
mit
Python
eaf390b065944a64a3b74c1b0e43b1df60d4e88f
Reimplement deduping
frol/invoke,sophacles/invoke,mattrobenolt/invoke,frol/invoke,pyinvoke/invoke,mkusz/invoke,pyinvoke/invoke,alex/invoke,kejbaly2/invoke,mattrobenolt/invoke,mkusz/invoke,tyewang/invoke,kejbaly2/invoke,pfmoore/invoke,singingwolfboy/invoke,pfmoore/invoke
invoke/executor.py
invoke/executor.py
class Executor(object): """ An execution strategy for Task objects. Subclasses may override various extension points to change, add or remove behavior. """ def __init__(self, collection): """ Create executor with a pointer to the task collection ``collection``. This pointer is used for looking up tasks by name and storing/retrieving state, e.g. how many times a given task has been run this session and so on. """ self.collection = collection def execute(self, name, kwargs=None, dedupe=True): """ Execute task named ``name``, optionally passing along ``kwargs``. If ``dedupe`` is ``True`` (default), will ensure any given task within ``self.collection`` is only run once per session. To disable this behavior, say ``dedupe=False``. """ kwargs = kwargs or {} # Expand task list all_tasks = self.task_list(name) # Dedupe if requested if dedupe: # Compact (preserving order, so not using list+set) compact_tasks = [] for task in all_tasks: if task not in compact_tasks: compact_tasks.append(task) # Remove tasks already called tasks = [] for task in compact_tasks: if not task.called: tasks.append(task) else: tasks = all_tasks # Execute for task in tasks: task(**kwargs) def task_list(self, name): task = self.collection[name] tasks = [task] prereqs = [] for pretask in task.pre: prereqs.append(self.collection[pretask]) return prereqs + tasks
class Executor(object): """ An execution strategy for Task objects. Subclasses may override various extension points to change, add or remove behavior. """ def __init__(self, collection): """ Create executor with a pointer to the task collection ``collection``. This pointer is used for looking up tasks by name and storing/retrieving state, e.g. how many times a given task has been run this session and so on. """ self.collection = collection def execute(self, name, kwargs=None, dedupe=True): """ Execute task named ``name``, optionally passing along ``kwargs``. If ``dedupe`` is ``True`` (default), will ensure any given task within ``self.collection`` is only run once per session. To disable this behavior, say ``dedupe=False``. """ kwargs = kwargs or {} # Expand task list all_tasks = self.task_list(name) # Compact (preserving order, so not using list+set) compact_tasks = [] for task in all_tasks: if task not in compact_tasks: compact_tasks.append(task) # Remove tasks already called tasks = [] for task in compact_tasks: if not task.called: tasks.append(task) # Execute for task in tasks: task.body(**kwargs) def task_list(self, name): task = self.collection[name] tasks = [task] prereqs = [] for pretask in task.pre: prereqs.append(self.collection[pretask]) return prereqs + tasks
bsd-2-clause
Python
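The compact-then-filter loops in execute() above implement an order-preserving dedupe; the same thing can be written with OrderedDict.fromkeys (or plain dict.fromkeys on Python 3.7+, where insertion order is guaranteed). A minimal sketch with made-up task names:

from collections import OrderedDict

all_tasks = ['build', 'test', 'build', 'deploy', 'test']

# fromkeys keeps the first occurrence of each item and drops later repeats,
# matching the manual "if task not in compact_tasks" loop.
compact_tasks = list(OrderedDict.fromkeys(all_tasks))
assert compact_tasks == ['build', 'test', 'deploy']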
375d12ab7486f6bb0d57232d48c556e6c0eda0c1
Update P05_stylingExcel: fix PEP8 spacing
JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials
books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py
books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py
# This program uses the OpenPyXL module to manipulate Excel documents import openpyxl from openpyxl.styles import Font, NamedStyle wb = openpyxl.Workbook() sheet = wb["Sheet"] # Setting the Font Style of Cells italic24Font = NamedStyle(name="italic24Font") italic24Font.font = Font(size=24, italic=True) sheet["A1"].style = italic24Font sheet["A1"] = "Hello world!" wb.save("styled.xlsx") # Font Objects wb = openpyxl.Workbook() sheet = wb["Sheet"] fontObj1 = Font(name="Times New Roman", bold=True) styleObj1 = NamedStyle(name="styleObj1") styleObj1.font = fontObj1 sheet["A1"].style = styleObj1 sheet["A1"] = "Bold Times New Roman" fontObj2 = Font(size=24, italic=True) styleObj2 = NamedStyle(name="styleObj2") styleObj2.font = fontObj2 sheet["B3"].style = styleObj2 sheet["B3"] = "24 pt Italic" wb.save("styles.xlsx") # Formulas wb = openpyxl.Workbook() sheet = wb.active sheet["A1"] = 200 sheet["A2"] = 300 sheet["A3"] = "=SUM(A1:A2)" wb.save("writeFormula.xlsx") wbFormulas = openpyxl.load_workbook("writeFormula.xlsx") sheet = wbFormulas.active print(sheet["A3"].value) wbDataOnly = openpyxl.load_workbook("writeFormula.xlsx", data_only=True) sheet = wbDataOnly.active print(sheet["A3"].value) # Not working with LibreOffice 6.0.3.2 # Adjusting Rows and Columns wb = openpyxl.Workbook() sheet = wb.active sheet["A1"] = "Tall row" sheet["B2"] = "Wide column" sheet.row_dimensions[1].height = 70 sheet.column_dimensions['B'].width = 20 wb.save("dimensions.xlsx") wb = openpyxl.Workbook() sheet = wb.active sheet.merge_cells("A1:D3") sheet["A1"] = "Twelve cells merged together." sheet.merge_cells("C5:D5") sheet["C5"] = "Two merged cells." wb.save("merged.xlsx") wb = openpyxl.load_workbook("merged.xlsx") sheet = wb.active sheet.unmerge_cells("A1:D3") sheet.unmerge_cells("C5:D5") #wb.save("merged.xlsx") # uncomment to see changes wb = openpyxl.load_workbook("produceSales.xlsx") sheet = wb.active sheet.freeze_panes = "A2" wb.save("freezeExample.xlsx") # Charts wb = openpyxl.Workbook() sheet = wb.get_active_sheet() for i in range(1, 11): # create some data in column A sheet['A' + str(i)] = i refObj = openpyxl.charts.Reference(sheet, (1, 1), (10, 1)) seriesObj = openpyxl.charts.Series(refObj, title="First Series") chartObj = openpyxl.charts.BarChart() chartObj.append(seriesObj) chartObj.drawing.top = 50 # set the position chartObj.drawing.left = 100 chartObj.drawing.width = 300 # set the size chartObj.drawing.height = 200 sheet.add_chart(chartObj) wb.save("sampleChart.xlsx")
# This program uses the OpenPyXL module to manipulate Excel documents import openpyxl from openpyxl.styles import Font, NamedStyle wb = openpyxl.Workbook() sheet = wb["Sheet"] # Setting the Font Style of Cells italic24Font = NamedStyle(name="italic24Font") italic24Font.font = Font(size=24, italic=True) sheet["A1"].style = italic24Font sheet["A1"] = "Hello world!" wb.save("styled.xlsx") # Font Objects wb = openpyxl.Workbook() sheet = wb["Sheet"] fontObj1 = Font(name="Times New Roman", bold=True) styleObj1 = NamedStyle(name="styleObj1") styleObj1.font = fontObj1 sheet["A1"].style = styleObj1 sheet["A1"] = "Bold Times New Roman" fontObj2 = Font(size=24, italic=True) styleObj2 = NamedStyle(name="styleObj2") styleObj2.font = fontObj2 sheet["B3"].style = styleObj2 sheet["B3"] = "24 pt Italic" wb.save("styles.xlsx") # Formulas wb = openpyxl.Workbook() sheet = wb.active sheet["A1"] = 200 sheet["A2"] = 300 sheet["A3"] = "=SUM(A1:A2)" wb.save("writeFormula.xlsx") wbFormulas = openpyxl.load_workbook("writeFormula.xlsx") sheet = wbFormulas.active print(sheet["A3"].value) wbDataOnly = openpyxl.load_workbook("writeFormula.xlsx", data_only=True) sheet = wbDataOnly.active print(sheet["A3"].value) # Not working with LibreOffice 6.0.3.2 # Adjusting Rows and Columns wb = openpyxl.Workbook() sheet = wb.active sheet["A1"] = "Tall row" sheet["B2"] = "Wide column" sheet.row_dimensions[1].height = 70 sheet.column_dimensions['B'].width = 20 wb.save("dimensions.xlsx") wb = openpyxl.Workbook() sheet = wb.active sheet.merge_cells("A1:D3") sheet["A1"] = "Twelve cells merged together." sheet.merge_cells("C5:D5") sheet["C5"] = "Two merged cells." wb.save("merged.xlsx") wb = openpyxl.load_workbook("merged.xlsx") sheet = wb.active sheet.unmerge_cells("A1:D3") sheet.unmerge_cells("C5:D5") #wb.save("merged.xlsx") # uncomment to see changes wb = openpyxl.load_workbook("produceSales.xlsx") sheet = wb.active sheet.freeze_panes = "A2" wb.save("freezeExample.xlsx") # Charts wb = openpyxl.Workbook() sheet = wb.get_active_sheet() for i in range(1, 11): # create some data in column A sheet['A' + str(i)] = i refObj = openpyxl.charts.Reference(sheet, (1, 1), (10, 1)) seriesObj = openpyxl.charts.Series(refObj, title="First Series") chartObj = openpyxl.charts.BarChart() chartObj.append(seriesObj) chartObj.drawing.top = 50 # set the position chartObj.drawing.left = 100 chartObj.drawing.width = 300 # set the size chartObj.drawing.height = 200 sheet.add_chart(chartObj) wb.save("sampleChart.xlsx")
mit
Python
17793c9b3ceecc206aab1d1c34c0d3dc69892cbd
Use ArgumentParser to enforce required arguments
nickbattam/picamon,nickbattam/picamon,nickbattam/picamon,nickbattam/picamon
monitor/runner.py
monitor/runner.py
import sys
from time import sleep
from camera import Camera
from controller import Controller
from plotter_pygame import PyGamePlotter
import epics
import argparse

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--prefix', required=True, dest='prefix', help='controller IOC prefix')
    parser.add_argument('--name', required=True, dest='name', help='name of monitor')
    parser.add_argument('--fullscreen', dest='fullscreen', default=1, help='1 for fullscreen (default), 0 for small window')
    args = parser.parse_args()

    controller = Controller(args.prefix, args.name)
    plotter = PyGamePlotter(args.name, args.fullscreen)
    camera = Camera()

    old_cmap = ""

    while True:
        try:
            # check for quit events
            if not plotter.i_shall_continue():
                break

            # get camera name
            camera_name = controller.camera

            # if no camera is selected, make screen blank
            if camera_name == "":
                plotter.blank()

            # otherwise, display camera feed
            else:
                camera.set_name(camera_name)

                # update colormap
                cmap = controller.colourmap_name
                if cmap != old_cmap:
                    old_cmap = cmap
                    plotter.set_colormap(controller.colourmap_data)

                # update aspect ratio
                plotter.set_aspect_ratio(controller.aspect)

                # get camera data and process it
                plotter.process(camera.get_data())

                # update label info
                if controller.label == 1:
                    plotter.show_label(camera_name)
                pass

            # show and wait
            plotter.show()
            sleep(controller.rate)

        except KeyboardInterrupt:
            plotter.quit()
            pass

    plotter.quit()
import sys from time import sleep from camera import Camera from controller import Controller from plotter_pygame import PyGamePlotter import epics import argparse if __name__ == "__main__": parser = argparse.ArgumentParser(description='') parser.add_argument('--prefix', dest='prefix', help='controller IOC prefix') parser.add_argument('--name', dest='name', help='name of monitor') parser.add_argument('--fullscreen', dest='fullscreen', default=1, help='1 for fullscreen (default), 0 for small window') args = parser.parse_args() if not (args.prefix and args.name): parser.error("Arguments missing. Please use both --prefix and --name") controller = Controller(args.prefix, args.name) plotter = PyGamePlotter(args.name, args.fullscreen) camera = Camera() old_cmap = "" while True: try: # check for quit events if not plotter.i_shall_continue(): break # get camera name camera_name = controller.camera # if no camera is selected, make screen blank if camera_name == "": plotter.blank() # otherwise, display camera feed else: camera.set_name(camera_name) # update colormap cmap = controller.colourmap_name if cmap != old_cmap: old_cmap = cmap plotter.set_colormap(controller.colourmap_data) # update aspect ratio plotter.set_aspect_ratio(controller.aspect) # get camera data and process it plotter.process(camera.get_data()) # udpate label info if controller.label == 1: plotter.show_label(camera_name) pass # show and wait plotter.show() sleep(controller.rate) except KeyboardInterrupt: plotter.quit() pass plotter.quit()
apache-2.0
Python
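required=True, as used above, moves the validation into argparse itself: a missing flag now fails inside parse_args() with a usage message and exit status 2, so the manual parser.error check can go. A standalone sketch with illustrative values:

import argparse

parser = argparse.ArgumentParser(description='demo')
parser.add_argument('--prefix', required=True, help='controller IOC prefix')
parser.add_argument('--name', required=True, help='name of monitor')

# With required=True, omitting --prefix or --name makes parse_args()
# print usage and exit(2); no post-hoc "if not args.x" check is needed.
args = parser.parse_args(['--prefix', 'IOC:', '--name', 'cam1'])
assert args.prefix == 'IOC:' and args.name == 'cam1'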
81a4b04173033d7e678ad6c4b4efae654af9ac11
Use a threading local object to isolate MongoDB connection between different threads but reuse the same connection in the same thread
GeographicaGS/moocng,GeographicaGS/moocng,GeographicaGS/moocng,OpenMOOC/moocng,GeographicaGS/moocng,OpenMOOC/moocng
moocng/mongodb.py
moocng/mongodb.py
# Copyright 2013 Rooter Analysis S.L. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urlparse from threading import local from django.conf import settings from django.core.exceptions import ImproperlyConfigured from pymongo.connection import Connection DEFAULT_MONGODB_HOST = 'localhost' DEFAULT_MONGODB_PORT = 27017 DEFAULT_MONGODB_NAME = 'moocng' DEFAULT_MONGODB_URI = 'mongodb://%s:%d/%s' % (DEFAULT_MONGODB_HOST, DEFAULT_MONGODB_PORT, DEFAULT_MONGODB_NAME) class MongoDB(object): def __init__(self, db_uri=DEFAULT_MONGODB_URI, connection_factory=Connection): self.db_uri = urlparse.urlparse(db_uri) self.connection = connection_factory( host=self.db_uri.hostname or DEFAULT_MONGODB_HOST, port=self.db_uri.port or DEFAULT_MONGODB_PORT) if self.db_uri.path: self.database_name = self.db_uri.path[1:] else: self.database_name = DEFAULT_MONGODB_NAME self.database = self.get_database() def get_connection(self): return self.connection def get_database(self): database = self.connection[self.database_name] if self.db_uri.username and self.db_uri.password: database.authenticate(self.db_uri.username, self.db_uri.password) return database def get_collection(self, collection): return self.database[collection] connections = local() def get_db(): try: db_uri = settings.MONGODB_URI except AttributeError: raise ImproperlyConfigured('Missing required MONGODB_URI setting') if not hasattr(connections, 'default'): connections.default = MongoDB(db_uri) return connections.default
# Copyright 2013 Rooter Analysis S.L. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urlparse from django.conf import settings from django.core.exceptions import ImproperlyConfigured from pymongo.connection import Connection DEFAULT_MONGODB_HOST = 'localhost' DEFAULT_MONGODB_PORT = 27017 DEFAULT_MONGODB_NAME = 'moocng' DEFAULT_MONGODB_URI = 'mongodb://%s:%d/%s' % (DEFAULT_MONGODB_HOST, DEFAULT_MONGODB_PORT, DEFAULT_MONGODB_NAME) class MongoDB(object): def __init__(self, db_uri=DEFAULT_MONGODB_URI, connection_factory=Connection): self.db_uri = urlparse.urlparse(db_uri) self.connection = connection_factory( host=self.db_uri.hostname or DEFAULT_MONGODB_HOST, port=self.db_uri.port or DEFAULT_MONGODB_PORT) if self.db_uri.path: self.database_name = self.db_uri.path[1:] else: self.database_name = DEFAULT_MONGODB_NAME self.database = self.get_database() def get_connection(self): return self.connection def get_database(self): database = self.connection[self.database_name] if self.db_uri.username and self.db_uri.password: database.authenticate(self.db_uri.username, self.db_uri.password) return database def get_collection(self, collection): return self.database[collection] def get_db(): try: db_uri = settings.MONGODB_URI except AttributeError: raise ImproperlyConfigured('Missing required MONGODB_URI setting') return MongoDB(db_uri)
apache-2.0
Python
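The threading.local pattern above generalizes beyond MongoDB: attributes set on a local() instance are visible only to the thread that set them, so each thread lazily builds its own connection while repeat calls within one thread reuse it. A minimal sketch with a stand-in connection factory (plain object here, not a real driver):

import threading

connections = threading.local()

def get_db(factory=object):
    # Each thread sees its own 'default' attribute: the first call in a
    # thread creates the connection, later calls in that thread reuse it.
    if not hasattr(connections, 'default'):
        connections.default = factory()
    return connections.default

assert get_db() is get_db()  # same object when called twice in one thread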
aa9143302b376e1274c8c11b53687771d0444b5a
Remove now-unused isInt code
pictuga/morss,pictuga/morss,pictuga/morss
morss/__main__.py
morss/__main__.py
# ran on `python -m morss`

import os
import sys

from . import wsgi
from . import cli

from .morss import MorssException

import wsgiref.simple_server
import wsgiref.handlers


PORT = int(os.getenv('PORT', 8080))


def main():
    if 'REQUEST_URI' in os.environ:
        # mod_cgi (w/o file handler)
        app = wsgi.cgi_app

        app = wsgi.cgi_dispatcher(app)
        app = wsgi.cgi_error_handler(app)
        app = wsgi.cgi_encode(app)

        wsgiref.handlers.CGIHandler().run(app)

    elif len(sys.argv) <= 1:
        # start internal (basic) http server (w/ file handler)
        app = wsgi.cgi_app

        app = wsgi.cgi_file_handler(app)
        app = wsgi.cgi_dispatcher(app)
        app = wsgi.cgi_error_handler(app)
        app = wsgi.cgi_encode(app)

        print('Serving http://localhost:%s/' % PORT)  # PORT, not the undefined `port`
        httpd = wsgiref.simple_server.make_server('', PORT, app)
        httpd.serve_forever()

    else:
        # as a CLI app
        try:
            cli.cli_app()

        except (KeyboardInterrupt, SystemExit):
            raise

        except Exception as e:
            print('ERROR: %s' % e.message)


if __name__ == '__main__':
    main()
# ran on `python -m morss` import os import sys from . import wsgi from . import cli from .morss import MorssException import wsgiref.simple_server import wsgiref.handlers PORT = int(os.getenv('PORT', 8080)) def isInt(string): try: int(string) return True except ValueError: return False def main(): if 'REQUEST_URI' in os.environ: # mod_cgi (w/o file handler) app = wsgi.cgi_app app = wsgi.cgi_dispatcher(app) app = wsgi.cgi_error_handler(app) app = wsgi.cgi_encode(app) wsgiref.handlers.CGIHandler().run(app) elif len(sys.argv) <= 1: # start internal (basic) http server (w/ file handler) app = wsgi.cgi_app app = wsgi.cgi_file_handler(app) app = wsgi.cgi_dispatcher(app) app = wsgi.cgi_error_handler(app) app = wsgi.cgi_encode(app) print('Serving http://localhost:%s/' % port) httpd = wsgiref.simple_server.make_server('', PORT, app) httpd.serve_forever() else: # as a CLI app try: cli.cli_app() except (KeyboardInterrupt, SystemExit): raise except Exception as e: print('ERROR: %s' % e.message) if __name__ == '__main__': main()
agpl-3.0
Python
3927fd757ff404af61e609cc1728d1f3fe398230
Fix on-error text.
aperture321/hipbit
mp3datastorage.py
mp3datastorage.py
#store file attributes component
import sqlite3 as sql
import os
import mp3metadata
#TODO add directory of the database
#Allow database recognition and resetting the database

class SQLmgr:
    def __init__(self, username):
        #note every time function is called MusicData table is dropped!
        self.serv = False
        self.errors = open("error.txt", "w")
        self.servcount=1
        db = username + ".db"
        self.db = db
        if self.db in os.listdir("."):
            #database already exists
            pass
        else:
            try:
                serv = sql.connect(db)
                with serv:
                    self.serv = serv.cursor()
                    self.serv.execute("DROP TABLE IF EXISTS MusicData")
                    self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)")
                    self.serv.close()
            except sql.Error, e:
                print "Error executing SQL table. ", e.args[0]
                return 1

    def wipe_database(self, username):
        self.db = username + ".db"
        try:
            serv = sql.connect(self.db)  # was sql.connect(db): NameError, db is undefined in this scope
            with serv:
                self.serv = serv.cursor()
                self.serv.execute("DROP TABLE IF EXISTS MusicData")
                self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)")
                self.serv.close()
        except sql.Error, e:
            print "Error wiping database."
            return 1

    def add_db(self, case):
        try:
            with sql.connect(self.db) as serv:
                self.serv = serv.cursor()
                self.serv.execute("INSERT INTO MusicData VALUES (?, ?, ?, ?, ?);", case)
                self.servcount += 1
                self.serv.close()
        except sql.Error, e:
            self.errors.write(str(case[-1]))

    def addmp3todb(self, filetup):
        try:
            case = []
            case.append(self.servcount)
            for h,j in filetup[1].items():
                if h in ["ALBUM", "ARTIST", "TITLE"]:
                    case.append(j)
            case.append(filetup[0])
            self.add_db(tuple(case))
        except:
            self.errors.write("Error writing: " + str(filetup[1]))  # str(): filetup[1] is a dict

    def add_test(self, filedir):
        try:
            tester = mp3metadata.mp3data().returnobj()
            case = []
            case.append(self.servcount)
            #tuple pairings will proceed in this order.
            for k,v in tester.items():
                if k in ["ALBUM", "ARTIST", "TITLE"]:
                    case.append(v)
            case.append(filedir)
            self.add_db(tuple(case))
            return 0
        except sql.Error, e:
            print e.args[0]
            return 1
#store file attributes component import sqlite3 as sql import os import mp3metadata #TODO add directory of the database #Allow database recognition and resetting the database class SQLmgr: def __init__(self, username): #note everytime function is called MusicData table is dropped! self.serv = False self.errors = open("error.txt", "w") self.servcount=1 db = username + ".db" self.db = db if self.db in os.listdir("."): #database already exists pass else: try: serv = sql.connect(db) with serv: self.serv = serv.cursor() self.serv.execute("DROP TABLE IF EXISTS MusicData") self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)") self.serv.close() except sql.Error, e: print "Error executing SQL table. ", e.args[0] return 1 def wipe_database(self, username): self.db = username + ".db" try: serv = sql.connect(db) with serv: self.serv = serv.cursor() self.serv.execute("DROP TABLE IF EXISTS MusicData") self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)") self.serv.close() except sql.Error, e: print "Error wiping database." return 1 def add_db(self, case): try: with sql.connect(self.db) as serv: self.serv = serv.cursor() self.serv.execute("INSERT INTO MusicData VALUES (?, ?, ?, ?, ?);", case) self.servcount += 1 self.serv.close() except sql.Error, e: errors.write(str(case[-1])) def addmp3todb(self, filetup): try: case = [] case.append(self.servcount) for h,j in filetup[1].items(): if h in ["ALBUM", "ARTIST", "TITLE"]: case.append(j) case.append(filetup[0]) self.add_db(tuple(case)) except: errors.write("Error writing: " + filetup[1]) def add_test(self, filedir): try: tester = mp3metadata.mp3data().returnobj() case = [] case.append(self.servcount) #tuple pairings will proceed in this order. for k,v in tester.items(): if k in ["ALBUM", "ARTIST", "TITLE"]: case.append(v) case.append(filedir) self.add_db(tuple(case)) return 0 except sql.Error, e: print e.args[0] return 1
mit
Python
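A detail worth knowing about the sqlite3 usage above: the connection's context manager only scopes the transaction -- commit on success, rollback on exception -- and does not close the connection, which is why the code still closes cursors explicitly. A small self-contained demonstration:

import sqlite3

conn = sqlite3.connect(':memory:')
with conn:  # commits on success, rolls back on error; does NOT close
    conn.execute('CREATE TABLE t (x INTEGER)')
    conn.execute('INSERT INTO t VALUES (?)', (1,))
assert conn.execute('SELECT COUNT(*) FROM t').fetchone()[0] == 1
conn.close()  # closing remains the caller's responsibility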
e1e25bc1166efa9a39fdf769f1081fafd08dd937
Handle unknown source country, add recovered count
lepinkainen/pyfibot,lepinkainen/pyfibot
pyfibot/modules/module_korona.py
pyfibot/modules/module_korona.py
# -*- coding: utf-8 -*-

"""
Koronavirus statistics from HS.fi open data
https://github.com/HS-Datadesk/koronavirus-avoindata
"""

from __future__ import unicode_literals, print_function, division

from collections import Counter


def init(bot):
    global lang
    config = bot.config.get("module_posti", {})
    lang = config.get("language", "en")


def command_korona(bot, user, channel, args):
    """Get latest info about COVID-19 in Finland (Source: https://github.com/HS-Datadesk/koronavirus-avoindata )"""
    url = "https://w3qa5ydb4l.execute-api.eu-west-1.amazonaws.com/prod/finnishCoronaData"

    try:
        r = bot.get_url(url)
        data = r.json()
    except Exception as e:
        bot.say(
            channel,
            "Error while getting data.",
        )
        raise e

    msg = "[COVID-19] Vahvistettuja tapauksia: %s Kuolleita: %s Parantunut: %s" % (len(data['confirmed']), len(data['deaths']), len(data['recovered']))

    # top5 infection sources
    top5 = Counter(map(lambda x: x['infectionSourceCountry'], data['confirmed'])).most_common(5)
    msg = msg + " | Top5 lähdemaat: "
    topstr = []
    for country, count in top5:
        if country is None:
            country = "N/A"
        topstr.append(country+":"+str(count))
    msg = msg + " ".join(topstr)

    bot.say(channel, msg)
# -*- coding: utf-8 -*- """ Koronavirus statistics from HS.fi open data https://github.com/HS-Datadesk/koronavirus-avoindata """ from __future__ import unicode_literals, print_function, division from collections import Counter def init(bot): global lang config = bot.config.get("module_posti", {}) lang = config.get("language", "en") def command_korona(bot, user, channel, args): """Get latest info about COVID-19 in Finland (Source: https://github.com/HS-Datadesk/koronavirus-avoindata )""" url = "https://w3qa5ydb4l.execute-api.eu-west-1.amazonaws.com/prod/finnishCoronaData" try: r = bot.get_url(url) data = r.json() except Exception as e: bot.say( channel, "Error while getting data.", ) raise e msg = "[COVID-19] Vahvistettuja tapauksia: %s Kuolleita: %s" % (len(data['confirmed']), len(data['deaths'])) # top5 infection sources top5 = Counter(map(lambda x: x['infectionSourceCountry'], data['confirmed'])).most_common(5) msg = msg + " | Top5 lähdemaat: " topstr = [] for country, count in top5: topstr.append(country+":"+str(count)) msg = msg + " ".join(topstr) bot.say(channel, msg)
bsd-3-clause
Python
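Counter itself is happy to count None keys, so the crash this commit guards against lives in the string formatting, not the counting; substituting a placeholder at display time is enough. A minimal sketch with invented country codes (equal counts keep first-seen order on Python 3.7+):

from collections import Counter

sources = ['IT', None, 'CN', 'IT', None]
top = Counter(sources).most_common(2)

parts = []
for country, count in top:
    if country is None:
        country = 'N/A'  # unknown infection source country
    parts.append('%s:%d' % (country, count))
assert ' '.join(parts) == 'IT:2 N/A:2'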
13f26d9007629be019140aa3bedd5f6fbfefe69b
Remove redundant all() call when applying document filter
kyunooh/JellyBlog,kyunooh/JellyBlog,kyunooh/JellyBlog
jellyblog/views.py
jellyblog/views.py
# -*- coding: utf-8 -*-
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from .models import Category, Document
from htmlmin.decorators import minified_response
from .util import get_page_number_range, get_documents, \
    categoryList


def home(request):
    Category.init_category()
    return render(request, 'jellyblog/home.html')


def index(request):
    return index_with_page(request, 1)


@minified_response
def index_with_page(request, page):
    document_list = Document.objects.filter(public_doc=True).order_by('-id')
    paginator = Paginator(document_list, 4)
    documents = get_documents(paginator, page)
    context = {
        'documents': documents,
        'category_list': categoryList,
        'page_range': get_page_number_range(
            paginator, documents
        )
    }
    return render(request, 'jellyblog/index.html', context)


def category_detail(request, category_id):
    return category_with_page(request, category_id, 1)


@minified_response
def category_with_page(request, category_id, page):
    selected_category = Category.objects.get(id=category_id)
    document_list = []

    if selected_category.parent.id == 1:
        # Check whether the category is a top-level category; if it is,
        # also collect the documents of its child categories.
        children = Category.objects.all().filter(parent=selected_category.id)
        for child in children:
            document_list += Document.objects.all() \
                .filter(category_id=child.id, public_doc=True)

    document_list += Document.objects.all().filter(
        category=category_id, public_doc=True)
    document_list.sort(key=lambda x: x.pk, reverse=True)
    paginator = Paginator(document_list, 4)
    documents = get_documents(paginator, page)

    context = {
        'documents': documents,
        'category_list': categoryList,
        'category_id': category_id,
        'page_range': get_page_number_range(
            paginator, documents),
        'category_name': selected_category.name,
    }

    return render(request, 'jellyblog/category.html', context)


@minified_response
def detail(request, document_id):
    document = get_object_or_404(Document, pk=document_id)
    document.read()
    return render(request, 'jellyblog/detail.html',
                  {'document': document, 'category_list': categoryList})
# -*- coding: utf-8 -*- from django.shortcuts import render, get_object_or_404 from django.core.paginator import Paginator from .models import Category, Document from htmlmin.decorators import minified_response from .util import get_page_number_range, get_documents, \ categoryList def home(request): Category.init_category() return render(request, 'jellyblog/home.html') def index(request): return index_with_page(request, 1) @minified_response def index_with_page(request, page): document_list = Document.objects.all().filter(public_doc=True).order_by('-id') paginator = Paginator(document_list, 4) documents = get_documents(paginator, page) context = { 'documents': documents, 'category_list': categoryList, 'page_range': get_page_number_range( paginator, documents ) } return render(request, 'jellyblog/index.html', context) def category_detail(request, category_id): return category_with_page(request, category_id, 1) @minified_response def category_with_page(request, category_id, page): selected_category = Category.objects.get(id=category_id) document_list = [] if selected_category.parent.id == 1: # 카테고리가 상위 카테고리인지 아닌지를 판별 후, 상위 카테고리일 경우엔 하위 카테고리의 문서 리스트를 추가함 children = Category.objects.all().filter(parent=selected_category.id) for child in children: document_list += Document.objects.all() \ .filter(category_id=child.id, public_doc=True) document_list += Document.objects.all().filter( category=category_id, public_doc=True) document_list.sort(key=lambda x: x.pk, reverse=True) paginator = Paginator(document_list, 4) documents = get_documents(paginator, page) context = { 'documents': documents, 'category_list': categoryList, 'category_id': category_id, 'page_range': get_page_number_range( paginator, documents), 'category_name': selected_category.name, } return render(request, 'jellyblog/category.html', context) @minified_response def detail(request, document_id): document = get_object_or_404(Document, pk=document_id) document.read() return render(request, 'jellyblog/detail.html', {'document': document, 'category_list': categoryList})
apache-2.0
Python
deaee894589a2247b9322ba5cdb94e4c127c35bd
correct docstring for KeyringLocked class
jaraco/keyring
keyring/errors.py
keyring/errors.py
import sys __metaclass__ = type class KeyringError(Exception): """Base class for exceptions in keyring """ class PasswordSetError(KeyringError): """Raised when the password can't be set. """ class PasswordDeleteError(KeyringError): """Raised when the password can't be deleted. """ class InitError(KeyringError): """Raised when the keyring could not be initialised """ class KeyringLocked(KeyringError): """Raised when the keyring failed unlocking """ class ExceptionRaisedContext: """ An exception-trapping context that indicates whether an exception was raised. """ def __init__(self, ExpectedException=Exception): self.ExpectedException = ExpectedException self.exc_info = None def __enter__(self): self.exc_info = object.__new__(ExceptionInfo) return self.exc_info def __exit__(self, *exc_info): self.exc_info.__init__(*exc_info) return self.exc_info.type and issubclass( self.exc_info.type, self.ExpectedException ) class ExceptionInfo: def __init__(self, *info): if not info: info = sys.exc_info() self.type, self.value, _ = info def __bool__(self): """ Return True if an exception occurred """ return bool(self.type) __nonzero__ = __bool__
import sys __metaclass__ = type class KeyringError(Exception): """Base class for exceptions in keyring """ class PasswordSetError(KeyringError): """Raised when the password can't be set. """ class PasswordDeleteError(KeyringError): """Raised when the password can't be deleted. """ class InitError(KeyringError): """Raised when the keyring could not be initialised """ class KeyringLocked(KeyringError): """Raised when the keyring could not be initialised """ class ExceptionRaisedContext: """ An exception-trapping context that indicates whether an exception was raised. """ def __init__(self, ExpectedException=Exception): self.ExpectedException = ExpectedException self.exc_info = None def __enter__(self): self.exc_info = object.__new__(ExceptionInfo) return self.exc_info def __exit__(self, *exc_info): self.exc_info.__init__(*exc_info) return self.exc_info.type and issubclass( self.exc_info.type, self.ExpectedException ) class ExceptionInfo: def __init__(self, *info): if not info: info = sys.exc_info() self.type, self.value, _ = info def __bool__(self): """ Return True if an exception occurred """ return bool(self.type) __nonzero__ = __bool__
mit
Python
15f45377dffa2e267464b38f5f87ffe9526fa8f6
Add support for jax (#585)
lanpa/tensorboardX,lanpa/tensorboardX
tensorboardX/x2num.py
tensorboardX/x2num.py
# DO NOT alter/destruct/free input object !
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging
import numpy as np
import six


def check_nan(array):
    tmp = np.sum(array)
    if np.isnan(tmp) or np.isinf(tmp):
        logging.warning('NaN or Inf found in input tensor.')
    return array


def make_np(x):
    if isinstance(x, list):
        return check_nan(np.array(x))
    if isinstance(x, np.ndarray):
        return check_nan(x)
    if isinstance(x, six.string_types):  # Caffe2 will pass name of blob(s) to fetch
        return check_nan(prepare_caffe2(x))
    if np.isscalar(x):
        return check_nan(np.array([x]))
    if 'torch' in str(type(x)):
        return check_nan(prepare_pytorch(x))
    if 'chainer' in str(type(x)):
        return check_nan(prepare_chainer(x))
    if 'mxnet' in str(type(x)):
        return check_nan(prepare_mxnet(x))
    if 'jax' in str(type(x)):
        return check_nan(np.array(x))
    raise NotImplementedError(
        'Got {}, but expected numpy array or torch tensor.'.format(type(x)))


def prepare_pytorch(x):
    import torch
    if isinstance(x, torch.autograd.Variable):
        x = x.data
    x = x.cpu().numpy()
    return x


def prepare_theano(x):
    import theano
    pass


def prepare_caffe2(x):
    from caffe2.python import workspace
    x = workspace.FetchBlob(x)
    return x


def prepare_mxnet(x):
    x = x.asnumpy()
    return x


def prepare_chainer(x):
    import chainer
    x = chainer.cuda.to_cpu(x.data)
    return x
# DO NOT alter/distruct/free input object ! from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import numpy as np import six def check_nan(array): tmp = np.sum(array) if np.isnan(tmp) or np.isinf(tmp): logging.warning('NaN or Inf found in input tensor.') return array def make_np(x): if isinstance(x, list): return check_nan(np.array(x)) if isinstance(x, np.ndarray): return check_nan(x) if isinstance(x, six.string_types): # Caffe2 will pass name of blob(s) to fetch return check_nan(prepare_caffe2(x)) if np.isscalar(x): return check_nan(np.array([x])) if 'torch' in str(type(x)): return check_nan(prepare_pytorch(x)) if 'chainer' in str(type(x)): return check_nan(prepare_chainer(x)) if 'mxnet' in str(type(x)): return check_nan(prepare_mxnet(x)) raise NotImplementedError( 'Got {}, but expected numpy array or torch tensor.'.format(type(x))) def prepare_pytorch(x): import torch if isinstance(x, torch.autograd.Variable): x = x.data x = x.cpu().numpy() return x def prepare_theano(x): import theano pass def prepare_caffe2(x): from caffe2.python import workspace x = workspace.FetchBlob(x) return x def prepare_mxnet(x): x = x.asnumpy() return x def prepare_chainer(x): import chainer x = chainer.cuda.to_cpu(x.data) return x
mit
Python
4e280094687d8c369a1eee3c8b7bb246549898eb
Update utils.py
CMPUT404/socialdistribution,CMPUT404/socialdistribution,CMPUT404/socialdistribution
backend/utils.py
backend/utils.py
from rest_framework.views import exception_handler from rest_framework.exceptions import APIException, AuthenticationFailed checks = ['Username not found', 'Username already exists', 'Authentication failed'] def custom_exception_handler(exc): """ Exception handler called by all raised exceptions during HTTP requests. Return value: { "error":"message body" } """ # Debug exceptions print 'EXCEPTION DEBUG %s' %exc if not isinstance(exc.detail, unicode): try: # original error message is {'detail':[list of messages]} # Get values from dictionary and take first list element msg = exc.detail.values()[0][0] exc = GenericException(msg) except: exc = GenericException() response = exception_handler(exc) if response is not None: # Uncomment to add status code in message body # response.data['status_code'] = response.status_code if response.data['detail']: response.data['error'] = response.data['detail'] del response.data['detail'] return response class GenericException(APIException): status_code = 400 default_detail = 'Error encountered' class UsernameNotFound(APIException): status_code = 400 default_detail = 'Username not found' class UsernameAlreadyExists(APIException): status_code = 400 default_detail = 'Username already exists' class AuthenticationFailure(AuthenticationFailed): status_code = 401 default_detail = 'Authentication failed'
from rest_framework.views import exception_handler from rest_framework.exceptions import APIException, AuthenticationFailed checks = ['Username not found', 'Username already exists', 'Authentication failed'] def custom_exception_handler(exc): """ Exception handler called by all raised exceptions during HTTP requests. Return value: { "error":"message body" } """ # Debug exceptions print 'EXCEPTION DEBUG %s' %exc if not isinstance(exc.detail, unicode): try: # original error message is {'detail':[list of messages]} # Get values from dictionary and take first list element msg = exc.detail.values()[0][0] exc = GenericException(msg) except: exc = GenericException() response = exception_handler(exc) if response is not None: # Uncomment to add status code in message body # response.data['status_code'] = response.status_code if response.data['detail']: response.data['error'] = response.data['detail'] del response.data['detail'] return response class GenericException(APIException): status_code = 400 default_detail = 'Error encountered' class UsernameNotFound(APIException): status_code = 400 default_detail = 'Username not found' class UsernameAlreadyExists(APIException): status_code = 400 default_detail = 'Username already exists' class AuthenticationFailure(AuthenticationFailed): status_code = 401 default_detail = 'Authentication failed'
apache-2.0
Python
cd4e7c5bc10c8e946ddf31d99a249a5a97b2dfda
Update get-observations.py
valpo-sats/scheduling-bazaar,valpo-sats/scheduling-bazaar
python-files/get-observations.py
python-files/get-observations.py
#!/usr/bin/env python3

"""
Utility to get observations from a SatNOGS Network server.

Collects the paginated objects into a single JSON list
and stores in a file.
"""

import json

import requests


OBSERVATIONS_API = 'https://network.satnogs.org/api/observations'
OBSERVATIONS_JSON = 'observations.json'


def get(url):
    print(url)
    return requests.get(url)

try:
    with open(OBSERVATIONS_JSON) as f:
        data = json.load(f)
        # json.dump() coerces to string keys
        # convert keys back to integers
        observations = {}
        for k, v in data.items():
            print(k)
            observations[int(k)] = v
        # observations = {v['id']:v for k,v in data.items()}
except IOError:
    observations = {}


def update(o, observations):
    o_id = o['id']
    print(o_id)
    if o_id not in observations:
        observations[o_id] = o
        was_new = True
    else:
        # merge into the stored record; the bare observations.update(o) would
        # splash the observation's own keys into the top-level id map
        observations[o_id].update(o)
        was_new = False
    return was_new


r = get(OBSERVATIONS_API)
updated = [update(o, observations) for o in r.json()]
any_updated = any(updated)

nextpage = r.links.get('next')
while any_updated and nextpage:
    r = get(nextpage['url'])
    updated = [update(o, observations) for o in r.json()]
    print(updated)
    any_updated = any(updated)
    if any_updated:
        nextpage = r.links.get('next')

with open(OBSERVATIONS_JSON, 'w') as fp:
    json.dump(observations, fp, sort_keys=True, indent=2)
#!/usr/bin/env python3 """ Utility to get observations from a SatNOGS Network server. Collects the paginated objects into a single JSON list and stores in a file. """ import json import requests OBSERVATIONS_API = 'https://network.satnogs.org/api/observations' OBSERVATIONS_JSON = 'observations.json' def get(url): print(url) return requests.get(url) observations = [] r = get(OBSERVATIONS_API) # r = requests.get(OBSERVATIONS_API) observations.extend(r.json()) nextpage = r.links.get('next') while nextpage: # r = requests.get(nextpage['url']) r = get(nextpage['url']) observations.extend(r.json()) nextpage = r.links.get('next') observations = sorted(observations, key=lambda s: s['id']) with open(OBSERVATIONS_JSON, 'w') as fp: json.dump(observations, fp, sort_keys=True, indent=2)
agpl-3.0
Python
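The rewrite above leans on requests parsing the HTTP Link header into r.links, so collecting a paginated API is just following the 'next' relation until it disappears. A generic sketch of that loop (fetch_all is a made-up helper, not part of the script):

import requests

def fetch_all(url):
    items = []
    while url:
        r = requests.get(url)
        r.raise_for_status()
        items.extend(r.json())
        # r.links comes from the Link header; when there is no rel="next",
        # .get() returns {} and the loop ends.
        url = r.links.get('next', {}).get('url')
    return items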
43e8b090d806d615a8153d1e14063cc6d274bb25
Update issue 130 Now I also applied the fix :)
RDFLib/rdflib,ssssam/rdflib,marma/rdflib,marma/rdflib,dbs/rdflib,armandobs14/rdflib,ssssam/rdflib,armandobs14/rdflib,RDFLib/rdflib,ssssam/rdflib,avorio/rdflib,yingerj/rdflib,marma/rdflib,armandobs14/rdflib,yingerj/rdflib,RDFLib/rdflib,dbs/rdflib,dbs/rdflib,marma/rdflib,yingerj/rdflib,avorio/rdflib,avorio/rdflib,ssssam/rdflib,yingerj/rdflib,armandobs14/rdflib,avorio/rdflib,dbs/rdflib,RDFLib/rdflib
rdflib/plugins/serializers/nt.py
rdflib/plugins/serializers/nt.py
""" N-Triples RDF graph serializer for RDFLib. See <http://www.w3.org/TR/rdf-testcases/#ntriples> for details about the format. """ from rdflib.serializer import Serializer import warnings class NTSerializer(Serializer): """ Serializes RDF graphs to NTriples format. """ def serialize(self, stream, base=None, encoding=None, **args): if base is not None: warnings.warn("NTSerializer does not support base.") if encoding is not None: warnings.warn("NTSerializer does not use custom encoding.") encoding = self.encoding for triple in self.store: stream.write(_nt_row(triple).encode(encoding, "replace")) stream.write("\n") def _nt_row(triple): return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), _xmlcharref_encode(triple[2].n3())) # from <http://code.activestate.com/recipes/303668/> def _xmlcharref_encode(unicode_data, encoding="ascii"): """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler.""" chars = [] # nothing to do about xmlchars, but replace newlines with escapes: unicode_data=unicode_data.replace("\n","\\n") if unicode_data.startswith('"""'): unicode_data = unicode_data.replace('"""', '"') # Step through the unicode_data string one character at a time in # order to catch unencodable characters: for char in unicode_data: try: chars.append(char.encode(encoding, 'strict')) except UnicodeError: chars.append('\u%04X' % ord(char) if ord(char) <= 0xFFFF else '\U%08X' % ord(char)) return ''.join(chars)
""" N-Triples RDF graph serializer for RDFLib. See <http://www.w3.org/TR/rdf-testcases/#ntriples> for details about the format. """ from rdflib.serializer import Serializer import warnings class NTSerializer(Serializer): """ Serializes RDF graphs to NTriples format. """ def serialize(self, stream, base=None, encoding=None, **args): if base is not None: warnings.warn("NTSerializer does not support base.") if encoding is not None: warnings.warn("NTSerializer does not use custom encoding.") encoding = self.encoding for triple in self.store: stream.write(_nt_row(triple).encode(encoding, "replace")) stream.write("\n") def _nt_row(triple): return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), _xmlcharref_encode(triple[2].n3())) # from <http://code.activestate.com/recipes/303668/> def _xmlcharref_encode(unicode_data, encoding="ascii"): """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler.""" chars = [] # nothing to do about xmlchars, but replace newlines with escapes: unicode_data=unicode_data.replace("\n","\\n") if unicode_data.startswith('"""'): unicode_data = unicode_data.replace('"""', '"') # Step through the unicode_data string one character at a time in # order to catch unencodable characters: for char in unicode_data: try: chars.append(char.encode(encoding, 'strict')) except UnicodeError: chars.append('\u%04X' % ord(char)) return ''.join(chars)
bsd-3-clause
Python
0035200543a7b226a095d2fb4ec880e0dd8732fd
Rearrange test data
projectweekend/Pi-Jukebox,projectweekend/Pi-Jukebox,projectweekend/Pi-Jukebox
make_test_data.py
make_test_data.py
import sqlite3 INSERT_SONG = ''' INSERT INTO jukebox_song_queue VALUES (?) ''' TEST_URIS = [ 'spotify:track:68MToCqJRJvNW8tYoxDl5p', 'spotify:track:0p1VSXFdkr71f0nO21IEyq', 'spotify:track:7udJ4LFSIrRnySD3eI8lad' ] if __name__ == '__main__': conn = sqlite3.connect('jukebox.db') cursor = conn.cursor() for uri in TEST_URIS: uri = (uri,) cursor.execute(INSERT_SONG, uri) conn.commit() conn.close()
import sqlite3 INSERT_SONG = ''' INSERT INTO jukebox_song_queue VALUES (?) ''' TEST_URIS = [ 'spotify:track:7udJ4LFSIrRnySD3eI8lad', 'spotify:track:0p1VSXFdkr71f0nO21IEyq', 'spotify:track:68MToCqJRJvNW8tYoxDl5p' ] if __name__ == '__main__': conn = sqlite3.connect('jukebox.db') cursor = conn.cursor() for uri in TEST_URIS: uri = (uri,) cursor.execute(INSERT_SONG, uri) conn.commit() conn.close()
mit
Python
95ea1d7d6564bcbb2e3b8d2ba254ccd2c1c38436
Add import for focused stuff
nestorsalceda/mamba
mamba/__init__.py
mamba/__init__.py
__version__ = '0.9.2' def description(message): pass def _description(message): pass def fdescription(message): pass def it(message): pass def _it(message): pass def fit(message): pass def context(message): pass def _context(message): pass def fcontext(message): pass def before(): pass def after(): pass
__version__ = '0.9.2' def description(message): pass def _description(message): pass def it(message): pass def _it(message): pass def context(message): pass def _context(message): pass def before(): pass def after(): pass
mit
Python
d3b3e9af722ac00b21bf36706f4e0ab7cf94af00
bump to v0.6.4
rubik/mando,MarioSchwalbe/mando,MarioSchwalbe/mando
mando/__init__.py
mando/__init__.py
__version__ = '0.6.4' try: from mando.core import Program except ImportError as e: # pragma: no cover # unfortunately the only workaround for Python2.6, argparse and setup.py e.version = __version__ raise e main = Program() command = main.command arg = main.arg parse = main.parse execute = main.execute
__version__ = '0.5'

try:
    from mando.core import Program
except ImportError as e:  # pragma: no cover
    # unfortunately the only workaround for Python2.6, argparse and setup.py
    e.version = __version__
    raise e

main = Program()
command = main.command
arg = main.arg
parse = main.parse
execute = main.execute
mit
Python
59400100aa2f35bfea52b3cf049ef8d0f958527d
Fix error when reaching a dead end in the markov chain
tmerr/trevornet
markov/markov2.py
markov/markov2.py
#!python3

import string
import random
import time
import sys

'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build a
markov chain with a state for each word(s), and start walking through the
chain, writing incoherent text to the terminal.
'''

asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')


def strip2ascii(txt):
    return ''.join([ch for ch in txt if ch in asciiset])


def tokenize(fname):
    '''
    Generate tokens defined by
    - Sequences of characters that aren't spaces
    - Periods

    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok, '.')
    '''
    with open(fname, 'r') as f:
        for line in f:
            stripped = strip2ascii(line)
            for word in stripped.split():
                if word[-1] == '.':
                    yield word[:-1]
                    yield '.'
                else:
                    yield word


def buildtransitionmap(tokens, order):
    dct = {}
    prev = ('',)*order
    for token in tokens:
        if prev in dct:
            dct[prev].append(token)
        else:
            dct[prev] = [token]
        prev = prev[1:]+(token,)
    return dct


def walk(transmap, prev=None):
    if prev == None:
        prev = random.choice(list(transmap.keys()))
    while True:
        if not prev in transmap:
            prev = random.choice(list(transmap.keys()))
        word = random.choice(transmap[prev])
        yield word
        prev = prev[1:]+(word,)


def eternalramble(fname, order):
    '''
    Walk through the markov chain printing out words to the terminal one at a time
    '''
    transmap = buildtransitionmap(tokenize(fname), order)
    for word in walk(transmap):
        print(word, end=' ')
        sys.stdout.flush()
        time.sleep(0.25)


def printusage():
    print('Usage: markov filename order')
    print('    filename: the filename of the text to base the markov chain on.')
    print('    order: how many consecutive words make up each state (2 works well)')


def launch():
    if len(sys.argv) != 3:
        printusage()
        return
    try:
        order = int(sys.argv[2])
    except:
        printusage()
        return
    eternalramble(sys.argv[1], order)

if __name__ == '__main__':
    launch()
#!python3

import string
import random
import time
import sys

'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build a
markov chain with a state for each word(s), and start walking through the
chain, writing incoherent text to the terminal.
'''

asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')


def strip2ascii(txt):
    return ''.join([ch for ch in txt if ch in asciiset])


def tokenize(fname):
    '''
    Generate tokens defined by
    - Sequences of characters that aren't spaces
    - Periods

    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok, '.')
    '''
    with open(fname, 'r') as f:
        for line in f:
            stripped = strip2ascii(line)
            for word in stripped.split():
                if word[-1] == '.':
                    yield word[:-1]
                    yield '.'
                else:
                    yield word


def buildtransitionmap(tokens, order):
    dct = {}
    prev = ('',)*order
    for token in tokens:
        if prev in dct:
            dct[prev].append(token)
        else:
            dct[prev] = [token]
        prev = prev[1:]+(token,)
    return dct


def transition(word, transmap):
    return random.choice(transmap[word])


def eternalramble(fname, order):
    '''
    Walk through the markov chain printing out words to the terminal one at a time
    '''
    transmap = buildtransitionmap(tokenize(fname), order)
    prev = random.choice(list(transmap.keys()))
    while True:
        word = transition(prev, transmap)
        print(word, end=' ')
        prev = prev[1:]+(word,)
        sys.stdout.flush()
        time.sleep(0.25)


def printusage():
    print('Usage: markov filename order')
    print('    filename: the filename of the text to base the markov chain on.')
    print('    order: how many consecutive words make up each state (2 works well)')


def launch():
    if len(sys.argv) != 3:
        printusage()
        return
    try:
        order = int(sys.argv[2])
    except:
        printusage()
        return
    eternalramble(sys.argv[1], order)

if __name__ == '__main__':
    launch()
mit
Python
cfb09353b02dd230546775d18dadb1ba7ed2acc6
Refactor submit_comment tests
18F/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site,18F/regulations-site,18F/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,eregs/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site
regulations/tests/tasks_tests.py
regulations/tests/tasks_tests.py
import json

import mock
import six

from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException

from django.test import SimpleTestCase, override_settings

from regulations.tasks import submit_comment


@mock.patch('regulations.tasks.save_failed_submission')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
    ATTACHMENT_BUCKET='test-bucket',
    ATTACHMENT_ACCESS_KEY_ID='test-access-key',
    ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
    ATTACHMENT_MAX_SIZE=42,
    REGS_GOV_API_URL='test-url',
    REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):

    def setUp(self):
        self.file_handle = six.BytesIO("some-content")
        self.submission = {'assembled_comment': [
            {"id": "A1", "comment": "A simple comment", "files": []},
            {"id": "A5", "comment": "Another comment", "files": []}
        ]}

    def test_submit_comment(self, html_to_pdf, post, retry,
                            save_failed_submission):
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=self.file_handle)

        expected_result = {'tracking_number': 'some-tracking-number'}
        post.return_value.status_code = 201
        post.return_value.json.return_value = expected_result

        result = submit_comment(self.submission)

        self.assertEqual(result, expected_result)

    def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
                                        save_failed_submission):
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=self.file_handle)

        post.side_effect = [RequestException]
        retry.return_value = Retry()

        with self.assertRaises(Retry):
            submit_comment(self.submission)

    def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
                                           save_failed_submission):
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=self.file_handle)

        post.side_effect = [RequestException]
        retry.return_value = MaxRetriesExceededError()

        submit_comment(self.submission)

        save_failed_submission.assert_called_with(json.dumps(self.submission))
import json

import mock
import six

from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException

from django.test import SimpleTestCase, override_settings

from regulations.tasks import submit_comment


@mock.patch('regulations.tasks.save_failed_submission')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
    ATTACHMENT_BUCKET='test-bucket',
    ATTACHMENT_ACCESS_KEY_ID='test-access-key',
    ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
    ATTACHMENT_MAX_SIZE=42,
    REGS_GOV_API_URL='test-url',
    REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):

    def test_submit_comment(self, html_to_pdf, post, retry,
                            save_failed_submission):
        file_handle = six.BytesIO("foobar")
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=file_handle)

        expected_result = {'tracking_number': '133321'}
        post.return_value.status_code = 201
        post.return_value.json.return_value = expected_result

        body = {'assembled_comment': {'sections': []}}
        result = submit_comment(body)

        self.assertEqual(result, expected_result)

    def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
                                        save_failed_submission):
        file_handle = six.BytesIO("foobar")
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=file_handle)

        post.side_effect = [RequestException]
        retry.return_value = Retry()

        body = {'assembled_comment': {'sections': []}}
        with self.assertRaises(Retry):
            submit_comment(body)

    def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
                                           save_failed_submission):
        file_handle = six.BytesIO("foobar")
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=file_handle)

        post.side_effect = [RequestException]
        retry.return_value = MaxRetriesExceededError()

        body = {'assembled_comment': {'sections': []}}
        submit_comment(body)

        save_failed_submission.assert_called_with(json.dumps(body))
cc0-1.0
Python
29e491c5505d2068b46eb489044455968e53ab70
Add tests for strait and fjord
mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource
test/400-bay-water.py
test/400-bay-water.py
# osm_id: 43950409 name: San Pablo Bay
assert_has_feature(
    14, 2623, 6318, 'water',
    { 'kind': 'bay', 'label_placement': 'yes' })

# osm_id: 360566115 name: Byron strait
assert_has_feature(
    14, 15043, 8311, 'water',
    { 'kind': 'strait', 'label_placement': 'yes' })

# osm_id: -1451065 name: Horsens Fjord
assert_has_feature(
    14, 8645, 5114, 'water',
    { 'kind': 'fjord', 'label_placement': 'yes' })
assert_has_feature(
    14, 2623, 6318, 'water',
    { 'kind': 'bay', 'label_placement': 'yes' })
mit
Python
83781f3b2f1cde0aab913ff4d64de45cf9b798be
Update snooper for multi-spline qp controller inputs
openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro
software/control/src/qp_controller_input_snooper.py
software/control/src/qp_controller_input_snooper.py
#!/usr/bin/python

'''
Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block.
'''

import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np

color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0],
               [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];

def pval(coefs, t_off):
    out = np.array([0.0]*6)
    for j in range(0, 6):
        out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
    return out

def handle_qp_controller_input_msg(channel, data):
    msg = lcmt_qp_controller_input.decode(data)
    #print("received")
    # draw spline segment for each tracked body
    for i in range(0, msg.num_tracked_bodies):
        bmd = msg.body_motion_data[i]
        ts = bmd.ts;
        color = color_order[i%len(color_order)];
        for j in range(0, msg.body_motion_data[i].num_spline_coefs):
            tsdense = np.linspace(ts[j], ts[j+1], 20);
            coefs = np.array(bmd.coefs[j].coefs);
            gl.glColor3f(color[0], color[1], color[2]);
            gl.glLineWidth(5);
            gl.glBegin(GL_LINES);
            ps = np.array([pval(coefs, t-ts[j]) for t in tsdense]);
            for j in range(0,tsdense.size-1):
                gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
                gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
            gl.glEnd();
    gl.switch_buffer()

lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);

try:
    while True:
        lc.handle()
except KeyboardInterrupt:
    pass
#!/usr/bin/python

'''
Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block.
'''

import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np

color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0],
               [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];

def pval(coefs, t_off):
    out = np.array([0.0]*6)
    for j in range(0, 6):
        out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
    return out

def handle_qp_controller_input_msg(channel, data):
    msg = lcmt_qp_controller_input.decode(data)
    #print("received")
    # draw spline segment for each tracked body
    for i in range(0, msg.num_tracked_bodies):
        bmd = msg.body_motion_data[i]
        ts = bmd.ts;
        tsdense = np.linspace(ts[0], ts[-1], 20);
        coefs = np.array(bmd.coefs);
        color = color_order[i%len(color_order)];
        gl.glColor3f(color[0], color[1], color[2]);
        gl.glLineWidth(5);
        gl.glBegin(GL_LINES);
        ps = np.array([pval(coefs, t-ts[0]) for t in tsdense]);
        for j in range(0,tsdense.size-1):
            gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
            gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
        gl.glEnd();
    gl.switch_buffer()

lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);

try:
    while True:
        lc.handle()
except KeyboardInterrupt:
    pass
bsd-3-clause
Python
cdf545cf9385a0490590cd0162141025a1301c09
Use argparse formatter RawDescriptionHelpFormatter, maybe temporarily
bgottula/track,bgottula/track
track/config.py
track/config.py
import configargparse

DEFAULT_CONFIG_FILES=[
    './track.cfg',
    '~/.track.cfg',
]

# Bit of a cheat... not actually an object constructor, just a 'make me an object' method
def ArgParser():
    return configargparse.ArgParser(
        ignore_unknown_config_file_keys =True,
        allow_abbrev                    =True,
        default_config_files            =DEFAULT_CONFIG_FILES,
#        formatter_class                 =configargparse.ArgumentDefaultsHelpFormatter,
        formatter_class                 =configargparse.RawDescriptionHelpFormatter,
        config_file_parser_class        =configargparse.DefaultConfigFileParser, # INI format
        args_for_setting_config_path    =['-c', '--cfg'],
        args_for_writing_out_config_file=['-w', '--cfg-write'],
    )
import configargparse

DEFAULT_CONFIG_FILES=[
    './track.cfg',
    '~/.track.cfg',
]

# Bit of a cheat... not actually an object constructor, just a 'make me an object' method
def ArgParser():
    return configargparse.ArgParser(
        ignore_unknown_config_file_keys =True,
        allow_abbrev                    =True,
        default_config_files            =DEFAULT_CONFIG_FILES,
        formatter_class                 =configargparse.ArgumentDefaultsHelpFormatter,
        config_file_parser_class        =configargparse.DefaultConfigFileParser, # INI format
        args_for_setting_config_path    =['-c', '--cfg'],
        args_for_writing_out_config_file=['-w', '--cfg-write'],
    )
mit
Python
148d4c44a9eb63016b469c6bf317a3dbe9ed7918
Add documentation for Permutations class
PermutaTriangle/Permuta
permuta/permutations.py
permuta/permutations.py
from .misc import DancingLinks
from .permutation import Permutation
import random


class Permutations(object):
    """Class for iterating through all Permutations of length n"""

    def __init__(self, n):
        """Returns an object giving all permutations of length n"""
        assert 0 <= n
        self.n = n

    def __iter__(self):
        """Iterates through permutations of length n in lexical order"""
        left = DancingLinks(range(1, self.n+1))
        res = []

        def gen():
            if len(left) == 0:
                yield Permutation(list(res))
            else:
                cur = left.front
                while cur is not None:
                    left.erase(cur)
                    res.append(cur.value)

                    for p in gen():
                        yield p

                    res.pop()
                    left.restore(cur)
                    cur = cur.next

        return gen()

    def random_element(self):
        """Returns a random permutation of length n"""
        p = [i+1 for i in range(self.n)]
        for i in range(self.n-1, -1, -1):
            j = random.randint(0, i)
            p[i], p[j] = p[j], p[i]
        return Permutation(p)

    def __str__(self):
        return 'The set of Permutations of length %d' % self.n

    def __repr__(self):
        return 'Permutations(%d)' % self.n
from .misc import DancingLinks
from .permutation import Permutation
import random


class Permutations(object):
    def __init__(self, n):
        assert 0 <= n
        self.n = n

    def __iter__(self):
        left = DancingLinks(range(1, self.n+1))
        res = []

        def gen():
            if len(left) == 0:
                yield Permutation(list(res))
            else:
                cur = left.front
                while cur is not None:
                    left.erase(cur)
                    res.append(cur.value)

                    for p in gen():
                        yield p

                    res.pop()
                    left.restore(cur)
                    cur = cur.next

        return gen()

    def random_element(self):
        p = [ i+1 for i in range(self.n) ]
        for i in range(self.n-1, -1, -1):
            j = random.randint(0, i)
            p[i],p[j] = p[j],p[i]
        return Permutation(p)

    def __str__(self):
        return 'The set of Permutations of length %d' % self.n

    def __repr__(self):
        return 'Permutations(%d)' % self.n
bsd-3-clause
Python
8b9f68514d78851f3b445f996f3eaf607831d352
Add more descriptive names to variables and functions
alaudet/raspi-sump,alaudet/raspi-sump,jreuter/raspi-sump,jreuter/raspi-sump
raspisump/checkpid.py
raspisump/checkpid.py
#!/usr/bin/python

# Check to make sure process raspi-sump is running and restart if required.

import subprocess
import time


def check_pid():
    '''Check status of raspisump.py process.'''
    cmdp1 = "ps aux"
    cmdp2 = "grep -v grep"
    cmdp3 = "grep -v sudo"
    cmdp4 = "grep -c /home/pi/raspi-sump/raspisump.py"
    cmdp1list = cmdp1.split(' ')
    cmdp2list = cmdp2.split(' ')
    cmdp3list = cmdp3.split(' ')
    cmdp4list = cmdp4.split(' ')
    part1 = subprocess.Popen(cmdp1list, stdout=subprocess.PIPE)
    part2 = subprocess.Popen(cmdp2list, stdin=part1.stdout, stdout=subprocess.PIPE)
    part1.stdout.close()
    part3 = subprocess.Popen(cmdp3list, stdin=part2.stdout,stdout=subprocess.PIPE)
    part2.stdout.close()
    part4 = subprocess.Popen(cmdp4list, stdin=part3.stdout,stdout=subprocess.PIPE)
    part3.stdout.close()
    number_of_processes = int(part4.communicate()[0])

    if number_of_processes == 0:
        log_restarts("Process stopped, restarting")
        restart()
    elif number_of_processes == 1:
        exit(0)
    else:
        log_restarts("Multiple processes...killing and restarting")
        kill_start()


def restart():
    '''Restart raspisump.py process.'''
    restart_cmd = "/home/pi/raspi-sump/raspisump.py &"
    restart_now = restart_cmd.split(' ')
    subprocess.Popen(restart_now)
    exit(0)


def kill_start():
    '''Kill all instances of raspisump.py process.'''
    kill_cmd = "killall 09 raspisump.py"
    kill_it = kill_cmd.split(' ')
    subprocess.call(kill_it)
    restart()


def log_restarts(reason):
    '''Log all process restarts'''
    logfile = open("/home/pi/raspi-sump/logs/process_log", 'a')
    logfile.write(time.strftime("%Y-%m-%d %H:%M:%S,")),
    logfile.write(reason),
    logfile.write("\n")
    logfile.close

if __name__ == "__main__":
    check_pid()
#!/usr/bin/python

# Check to make sure process raspi-sump is running and restart if required.

import subprocess
import time


def check_pid():
    '''Check status of raspisump.py process.'''
    cmdp1 = "ps aux"
    cmdp2 = "grep -v grep"
    cmdp3 = "grep -v sudo"
    cmdp4 = "grep -c /home/pi/raspi-sump/raspisump.py"
    cmdp1list = cmdp1.split(' ')
    cmdp2list = cmdp2.split(' ')
    cmdp3list = cmdp3.split(' ')
    cmdp4list = cmdp4.split(' ')
    part1 = subprocess.Popen(cmdp1list, stdout=subprocess.PIPE)
    part2 = subprocess.Popen(cmdp2list, stdin=part1.stdout, stdout=subprocess.PIPE)
    part1.stdout.close()
    part3 = subprocess.Popen(cmdp3list, stdin=part2.stdout,stdout=subprocess.PIPE)
    part2.stdout.close()
    part4 = subprocess.Popen(cmdp4list, stdin=part3.stdout,stdout=subprocess.PIPE)
    part3.stdout.close()
    x = int(part4.communicate()[0])

    if x == 0:
        log_check("Process stopped, restarting")
        restart()
    elif x == 1:
        exit(0)
    else:
        log_check("Multiple Processes...Killing and Restarting")
        kill_start()


def restart():
    '''Restart raspisump.py process.'''
    restart_cmd = "/home/pi/raspi-sump/raspisump.py &"
    restart_now = restart_cmd.split(' ')
    subprocess.Popen(restart_now)
    exit(0)


def kill_start():
    '''Kill all instances of raspisump.py process.'''
    kill_cmd = "killall 09 raspisump.py"
    kill_it = kill_cmd.split(' ')
    subprocess.call(kill_it)
    restart()


def log_check(reason):
    logfile = open("/home/pi/raspi-sump/logs/process_log", 'a')
    logfile.write(time.strftime("%Y-%m-%d %H:%M:%S,")),
    logfile.write(reason),
    logfile.write("\n")
    logfile.close

if __name__ == "__main__":
    check_pid()
mit
Python
51373b776403b94cf0b72b43952013f3b4ecdb2d
Remove useless codes
Sherlock-Holo/Holosocket
holosocket/encrypt.py
holosocket/encrypt.py
import struct

from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHA256
from Cryptodome.Random import get_random_bytes


class aes_gcm:
    def __init__(self, key, salt=None):
        """Create a new AES-GCM cipher.

        key: Your password like: passw0rd
        salt: a 16 bytes length byte string, if not provided a random salt will be used
        nonce: a 8 bytes length byte string, if not provided a random nonce will be used"""

        self.raw_key = key.encode()
        if not salt:
            self._salt = get_random_bytes(16)
        else:
            if len(salt) != 16:
                error_msg = 'salt length should be 16, not {}'
                raise ValueError(error_msg.format(len(salt)))
            else:
                self._salt = salt

        self.key = SHA256.new(self.raw_key + self._salt).digest()  # generate a 256 bytes key
        self.nonce = 0

    def _new(self):
        nonce = struct.pack('>Q', self.nonce)
        self.cipher = AES.new(self.key, AES.MODE_GCM, nonce)
        self.nonce += 1

    def encrypt(self, data):
        """Encrypt data return cipher.

        data: raw data"""
        self._new()
        return self.cipher.encrypt_and_digest(data)

    def decrypt(self, data, mac):
        """Decrypt data.

        data: cipher
        mac: gmac"""
        self._new()
        # Verify MAC, if matching, will return plain text or raise ValueError
        plain = self.cipher.decrypt_and_verify(data, mac)
        return plain

    @property
    def salt(self):
        return self._salt


def test():
    # AES-GCM
    print('AES-256-GCM')
    gen = aes_gcm('test')
    salt = gen.salt
    gcipher = gen.encrypt(b'holo')
    gde = aes_gcm('test', salt)
    print(gde.decrypt(*gcipher))


if __name__ == '__main__':
    test()
import struct

from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHA256
from Cryptodome.Random import get_random_bytes

#Cipher_Tag = {'aes-256-gcm': 16}
#Nonce_Len = 8  # fuck you 12 bytes


class aes_gcm:
    def __init__(self, key, salt=None):
        """Create a new AES-GCM cipher.

        key: Your password like: passw0rd
        salt: a 16 bytes length byte string, if not provided a random salt will be used
        nonce: a 8 bytes length byte string, if not provided a random nonce will be used"""

        self.raw_key = key.encode()
        if not salt:
            self._salt = get_random_bytes(16)
        else:
            if len(salt) != 16:
                error_msg = 'salt length should be 16, not {}'
                raise ValueError(error_msg.format(len(salt)))
            else:
                self._salt = salt

        self.key = SHA256.new(self.raw_key + self._salt).digest()  # generate a 256 bytes key
        self.nonce = 0

    def _new(self):
        nonce = struct.pack('>Q', self.nonce)
        self.cipher = AES.new(self.key, AES.MODE_GCM, nonce)
        self.nonce += 1

    def encrypt(self, data):
        """Encrypt data return cipher.

        data: raw data"""
        self._new()
        #Return (cpiher, MAC)
        return self.cipher.encrypt_and_digest(data)

    def decrypt(self, data, mac):
        """Decrypt data.

        data: cipher
        mac: gmac"""
        self._new()
        #Verify MAC, if matching, will return plain text or raise ValueError
        plain = self.cipher.decrypt_and_verify(data, mac)
        return plain

    @property
    def salt(self):
        return self._salt


def test():
    # AES-GCM
    print('AES-256-GCM')
    gen = aes_gcm('test')
    salt = gen.salt
    gcipher = gen.encrypt(b'holo')
    gde = aes_gcm('test', salt)
    print(gde.decrypt(*gcipher))


if __name__ == '__main__':
    test()
mpl-2.0
Python
f21204c8828e840dc54c6822348fa9a47bc8964e
Add model's to_dict method.
yola/opensrs,yola/opensrs
opensrs/models.py
opensrs/models.py
from dateutil.parser import parse


class Domain(object):
    def __init__(self, data):
        self.name = data['name']
        self.auto_renew = (data['f_auto_renew'] == 'Y')
        self.expiry_date = parse(data['expiredate']).date()

    @property
    def tld(self):
        return self.name.split('.')[-1]

    def to_dict(self):
        return {
            'name': self.name,
            'auto_renew': self.auto_renew,
            'expiry_date': self.expiry_date
        }
from dateutil.parser import parse


class Domain(object):
    def __init__(self, data):
        self.name = data['name']
        self.auto_renew = (data['f_auto_renew'] == 'Y')
        self.expiry_date = parse(data['expiredate']).date()

    @property
    def tld(self):
        return self.name.split('.')[-1]
mit
Python
da12bb0058cb48d3262eb70469aa30cdb8312ee2
fix typos/bugs/indexing in block dicing
Rhoana/rhoana,Rhoana/rhoana,Rhoana/rhoana,Rhoana/rhoana,Rhoana/rhoana,Rhoana/rhoana
Control/dice_block.py
Control/dice_block.py
import os
import sys
import subprocess
import h5py

def check_file(filename):
    # verify the file has the expected data
    f = h5py.File(filename, 'r')
    if set(f.keys()) != set(['segmentations', 'probabilities']):
        os.unlink(filename)
        return False
    return True

try:
    args = sys.argv[1:]
    i_min = int(args.pop(0))
    j_min = int(args.pop(0))
    i_max = int(args.pop(0))
    j_max = int(args.pop(0))
    output = args.pop(0)
    input_slices = args

    if os.path.exists(output):
        print output, "already exists"
        if check_file(output):
            sys.exit(0)
        else:
            os.unlink(output)

    # Write to a temporary location to avoid partial files
    temp_file_path = output + '_partial'
    out_f = h5py.File(temp_file_path, 'w')
    num_slices = len(input_slices)
    for slice_idx, slice in enumerate(input_slices):
        in_f = h5py.File(slice, 'r')
        segs = in_f['segmentations'][i_min:i_max, j_min:j_max, :]
        probs = in_f['probabilities'][i_min:i_max, j_min:j_max]
        if not 'segmentations' in out_f.keys():
            outsegs = out_f.create_dataset('segmentations',
                                           tuple(list(segs.shape) + [num_slices]),
                                           dtype=segs.dtype,
                                           chunks=(64, 64, segs.shape[2], 1))
            outprobs = out_f.create_dataset('probabilities',
                                            tuple(list(probs.shape) + [num_slices]),
                                            dtype=probs.dtype,
                                            chunks=(64, 64, 1))
        outsegs[:, :, :, slice_idx] = segs
        outprobs[:, :, slice_idx] = probs
    out_f.close()

    # move to final location
    os.rename(output + '_partial', output)
    print "Successfully wrote", output
except KeyboardInterrupt:
    pass
import os
import sys
import subprocess
import h5py

def check_file(filename):
    # verify the file has the expected data
    f = h5py.File(filename, 'r')
    if set(f.keys()) != set(['segmentations', 'probabilities']):
        os.unlink(filename)
        return False
    return True

try:
    args = sys.argv[1:]
    i_min = int(args.pop())
    j_min = int(args.pop())
    i_max = int(args.pop())
    j_max = int(args.pop())
    output = args.pop()
    input_slices = args

    if os.path.exists(segmentations_file):
        print segmentations_file, "already exists"
        if check_file(segmentations_file):
            sys.exit(0)
        else:
            os.unlink(output)

    # Write to a temporary location to avoid partial files
    temp_file_path = output + '_partial'
    out_f = h5py.File(temp_file_path, 'classify')
    num_slices = len(input_slices)
    for slice_idx, slice in enumerate(input_slices):
        in_f = h5py.File(slice, 'r')
        segs = in_f['segmentations'][i_min:i_max, j_min:j_max, :]
        probs = in_f['segmentations'][i_min:i_max, j_min:j_max]
        if not 'segmentations' in out_f.keys():
            outsegs = out_f.create_dataset('segmentations',
                                           tuple(list(segs.shape) + [num_slices]),
                                           dtype=segs.dtype,
                                           chunks=(64, 64, segs.shape[2], 1))
            outprobs = out_f.create_dataset('probabilities',
                                            dtype=probabilities.dtype,
                                            chunks=(64, 64, num_slices)
                                            chunks=(64, 64, 1))
        outsegs[:, :, :, slice_idx] = segs
        outprobs[:, :, slice_idx] = probs
    outf.close()

    # move to final location
    os.rename(output + '_partial', output)
    print "Successfully wrote", output
except KeyboardInterrupt:
    pass
mit
Python
5d2301b15e07394e24fed2fac2f258d72554eede
Add tests for query_geonames, MITIE, city resolution
openeventdata/mordecai
resources/tests/test_mordecai.py
resources/tests/test_mordecai.py
import os
import sys
import glob
from ConfigParser import ConfigParser
from mitie import named_entity_extractor
from ..country import CountryAPI
from ..places import PlacesAPI
from ..utilities import mitie_context, setup_es, query_geonames


def test_places_api_one():
    if os.environ.get('CI'):
        ci = 'circle'
        assert ci == 'circle'
    else:
        a = PlacesAPI()
        locs = "Ontario"
        result = a.process(locs, ['CAN'])
        gold = [{u'lat': 49.25014, u'searchterm': 'Ontario',
                 u'lon': -84.49983, u'countrycode': u'CAN',
                 u'placename': u'Ontario'}]
        assert result == gold


def test_query_geonames():
    conn = setup_es()
    placename = "Berlin"
    country_filter = ["DEU"]
    qg = query_geonames(conn, placename, country_filter)
    hit_hit_name = qg['hits']['hits'][0]['name']
    assert hit_hit_name == "Berlin"


def test_places_api_syria():
    if os.environ.get('CI'):
        ci = 'circle'
        assert ci == 'circle'
    else:
        a = PlacesAPI()
        locs = "Rebels from Aleppo attacked Damascus."
        result = a.process(locs, ['SYR'])
        gold = [{u'lat': 36.20124, u'searchterm': 'Aleppo',
                 u'lon': 37.16117, u'countrycode': u'SYR',
                 u'placename': u'Aleppo'},
                {u'lat': 33.5102, u'searchterm': 'Damascus',
                 u'lon': 36.29128, u'countrycode': u'SYR',
                 u'placename': u'Damascus'}]
        assert result == gold


def test_mitie_context():
    __location__ = os.path.realpath(os.path.join(os.getcwd(),
                                    os.path.dirname(__file__)))
    config_file = glob.glob(os.path.join(__location__, '../../config.ini'))
    parser = ConfigParser()
    parser.read(config_file)
    mitie_directory = parser.get('Locations', 'mitie_directory')
    mitie_ner_model = parser.get('Locations', 'mitie_ner_model')
    sys.path.append(mitie_directory)
    ner_model = named_entity_extractor(mitie_ner_model)
    text = "The meeting happened in Ontario."
    mc = mitie_context(text, ner_model)
    mc_gold = {u'entities': [{u'text': 'Ontario', u'tag': u'LOCATION',
                              u'score': 1.3923831181343844,
                              u'context': ['meeting', 'happened', 'in', '.']}]}
    assert mc == mc_gold


def test_country_process_one():
    a = CountryAPI()
    result = a.process('The meeting happened in Ontario.')
    assert result == u'CAN'


def test_country_process_two():
    a = CountryAPI()
    result = a.process('Rebels from Damascus attacked Aleppo')
    assert result == u'SYR'


def test_city_resolution():
    a = PlacesAPI()
    city_list = [("Lagos", "NGA"), ("Mogadishu", "SOM"), ("Mannheim", "DEU"),
                 ("Noida", "IND"), ("Berlin", "DEU"), ("Damascus", "SYR"),
                 ("New York", "USA"), ("San Francisco", "USA"),
                 ("Freetown", "SLE"), ("Cape Town", "ZAF"),
                 ("Windhoek", "NAM"), ("Paris", "FRA")]
    rs = [a.process(c[0], [c[1]]) for c in city_list]
    searched_cities = [c[0]['searchterm'] for c in rs]
    resolved_cities = [c[0]['placename'] for c in rs]
    assert resolved_cities == searched_cities
import os

from ..country import CountryAPI
from ..places import PlacesAPI


def test_places_api_one():
    if os.environ.get('CI'):
        ci = 'circle'
        assert ci == 'circle'
    else:
        a = PlacesAPI()
        locs = {u'entities': [{u'context': ['meeting', 'happened', 'in', '.'],
                               u'score': 1.3923831181343844,
                               u'tag': u'LOCATION',
                               u'text': 'Ontario'}]}
        result = a.process(locs, 'CAN')
        gold = [{u'countrycode': u'CAN', u'lat': 43.65004, u'lon': -79.90554,
                 u'placename': u'SunnyView Dental', u'searchterm': 'Ontario'}]
        assert result == gold


def test_country_process_one():
    a = CountryAPI()
    result = a.process('The meeting happened in Ontario.')
    assert result == u'CAN'


def test_country_process_two():
    a = CountryAPI()
    result = a.process('Rebels from Damascus attacked Aleppo')
    assert result == u'SYR'
mit
Python
2443c891e5f9cccb5c36b02303a3b9b7a94a4c45
Change Jinja escape sequences.
bamos/beamer-snippets,bamos/beamer-snippets,bamos/beamer-snippets
generate.py
generate.py
#!/usr/bin/env python3

import os
import shutil
from jinja2 import Environment,FileSystemLoader
from pygments import highlight
from pygments.lexers import TexLexer
from pygments.formatters import HtmlFormatter
from subprocess import Popen,PIPE

env = Environment(loader=FileSystemLoader("tmpl"),
                  block_start_string='~{',block_end_string='}~',
                  variable_start_string='~{{', variable_end_string='}}~')

snippets_dir = "snippets"
dist_dir = "dist"
html_index = "/index.html"
gen_snippets_dir = "/gen_snippets"
static_dir = "static"

shutil.rmtree(dist_dir, ignore_errors=True)
shutil.copytree(static_dir, dist_dir)
os.makedirs(dist_dir+"/"+gen_snippets_dir)

snippets = []
for subdir, dirs, files in os.walk(snippets_dir):
    for fname in files:
        trimmedName, ext = os.path.splitext(fname)
        full_path = subdir + "/" + fname
        if ext == '.tex':
            with open(full_path, "r") as snippet_f:
                gen_tex_name = gen_snippets_dir+"/"+fname
                gen_pdf_name = gen_snippets_dir+"/"+trimmedName+".pdf"
                gen_png_name = gen_snippets_dir+"/"+trimmedName+".png"
                snippet_content = snippet_f.read().strip()
                with open(dist_dir+"/"+gen_tex_name, "w") as f:
                    f.write(env.get_template("base.jinja.tex").render(
                        content=snippet_content
                    ))
                snippets.append({
                    'fname': trimmedName,
                    'pdf': gen_pdf_name,
                    'png': gen_png_name,
                    'content': highlight(snippet_content, TexLexer(), HtmlFormatter())
                })

p = Popen(['make', "-f", "../../Makefile.slides", "-C", dist_dir+"/"+gen_snippets_dir],
          stdout=PIPE, stderr=PIPE)
out = p.communicate()
if out[1]:
    print("Warning: Make stderr non-empty.")
    print("===Stdout:")
    print(out[0].decode())
    print("===Stderr:")
    print(out[1].decode())

with open("tmpl/preamble.tex", "r") as f:
    preamble = f.read()

with open(dist_dir+"/"+html_index, "w") as idx_f:
    idx_f.write(env.get_template("index.jinja.html").render(
        snippets=snippets,
        base=highlight(
            env.get_template("base.jinja.tex").render(
                content="Start content here."
            ),
            TexLexer(), HtmlFormatter()
        )
    ))
#!/usr/bin/env python3

import os
import shutil
from jinja2 import Environment,FileSystemLoader
from pygments import highlight
from pygments.lexers import TexLexer
from pygments.formatters import HtmlFormatter
from subprocess import Popen,PIPE

env = Environment(loader=FileSystemLoader("tmpl"))

snippets_dir = "snippets"
dist_dir = "dist"
html_index = "/index.html"
gen_snippets_dir = "/gen_snippets"
static_dir = "static"

shutil.rmtree(dist_dir, ignore_errors=True)
shutil.copytree(static_dir, dist_dir)
os.makedirs(dist_dir+"/"+gen_snippets_dir)

snippets = []
for subdir, dirs, files in os.walk(snippets_dir):
    for fname in files:
        trimmedName, ext = os.path.splitext(fname)
        full_path = subdir + "/" + fname
        if ext == '.tex':
            with open(full_path, "r") as snippet_f:
                gen_tex_name = gen_snippets_dir+"/"+fname
                gen_pdf_name = gen_snippets_dir+"/"+trimmedName+".pdf"
                gen_png_name = gen_snippets_dir+"/"+trimmedName+".png"
                snippet_content = snippet_f.read().strip()
                with open(dist_dir+"/"+gen_tex_name, "w") as f:
                    f.write(env.get_template("base.jinja.tex").render(
                        content=snippet_content
                    ))
                snippets.append({
                    'fname': trimmedName,
                    'pdf': gen_pdf_name,
                    'png': gen_png_name,
                    'content': highlight(snippet_content, TexLexer(), HtmlFormatter())
                })

p = Popen(['make', "-f", "../../Makefile.slides", "-C", dist_dir+"/"+gen_snippets_dir],
          stdout=PIPE, stderr=PIPE)
out = p.communicate()
if out[1]:
    print("Warning: Make stderr non-empty.")
    print("===Stdout:")
    print(out[0].decode())
    print("===Stderr:")
    print(out[1].decode())

with open("tmpl/preamble.tex", "r") as f:
    preamble = f.read()

with open(dist_dir+"/"+html_index, "w") as idx_f:
    idx_f.write(env.get_template("index.jinja.html").render(
        snippets=snippets,
        base=highlight(
            env.get_template("base.jinja.tex").render(
                content="Start content here."
            ),
            TexLexer(), HtmlFormatter()
        )
    ))
mit
Python
54e2359ed2cd75b87dc4a8007df6b252af3a3765
fix typo
econ-ark/HARK,econ-ark/HARK
HARK/ConsumptionSaving/tests/test_ConsLaborModel.py
HARK/ConsumptionSaving/tests/test_ConsLaborModel.py
from HARK.ConsumptionSaving.ConsLaborModel import (
    LaborIntMargConsumerType,
    init_labor_lifecycle,
)
import unittest


class test_LaborIntMargConsumerType(unittest.TestCase):
    def setUp(self):
        self.model = LaborIntMargConsumerType()
        self.model_finite_lifecycle = LaborIntMargConsumerType(**init_labor_lifecycle)
        self.model_finite_lifecycle.cycles = 1

    def test_solution(self):
        self.model.solve()
        self.model_finite_lifecycle.solve()

        self.model.T_sim = 120
        self.model.track_vars = ["bNrmNow", "cNrmNow"]
        self.model.initializeSim()
        self.model.simulate()
from HARK.ConsumptionSaving.ConsLaborModel import (
    LaborIntMargConsumerType,
    init_labor_lifecycle,
)
import unittest


class test_LaborIntMargConsumerType(unittest.TestCase):
    def setUp(self):
        self.model = LaborIntMargConsumerType()
        self.model_finte_lifecycle = LaborIntMargConsumerType(**init_labor_lifecycle)
        self.model_finte_lifecycle.cycles = 1

    def test_solution(self):
        self.model.solve()
        self.model_finte_lifecycle.solve()

        self.model.T_sim = 120
        self.model.track_vars = ["bNrmNow", "cNrmNow"]
        self.model.initializeSim()
        self.model.simulate()
apache-2.0
Python
c1b4216e610a46260f52d5ed71267a2ed5fcdd25
update debug url to account for downloads
hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare
hs_core/debug_urls.py
hs_core/debug_urls.py
"""Extra URLs that add debugging capabilities to resources.""" from django.conf.urls import url from hs_core import views urlpatterns = [ # Resource Debugging: print consistency problems in a resource url(r'^debug/resource/(?P<shortkey>[0-9a-f-]+)/$', views.debug_resource_view.debug_resource, name='debug_resource'), url(r'^debug/resource/(?P<shortkey>[0-9a-f-]+)/irods-issues/$', views.debug_resource_view.irods_issues, name='debug_resource'), url(r'^taskstatus/(?P<task_id>[A-z0-9\-]+)/$', views.debug_resource_view.check_task_status, name='get_debug_task_status'), ]
"""Extra URLs that add debugging capabilities to resources.""" from django.conf.urls import url from hs_core import views urlpatterns = [ # Resource Debugging: print consistency problems in a resource url(r'^resource/(?P<shortkey>[0-9a-f-]+)/debug/$', views.debug_resource_view.debug_resource, name='debug_resource'), url(r'^resource/(?P<shortkey>[0-9a-f-]+)/debug/irods-issues/$', views.debug_resource_view.irods_issues, name='debug_resource'), url(r'^taskstatus/(?P<task_id>[A-z0-9\-]+)/$', views.debug_resource_view.check_task_status, name='get_debug_task_status'), ]
bsd-3-clause
Python
600a19b8a3f6d320b00d1d2b25e5c0f341f821d1
bump version
floydsoft/kaggle-cli,floydwch/kaggle-cli
kaggle_cli/main.py
kaggle_cli/main.py
import sys

from cliff.app import App
from cliff.commandmanager import CommandManager

VERSION = '0.6.1'


class KaggleCLI(App):
    def __init__(self):
        super(KaggleCLI, self).__init__(
            description='An unofficial Kaggle command line tool.',
            version=VERSION,
            command_manager=CommandManager('kaggle_cli'),
        )


def main(argv=sys.argv[1:]):
    app = KaggleCLI()
    return app.run(argv)

if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
import sys

from cliff.app import App
from cliff.commandmanager import CommandManager

VERSION = '0.6.0'


class KaggleCLI(App):
    def __init__(self):
        super(KaggleCLI, self).__init__(
            description='An unofficial Kaggle command line tool.',
            version=VERSION,
            command_manager=CommandManager('kaggle_cli'),
        )


def main(argv=sys.argv[1:]):
    app = KaggleCLI()
    return app.run(argv)

if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
mit
Python
ad6b055b53d621addc3565209c7af095b6d6d0e7
Add .delete() and the start of Room
nprapps/HypChat,dougkeen/HypChat,nprapps/HypChat,RidersDiscountCom/HypChat,dougkeen/HypChat
hypchat/jsonobject.py
hypchat/jsonobject.py
from __future__ import absolute_import, division
import json
import re
from . import requests

_urls_to_objects = {}


class Linker(object):
    """
    Responsible for on-demand loading of JSON objects.
    """
    def __init__(self, url, parent=None, _requests=None):
        self.url = url
        self.__parent = parent
        self._requests = _requests or __import__('requests')

    def __call__(self):
        def _object_hook(obj):
            if 'links' in obj:
                rv = JsonObject(obj)
                rv._requests = self._requests
                return rv
            else:
                return obj
        rv = json.JSONDecoder(object_hook=_object_hook).decode(self._requests.get(self.url).text)
        rv._requests = self._requests
        if self.__parent is not None:
            rv.parent = self.__parent
        return rv

    def __repr__(self):
        return "<%s url=%r>" % (type(self).__name__, self.url)


class JsonObject(dict):
    """
    Nice wrapper around the JSON objects and their links.
    """
    def __getattr__(self, name):
        if name in self.get('links', {}):
            return Linker(self['links'][name], parent=self, _requests=self._requests)
        elif name in self:
            return self[name]
        else:
            raise AttributeError("%r object has no attribute %r" % (type(self).__name__, name))

    def save(self):
        return requests.put(self['links']['self']).json()

    def delete(self):
        return requests.delete(self['links']['self']).json()


class Room(JsonObject):
    def message(self, *p, **kw):
        """
        Redirects to notification (for now)
        """
        return self.notification(*p, **kw)

    def notification(self, message, color='yellow', notify=False, format='html'):
        raise NotImplementedError

    def topic(self, text):
        raise NotImplementedError

    def history(self, date='recent'):
        raise NotImplementedError

    def invite(self, user, reason):
        raise NotImplementedError
_urls_to_objects[re.compile(r'https://api.hipchat.com/v2/room/[^/]+')] = Room
from __future__ import absolute_import, division
import json
from . import requests


class Linker(object):
    """
    Responsible for on-demand loading of JSON objects.
    """
    def __init__(self, url, parent=None, _requests=None):
        self.url = url
        self.__parent = parent
        self._requests = _requests or __import__('requests')

    def __call__(self):
        def _object_hook(obj):
            if 'links' in obj:
                rv = JsonObject(obj)
                rv._requests = self._requests
                return rv
            else:
                return obj
        rv = json.JSONDecoder(object_hook=_object_hook).decode(self._requests.get(self.url).text)
        rv._requests = self._requests
        if self.__parent is not None:
            rv.parent = self.__parent
        return rv

    def __repr__(self):
        return "<%s url=%r>" % (type(self).__name__, self.url)


class JsonObject(dict):
    """
    Nice wrapper around the JSON objects and their links.
    """
    def __getattr__(self, name):
        if name in self.get('links', {}):
            return Linker(self['links'][name], parent=self, _requests=self._requests)
        elif name in self:
            return self[name]
        else:
            raise AttributeError("%r object has no attribute %r" % (type(self).__name__, name))

    def save(self):
        return requests.put(self['links']['self']).json()
mit
Python
fe9226898772c4ff909f9c3f0cb05c271333b73a
Make auth_url lookup dynamic
jasondunsmore/heat,jasondunsmore/heat,openstack/heat,noironetworks/heat,openstack/heat,steveb/heat,cwolferh/heat-scratch,cwolferh/heat-scratch,noironetworks/heat,steveb/heat
heat/common/auth_url.py
heat/common/auth_url.py
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from webob import exc

from heat.common import endpoint_utils
from heat.common.i18n import _
from heat.common import wsgi


class AuthUrlFilter(wsgi.Middleware):

    def __init__(self, app, conf):
        super(AuthUrlFilter, self).__init__(app)
        self.conf = conf
        self._auth_url = None

    @property
    def auth_url(self):
        if not self._auth_url:
            self._auth_url = self._get_auth_url()
        return self._auth_url

    def _get_auth_url(self):
        if 'auth_uri' in self.conf:
            return self.conf['auth_uri']
        else:
            return endpoint_utils.get_auth_uri(v3=False)

    def _validate_auth_url(self, auth_url):
        """Validate auth_url to ensure it can be used."""
        if not auth_url:
            raise exc.HTTPBadRequest(_('Request missing required header '
                                       'X-Auth-Url'))
        allowed = cfg.CONF.auth_password.allowed_auth_uris
        if auth_url not in allowed:
            raise exc.HTTPUnauthorized(_('Header X-Auth-Url "%s" not '
                                         'an allowed endpoint') % auth_url)
        return True

    def process_request(self, req):
        auth_url = self.auth_url
        if cfg.CONF.auth_password.multi_cloud:
            auth_url = req.headers.get('X-Auth-Url')
            self._validate_auth_url(auth_url)

        req.headers['X-Auth-Url'] = auth_url
        return None


def filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

    def auth_url_filter(app):
        return AuthUrlFilter(app, conf)
    return auth_url_filter
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from webob import exc

from heat.common import endpoint_utils
from heat.common.i18n import _
from heat.common import wsgi


class AuthUrlFilter(wsgi.Middleware):

    def __init__(self, app, conf):
        super(AuthUrlFilter, self).__init__(app)
        self.conf = conf
        self.auth_url = self._get_auth_url()

    def _get_auth_url(self):
        if 'auth_uri' in self.conf:
            return self.conf['auth_uri']
        else:
            return endpoint_utils.get_auth_uri(v3=False)

    def _validate_auth_url(self, auth_url):
        """Validate auth_url to ensure it can be used."""
        if not auth_url:
            raise exc.HTTPBadRequest(_('Request missing required header '
                                       'X-Auth-Url'))
        allowed = cfg.CONF.auth_password.allowed_auth_uris
        if auth_url not in allowed:
            raise exc.HTTPUnauthorized(_('Header X-Auth-Url "%s" not '
                                         'an allowed endpoint') % auth_url)
        return True

    def process_request(self, req):
        auth_url = self.auth_url
        if cfg.CONF.auth_password.multi_cloud:
            auth_url = req.headers.get('X-Auth-Url')
            self._validate_auth_url(auth_url)

        req.headers['X-Auth-Url'] = auth_url
        return None


def filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

    def auth_url_filter(app):
        return AuthUrlFilter(app, conf)
    return auth_url_filter
apache-2.0
Python
a79db7cf85dac6d74d7929137f640a0ac10ddf7d
return from sys.exit for easier testing
bfontaine/p7doi
p7doi/__init__.py
p7doi/__init__.py
# -*- coding: UTF-8 -*-

from __future__ import print_function

import webbrowser
import sys

__version__ = '0.0.1'

DOI_URL = 'http://rproxy.sc.univ-paris-diderot.fr/login' + \
          '?url=http://dx.doi.org/%s'


def make_doi_url(doi):
    """
    Return an URL for the given DOI
    """
    return DOI_URL % doi


def open_url(url):
    """
    Open an URL in the default browser, in a new tab if possible
    """
    webbrowser.open_new_tab(url)


def open_doi(doi):
    """
    Open the URL for the given DOI in the default browser
    """
    open_url(make_doi_url(doi))


def cli():
    """
    CLI endpoint
    """
    if len(sys.argv) < 2:
        print('Usage: %s <doi>' % sys.argv[0])
        return sys.exit(1)

    doi = sys.argv[1]

    if doi.startswith('-'):
        if doi in ['-v', '-version', '--version']:
            print('p7doi v%s' % __version__)
        else:
            print("Unrecognized option: '%s'" % doi)
            return sys.exit(1)
        return sys.exit(0)

    open_doi(doi)
# -*- coding: UTF-8 -*-

from __future__ import print_function

import webbrowser
import sys

__version__ = '0.0.1'

DOI_URL = 'http://rproxy.sc.univ-paris-diderot.fr/login' + \
          '?url=http://dx.doi.org/%s'


def make_doi_url(doi):
    """
    Return an URL for the given DOI
    """
    return DOI_URL % doi


def open_url(url):
    """
    Open an URL in the default browser, in a new tab if possible
    """
    webbrowser.open_new_tab(url)


def open_doi(doi):
    """
    Open the URL for the given DOI in the default browser
    """
    open_url(make_doi_url(doi))


def cli():
    """
    CLI endpoint
    """
    if len(sys.argv) < 2:
        print('Usage: %s <doi>' % sys.argv[0])
        sys.exit(1)

    doi = sys.argv[1]

    if doi.startswith('-'):
        if doi in ['-v', '-version', '--version']:
            print('p7doi v%s' % __version__)
        else:
            print("Unrecognized option: '%s'" % doi)
            sys.exit(1)
        sys.exit(0)

    open_doi(doi)
mit
Python
eaac4e45928b7008e6c561e28e9b5ed5dc427587
fix redis storage
mihau/labDNS
labDNS/storages.py
labDNS/storages.py
try:
    import redis
except ImportError:
    redis = None


class BaseStorage:
    DEFAULT_CONFIG = dict()

    def __init__(self, config):
        self.config = self.DEFAULT_CONFIG
        self._configure(config)

    def get(self, key):
        raise NotImplementedError

    def _configure(self, config):
        self.config.update(config)


class DictStorage(BaseStorage):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dictionary = self.config

    def get(self, key, default=None):
        return self.dictionary.get(key, default)


class RedisStorage(BaseStorage):
    DEFAULT_SETTINGS = dict(host='localhost', port=6379, db=0)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.redis = redis.StrictRedis(**self.config)

    def get(self, key, default=None):
        return self.redis.get(key).decode("utf-8") or default
try:
    import redis
except ImportError:
    redis = None


class BaseStorage:
    DEFAULT_CONFIG = dict()

    def __init__(self, config):
        self.config = self.DEFAULT_CONFIG
        self._configure(config)

    def get(self, key):
        raise NotImplementedError

    def _configure(self, config):
        self.config.update(config)


class DictStorage(BaseStorage):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dictionary = self.config

    def get(self, key, default=None):
        return self.dictionary.get(key, default)


class RedisStorage(BaseStorage):
    DEFAULT_SETTINGS = dict(host='localhost', port=6379, db=0)

    def __init__(self, config):
        self.redis = redis.StrictRedis(**self.config)

    def get(self, key, default=None):
        return self.redis.get(key, default)
bsd-3-clause
Python
13ffa4113341c13e635896f94a29df5cff5c0348
Build objects in JSON generator tool
quicktype/quicktype,quicktype/quicktype,quicktype/quicktype,quicktype/quicktype,quicktype/quicktype
test/generate-json.py
test/generate-json.py
#!/usr/bin/env python

import argparse
import random

def random_array_element():
    return random.choice(['123', 'true', 'false', 'null', '3.1415', '"foo"'])

def main():
    parser = argparse.ArgumentParser(description="Generate a large JSON document.")
    parser.add_argument('--array-size', nargs=1, type=int, default=[100000])
    parser.add_argument('--array-type', choices=['int', 'array', 'object'], default='object')
    parser.add_argument('--array-elements', nargs=1, type=int, default=[3])
    parser.add_argument('--object-size', nargs=1, type=int, default=None)
    args = parser.parse_args()

    if args.object_size:
        print('{')
        for i in range(args.object_size[0] - 1):
            print(' "x%d": %s,' % (i, random_array_element()))
        print(' "no": "comma"')
        print('}')
    else:
        n = args.array_size[0]
        type = args.array_type

        print('{"x": [')

        if type == 'int':
            elem_format = "%d%s"
            need_i = True
        elif type == 'object':
            elem_format = '{"a": %d}%s'
            need_i = True
        elif type == 'array':
            nelems = args.array_elements[0]
            arr = []
            if nelems > 0:
                arr.append('%s')
            if nelems > 1:
                arr.extend([random_array_element() for _ in range(nelems-1)])
            elem_format = '[%s]%%s' % ", ".join(arr)
            need_i = nelems > 0
        else:
            raise Exception("Unknown array type %s" % type)

        for i in range(n):
            semicolon = "," if i < n-1 else ""
            if need_i:
                print(elem_format % (i, semicolon))
            else:
                print(elem_format % semicolon)

        print(']}')

if __name__ == "__main__":
    main()
#!/usr/bin/env python

import argparse
import random

def random_array_element():
    return random.choice(['123', 'true', 'false', 'null', '3.1415', '"foo"'])

def main():
    parser = argparse.ArgumentParser(description="Generate a large JSON document.")
    parser.add_argument('--array-size', nargs=1, type=int, default=[100000])
    parser.add_argument('--array-type', choices=['int', 'array', 'object'], default='object')
    parser.add_argument('--array-elements', nargs=1, type=int, default=[3])
    args = parser.parse_args()

    n = args.array_size[0]
    type = args.array_type

    print('{"x": [')

    if type == 'int':
        elem_format = "%d%s"
        need_i = True
    elif type == 'object':
        elem_format = '{"a": %d}%s'
        need_i = True
    elif type == 'array':
        nelems = args.array_elements[0]
        arr = []
        if nelems > 0:
            arr.append('%s')
        if nelems > 1:
            arr.extend([random_array_element() for _ in range(nelems-1)])
        elem_format = '[%s]%%s' % ", ".join(arr)
        need_i = nelems > 0
    else:
        raise Exception("Unknown array type %s" % type)

    for i in range(n):
        semicolon = "," if i < n-1 else ""
        if need_i:
            print(elem_format % (i, semicolon))
        else:
            print(elem_format % semicolon)

    print(']}')

if __name__ == "__main__":
    main()
apache-2.0
Python
c48b0ae4331d1d039cb6bc29ef25fc7c4a5df8da
Bump version to 0.2.7
approvals/ApprovalTests.Python,tdpreece/ApprovalTests.Python,approvals/ApprovalTests.Python,approvals/ApprovalTests.Python
approvaltests/version.py
approvaltests/version.py
version_number = "0.2.7"
version_number = "0.2.6"
apache-2.0
Python
903d9b000c4d7b333b5d3000aeb38b7e4d818c27
add "Partly Cloudy" to color_icons
paulollivier/i3pystatus,facetoe/i3pystatus,claria/i3pystatus,richese/i3pystatus,richese/i3pystatus,fmarchenko/i3pystatus,opatut/i3pystatus,ncoop/i3pystatus,yang-ling/i3pystatus,enkore/i3pystatus,juliushaertl/i3pystatus,m45t3r/i3pystatus,Arvedui/i3pystatus,opatut/i3pystatus,Elder-of-Ozone/i3pystatus,fmarchenko/i3pystatus,teto/i3pystatus,paulollivier/i3pystatus,onkelpit/i3pystatus,MaicoTimmerman/i3pystatus,plumps/i3pystatus,yang-ling/i3pystatus,drwahl/i3pystatus,m45t3r/i3pystatus,eBrnd/i3pystatus,ismaelpuerto/i3pystatus,drwahl/i3pystatus,Elder-of-Ozone/i3pystatus,Arvedui/i3pystatus,teto/i3pystatus,ncoop/i3pystatus,claria/i3pystatus,ismaelpuerto/i3pystatus,facetoe/i3pystatus,asmikhailov/i3pystatus,schroeji/i3pystatus,MaicoTimmerman/i3pystatus,enkore/i3pystatus,plumps/i3pystatus,juliushaertl/i3pystatus,schroeji/i3pystatus,asmikhailov/i3pystatus,onkelpit/i3pystatus,eBrnd/i3pystatus
i3pystatus/weather.py
i3pystatus/weather.py
from i3pystatus import IntervalModule
import pywapi
from i3pystatus.core.util import internet, require


class Weather(IntervalModule):
    """
    This module gets the weather from weather.com using pywapi module
    First, you need to get the code for the location from the www.weather.com

    Available formatters:

    * {current_temp}
    * {humidity}

    Requires pywapi from PyPI.
    """

    interval = 20

    settings = (
        "location_code",
        ("colorize", "Enable color with temperature and UTF-8 icons."),
        ("units", "Celsius (metric) or Fahrenheit (imperial)"),
        "format",
    )
    required = ("location_code",)

    units = "metric"
    format = "{current_temp}"
    colorize = None
    color_icons = {'Fair': (u'\u2600', '#FFCC00'),
                   'Cloudy': (u'\u2601', '#F8F8FF'),
                   'Partly Cloudy': (u'\u2601', '#F8F8FF'),  # \u26c5 is not in many fonts
                   'Rainy': (u'\u2614', '#CBD2C0'),
                   'Sunny': (u'\u263C', '#FFFF00'),
                   'Snow': (u'\u2603', '#FFFFFF'),
                   'default': ('', None),
                   }

    @require(internet)
    def run(self):
        result = pywapi.get_weather_from_weather_com(self.location_code, self.units)
        conditions = result['current_conditions']
        temperature = conditions['temperature']
        humidity = conditions['humidity']
        units = result['units']
        color = None
        current_temp = '{t}°{d} '.format(t=temperature, d=units['temperature'])
        if self.colorize:
            icon, color = self.color_icons.get(conditions['text'],
                                               self.color_icons['default'])
            current_temp = '{t}°{d} {i}'.format(t=temperature,
                                                d=units['temperature'],
                                                i=icon)
            color = color

        self.output = {
            "full_text": self.format.format(current_temp=current_temp, humidity=humidity),
            "color": color
        }
from i3pystatus import IntervalModule
import pywapi
from i3pystatus.core.util import internet, require


class Weather(IntervalModule):
    """
    This module gets the weather from weather.com using pywapi module
    First, you need to get the code for the location from the www.weather.com

    Available formatters:

    * {current_temp}
    * {humidity}

    Requires pywapi from PyPI.
    """

    interval = 20

    settings = (
        "location_code",
        ("colorize", "Enable color with temperature and UTF-8 icons."),
        ("units", "Celsius (metric) or Fahrenheit (imperial)"),
        "format",
    )
    required = ("location_code",)

    units = "metric"
    format = "{current_temp}"
    colorize = None
    color_icons = {'Fair': (u'\u2600', '#FFCC00'),
                   'Cloudy': (u'\u2601', '#F8F8FF'),
                   'Rainy': (u'\u2614', '#CBD2C0'),
                   'Sunny': (u'\u263C', '#FFFF00'),
                   'Snow': (u'\u2603', '#FFFFFF'),
                   'default': ('', None),
                   }

    @require(internet)
    def run(self):
        result = pywapi.get_weather_from_weather_com(self.location_code, self.units)
        conditions = result['current_conditions']
        temperature = conditions['temperature']
        humidity = conditions['humidity']
        units = result['units']
        color = None
        current_temp = '{t}°{d} '.format(t=temperature, d=units['temperature'])
        if self.colorize:
            icon, color = self.color_icons.get(conditions['text'],
                                               self.color_icons['default'])
            current_temp = '{t}°{d} {i}'.format(t=temperature,
                                                d=units['temperature'],
                                                i=icon)
            color = color

        self.output = {
            "full_text": self.format.format(current_temp=current_temp, humidity=humidity),
            "color": color
        }
mit
Python
faebe4928b4bef33efd6183f97f1ff1396a701ee
fix missing urls.
soasme/blackgate
blackgate/cli.py
blackgate/cli.py
# -*- coding: utf-8 -*-

import click

from blackgate.core import component
from blackgate.config import parse_yaml_config
from blackgate.config import read_yaml_config
from blackgate.config import read_default_config
from blackgate.server import run


@click.group()
@click.option('-c', '--config', default='')
@click.pass_context
def main(ctx, config):
    if not config:
        config = read_default_config()
    else:
        config = read_yaml_config(config)

    if not config:
        ctx.fail('config not found.')

    try:
        config = parse_yaml_config(config)
    except ValueError:
        ctx.fail('config is not valid yaml.')

    ctx.obj = {}
    ctx.obj['config'] = config


@main.command()
@click.pass_context
def start(ctx):
    config = ctx.obj['config']
    component.configurations = config
    component.install()
    run(config.get('port', 9654))


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-

import click

from blackgate.core import component
from blackgate.config import parse_yaml_config
from blackgate.config import read_yaml_config
from blackgate.config import read_default_config
from blackgate.server import run


@click.group()
@click.option('-c', '--config', default='')
@click.pass_context
def main(ctx, config):
    if not config:
        config = read_default_config()
    else:
        config = read_yaml_config(config)

    if not config:
        ctx.fail('config not found.')

    try:
        config = parse_yaml_config(config)
    except ValueError:
        ctx.fail('config is not valid yaml.')

    ctx.obj['config'] = config


@main.command()
@click.pass_context
def start(ctx):
    config = ctx.obj['config']
    component.configurations = config
    component.install()
    run(config.get('port', 9654))


if __name__ == '__main__':
    main()
mit
Python
3154f0098f9696cd48536599413659e47747491f
Add api [2]
igorbpf/TheGist,igorbpf/TheGist,igorbpf/TheGist
blue/__init__.py
blue/__init__.py
from flask import Flask

app = Flask(__name__)

from blue.site.routes import mod
from blue.api.routes import mod

app.register_blueprint(site.routes.mod)
app.register_blueprint(api.routes.mod, url_prefix='/api')
from flask import Flask

app = Flask(__name__)

from blue.site.routes import mod
from blue.api.routes import mod

app.register_blueprint(site.routes.mod)
app.register_blueprint(api.routes.mod)
mit
Python
1e930adbfb1714670ad04717401b36b59bf12558
Bump version to 0.0.2
laughingman7743/BigQuery-DatasetManager
bqdm/__init__.py
bqdm/__init__.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

__version__ = '0.0.2'

CONTEXT_SETTINGS = dict(
    help_option_names=['-h', '--help'],
    max_content_width=120,
)
# -*- coding: utf-8 -*-
from __future__ import absolute_import

__version__ = '0.0.1'

CONTEXT_SETTINGS = dict(
    help_option_names=['-h', '--help'],
    max_content_width=120,
)
mit
Python
a0dfb1ce1a72880da34ad817c8021e54e2ce0e5d
add fields.
jonhadfield/acli,jonhadfield/acli
lib/acli/output.py
lib/acli/output.py
# from tabulate import tabulate
from terminaltables import AsciiTable


def output_ec2(output_type=None, instances=None):
    if output_type == 'console':
        heading = ['id', 'state', 'type', 'image', 'public ip', 'private ip']
        table_data = [heading]
        for instance in instances:
            instance_id = instance[0].id
            instance_state = instance[0].state
            instance_type = instance[0].instance_type
            image_id = instance[0].image_id
            public_ip = instance[0].ip_address
            private_ip = instance[0].private_ip_address
            table_data.append([instance_id,
                               instance_state,
                               instance_type,
                               image_id,
                               public_ip if public_ip else '-',
                               private_ip if private_ip else '-'])
        table = AsciiTable(table_data)
        print(table.table)


def output_elb(output_type=None, elbs=None):
    if output_type == 'console':
        heading = ['id', 'name']
        table_data = [heading]
        for elb in elbs:
            elb_id = elb.name
            elb_name = elb.name
            table_data.append([elb_id, elb_name])
        table = AsciiTable(table_data)
        print(table.table)
# from tabulate import tabulate
from terminaltables import AsciiTable


def output_ec2(output_type=None, instances=None):
    if output_type == 'console':
        heading = ['id', 'state']
        table_data = [heading]
        for instance in instances:
            instance_id = instance[0].id
            instance_state = instance[0].state
            table_data.append([instance_id, instance_state])
        table = AsciiTable(table_data)
        print(table.table)


def output_elb(output_type=None, elbs=None):
    if output_type == 'console':
        heading = ['id', 'name']
        table_data = [heading]
        for elb in elbs:
            elb_id = elb.name
            elb_name = elb.name
            table_data.append([elb_id, elb_name])
        table = AsciiTable(table_data)
        print(table.table)

# def console_table(collection_type, collection_data):
#     pass
mit
Python
2d7b3afaca97a3e6a115c077586d0a9fb9daf8b2
Fix imap connection lost (#380)
enkore/i3pystatus,Arvedui/i3pystatus,drwahl/i3pystatus,richese/i3pystatus,m45t3r/i3pystatus,m45t3r/i3pystatus,richese/i3pystatus,enkore/i3pystatus,ncoop/i3pystatus,facetoe/i3pystatus,fmarchenko/i3pystatus,schroeji/i3pystatus,asmikhailov/i3pystatus,yang-ling/i3pystatus,asmikhailov/i3pystatus,facetoe/i3pystatus,schroeji/i3pystatus,fmarchenko/i3pystatus,teto/i3pystatus,Arvedui/i3pystatus,drwahl/i3pystatus,yang-ling/i3pystatus,ncoop/i3pystatus,teto/i3pystatus
i3pystatus/mail/imap.py
i3pystatus/mail/imap.py
import imaplib
import socket

from i3pystatus.mail import Backend


class IMAP(Backend):
    """
    Checks for mail on a IMAP server
    """

    settings = (
        "host",
        "port",
        "username",
        "password",
        ('keyring_backend', 'alternative keyring backend for retrieving credentials'),
        "ssl",
        "mailbox",
    )
    required = ("host", "username", "password")
    keyring_backend = None

    port = 993
    ssl = True
    mailbox = "INBOX"

    imap_class = imaplib.IMAP4
    connection = None
    last = 0

    def init(self):
        if self.ssl:
            self.imap_class = imaplib.IMAP4_SSL

    def get_connection(self):
        if self.connection:
            try:
                self.connection.select(self.mailbox)
            except socket.error:
                # NOTE(sileht): retry just once if the connection have been
                # broken to ensure this is not a sporadic connection lost.
                # Like wifi reconnect, sleep wake up
                try:
                    self.connection.logout()
                except socket.error:
                    pass
                self.connection = None

        if not self.connection:
            self.connection = self.imap_class(self.host, self.port)
            self.connection.login(self.username, self.password)
            self.connection.select(self.mailbox)

        return self.connection

    @property
    def unread(self):
        conn = self.get_connection()
        self.last = len(conn.search(None, "UnSeen")[1][0].split())
        return self.last


Backend = IMAP
import sys
import imaplib

from i3pystatus.mail import Backend
from i3pystatus.core.util import internet


class IMAP(Backend):
    """
    Checks for mail on a IMAP server
    """

    settings = (
        "host",
        "port",
        "username",
        "password",
        ('keyring_backend', 'alternative keyring backend for retrieving credentials'),
        "ssl",
        "mailbox",
    )
    required = ("host", "username", "password")
    keyring_backend = None

    port = 993
    ssl = True
    mailbox = "INBOX"

    imap_class = imaplib.IMAP4
    connection = None
    last = 0

    def init(self):
        if self.ssl:
            self.imap_class = imaplib.IMAP4_SSL

    def get_connection(self):
        if not self.connection:
            self.connection = self.imap_class(self.host, self.port)
            self.connection.login(self.username, self.password)
            self.connection.select(self.mailbox)
        self.connection.select(self.mailbox)
        return self.connection

    @property
    def unread(self):
        if internet():
            conn = self.get_connection()
            self.last = len(conn.search(None, "UnSeen")[1][0].split())
        return self.last


Backend = IMAP
mit
Python
17f1c210c9c8b410cb6888a51ea1d863b74c14be
Use has_module check in _can_read
patricksnape/imageio,kuchi/imageio,imageio/imageio
imageio/plugins/gdal.py
imageio/plugins/gdal.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.

""" Plugin for reading gdal files.
"""

from __future__ import absolute_import, print_function, division

from .. import formats
from ..core import Format, has_module

_gdal = None  # lazily loaded in load_lib()


def load_lib():
    global _gdal
    try:
        import osgeo.gdal as _gdal
    except ImportError:
        raise ImportError("The GDAL format relies on the GDAL package."
                          "Please refer to http://www.gdal.org/"
                          "for further instructions.")
    return _gdal


GDAL_FORMATS = ('.tiff', ' .tif', '.img', '.ecw', '.jpg', '.jpeg')


class GdalFormat(Format):

    """

    Parameters for reading
    ----------------------
    None
    """

    def _can_read(self, request):
        if request.filename.lower().endswith('.ecw'):
            return True
        if has_module('osgeo.gdal'):
            return request.filename.lower().endswith(self.extensions)

    def _can_write(self, request):
        return False

    # --

    class Reader(Format.Reader):

        def _open(self):
            if not _gdal:
                load_lib()
            self._ds = _gdal.Open(self.request.get_local_filename())

        def _close(self):
            del self._ds

        def _get_length(self):
            return 1

        def _get_data(self, index):
            if index != 0:
                raise IndexError('Gdal file contains only one dataset')
            return self._ds.ReadAsArray(), self._get_meta_data(index)

        def _get_meta_data(self, index):
            return self._ds.GetMetadata()


# Add this format
formats.add_format(GdalFormat(
    'gdal', 'Geospatial Data Abstraction Library',
    ' '.join(GDAL_FORMATS), 'iIvV'))
# -*- coding: utf-8 -*-
# Copyright (c) 2015, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.

""" Plugin for reading gdal files.
"""

from __future__ import absolute_import, print_function, division

from .. import formats
from ..core import Format

_gdal = None  # lazily loaded in load_lib()


def load_lib():
    global _gdal
    try:
        import osgeo.gdal as _gdal
    except ImportError:
        raise ImportError("The GDAL format relies on the GDAL package."
                          "Please refer to http://www.gdal.org/"
                          "for further instructions.")
    return _gdal


GDAL_FORMATS = ('.tiff', ' .tif', '.img', '.ecw', '.jpg', '.jpeg')


class GdalFormat(Format):

    """

    Parameters for reading
    ----------------------
    None
    """

    def _can_read(self, request):
        return request.filename.lower().endswith(GDAL_FORMATS)

    def _can_write(self, request):
        return False

    # --

    class Reader(Format.Reader):

        def _open(self):
            if not _gdal:
                load_lib()
            self._ds = _gdal.Open(self.request.get_local_filename())

        def _close(self):
            del self._ds

        def _get_length(self):
            return 1

        def _get_data(self, index):
            if index != 0:
                raise IndexError('Gdal file contains only one dataset')
            return self._ds.ReadAsArray(), self._get_meta_data(index)

        def _get_meta_data(self, index):
            return self._ds.GetMetadata()


# Add this format
formats.add_format(GdalFormat(
    'gdal', 'Geospatial Data Abstraction Library',
    ' '.join(GDAL_FORMATS), 'iIvV'))
bsd-2-clause
Python
86791effb26c33514bbc6713f67a903e8d9e5295
Choose a single corpus for a given series,date pair.
ViralTexts/vt-passim,ViralTexts/vt-passim,ViralTexts/vt-passim
scripts/c19.py
scripts/c19.py
from __future__ import print_function

import sys

from pyspark.sql import SparkSession
from pyspark.sql.functions import col, struct, max

if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: c19.py <input> <output>", file=sys.stderr)
        exit(-1)

    spark = SparkSession.builder.appName('Select c19').getOrCreate()

    raw = spark.read.option('mergeSchema','true').load(sys.argv[1])
    df = raw.filter(col('date') < '1900')

    spark.conf.set('spark.sql.shuffle.partitions', df.rdd.getNumPartitions() * 2)

    issues = df.groupBy('series', 'date')\
               .agg(max(struct('open', 'corpus'))['corpus'].alias('corpus'))

    df.join(issues, ['series', 'date', 'corpus'], 'inner')\
      .write.save(sys.argv[2])

    spark.stop()
from __future__ import print_function

import sys

from pyspark.sql import SparkSession
from pyspark.sql.functions import lit, col, coalesce

if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: c19.py <input> <output>", file=sys.stderr)
        exit(-1)

    spark = SparkSession.builder.appName('Select c19').getOrCreate()

    raw = spark.read.option('mergeSchema','true').load(sys.argv[1])
    df = raw.filter(col('date') < '1900')

    opens = df.filter(col('open') == 'true')\
              .select('series', 'date', lit(1).alias('inopen')).distinct()

    df.join(opens, ['series', 'date'], 'left_outer')\
      .filter((col('open') == 'true') | col('inopen').isNull())\
      .drop('inopen')\
      .dropDuplicates(['id'])\
      .write.save(sys.argv[2])

    spark.stop()
apache-2.0
Python
c2d543a3de566443a2c61761f9a190e915426fec
Return stream_client instead of binding it inside method (tests now passing)
GetStream/stream-django,GetStream/stream-django
stream_django/client.py
stream_django/client.py
from stream_django import conf
import os
import stream
from stream_django.conf import DJANGO_MAJOR_VERSION
from django.core.exceptions import ImproperlyConfigured


def init_client(raise_config_error=False):
    if conf.API_KEY and conf.API_SECRET:
        return stream.connect(conf.API_KEY, conf.API_SECRET,
                              location=conf.LOCATION, timeout=conf.TIMEOUT)
    elif os.environ.get('STREAM_URL') is not None:
        return stream.connect()
    elif raise_config_error:
        raise ImproperlyConfigured('Stream credentials are not set in your settings')


stream_client = init_client(raise_config_error=DJANGO_MAJOR_VERSION<1.7)
from stream_django import conf
import os
import stream
from stream_django.conf import DJANGO_MAJOR_VERSION
from django.core.exceptions import ImproperlyConfigured


def init_client(mayRaise=False):
    if conf.API_KEY and conf.API_SECRET:
        stream_client = stream.connect(
            conf.API_KEY, conf.API_SECRET,
            location=conf.LOCATION, timeout=conf.TIMEOUT)
    elif os.environ.get('STREAM_URL') is not None:
        stream_client = stream.connect()
    else:
        stream_client = None
        if mayRaise:
            raise ImproperlyConfigured('Stream credentials are not set in your settings')


stream_client = init_client(mayRaise=DJANGO_MAJOR_VERSION<1.7)
bsd-3-clause
Python
066e60897aa931b22ce92776b896912dbec3ccf6
bump dev version
desihub/desispec,desihub/desispec
py/desispec/_version.py
py/desispec/_version.py
__version__ = '0.47.1.dev6182'
__version__ = '0.47.1.dev6104'
bsd-3-clause
Python
54c48073dfb8ffd418efe234c0c107f7a5c303a9
Fix failing imports in Python 2
mixxorz/django-inline-svg
svg/templatetags/svg.py
svg/templatetags/svg.py
from __future__ import absolute_import

import logging
import os

from django import template
from django.conf import settings
from django.contrib.staticfiles import finders
from django.utils.safestring import mark_safe

from svg.exceptions import SVGNotFound

logger = logging.getLogger(__name__)

register = template.Library()


@register.simple_tag
def svg(filename):
    path = finders.find(os.path.join('svg', '%s.svg' % filename), all=True)

    if not path:
        message = "SVG 'svg/%s.svg' not found" % filename

        if settings.DEBUG:
            raise SVGNotFound(message)
        else:
            logger.warning(message)
            return ''

    if isinstance(path, (list, tuple)):
        path = path[0]

    with open(path) as svg_file:
        svg = mark_safe(svg_file.read())

    return svg
import logging
import os

from django import template
from django.conf import settings
from django.contrib.staticfiles import finders
from django.utils.safestring import mark_safe

from svg.exceptions import SVGNotFound

logger = logging.getLogger(__name__)

register = template.Library()


@register.simple_tag
def svg(filename):
    path = finders.find(os.path.join('svg', '%s.svg' % filename), all=True)

    if not path:
        message = "SVG 'svg/%s.svg' not found" % filename

        if settings.DEBUG:
            raise SVGNotFound(message)
        else:
            logger.warning(message)
            return ''

    if isinstance(path, (list, tuple)):
        path = path[0]

    with open(path) as svg_file:
        svg = mark_safe(svg_file.read())

    return svg
mit
Python
b71ef8c05a9afa9eb3614c863650c12df0967fae
document methods
hall-lab/svtools,hall-lab/svtools,abelhj/svtools,abelhj/svtools,abelhj/svtools,ernfrid/svtools,abelhj/svtools,hall-lab/svtools,ernfrid/svtools
svtools/vcf/genotype.py
svtools/vcf/genotype.py
import sys


class Genotype(object):
    '''
    This class stores information about each sample.
    '''
    def __init__(self, variant, gt):
        '''
        Initialize the class. All instances have a GT field.
        '''
        self.format = dict()
        self.variant = variant
        self.set_format('GT', gt)

    def set_formats(self, fields, values):
        '''
        Set many format fields for this instance.
        Updates format information in the owning Variant class.
        '''
        format_set = self.variant.format_set
        add_to_active = self.variant.active_formats.add
        active_formats = self.variant.active_formats
        format_dict = self.format

        for field, value in zip(fields, values):
            if field in format_set:
                format_dict[field] = value
                if field not in active_formats:
                    add_to_active(field)
            else:
                sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
                sys.exit(1)

    def set_format(self, field, value, update_active=True):
        '''
        Set information for an individual format field.
        '''
        if field in self.variant.format_set:
            self.format[field] = value
            if field not in self.variant.active_formats:
                self.variant.active_formats.add(field)
                self.variant.update_active_format_list()
        else:
            sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
            sys.exit(1)

    def get_format(self, field):
        '''
        Get value of particular field key
        '''
        return self.format[field]

    def get_gt_string(self):
        '''
        Convert object back to string.
        If some values are missing (at the end for example) they are printed
        out as all format fields present in any Genotype instance in the
        Variant line are tracked.
        '''
        g_list = list()
        for f in self.variant.active_format_list:
            if f in self.format:
                if type(self.format[f]) == float:
                    g_list.append('%0.2f' % self.format[f])
                else:
                    g_list.append(str(self.format[f]))
            else:
                g_list.append('.')
        return ':'.join(g_list)
import sys


class Genotype(object):
    def __init__(self, variant, gt):
        self.format = dict()
        self.variant = variant
        self.set_format('GT', gt)

    def set_formats(self, fields, values):
        format_set = self.variant.format_set
        add_to_active = self.variant.active_formats.add
        active_formats = self.variant.active_formats
        format_dict = self.format

        for field, value in zip(fields, values):
            if field in format_set:
                format_dict[field] = value
                if field not in active_formats:
                    add_to_active(field)
            else:
                sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
                sys.exit(1)

    def set_format(self, field, value, update_active=True):
        if field in self.variant.format_set:
            self.format[field] = value
            if field not in self.variant.active_formats:
                self.variant.active_formats.add(field)
                self.variant.update_active_format_list()
        else:
            sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
            sys.exit(1)

    def get_format(self, field):
        return self.format[field]

    def get_gt_string(self):
        g_list = list()
        for f in self.variant.active_format_list:
            if f in self.format:
                if type(self.format[f]) == float:
                    g_list.append('%0.2f' % self.format[f])
                else:
                    g_list.append(str(self.format[f]))
            else:
                g_list.append('.')
        return ':'.join(g_list)
mit
Python
e2a0fb602c9de9f988d733a30b466dc400cd9503
update issue 84
nwebs/rdflib
test/test_issue084.py
test/test_issue084.py
from codecs import getreader
from StringIO import StringIO

from rdflib.term import URIRef
from rdflib.graph import Graph

rdf = u"""@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix : <http://www.test.org/#> .

:world rdf:type skos:Concept;
    skos:prefLabel "World"@en.

:africa rdf:type skos:Concept;
    skos:prefLabel "Africa"@en;
    skos:broaderTransitive :world.

:CI rdf:type skos:Concept;
    skos:prefLabel "C\u00f4te d'Ivoire"@en;
    skos:broaderTransitive :africa.
"""

rdf_utf8 = rdf.encode('utf-8')

rdf_reader = getreader('utf-8')(StringIO(rdf.encode('utf-8')))


def test_a():
    g = Graph()
    g.parse(data=rdf, format='n3')
    v = g.value(subject=URIRef("http://www.test.org/#CI"),
                predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
    assert v==u"C\u00f4te d'Ivoire"


def test_b():
    g = Graph()
    g.parse(data=rdf_utf8, format='n3')
    v = g.value(subject=URIRef("http://www.test.org/#CI"),
                predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
    assert v==u"C\u00f4te d'Ivoire"


def test_c():
    g = Graph()
    g.parse(source=rdf_reader, format='n3')
    v = g.value(subject=URIRef("http://www.test.org/#CI"),
                predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
    assert v==u"C\u00f4te d'Ivoire"
from rdflib.term import URIRef
from rdflib.graph import Graph

rdf = u"""@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix : <http://www.test.org/#> .

:world rdf:type skos:Concept;
    skos:prefLabel "World"@en.

:africa rdf:type skos:Concept;
    skos:prefLabel "Africa"@en;
    skos:broaderTransitive :world.

:CI rdf:type skos:Concept;
    skos:prefLabel "C\u00f4te d'Ivoire"@en;
    skos:broaderTransitive :africa.
""".encode('utf-8')


def test_issue():
    g = Graph()
    g.parse(data=rdf, format='n3')
    v = g.value(subject=URIRef("http://www.test.org/#CI"),
                predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
    assert v==u"C\u00f4te d'Ivoire"
bsd-3-clause
Python
f486280a264c195c989d59f0b3fa631d9e165a18
Fix comment
mitchgu/TAMProxy-pyHost
servo_write.py
servo_write.py
from tamproxy import Sketch, SyncedSketch, Timer
from tamproxy.devices import Servo


class ServoWrite(Sketch):
    """Cycles a servo back and forth between 1050us and 1950us pulse widths
    (most servos are 1000-2000)"""

    def setup(self):
        self.servo = Servo(self.tamp, 9)
        self.servo.write(1050)
        self.timer = Timer()
        self.end = False

    def loop(self):
        if (self.timer.millis() > 2000):
            self.timer.reset()
            if self.end:
                self.servo.write(1050)
            else:
                self.servo.write(1950)
            self.end = not self.end


if __name__ == "__main__":
    sketch = ServoWrite()
    sketch.run()
from tamproxy import Sketch, SyncedSketch, Timer
from tamproxy.devices import Servo


# Cycles a motor back and forth between -255 and 255 PWM every ~5 seconds
class ServoWrite(Sketch):

    def setup(self):
        self.servo = Servo(self.tamp, 9)
        self.servo.write(1050)
        self.timer = Timer()
        self.end = False

    def loop(self):
        if (self.timer.millis() > 2000):
            self.timer.reset()
            if self.end:
                self.servo.write(1050)
            else:
                self.servo.write(1950)
            self.end = not self.end


if __name__ == "__main__":
    sketch = ServoWrite()
    sketch.run()
mit
Python
a0a2810e52ba27bb2b6eba5d13d8a3bc88bca266
Complete overhaul because I hated the ConfigParser module.
schae234/Camoco,schae234/Camoco
camoco/Config.py
camoco/Config.py
#!/usr/env/python3

import os
import configparser
import yaml
import pprint

global cf

default_config = '''---
# YAML Camoco Configuration File
options:
    basedir: ~/.camoco/
    testdir: ~/.camoco/tests/

logging:
    log_level: verbose

test:
    force:
        RefGen: True
        COB: True
        Ontology: True
    refgen: Zm5bFGS
    cob: NewRoot
    ontology: ZmIonome
    term: Fe57
    gene: GRMZM2G000014
'''


class Level(dict):
    '''
    Ha! Take that config parser! I am accessing
    everything like an object.
    '''
    def __init__(self,*args,**kwargs):
        super().__init__(*args,**kwargs)

    def __getattr__(self,item):
        if isinstance(self[item],dict):
            return Level(self[item])
        else:
            if 'dir' in item and '~' in self[item]:
                return os.path.expanduser(self[item])
            return self[item]


class Config(object):
    def __init__(self,filename):
        filename = os.path.expanduser(filename)
        self.data = Level(yaml.load(open(filename,'r')))

    def __getattr__(self,item):
        return Level(self.data[item])

    def __repr__(self):
        return pprint.pformat(self.data)


'''
-------------------------------------------------------------------------
        Program Logic
'''

cf_file = os.path.expanduser('~/.camoco.conf')

# Check to see if there is a config file available
if not os.path.isfile(cf_file):
    with open(cf_file, 'w') as CF:
        print(default_config, file=CF)
else:
    cf = Config(cf_file)
#!/usr/env/python3

import os
import configparser

global cf

cf = configparser.ConfigParser()
cf._interpolation = configparser.ExtendedInterpolation()
cf_file = os.path.expanduser('~/.camoco.conf')

default_config = '''
[options]
basedir = ~/.camoco/
testdir = ~/.camoco/tests/

[logging]
log_level = verbose

[test]
force = True
refgen = Zm5bFGS
cob = NewRoot
ontology = ZmIonome
term = Fe57
gene = GRMZM2G000014
'''

# Check to see if
if not os.path.isfile(cf_file):
    with open(cf_file, 'w') as CF:
        print(default_config, file=CF)

cf.read(os.path.expanduser('~/.camoco.conf'))
mit
Python
648de375f5e9ae1620bc836e5d647688b541690c
Add atom package
wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build
test/test_packages.py
test/test_packages.py
import pytest


@pytest.mark.parametrize("name", [
    ("apt-file"),
    ("apt-transport-https"),
    ("atom"),
    ("blktrace"),
    ("ca-certificates"),
    ("chromium-browser"),
    ("cron"),
    ("curl"),
    ("diod"),
    ("docker-ce"),
    ("fonts-font-awesome"),
    ("git"),
    ("gnupg"),
    ("handbrake"),
    ("handbrake-cli"),
    ("haveged"),
    ("htop"),
    ("i3"),
    ("iotop"),
    ("language-pack-en-base"),
    ("laptop-mode-tools"),
    ("nfs-common"),
    ("ntop"),
    ("ntp"),
    ("openssh-client"),
    ("openssh-server"),
    ("openssh-sftp-server"),
    ("openssl"),
    ("python"),
    ("python-pip"),
    ("software-properties-common"),
    ("suckless-tools"),
    ("sysstat"),
    ("tree"),
    ("vagrant"),
    ("vim"),
    ("virtualbox"),
    ("vlc"),
    ("wget"),
    ("whois"),
    ("x264"),
    ("xfce4-terminal"),
    ("xfonts-terminus"),
    ("xinit"),
])
def test_packages(Package, name):
    assert Package(name).is_installed
import pytest


@pytest.mark.parametrize("name", [
    ("apt-file"),
    ("apt-transport-https"),
    ("blktrace"),
    ("ca-certificates"),
    ("chromium-browser"),
    ("cron"),
    ("curl"),
    ("diod"),
    ("docker-ce"),
    ("fonts-font-awesome"),
    ("git"),
    ("gnupg"),
    ("handbrake"),
    ("handbrake-cli"),
    ("haveged"),
    ("htop"),
    ("i3"),
    ("iotop"),
    ("language-pack-en-base"),
    ("laptop-mode-tools"),
    ("nfs-common"),
    ("ntop"),
    ("ntp"),
    ("openssh-client"),
    ("openssh-server"),
    ("openssh-sftp-server"),
    ("openssl"),
    ("python"),
    ("python-pip"),
    ("software-properties-common"),
    ("suckless-tools"),
    ("sysstat"),
    ("tree"),
    ("vagrant"),
    ("vim"),
    ("virtualbox"),
    ("vlc"),
    ("wget"),
    ("whois"),
    ("x264"),
    ("xfce4-terminal"),
    ("xfonts-terminus"),
    ("xinit"),
])
def test_packages(Package, name):
    assert Package(name).is_installed
mit
Python
89664ec37036553534c07d65f2df2b9fa07bfe80
Check total weights remain correct.
python-hyper/priority
test/test_priority.py
test/test_priority.py
# -*- coding: utf-8 -*-
"""
test_priority
~~~~~~~~~~~~~

Tests for the Priority trees
"""
from hypothesis import given
from hypothesis.strategies import integers, lists, tuples

import priority


STREAMS_AND_WEIGHTS = lists(
    elements=tuples(
        integers(min_value=1),
        integers(min_value=1, max_value=255)
    ),
    unique_by=lambda x: x[0],
)


class TestPriorityTree(object):
    def test_priority_tree_one_stream(self):
        """
        When only one stream is in the PriorityTree, priorities are easy.
        """
        p = priority.PriorityTree()
        p.insert_stream(stream_id=1)

        priorities = p.priorities()
        assert len(priorities) == 1
        priorities.total_weight == 16

    @given(lists(elements=integers(min_value=0)))
    def test_priority_tree_single_level(self, weights):
        """
        If lots of elements are added to the tree all at the top level, their
        weights are summed properly and the priorities object has the correct
        length.
        """
        p = priority.PriorityTree()
        stream_id = 1

        for weight in weights:
            p.insert_stream(stream_id=stream_id, weight=weight)
            stream_id += 1

        priorities = p.priorities()
        assert len(priorities) == len(weights)
        assert priorities.total_weight == sum(weights)

    @given(STREAMS_AND_WEIGHTS)
    def test_priorities_stream_weights(self, stream_data):
        """
        For a given set of priorities, we can index by ID and find the weight
        of the stream.
        """
        p = priority.PriorityTree()

        for stream_id, weight in stream_data:
            p.insert_stream(stream_id=stream_id, weight=weight)

        priorities = p.priorities()

        for stream_id, weight in stream_data:
            assert weight == priorities.stream_weight(stream_id)

    def test_drilling_down(self, readme_tree):
        """
        We can drill down each layer of the tree by stream ID.
        """
        top_level = readme_tree.priorities()
        assert 7 in top_level

        dependents = top_level[7]
        assert len(dependents) == 1
        assert 11 in dependents
        assert dependents.total_weight == 16

        second_level_dependents = dependents[11]
        assert len(second_level_dependents) == 1
        assert 9 in second_level_dependents
        assert second_level_dependents.total_weight == 8
# -*- coding: utf-8 -*-
"""
test_priority
~~~~~~~~~~~~~

Tests for the Priority trees
"""
from hypothesis import given
from hypothesis.strategies import integers, lists, tuples

import priority


STREAMS_AND_WEIGHTS = lists(
    elements=tuples(
        integers(min_value=1),
        integers(min_value=1, max_value=255)
    ),
    unique_by=lambda x: x[0],
)


class TestPriorityTree(object):
    def test_priority_tree_one_stream(self):
        """
        When only one stream is in the PriorityTree, priorities are easy.
        """
        p = priority.PriorityTree()
        p.insert_stream(stream_id=1)

        priorities = p.priorities()
        assert len(priorities) == 1
        priorities.total_weight == 16

    @given(lists(elements=integers(min_value=0)))
    def test_priority_tree_single_level(self, weights):
        """
        If lots of elements are added to the tree all at the top level, their
        weights are summed properly and the priorities object has the correct
        length.
        """
        p = priority.PriorityTree()
        stream_id = 1

        for weight in weights:
            p.insert_stream(stream_id=stream_id, weight=weight)
            stream_id += 1

        priorities = p.priorities()
        assert len(priorities) == len(weights)
        assert priorities.total_weight == sum(weights)

    @given(STREAMS_AND_WEIGHTS)
    def test_priorities_stream_weights(self, stream_data):
        """
        For a given set of priorities, we can index by ID and find the weight
        of the stream.
        """
        p = priority.PriorityTree()

        for stream_id, weight in stream_data:
            p.insert_stream(stream_id=stream_id, weight=weight)

        priorities = p.priorities()

        for stream_id, weight in stream_data:
            assert weight == priorities.stream_weight(stream_id)

    def test_drilling_down(self, readme_tree):
        """
        We can drill down each layer of the tree by stream ID.
        """
        top_level = readme_tree.priorities()
        assert 7 in top_level

        dependents = top_level[7]
        assert len(dependents) == 1
        assert 11 in dependents

        second_level_dependents = dependents[11]
        assert len(second_level_dependents) == 1
        assert 9 in second_level_dependents
mit
Python
4530eea92e37c087b6f25fe3a0e48e54b949b68b
allow setup.py to work without django
gregplaysguitar/django-trolley
cart/__init__.py
cart/__init__.py
__version__ = '1.1'
VERSION = tuple(map(int, __version__.split('.'))) + ('dev',)


def get_helper_module():
    '''Get the helper module as defined in the settings.'''

    # need to be able to import file without importing django, so these can't go
    # at the top
    from django.utils.importlib import import_module
    from django.core.exceptions import ImproperlyConfigured
    import settings as cart_settings

    if cart_settings.HELPER_MODULE:
        try:
            package = import_module(cart_settings.HELPER_MODULE)
        except ImportError, e:
            raise ImproperlyConfigured(u'The CART_HELPER_MODULE setting refers to a ' \
                                       'non-existent package, or the import failed ' \
                                       'due to an error. Error details: %s' % e)
        return package
    else:
        return None
from django.utils.importlib import import_module
from django.core.exceptions import ImproperlyConfigured

__version__ = '1.1'
VERSION = tuple(map(int, __version__.split('.'))) + ('dev',)


def get_helper_module():
    '''Get the helper module as defined in the settings.'''

    import settings as cart_settings

    if cart_settings.HELPER_MODULE:
        try:
            package = import_module(cart_settings.HELPER_MODULE)
        except ImportError, e:
            raise ImproperlyConfigured(u'The CART_HELPER_MODULE setting refers to a ' \
                                       'non-existent package, or the import failed ' \
                                       'due to an error. Error details: %s' % e)
        return package
    else:
        return None
bsd-3-clause
Python
f89bc55aebeba0cbf3c8423c97599aa0d334d9c9
Fix lint error (#113)
googleapis/synthtool,googleapis/synthtool,googleapis/synthtool,googleapis/synthtool,googleapis/synthtool
synthtool/gcp/common.py
synthtool/gcp/common.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

from synthtool.languages import node
from synthtool.sources import templates
from synthtool import _tracked_paths

_TEMPLATES_DIR = Path(__file__).parent / "templates"


class CommonTemplates:
    def __init__(self):
        self._templates = templates.Templates(_TEMPLATES_DIR)

    def py_library(self) -> Path:
        raise NotImplementedError()

    def node_library(self, **kwargs) -> Path:
        kwargs["metadata"] = node.read_metadata()
        t = templates.TemplateGroup(_TEMPLATES_DIR / "node_library")
        result = t.render(**kwargs)
        _tracked_paths.add(result)
        return result

    def php_library(self, **kwargs) -> Path:
        t = templates.TemplateGroup(_TEMPLATES_DIR / "php_library")
        result = t.render(**kwargs)
        _tracked_paths.add(result)
        return result

    def render(self, template_name: str, **kwargs) -> Path:
        return self._templates.render(template_name, **kwargs)
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

from synthtool.languages import node
from synthtool.sources import templates
from synthtool import _tracked_paths

_TEMPLATES_DIR = Path(__file__).parent / "templates"


class CommonTemplates:
    def __init__(self):
        self._templates = templates.Templates(_TEMPLATES_DIR)

    def py_library(self) -> Path:
        raise NotImplemented()

    def node_library(self, **kwargs) -> Path:
        kwargs["metadata"] = node.read_metadata()
        t = templates.TemplateGroup(_TEMPLATES_DIR / "node_library")
        result = t.render(**kwargs)
        _tracked_paths.add(result)
        return result

    def php_library(self, **kwargs) -> Path:
        t = templates.TemplateGroup(_TEMPLATES_DIR / "php_library")
        result = t.render(**kwargs)
        _tracked_paths.add(result)
        return result

    def render(self, template_name: str, **kwargs) -> Path:
        return self._templates.render(template_name, **kwargs)
apache-2.0
Python
7b4531ec867982ba2f660a2a08e85dbae457083e
Fix new line stripping in admin site
ashbc/tgrsite,ashbc/tgrsite,ashbc/tgrsite
users/models.py
users/models.py
import hashlib
import urllib.parse as urllib

from django.contrib.auth.models import User
from django.db import models


# extension to django's User class which has authentication details
# as well as some basic info such as name
class Member(models.Model):
    def gravatar(self, size=128):
        default = "https://pbs.twimg.com/media/Civ9AUkVAAAwihS.jpg"
        h = hashlib.md5(
            self.equiv_user.email.encode('utf8').lower()
        ).hexdigest()
        q = urllib.urlencode({
            # 'd':default,
            'd': 'identicon',
            's': str(size),
        })
        return 'https://www.gravatar.com/avatar/{}?{}'.format(h, q)

    equiv_user = models.OneToOneField(User, on_delete=models.CASCADE)

    def __str__(self):
        return self.equiv_user.username

    bio = models.TextField(max_length=4096, blank=True)
    signature = models.TextField(max_length=1024, blank=True)

    def notification_count(self):
        return len(self.notifications_owned.filter(is_unread=True))

    official_photo_url = models.CharField(max_length=512, null=True, blank=True)

    def is_exec(self):
        return len(self.execrole_set.all()) > 0
import hashlib
import urllib.parse as urllib

from django.contrib.auth.models import User
from django.db import models


# extension to django's User class which has authentication details
# as well as some basic info such as name
class Member(models.Model):
    def gravatar(self, size=128):
        default = "https://pbs.twimg.com/media/Civ9AUkVAAAwihS.jpg"
        h = hashlib.md5(
            self.equiv_user.email.encode('utf8').lower()
        ).hexdigest()
        q = urllib.urlencode({
            # 'd':default,
            'd': 'identicon',
            's': str(size),
        })
        return 'https://www.gravatar.com/avatar/{}?{}'.format(h, q)

    equiv_user = models.OneToOneField(User, on_delete=models.CASCADE)

    def __str__(self):
        return self.equiv_user.username

    bio = models.CharField(max_length=4096, blank=True)
    signature = models.CharField(max_length=1024, blank=True)

    def notification_count(self):
        return len(self.notifications_owned.filter(is_unread=True))

    official_photo_url = models.CharField(max_length=512, null=True, blank=True)

    def is_exec(self):
        return len(self.execrole_set.all()) > 0
isc
Python
57cec2b03eaa6857bcb1b3780c4de00c3165b281
Return early if owner
BeatButton/beattie-bot,BeatButton/beattie
utils/checks.py
utils/checks.py
from discord.ext import commands


def is_owner_or(**perms):
    async def predicate(ctx):
        if await ctx.bot.is_owner(ctx.author):
            return True
        permissions = ctx.channel.permissions_for(ctx.author)
        return all(getattr(permissions, perm, None) == value
                   for perm, value in perms.items())
    return commands.check(predicate)
from discord.ext import commands


def is_owner_or(**perms):
    async def predicate(ctx):
        owner = await ctx.bot.is_owner(ctx.author)
        permissions = ctx.channel.permissions_for(ctx.author)
        return all(getattr(permissions, perm, None) == value
                   for perm, value in perms.items()) or owner
    return commands.check(predicate)
mit
Python
28c314e98ec88586b8c423b0941d8f029e4946e9
fix function which has obviously never been tested
grawity/accdb
lib/xdg_secret.py
lib/xdg_secret.py
import subprocess


def xdg_secret_store(label, secret, attrs):
    with subprocess.Popen(["secret-tool", "store", "--label", label] + attrs,
                          stdin=subprocess.PIPE) as proc:
        proc.communicate(secret.encode("utf-8"))
        return proc.wait() == 0


def xdg_secret_lookup_secret(attrs):
    with subprocess.Popen(["secret-tool", "lookup"] + attrs,
                          stdout=subprocess.PIPE) as proc:
        return proc.stdout.read().rstrip(b"\n")


def xdg_secret_search_stdout(attrs):
    return subprocess.call(["secret-tool", "search"] + attrs) == 0


def xdg_secret_clear(attrs):
    return subprocess.call(["secret-tool", "clear"] + attrs) == 0
import subprocess


def xdg_secret_store(label, secret, attrs):
    with subprocess.Popen(["secret-tool", "store", "--label", label] + attrs,
                          stdin=subprocess.PIPE) as proc:
        proc.communicate(secret.encode("utf-8"))
        return proc.wait() == 0


def xdg_secret_lookup_secret(attrs):
    with subprocess.Popen(["secret-tool", "lookup"] + attrs,
                          stdout=subprocess.PIPE) as proc:
        return proc.stdout.read().rstrip("\n")


def xdg_secret_search_stdout(attrs):
    return subprocess.call(["secret-tool", "search"] + attrs) == 0


def xdg_secret_clear(attrs):
    return subprocess.call(["secret-tool", "clear"] + attrs) == 0
mit
Python
e1a4b0d7f7d9e860dce794e07aadedea193d470e
Set version to v2.0.18.dev1
spacy-io/spaCy,spacy-io/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy
spacy/about.py
spacy/about.py
# inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py

__title__ = 'spacy'
__version__ = '2.0.18.dev1'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__release__ = False

__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
# inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py

__title__ = 'spacy'
__version__ = '2.0.18'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__release__ = True

__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
mit
Python
d5c8d2f5fd4177b6f4980689ae972352563c28e5
Update about.py and increment version
recognai/spaCy,aikramer2/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,spacy-io/spaCy,honnibal/spaCy,aikramer2/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,recognai/spaCy
spacy/about.py
spacy/about.py
# inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py

__title__ = 'spacy'
__version__ = '2.0.0'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__docs_models__ = 'https://spacy.io/docs/usage/models'
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts.json'
__model_files__ = 'https://raw.githubusercontent.com/explosion/spacy-dev-resources/v2/templates/model/'
# inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py

__title__ = 'spacy'
__version__ = '1.8.2'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Matthew Honnibal'
__email__ = 'matt@explosion.ai'
__license__ = 'MIT'
__docs_models__ = 'https://spacy.io/docs/usage/models'
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts.json'
__model_files__ = 'https://raw.githubusercontent.com/explosion/spacy-dev-resources/v2/templates/model/'
mit
Python
4e2f5c79b67a86fce622c486a0ea28fca0130015
clean up default arguments in strip_training_tags()
menzenski/Razmetka,menzenski/tagger-tester
taggertester/testing.py
taggertester/testing.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from nltk.tag.stanford import StanfordPOSTagger

from .config import DATA_DIR_NAME, PATH_TO_DATA_DIR
from .files import TrainingFile, write_to_directory
from .tag import FilePair


class TaggerTester(object):
    """Collection of files for training/testing part-of-speech taggers.
    """

    def __init__(self):
        """Initialize the test suite."""
        pass


class SentencePair(object):
    """Pair of sentences: one tagged by hand, one by a POS tagger."""

    def __init__(self, hand_tagged_sentence, auto_tagged_sentence,
                 separator='_'):
        """Initialize the object.

        Parameters
        ----------
          hand_tagged_sentence (unicode / str) : a sentence which has
              been tagged by hand (i.e., it belongs to part of the
              original training file which was set aside to serve as
              a test set)
          auto_tagged_sentence (list) : a sentence which has been tagged
              automatically by a part-of-speech tagger
          separator (str) : the character which serves to separate
              words from their part-of-speech tags (likely '_' or '/')
        """
        # split the hand-tagged sentence on whitespace, since the auto-tagged
        # sentence will already be split and we want them to match
        self.hand_tagged = hand_tagged_sentence.split()
        self.auto_tagged = auto_tagged_sentence
        self.sep = separator

    def strip_training_tags(self, sentence=None, sep=None):
        """Remove the part-of-speech tags from a test sentence."""
        if sentence == None:
            sentence = self.hand_tagged
        if sep == None:
            sep = self.sep
        return [w.split(sep, 1)[0] for w in sentence]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from nltk.tag.stanford import StanfordPOSTagger

from .config import DATA_DIR_NAME, PATH_TO_DATA_DIR
from .files import TrainingFile, write_to_directory
from .tag import FilePair


class TaggerTester(object):
    """Collection of files for training/testing part-of-speech taggers.
    """

    def __init__(self):
        """Initialize the test suite."""
        pass


class SentencePair(object):
    """Pair of sentences: one tagged by hand, one by a POS tagger."""

    def __init__(self, hand_tagged_sentence, auto_tagged_sentence,
                 separator='_'):
        """Initialize the object.

        Parameters
        ----------
          hand_tagged_sentence (unicode / str) : a sentence which has
              been tagged by hand (i.e., it belongs to part of the
              original training file which was set aside to serve as
              a test set)
          auto_tagged_sentence (list) : a sentence which has been tagged
              automatically by a part-of-speech tagger
          separator (str) : the character which serves to separate
              words from their part-of-speech tags (likely '_' or '/')
        """
        # split the hand-tagged sentence on whitespace, since the auto-tagged
        # sentence will already be split and we want them to match
        self.hand_tagged = hand_tagged_sentence.split()
        self.auto_tagged = auto_tagged_sentence
        self.sep = separator

    def strip_training_tags(self, hand_tagged_sentence):
        """Remove the part-of-speech tags from a test sentence."""
        return [w.split(self.sep, 1)[0] for w in self.hand_tagged]
mit
Python
e8a5a97ea18120915dba74b9a73fdca4eb381568
Fix indentation level
shuttle1987/tail,shuttle1987/tail
tail/tests/test_tail.py
tail/tests/test_tail.py
""" Tests for the tail implementation """ from tail import FileBasedTail def test_tail_from_file(): """Tests that tail works as advertised from a file""" from unittest.mock import mock_open, patch, Mock # The mock_data we are using for our test mock_data = """A B C D E F """ mocked_open = mock_open(read_data=mock_data) # mock_open does not support iteration by lines by default so # we must define the following: mocked_open.return_value.__iter__.return_value = mock_data.splitlines() # The file check in the class returns no value upon a valid file # the error states just raise exceptions. mocked_file_validity_check = Mock() # We need to patch the open found in the namespace of the module # where the function is defined with patch('tail.open', mocked_open, create=True) as mocked_file_open: # We also need to patch the file checking because we are not dealing # with an actual file in the filesystem in this unit test with patch('tail.tail.check_file_validity', mocked_file_validity_check): res = FileBasedTail('Test_filename.txt').tail(3) mocked_file_validity_check.assert_called_once_with('Test_filename.txt') mocked_file_open.assert_called_once_with('Test_filename.txt', 'r') assert len(res) == 3 assert res == ["D", "E", "F"] def test_head_from_file(): """Tests that tail works as advertised from a file""" from unittest.mock import mock_open, patch, Mock # The mock_data we are using for our test mock_data = """A B C D E F """ mocked_open = mock_open(read_data=mock_data) # mock_open does not support iteration by lines by default so # we must define the following: mocked_open.return_value.__iter__.return_value = mock_data.splitlines() # The file check in the class returns no value upon a valid file # the error states just raise exceptions. mocked_file_validity_check = Mock() # We need to patch the open found in the namespace of the module # where the function is defined with patch('tail.open', mocked_open, create=True) as mocked_file_open: # We also need to patch the file checking because we are not dealing # with an actual file in the filesystem in this unit test with patch('tail.tail.check_file_validity', mocked_file_validity_check): res = FileBasedTail('Test_filename.txt').head(3) mocked_file_validity_check.assert_called_once_with('Test_filename.txt') mocked_file_open.assert_called_once_with('Test_filename.txt', 'r') assert len(res) == 3 assert res == ["A", "B", "C"]
""" Tests for the tail implementation """ from tail import FileBasedTail def test_tail_from_file(): """Tests that tail works as advertised from a file""" from unittest.mock import mock_open, patch, Mock # The mock_data we are using for our test mock_data = """A B C D E F """ mocked_open = mock_open(read_data=mock_data) # mock_open does not support iteration by lines by default so # we must define the following: mocked_open.return_value.__iter__.return_value = mock_data.splitlines() # The file check in the class returns no value upon a valid file # the error states just raise exceptions. mocked_file_validity_check = Mock() # We need to patch the open found in the namespace of the module # where the function is defined with patch('builtins.open', mocked_open, create=True) as mocked_file_open: # We also need to patch the file checking because we are not dealing # with an actual file in the filesystem in this unit test with patch('tail.tail.check_file_validity', mocked_file_validity_check): res = FileBasedTail('Test_filename.txt').tail(3) mocked_file_validity_check.assert_called_once_with('Test_filename.txt') mocked_file_open.assert_called_once_with('Test_filename.txt', 'r') assert len(res) == 3 assert res == ["D", "E", "F"] def test_head_from_file(): """Tests that tail works as advertised from a file""" from unittest.mock import mock_open, patch, Mock # The mock_data we are using for our test mock_data = """A B C D E F """ mocked_open = mock_open(read_data=mock_data) # mock_open does not support iteration by lines by default so # we must define the following: mocked_open.return_value.__iter__.return_value = mock_data.splitlines() # The file check in the class returns no value upon a valid file # the error states just raise exceptions. mocked_file_validity_check = Mock() # We need to patch the open found in the namespace of the module # where the function is defined with patch('builtins.open', mocked_open, create=True) as mocked_file_open: # We also need to patch the file checking because we are not dealing # with an actual file in the filesystem in this unit test with patch('tail.tail.check_file_validity', mocked_file_validity_check): res = FileBasedTail('Test_filename.txt').head(3) mocked_file_validity_check.assert_called_once_with('Test_filename.txt') mocked_file_open.assert_called_once_with('Test_filename.txt', 'r') assert len(res) == 3 assert res == ["A", "B", "C"]
mit
Python
33efe92104ad139f9313d91ae7b2eea8a76da9d7
fix flake8
higumachan/pyscalambda
pyscalambda/__init__.py
pyscalambda/__init__.py
from pyscalambda.operands import Underscore
from pyscalambda.operators import UnaryOperator
from pyscalambda.quote import quote
from pyscalambda.scalambdable import scalambdable_const, scalambdable_func, scalambdable_iterator
from pyscalambda.utility import convert_operand


_ = Underscore(0)
_1 = Underscore(1)
_2 = Underscore(2)
_3 = Underscore(3)
_4 = Underscore(4)
_5 = Underscore(5)
_6 = Underscore(6)
_7 = Underscore(7)
_8 = Underscore(8)
_9 = Underscore(9)

SF = scalambdable_func
SC = scalambdable_const
SI = scalambdable_iterator
Q = quote


def not_(value):
    return UnaryOperator("not ", convert_operand(value))


__all__ = ("_", "_1", "_2", "_3", "_4", "_5", "_6", "_7", "_8", "_9", "SF", "SC", "Q", "not_")
from pyscalambda.operands import Underscore
from pyscalambda.operators import UnaryOperator
from pyscalambda.quote import quote
from pyscalambda.scalambdable import scalambdable_const, scalambdable_func, scalambdable_iterator
from pyscalambda.utility import convert_operand

_ = Underscore(0)
_1 = Underscore(1)
_2 = Underscore(2)
_3 = Underscore(3)
_4 = Underscore(4)
_5 = Underscore(5)
_6 = Underscore(6)
_7 = Underscore(7)
_8 = Underscore(8)
_9 = Underscore(9)

SF = scalambdable_func
SC = scalambdable_const
SI = scalambdable_iterator
Q = quote

def not_(value):
    return UnaryOperator("not ", convert_operand(value))

__all__ = ("_", "_1", "_2", "_3", "_4", "_5", "_6", "_7", "_8", "_9", "SF", "SC", "Q", "not_")
mit
Python
c0824d3cb9cba811ba36c2f8937e91716f5a50df
Fix lint
adamtheturtle/vws-python,adamtheturtle/vws-python
ci/run_script.py
ci/run_script.py
""" Run tests and linters on Travis CI. """ import os import subprocess import sys from pathlib import Path import pytest def run_test(test_filename: str) -> None: """ Run pytest with a given filename. """ path = Path('tests') / 'mock_vws' / test_filename result = pytest.main( [ '-vvv', '--exitfirst', str(path), '--cov=src', '--cov=tests', ] ) sys.exit(result) if __name__ == '__main__': TEST_FILENAME = os.environ.get('TEST_FILENAME') if TEST_FILENAME: run_test(test_filename=TEST_FILENAME) else: subprocess.check_call(['make', 'lint'])
""" Run tests and linters on Travis CI. """ import os import subprocess import sys from pathlib import Path import pytest def run_test(test_filename: str) -> None: """ Run pytest with a given filename. """ path = Path('tests') / 'mock_vws' / test_filename result = pytest.main([ '-vvv', '--exitfirst', str(path), '--cov=src', '--cov=tests', ]) sys.exit(result) if __name__ == '__main__': TEST_FILENAME = os.environ.get('TEST_FILENAME') if TEST_FILENAME: run_test(test_filename=TEST_FILENAME) else: subprocess.check_call(['make', 'lint'])
mit
Python
2d60ef3a9ff53c1623747fd1a00df4d788dd3777
fix tobler init
pysal/pysal,pedrovma/pysal,weikang9009/pysal,sjsrey/pysal,lanselin/pysal
pysal/model/tobler/__init__.py
pysal/model/tobler/__init__.py
from tobler import area_weighted
from tobler import dasymetric
from tobler import model
from tobler import area_weighted
from tobler import data
from tobler import dasymetric
bsd-3-clause
Python
d1c88387a129d64488a5ca2dee56d7fac36ffbf1
Disable GCC fallback, add time logging.
ramosian-glider/clang-kernel-build,ramosian-glider/clang-kernel-build
clang_wrapper.py
clang_wrapper.py
#!/usr/bin/env python

import optparse
import os
import subprocess
import sys
import time

WORLD_PATH = os.path.dirname(os.path.abspath(__file__))
COMPILER_PATH = {'gcc': 'gcc',
                 'clang': WORLD_PATH + '/third_party/llvm-build/Release+Asserts/bin/clang'
                 }

FILTER = {'gcc': ['-Qunused-arguments',
                  '-no-integrated-as',
                  '-mno-global-merge',
                  '-Wdate-time',
                  '-Wno-unknown-warning-option',
                  '-Wno-initializer-overrides',
                  '-Wno-tautological-compare',
                  '-Wincompatible-pointer-types',
                  '-Wno-gnu',
                  '-Wno-format-invalid-specifier',
                  '-Werror=date-time',
                  '-Werror=incompatible-pointer-types',
                  ],
          'clang': []}

SOURCE = 'source'
WRAPPER_LOG = WORLD_PATH + '/wrapper.log'
LOG = sys.stderr
LOG_OPTIONS = {'time': True, 'argv': True}


def compiler(flags):
    path = 'clang'
    return path  # no need to use GCC for now
    if SOURCE in flags:
        source = flags[SOURCE]
        #print >>LOG, source
        # kernel/* ok
        # kernel/[st] broken
        # kernel/[kmpstuw] broken
        # kernel/[abckmpstuw] broken
        # kernel/[abcdefgkmpstuw] ok
        # kernel/[defgkmpstuw] ok
        # kernel/[defgkm] ok
        # kernel/[defg] ok
        # kernel/[de] broken
        # kernel/[fg] ok
        # kernel/[f] broken
        # kernel/[g] ok -- that's kernel/groups.h
        if source.startswith('kernel/'):
            pieces = source.split('/')
            if pieces[1][0] in ['g']:
                path = 'gcc'
    #print >>LOG, path
    return path


def filter_args(argv, cname):
    new_argv = []
    for arg in argv:
        if arg not in FILTER[cname]:
            new_argv.append(arg)
    return new_argv


def compiler_argv(flags, argv):
    cname = compiler(flags)
    new_argv = [COMPILER_PATH[cname]] + filter_args(argv, cname)
    return new_argv


def make_flags(argv):
    flags = {}
    argv = argv[1:]
    for arg in argv:
        if arg.endswith('.c'):
            flags[SOURCE] = arg
    return flags, argv


def main(argv):
    global LOG
    LOG = file(WRAPPER_LOG, 'a+')
    if 'argv' in LOG_OPTIONS:
        print >>LOG, ' '.join(argv)
    flags, argv = make_flags(argv)
    new_argv = compiler_argv(flags, argv)
    #print >>LOG, ' '.join(new_argv)
    start_time = time.time()
    ret = subprocess.call(new_argv)
    end_time = time.time()
    if 'time' in LOG_OPTIONS:
        print >> LOG, 'Time elapsed: {:.3f} seconds'.format(end_time - start_time)
    LOG.close()
    return ret


if __name__ == '__main__':
    sys.exit(main(sys.argv))
#!/usr/bin/env python

import optparse
import os
import subprocess
import sys

WORLD_PATH = os.path.dirname(os.path.abspath(__file__))
COMPILER_PATH = {'gcc': 'gcc',
                 'clang': WORLD_PATH + '/third_party/llvm-build/Release+Asserts/bin/clang'
                 }

FILTER = {'gcc': ['-Qunused-arguments',
                  '-no-integrated-as',
                  '-mno-global-merge',
                  '-Wdate-time',
                  '-Wno-unknown-warning-option',
                  '-Wno-initializer-overrides',
                  '-Wno-tautological-compare',
                  '-Wincompatible-pointer-types',
                  '-Wno-gnu',
                  '-Wno-format-invalid-specifier',
                  '-Werror=date-time',
                  '-Werror=incompatible-pointer-types',
                  ],
          'clang': []}

SOURCE = 'source'
WRAPPER_LOG = WORLD_PATH + '/wrapper.log'
LOG = sys.stderr


def compiler(flags):
    path = 'clang'
    if SOURCE in flags:
        source = flags[SOURCE]
        print >>LOG, source
        # kernel/* ok
        # kernel/[st] broken
        # kernel/[kmpstuw] broken
        # kernel/[abckmpstuw] broken
        # kernel/[abcdefgkmpstuw] ok
        # kernel/[defgkmpstuw] ok
        # kernel/[defgkm] ok
        # kernel/[defg] ok
        # kernel/[de] broken
        # kernel/[fg] ok
        # kernel/[f] broken
        # kernel/[g] ok -- that's kernel/groups.h
        if source.startswith('kernel/'):
            pieces = source.split('/')
            if pieces[1][0] in ['g']:
                path = 'gcc'
    print >>LOG, path
    return path


def filter_args(argv, cname):
    new_argv = []
    for arg in argv:
        if arg not in FILTER[cname]:
            new_argv.append(arg)
    return new_argv


def compiler_argv(flags, argv):
    cname = compiler(flags)
    new_argv = [COMPILER_PATH[cname]] + filter_args(argv, cname)
    return new_argv


def make_flags(argv):
    flags = {}
    argv = argv[1:]
    for arg in argv:
        if arg.endswith('.c'):
            flags[SOURCE] = arg
    return flags, argv


def main(argv):
    global LOG
    LOG = file(WRAPPER_LOG, 'a+')
    #print >>LOG, ' '.join(argv)
    flags, argv = make_flags(argv)
    new_argv = compiler_argv(flags, argv)
    #print >>LOG, ' '.join(new_argv)
    ret = subprocess.call(new_argv)
    #print >>LOG, ret
    LOG.close()
    return ret


if __name__ == '__main__':
    sys.exit(main(sys.argv))
apache-2.0
Python
deebd351b09108d95b4759b179ad84b48b6c933e
Fix typo in random-seed's help
mark-adams/pytest-test-groups
pytest_test_groups/__init__.py
pytest_test_groups/__init__.py
from random import Random

import math


def get_group_size(total_items, total_groups):
    return int(math.ceil(float(total_items) / total_groups))


def get_group(items, group_size, group_id):
    start = group_size * (group_id - 1)
    end = start + group_size

    if start >= len(items) or start < 0:
        raise ValueError("Invalid test-group argument")

    return items[start:end]


def pytest_addoption(parser):
    group = parser.getgroup('split your tests into evenly sized groups and run them')
    group.addoption('--test-group-count', dest='test-group-count', type=int,
                    help='The number of groups to split the tests into')
    group.addoption('--test-group', dest='test-group', type=int,
                    help='The group of tests that should be executed')
    group.addoption('--test-group-random-seed', dest='random-seed', type=int,
                    help='Integer to seed pseudo-random test ordering')


def pytest_collection_modifyitems(session, config, items):
    group_count = config.getoption('test-group-count')
    group_id = config.getoption('test-group')
    seed = config.getoption('random-seed', False)

    if not group_count or not group_id:
        return

    if seed:
        seeded = Random(seed)
        seeded.shuffle(items)

    total_items = len(items)

    group_size = get_group_size(total_items, group_count)
    tests_in_group = get_group(items, group_size, group_id)
    del items[:]
    items.extend(tests_in_group)

    print('Running test group #{0} ({1} tests)'.format(group_id, len(items)))
from random import Random

import math


def get_group_size(total_items, total_groups):
    return int(math.ceil(float(total_items) / total_groups))


def get_group(items, group_size, group_id):
    start = group_size * (group_id - 1)
    end = start + group_size

    if start >= len(items) or start < 0:
        raise ValueError("Invalid test-group argument")

    return items[start:end]


def pytest_addoption(parser):
    group = parser.getgroup('split your tests into evenly sized groups and run them')
    group.addoption('--test-group-count', dest='test-group-count', type=int,
                    help='The number of groups to split the tests into')
    group.addoption('--test-group', dest='test-group', type=int,
                    help='The group of tests that should be executed')
    group.addoption('--test-group-random-seed', dest='random-seed', type=int,
                    help='Integer to seed psuedo-random test ordering')


def pytest_collection_modifyitems(session, config, items):
    group_count = config.getoption('test-group-count')
    group_id = config.getoption('test-group')
    seed = config.getoption('random-seed', False)

    if not group_count or not group_id:
        return

    if seed:
        seeded = Random(seed)
        seeded.shuffle(items)

    total_items = len(items)
    group_size = get_group_size(total_items, group_count)
    tests_in_group = get_group(items, group_size, group_id)

    del items[:]
    items.extend(tests_in_group)

    print('Running test group #{0} ({1} tests)'.format(group_id, len(items)))
mit
Python
2eca98c216a590c6163c8236c392f19ddd8d85d9
update to 4.4.12
hycis/TensorGraph,hycis/TensorGraph
tensorgraph/__init__.py
tensorgraph/__init__.py
# import json
# from os.path import dirname
#
# with open(dirname(__file__) + '/pkg_info.json') as fp:
#     _info = json.load(fp)
# __version__ = _info['version']
__version__ = "4.4.12"

from .stopper import EarlyStopper
from .sequential import Sequential
from .graph import Graph
from .node import StartNode, HiddenNode, EndNode
from .progbar import ProgressBar
from .data_iterator import SequentialIterator, StepIterator, SimpleBlocks, DataBlocks
from . import cost
from . import utils
from .dataset.preprocess import *
# import json
# from os.path import dirname
#
# with open(dirname(__file__) + '/pkg_info.json') as fp:
#     _info = json.load(fp)
# __version__ = _info['version']
__version__ = "4.4.10"

from .stopper import EarlyStopper
from .sequential import Sequential
from .graph import Graph
from .node import StartNode, HiddenNode, EndNode
from .progbar import ProgressBar
from .data_iterator import SequentialIterator, StepIterator, SimpleBlocks, DataBlocks
from . import cost
from . import utils
from .dataset.preprocess import *
apache-2.0
Python
4f5d81b48a5bb48771b82f30e3853472550ee65c
add demo about using file iterator
ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study
python/src/file_iter.py
python/src/file_iter.py
# Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list ofconditions and the following disclaimer.
#
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materialsprovided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import sys
import fileinput


def process(string):
    print 'Processing: ', string


def file_iter_by_ch(filename):
    f = open(filename)
    while True:
        char = f.read(1)
        if not char:
            break
        process(char)
    f.close()


def file_iter_by_line(filename):
    f = open(filename)
    while True:
        line = f.readline()
        if not line:
            break
        process(line)
    f.close()


def file_iter_by_ch_all(filename):
    f = open(filename)
    for char in f.read():
        process(char)
    f.close()


def file_iter_by_line_all(filename):
    f = open(filename)
    for line in f.readlines():
        process(line)
    f.close()


def file_iter_by_lazy(filename):
    for line in fileinput.input(filename):
        process(line)


def file_iter(filename):
    f = open(filename)
    for line in f:
        process(line)
    f.close()


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'invalid arguments'
        exit(1)

    filename = 'file_iter.py'
    if sys.argv[1] == 'c':
        file_iter_by_ch(filename)
    elif sys.argv[1] == 'l':
        file_iter_by_line(filename)
    elif sys.argv[1] == 'ca':
        file_iter_by_ch_all(filename)
    elif sys.argv[1] == 'la':
        file_iter_by_line_all(filename)
    elif sys.argv[1] == 'lazy':
        file_iter_by_lazy(filename)
    else:
        file_iter(filename)
# Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list ofconditions and the following disclaimer.
#
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materialsprovided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import sys
import fileinput


def process(string):
    print 'Processing: ', string


def file_iter_by_ch(filename):
    f = open(filename)
    while True:
        char = f.read(1)
        if not char:
            break
        process(char)
    f.close()


def file_iter_by_line(filename):
    f = open(filename)
    while True:
        line = f.readline()
        if not line:
            break
        process(line)
    f.close()


def file_iter_by_ch_all(filename):
    f = open(filename)
    for char in f.read():
        process(char)
    f.close()


def file_iter_by_line_all(filename):
    f = open(filename)
    for line in f.readlines():
        process(line)
    f.close()


def file_iter_by_lazy(filename):
    for line in fileinput.input(filename):
        process(line)


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'invalid arguments'
        exit(1)

    filename = 'file_iter.py'
    if sys.argv[1] == 'c':
        file_iter_by_ch(filename)
    elif sys.argv[1] == 'l':
        file_iter_by_line(filename)
    elif sys.argv[1] == 'ca':
        file_iter_by_ch_all(filename)
    elif sys.argv[1] == 'la':
        file_iter_by_line_all(filename)
    elif sys.argv[1] == 'lazy':
        file_iter_by_lazy(filename)
    else:
        print 'error'
bsd-2-clause
Python