Dataset columns:
  commit        string, length 40
  subject       string, length 1 to 1.49k
  old_file      string, length 4 to 311
  new_file      string, length 4 to 311
  new_contents  string, length 1 to 29.8k
  old_contents  string, length 0 to 9.9k
  lang          string, 3 classes
  proba         float64, 0 to 1
eaf390b065944a64a3b74c1b0e43b1df60d4e88f
Reimplement deduping hurr
invoke/executor.py
invoke/executor.py
class Executor(object):
    """
    An execution strategy for Task objects.

    Subclasses may override various extension points to change, add or remove
    behavior.
    """
    def __init__(self, collection):
        """
        Create executor with a pointer to the task collection ``collection``.

        This pointer is used for looking up tasks by name and
        storing/retrieving state, e.g. how many times a given task has been
        run this session and so on.
        """
        self.collection = collection

    def execute(self, name, kwargs=None, dedupe=True):
        """
        Execute task named ``name``, optionally passing along ``kwargs``.

        If ``dedupe`` is ``True`` (default), will ensure any given task
        within ``self.collection`` is only run once per session. To disable
        this behavior, say ``dedupe=False``.
        """
        kwargs = kwargs or {}
        # Expand task list
        all_tasks = self.task_list(name)
        # Dedupe if requested
        if dedupe:
            # Compact (preserving order, so not using list+set)
            compact_tasks = []
            for task in all_tasks:
                if task not in compact_tasks:
                    compact_tasks.append(task)
            # Remove tasks already called
            tasks = []
            for task in compact_tasks:
                if not task.called:
                    tasks.append(task)
        else:
            tasks = all_tasks
        # Execute
        for task in tasks:
            task(**kwargs)

    def task_list(self, name):
        task = self.collection[name]
        tasks = [task]
        prereqs = []
        for pretask in task.pre:
            prereqs.append(self.collection[pretask])
        return prereqs + tasks
class Executor(object):
    """
    An execution strategy for Task objects.

    Subclasses may override various extension points to change, add or remove
    behavior.
    """
    def __init__(self, collection):
        """
        Create executor with a pointer to the task collection ``collection``.

        This pointer is used for looking up tasks by name and
        storing/retrieving state, e.g. how many times a given task has been
        run this session and so on.
        """
        self.collection = collection

    def execute(self, name, kwargs=None, dedupe=True):
        """
        Execute task named ``name``, optionally passing along ``kwargs``.

        If ``dedupe`` is ``True`` (default), will ensure any given task
        within ``self.collection`` is only run once per session. To disable
        this behavior, say ``dedupe=False``.
        """
        kwargs = kwargs or {}
        # Expand task list
        all_tasks = self.task_list(name)
        # Compact (preserving order, so not using list+set)
        compact_tasks = []
        for task in all_tasks:
            if task not in compact_tasks:
                compact_tasks.append(task)
        # Remove tasks already called
        tasks = []
        for task in compact_tasks:
            if not task.called:
                tasks.append(task)
        # Execute
        for task in tasks:
            task.body(**kwargs)

    def task_list(self, name):
        task = self.collection[name]
        tasks = [task]
        prereqs = []
        for pretask in task.pre:
            prereqs.append(self.collection[pretask])
        return prereqs + tasks
Python
0.000001
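The compaction loop in the new execute() preserves first-occurrence order, which a plain set would not. The same idiom in isolation, as a minimal sketch (needs only equality, not hashability):

def dedupe(items):
    seen = []
    for item in items:
        if item not in seen:  # O(n**2), but stable and hash-free
            seen.append(item)
    return seen

assert dedupe(['build', 'test', 'build']) == ['build', 'test']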
375d12ab7486f6bb0d57232d48c556e6c0eda0c1
Update P05_stylingExcel fixed PEP8 spacing
books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py
books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py
# This program uses the OpenPyXL module to manipulate Excel documents

import openpyxl
from openpyxl.styles import Font, NamedStyle

wb = openpyxl.Workbook()
sheet = wb["Sheet"]

# Setting the Font Style of Cells
italic24Font = NamedStyle(name="italic24Font")
italic24Font.font = Font(size=24, italic=True)
sheet["A1"].style = italic24Font
sheet["A1"] = "Hello world!"
wb.save("styled.xlsx")

# Font Objects
wb = openpyxl.Workbook()
sheet = wb["Sheet"]
fontObj1 = Font(name="Times New Roman", bold=True)
styleObj1 = NamedStyle(name="styleObj1")
styleObj1.font = fontObj1
sheet["A1"].style = styleObj1
sheet["A1"] = "Bold Times New Roman"
fontObj2 = Font(size=24, italic=True)
styleObj2 = NamedStyle(name="styleObj2")
styleObj2.font = fontObj2
sheet["B3"].style = styleObj2
sheet["B3"] = "24 pt Italic"
wb.save("styles.xlsx")

# Formulas
wb = openpyxl.Workbook()
sheet = wb.active
sheet["A1"] = 200
sheet["A2"] = 300
sheet["A3"] = "=SUM(A1:A2)"
wb.save("writeFormula.xlsx")
wbFormulas = openpyxl.load_workbook("writeFormula.xlsx")
sheet = wbFormulas.active
print(sheet["A3"].value)
wbDataOnly = openpyxl.load_workbook("writeFormula.xlsx", data_only=True)
sheet = wbDataOnly.active
print(sheet["A3"].value)  # Not working with LibreOffice 6.0.3.2

# Adjusting Rows and Columns
wb = openpyxl.Workbook()
sheet = wb.active
sheet["A1"] = "Tall row"
sheet["B2"] = "Wide column"
sheet.row_dimensions[1].height = 70
sheet.column_dimensions['B'].width = 20
wb.save("dimensions.xlsx")
wb = openpyxl.Workbook()
sheet = wb.active
sheet.merge_cells("A1:D3")
sheet["A1"] = "Twelve cells merged together."
sheet.merge_cells("C5:D5")
sheet["C5"] = "Two merged cells."
wb.save("merged.xlsx")
wb = openpyxl.load_workbook("merged.xlsx")
sheet = wb.active
sheet.unmerge_cells("A1:D3")
sheet.unmerge_cells("C5:D5")
#wb.save("merged.xlsx")  # uncomment to see changes
wb = openpyxl.load_workbook("produceSales.xlsx")
sheet = wb.active
sheet.freeze_panes = "A2"
wb.save("freezeExample.xlsx")

# Charts
wb = openpyxl.Workbook()
sheet = wb.get_active_sheet()
for i in range(1, 11):  # create some data in column A
    sheet['A' + str(i)] = i
refObj = openpyxl.charts.Reference(sheet, (1, 1), (10, 1))
seriesObj = openpyxl.charts.Series(refObj, title="First Series")
chartObj = openpyxl.charts.BarChart()
chartObj.append(seriesObj)
chartObj.drawing.top = 50  # set the position
chartObj.drawing.left = 100
chartObj.drawing.width = 300  # set the size
chartObj.drawing.height = 200
sheet.add_chart(chartObj)
wb.save("sampleChart.xlsx")
# This program uses the OpenPyXL module to manipulate Excel documents

import openpyxl
from openpyxl.styles import Font, NamedStyle

wb = openpyxl.Workbook()
sheet = wb["Sheet"]

# Setting the Font Style of Cells
italic24Font = NamedStyle(name="italic24Font")
italic24Font.font = Font(size=24, italic=True)
sheet["A1"].style = italic24Font
sheet["A1"] = "Hello world!"
wb.save("styled.xlsx")

# Font Objects
wb = openpyxl.Workbook()
sheet = wb["Sheet"]
fontObj1 = Font(name="Times New Roman", bold=True)
styleObj1 = NamedStyle(name="styleObj1")
styleObj1.font = fontObj1
sheet["A1"].style = styleObj1
sheet["A1"] = "Bold Times New Roman"
fontObj2 = Font(size=24, italic=True)
styleObj2 = NamedStyle(name="styleObj2")
styleObj2.font = fontObj2
sheet["B3"].style = styleObj2
sheet["B3"] = "24 pt Italic"
wb.save("styles.xlsx")

# Formulas
wb = openpyxl.Workbook()
sheet = wb.active
sheet["A1"] = 200
sheet["A2"] = 300
sheet["A3"] = "=SUM(A1:A2)"
wb.save("writeFormula.xlsx")
wbFormulas = openpyxl.load_workbook("writeFormula.xlsx")
sheet = wbFormulas.active
print(sheet["A3"].value)
wbDataOnly = openpyxl.load_workbook("writeFormula.xlsx", data_only=True)
sheet = wbDataOnly.active
print(sheet["A3"].value)  # Not working with LibreOffice 6.0.3.2

# Adjusting Rows and Columns
wb = openpyxl.Workbook()
sheet = wb.active
sheet["A1"] = "Tall row"
sheet["B2"] = "Wide column"
sheet.row_dimensions[1].height = 70
sheet.column_dimensions['B'].width = 20
wb.save("dimensions.xlsx")
wb = openpyxl.Workbook()
sheet = wb.active
sheet.merge_cells("A1:D3")
sheet["A1"] = "Twelve cells merged together."
sheet.merge_cells("C5:D5")
sheet["C5"] = "Two merged cells."
wb.save("merged.xlsx")
wb = openpyxl.load_workbook("merged.xlsx")
sheet = wb.active
sheet.unmerge_cells("A1:D3")
sheet.unmerge_cells("C5:D5")
#wb.save("merged.xlsx")  # uncomment to see changes
wb = openpyxl.load_workbook("produceSales.xlsx")
sheet = wb.active
sheet.freeze_panes = "A2"
wb.save("freezeExample.xlsx")

# Charts
wb = openpyxl.Workbook()
sheet = wb.get_active_sheet()
for i in range(1, 11):  # create some data in column A
    sheet['A' + str(i)] = i
refObj = openpyxl.charts.Reference(sheet, (1, 1), (10, 1))
seriesObj = openpyxl.charts.Series(refObj, title="First Series")
chartObj = openpyxl.charts.BarChart()
chartObj.append(seriesObj)
chartObj.drawing.top = 50  # set the position
chartObj.drawing.left = 100
chartObj.drawing.width = 300  # set the size
chartObj.drawing.height = 200
sheet.add_chart(chartObj)
wb.save("sampleChart.xlsx")
Python
0
17793c9b3ceecc206aab1d1c34c0d3dc69892cbd
Use ArgumentParser to enforce required arguments
monitor/runner.py
monitor/runner.py
import sys
from time import sleep

from camera import Camera
from controller import Controller
from plotter_pygame import PyGamePlotter

import epics
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--prefix', required=True, dest='prefix',
                        help='controller IOC prefix')
    parser.add_argument('--name', required=True, dest='name',
                        help='name of monitor')
    parser.add_argument('--fullscreen', dest='fullscreen', default=1,
                        help='1 for fullscreen (default), 0 for small window')
    args = parser.parse_args()

    controller = Controller(args.prefix, args.name)
    plotter = PyGamePlotter(args.name, args.fullscreen)
    camera = Camera()
    old_cmap = ""

    while True:
        try:
            # check for quit events
            if not plotter.i_shall_continue():
                break

            # get camera name
            camera_name = controller.camera

            # if no camera is selected, make screen blank
            if camera_name == "":
                plotter.blank()
            # otherwise, display camera feed
            else:
                camera.set_name(camera_name)

                # update colormap
                cmap = controller.colourmap_name
                if cmap != old_cmap:
                    old_cmap = cmap
                    plotter.set_colormap(controller.colourmap_data)

                # update aspect ratio
                plotter.set_aspect_ratio(controller.aspect)

                # get camera data and process it
                plotter.process(camera.get_data())

                # udpate label info
                if controller.label == 1:
                    plotter.show_label(camera_name)
                pass

            # show and wait
            plotter.show()
            sleep(controller.rate)

        except KeyboardInterrupt:
            plotter.quit()
            pass

    plotter.quit()
import sys
from time import sleep

from camera import Camera
from controller import Controller
from plotter_pygame import PyGamePlotter

import epics
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--prefix', dest='prefix',
                        help='controller IOC prefix')
    parser.add_argument('--name', dest='name',
                        help='name of monitor')
    parser.add_argument('--fullscreen', dest='fullscreen', default=1,
                        help='1 for fullscreen (default), 0 for small window')
    args = parser.parse_args()

    if not (args.prefix and args.name):
        parser.error("Arguments missing. Please use both --prefix and --name")

    controller = Controller(args.prefix, args.name)
    plotter = PyGamePlotter(args.name, args.fullscreen)
    camera = Camera()
    old_cmap = ""

    while True:
        try:
            # check for quit events
            if not plotter.i_shall_continue():
                break

            # get camera name
            camera_name = controller.camera

            # if no camera is selected, make screen blank
            if camera_name == "":
                plotter.blank()
            # otherwise, display camera feed
            else:
                camera.set_name(camera_name)

                # update colormap
                cmap = controller.colourmap_name
                if cmap != old_cmap:
                    old_cmap = cmap
                    plotter.set_colormap(controller.colourmap_data)

                # update aspect ratio
                plotter.set_aspect_ratio(controller.aspect)

                # get camera data and process it
                plotter.process(camera.get_data())

                # udpate label info
                if controller.label == 1:
                    plotter.show_label(camera_name)
                pass

            # show and wait
            plotter.show()
            sleep(controller.rate)

        except KeyboardInterrupt:
            plotter.quit()
            pass

    plotter.quit()
Python
0
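A standalone sketch of what this commit exploits: with required=True, argparse itself rejects a missing option, which makes the manual parser.error() check in the old version redundant:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--prefix', required=True)
# A missing --prefix now exits automatically with:
#   error: the following arguments are required: --prefix
args = parser.parse_args([])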
81a4b04173033d7e678ad6c4b4efae654af9ac11
Use a threading local object to isolate MongoDB connection between different threads but reuse the same connection in the same thread
moocng/mongodb.py
moocng/mongodb.py
# Copyright 2013 Rooter Analysis S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import urlparse

from threading import local

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured

from pymongo.connection import Connection

DEFAULT_MONGODB_HOST = 'localhost'
DEFAULT_MONGODB_PORT = 27017
DEFAULT_MONGODB_NAME = 'moocng'
DEFAULT_MONGODB_URI = 'mongodb://%s:%d/%s' % (DEFAULT_MONGODB_HOST,
                                              DEFAULT_MONGODB_PORT,
                                              DEFAULT_MONGODB_NAME)


class MongoDB(object):

    def __init__(self, db_uri=DEFAULT_MONGODB_URI,
                 connection_factory=Connection):
        self.db_uri = urlparse.urlparse(db_uri)
        self.connection = connection_factory(
            host=self.db_uri.hostname or DEFAULT_MONGODB_HOST,
            port=self.db_uri.port or DEFAULT_MONGODB_PORT)

        if self.db_uri.path:
            self.database_name = self.db_uri.path[1:]
        else:
            self.database_name = DEFAULT_MONGODB_NAME

        self.database = self.get_database()

    def get_connection(self):
        return self.connection

    def get_database(self):
        database = self.connection[self.database_name]
        if self.db_uri.username and self.db_uri.password:
            database.authenticate(self.db_uri.username, self.db_uri.password)
        return database

    def get_collection(self, collection):
        return self.database[collection]


connections = local()


def get_db():
    try:
        db_uri = settings.MONGODB_URI
    except AttributeError:
        raise ImproperlyConfigured('Missing required MONGODB_URI setting')

    if not hasattr(connections, 'default'):
        connections.default = MongoDB(db_uri)

    return connections.default
# Copyright 2013 Rooter Analysis S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import urlparse

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured

from pymongo.connection import Connection

DEFAULT_MONGODB_HOST = 'localhost'
DEFAULT_MONGODB_PORT = 27017
DEFAULT_MONGODB_NAME = 'moocng'
DEFAULT_MONGODB_URI = 'mongodb://%s:%d/%s' % (DEFAULT_MONGODB_HOST,
                                              DEFAULT_MONGODB_PORT,
                                              DEFAULT_MONGODB_NAME)


class MongoDB(object):

    def __init__(self, db_uri=DEFAULT_MONGODB_URI,
                 connection_factory=Connection):
        self.db_uri = urlparse.urlparse(db_uri)
        self.connection = connection_factory(
            host=self.db_uri.hostname or DEFAULT_MONGODB_HOST,
            port=self.db_uri.port or DEFAULT_MONGODB_PORT)

        if self.db_uri.path:
            self.database_name = self.db_uri.path[1:]
        else:
            self.database_name = DEFAULT_MONGODB_NAME

        self.database = self.get_database()

    def get_connection(self):
        return self.connection

    def get_database(self):
        database = self.connection[self.database_name]
        if self.db_uri.username and self.db_uri.password:
            database.authenticate(self.db_uri.username, self.db_uri.password)
        return database

    def get_collection(self, collection):
        return self.database[collection]


def get_db():
    try:
        db_uri = settings.MONGODB_URI
    except AttributeError:
        raise ImproperlyConfigured('Missing required MONGODB_URI setting')

    return MongoDB(db_uri)
Python
0.000001
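A minimal sketch of the threading.local pattern introduced in get_db(): each thread gets its own attribute namespace, so a connection cached there is reused within a thread but never shared across threads (expensive_connect is a hypothetical factory):

import threading

_state = threading.local()

def get_connection():
    # First call in each thread creates the connection; later calls reuse it.
    if not hasattr(_state, 'conn'):
        _state.conn = expensive_connect()  # hypothetical
    return _state.conn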
aa9143302b376e1274c8c11b53687771d0444b5a
Remove now-unused isInt code
morss/__main__.py
morss/__main__.py
# ran on `python -m morss`

import os
import sys

from . import wsgi
from . import cli

from .morss import MorssException

import wsgiref.simple_server
import wsgiref.handlers


PORT = int(os.getenv('PORT', 8080))


def main():
    if 'REQUEST_URI' in os.environ:
        # mod_cgi (w/o file handler)
        app = wsgi.cgi_app

        app = wsgi.cgi_dispatcher(app)
        app = wsgi.cgi_error_handler(app)
        app = wsgi.cgi_encode(app)

        wsgiref.handlers.CGIHandler().run(app)

    elif len(sys.argv) <= 1:
        # start internal (basic) http server (w/ file handler)
        app = wsgi.cgi_app

        app = wsgi.cgi_file_handler(app)
        app = wsgi.cgi_dispatcher(app)
        app = wsgi.cgi_error_handler(app)
        app = wsgi.cgi_encode(app)

        print('Serving http://localhost:%s/' % port)
        httpd = wsgiref.simple_server.make_server('', PORT, app)
        httpd.serve_forever()

    else:
        # as a CLI app
        try:
            cli.cli_app()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            print('ERROR: %s' % e.message)


if __name__ == '__main__':
    main()
# ran on `python -m morss`

import os
import sys

from . import wsgi
from . import cli

from .morss import MorssException

import wsgiref.simple_server
import wsgiref.handlers


PORT = int(os.getenv('PORT', 8080))


def isInt(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def main():
    if 'REQUEST_URI' in os.environ:
        # mod_cgi (w/o file handler)
        app = wsgi.cgi_app

        app = wsgi.cgi_dispatcher(app)
        app = wsgi.cgi_error_handler(app)
        app = wsgi.cgi_encode(app)

        wsgiref.handlers.CGIHandler().run(app)

    elif len(sys.argv) <= 1:
        # start internal (basic) http server (w/ file handler)
        app = wsgi.cgi_app

        app = wsgi.cgi_file_handler(app)
        app = wsgi.cgi_dispatcher(app)
        app = wsgi.cgi_error_handler(app)
        app = wsgi.cgi_encode(app)

        print('Serving http://localhost:%s/' % port)
        httpd = wsgiref.simple_server.make_server('', PORT, app)
        httpd.serve_forever()

    else:
        # as a CLI app
        try:
            cli.cli_app()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            print('ERROR: %s' % e.message)


if __name__ == '__main__':
    main()
Python
0.000444
f021922dec168a4bb97516eb6b7a7ca5fe3bfb96
Use HostAddressOpt for opts that accept IP and hostnames
ironic/conf/api.py
ironic/conf/api.py
# Copyright 2016 Intel Corporation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from ironic.common.i18n import _

opts = [
    cfg.HostAddressOpt('host_ip',
                       default='0.0.0.0',
                       help=_('The IP address or hostname on which '
                              'ironic-api listens.')),
    cfg.PortOpt('port',
                default=6385,
                help=_('The TCP port on which ironic-api listens.')),
    cfg.IntOpt('max_limit',
               default=1000,
               help=_('The maximum number of items returned in a single '
                      'response from a collection resource.')),
    cfg.StrOpt('public_endpoint',
               help=_("Public URL to use when building the links to the API "
                      "resources (for example, \"https://ironic.rocks:6384\")."
                      " If None the links will be built using the request's "
                      "host URL. If the API is operating behind a proxy, you "
                      "will want to change this to represent the proxy's URL. "
                      "Defaults to None.")),
    cfg.IntOpt('api_workers',
               help=_('Number of workers for OpenStack Ironic API service. '
                      'The default is equal to the number of CPUs available '
                      'if that can be determined, else a default worker '
                      'count of 1 is returned.')),
    cfg.BoolOpt('enable_ssl_api',
                default=False,
                help=_("Enable the integrated stand-alone API to service "
                       "requests via HTTPS instead of HTTP. If there is a "
                       "front-end service performing HTTPS offloading from "
                       "the service, this option should be False; note, you "
                       "will want to change public API endpoint to represent "
                       "SSL termination URL with 'public_endpoint' option.")),
    cfg.BoolOpt('restrict_lookup',
                default=True,
                help=_('Whether to restrict the lookup API to only nodes '
                       'in certain states.')),
    cfg.IntOpt('ramdisk_heartbeat_timeout',
               default=300,
               deprecated_group='agent',
               deprecated_name='heartbeat_timeout',
               help=_('Maximum interval (in seconds) for agent heartbeats.')),
]

opt_group = cfg.OptGroup(name='api',
                         title='Options for the ironic-api service')


def register_opts(conf):
    conf.register_group(opt_group)
    conf.register_opts(opts, group=opt_group)
# Copyright 2016 Intel Corporation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from ironic.common.i18n import _

opts = [
    cfg.StrOpt('host_ip',
               default='0.0.0.0',
               help=_('The IP address on which ironic-api listens.')),
    cfg.PortOpt('port',
                default=6385,
                help=_('The TCP port on which ironic-api listens.')),
    cfg.IntOpt('max_limit',
               default=1000,
               help=_('The maximum number of items returned in a single '
                      'response from a collection resource.')),
    cfg.StrOpt('public_endpoint',
               help=_("Public URL to use when building the links to the API "
                      "resources (for example, \"https://ironic.rocks:6384\")."
                      " If None the links will be built using the request's "
                      "host URL. If the API is operating behind a proxy, you "
                      "will want to change this to represent the proxy's URL. "
                      "Defaults to None.")),
    cfg.IntOpt('api_workers',
               help=_('Number of workers for OpenStack Ironic API service. '
                      'The default is equal to the number of CPUs available '
                      'if that can be determined, else a default worker '
                      'count of 1 is returned.')),
    cfg.BoolOpt('enable_ssl_api',
                default=False,
                help=_("Enable the integrated stand-alone API to service "
                       "requests via HTTPS instead of HTTP. If there is a "
                       "front-end service performing HTTPS offloading from "
                       "the service, this option should be False; note, you "
                       "will want to change public API endpoint to represent "
                       "SSL termination URL with 'public_endpoint' option.")),
    cfg.BoolOpt('restrict_lookup',
                default=True,
                help=_('Whether to restrict the lookup API to only nodes '
                       'in certain states.')),
    cfg.IntOpt('ramdisk_heartbeat_timeout',
               default=300,
               deprecated_group='agent',
               deprecated_name='heartbeat_timeout',
               help=_('Maximum interval (in seconds) for agent heartbeats.')),
]

opt_group = cfg.OptGroup(name='api',
                         title='Options for the ironic-api service')


def register_opts(conf):
    conf.register_group(opt_group)
    conf.register_opts(opts, group=opt_group)
Python
0.000094
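For context, a sketch of the option-type swap (HostAddressOpt is one of oslo.config's documented option types; the exact validation behavior depends on the installed oslo.config version): StrOpt accepts any string, while HostAddressOpt validates that the value parses as an IP address or hostname.

from oslo_config import cfg

# StrOpt would accept e.g. 'not a host!'; HostAddressOpt should reject it.
opt = cfg.HostAddressOpt('host_ip', default='0.0.0.0',
                         help='IP address or hostname on which to listen')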
2159e6c2b550367d456e3d743b7757c59636a3c7
update dictionary on ramiro's desktop to use starmon data
pycqed/init/config/setup_dict.py
pycqed/init/config/setup_dict.py
# Dictionaries used in setup.

mac_dict = {'203178706891063': 'CDickel_Desktop',
            '203308017140376': 'Adriaans_Macbook',
            '963460802314': 'La_Ferrari',
            '46390847648': 'La_Maserati',
            '215977245841658': 'La_Maserati_JrJr',
            '13795386264098': 'Serwans_Laptop',
            '963460956772': 'La_Ducati',
            '203050745808564': 'La_Ducati_Jr',
            '57277341811788': 'Simulation_PC',
            '272774795670508': 'Nathans_Laptop',
            '46390847630': 'tud276606_FPGA_PC',
            '198690273946987': 'Bart_Laptop',
            '167746772205643': 'NuovaFerrari',
            '167746772714689': 'Xiang_PC',
            '180725258210527': 'Niels_macbook',
            '109952948723616': 'Ramiro_Desktop',
            '215977245834050': 'Ramiro_Desktop',
            '31054844829911': 'Sjoerd_laptop'
            }

data_dir_dict = {'tud276606_FPGA_PC': 'D:\Experiments/CBox_Testing/Data',
                 'CDickel_Desktop': 'D:\Experiments/ExperimentName/Data',
                 'Sjoerd_laptop': 'D:\data',
                 # 'Adriaans_Macbook': ('/Users/Adriaan/Dropbox/PhD-Delft/' +
                 #     'DataFolders/ExperimentalDataTUD277620/CBox_Testing/Data'),
                 'Adriaans_Macbook': ('/Users/Adriaan/Documents/Testing/Data'),
                 'Niels_macbook': '/Users/nbultink/temp_data',
                 'La_Ferrari': 'D:\Experiments/1511_RabiSims2/Data',
                 'La_Maserati': 'D:\Experiments/JJO-type_IV/Data',
                 # 'La_Maserati_JrJr': 'D:\\Experimentsp7_Qcodes_5qubit\data',
                 'La_Maserati_JrJr': 'D:\\Experiments\\1607_Qcodes_5qubit\\data',
                 'Xiang_PC': 'D:\PycQED\data',
                 'Serwans_Laptop': 'W:/tnw/NS/qt/Serwan/MuxMon/',
                 'La_Ducati': 'D:\Experiments/Simultaneous_Driving/Data',
                 'La_Ducati_Jr': 'D:\Experiments/MixerCalibrations/data',
                 'Simulation_PC': 'D:\Experiments/testSingleShotFidelityAnalysis/Data',
                 # 'Ramiro_Desktop': r'\\131.180.82.81\\data',
                 'Ramiro_Desktop': r'\\131.180.82.190\\Experiments\\1611_Starmon\\Data',
                 # 'Ramiro_Desktop': r'\\131.180.82.237\\1511_RabiSims2\\Data',
                 r'Nathans_Laptop': 'D:/nlangford\My Documents\Projects\Rabi Model\Experiment_1504\Data',
                 'Bart_Laptop': 'C:\Experiments/NumericalOptimization/Data'
                 }
# Dictionaries used in setup.

mac_dict = {'203178706891063': 'CDickel_Desktop',
            '203308017140376': 'Adriaans_Macbook',
            '963460802314': 'La_Ferrari',
            '46390847648': 'La_Maserati',
            '215977245841658': 'La_Maserati_JrJr',
            '13795386264098': 'Serwans_Laptop',
            '963460956772': 'La_Ducati',
            '203050745808564': 'La_Ducati_Jr',
            '57277341811788': 'Simulation_PC',
            '272774795670508': 'Nathans_Laptop',
            '46390847630': 'tud276606_FPGA_PC',
            '198690273946987': 'Bart_Laptop',
            '167746772205643': 'NuovaFerrari',
            '167746772714689': 'Xiang_PC',
            '180725258210527': 'Niels_macbook',
            '109952948723616': 'Ramiro_Desktop',
            '215977245834050': 'Ramiro_Desktop',
            '31054844829911': 'Sjoerd_laptop'
            }

data_dir_dict = {'tud276606_FPGA_PC': 'D:\Experiments/CBox_Testing/Data',
                 'CDickel_Desktop': 'D:\Experiments/ExperimentName/Data',
                 'Sjoerd_laptop': 'D:\data',
                 # 'Adriaans_Macbook': ('/Users/Adriaan/Dropbox/PhD-Delft/' +
                 #     'DataFolders/ExperimentalDataTUD277620/CBox_Testing/Data'),
                 'Adriaans_Macbook': ('/Users/Adriaan/Documents/Testing/Data'),
                 'Niels_macbook': '/Users/nbultink/temp_data',
                 'La_Ferrari': 'D:\Experiments/1511_RabiSims2/Data',
                 'La_Maserati': 'D:\Experiments/JJO-type_IV/Data',
                 # 'La_Maserati_JrJr': 'D:\\Experimentsp7_Qcodes_5qubit\data',
                 'La_Maserati_JrJr': 'D:\\Experiments\\1607_Qcodes_5qubit\\data',
                 'Xiang_PC': 'D:\PycQED\data',
                 'Serwans_Laptop': 'W:/tnw/NS/qt/Serwan/MuxMon/',
                 'La_Ducati': 'D:\Experiments/Simultaneous_Driving/Data',
                 'La_Ducati_Jr': 'D:\Experiments/MixerCalibrations/data',
                 'Simulation_PC': 'D:\Experiments/testSingleShotFidelityAnalysis/Data',
                 'Ramiro_Desktop': r'\\131.180.82.81\\data',
                 # 'Ramiro_Desktop': r'\\131.180.82.237\\1511_RabiSims2\\Data',
                 r'Nathans_Laptop': 'D:/nlangford\My Documents\Projects\Rabi Model\Experiment_1504\Data',
                 'Bart_Laptop': 'C:\Experiments/NumericalOptimization/Data'
                 }
Python
0
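One latent hazard in the dictionaries above, shown as a sketch: the unescaped backslashes in the non-raw Windows path strings only survive because sequences like \E are not recognized string escapes; a path containing a real escape would silently corrupt, which raw strings avoid.

p1 = 'D:\Experiments/CBox_Testing/Data'  # works by luck: \E is not an escape
p2 = 'D:\new_data'                       # broken: \n becomes a newline
p3 = r'D:\new_data'                      # raw string keeps the backslash
assert '\n' in p2 and '\n' not in p3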
3927fd757ff404af61e609cc1728d1f3fe398230
Fix on error text.
mp3datastorage.py
mp3datastorage.py
#store file attributes component
import sqlite3 as sql
import os
import mp3metadata
#TODO add directory of the database
#Allow database recognition and resetting the database

class SQLmgr:
    def __init__(self, username):
        #note everytime function is called MusicData table is dropped!
        self.serv = False
        self.errors = open("error.txt", "w")
        self.servcount = 1
        db = username + ".db"
        self.db = db
        if self.db in os.listdir("."):
            #database already exists
            pass
        else:
            try:
                serv = sql.connect(db)
                with serv:
                    self.serv = serv.cursor()
                    self.serv.execute("DROP TABLE IF EXISTS MusicData")
                    self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)")
                    self.serv.close()
            except sql.Error, e:
                print "Error executing SQL table. ", e.args[0]
                return 1

    def wipe_database(self, username):
        self.db = username + ".db"
        try:
            serv = sql.connect(db)
            with serv:
                self.serv = serv.cursor()
                self.serv.execute("DROP TABLE IF EXISTS MusicData")
                self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)")
                self.serv.close()
        except sql.Error, e:
            print "Error wiping database."
            return 1

    def add_db(self, case):
        try:
            with sql.connect(self.db) as serv:
                self.serv = serv.cursor()
                self.serv.execute("INSERT INTO MusicData VALUES (?, ?, ?, ?, ?);", case)
                self.servcount += 1
                self.serv.close()
        except sql.Error, e:
            self.errors.write(str(case[-1]))

    def addmp3todb(self, filetup):
        try:
            case = []
            case.append(self.servcount)
            for h, j in filetup[1].items():
                if h in ["ALBUM", "ARTIST", "TITLE"]:
                    case.append(j)
            case.append(filetup[0])
            self.add_db(tuple(case))
        except:
            self.errors.write("Error writing: " + filetup[1])

    def add_test(self, filedir):
        try:
            tester = mp3metadata.mp3data().returnobj()
            case = []
            case.append(self.servcount)
            #tuple pairings will proceed in this order.
            for k, v in tester.items():
                if k in ["ALBUM", "ARTIST", "TITLE"]:
                    case.append(v)
            case.append(filedir)
            self.add_db(tuple(case))
            return 0
        except sql.Error, e:
            print e.args[0]
            return 1
#store file attributes component
import sqlite3 as sql
import os
import mp3metadata
#TODO add directory of the database
#Allow database recognition and resetting the database

class SQLmgr:
    def __init__(self, username):
        #note everytime function is called MusicData table is dropped!
        self.serv = False
        self.errors = open("error.txt", "w")
        self.servcount = 1
        db = username + ".db"
        self.db = db
        if self.db in os.listdir("."):
            #database already exists
            pass
        else:
            try:
                serv = sql.connect(db)
                with serv:
                    self.serv = serv.cursor()
                    self.serv.execute("DROP TABLE IF EXISTS MusicData")
                    self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)")
                    self.serv.close()
            except sql.Error, e:
                print "Error executing SQL table. ", e.args[0]
                return 1

    def wipe_database(self, username):
        self.db = username + ".db"
        try:
            serv = sql.connect(db)
            with serv:
                self.serv = serv.cursor()
                self.serv.execute("DROP TABLE IF EXISTS MusicData")
                self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)")
                self.serv.close()
        except sql.Error, e:
            print "Error wiping database."
            return 1

    def add_db(self, case):
        try:
            with sql.connect(self.db) as serv:
                self.serv = serv.cursor()
                self.serv.execute("INSERT INTO MusicData VALUES (?, ?, ?, ?, ?);", case)
                self.servcount += 1
                self.serv.close()
        except sql.Error, e:
            errors.write(str(case[-1]))

    def addmp3todb(self, filetup):
        try:
            case = []
            case.append(self.servcount)
            for h, j in filetup[1].items():
                if h in ["ALBUM", "ARTIST", "TITLE"]:
                    case.append(j)
            case.append(filetup[0])
            self.add_db(tuple(case))
        except:
            errors.write("Error writing: " + filetup[1])

    def add_test(self, filedir):
        try:
            tester = mp3metadata.mp3data().returnobj()
            case = []
            case.append(self.servcount)
            #tuple pairings will proceed in this order.
            for k, v in tester.items():
                if k in ["ALBUM", "ARTIST", "TITLE"]:
                    case.append(v)
            case.append(filedir)
            self.add_db(tuple(case))
            return 0
        except sql.Error, e:
            print e.args[0]
            return 1
Python
0
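The fix here is purely a scoping one; a minimal repro of the bug being corrected (class name hypothetical):

class Logger:
    def __init__(self):
        self.errors = open("error.txt", "w")

    def broken(self):
        errors.write("x")       # NameError: no local or global 'errors'

    def fixed(self):
        self.errors.write("x")  # resolves via the instance attribute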
73529579a6abaf1b33e6135d4abaa2c892dbfa3c
exit with retcode when called directly
kcheck/command.py
kcheck/command.py
#!/usr/bin/env python3
"""
Entry point for utility, option handling.
"""


def main() -> int:
    """
    Entry point for command line utility.

    :return: integer for return code of command line
    """
    import configargparse
    import importlib
    import logging
    import platform
    from configparser import DuplicateOptionError
    import kcheck

    parser = configargparse.ArgumentParser(
        add_config_file_help=True,
        default_config_files=['/etc/kcheck.conf'],
        ignore_unknown_config_file_keys=True,
        formatter_class=lambda prog: configargparse.HelpFormatter(prog,max_help_position=35)
    )

    parser.add_argument('--config', '-c', is_config_file=True, help='kcheck config file')
    parser.add_argument('--kernel', '-k', help='kernel config file', default='/usr/src/linux/.config')
    parser.add_argument('--logfile', '-l', help='file to write logging into')
    parser.add_argument('--verbose', '-v', help='Output extra information', action='count', default=2)
    parser.add_argument('--version', '-V', help='Print version information and exit', action='store_true')

    # subparsers = parser.add_subparsers(help='commands')
    #
    # gen_parser = subparsers.add_parser('genconfig', help='Generate config requirements from installed packages')
    # gen_parser.add_argument('-l', '--list', help='list available package manager integrations', action='store_true')
    # gen_parser.add_argument('-m', '--manager', help='Package manager', choices=kcheck.ALLOWED_PKGMGR, default='portage')
    # gen_parser.set_defaults(mode='genconfig')

    args = parser.parse_args()

    ## set up logging ##
    # logging output level
    log_level = 50 - (args.verbose * 10)

    # format and handler
    if args.logfile:
        logHandler = logging.FileHandler(args.logfile)
        logHandler.setFormatter(logging.Formatter("%(asctime)s [%(name)s] [%(levelname)-5.5s] %(message)s"))
    else:
        logHandler = logging.NullHandler()

    logging.basicConfig(level=log_level, handlers=[logHandler])

    # initialise logger and log basics
    log = logging.getLogger('main')
    log.info('kcheck %s' % kcheck.__version__)
    [log.debug(line) for line in parser.format_values().splitlines()]

    if args.version:
        print('kcheck %s (Python %s)' % (kcheck.__version__, platform.python_version()))
        return 0

    if 'mode' in args:
        if args.mode == 'genconfig':
            if args.list:
                print('The following package managers can be used for generating required kernel configurations')
                [print(' ', p) for p in kcheck.ALLOWED_PKGMGR]
                return 0

            # get the module name for the package manager, import and hand over
            module = 'kcheck.' + args.manager
            log.debug('Loading module %s' % module)
            try:
                package_manager = importlib.import_module(module)
            except ImportError as exception:
                log.critical("Unable to load module for package manager %s" % module)
                log.exception(exception)
                return -1

            return package_manager.generate_config(args)

    else:
        # no "mode", so run kcheck
        import kcheck.checker
        try:
            return kcheck.checker.check_config(args.config, args.kernel)
        except DuplicateOptionError:
            print('Your config file has duplicate keys in a section.')
            if args.logfile:
                print('See the log file %s for more details' % args.logfile)
            print('Correct your config file and try running this again.')
            return -2


if __name__ == '__main__':
    exit(main())
#!/usr/bin/env python3
"""
Entry point for utility, option handling.
"""


def main() -> int:
    """
    Entry point for command line utility.

    :return: integer for return code of command line
    """
    import configargparse
    import importlib
    import logging
    import platform
    from configparser import DuplicateOptionError
    import kcheck

    parser = configargparse.ArgumentParser(
        add_config_file_help=True,
        default_config_files=['/etc/kcheck.conf'],
        ignore_unknown_config_file_keys=True,
        formatter_class=lambda prog: configargparse.HelpFormatter(prog,max_help_position=35)
    )

    parser.add_argument('--config', '-c', is_config_file=True, help='kcheck config file')
    parser.add_argument('--kernel', '-k', help='kernel config file', default='/usr/src/linux/.config')
    parser.add_argument('--logfile', '-l', help='file to write logging into')
    parser.add_argument('--verbose', '-v', help='Output extra information', action='count', default=2)
    parser.add_argument('--version', '-V', help='Print version information and exit', action='store_true')

    # subparsers = parser.add_subparsers(help='commands')
    #
    # gen_parser = subparsers.add_parser('genconfig', help='Generate config requirements from installed packages')
    # gen_parser.add_argument('-l', '--list', help='list available package manager integrations', action='store_true')
    # gen_parser.add_argument('-m', '--manager', help='Package manager', choices=kcheck.ALLOWED_PKGMGR, default='portage')
    # gen_parser.set_defaults(mode='genconfig')

    args = parser.parse_args()

    ## set up logging ##
    # logging output level
    log_level = 50 - (args.verbose * 10)

    # format and handler
    if args.logfile:
        logHandler = logging.FileHandler(args.logfile)
        logHandler.setFormatter(logging.Formatter("%(asctime)s [%(name)s] [%(levelname)-5.5s] %(message)s"))
    else:
        logHandler = logging.NullHandler()

    logging.basicConfig(level=log_level, handlers=[logHandler])

    # initialise logger and log basics
    log = logging.getLogger('main')
    log.info('kcheck %s' % kcheck.__version__)
    [log.debug(line) for line in parser.format_values().splitlines()]

    if args.version:
        print('kcheck %s (Python %s)' % (kcheck.__version__, platform.python_version()))
        return 0

    if 'mode' in args:
        if args.mode == 'genconfig':
            if args.list:
                print('The following package managers can be used for generating required kernel configurations')
                [print(' ', p) for p in kcheck.ALLOWED_PKGMGR]
                return 0

            # get the module name for the package manager, import and hand over
            module = 'kcheck.' + args.manager
            log.debug('Loading module %s' % module)
            try:
                package_manager = importlib.import_module(module)
            except ImportError as exception:
                log.critical("Unable to load module for package manager %s" % module)
                log.exception(exception)
                return -1

            return package_manager.generate_config(args)

    else:
        # no "mode", so run kcheck
        import kcheck.checker
        try:
            return kcheck.checker.check_config(args.config, args.kernel)
        except DuplicateOptionError:
            print('Your config file has duplicate keys in a section.')
            if args.logfile:
                print('See the log file %s for more details' % args.logfile)
            print('Correct your config file and try running this again.')
            return -2


if __name__ == '__main__':
    main()
Python
0
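A sketch of the pattern this commit adopts: return an int from main() and hand it to exit() so the value becomes the process return code when the script runs directly (the row above uses the builtin exit(); sys.exit() is the more conventional spelling):

import sys

def main() -> int:
    return 2  # nonzero conventionally signals failure

if __name__ == '__main__':
    sys.exit(main())  # the shell now sees 2 in $?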
2d92e69d00a7419a23bcb38ab7c55ccc533237df
Fix artwork size
itunescli/query.py
itunescli/query.py
import logging

from cliff.command import Command
from cliff.lister import Lister
from cliff.show import ShowOne
import itunes


class ITunesSearchBase(object):
    MEDIA_TYPES = frozenset([
        'movie', 'podcast', 'music', 'musicVideo', 'audiobook',
        'shortFilm', 'tvShow', 'tvSeason', 'software', 'ebook', 'all',
    ])

    def config_parser(self, parser):
        parser.add_argument('query', metavar='SEARCH_QUERY')
        parser.add_argument('--country', default='US', type=str)
        parser.add_argument('--media', default='all', choices=self.MEDIA_TYPES)
        parser.add_argument('--entity', default=None)
        return parser

    def artwork_url(self, artwork):
        """Return the largest artwork URL possible"""
        return artwork['100'].replace('.100x100-75', '.300x300-75')


class SearchLister(Lister, ITunesSearchBase):
    """Search iTunes"""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(SearchLister, self).get_parser(prog_name)
        parser = self.config_parser(parser)
        parser.add_argument('--limit', default=100, type=int)
        return parser

    def get_data(self, parsed_args):
        results = itunes.Search(query=parsed_args.query,
                                limit=parsed_args.limit,
                                country=parsed_args.country,
                                entity=parsed_args.entity,
                                media=parsed_args.media).get()
        return (('name', 'url', 'genre', 'release_date', 'artwork', 'type'),
                ((n.get_name(), n.get_url(), n.get_genre(),
                  n.get_release_date(), self.artwork_url(n.get_artwork()),
                  n.type) for n in results)
                )


class SearchOne(ShowOne, ITunesSearchBase):
    """Show the first result from a search query"""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(SearchOne, self).get_parser(prog_name)
        parser = self.config_parser(parser)
        return parser

    def get_data(self, parsed_args):
        results = itunes.Search(query=parsed_args.query, limit=1,
                                country=parsed_args.country,
                                entity=parsed_args.entity,
                                media=parsed_args.media).get()
        result = results[0]
        columns = ('name', 'url', 'genre', 'release_date', 'artwork', 'type')
        data = (
            result.get_name(),
            result.get_url(),
            result.get_genre(),
            result.get_release_date(),
            self.artwork_url(result.get_artwork()),
            result.type
        )
        return (columns, data)


class GetArtwork(Command, ITunesSearchBase):
    """Get the album artwork from the first result of a query"""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(GetArtwork, self).get_parser(prog_name)
        parser = self.config_parser(parser)
        return parser

    def run(self, parsed_args):
        results = itunes.Search(query=parsed_args.query, limit=1,
                                country=parsed_args.country,
                                entity=parsed_args.entity,
                                media=parsed_args.media).get()
        all_artwork = results[0].get_artwork()
        artwork_url = self.artwork_url(all_artwork)
        self.app.stdout.write("%s\n" % artwork_url)
import logging

from cliff.command import Command
from cliff.lister import Lister
from cliff.show import ShowOne
import itunes


class ITunesSearchBase(object):
    MEDIA_TYPES = frozenset([
        'movie', 'podcast', 'music', 'musicVideo', 'audiobook',
        'shortFilm', 'tvShow', 'tvSeason', 'software', 'ebook', 'all',
    ])

    def config_parser(self, parser):
        parser.add_argument('query', metavar='SEARCH_QUERY')
        parser.add_argument('--country', default='US', type=str)
        parser.add_argument('--media', default='all', choices=self.MEDIA_TYPES)
        parser.add_argument('--entity', default=None)
        return parser

    def artwork_url(self, artwork):
        """Return the largest artwork URL possible"""
        return artwork['100'].replace('.100x100-75', '.400x400-75')


class SearchLister(Lister, ITunesSearchBase):
    """Search iTunes"""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(SearchLister, self).get_parser(prog_name)
        parser = self.config_parser(parser)
        parser.add_argument('--limit', default=100, type=int)
        return parser

    def get_data(self, parsed_args):
        results = itunes.Search(query=parsed_args.query,
                                limit=parsed_args.limit,
                                country=parsed_args.country,
                                entity=parsed_args.entity,
                                media=parsed_args.media).get()
        return (('name', 'url', 'genre', 'release_date', 'artwork', 'type'),
                ((n.get_name(), n.get_url(), n.get_genre(),
                  n.get_release_date(), self.artwork_url(n.get_artwork()),
                  n.type) for n in results)
                )


class SearchOne(ShowOne, ITunesSearchBase):
    """Show the first result from a search query"""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(SearchOne, self).get_parser(prog_name)
        parser = self.config_parser(parser)
        return parser

    def get_data(self, parsed_args):
        results = itunes.Search(query=parsed_args.query, limit=1,
                                country=parsed_args.country,
                                entity=parsed_args.entity,
                                media=parsed_args.media).get()
        result = results[0]
        columns = ('name', 'url', 'genre', 'release_date', 'artwork', 'type')
        data = (
            result.get_name(),
            result.get_url(),
            result.get_genre(),
            result.get_release_date(),
            self.artwork_url(result.get_artwork()),
            result.type
        )
        return (columns, data)


class GetArtwork(Command, ITunesSearchBase):
    """Get the album artwork from the first result of a query"""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(GetArtwork, self).get_parser(prog_name)
        parser = self.config_parser(parser)
        return parser

    def run(self, parsed_args):
        results = itunes.Search(query=parsed_args.query, limit=1,
                                country=parsed_args.country,
                                entity=parsed_args.entity,
                                media=parsed_args.media).get()
        all_artwork = results[0].get_artwork()
        artwork_url = self.artwork_url(all_artwork)
        self.app.stdout.write("%s\n" % artwork_url)
Python
0.000001
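The artwork helper relies on iTunes serving several raster sizes under URL variants; a sketch of the substitution (the set of available sizes is an assumption; the commit suggests the 400x400 variant was not reliably served, hence the revert to 300x300):

url = 'https://example.mzstatic.com/cover.100x100-75.jpg'  # hypothetical URL
larger = url.replace('.100x100-75', '.300x300-75')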
e1e25bc1166efa9a39fdf769f1081fafd08dd937
handle unknown source country, add recovered
pyfibot/modules/module_korona.py
pyfibot/modules/module_korona.py
# -*- coding: utf-8 -*-
"""
Koronavirus statistics from HS.fi open data
https://github.com/HS-Datadesk/koronavirus-avoindata
"""

from __future__ import unicode_literals, print_function, division
from collections import Counter


def init(bot):
    global lang
    config = bot.config.get("module_posti", {})
    lang = config.get("language", "en")


def command_korona(bot, user, channel, args):
    """Get latest info about COVID-19 in Finland (Source: https://github.com/HS-Datadesk/koronavirus-avoindata )"""
    url = "https://w3qa5ydb4l.execute-api.eu-west-1.amazonaws.com/prod/finnishCoronaData"
    try:
        r = bot.get_url(url)
        data = r.json()
    except Exception as e:
        bot.say(
            channel,
            "Error while getting data.",
        )
        raise e

    msg = "[COVID-19] Vahvistettuja tapauksia: %s Kuolleita: %s Parantunut: %s" % (
        len(data['confirmed']), len(data['deaths']), len(data['recovered']))

    # top5 infection sources
    top5 = Counter(map(lambda x: x['infectionSourceCountry'], data['confirmed'])).most_common(5)
    msg = msg + " | Top5 lähdemaat: "
    topstr = []
    for country, count in top5:
        if country == None:
            country = "N/A"
        topstr.append(country + ":" + str(count))
    msg = msg + " ".join(topstr)

    bot.say(channel, msg)
# -*- coding: utf-8 -*-
"""
Koronavirus statistics from HS.fi open data
https://github.com/HS-Datadesk/koronavirus-avoindata
"""

from __future__ import unicode_literals, print_function, division
from collections import Counter


def init(bot):
    global lang
    config = bot.config.get("module_posti", {})
    lang = config.get("language", "en")


def command_korona(bot, user, channel, args):
    """Get latest info about COVID-19 in Finland (Source: https://github.com/HS-Datadesk/koronavirus-avoindata )"""
    url = "https://w3qa5ydb4l.execute-api.eu-west-1.amazonaws.com/prod/finnishCoronaData"
    try:
        r = bot.get_url(url)
        data = r.json()
    except Exception as e:
        bot.say(
            channel,
            "Error while getting data.",
        )
        raise e

    msg = "[COVID-19] Vahvistettuja tapauksia: %s Kuolleita: %s" % (
        len(data['confirmed']), len(data['deaths']))

    # top5 infection sources
    top5 = Counter(map(lambda x: x['infectionSourceCountry'], data['confirmed'])).most_common(5)
    msg = msg + " | Top5 lähdemaat: "
    topstr = []
    for country, count in top5:
        topstr.append(country + ":" + str(count))
    msg = msg + " ".join(topstr)

    bot.say(channel, msg)
Python
0
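A sketch of the failure mode being handled: when infectionSourceCountry is null in the JSON, Counter happily counts a None key, but concatenating None with a string raises TypeError; hence the "N/A" substitution.

from collections import Counter

top = Counter([None, 'CHN', 'ITA', None]).most_common(2)  # [(None, 2), ...]
labels = [(c if c is not None else "N/A") + ":" + str(n) for c, n in top]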
13f26d9007629be019140aa3bedd5f6fbfefe69b
delete all() method when apply document filter
jellyblog/views.py
jellyblog/views.py
# -*- coding: utf-8 -*-
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from .models import Category, Document
from htmlmin.decorators import minified_response
from .util import get_page_number_range, get_documents, \
    categoryList


def home(request):
    Category.init_category()
    return render(request, 'jellyblog/home.html')


def index(request):
    return index_with_page(request, 1)


@minified_response
def index_with_page(request, page):
    document_list = Document.objects.filter(public_doc=True).order_by('-id')
    paginator = Paginator(document_list, 4)
    documents = get_documents(paginator, page)
    context = {
        'documents': documents,
        'category_list': categoryList,
        'page_range': get_page_number_range(
            paginator, documents
        )
    }
    return render(request, 'jellyblog/index.html', context)


def category_detail(request, category_id):
    return category_with_page(request, category_id, 1)


@minified_response
def category_with_page(request, category_id, page):
    selected_category = Category.objects.get(id=category_id)
    document_list = []
    if selected_category.parent.id == 1:
        # Check whether the category is a top-level category; if it is,
        # also collect the documents of its child categories
        children = Category.objects.all().filter(parent=selected_category.id)
        for child in children:
            document_list += Document.objects.all() \
                .filter(category_id=child.id, public_doc=True)
    document_list += Document.objects.all().filter(
        category=category_id, public_doc=True)
    document_list.sort(key=lambda x: x.pk, reverse=True)
    paginator = Paginator(document_list, 4)
    documents = get_documents(paginator, page)
    context = {
        'documents': documents,
        'category_list': categoryList,
        'category_id': category_id,
        'page_range': get_page_number_range(
            paginator, documents),
        'category_name': selected_category.name,
    }
    return render(request, 'jellyblog/category.html', context)


@minified_response
def detail(request, document_id):
    document = get_object_or_404(Document, pk=document_id)
    document.read()
    return render(request, 'jellyblog/detail.html',
                  {'document': document, 'category_list': categoryList})
# -*- coding: utf-8 -*-
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from .models import Category, Document
from htmlmin.decorators import minified_response
from .util import get_page_number_range, get_documents, \
    categoryList


def home(request):
    Category.init_category()
    return render(request, 'jellyblog/home.html')


def index(request):
    return index_with_page(request, 1)


@minified_response
def index_with_page(request, page):
    document_list = Document.objects.all().filter(public_doc=True).order_by('-id')
    paginator = Paginator(document_list, 4)
    documents = get_documents(paginator, page)
    context = {
        'documents': documents,
        'category_list': categoryList,
        'page_range': get_page_number_range(
            paginator, documents
        )
    }
    return render(request, 'jellyblog/index.html', context)


def category_detail(request, category_id):
    return category_with_page(request, category_id, 1)


@minified_response
def category_with_page(request, category_id, page):
    selected_category = Category.objects.get(id=category_id)
    document_list = []
    if selected_category.parent.id == 1:
        # Check whether the category is a top-level category; if it is,
        # also collect the documents of its child categories
        children = Category.objects.all().filter(parent=selected_category.id)
        for child in children:
            document_list += Document.objects.all() \
                .filter(category_id=child.id, public_doc=True)
    document_list += Document.objects.all().filter(
        category=category_id, public_doc=True)
    document_list.sort(key=lambda x: x.pk, reverse=True)
    paginator = Paginator(document_list, 4)
    documents = get_documents(paginator, page)
    context = {
        'documents': documents,
        'category_list': categoryList,
        'category_id': category_id,
        'page_range': get_page_number_range(
            paginator, documents),
        'category_name': selected_category.name,
    }
    return render(request, 'jellyblog/category.html', context)


@minified_response
def detail(request, document_id):
    document = get_object_or_404(Document, pk=document_id)
    document.read()
    return render(request, 'jellyblog/detail.html',
                  {'document': document, 'category_list': categoryList})
Python
0.000001
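This change is behavior-neutral in Django: .filter() on a manager already returns a fresh QuerySet, so the intermediate .all() adds nothing. A sketch (assuming the Document model above):

# These build the same SQL; all() merely clones the queryset first.
qs1 = Document.objects.all().filter(public_doc=True).order_by('-id')
qs2 = Document.objects.filter(public_doc=True).order_by('-id')
assert str(qs1.query) == str(qs2.query)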
deaee894589a2247b9322ba5cdb94e4c127c35bd
correct docstring for KeyringLocked class
keyring/errors.py
keyring/errors.py
import sys

__metaclass__ = type


class KeyringError(Exception):
    """Base class for exceptions in keyring
    """


class PasswordSetError(KeyringError):
    """Raised when the password can't be set.
    """


class PasswordDeleteError(KeyringError):
    """Raised when the password can't be deleted.
    """


class InitError(KeyringError):
    """Raised when the keyring could not be initialised
    """


class KeyringLocked(KeyringError):
    """Raised when the keyring failed unlocking
    """


class ExceptionRaisedContext:
    """
    An exception-trapping context that indicates whether an exception was
    raised.
    """

    def __init__(self, ExpectedException=Exception):
        self.ExpectedException = ExpectedException
        self.exc_info = None

    def __enter__(self):
        self.exc_info = object.__new__(ExceptionInfo)
        return self.exc_info

    def __exit__(self, *exc_info):
        self.exc_info.__init__(*exc_info)
        return self.exc_info.type and issubclass(
            self.exc_info.type, self.ExpectedException
        )


class ExceptionInfo:
    def __init__(self, *info):
        if not info:
            info = sys.exc_info()
        self.type, self.value, _ = info

    def __bool__(self):
        """
        Return True if an exception occurred
        """
        return bool(self.type)

    __nonzero__ = __bool__
import sys

__metaclass__ = type


class KeyringError(Exception):
    """Base class for exceptions in keyring
    """


class PasswordSetError(KeyringError):
    """Raised when the password can't be set.
    """


class PasswordDeleteError(KeyringError):
    """Raised when the password can't be deleted.
    """


class InitError(KeyringError):
    """Raised when the keyring could not be initialised
    """


class KeyringLocked(KeyringError):
    """Raised when the keyring could not be initialised
    """


class ExceptionRaisedContext:
    """
    An exception-trapping context that indicates whether an exception was
    raised.
    """

    def __init__(self, ExpectedException=Exception):
        self.ExpectedException = ExpectedException
        self.exc_info = None

    def __enter__(self):
        self.exc_info = object.__new__(ExceptionInfo)
        return self.exc_info

    def __exit__(self, *exc_info):
        self.exc_info.__init__(*exc_info)
        return self.exc_info.type and issubclass(
            self.exc_info.type, self.ExpectedException
        )


class ExceptionInfo:
    def __init__(self, *info):
        if not info:
            info = sys.exc_info()
        self.type, self.value, _ = info

    def __bool__(self):
        """
        Return True if an exception occurred
        """
        return bool(self.type)

    __nonzero__ = __bool__
Python
0
15f45377dffa2e267464b38f5f87ffe9526fa8f6
Update support to jax (#585)
tensorboardX/x2num.py
tensorboardX/x2num.py
# DO NOT alter/distruct/free input object !
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging
import numpy as np
import six


def check_nan(array):
    tmp = np.sum(array)
    if np.isnan(tmp) or np.isinf(tmp):
        logging.warning('NaN or Inf found in input tensor.')
    return array


def make_np(x):
    if isinstance(x, list):
        return check_nan(np.array(x))
    if isinstance(x, np.ndarray):
        return check_nan(x)
    if isinstance(x, six.string_types):  # Caffe2 will pass name of blob(s) to fetch
        return check_nan(prepare_caffe2(x))
    if np.isscalar(x):
        return check_nan(np.array([x]))
    if 'torch' in str(type(x)):
        return check_nan(prepare_pytorch(x))
    if 'chainer' in str(type(x)):
        return check_nan(prepare_chainer(x))
    if 'mxnet' in str(type(x)):
        return check_nan(prepare_mxnet(x))
    if 'jax' in str(type(x)):
        return check_nan(np.array(x))
    raise NotImplementedError(
        'Got {}, but expected numpy array or torch tensor.'.format(type(x)))


def prepare_pytorch(x):
    import torch
    if isinstance(x, torch.autograd.Variable):
        x = x.data
    x = x.cpu().numpy()
    return x


def prepare_theano(x):
    import theano
    pass


def prepare_caffe2(x):
    from caffe2.python import workspace
    x = workspace.FetchBlob(x)
    return x


def prepare_mxnet(x):
    x = x.asnumpy()
    return x


def prepare_chainer(x):
    import chainer
    x = chainer.cuda.to_cpu(x.data)
    return x
# DO NOT alter/distruct/free input object !
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging
import numpy as np
import six


def check_nan(array):
    tmp = np.sum(array)
    if np.isnan(tmp) or np.isinf(tmp):
        logging.warning('NaN or Inf found in input tensor.')
    return array


def make_np(x):
    if isinstance(x, list):
        return check_nan(np.array(x))
    if isinstance(x, np.ndarray):
        return check_nan(x)
    if isinstance(x, six.string_types):  # Caffe2 will pass name of blob(s) to fetch
        return check_nan(prepare_caffe2(x))
    if np.isscalar(x):
        return check_nan(np.array([x]))
    if 'torch' in str(type(x)):
        return check_nan(prepare_pytorch(x))
    if 'chainer' in str(type(x)):
        return check_nan(prepare_chainer(x))
    if 'mxnet' in str(type(x)):
        return check_nan(prepare_mxnet(x))
    raise NotImplementedError(
        'Got {}, but expected numpy array or torch tensor.'.format(type(x)))


def prepare_pytorch(x):
    import torch
    if isinstance(x, torch.autograd.Variable):
        x = x.data
    x = x.cpu().numpy()
    return x


def prepare_theano(x):
    import theano
    pass


def prepare_caffe2(x):
    from caffe2.python import workspace
    x = workspace.FetchBlob(x)
    return x


def prepare_mxnet(x):
    x = x.asnumpy()
    return x


def prepare_chainer(x):
    import chainer
    x = chainer.cuda.to_cpu(x.data)
    return x
Python
0
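The new jax branch leans on NumPy's conversion protocol instead of a dedicated prepare_* helper; a sketch (assumes jax is installed and that its arrays implement __array__, which is what np.array() consumes):

import numpy as np
import jax.numpy as jnp

x = jnp.ones((2, 3))   # lives on the default jax device
host = np.array(x)     # copies to host memory as a numpy.ndarray
assert isinstance(host, np.ndarray)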
4e280094687d8c369a1eee3c8b7bb246549898eb
Update utils.py
backend/utils.py
backend/utils.py
from rest_framework.views import exception_handler
from rest_framework.exceptions import APIException, AuthenticationFailed

checks = ['Username not found',
          'Username already exists',
          'Authentication failed']


def custom_exception_handler(exc):
    """
    Exception handler called by all raised exceptions during HTTP requests.

    Return value:
        { "error": "message body" }
    """
    # Debug exceptions
    print 'EXCEPTION DEBUG %s' % exc

    if not isinstance(exc.detail, unicode):
        try:
            # original error message is {'detail': [list of messages]}
            # Get values from dictionary and take first list element
            msg = exc.detail.values()[0][0]
            exc = GenericException(msg)
        except:
            exc = GenericException()

    response = exception_handler(exc)
    if response is not None:
        # Uncomment to add status code in message body
        # response.data['status_code'] = response.status_code
        if response.data['detail']:
            response.data['error'] = response.data['detail']
            del response.data['detail']
    return response


class GenericException(APIException):
    status_code = 400
    default_detail = 'Error encountered'


class UsernameNotFound(APIException):
    status_code = 400
    default_detail = 'Username not found'


class UsernameAlreadyExists(APIException):
    status_code = 400
    default_detail = 'Username already exists'


class AuthenticationFailure(AuthenticationFailed):
    status_code = 401
    default_detail = 'Authentication failed'
from rest_framework.views import exception_handler
from rest_framework.exceptions import APIException, AuthenticationFailed

checks = ['Username not found',
          'Username already exists',
          'Authentication failed']


def custom_exception_handler(exc):
    """
    Exception handler called by all raised exceptions during HTTP requests.

    Return value:
        { "error": "message body" }
    """
    # Debug exceptions
    print 'EXCEPTION DEBUG %s' % exc

    if not isinstance(exc.detail, unicode):
        try:
            # original error message is {'detail': [list of messages]}
            # Get values from dictionary and take first list element
            msg = exc.detail.values()[0][0]
            exc = GenericException(msg)
        except:
            exc = GenericException()

    response = exception_handler(exc)
    if response is not None:
        # Uncomment to add status code in message body
        # response.data['status_code'] = response.status_code
        if response.data['detail']:
            response.data['error'] = response.data['detail']
            del response.data['detail']
    return response


class GenericException(APIException):
    status_code = 400
    default_detail = 'Error encountered'


class UsernameNotFound(APIException):
    status_code = 400
    default_detail = 'Username not found'


class UsernameAlreadyExists(APIException):
    status_code = 400
    default_detail = 'Username already exists'


class AuthenticationFailure(AuthenticationFailed):
    status_code = 401
    default_detail = 'Authentication failed'
Python
0.000001
cd4e7c5bc10c8e946ddf31d99a249a5a97b2dfda
Update get-observations.py
python-files/get-observations.py
python-files/get-observations.py
#!/usr/bin/env python3
"""
Utility to get observations from a SatNOGS Network server.

Collects the paginated objects into a single JSON list
and stores in a file.
"""

import json
import requests

OBSERVATIONS_API = 'https://network.satnogs.org/api/observations'
OBSERVATIONS_JSON = 'observations.json'


def get(url):
    print(url)
    return requests.get(url)


try:
    with open(OBSERVATIONS_JSON) as f:
        data = json.load(f)
        # json.dump() coerces to string keys
        # convert keys back to integers
        observations = {}
        for k,v in data.items():
            print(k)
            observations[int(k)] = v
        # observations = {v['id']:v for k,v in data.items()}
except IOError:
    observations = {}


def update(o, observations):
    o_id = o['id']
    print(o_id)
    if o_id not in observations:
        observations[o_id] = o
        was_new = True
    else:
        observations.update(o)
        was_new = False
    return was_new


r = get(OBSERVATIONS_API)
updated = [update(o, observations) for o in r.json()]
any_updated = any(updated)
nextpage = r.links.get('next')

while any_updated and nextpage:
    r = get(nextpage['url'])
    updated = [update(o, observations) for o in r.json()]
    print(updated)
    any_updated = any(updated)
    if any_updated:
        nextpage = r.links.get('next')

with open(OBSERVATIONS_JSON, 'w') as fp:
    json.dump(observations, fp, sort_keys=True, indent=2)
#!/usr/bin/env python3
"""
Utility to get observations from a SatNOGS Network server.

Collects the paginated objects into a single JSON list
and stores in a file.
"""

import json
import requests

OBSERVATIONS_API = 'https://network.satnogs.org/api/observations'
OBSERVATIONS_JSON = 'observations.json'


def get(url):
    print(url)
    return requests.get(url)


observations = []

r = get(OBSERVATIONS_API)
# r = requests.get(OBSERVATIONS_API)
observations.extend(r.json())
nextpage = r.links.get('next')

while nextpage:
    # r = requests.get(nextpage['url'])
    r = get(nextpage['url'])
    observations.extend(r.json())
    nextpage = r.links.get('next')

observations = sorted(observations, key=lambda s: s['id'])

with open(OBSERVATIONS_JSON, 'w') as fp:
    json.dump(observations, fp, sort_keys=True, indent=2)
Python
0
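The rewrite in this record replaces "re-download every page, then sort" with a dictionary keyed by observation id, stopping as soon as a whole page contributes nothing new. The core dedupe-and-early-stop step, as a simplified standalone sketch (no network, hypothetical records; the real script's `update` also merges changed records):

def merge_page(page, seen):
    """Return True if the page added at least one new record."""
    added = False
    for record in page:
        if record['id'] not in seen:
            seen[record['id']] = record
            added = True
    return added

seen = {}
print(merge_page([{'id': 1}, {'id': 2}], seen))  # True: both new, keep paging
print(merge_page([{'id': 2}], seen))             # False: safe to stop paging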
bf349b5f41e3b7edb4efbe279f79ded856320388
Fix typo
python/xchainer/testing/array.py
python/xchainer/testing/array.py
import numpy.testing

import xchainer


# NumPy-like assertion functions that accept both NumPy and xChainer arrays


def _check_xchainer_array(x):
    # Checks basic conditions that are assumed to hold true for any given xChainer array passed to assert_array_close and
    # assert_array_equal.
    assert isinstance(x, xchainer.Array)
    assert not x.is_grad_required()


def _as_numpy(x):
    if isinstance(x, xchainer.Array):
        # TODO(hvy): Use a function that converts xChainer arrays to NumPy arrays.
        return x.to_device('native:0')
    assert isinstance(x, numpy.ndarray) or numpy.isscalar(x)
    return x


def assert_allclose(x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True):
    """Raises an AssertionError if two array_like objects are not equal up to a tolerance.

    Args:
        x(numpy.ndarray or xchainer.Array): The actual object to check.
        y(numpy.ndarray or xchainer.Array): The desired, expected object.
        rtol(float): Relative tolerance.
        atol(float): Absolute tolerance.
        equal_nan(bool): Allow NaN values if True.
            Otherwise, fail the assertion if any NaN is found.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.

    .. seealso:: :func:`numpy.testing.assert_allclose`
    """
    def check_array(array):
        if isinstance(array, xchainer.Array):
            _check_xchainer_array(array)

    check_array(x)
    check_array(y)

    # TODO(sonots): Uncomment after strides compatibility between xChainer and NumPy is implemented.
    # assert x.strides == y.strides
    numpy.testing.assert_allclose(
        _as_numpy(x), _as_numpy(y), rtol=rtol, atol=atol, equal_nan=equal_nan,
        err_msg=err_msg, verbose=verbose)


def assert_array_equal(x, y, err_msg='', verbose=True):
    """Raises an AssertionError if two array_like objects are not equal.

    Args:
        x(numpy.ndarray or xchainer.Array): The actual object to check.
        y(numpy.ndarray or xchainer.Array): The desired, expected object.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.

    .. seealso:: :func:`numpy.testing.assert_array_equal`
    """
    def check_array(array):
        if isinstance(array, xchainer.Array):
            _check_xchainer_array(array)

    check_array(x)
    check_array(y)

    # TODO(sonots): Uncomment after strides compatibility between xChainer and NumPy is implemented.
    # assert x.strides == y.strides
    numpy.testing.assert_array_equal(_as_numpy(x), _as_numpy(y), err_msg=err_msg, verbose=verbose)
import numpy.testing

import xchainer


# NumPy-like assertion functions that accept both NumPy and xChainer arrays


def _check_xchainer_array(x):
    # Checks basic conditions that are assumed to hold true for any given xChainer array passed to assert_array_close and
    # assert_array_equal.
    assert isinstance(x, xchainer.Array)
    assert not x.is_grad_required()


def _as_numpy(x):
    if isinstance(x, xchainer.Array):
        # TODO(hvy): Use a function that convers an xChainer array to a NumPy array.
        return x.to_device('native:0')
    assert isinstance(x, numpy.ndarray) or numpy.isscalar(x)
    return x


def assert_allclose(x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True):
    """Raises an AssertionError if two array_like objects are not equal up to a tolerance.

    Args:
        x(numpy.ndarray or xchainer.Array): The actual object to check.
        y(numpy.ndarray or xchainer.Array): The desired, expected object.
        rtol(float): Relative tolerance.
        atol(float): Absolute tolerance.
        equal_nan(bool): Allow NaN values if True.
            Otherwise, fail the assertion if any NaN is found.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.

    .. seealso:: :func:`numpy.testing.assert_allclose`
    """
    def check_array(array):
        if isinstance(array, xchainer.Array):
            _check_xchainer_array(array)

    check_array(x)
    check_array(y)

    # TODO(sonots): Uncomment after strides compatibility between xChainer and NumPy is implemented.
    # assert x.strides == y.strides
    numpy.testing.assert_allclose(
        _as_numpy(x), _as_numpy(y), rtol=rtol, atol=atol, equal_nan=equal_nan,
        err_msg=err_msg, verbose=verbose)


def assert_array_equal(x, y, err_msg='', verbose=True):
    """Raises an AssertionError if two array_like objects are not equal.

    Args:
        x(numpy.ndarray or xchainer.Array): The actual object to check.
        y(numpy.ndarray or xchainer.Array): The desired, expected object.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.

    .. seealso:: :func:`numpy.testing.assert_array_equal`
    """
    def check_array(array):
        if isinstance(array, xchainer.Array):
            _check_xchainer_array(array)

    check_array(x)
    check_array(y)

    # TODO(sonots): Uncomment after strides compatibility between xChainer and NumPy is implemented.
    # assert x.strides == y.strides
    numpy.testing.assert_array_equal(_as_numpy(x), _as_numpy(y), err_msg=err_msg, verbose=verbose)
Python
0.999999
f238a7d227036510b91ea4a7e1e9178ea60b3997
Update imagecodecs/__main__.py
imagecodecs/__main__.py
imagecodecs/__main__.py
# -*- coding: utf-8 -*-
# imagecodecs/__main__.py

"""Imagecodecs package command line script."""

import sys

from matplotlib.pyplot import show
from tifffile import imshow

import imagecodecs


def askopenfilename(**kwargs):
    """Return file name(s) from Tkinter's file open dialog."""
    try:
        from Tkinter import Tk
        import tkFileDialog as filedialog
    except ImportError:
        from tkinter import Tk, filedialog
    root = Tk()
    root.withdraw()
    root.update()
    filenames = filedialog.askopenfilename(**kwargs)
    root.destroy()
    return filenames


def main(argv=None, verbose=True, decoders=None):
    """Imagecodecs command line usage main function."""
    if argv is None:
        argv = sys.argv

    if len(argv) < 2:
        fname = askopenfilename(title='Select a. image file')
        if not fname:
            print('No file selected')
            return -1
    elif len(argv) == 2:
        fname = argv[1]
    else:
        print('Usage: imagecodecs filename')
        return -1

    with open(fname, 'rb') as fh:
        data = fh.read()

    if decoders is None:
        decoders = [
            imagecodecs.png_decode,
            imagecodecs.jpeg8_decode,
            imagecodecs.jpeg12_decode,
            imagecodecs.jpegsof3_decode,
            imagecodecs.jpegls_decode,
            imagecodecs.j2k_decode,
            imagecodecs.jxr_decode,
            imagecodecs.webp_decode,
            imagecodecs.zfp_decode,
            imagecodecs.numpy_decode,
        ]

    messages = []
    image = None
    for decode in decoders:
        try:
            image = decode(data)
            if image.dtype == 'object':
                image = None
                raise ValueError('failed')
        except Exception as exception:
            # raise(exception)
            messages.append('%s: %s' % (decode.__name__.upper(), exception))
            continue
        break

    if verbose:
        print()
    if image is None:
        print('Could not decode the file\n')
        if verbose:
            for message in messages:
                print(message)
        return -1
    if verbose:
        print("%s: %s %s" % (decode.__name__.upper(), image.shape, image.dtype))

    imshow(image, title=fname)
    show()
    return 0


sys.exit(main())
# -*- coding: utf-8 -*-
# imagecodecs/__main__.py

"""Imagecodecs package command line script."""

import sys

from matplotlib.pyplot import show
from tifffile import imshow

import imagecodecs


def askopenfilename(**kwargs):
    """Return file name(s) from Tkinter's file open dialog."""
    try:
        from Tkinter import Tk
        import tkFileDialog as filedialog
    except ImportError:
        from tkinter import Tk, filedialog
    root = Tk()
    root.withdraw()
    root.update()
    filenames = filedialog.askopenfilename(**kwargs)
    root.destroy()
    return filenames


def main(argv=None, verbose=True, decoders=None):
    """Imagecodecs command line usage main function."""
    if argv is None:
        argv = sys.argv

    if len(argv) < 2:
        fname = askopenfilename(title='Select a. image file')
        if not fname:
            print('No file selected')
            return -1
    elif len(argv) == 2:
        fname = argv[1]
    else:
        print('Usage: imagecodecs filename')
        return -1

    with open(fname, 'rb') as fh:
        data = fh.read()

    if decoders is None:
        decoders = [
            imagecodecs.png_decode,
            imagecodecs.jpeg8_decode,
            imagecodecs.jpeg12_decode,
            imagecodecs.jpegsof3_decode,
            imagecodecs.jpegls_decode,
            imagecodecs.j2k_decode,
            imagecodecs.jxr_decode,
            imagecodecs.webp_decode,
        ]

    messages = []
    image = None
    for decode in decoders:
        try:
            image = decode(data)
            if image.dtype == 'object':
                image = None
                raise ValueError('failed')
        except Exception as exception:
            # raise(exception)
            messages.append('%s: %s' % (decode.__name__.upper(), exception))
            continue
        break

    if verbose:
        print()
    if image is None:
        print('Could not decode the file\n')
        if verbose:
            for message in messages:
                print(message)
        return -1
    if verbose:
        print("%s: %s %s" % (decode.__name__.upper(), image.shape, image.dtype))

    imshow(image, title=fname)
    show()
    return 0


sys.exit(main())
Python
0.000005
43e8b090d806d615a8153d1e14063cc6d274bb25
Update issue 130 Now I also applied the fix :)
rdflib/plugins/serializers/nt.py
rdflib/plugins/serializers/nt.py
""" N-Triples RDF graph serializer for RDFLib. See <http://www.w3.org/TR/rdf-testcases/#ntriples> for details about the format. """ from rdflib.serializer import Serializer import warnings class NTSerializer(Serializer): """ Serializes RDF graphs to NTriples format. """ def serialize(self, stream, base=None, encoding=None, **args): if base is not None: warnings.warn("NTSerializer does not support base.") if encoding is not None: warnings.warn("NTSerializer does not use custom encoding.") encoding = self.encoding for triple in self.store: stream.write(_nt_row(triple).encode(encoding, "replace")) stream.write("\n") def _nt_row(triple): return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), _xmlcharref_encode(triple[2].n3())) # from <http://code.activestate.com/recipes/303668/> def _xmlcharref_encode(unicode_data, encoding="ascii"): """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler.""" chars = [] # nothing to do about xmlchars, but replace newlines with escapes: unicode_data=unicode_data.replace("\n","\\n") if unicode_data.startswith('"""'): unicode_data = unicode_data.replace('"""', '"') # Step through the unicode_data string one character at a time in # order to catch unencodable characters: for char in unicode_data: try: chars.append(char.encode(encoding, 'strict')) except UnicodeError: chars.append('\u%04X' % ord(char) if ord(char) <= 0xFFFF else '\U%08X' % ord(char)) return ''.join(chars)
""" N-Triples RDF graph serializer for RDFLib. See <http://www.w3.org/TR/rdf-testcases/#ntriples> for details about the format. """ from rdflib.serializer import Serializer import warnings class NTSerializer(Serializer): """ Serializes RDF graphs to NTriples format. """ def serialize(self, stream, base=None, encoding=None, **args): if base is not None: warnings.warn("NTSerializer does not support base.") if encoding is not None: warnings.warn("NTSerializer does not use custom encoding.") encoding = self.encoding for triple in self.store: stream.write(_nt_row(triple).encode(encoding, "replace")) stream.write("\n") def _nt_row(triple): return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), _xmlcharref_encode(triple[2].n3())) # from <http://code.activestate.com/recipes/303668/> def _xmlcharref_encode(unicode_data, encoding="ascii"): """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler.""" chars = [] # nothing to do about xmlchars, but replace newlines with escapes: unicode_data=unicode_data.replace("\n","\\n") if unicode_data.startswith('"""'): unicode_data = unicode_data.replace('"""', '"') # Step through the unicode_data string one character at a time in # order to catch unencodable characters: for char in unicode_data: try: chars.append(char.encode(encoding, 'strict')) except UnicodeError: chars.append('\u%04X' % ord(char)) return ''.join(chars)
Python
0
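The one-line change in `_xmlcharref_encode` matters for characters outside the Basic Multilingual Plane: code points above U+FFFF do not fit the four-hex-digit `\u` escape that N-Triples uses, so they need the eight-digit `\U` form. A quick illustration of the boundary (standalone sketch, not the rdflib function itself):

def nt_escape(char):
    # four hex digits up to U+FFFF, eight above (N-Triples escape forms)
    cp = ord(char)
    return '\\u%04X' % cp if cp <= 0xFFFF else '\\U%08X' % cp

print(nt_escape(u'\u00e9'))      # \u00E9
print(nt_escape(u'\U0001d54a'))  # \U0001D54A: the old code emitted a truncated \uD54A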
0035200543a7b226a095d2fb4ec880e0dd8732fd
Rearrange test data
make_test_data.py
make_test_data.py
import sqlite3

INSERT_SONG = '''
INSERT INTO jukebox_song_queue VALUES (?)
'''

TEST_URIS = [
    'spotify:track:68MToCqJRJvNW8tYoxDl5p',
    'spotify:track:0p1VSXFdkr71f0nO21IEyq',
    'spotify:track:7udJ4LFSIrRnySD3eI8lad'
]

if __name__ == '__main__':
    conn = sqlite3.connect('jukebox.db')
    cursor = conn.cursor()
    for uri in TEST_URIS:
        uri = (uri,)
        cursor.execute(INSERT_SONG, uri)
    conn.commit()
    conn.close()
import sqlite3

INSERT_SONG = '''
INSERT INTO jukebox_song_queue VALUES (?)
'''

TEST_URIS = [
    'spotify:track:7udJ4LFSIrRnySD3eI8lad',
    'spotify:track:0p1VSXFdkr71f0nO21IEyq',
    'spotify:track:68MToCqJRJvNW8tYoxDl5p'
]

if __name__ == '__main__':
    conn = sqlite3.connect('jukebox.db')
    cursor = conn.cursor()
    for uri in TEST_URIS:
        uri = (uri,)
        cursor.execute(INSERT_SONG, uri)
    conn.commit()
    conn.close()
Python
0.000026
95ea1d7d6564bcbb2e3b8d2ba254ccd2c1c38436
Add import for focused stuff
mamba/__init__.py
mamba/__init__.py
__version__ = '0.9.2'


def description(message):
    pass


def _description(message):
    pass


def fdescription(message):
    pass


def it(message):
    pass


def _it(message):
    pass


def fit(message):
    pass


def context(message):
    pass


def _context(message):
    pass


def fcontext(message):
    pass


def before():
    pass


def after():
    pass
__version__ = '0.9.2'


def description(message):
    pass


def _description(message):
    pass


def it(message):
    pass


def _it(message):
    pass


def context(message):
    pass


def _context(message):
    pass


def before():
    pass


def after():
    pass
Python
0
d3b3e9af722ac00b21bf36706f4e0ab7cf94af00
bump to v0.6.4
mando/__init__.py
mando/__init__.py
__version__ = '0.6.4'

try:
    from mando.core import Program
except ImportError as e:  # pragma: no cover
    # unfortunately the only workaround for Python2.6, argparse and setup.py
    e.version = __version__
    raise e

main = Program()
command = main.command
arg = main.arg
parse = main.parse
execute = main.execute
__version__ = '0.5'

try:
    from mando.core import Program
except ImportError as e:  # pragma: no cover
    # unfortunately the only workaround for Python2.6, argparse and setup.py
    e.version = __version__
    raise e

main = Program()
command = main.command
arg = main.arg
parse = main.parse
execute = main.execute
Python
0
59400100aa2f35bfea52b3cf049ef8d0f958527d
Fix error when reaching a dead end in the markov chain
markov/markov2.py
markov/markov2.py
#!python3

import string
import random
import time
import sys

'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build
a markov chain with a state for each word(s), and start walking
through the chain, writing incoherent text to the terminal.
'''

asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')

def strip2ascii(txt):
    return ''.join([ch for ch in txt if ch in asciiset])

def tokenize(fname):
    '''
    Generate tokens defined by
        - Sequences of characters that aren't spaces
        - Periods

    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok, '.')
    '''
    with open(fname, 'r') as f:
        for line in f:
            stripped = strip2ascii(line)
            for word in stripped.split():
                if word[-1] == '.':
                    yield word[:-1]
                    yield '.'
                else:
                    yield word

def buildtransitionmap(tokens, order):
    dct = {}
    prev = ('',)*order
    for token in tokens:
        if prev in dct:
            dct[prev].append(token)
        else:
            dct[prev] = [token]
        prev = prev[1:]+(token,)
    return dct

def walk(transmap, prev=None):
    if prev == None:
        prev = random.choice(list(transmap.keys()))
    while True:
        if not prev in transmap:
            prev = random.choice(list(transmap.keys()))
        word = random.choice(transmap[prev])
        yield word
        prev = prev[1:]+(word,)

def eternalramble(fname, order):
    '''
    Walk through the markov chain printing out words to the terminal one at a time
    '''
    transmap = buildtransitionmap(tokenize(fname), order)
    for word in walk(transmap):
        print(word, end=' ')
        sys.stdout.flush()
        time.sleep(0.25)

def printusage():
    print('Usage: markov filename order')
    print('    filename: the filename of the text to base the markov chain on.')
    print('    order: how many consecutive words make up each state (2 works well)')

def launch():
    if len(sys.argv) != 3:
        printusage()
        return
    try:
        order = int(sys.argv[2])
    except:
        printusage()
        return
    eternalramble(sys.argv[1], order)

if __name__ == '__main__':
    launch()
#!python3

import string
import random
import time
import sys

'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build
a markov chain with a state for each word(s), and start walking
through the chain, writing incoherent text to the terminal.
'''

asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')

def strip2ascii(txt):
    return ''.join([ch for ch in txt if ch in asciiset])

def tokenize(fname):
    '''
    Generate tokens defined by
        - Sequences of characters that aren't spaces
        - Periods

    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok, '.')
    '''
    with open(fname, 'r') as f:
        for line in f:
            stripped = strip2ascii(line)
            for word in stripped.split():
                if word[-1] == '.':
                    yield word[:-1]
                    yield '.'
                else:
                    yield word

def buildtransitionmap(tokens, order):
    dct = {}
    prev = ('',)*order
    for token in tokens:
        if prev in dct:
            dct[prev].append(token)
        else:
            dct[prev] = [token]
        prev = prev[1:]+(token,)
    return dct

def transition(word, transmap):
    return random.choice(transmap[word])

def eternalramble(fname, order):
    '''
    Walk through the markov chain printing out words to the terminal one at a time
    '''
    transmap = buildtransitionmap(tokenize(fname), order)
    prev = random.choice(list(transmap.keys()))
    while True:
        word = transition(prev, transmap)
        print(word, end=' ')
        prev = prev[1:]+(word,)
        sys.stdout.flush()
        time.sleep(0.25)

def printusage():
    print('Usage: markov filename order')
    print('    filename: the filename of the text to base the markov chain on.')
    print('    order: how many consecutive words make up each state (2 works well)')

def launch():
    if len(sys.argv) != 3:
        printusage()
        return
    try:
        order = int(sys.argv[2])
    except:
        printusage()
        return
    eternalramble(sys.argv[1], order)

if __name__ == '__main__':
    launch()
Python
0.000006
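Moving the loop body into a `walk` generator is what makes this dead-end fix clean: when the current state has no outgoing transitions, the generator re-seeds itself from a random key instead of raising `KeyError`. The pattern in isolation, with a toy order-1 transition map (names here are illustrative):

import random

def walk(transmap, state=None):
    if state is None:
        state = random.choice(list(transmap))
    while True:
        if state not in transmap:              # dead end: re-seed instead of crashing
            state = random.choice(list(transmap))
        word = random.choice(transmap[state])
        yield word
        state = state[1:] + (word,)

transmap = {('a',): ['b'], ('b',): ['end']}    # ('end',) has no successors
gen = walk(transmap, ('a',))
print([next(gen) for _ in range(5)])           # keeps producing output past the dead end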
7ccb52897e82629e4bbb0298dba4de76bc6a63db
Add a deprecation warning
pathvalidate/_symbol.py
pathvalidate/_symbol.py
""" .. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com> """ import re import warnings from typing import Sequence from ._common import ascii_symbols, preprocess, unprintable_ascii_chars from .error import InvalidCharError __RE_UNPRINTABLE = re.compile( "[{}]".format(re.escape("".join(unprintable_ascii_chars))), re.UNICODE ) __RE_SYMBOL = re.compile( "[{}]".format(re.escape("".join(ascii_symbols + unprintable_ascii_chars))), re.UNICODE ) def validate_unprintable(text: str) -> None: # deprecated match_list = __RE_UNPRINTABLE.findall(preprocess(text)) if match_list: raise InvalidCharError("unprintable character found: {}".format(match_list)) def replace_unprintable(text: str, replacement_text: str = "") -> str: warnings.warn( "'replace_unprintable' has moved to 'replace_unprintable_char'", DeprecationWarning ) try: return __RE_UNPRINTABLE.sub(replacement_text, preprocess(text)) except (TypeError, AttributeError): raise TypeError("text must be a string") def validate_symbol(text: str) -> None: """ Verifying whether symbol(s) included in the ``text`` or not. Args: text: Input text to validate. Raises: ValidationError (ErrorReason.INVALID_CHARACTER): If symbol(s) included in the ``text``. """ match_list = __RE_SYMBOL.findall(preprocess(text)) if match_list: raise InvalidCharError("invalid symbols found: {}".format(match_list)) def replace_symbol( text: str, replacement_text: str = "", exclude_symbols: Sequence[str] = [], is_replace_consecutive_chars: bool = False, is_strip: bool = False, ) -> str: """ Replace all of the symbols in the ``text``. Args: text: Input text. replacement_text: Replacement text. exclude_symbols: Symbols that exclude from the replacement. is_replace_consecutive_chars: If |True|, replace consecutive multiple ``replacement_text`` characters to a single character. is_strip: If |True|, strip ``replacement_text`` from the beginning/end of the replacement text. Returns: A replacement string. Example: :ref:`example-sanitize-symbol` """ if exclude_symbols: regexp = re.compile( "[{}]".format( re.escape( "".join(set(ascii_symbols + unprintable_ascii_chars) - set(exclude_symbols)) ) ), re.UNICODE, ) else: regexp = __RE_SYMBOL try: new_text = regexp.sub(replacement_text, preprocess(text)) except TypeError: raise TypeError("text must be a string") if not replacement_text: return new_text if is_replace_consecutive_chars: new_text = re.sub("{}+".format(re.escape(replacement_text)), replacement_text, new_text) if is_strip: new_text = new_text.strip(replacement_text) return new_text
""" .. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com> """ import re from typing import Sequence from ._common import ascii_symbols, preprocess, unprintable_ascii_chars from .error import InvalidCharError __RE_UNPRINTABLE = re.compile( "[{}]".format(re.escape("".join(unprintable_ascii_chars))), re.UNICODE ) __RE_SYMBOL = re.compile( "[{}]".format(re.escape("".join(ascii_symbols + unprintable_ascii_chars))), re.UNICODE ) def validate_unprintable(text: str) -> None: # deprecated match_list = __RE_UNPRINTABLE.findall(preprocess(text)) if match_list: raise InvalidCharError("unprintable character found: {}".format(match_list)) def replace_unprintable(text: str, replacement_text: str = "") -> str: # deprecated try: return __RE_UNPRINTABLE.sub(replacement_text, preprocess(text)) except (TypeError, AttributeError): raise TypeError("text must be a string") def validate_symbol(text: str) -> None: """ Verifying whether symbol(s) included in the ``text`` or not. Args: text: Input text to validate. Raises: ValidationError (ErrorReason.INVALID_CHARACTER): If symbol(s) included in the ``text``. """ match_list = __RE_SYMBOL.findall(preprocess(text)) if match_list: raise InvalidCharError("invalid symbols found: {}".format(match_list)) def replace_symbol( text: str, replacement_text: str = "", exclude_symbols: Sequence[str] = [], is_replace_consecutive_chars: bool = False, is_strip: bool = False, ) -> str: """ Replace all of the symbols in the ``text``. Args: text: Input text. replacement_text: Replacement text. exclude_symbols: Symbols that exclude from the replacement. is_replace_consecutive_chars: If |True|, replace consecutive multiple ``replacement_text`` characters to a single character. is_strip: If |True|, strip ``replacement_text`` from the beginning/end of the replacement text. Returns: A replacement string. Example: :ref:`example-sanitize-symbol` """ if exclude_symbols: regexp = re.compile( "[{}]".format( re.escape( "".join(set(ascii_symbols + unprintable_ascii_chars) - set(exclude_symbols)) ) ), re.UNICODE, ) else: regexp = __RE_SYMBOL try: new_text = regexp.sub(replacement_text, preprocess(text)) except TypeError: raise TypeError("text must be a string") if not replacement_text: return new_text if is_replace_consecutive_chars: new_text = re.sub("{}+".format(re.escape(replacement_text)), replacement_text, new_text) if is_strip: new_text = new_text.strip(replacement_text) return new_text
Python
0.001163
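This commit keeps the old `replace_unprintable` callable but makes it announce its replacement through `warnings.warn(..., DeprecationWarning)`, the usual way to steer callers without breaking them. Note that `DeprecationWarning` is filtered out by default in normal runs; a sketch of how a caller would surface it (function names here are illustrative):

import warnings

def new_name(text):
    return text.strip()

def old_name(text):
    warnings.warn("'old_name' has moved to 'new_name'", DeprecationWarning)
    return new_name(text)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')      # make DeprecationWarning visible
    old_name(' x ')
    print(caught[0].category.__name__)   # DeprecationWarning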
12dac769152ebd074ed2b415d3980729bdbe3e46
Make nonexistent warning more obvious
regparser/commands/compare_to.py
regparser/commands/compare_to.py
import json
import logging
import os

import click
from json_delta import udiff
import requests
import requests_cache


def local_and_remote_generator(api_base, paths):
    """Find all local files in `paths` and pair them with the appropriate
    remote file (prefixing with api_base). As the local files could be at any
    position in the file system, we back out directories until we hit one of
    the four root resource types (diff, layer, notice, regulation)"""
    local_names = [path for path in paths if os.path.isfile(path)]
    # this won't duplicate the previous line as it'll only add files in dirs
    local_names.extend(os.path.join(dirpath, filename)
                       for path in paths
                       for dirpath, _, filenames in os.walk(path)
                       for filename in filenames)
    for local_name in local_names:
        dirname, basename = os.path.split(local_name)
        reversed_suffix = [basename]
        # these are the four root resource types
        while basename not in ('diff', 'layer', 'notice', 'regulation'):
            dirname, basename = os.path.split(dirname)
            reversed_suffix.append(basename)
        remote_name = api_base + '/'.join(reversed(reversed_suffix))
        yield (local_name, remote_name)


def compare(local_path, remote_url):
    """Downloads and compares a local JSON file with a remote one. If there is
    a difference, notifies the user and prompts them if they want to see the
    diff"""
    remote_response = requests.get(remote_url)
    if remote_response.status_code == 404:
        logging.warn("Nonexistent: %s", remote_url)
    else:
        remote = remote_response.json()
        with open(local_path) as f:
            local = json.load(f)
        if remote != local:
            click.echo("Content differs: {} {}".format(local_path, remote_url))
            if click.confirm("Show diff?"):
                diffs_str = '\n'.join(udiff(remote, local))
                click.echo_via_pager(diffs_str)


@click.command()
@click.argument('api_base')
@click.argument('paths', nargs=-1, required=True,
                type=click.Path(exists=True, resolve_path=True))
@click.pass_context
def compare_to(ctx, api_base, paths):
    """Compare local JSON to a remote server. This is useful for verifying
    changes to the parser.

    API_BASE is the uri of the root of the API. Use what would be the last
    parameter in the `write_to` command.

    PATH parameters indicate specific files or directories to use when
    comparing. For example, use `/some/path/to/regulation/555` to compare all
    versions of 555. Glob syntax works if your shell supports it"""
    if not api_base.endswith("/"):
        api_base += "/"

    # @todo: ugly to uninstall the cache after installing it in eregs.py.
    # Remove the globalness
    requests_cache.uninstall_cache()

    for local, remote in local_and_remote_generator(api_base, paths):
        compare(local, remote)
import json
import os

import click
from json_delta import udiff
import requests
import requests_cache


def local_and_remote_generator(api_base, paths):
    """Find all local files in `paths` and pair them with the appropriate
    remote file (prefixing with api_base). As the local files could be at any
    position in the file system, we back out directories until we hit one of
    the four root resource types (diff, layer, notice, regulation)"""
    local_names = [path for path in paths if os.path.isfile(path)]
    # this won't duplicate the previous line as it'll only add files in dirs
    local_names.extend(os.path.join(dirpath, filename)
                       for path in paths
                       for dirpath, _, filenames in os.walk(path)
                       for filename in filenames)
    for local_name in local_names:
        dirname, basename = os.path.split(local_name)
        reversed_suffix = [basename]
        # these are the four root resource types
        while basename not in ('diff', 'layer', 'notice', 'regulation'):
            dirname, basename = os.path.split(dirname)
            reversed_suffix.append(basename)
        remote_name = api_base + '/'.join(reversed(reversed_suffix))
        yield (local_name, remote_name)


def compare(local_path, remote_url):
    """Downloads and compares a local JSON file with a remote one. If there is
    a difference, notifies the user and prompts them if they want to see the
    diff"""
    remote_response = requests.get(remote_url)
    if remote_response.status_code == 404:
        click.echo("Nonexistent: " + remote_url)
    else:
        remote = remote_response.json()
        with open(local_path) as f:
            local = json.load(f)
        if remote != local:
            click.echo("Content differs: {} {}".format(local_path, remote_url))
            if click.confirm("Show diff?"):
                diffs_str = '\n'.join(udiff(remote, local))
                click.echo_via_pager(diffs_str)


@click.command()
@click.argument('api_base')
@click.argument('paths', nargs=-1, required=True,
                type=click.Path(exists=True, resolve_path=True))
@click.pass_context
def compare_to(ctx, api_base, paths):
    """Compare local JSON to a remote server. This is useful for verifying
    changes to the parser.

    API_BASE is the uri of the root of the API. Use what would be the last
    parameter in the `write_to` command.

    PATH parameters indicate specific files or directories to use when
    comparing. For example, use `/some/path/to/regulation/555` to compare all
    versions of 555. Glob syntax works if your shell supports it"""
    if not api_base.endswith("/"):
        api_base += "/"

    # @todo: ugly to uninstall the cache after installing it in eregs.py.
    # Remove the globalness
    requests_cache.uninstall_cache()

    for local, remote in local_and_remote_generator(api_base, paths):
        compare(local, remote)
Python
0
cfb09353b02dd230546775d18dadb1ba7ed2acc6
Refactor submit_comment tests
regulations/tests/tasks_tests.py
regulations/tests/tasks_tests.py
import json

import mock
import six
from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException

from django.test import SimpleTestCase, override_settings

from regulations.tasks import submit_comment


@mock.patch('regulations.tasks.save_failed_submission')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
    ATTACHMENT_BUCKET='test-bucket',
    ATTACHMENT_ACCESS_KEY_ID='test-access-key',
    ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
    ATTACHMENT_MAX_SIZE=42,
    REGS_GOV_API_URL='test-url',
    REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):

    def setUp(self):
        self.file_handle = six.BytesIO("some-content")
        self.submission = {'assembled_comment': [
            {"id": "A1", "comment": "A simple comment", "files": []},
            {"id": "A5", "comment": "Another comment", "files": []}
        ]}

    def test_submit_comment(self, html_to_pdf, post, retry,
                            save_failed_submission):
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=self.file_handle)

        expected_result = {'tracking_number': 'some-tracking-number'}
        post.return_value.status_code = 201
        post.return_value.json.return_value = expected_result

        result = submit_comment(self.submission)

        self.assertEqual(result, expected_result)

    def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
                                        save_failed_submission):
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=self.file_handle)

        post.side_effect = [RequestException]
        retry.return_value = Retry()

        with self.assertRaises(Retry):
            submit_comment(self.submission)

    def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
                                           save_failed_submission):
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=self.file_handle)

        post.side_effect = [RequestException]
        retry.return_value = MaxRetriesExceededError()

        submit_comment(self.submission)

        save_failed_submission.assert_called_with(json.dumps(self.submission))
import json

import mock
import six
from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException

from django.test import SimpleTestCase, override_settings

from regulations.tasks import submit_comment


@mock.patch('regulations.tasks.save_failed_submission')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
    ATTACHMENT_BUCKET='test-bucket',
    ATTACHMENT_ACCESS_KEY_ID='test-access-key',
    ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
    ATTACHMENT_MAX_SIZE=42,
    REGS_GOV_API_URL='test-url',
    REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):

    def test_submit_comment(self, html_to_pdf, post, retry,
                            save_failed_submission):
        file_handle = six.BytesIO("foobar")
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=file_handle)

        expected_result = {'tracking_number': '133321'}
        post.return_value.status_code = 201
        post.return_value.json.return_value = expected_result

        body = {'assembled_comment': {'sections': []}}
        result = submit_comment(body)

        self.assertEqual(result, expected_result)

    def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
                                        save_failed_submission):
        file_handle = six.BytesIO("foobar")
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=file_handle)

        post.side_effect = [RequestException]
        retry.return_value = Retry()

        body = {'assembled_comment': {'sections': []}}

        with self.assertRaises(Retry):
            submit_comment(body)

    def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
                                           save_failed_submission):
        file_handle = six.BytesIO("foobar")
        html_to_pdf.return_value.__enter__ = mock.Mock(
            return_value=file_handle)

        post.side_effect = [RequestException]
        retry.return_value = MaxRetriesExceededError()

        body = {'assembled_comment': {'sections': []}}
        submit_comment(body)

        save_failed_submission.assert_called_with(json.dumps(body))
Python
0
29e491c5505d2068b46eb489044455968e53ab70
Add tests for strait and fjord
test/400-bay-water.py
test/400-bay-water.py
# osm_id: 43950409 name: San Pablo Bay
assert_has_feature(
    14, 2623, 6318, 'water',
    { 'kind': 'bay', 'label_placement': 'yes' })

# osm_id: 360566115 name: Byron strait
assert_has_feature(
    14, 15043, 8311, 'water',
    { 'kind': 'strait', 'label_placement': 'yes' })

# osm_id: -1451065 name: Horsens Fjord
assert_has_feature(
    14, 8645, 5114, 'water',
    { 'kind': 'fjord', 'label_placement': 'yes' })
assert_has_feature(
    14, 2623, 6318, 'water',
    { 'kind': 'bay', 'label_placement': 'yes' })
Python
0.000007
83781f3b2f1cde0aab913ff4d64de45cf9b798be
Update snooper for multi-spline qp controller inputs
software/control/src/qp_controller_input_snooper.py
software/control/src/qp_controller_input_snooper.py
#!/usr/bin/python

''' Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block. '''

import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np

color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0], [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];

def pval(coefs, t_off):
    out = np.array([0.0]*6)
    for j in range(0, 6):
        out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
    return out

def handle_qp_controller_input_msg(channel, data):
    msg = lcmt_qp_controller_input.decode(data)
    #print("received")
    # draw spline segment for each tracked body
    for i in range(0, msg.num_tracked_bodies):
        bmd = msg.body_motion_data[i]
        ts = bmd.ts;
        color = color_order[i%len(color_order)];
        for j in range(0, msg.body_motion_data[i].num_spline_coefs):
            tsdense = np.linspace(ts[j], ts[j+1], 20);
            coefs = np.array(bmd.coefs[j].coefs);
            gl.glColor3f(color[0], color[1], color[2]);
            gl.glLineWidth(5);
            gl.glBegin(GL_LINES);
            ps = np.array([pval(coefs, t-ts[j]) for t in tsdense]);
            for j in range(0,tsdense.size-1):
                gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
                gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
            gl.glEnd();
    gl.switch_buffer()

lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);

try:
    while True:
        lc.handle()
except KeyboardInterrupt:
    pass
#!/usr/bin/python

''' Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block. '''

import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np

color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0], [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];

def pval(coefs, t_off):
    out = np.array([0.0]*6)
    for j in range(0, 6):
        out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
    return out

def handle_qp_controller_input_msg(channel, data):
    msg = lcmt_qp_controller_input.decode(data)
    #print("received")
    # draw spline segment for each tracked body
    for i in range(0, msg.num_tracked_bodies):
        bmd = msg.body_motion_data[i]
        ts = bmd.ts;
        tsdense = np.linspace(ts[0], ts[-1], 20);
        coefs = np.array(bmd.coefs);
        color = color_order[i%len(color_order)];
        gl.glColor3f(color[0], color[1], color[2]);
        gl.glLineWidth(5);
        gl.glBegin(GL_LINES);
        ps = np.array([pval(coefs, t-ts[0]) for t in tsdense]);
        for j in range(0,tsdense.size-1):
            gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
            gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
        gl.glEnd();
    gl.switch_buffer()

lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);

try:
    while True:
        lc.handle()
except KeyboardInterrupt:
    pass
Python
0
cdf545cf9385a0490590cd0162141025a1301c09
Use argparse formatter RawDescriptionHelpFormatter, maybe temporarily
track/config.py
track/config.py
import configargparse

DEFAULT_CONFIG_FILES=[
    './track.cfg',
    '~/.track.cfg',
]

# Bit of a cheat... not actually an object constructor, just a 'make me an object' method
def ArgParser():
    return configargparse.ArgParser(
        ignore_unknown_config_file_keys =True,
        allow_abbrev =True,
        default_config_files =DEFAULT_CONFIG_FILES,
#        formatter_class =configargparse.ArgumentDefaultsHelpFormatter,
        formatter_class =configargparse.RawDescriptionHelpFormatter,
        config_file_parser_class =configargparse.DefaultConfigFileParser,  # INI format
        args_for_setting_config_path =['-c', '--cfg'],
        args_for_writing_out_config_file=['-w', '--cfg-write'],
    )
import configargparse

DEFAULT_CONFIG_FILES=[
    './track.cfg',
    '~/.track.cfg',
]

# Bit of a cheat... not actually an object constructor, just a 'make me an object' method
def ArgParser():
    return configargparse.ArgParser(
        ignore_unknown_config_file_keys =True,
        allow_abbrev =True,
        default_config_files =DEFAULT_CONFIG_FILES,
        formatter_class =configargparse.ArgumentDefaultsHelpFormatter,
        config_file_parser_class =configargparse.DefaultConfigFileParser,  # INI format
        args_for_setting_config_path =['-c', '--cfg'],
        args_for_writing_out_config_file=['-w', '--cfg-write'],
    )
Python
0
148d4c44a9eb63016b469c6bf317a3dbe9ed7918
Add documentation for Permutations class
permuta/permutations.py
permuta/permutations.py
from .misc import DancingLinks
from .permutation import Permutation
import random


class Permutations(object):
    """Class for iterating through all Permutations of length n"""

    def __init__(self, n):
        """Returns an object giving all permutations of length n"""
        assert 0 <= n
        self.n = n

    def __iter__(self):
        """Iterates through permutations of length n in lexical order"""
        left = DancingLinks(range(1, self.n+1))
        res = []

        def gen():
            if len(left) == 0:
                yield Permutation(list(res))
            else:
                cur = left.front
                while cur is not None:
                    left.erase(cur)
                    res.append(cur.value)

                    for p in gen():
                        yield p

                    res.pop()
                    left.restore(cur)
                    cur = cur.next

        return gen()

    def random_element(self):
        """Returns a random permutation of length n"""
        p = [i+1 for i in range(self.n)]
        for i in range(self.n-1, -1, -1):
            j = random.randint(0, i)
            p[i], p[j] = p[j], p[i]
        return Permutation(p)

    def __str__(self):
        return 'The set of Permutations of length %d' % self.n

    def __repr__(self):
        return 'Permutations(%d)' % self.n
from .misc import DancingLinks
from .permutation import Permutation
import random


class Permutations(object):
    def __init__(self, n):
        assert 0 <= n
        self.n = n

    def __iter__(self):
        left = DancingLinks(range(1, self.n+1))
        res = []

        def gen():
            if len(left) == 0:
                yield Permutation(list(res))
            else:
                cur = left.front
                while cur is not None:
                    left.erase(cur)
                    res.append(cur.value)

                    for p in gen():
                        yield p

                    res.pop()
                    left.restore(cur)
                    cur = cur.next

        return gen()

    def random_element(self):
        p = [ i+1 for i in range(self.n) ]
        for i in range(self.n-1, -1, -1):
            j = random.randint(0, i)
            p[i],p[j] = p[j],p[i]
        return Permutation(p)

    def __str__(self):
        return 'The set of Permutations of length %d' % self.n

    def __repr__(self):
        return 'Permutations(%d)' % self.n
Python
0
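`random_element` above is a Fisher–Yates shuffle: walking `i` from the last index down and swapping with a uniform `j` in `[0, i]` gives every permutation probability exactly 1/n!. A standalone version with a quick frequency check (illustrative, outside the Permutations class):

import random
from collections import Counter

def fisher_yates(n):
    p = list(range(1, n + 1))
    for i in range(n - 1, -1, -1):
        j = random.randint(0, i)      # j == i is allowed, so elements may stay put
        p[i], p[j] = p[j], p[i]
    return tuple(p)

counts = Counter(fisher_yates(3) for _ in range(60000))
print(len(counts))                                   # 6: all 3! permutations appear
print(max(counts.values()) / min(counts.values()))   # close to 1.0: roughly uniform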
8b9f68514d78851f3b445f996f3eaf607831d352
Add more descriptive names to variables and functions
raspisump/checkpid.py
raspisump/checkpid.py
#!/usr/bin/python

# Check to make sure process raspi-sump is running and restart if required.

import subprocess
import time


def check_pid():
    '''Check status of raspisump.py process.'''
    cmdp1 = "ps aux"
    cmdp2 = "grep -v grep"
    cmdp3 = "grep -v sudo"
    cmdp4 = "grep -c /home/pi/raspi-sump/raspisump.py"
    cmdp1list = cmdp1.split(' ')
    cmdp2list = cmdp2.split(' ')
    cmdp3list = cmdp3.split(' ')
    cmdp4list = cmdp4.split(' ')
    part1 = subprocess.Popen(cmdp1list, stdout=subprocess.PIPE)
    part2 = subprocess.Popen(cmdp2list, stdin=part1.stdout, stdout=subprocess.PIPE)
    part1.stdout.close()
    part3 = subprocess.Popen(cmdp3list, stdin=part2.stdout,stdout=subprocess.PIPE)
    part2.stdout.close()
    part4 = subprocess.Popen(cmdp4list, stdin=part3.stdout,stdout=subprocess.PIPE)
    part3.stdout.close()
    number_of_processes = int(part4.communicate()[0])

    if number_of_processes == 0:
        log_restarts("Process stopped, restarting")
        restart()
    elif number_of_processes == 1:
        exit(0)
    else:
        log_restarts("Multiple processes...killing and restarting")
        kill_start()


def restart():
    '''Restart raspisump.py process.'''
    restart_cmd = "/home/pi/raspi-sump/raspisump.py &"
    restart_now = restart_cmd.split(' ')
    subprocess.Popen(restart_now)
    exit(0)


def kill_start():
    '''Kill all instances of raspisump.py process.'''
    kill_cmd = "killall 09 raspisump.py"
    kill_it = kill_cmd.split(' ')
    subprocess.call(kill_it)
    restart()


def log_restarts(reason):
    '''Log all process restarts'''
    logfile = open("/home/pi/raspi-sump/logs/process_log", 'a')
    logfile.write(time.strftime("%Y-%m-%d %H:%M:%S,")),
    logfile.write(reason),
    logfile.write("\n")
    logfile.close


if __name__ == "__main__":
    check_pid()
#!/usr/bin/python

# Check to make sure process raspi-sump is running and restart if required.

import subprocess
import time


def check_pid():
    '''Check status of raspisump.py process.'''
    cmdp1 = "ps aux"
    cmdp2 = "grep -v grep"
    cmdp3 = "grep -v sudo"
    cmdp4 = "grep -c /home/pi/raspi-sump/raspisump.py"
    cmdp1list = cmdp1.split(' ')
    cmdp2list = cmdp2.split(' ')
    cmdp3list = cmdp3.split(' ')
    cmdp4list = cmdp4.split(' ')
    part1 = subprocess.Popen(cmdp1list, stdout=subprocess.PIPE)
    part2 = subprocess.Popen(cmdp2list, stdin=part1.stdout, stdout=subprocess.PIPE)
    part1.stdout.close()
    part3 = subprocess.Popen(cmdp3list, stdin=part2.stdout,stdout=subprocess.PIPE)
    part2.stdout.close()
    part4 = subprocess.Popen(cmdp4list, stdin=part3.stdout,stdout=subprocess.PIPE)
    part3.stdout.close()
    x = int(part4.communicate()[0])

    if x == 0:
        log_check("Process stopped, restarting")
        restart()
    elif x == 1:
        exit(0)
    else:
        log_check("Multiple Processes...Killing and Restarting")
        kill_start()


def restart():
    '''Restart raspisump.py process.'''
    restart_cmd = "/home/pi/raspi-sump/raspisump.py &"
    restart_now = restart_cmd.split(' ')
    subprocess.Popen(restart_now)
    exit(0)


def kill_start():
    '''Kill all instances of raspisump.py process.'''
    kill_cmd = "killall 09 raspisump.py"
    kill_it = kill_cmd.split(' ')
    subprocess.call(kill_it)
    restart()


def log_check(reason):
    logfile = open("/home/pi/raspi-sump/logs/process_log", 'a')
    logfile.write(time.strftime("%Y-%m-%d %H:%M:%S,")),
    logfile.write(reason),
    logfile.write("\n")
    logfile.close


if __name__ == "__main__":
    check_pid()
Python
0.000001
51373b776403b94cf0b72b43952013f3b4ecdb2d
Remove useless codes
holosocket/encrypt.py
holosocket/encrypt.py
import struct

from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHA256
from Cryptodome.Random import get_random_bytes


class aes_gcm:
    def __init__(self, key, salt=None):
        """Create a new AES-GCM cipher.

        key: Your password like: passw0rd
        salt: a 16 bytes length byte string, if not provided a random salt will be used
        nonce: a 8 bytes length byte string, if not provided a random nonce will be used"""

        self.raw_key = key.encode()
        if not salt:
            self._salt = get_random_bytes(16)
        else:
            if len(salt) != 16:
                error_msg = 'salt length should be 16, not {}'
                raise ValueError(error_msg.format(len(salt)))
            else:
                self._salt = salt

        self.key = SHA256.new(self.raw_key + self._salt).digest()  # generate a 256 bytes key
        self.nonce = 0

    def _new(self):
        nonce = struct.pack('>Q', self.nonce)
        self.cipher = AES.new(self.key, AES.MODE_GCM, nonce)
        self.nonce += 1

    def encrypt(self, data):
        """Encrypt data return cipher.

        data: raw data"""
        self._new()
        return self.cipher.encrypt_and_digest(data)

    def decrypt(self, data, mac):
        """Decrypt data.

        data: cipher
        mac: gmac"""
        self._new()
        # Verify MAC, if matching, will return plain text or raise ValueError
        plain = self.cipher.decrypt_and_verify(data, mac)
        return plain

    @property
    def salt(self):
        return self._salt


def test():
    # AES-GCM
    print('AES-256-GCM')
    gen = aes_gcm('test')
    salt = gen.salt
    gcipher = gen.encrypt(b'holo')
    gde = aes_gcm('test', salt)
    print(gde.decrypt(*gcipher))


if __name__ == '__main__':
    test()
import struct

from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHA256
from Cryptodome.Random import get_random_bytes

#Cipher_Tag = {'aes-256-gcm': 16}
#Nonce_Len = 8  # fuck you 12 bytes


class aes_gcm:
    def __init__(self, key, salt=None):
        """Create a new AES-GCM cipher.

        key: Your password like: passw0rd
        salt: a 16 bytes length byte string, if not provided a random salt will be used
        nonce: a 8 bytes length byte string, if not provided a random nonce will be used"""

        self.raw_key = key.encode()
        if not salt:
            self._salt = get_random_bytes(16)
        else:
            if len(salt) != 16:
                error_msg = 'salt length should be 16, not {}'
                raise ValueError(error_msg.format(len(salt)))
            else:
                self._salt = salt

        self.key = SHA256.new(self.raw_key + self._salt).digest()  # generate a 256 bytes key
        self.nonce = 0

    def _new(self):
        nonce = struct.pack('>Q', self.nonce)
        self.cipher = AES.new(self.key, AES.MODE_GCM, nonce)
        self.nonce += 1

    def encrypt(self, data):
        """Encrypt data return cipher.

        data: raw data"""
        self._new()
        #Return (cpiher, MAC)
        return self.cipher.encrypt_and_digest(data)

    def decrypt(self, data, mac):
        """Decrypt data.

        data: cipher
        mac: gmac"""
        self._new()
        #Verify MAC, if matching, will return plain text or raise ValueError
        plain = self.cipher.decrypt_and_verify(data, mac)
        return plain

    @property
    def salt(self):
        return self._salt


def test():
    # AES-GCM
    print('AES-256-GCM')
    gen = aes_gcm('test')
    salt = gen.salt
    gcipher = gen.encrypt(b'holo')
    gde = aes_gcm('test', salt)
    print(gde.decrypt(*gcipher))


if __name__ == '__main__':
    test()
Python
0.000221
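The cipher above derives its key from password plus salt and uses a plain 64-bit counter as the GCM nonce, so the two endpoints stay in lockstep as long as they process messages in the same order. Nonce reuse under one key is fatal for GCM, which is why the counter increments on every call. A sketch of that counter discipline with PyCryptodome (same library as the record above; a random key stands in for the derived one):

import struct
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes

key = get_random_bytes(32)

def seal(counter, plaintext):
    nonce = struct.pack('>Q', counter)            # 8-byte big-endian counter nonce
    cipher = AES.new(key, AES.MODE_GCM, nonce)
    return cipher.encrypt_and_digest(plaintext)   # (ciphertext, 16-byte tag)

def open_(counter, ciphertext, tag):
    nonce = struct.pack('>Q', counter)            # must match the sealing counter
    cipher = AES.new(key, AES.MODE_GCM, nonce)
    return cipher.decrypt_and_verify(ciphertext, tag)  # raises ValueError on tamper

ct, tag = seal(0, b'holo')
print(open_(0, ct, tag))  # b'holo'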
6288caa954c8834ef6fec0bf24c62a1c8265e302
Use InstanceProfileName value to remove
pipes/iam/create_iam.py
pipes/iam/create_iam.py
"""Create IAM Instance Profiles, Roles, Users, and Groups.""" import logging import boto3 from boto3.exceptions import botocore from .utils import get_details, get_template LOG = logging.getLogger(__name__) def create_iam_resources(env='dev', app=''): """Create the IAM Resources for the application. Args: env (str): Deployment environment, i.e. dev, stage, prod. app (str): Spinnaker Application name. Returns: True upon successful completion. """ session = boto3.session.Session(profile_name=env) client = session.client('iam') details = get_details(env=env, app=app) resource_action( client, action='create_role', log_format='Role: %(RoleName)s', RoleName=details.role, AssumeRolePolicyDocument=get_template('iam_role_policy.json')) resource_action(client, action='create_instance_profile', log_format='Instance Profile: %(InstanceProfileName)s', InstanceProfileName=details.profile) attach_profile_to_role(client, role_name=details.role, profile_name=details.profile) resource_action(client, action='create_user', log_format='User: %(UserName)s', UserName=details.user) resource_action(client, action='create_group', log_format='Group: %(GroupName)s', GroupName=details.group) resource_action(client, action='add_user_to_group', log_format='User to Group: %(UserName)s -> %(GroupName)s', log_failure=True, GroupName=details.group, UserName=details.user) return True def attach_profile_to_role(client, role_name='forrest_unicorn_role', profile_name='forrest_unicorn_profile'): """Attach an IAM Instance Profile _profile_name_ to Role _role_name_. Args: role_name (str): Name of Role. profile_name (str): Name of Instance Profile. Returns: True upon successful completion. """ current_instance_profiles = client.list_instance_profiles_for_role( RoleName=role_name)['InstanceProfiles'] for profile in current_instance_profiles: if profile['InstanceProfileName'] == profile_name: LOG.info('Found Instance Profile attached to Role: %s -> %s', profile_name, role_name) break else: for remove_profile in current_instance_profiles: client.remove_role_from_instance_profile( InstanceProfileName=remove_profile['InstanceProfileName'], RoleName=role_name) LOG.info('Removed Instance Profile from Role: %s -> %s', remove_profile, role_name) client.add_role_to_instance_profile(InstanceProfileName=profile_name, RoleName=role_name) LOG.info('Added Instance Profile to Role: %s -> %s', profile_name, role_name) return True def resource_action(client, action='', log_format='item: %(key)s', log_failure=False, **kwargs): """Call _action_ using boto3 _client_ with _kwargs_. This is meant for _action_ methods that will create or implicitely prove a given Resource exists. The _log_failure_ flag is available for methods that should always succeed, but will occasionally fail due to unknown AWS issues. Args: client (botocore.client.IAM): boto3 client object. action (str): Client method to call. log_format (str): Generic log message format, 'Added' or 'Found' will be prepended depending on the scenario. log_failure (bool): Will log WARNING level 'Failed' instead of 'Found' message. **kwargs: Keyword arguments to pass to _action_ method. Returns: True upon successful completion. """ try: getattr(client, action)(**kwargs) LOG.info(' '.join(('Added', log_format)), kwargs) except botocore.exceptions.ClientError: if not log_failure: LOG.info(' '.join(('Found', log_format)), kwargs) else: LOG.warning(' '.join(('Failed', log_format)), kwargs) return True
"""Create IAM Instance Profiles, Roles, Users, and Groups.""" import logging import boto3 from boto3.exceptions import botocore from .utils import get_details, get_template LOG = logging.getLogger(__name__) def create_iam_resources(env='dev', app=''): """Create the IAM Resources for the application. Args: env (str): Deployment environment, i.e. dev, stage, prod. app (str): Spinnaker Application name. Returns: True upon successful completion. """ session = boto3.session.Session(profile_name=env) client = session.client('iam') details = get_details(env=env, app=app) resource_action( client, action='create_role', log_format='Role: %(RoleName)s', RoleName=details.role, AssumeRolePolicyDocument=get_template('iam_role_policy.json')) resource_action(client, action='create_instance_profile', log_format='Instance Profile: %(InstanceProfileName)s', InstanceProfileName=details.profile) attach_profile_to_role(client, role_name=details.role, profile_name=details.profile) resource_action(client, action='create_user', log_format='User: %(UserName)s', UserName=details.user) resource_action(client, action='create_group', log_format='Group: %(GroupName)s', GroupName=details.group) resource_action(client, action='add_user_to_group', log_format='User to Group: %(UserName)s -> %(GroupName)s', log_failure=True, GroupName=details.group, UserName=details.user) return True def attach_profile_to_role(client, role_name='forrest_unicorn_role', profile_name='forrest_unicorn_profile'): """Attach an IAM Instance Profile _profile_name_ to Role _role_name_. Args: role_name (str): Name of Role. profile_name (str): Name of Instance Profile. Returns: True upon successful completion. """ current_instance_profiles = client.list_instance_profiles_for_role( RoleName=role_name)['InstanceProfiles'] for profile in current_instance_profiles: if profile['InstanceProfileName'] == profile_name: LOG.info('Found Instance Profile attached to Role: %s -> %s', profile_name, role_name) break else: for remove_profile in current_instance_profiles: client.remove_role_from_instance_profile( InstanceProfileName=remove_profile, RoleName=role_name) LOG.info('Removed Instance Profile from Role: %s -> %s', remove_profile, role_name) client.add_role_to_instance_profile(InstanceProfileName=profile_name, RoleName=role_name) LOG.info('Added Instance Profile to Role: %s -> %s', profile_name, role_name) return True def resource_action(client, action='', log_format='item: %(key)s', log_failure=False, **kwargs): """Call _action_ using boto3 _client_ with _kwargs_. This is meant for _action_ methods that will create or implicitely prove a given Resource exists. The _log_failure_ flag is available for methods that should always succeed, but will occasionally fail due to unknown AWS issues. Args: client (botocore.client.IAM): boto3 client object. action (str): Client method to call. log_format (str): Generic log message format, 'Added' or 'Found' will be prepended depending on the scenario. log_failure (bool): Will log WARNING level 'Failed' instead of 'Found' message. **kwargs: Keyword arguments to pass to _action_ method. Returns: True upon successful completion. """ try: getattr(client, action)(**kwargs) LOG.info(' '.join(('Added', log_format)), kwargs) except botocore.exceptions.ClientError: if not log_failure: LOG.info(' '.join(('Found', log_format)), kwargs) else: LOG.warning(' '.join(('Failed', log_format)), kwargs) return True
Python
0
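The fix in this record is one argument: `remove_role_from_instance_profile` expects the profile *name* string, but the loop variable coming out of `list_instance_profiles_for_role` is a whole profile dict, so the old code passed a dict where boto3 needed `profile['InstanceProfileName']`. A sketch of the corrected indexing against the response shape (abbreviated, illustrative values; no real AWS call is made):

# Shape returned by list_instance_profiles_for_role (abbreviated)
profiles = [
    {'InstanceProfileName': 'forrest_unicorn_profile', 'Arn': 'arn:aws:iam::123456789012:instance-profile/forrest_unicorn_profile'},
]

for profile in profiles:
    # old bug: passing `profile` (a dict) where boto3 validates a string
    name = profile['InstanceProfileName']
    print('would call remove_role_from_instance_profile(InstanceProfileName=%r)' % name)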
f21204c8828e840dc54c6822348fa9a47bc8964e
Add model's to_dict method.
opensrs/models.py
opensrs/models.py
from dateutil.parser import parse


class Domain(object):
    def __init__(self, data):
        self.name = data['name']
        self.auto_renew = (data['f_auto_renew'] == 'Y')
        self.expiry_date = parse(data['expiredate']).date()

    @property
    def tld(self):
        return self.name.split('.')[-1]

    def to_dict(self):
        return {
            'name': self.name,
            'auto_renew': self.auto_renew,
            'expiry_date': self.expiry_date
        }
from dateutil.parser import parse


class Domain(object):
    def __init__(self, data):
        self.name = data['name']
        self.auto_renew = (data['f_auto_renew'] == 'Y')
        self.expiry_date = parse(data['expiredate']).date()

    @property
    def tld(self):
        return self.name.split('.')[-1]
Python
0
fa82883576a659d9cd9d830919e744299ac14ac7
improve show command to show target types, build phase types and filter other build phases.
pbxproj/pbxcli/pbxproj_show.py
pbxproj/pbxcli/pbxproj_show.py
""" usage: pbxproj show [options] <project> pbxproj show [options] (--target <target>...) <project> [(-s | --source-files) | (-H | --header-files) | (-r | --resource-files) | (-f | --framework-files) | (--build-phase-files <build_phase_type>)] positional arguments: <project> Project path to the .xcodeproj folder. generic options: -h, --help This message. -t, --target <target> Target name to be modified. If there is no target specified, all targets are used. -b, --backup Creates a backup before start processing the command. target options: -s, --source-files Show the source files attached to the target -r, --resource-files Show the resource files attached to the target -f, --framework-files Show the library files attached to the target -H, --header-files Show the header files attached to the target -c, --configurations Show the configurations attached to the target -B, --build-phase-files <type> Show the files associated to the build phase of the given type. """ from pbxproj.pbxcli import * def execute(project, args): # make a decision of what function to call based on the -D flag if args[u'--target']: return _target_info(project, args[u'--target'], args) else: return _summary(project, args) def _summary(project, args): info = '' for target in project.objects.get_targets(): info += "{name}:\n" \ "\tTarget type: {type}\n" \ "\tProduct name: {productName}\n" \ "\tConfigurations: {configs}\n" \ .format(name=target.name, productName=target.productName, type=target.isa, configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)]), ) for build_phase_id in target.buildPhases: build_phase = project.objects[build_phase_id] info += "\t{name} ({type}) file count: {count}\n"\ .format(name=build_phase._get_comment(), type=build_phase.isa, count=build_phase.files.__len__()) info += "\n" return info def _target_info(project, target_name, args): build_phases = [] if args[u'--source-files']: build_phases += [u'PBXSourcesBuildPhase'] elif args[u'--header-files']: build_phases += [u'PBXHeadersBuildPhase'] elif args[u'--resource-files']: build_phases += [u'PBXResourcesBuildPhase'] elif args[u'--framework-files']: build_phases += [u'PBXFrameworksBuildPhase'] elif args[u'--build-phase-files']: build_phases += [args[u'--build-phase-files']] info = '' for target in project.objects.get_targets(target_name): info += "{name}:\n" \ "\tProduct name: {productName}\n" \ .format(name=target.name, productName=target.productName, configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)]), ) if args[u'--configurations']: info += "\tConfigurations: {configs}\n" \ .format(configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)])) for build_phase_id in target.buildPhases: build_phase = project.objects[build_phase_id] if build_phase.isa in build_phases: info += "\t{name}: \n\t\t".format(name=build_phase._get_comment()) files = [] for build_file_id in build_phase.files: build_file = project.objects[build_file_id] files.append(project.objects[build_file.fileRef]._get_comment()) info += '{files}\n'.format(files="\n\t\t".join(sorted(files))) info += '\n' return info
""" usage: pbxproj show [options] <project> pbxproj show [options] (--target <target>...) <project> [(-s | --source-files) | (-H | --header-files) | (-r | --resource-files) | (-f | --framework-files)] positional arguments: <project> Project path to the .xcodeproj folder. generic options: -h, --help This message. -t, --target <target> Target name to be modified. If there is no target specified, all targets are used. -b, --backup Creates a backup before start processing the command. target options: -s, --source-files Show the source files attached to the target -r, --resource-files Show the resource files attached to the target -f, --framework-files Show the library files attached to the target -H, --header-files Show the header files attached to the target -c, --configurations Show the configurations attached to the target """ from pbxproj.pbxcli import * def execute(project, args): # make a decision of what function to call based on the -D flag if args[u'--target']: return _target_info(project, args[u'--target'], args) else: return _summary(project, args) def _summary(project, args): info = '' for target in project.objects.get_targets(): info += "{name}:\n" \ "\tProduct name: {productName}\n" \ "\tConfigurations: {configs}\n" \ .format(name=target.name, productName=target.productName, configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)]), ) for build_phase_id in target.buildPhases: build_phase = project.objects[build_phase_id] info += "\t{name} count: {count}\n"\ .format(name=build_phase._get_comment(), count=build_phase.files.__len__()) info += "\n" return info def _target_info(project, target_name, args): build_phases = [] if args[u'--source-files']: build_phases += [u'PBXSourcesBuildPhase'] elif args[u'--header-files']: build_phases += [u'PBXHeadersBuildPhase'] elif args[u'--resource-files']: build_phases += [u'PBXResourcesBuildPhase'] elif args[u'--framework-files']: build_phases += [u'PBXFrameworksBuildPhase'] info = '' for target in project.objects.get_targets(target_name): info += "{name}:\n" \ "\tProduct name: {productName}\n" \ .format(name=target.name, productName=target.productName, configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)]), ) if args[u'--configurations']: info += "\tConfigurations: {configs}\n" \ .format(configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)])) for build_phase_id in target.buildPhases: build_phase = project.objects[build_phase_id] if build_phase.isa in build_phases: info += "\t{name}: \n\t\t".format(name=build_phase._get_comment()) files = [] for build_file_id in build_phase.files: build_file = project.objects[build_file_id] files.append(project.objects[build_file.fileRef]._get_comment()) info += '{files}\n'.format(files="\n\t\t".join(sorted(files))) info += '\n' return info
Python
0
da12bb0058cb48d3262eb70469aa30cdb8312ee2
fix typos/bugs/indexing in block dicing
Control/dice_block.py
Control/dice_block.py
import os
import sys
import subprocess
import h5py


def check_file(filename):
    # verify the file has the expected data
    f = h5py.File(filename, 'r')
    if set(f.keys()) != set(['segmentations', 'probabilities']):
        os.unlink(filename)
        return False
    return True

try:
    args = sys.argv[1:]
    i_min = int(args.pop(0))
    j_min = int(args.pop(0))
    i_max = int(args.pop(0))
    j_max = int(args.pop(0))
    output = args.pop(0)
    input_slices = args

    if os.path.exists(output):
        print output, "already exists"
        if check_file(output):
            sys.exit(0)
        else:
            os.unlink(output)

    # Write to a temporary location to avoid partial files
    temp_file_path = output + '_partial'

    out_f = h5py.File(temp_file_path, 'w')
    num_slices = len(input_slices)
    for slice_idx, slice in enumerate(input_slices):
        in_f = h5py.File(slice, 'r')
        segs = in_f['segmentations'][i_min:i_max, j_min:j_max, :]
        probs = in_f['probabilities'][i_min:i_max, j_min:j_max]
        if not 'segmentations' in out_f.keys():
            outsegs = out_f.create_dataset('segmentations',
                                           tuple(list(segs.shape) + [num_slices]),
                                           dtype=segs.dtype,
                                           chunks=(64, 64, segs.shape[2], 1))
            outprobs = out_f.create_dataset('probabilities',
                                            tuple(list(probs.shape) + [num_slices]),
                                            dtype=probs.dtype,
                                            chunks=(64, 64, 1))
        outsegs[:, :, :, slice_idx] = segs
        outprobs[:, :, slice_idx] = probs
    out_f.close()

    # move to final location
    os.rename(output + '_partial', output)
    print "Successfully wrote", output
except KeyboardInterrupt:
    pass
import os
import sys
import subprocess
import h5py


def check_file(filename):
    # verify the file has the expected data
    f = h5py.File(filename, 'r')
    if set(f.keys()) != set(['segmentations', 'probabilities']):
        os.unlink(filename)
        return False
    return True

try:
    args = sys.argv[1:]
    i_min = int(args.pop())
    j_min = int(args.pop())
    i_max = int(args.pop())
    j_max = int(args.pop())
    output = args.pop()
    input_slices = args

    if os.path.exists(segmentations_file):
        print segmentations_file, "already exists"
        if check_file(segmentations_file):
            sys.exit(0)
        else:
            os.unlink(output)

    # Write to a temporary location to avoid partial files
    temp_file_path = output + '_partial'

    out_f = h5py.File(temp_file_path, 'classify')
    num_slices = len(input_slices)
    for slice_idx, slice in enumerate(input_slices):
        in_f = h5py.File(slice, 'r')
        segs = in_f['segmentations'][i_min:i_max, j_min:j_max, :]
        probs = in_f['segmentations'][i_min:i_max, j_min:j_max]
        if not 'segmentations' in out_f.keys():
            outsegs = out_f.create_dataset('segmentations',
                                           tuple(list(segs.shape) + [num_slices]),
                                           dtype=segs.dtype,
                                           chunks=(64, 64, segs.shape[2], 1))
            outprobs = out_f.create_dataset('probabilities',
                                            dtype=probabilities.dtype,
                                            chunks=(64, 64, num_slices)
                                            chunks=(64, 64, 1))
        outsegs[:, :, :, slice_idx] = segs
        outprobs[:, :, slice_idx] = probs
    outf.close()

    # move to final location
    os.rename(output + '_partial', output)
    print "Successfully wrote", output
except KeyboardInterrupt:
    pass
Python
0.000017
5d2301b15e07394e24fed2fac2f258d72554eede
Add tests for query_geonames, MITIE, city resolution
resources/tests/test_mordecai.py
resources/tests/test_mordecai.py
import os
import sys
import glob
from ConfigParser import ConfigParser
from mitie import named_entity_extractor
from ..country import CountryAPI
from ..places import PlacesAPI
from ..utilities import mitie_context, setup_es, query_geonames


def test_places_api_one():
    if os.environ.get('CI'):
        ci = 'circle'
        assert ci == 'circle'
    else:
        a = PlacesAPI()
        locs = "Ontario"
        result = a.process(locs, ['CAN'])
        gold = [{u'lat': 49.25014, u'searchterm': 'Ontario',
                 u'lon': -84.49983, u'countrycode': u'CAN',
                 u'placename': u'Ontario'}]
        assert result == gold


def test_query_geonames():
    conn = setup_es()
    placename = "Berlin"
    country_filter = ["DEU"]
    qg = query_geonames(conn, placename, country_filter)
    hit_hit_name = qg['hits']['hits'][0]['name']
    assert hit_hit_name == "Berlin"


def test_places_api_syria():
    if os.environ.get('CI'):
        ci = 'circle'
        assert ci == 'circle'
    else:
        a = PlacesAPI()
        locs = "Rebels from Aleppo attacked Damascus."
        result = a.process(locs, ['SYR'])
        gold = [{u'lat': 36.20124, u'searchterm': 'Aleppo',
                 u'lon': 37.16117, u'countrycode': u'SYR',
                 u'placename': u'Aleppo'},
                {u'lat': 33.5102, u'searchterm': 'Damascus',
                 u'lon': 36.29128, u'countrycode': u'SYR',
                 u'placename': u'Damascus'}]
        assert result == gold


def test_mitie_context():
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    config_file = glob.glob(os.path.join(__location__, '../../config.ini'))
    parser = ConfigParser()
    parser.read(config_file)
    mitie_directory = parser.get('Locations', 'mitie_directory')
    mitie_ner_model = parser.get('Locations', 'mitie_ner_model')
    sys.path.append(mitie_directory)
    ner_model = named_entity_extractor(mitie_ner_model)
    text = "The meeting happened in Ontario."
    mc = mitie_context(text, ner_model)
    mc_gold = {u'entities': [{u'text': 'Ontario', u'tag': u'LOCATION',
                              u'score': 1.3923831181343844,
                              u'context': ['meeting', 'happened', 'in', '.']}]}
    assert mc == mc_gold


def test_country_process_one():
    a = CountryAPI()
    result = a.process('The meeting happened in Ontario.')
    assert result == u'CAN'


def test_country_process_two():
    a = CountryAPI()
    result = a.process('Rebels from Damascus attacked Aleppo')
    assert result == u'SYR'


def test_city_resolution():
    a = PlacesAPI()
    city_list = [("Lagos", "NGA"), ("Mogadishu", "SOM"), ("Mannheim", "DEU"),
                 ("Noida", "IND"), ("Berlin", "DEU"), ("Damascus", "SYR"),
                 ("New York", "USA"), ("San Francisco", "USA"),
                 ("Freetown", "SLE"), ("Cape Town", "ZAF"),
                 ("Windhoek", "NAM"), ("Paris", "FRA")]
    rs = [a.process(c[0], [c[1]]) for c in city_list]
    searched_cities = [c[0]['searchterm'] for c in rs]
    resolved_cities = [c[0]['placename'] for c in rs]
    assert resolved_cities == searched_cities
import os

from ..country import CountryAPI
from ..places import PlacesAPI


def test_places_api_one():
    if os.environ.get('CI'):
        ci = 'circle'
        assert ci == 'circle'
    else:
        a = PlacesAPI()
        locs = {u'entities': [{u'context': ['meeting', 'happened', 'in', '.'],
                               u'score': 1.3923831181343844,
                               u'tag': u'LOCATION',
                               u'text': 'Ontario'}]}
        result = a.process(locs, 'CAN')
        gold = [{u'countrycode': u'CAN', u'lat': 43.65004, u'lon': -79.90554,
                 u'placename': u'SunnyView Dental', u'searchterm': 'Ontario'}]
        assert result == gold


def test_country_process_one():
    a = CountryAPI()
    result = a.process('The meeting happened in Ontario.')
    assert result == u'CAN'


def test_country_process_two():
    a = CountryAPI()
    result = a.process('Rebels from Damascus attacked Aleppo')
    assert result == u'SYR'
Python
0.000141
2443c891e5f9cccb5c36b02303a3b9b7a94a4c45
Change Jinja escape sequences.
generate.py
generate.py
#!/usr/bin/env python3
import os
import shutil
from jinja2 import Environment,FileSystemLoader
from pygments import highlight
from pygments.lexers import TexLexer
from pygments.formatters import HtmlFormatter
from subprocess import Popen,PIPE

env = Environment(loader=FileSystemLoader("tmpl"),
                  block_start_string='~{', block_end_string='}~',
                  variable_start_string='~{{', variable_end_string='}}~')

snippets_dir = "snippets"
dist_dir = "dist"
html_index = "/index.html"
gen_snippets_dir = "/gen_snippets"
static_dir = "static"

shutil.rmtree(dist_dir, ignore_errors=True)
shutil.copytree(static_dir, dist_dir)
os.makedirs(dist_dir+"/"+gen_snippets_dir)

snippets = []
for subdir, dirs, files in os.walk(snippets_dir):
    for fname in files:
        trimmedName, ext = os.path.splitext(fname)
        full_path = subdir + "/" + fname
        if ext == '.tex':
            with open(full_path, "r") as snippet_f:
                gen_tex_name = gen_snippets_dir+"/"+fname
                gen_pdf_name = gen_snippets_dir+"/"+trimmedName+".pdf"
                gen_png_name = gen_snippets_dir+"/"+trimmedName+".png"
                snippet_content = snippet_f.read().strip()
                with open(dist_dir+"/"+gen_tex_name, "w") as f:
                    f.write(env.get_template("base.jinja.tex").render(
                        content=snippet_content
                    ))
                snippets.append({
                    'fname': trimmedName,
                    'pdf': gen_pdf_name,
                    'png': gen_png_name,
                    'content': highlight(snippet_content, TexLexer(), HtmlFormatter())
                })

p = Popen(['make', "-f", "../../Makefile.slides", "-C", dist_dir+"/"+gen_snippets_dir],
          stdout=PIPE, stderr=PIPE)
out = p.communicate()
if out[1]:
    print("Warning: Make stderr non-empty.")
    print("===Stdout:")
    print(out[0].decode())
    print("===Stderr:")
    print(out[1].decode())

with open("tmpl/preamble.tex", "r") as f:
    preamble = f.read()

with open(dist_dir+"/"+html_index, "w") as idx_f:
    idx_f.write(env.get_template("index.jinja.html").render(
        snippets=snippets,
        base=highlight(
            env.get_template("base.jinja.tex").render(
                content="Start content here."
            ),
            TexLexer(),
            HtmlFormatter()
        )
    ))
#!/usr/bin/env python3
import os
import shutil
from jinja2 import Environment,FileSystemLoader
from pygments import highlight
from pygments.lexers import TexLexer
from pygments.formatters import HtmlFormatter
from subprocess import Popen,PIPE

env = Environment(loader=FileSystemLoader("tmpl"))

snippets_dir = "snippets"
dist_dir = "dist"
html_index = "/index.html"
gen_snippets_dir = "/gen_snippets"
static_dir = "static"

shutil.rmtree(dist_dir, ignore_errors=True)
shutil.copytree(static_dir, dist_dir)
os.makedirs(dist_dir+"/"+gen_snippets_dir)

snippets = []
for subdir, dirs, files in os.walk(snippets_dir):
    for fname in files:
        trimmedName, ext = os.path.splitext(fname)
        full_path = subdir + "/" + fname
        if ext == '.tex':
            with open(full_path, "r") as snippet_f:
                gen_tex_name = gen_snippets_dir+"/"+fname
                gen_pdf_name = gen_snippets_dir+"/"+trimmedName+".pdf"
                gen_png_name = gen_snippets_dir+"/"+trimmedName+".png"
                snippet_content = snippet_f.read().strip()
                with open(dist_dir+"/"+gen_tex_name, "w") as f:
                    f.write(env.get_template("base.jinja.tex").render(
                        content=snippet_content
                    ))
                snippets.append({
                    'fname': trimmedName,
                    'pdf': gen_pdf_name,
                    'png': gen_png_name,
                    'content': highlight(snippet_content, TexLexer(), HtmlFormatter())
                })

p = Popen(['make', "-f", "../../Makefile.slides", "-C", dist_dir+"/"+gen_snippets_dir],
          stdout=PIPE, stderr=PIPE)
out = p.communicate()
if out[1]:
    print("Warning: Make stderr non-empty.")
    print("===Stdout:")
    print(out[0].decode())
    print("===Stderr:")
    print(out[1].decode())

with open("tmpl/preamble.tex", "r") as f:
    preamble = f.read()

with open(dist_dir+"/"+html_index, "w") as idx_f:
    idx_f.write(env.get_template("index.jinja.html").render(
        snippets=snippets,
        base=highlight(
            env.get_template("base.jinja.tex").render(
                content="Start content here."
            ),
            TexLexer(),
            HtmlFormatter()
        )
    ))
Python
0
18aa5e20a5dbc931f48774c4bf034e6efe022923
Implement 'force' and 'directory' options
generate.py
generate.py
#!/usr/bin/env python

import sys
import os
import re
from optparse import OptionParser

from crontab import CronTab
from jinja2 import FileSystemLoader, Environment
from yaml import dump


def remove_user_from_command(command_with_user):
    match = re.search(r'\{{0,2}\s?\w+\s?\}{0,2}\s(.*)', command_with_user)
    return match.group(1) if match else command_with_user


def replace_template_variables(command):
    config_vars = []

    def replace(input_string):
        config_vars.append(input_string.group(1))
        return '{{{}}}'.format(input_string.group(1))

    formatted_string = re.sub(r'\{{2}\s*(\w+)\s*\}{2}', replace, command)
    formatted_args = ', '.join(
        ['{0}=task_config[\'{0}\']'.format(var) for var in config_vars])
    if config_vars:
        result_string = '\'{0}\'.format({1})'.format(
            formatted_string, formatted_args)
    else:
        result_string = '\'{0}\''.format(formatted_string)
    return result_string, config_vars


def task_name(shell_command):
    match = re.search(r'\/(.*)\.', shell_command)
    task_name = match.group(1) if match else ''
    task_name = task_name.replace('-', '_')
    return task_name


def render_to_file(filename, template, **kwargs):
    with open(filename, 'w') as ofile:
        ofile.write(template.render(kwargs))


def main():
    parser = OptionParser()
    parser.add_option("-d", "--directory", dest="directory",
                      help="directory for output files", default='')
    parser.add_option("-f", "--force",
                      action="store_true", dest="force", default=False,
                      help="force file overwrite")
    (options, args) = parser.parse_args()

    env = Environment(loader=FileSystemLoader('.'))
    for cron in [CronTab(tabfile=os.path.abspath(arg)) for arg in args]:
        for job in cron:
            test_template = env.get_template('workflow-test-template.jj2')
            workflow_template = env.get_template('workflow-template.jj2')
            task = task_name(job.command)
            command = remove_user_from_command(job.command)
            command, vars = replace_template_variables(command)
            values = {
                'hour': job.hour,
                'minute': job.minute,
                'task_config_filename': task + '.yaml',
                'dag_id': task,
                'task_id': task,
                'command': command
            }
            if options.directory and not os.path.exists(options.directory):
                os.mkdir(options.directory)
            workflow_filename = os.path.join(
                options.directory, task + '.py')
            if not os.path.exists(workflow_filename) or options.force:
                render_to_file(workflow_filename, workflow_template, **values)
            test_filename = os.path.join(
                options.directory, 'test_' + task + '.py')
            if not os.path.exists(test_filename) or options.force:
                render_to_file(test_filename, test_template,
                               workflow_module_name=task)
            config_filename = os.path.join(
                options.directory, task + '.yaml')
            if not os.path.exists(config_filename) or options.force:
                with open(config_filename, 'w') as cfile:
                    dump({var: '' for var in vars}, cfile)
    return 0


if __name__ == '__main__':
    sys.exit(main())
#!/usr/bin/env python

import sys
import os
import re
from optparse import OptionParser

from crontab import CronTab
from jinja2 import FileSystemLoader, Environment
from yaml import dump


def remove_user_from_command(command_with_user):
    match = re.search(r'\{{0,2}\s?\w+\s?\}{0,2}\s(.*)', command_with_user)
    return match.group(1) if match else command_with_user


def replace_template_variables(command):
    config_vars = []

    def replace(input_string):
        config_vars.append(input_string.group(1))
        return '{{{}}}'.format(input_string.group(1))

    formatted_string = re.sub(r'\{{2}\s*(\w+)\s*\}{2}', replace, command)
    formatted_args = ', '.join(
        ['{0}=task_config[\'{0}\']'.format(var) for var in config_vars])
    if config_vars:
        result_string = '\'{0}\'.format({1})'.format(
            formatted_string, formatted_args)
    else:
        result_string = '\'{0}\''.format(formatted_string)
    return result_string, config_vars


def task_name(shell_command):
    match = re.search(r'\/(.*)\.', shell_command)
    task_name = match.group(1) if match else ''
    task_name = task_name.replace('-', '_')
    return task_name


def main():
    parser = OptionParser()
    parser.add_option("-d", "--directory", dest="directory",
                      help="directory for output files")
    parser.add_option("-f", "--force",
                      action="store_true", dest="force", default=False,
                      help="force file overwrite")
    (options, args) = parser.parse_args()

    env = Environment(loader=FileSystemLoader('.'))
    for cron in [CronTab(tabfile=os.path.abspath(arg)) for arg in args]:
        for job in cron:
            test_template = env.get_template('workflow-test-template.jj2')
            workflow_template = env.get_template('workflow-template.jj2')
            task = task_name(job.command)
            command = remove_user_from_command(job.command)
            command, vars = replace_template_variables(command)
            values = {
                'hour': job.hour,
                'minute': job.minute,
                'task_config_filename': task + '.yaml',
                'dag_id': task,
                'task_id': task,
                'command': command
            }
            with open(task + '.py', 'w') as wfile:
                wfile.write(workflow_template.render(**values))
            with open('test_' + task + '.py', 'w') as tfile:
                tfile.write(test_template.render(workflow_module_name=task))
            with open(task + '.yaml', 'w') as cfile:
                dump({var: '' for var in vars}, cfile)
    return 0


if __name__ == '__main__':
    sys.exit(main())
Python
0.999999
54e2359ed2cd75b87dc4a8007df6b252af3a3765
fix typo
HARK/ConsumptionSaving/tests/test_ConsLaborModel.py
HARK/ConsumptionSaving/tests/test_ConsLaborModel.py
from HARK.ConsumptionSaving.ConsLaborModel import (
    LaborIntMargConsumerType,
    init_labor_lifecycle,
)
import unittest


class test_LaborIntMargConsumerType(unittest.TestCase):
    def setUp(self):
        self.model = LaborIntMargConsumerType()
        self.model_finite_lifecycle = LaborIntMargConsumerType(**init_labor_lifecycle)
        self.model_finite_lifecycle.cycles = 1

    def test_solution(self):
        self.model.solve()
        self.model_finite_lifecycle.solve()

        self.model.T_sim = 120
        self.model.track_vars = ["bNrmNow", "cNrmNow"]
        self.model.initializeSim()
        self.model.simulate()
from HARK.ConsumptionSaving.ConsLaborModel import (
    LaborIntMargConsumerType,
    init_labor_lifecycle,
)
import unittest


class test_LaborIntMargConsumerType(unittest.TestCase):
    def setUp(self):
        self.model = LaborIntMargConsumerType()
        self.model_finte_lifecycle = LaborIntMargConsumerType(**init_labor_lifecycle)
        self.model_finte_lifecycle.cycles = 1

    def test_solution(self):
        self.model.solve()
        self.model_finte_lifecycle.solve()

        self.model.T_sim = 120
        self.model.track_vars = ["bNrmNow", "cNrmNow"]
        self.model.initializeSim()
        self.model.simulate()
Python
0.999991
c1b4216e610a46260f52d5ed71267a2ed5fcdd25
update debug url to account for downloads
hs_core/debug_urls.py
hs_core/debug_urls.py
"""Extra URLs that add debugging capabilities to resources.""" from django.conf.urls import url from hs_core import views urlpatterns = [ # Resource Debugging: print consistency problems in a resource url(r'^debug/resource/(?P<shortkey>[0-9a-f-]+)/$', views.debug_resource_view.debug_resource, name='debug_resource'), url(r'^debug/resource/(?P<shortkey>[0-9a-f-]+)/irods-issues/$', views.debug_resource_view.irods_issues, name='debug_resource'), url(r'^taskstatus/(?P<task_id>[A-z0-9\-]+)/$', views.debug_resource_view.check_task_status, name='get_debug_task_status'), ]
"""Extra URLs that add debugging capabilities to resources.""" from django.conf.urls import url from hs_core import views urlpatterns = [ # Resource Debugging: print consistency problems in a resource url(r'^resource/(?P<shortkey>[0-9a-f-]+)/debug/$', views.debug_resource_view.debug_resource, name='debug_resource'), url(r'^resource/(?P<shortkey>[0-9a-f-]+)/debug/irods-issues/$', views.debug_resource_view.irods_issues, name='debug_resource'), url(r'^taskstatus/(?P<task_id>[A-z0-9\-]+)/$', views.debug_resource_view.check_task_status, name='get_debug_task_status'), ]
Python
0
e68307e10e1aebe8a6c527a15bfc34b1158bf0eb
Use labels in API for #
judge/views/api.py
judge/views/api.py
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse, Http404

from judge.models import Contest, Problem, Profile, Submission


def sane_time_repr(delta):
    days = delta.days
    hours = delta.seconds / 3600
    minutes = (delta.seconds % 3600) / 60
    return '%02d:%02d:%02d' % (days, hours, minutes)


def api_contest_list(request):
    contests = {}
    for c in Contest.objects.filter(is_public=True, is_private=False):
        contests[c.key] = {
            'name': c.name,
            'start_time': c.start_time.isoformat(),
            'end_time': c.end_time.isoformat(),
            'time_limit': c.time_limit and sane_time_repr(c.time_limit),
            'labels': ['external'] if c.is_external else [],
        }
    return JsonResponse(contests)


def api_problem_list(request):
    problems = {}
    for p in Problem.objects.filter(is_public=True):
        problems[p.code] = {
            'points': p.points,
            'partial': p.partial,
            'name': p.name,
            'group': p.group.full_name
        }
    return JsonResponse(problems)


def api_problem_info(request, problem):
    try:
        p = Problem.objects.get(code=problem)
    except ObjectDoesNotExist:
        raise Http404()

    return JsonResponse({
        'name': p.name,
        'authors': list(p.authors.values_list('user__username', flat=True)),
        'types': list(p.types.values_list('full_name', flat=True)),
        'group': p.group.full_name,
        'time_limit': p.time_limit,
        'memory_limit': p.memory_limit,
        'points': p.points,
        'partial': p.partial,
        'languages': list(p.allowed_languages.values_list('key', flat=True)),
    })


def api_user_list(request):
    users = {}
    for p in Profile.objects.select_related('user').only('user__username', 'name', 'points', 'display_rank'):
        users[p.user.username] = {
            'display_name': p.name,
            'points': p.points,
            'rank': p.display_rank
        }
    return JsonResponse(users)


def api_user_info(request, user):
    try:
        p = Profile.objects.get(user__username=user)
    except ObjectDoesNotExist:
        raise Http404()

    return JsonResponse({
        'display_name': p.name,
        'points': p.points,
        'rank': p.display_rank,
        'solved_problems': [],  # TODO
    })


def api_user_submissions(request, user):
    try:
        p = Profile.objects.get(user__username=user)
    except ObjectDoesNotExist:
        raise Http404()

    subs = Submission.objects.filter(user=p, problem__is_public=True).select_related('problem', 'language') \
        .only('id', 'problem__code', 'time', 'memory', 'points', 'language__key', 'status', 'result')
    data = {}
    for s in subs:
        data[s.id] = {
            'problem': s.problem.code,
            'time': s.time,
            'memory': s.memory,
            'points': s.points,
            'language': s.language.key,
            'status': s.status,
            'result': s.result
        }
    return JsonResponse(data)
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse, Http404

from judge.models import Contest, Problem, Profile, Submission


def sane_time_repr(delta):
    days = delta.days
    hours = delta.seconds / 3600
    minutes = (delta.seconds % 3600) / 60
    return '%02d:%02d:%02d' % (days, hours, minutes)


def api_contest_list(request):
    contests = {}
    for c in Contest.objects.filter(is_public=True, is_private=False):
        contests[c.key] = {
            'name': c.name,
            'start_time': c.start_time.isoformat(),
            'end_time': c.end_time.isoformat(),
            'time_limit': c.time_limit and sane_time_repr(c.time_limit),
            'external': c.is_external,
        }
    return JsonResponse(contests)


def api_problem_list(request):
    problems = {}
    for p in Problem.objects.filter(is_public=True):
        problems[p.code] = {
            'points': p.points,
            'partial': p.partial,
            'name': p.name,
            'group': p.group.full_name
        }
    return JsonResponse(problems)


def api_problem_info(request, problem):
    try:
        p = Problem.objects.get(code=problem)
    except ObjectDoesNotExist:
        raise Http404()

    return JsonResponse({
        'name': p.name,
        'authors': list(p.authors.values_list('user__username', flat=True)),
        'types': list(p.types.values_list('full_name', flat=True)),
        'group': p.group.full_name,
        'time_limit': p.time_limit,
        'memory_limit': p.memory_limit,
        'points': p.points,
        'partial': p.partial,
        'languages': list(p.allowed_languages.values_list('key', flat=True)),
    })


def api_user_list(request):
    users = {}
    for p in Profile.objects.select_related('user').only('user__username', 'name', 'points', 'display_rank'):
        users[p.user.username] = {
            'display_name': p.name,
            'points': p.points,
            'rank': p.display_rank
        }
    return JsonResponse(users)


def api_user_info(request, user):
    try:
        p = Profile.objects.get(user__username=user)
    except ObjectDoesNotExist:
        raise Http404()

    return JsonResponse({
        'display_name': p.name,
        'points': p.points,
        'rank': p.display_rank,
        'solved_problems': [],  # TODO
    })


def api_user_submissions(request, user):
    try:
        p = Profile.objects.get(user__username=user)
    except ObjectDoesNotExist:
        raise Http404()

    subs = Submission.objects.filter(user=p, problem__is_public=True).select_related('problem', 'language') \
        .only('id', 'problem__code', 'time', 'memory', 'points', 'language__key', 'status', 'result')
    data = {}
    for s in subs:
        data[s.id] = {
            'problem': s.problem.code,
            'time': s.time,
            'memory': s.memory,
            'points': s.points,
            'language': s.language.key,
            'status': s.status,
            'result': s.result
        }
    return JsonResponse(data)
Python
0
600a19b8a3f6d320b00d1d2b25e5c0f341f821d1
bump version
kaggle_cli/main.py
kaggle_cli/main.py
import sys

from cliff.app import App
from cliff.commandmanager import CommandManager

VERSION = '0.6.1'


class KaggleCLI(App):
    def __init__(self):
        super(KaggleCLI, self).__init__(
            description='An unofficial Kaggle command line tool.',
            version=VERSION,
            command_manager=CommandManager('kaggle_cli'),
        )


def main(argv=sys.argv[1:]):
    app = KaggleCLI()
    return app.run(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
import sys

from cliff.app import App
from cliff.commandmanager import CommandManager

VERSION = '0.6.0'


class KaggleCLI(App):
    def __init__(self):
        super(KaggleCLI, self).__init__(
            description='An unofficial Kaggle command line tool.',
            version=VERSION,
            command_manager=CommandManager('kaggle_cli'),
        )


def main(argv=sys.argv[1:]):
    app = KaggleCLI()
    return app.run(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
Python
0
5a87dbbb0ea4faa44d743a21a2f7d7aca46242f9
Document internet gateway properties:
heat/engine/resources/internet_gateway.py
heat/engine/resources/internet_gateway.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from heat.engine import clients
from heat.common import exception
from heat.openstack.common import log as logging
from heat.engine import resource

logger = logging.getLogger(__name__)


class InternetGateway(resource.Resource):

    tags_schema = {'Key': {'Type': 'String',
                           'Required': True},
                   'Value': {'Type': 'String',
                             'Required': True}}

    properties_schema = {
        'Tags': {'Type': 'List', 'Schema': {
            'Type': 'Map',
            'Implemented': False,
            'Schema': tags_schema}}
    }

    def handle_create(self):
        self.resource_id_set(self.physical_resource_name())

    def handle_delete(self):
        pass

    @staticmethod
    def get_external_network_id(client):
        ext_filter = {'router:external': True}
        ext_nets = client.list_networks(**ext_filter)['networks']
        if len(ext_nets) != 1:
            # TODO(sbaker) if there is more than one external network
            # add a heat configuration variable to set the ID of
            # the default one
            raise exception.Error(
                'Expected 1 external network, found %d' % len(ext_nets))
        external_network_id = ext_nets[0]['id']
        return external_network_id


class VPCGatewayAttachment(resource.Resource):

    properties_schema = {
        'VpcId': {
            'Type': 'String',
            'Required': True,
            'Description': _('VPC ID for this gateway association.')},
        'InternetGatewayId': {
            'Type': 'String',
            'Description': _('ID of the InternetGateway.')},
        'VpnGatewayId': {
            'Type': 'String',
            'Implemented': False,
            'Description': _('ID of the VPNGateway to attach to the VPC.')}
    }

    def _vpc_route_tables(self):
        for resource in self.stack.resources.itervalues():
            if (resource.has_interface('AWS::EC2::RouteTable') and
                    resource.properties.get('VpcId') ==
                    self.properties.get('VpcId')):
                yield resource

    def add_dependencies(self, deps):
        super(VPCGatewayAttachment, self).add_dependencies(deps)
        # Depend on any route table in this template with the same
        # VpcId as this VpcId.
        # All route tables must exist before gateway attachment
        # as attachment happens to routers (not VPCs)
        for route_table in self._vpc_route_tables():
            deps += (self, route_table)

    def handle_create(self):
        client = self.neutron()
        external_network_id = InternetGateway.get_external_network_id(client)
        for router in self._vpc_route_tables():
            client.add_gateway_router(router.resource_id, {
                'network_id': external_network_id})

    def handle_delete(self):
        from neutronclient.common.exceptions import NeutronClientException

        client = self.neutron()
        for router in self._vpc_route_tables():
            try:
                client.remove_gateway_router(router.resource_id)
            except NeutronClientException as ex:
                if ex.status_code != 404:
                    raise ex


def resource_mapping():
    if clients.neutronclient is None:
        return {}

    return {
        'AWS::EC2::InternetGateway': InternetGateway,
        'AWS::EC2::VPCGatewayAttachment': VPCGatewayAttachment,
    }
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from heat.engine import clients
from heat.common import exception
from heat.openstack.common import log as logging
from heat.engine import resource

logger = logging.getLogger(__name__)


class InternetGateway(resource.Resource):

    tags_schema = {'Key': {'Type': 'String',
                           'Required': True},
                   'Value': {'Type': 'String',
                             'Required': True}}

    properties_schema = {
        'Tags': {'Type': 'List', 'Schema': {
            'Type': 'Map',
            'Implemented': False,
            'Schema': tags_schema}}
    }

    def handle_create(self):
        self.resource_id_set(self.physical_resource_name())

    def handle_delete(self):
        pass

    @staticmethod
    def get_external_network_id(client):
        ext_filter = {'router:external': True}
        ext_nets = client.list_networks(**ext_filter)['networks']
        if len(ext_nets) != 1:
            # TODO(sbaker) if there is more than one external network
            # add a heat configuration variable to set the ID of
            # the default one
            raise exception.Error(
                'Expected 1 external network, found %d' % len(ext_nets))
        external_network_id = ext_nets[0]['id']
        return external_network_id


class VPCGatewayAttachment(resource.Resource):

    properties_schema = {
        'VpcId': {
            'Type': 'String',
            'Required': True},
        'InternetGatewayId': {'Type': 'String'},
        'VpnGatewayId': {
            'Type': 'String',
            'Implemented': False}
    }

    def _vpc_route_tables(self):
        for resource in self.stack.resources.itervalues():
            if (resource.has_interface('AWS::EC2::RouteTable') and
                    resource.properties.get('VpcId') ==
                    self.properties.get('VpcId')):
                yield resource

    def add_dependencies(self, deps):
        super(VPCGatewayAttachment, self).add_dependencies(deps)
        # Depend on any route table in this template with the same
        # VpcId as this VpcId.
        # All route tables must exist before gateway attachment
        # as attachment happens to routers (not VPCs)
        for route_table in self._vpc_route_tables():
            deps += (self, route_table)

    def handle_create(self):
        client = self.neutron()
        external_network_id = InternetGateway.get_external_network_id(client)
        for router in self._vpc_route_tables():
            client.add_gateway_router(router.resource_id, {
                'network_id': external_network_id})

    def handle_delete(self):
        from neutronclient.common.exceptions import NeutronClientException

        client = self.neutron()
        for router in self._vpc_route_tables():
            try:
                client.remove_gateway_router(router.resource_id)
            except NeutronClientException as ex:
                if ex.status_code != 404:
                    raise ex


def resource_mapping():
    if clients.neutronclient is None:
        return {}

    return {
        'AWS::EC2::InternetGateway': InternetGateway,
        'AWS::EC2::VPCGatewayAttachment': VPCGatewayAttachment,
    }
Python
0
77503c0e09c0a520ffdc3b4f936c579148acd915
Clean up the code a bit
piwik_tracking/piwiktracker.py
piwik_tracking/piwiktracker.py
import datetime
import httplib
import random
import urllib
import urlparse


class PiwikTracker:
    VERSION = 1

    def __init__(self, id_site, api_url, request, token_auth):
        random.seed()

        self.id_site = id_site
        self.api_url = api_url
        self.request = request
        self.token_auth = token_auth

        self.page_url = self.get_current_url()
        self.set_request_parameters()
        self.set_local_time(self.get_timestamp())

    def set_request_parameters(self):
        # django-specific
        self.user_agent = self.request.META.get('HTTP_USER_AGENT', '')
        self.referer = self.request.META.get('HTTP_REFERER', '')
        self.ip = self.request.META.get('REMOTE_ADDR')
        self.accept_language = self.request.META.get('HTTP_ACCEPT_LANGUAGE',
                                                     '')

    def set_local_time(self, datetime):
        self.local_hour = datetime.hour
        self.local_minute = datetime.minute
        self.local_second = datetime.second

    def get_current_scheme(self):
        # django-specific
        if self.request.is_secure():
            scheme = 'https'
        else:
            scheme = 'http'
        return scheme

    def get_current_host(self):
        # django-specific
        return self.request.get_host()

    def get_current_script_name(self):
        # django-specific
        return self.request.path_info

    def get_current_query_string(self):
        # django-specific
        return self.request.META.get('QUERY_STRING', '')

    def get_current_url(self):
        url = self.get_current_scheme() + '://'
        url += self.get_current_host()
        url += self.get_current_script_name()
        url += self.get_current_query_string()
        return url

    def get_timestamp(self):
        return datetime.datetime.now()

    def get_query_vars(self, document_title=False):
        parameters = {
            'idsite': self.id_site,
            'rec': 1,
            'apiv': self.VERSION,
            'r': random.randint(0, 99999),
            'url': self.page_url,
            'urlref': self.referer,
            # Forcing IP requires the auth token
            'cip': self.ip,
            'token_auth': self.token_auth,
        }
        if document_title:
            parameters['action_name'] = urllib.quote_plus(document_title)
        return urllib.urlencode(parameters)

    def send_request(self, query_vars):
        "Send the request to piwik"
        headers = {
            'Accept-Language': self.accept_language,
            'User-Agent': self.user_agent,
        }
        parsed = urlparse.urlparse(self.api_url)
        connection = httplib.HTTPConnection(parsed.hostname)
        url = "%s?%s" % (parsed.path, query_vars)
        connection.request('GET', url, '', headers)
        response = connection.getresponse()
        return response.read()

    def do_track_page_view(self, document_title):
        query_vars = self.get_query_vars(document_title)
        return self.send_request(query_vars)


def piwik_get_url_track_page_view(id_site, api_url, request, token_auth,
                                  document_title=False):
    tracker = PiwikTracker(id_site, api_url, request, token_auth)
    return tracker.do_track_page_view(document_title)
import datetime
import httplib
import random
import urllib
import urlparse


class PiwikTracker:
    VERSION = 1

    def __init__(self, id_site, api_url, request, token_auth):
        random.seed()

        self.id_site = id_site
        self.api_url = api_url
        self.request = request
        self.token_auth = token_auth

        self.page_url = self.get_current_url()
        self.set_request_parameters()
        self.set_local_time(self.get_timestamp())

    def set_request_parameters(self):
        # django-specific
        self.user_agent = self.request.META.get('HTTP_USER_AGENT', '')
        self.referer = self.request.META.get('HTTP_REFERER', '')
        self.ip = self.request.META.get('REMOTE_ADDR')
        self.accept_language = self.request.META.get('HTTP_ACCEPT_LANGUAGE',
                                                     '')

    def set_local_time(self, datetime):
        self.local_hour = datetime.hour
        self.local_minute = datetime.minute
        self.local_second = datetime.second

    def get_current_scheme(self):
        # django-specific
        if self.request.is_secure():
            scheme = 'https'
        else:
            scheme = 'http'
        return scheme

    def get_current_host(self):
        # django-specific
        return self.request.get_host()

    def get_current_script_name(self):
        # django-specific
        return self.request.path_info

    def get_current_query_string(self):
        # django-specific
        return self.request.META.get('QUERY_STRING', '')

    def get_current_url(self):
        url = self.get_current_scheme() + '://'
        url += self.get_current_host()
        url += self.get_current_script_name()
        url += self.get_current_query_string()
        return url

    def get_timestamp(self):
        return datetime.datetime.now()

    def get_query_vars(self, document_title=False):
        url = "?idsite=%d&rec=1&apiv=%s&r=%s&url=%s&urlref=%s&cip=%s&token_auth=%s" % (
            self.id_site,
            self.VERSION,
            random.randint(0, 99999),
            urllib.quote_plus(self.page_url),
            urllib.quote_plus(self.referer),
            # Forcing IP requires the auth token
            self.ip,
            self.token_auth,
        )
        if document_title:
            url += '&action_name=%s' % urllib.quote_plus(document_title)
        return url

    def send_request(self, query_vars):
        "Send the request to piwik"
        headers = {
            'Accept-Language': self.accept_language,
            'User-Agent': self.user_agent,
        }
        parsed = urlparse.urlparse(self.api_url)
        connection = httplib.HTTPConnection(parsed.hostname)
        url = parsed.path + query_vars
        connection.request('GET', url, '', headers)
        response = connection.getresponse()
        return response.read()

    def do_track_page_view(self, document_title):
        query_vars = self.get_query_vars(document_title)
        return self.send_request(query_vars);


def piwik_get_url_track_page_view(id_site, api_url, request, token_auth,
                                  document_title=False):
    tracker = PiwikTracker(id_site, api_url, request, token_auth)
    return tracker.do_track_page_view(document_title)
Python
0
b1e2275b47e70949c018ab276279c9e6b8f6d3cf
Add debug (#10828)
homeassistant/components/sensor/serial.py
homeassistant/components/sensor/serial.py
""" Support for reading data from a serial port. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.serial/ """ import asyncio import logging import json import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_NAME, CONF_VALUE_TEMPLATE, EVENT_HOMEASSISTANT_STOP) from homeassistant.helpers.entity import Entity REQUIREMENTS = ['pyserial-asyncio==0.4'] _LOGGER = logging.getLogger(__name__) CONF_SERIAL_PORT = 'serial_port' CONF_BAUDRATE = 'baudrate' DEFAULT_NAME = "Serial Sensor" DEFAULT_BAUDRATE = 9600 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SERIAL_PORT): cv.string, vol.Optional(CONF_BAUDRATE, default=DEFAULT_BAUDRATE): cv.positive_int, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, }) @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): """Set up the Serial sensor platform.""" name = config.get(CONF_NAME) port = config.get(CONF_SERIAL_PORT) baudrate = config.get(CONF_BAUDRATE) value_template = config.get(CONF_VALUE_TEMPLATE) if value_template is not None: value_template.hass = hass sensor = SerialSensor(name, port, baudrate, value_template) hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STOP, sensor.stop_serial_read()) async_add_devices([sensor], True) class SerialSensor(Entity): """Representation of a Serial sensor.""" def __init__(self, name, port, baudrate, value_template): """Initialize the Serial sensor.""" self._name = name self._state = None self._port = port self._baudrate = baudrate self._serial_loop_task = None self._template = value_template self._attributes = [] @asyncio.coroutine def async_added_to_hass(self): """Handle when an entity is about to be added to Home Assistant.""" self._serial_loop_task = self.hass.loop.create_task( self.serial_read(self._port, self._baudrate)) @asyncio.coroutine def serial_read(self, device, rate, **kwargs): """Read the data from the port.""" import serial_asyncio reader, _ = yield from serial_asyncio.open_serial_connection( url=device, baudrate=rate, **kwargs) while True: line = yield from reader.readline() line = line.decode('utf-8').strip() try: data = json.loads(line) if isinstance(data, dict): self._attributes = data except ValueError: pass if self._template is not None: line = self._template.async_render_with_possible_json_value( line) _LOGGER.debug("Received: %s", line) self._state = line self.async_schedule_update_ha_state() @asyncio.coroutine def stop_serial_read(self): """Close resources.""" if self._serial_loop_task: self._serial_loop_task.cancel() @property def name(self): """Return the name of the sensor.""" return self._name @property def should_poll(self): """No polling needed.""" return False @property def device_state_attributes(self): """Return the attributes of the entity (if any JSON present).""" return self._attributes @property def state(self): """Return the state of the sensor.""" return self._state
""" Support for reading data from a serial port. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.serial/ """ import asyncio import logging import json import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_NAME, CONF_VALUE_TEMPLATE, EVENT_HOMEASSISTANT_STOP) from homeassistant.helpers.entity import Entity REQUIREMENTS = ['pyserial-asyncio==0.4'] _LOGGER = logging.getLogger(__name__) CONF_SERIAL_PORT = 'serial_port' CONF_BAUDRATE = 'baudrate' DEFAULT_NAME = "Serial Sensor" DEFAULT_BAUDRATE = 9600 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SERIAL_PORT): cv.string, vol.Optional(CONF_BAUDRATE, default=DEFAULT_BAUDRATE): cv.positive_int, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, }) @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): """Set up the Serial sensor platform.""" name = config.get(CONF_NAME) port = config.get(CONF_SERIAL_PORT) baudrate = config.get(CONF_BAUDRATE) value_template = config.get(CONF_VALUE_TEMPLATE) if value_template is not None: value_template.hass = hass sensor = SerialSensor(name, port, baudrate, value_template) hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STOP, sensor.stop_serial_read()) async_add_devices([sensor], True) class SerialSensor(Entity): """Representation of a Serial sensor.""" def __init__(self, name, port, baudrate, value_template): """Initialize the Serial sensor.""" self._name = name self._state = None self._port = port self._baudrate = baudrate self._serial_loop_task = None self._template = value_template self._attributes = [] @asyncio.coroutine def async_added_to_hass(self): """Handle when an entity is about to be added to Home Assistant.""" self._serial_loop_task = self.hass.loop.create_task( self.serial_read(self._port, self._baudrate)) @asyncio.coroutine def serial_read(self, device, rate, **kwargs): """Read the data from the port.""" import serial_asyncio reader, _ = yield from serial_asyncio.open_serial_connection( url=device, baudrate=rate, **kwargs) while True: line = yield from reader.readline() line = line.decode('utf-8').strip() try: data = json.loads(line) if isinstance(data, dict): self._attributes = data except ValueError: pass if self._template is not None: line = self._template.async_render_with_possible_json_value( line) self._state = line self.async_schedule_update_ha_state() @asyncio.coroutine def stop_serial_read(self): """Close resources.""" if self._serial_loop_task: self._serial_loop_task.cancel() @property def name(self): """Return the name of the sensor.""" return self._name @property def should_poll(self): """No polling needed.""" return False @property def device_state_attributes(self): """Return the attributes of the entity (if any JSON present).""" return self._attributes @property def state(self): """Return the state of the sensor.""" return self._state
Python
0.000001
ad6b055b53d621addc3565209c7af095b6d6d0e7
Add .delete() and the start of Room
hypchat/jsonobject.py
hypchat/jsonobject.py
from __future__ import absolute_import, division
import json
import re
from . import requests

_urls_to_objects = {}


class Linker(object):
    """
    Responsible for on-demand loading of JSON objects.
    """
    def __init__(self, url, parent=None, _requests=None):
        self.url = url
        self.__parent = parent
        self._requests = _requests or __import__('requests')

    def __call__(self):
        def _object_hook(obj):
            if 'links' in obj:
                rv = JsonObject(obj)
                rv._requests = self._requests
                return rv
            else:
                return obj
        rv = json.JSONDecoder(object_hook=_object_hook).decode(self._requests.get(self.url).text)
        rv._requests = self._requests
        if self.__parent is not None:
            rv.parent = self.__parent
        return rv

    def __repr__(self):
        return "<%s url=%r>" % (type(self).__name__, self.url)


class JsonObject(dict):
    """
    Nice wrapper around the JSON objects and their links.
    """
    def __getattr__(self, name):
        if name in self.get('links', {}):
            return Linker(self['links'][name], parent=self, _requests=self._requests)
        elif name in self:
            return self[name]
        else:
            raise AttributeError("%r object has no attribute %r" % (type(self).__name__, name))

    def save(self):
        return requests.put(self['links']['self']).json()

    def delete(self):
        return requests.delete(self['links']['self']).json()


class Room(JsonObject):
    def message(self, *p, **kw):
        """
        Redirects to notification (for now)
        """
        return self.notification(*p, **kw)

    def notification(self, message, color='yellow', notify=False, format='html'):
        raise NotImplementedError

    def topic(self, text):
        raise NotImplementedError

    def history(self, date='recent'):
        raise NotImplementedError

    def invite(self, user, reason):
        raise NotImplementedError

_urls_to_objects[re.compile(r'https://api.hipchat.com/v2/room/[^/]+')] = Room
from __future__ import absolute_import, division
import json
from . import requests


class Linker(object):
    """
    Responsible for on-demand loading of JSON objects.
    """
    def __init__(self, url, parent=None, _requests=None):
        self.url = url
        self.__parent = parent
        self._requests = _requests or __import__('requests')

    def __call__(self):
        def _object_hook(obj):
            if 'links' in obj:
                rv = JsonObject(obj)
                rv._requests = self._requests
                return rv
            else:
                return obj
        rv = json.JSONDecoder(object_hook=_object_hook).decode(self._requests.get(self.url).text)
        rv._requests = self._requests
        if self.__parent is not None:
            rv.parent = self.__parent
        return rv

    def __repr__(self):
        return "<%s url=%r>" % (type(self).__name__, self.url)


class JsonObject(dict):
    """
    Nice wrapper around the JSON objects and their links.
    """
    def __getattr__(self, name):
        if name in self.get('links', {}):
            return Linker(self['links'][name], parent=self, _requests=self._requests)
        elif name in self:
            return self[name]
        else:
            raise AttributeError("%r object has no attribute %r" % (type(self).__name__, name))

    def save(self):
        return requests.put(self['links']['self']).json()
Python
0.000001
1ae34b1a9035ec8813c40477a6f83bfdf10413f3
Add chkdown in the list of server metrics
haproxystats/metrics.py
haproxystats/metrics.py
"""Provide constants for grouping metric names. There are seperated groups for frontend, backend, servers and haproxy daemon. Metric names are the field names contained in the HAProxy statistics. """ from collections import namedtuple DAEMON_METRICS = [ 'CompressBpsIn', 'CompressBpsOut', 'CompressBpsRateLim', 'ConnRate', 'ConnRateLimit', 'CumConns', 'CumReq', 'CumSslConns', 'CurrConns', 'CurrSslConns', 'Hard_maxconn', 'MaxConnRate', 'MaxSessRate', 'MaxSslConns', 'MaxSslRate', 'MaxZlibMemUsage', 'Maxconn', 'Maxpipes', 'Maxsock', 'Memmax_MB', 'PipesFree', 'PipesUsed', 'Run_queue', 'SessRate', 'SessRateLimit', 'SslBackendKeyRate', 'SslBackendMaxKeyRate', 'SslCacheLookups', 'SslCacheMisses', 'SslFrontendKeyRate', 'SslFrontendMaxKeyRate', 'SslFrontendSessionReuse_pct', 'SslRate', 'SslRateLimit', 'Tasks', 'Ulimit-n', 'ZlibMemUsage', ] DAEMON_AVG_METRICS = ['Idle_pct', 'Uptime_sec'] COMMON = [ 'bin', 'bout', 'dresp', 'hrsp_1xx', 'hrsp_2xx', 'hrsp_3xx', 'hrsp_4xx', 'hrsp_5xx', 'hrsp_other', 'rate', 'rate_max', 'scur', 'slim', 'smax', 'stot' ] SERVER_METRICS = [ 'chkfail', 'chkdown', 'cli_abrt', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'srv_abrt', 'wredis', 'wretr' ] + COMMON SERVER_AVG_METRICS = ['qtime', 'rtime', 'throttle', 'ttime', 'weight'] BACKEND_METRICS = [ 'chkdown', 'cli_abrt', 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'downtime', 'dreq', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'srv_abrt', 'wredis', 'wretr', ] + COMMON BACKEND_AVG_METRICS = [ 'act', 'bck', 'rtime', 'ctime', 'qtime', 'ttime', 'weight' ] FRONTEND_METRICS = [ 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'dreq', 'ereq', 'rate_lim', 'req_rate', 'req_rate_max', 'req_tot' ] + COMMON MetricNamesPercentage = namedtuple('MetricsNamesPercentage', ['name', 'limit', 'title'])
"""Provide constants for grouping metric names. There are seperated groups for frontend, backend, servers and haproxy daemon. Metric names are the field names contained in the HAProxy statistics. """ from collections import namedtuple DAEMON_METRICS = [ 'CompressBpsIn', 'CompressBpsOut', 'CompressBpsRateLim', 'ConnRate', 'ConnRateLimit', 'CumConns', 'CumReq', 'CumSslConns', 'CurrConns', 'CurrSslConns', 'Hard_maxconn', 'MaxConnRate', 'MaxSessRate', 'MaxSslConns', 'MaxSslRate', 'MaxZlibMemUsage', 'Maxconn', 'Maxpipes', 'Maxsock', 'Memmax_MB', 'PipesFree', 'PipesUsed', 'Run_queue', 'SessRate', 'SessRateLimit', 'SslBackendKeyRate', 'SslBackendMaxKeyRate', 'SslCacheLookups', 'SslCacheMisses', 'SslFrontendKeyRate', 'SslFrontendMaxKeyRate', 'SslFrontendSessionReuse_pct', 'SslRate', 'SslRateLimit', 'Tasks', 'Ulimit-n', 'ZlibMemUsage', ] DAEMON_AVG_METRICS = ['Idle_pct', 'Uptime_sec'] COMMON = [ 'bin', 'bout', 'dresp', 'hrsp_1xx', 'hrsp_2xx', 'hrsp_3xx', 'hrsp_4xx', 'hrsp_5xx', 'hrsp_other', 'rate', 'rate_max', 'scur', 'slim', 'smax', 'stot' ] SERVER_METRICS = [ 'chkfail', 'cli_abrt', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'srv_abrt', 'wredis', 'wretr' ] + COMMON SERVER_AVG_METRICS = ['qtime', 'rtime', 'throttle', 'ttime', 'weight'] BACKEND_METRICS = [ 'chkdown', 'cli_abrt', 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'downtime', 'dreq', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'srv_abrt', 'wredis', 'wretr', ] + COMMON BACKEND_AVG_METRICS = [ 'act', 'bck', 'rtime', 'ctime', 'qtime', 'ttime', 'weight' ] FRONTEND_METRICS = [ 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'dreq', 'ereq', 'rate_lim', 'req_rate', 'req_rate_max', 'req_tot' ] + COMMON MetricNamesPercentage = namedtuple('MetricsNamesPercentage', ['name', 'limit', 'title'])
Python
0
fe9226898772c4ff909f9c3f0cb05c271333b73a
Make auth_url lookup dynamic
heat/common/auth_url.py
heat/common/auth_url.py
#
#    Copyright 2013 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
#    implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

from oslo_config import cfg
from webob import exc

from heat.common import endpoint_utils
from heat.common.i18n import _
from heat.common import wsgi


class AuthUrlFilter(wsgi.Middleware):

    def __init__(self, app, conf):
        super(AuthUrlFilter, self).__init__(app)
        self.conf = conf
        self._auth_url = None

    @property
    def auth_url(self):
        if not self._auth_url:
            self._auth_url = self._get_auth_url()
        return self._auth_url

    def _get_auth_url(self):
        if 'auth_uri' in self.conf:
            return self.conf['auth_uri']
        else:
            return endpoint_utils.get_auth_uri(v3=False)

    def _validate_auth_url(self, auth_url):
        """Validate auth_url to ensure it can be used."""
        if not auth_url:
            raise exc.HTTPBadRequest(_('Request missing required header '
                                       'X-Auth-Url'))
        allowed = cfg.CONF.auth_password.allowed_auth_uris
        if auth_url not in allowed:
            raise exc.HTTPUnauthorized(_('Header X-Auth-Url "%s" not '
                                         'an allowed endpoint') % auth_url)
        return True

    def process_request(self, req):
        auth_url = self.auth_url
        if cfg.CONF.auth_password.multi_cloud:
            auth_url = req.headers.get('X-Auth-Url')
            self._validate_auth_url(auth_url)

        req.headers['X-Auth-Url'] = auth_url
        return None


def filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

    def auth_url_filter(app):
        return AuthUrlFilter(app, conf)
    return auth_url_filter
#
#    Copyright 2013 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
#    implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

from oslo_config import cfg
from webob import exc

from heat.common import endpoint_utils
from heat.common.i18n import _
from heat.common import wsgi


class AuthUrlFilter(wsgi.Middleware):

    def __init__(self, app, conf):
        super(AuthUrlFilter, self).__init__(app)
        self.conf = conf
        self.auth_url = self._get_auth_url()

    def _get_auth_url(self):
        if 'auth_uri' in self.conf:
            return self.conf['auth_uri']
        else:
            return endpoint_utils.get_auth_uri(v3=False)

    def _validate_auth_url(self, auth_url):
        """Validate auth_url to ensure it can be used."""
        if not auth_url:
            raise exc.HTTPBadRequest(_('Request missing required header '
                                       'X-Auth-Url'))
        allowed = cfg.CONF.auth_password.allowed_auth_uris
        if auth_url not in allowed:
            raise exc.HTTPUnauthorized(_('Header X-Auth-Url "%s" not '
                                         'an allowed endpoint') % auth_url)
        return True

    def process_request(self, req):
        auth_url = self.auth_url
        if cfg.CONF.auth_password.multi_cloud:
            auth_url = req.headers.get('X-Auth-Url')
            self._validate_auth_url(auth_url)

        req.headers['X-Auth-Url'] = auth_url
        return None


def filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

    def auth_url_filter(app):
        return AuthUrlFilter(app, conf)
    return auth_url_filter
Python
0.000009
a79db7cf85dac6d74d7929137f640a0ac10ddf7d
return from sys.exit for easier testing
p7doi/__init__.py
p7doi/__init__.py
# -*- coding: UTF-8 -*-

from __future__ import print_function

import webbrowser
import sys

__version__ = '0.0.1'

DOI_URL = 'http://rproxy.sc.univ-paris-diderot.fr/login' + \
    '?url=http://dx.doi.org/%s'


def make_doi_url(doi):
    """
    Return an URL for the given DOI
    """
    return DOI_URL % doi


def open_url(url):
    """
    Open an URL in the default browser, in a new tab if possible
    """
    webbrowser.open_new_tab(url)


def open_doi(doi):
    """
    Open the URL for the given DOI in the default browser
    """
    open_url(make_doi_url(doi))


def cli():
    """
    CLI endpoint
    """
    if len(sys.argv) < 2:
        print('Usage: %s <doi>' % sys.argv[0])
        return sys.exit(1)

    doi = sys.argv[1]

    if doi.startswith('-'):
        if doi in ['-v', '-version', '--version']:
            print('p7doi v%s' % __version__)
        else:
            print("Unrecognized option: '%s'" % doi)
            return sys.exit(1)

        return sys.exit(0)

    open_doi(doi)
# -*- coding: UTF-8 -*-

from __future__ import print_function

import webbrowser
import sys

__version__ = '0.0.1'

DOI_URL = 'http://rproxy.sc.univ-paris-diderot.fr/login' + \
    '?url=http://dx.doi.org/%s'


def make_doi_url(doi):
    """
    Return an URL for the given DOI
    """
    return DOI_URL % doi


def open_url(url):
    """
    Open an URL in the default browser, in a new tab if possible
    """
    webbrowser.open_new_tab(url)


def open_doi(doi):
    """
    Open the URL for the given DOI in the default browser
    """
    open_url(make_doi_url(doi))


def cli():
    """
    CLI endpoint
    """
    if len(sys.argv) < 2:
        print('Usage: %s <doi>' % sys.argv[0])
        sys.exit(1)

    doi = sys.argv[1]

    if doi.startswith('-'):
        if doi in ['-v', '-version', '--version']:
            print('p7doi v%s' % __version__)
        else:
            print("Unrecognized option: '%s'" % doi)
            sys.exit(1)

        sys.exit(0)

    open_doi(doi)
Python
0.000001
7ed627991632cf761dfccb553f830a6e9e3c37e9
fix bitonality test for solid color images
kraken/lib/util.py
kraken/lib/util.py
""" Ocropus's magic PIL-numpy array conversion routines. They express slightly different behavior from PIL.Image.toarray(). """ import unicodedata import numpy as np from PIL import Image __all__ = ['pil2array', 'array2pil'] def pil2array(im: Image, alpha: int = 0) -> np.array: if im.mode == '1': return np.array(im.convert('L')) return np.array(im) def array2pil(a: np.array) -> Image: if a.dtype == np.dtype("B"): if a.ndim == 2: return Image.frombytes("L", (a.shape[1], a.shape[0]), a.tostring()) elif a.ndim == 3: return Image.frombytes("RGB", (a.shape[1], a.shape[0]), a.tostring()) else: raise Exception("bad image rank") elif a.dtype == np.dtype('float32'): return Image.frombytes("F", (a.shape[1], a.shape[0]), a.tostring()) else: raise Exception("unknown image type") def is_bitonal(im: Image) -> bool: """ Tests a PIL.Image for bitonality. Args: im (PIL.Image): Image to test Returns: True if the image contains only two different color values. False otherwise. """ return im.getcolors(2) is not None and len(im.getcolors(2)) == 2 def get_im_str(im: Image) -> str: return im.filename if hasattr(im, 'filename') else str(im) def is_printable(char: str) -> bool: """ Determines if a chode point is printable/visible when printed. Args: char (str): Input code point. Returns: True if printable, False otherwise. """ letters = ('LC', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu') numbers = ('Nd', 'Nl', 'No') punctuation = ('Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps') symbol = ('Sc', 'Sk', 'Sm', 'So') printable = letters + numbers + punctuation + symbol return unicodedata.category(char) in printable def make_printable(char: str) -> str: """ Takes a Unicode code point and return a printable representation of it. Args: char (str): Input code point Returns: Either the original code point, the name of the code point if it is a combining mark, whitespace etc., or the hex code if it is a control symbol. """ if not char or is_printable(char): return char elif unicodedata.category(char) in ('Cc', 'Cs', 'Co'): return '0x{:x}'.format(ord(char)) else: return unicodedata.name(char)
""" Ocropus's magic PIL-numpy array conversion routines. They express slightly different behavior from PIL.Image.toarray(). """ import unicodedata import numpy as np from PIL import Image __all__ = ['pil2array', 'array2pil'] def pil2array(im: Image, alpha: int = 0) -> np.array: if im.mode == '1': return np.array(im.convert('L')) return np.array(im) def array2pil(a: np.array) -> Image: if a.dtype == np.dtype("B"): if a.ndim == 2: return Image.frombytes("L", (a.shape[1], a.shape[0]), a.tostring()) elif a.ndim == 3: return Image.frombytes("RGB", (a.shape[1], a.shape[0]), a.tostring()) else: raise Exception("bad image rank") elif a.dtype == np.dtype('float32'): return Image.frombytes("F", (a.shape[1], a.shape[0]), a.tostring()) else: raise Exception("unknown image type") def is_bitonal(im: Image) -> bool: """ Tests a PIL.Image for bitonality. Args: im (PIL.Image): Image to test Returns: True if the image contains only two different color values. False otherwise. """ return im.getcolors(2) is not None def get_im_str(im: Image) -> str: return im.filename if hasattr(im, 'filename') else str(im) def is_printable(char: str) -> bool: """ Determines if a chode point is printable/visible when printed. Args: char (str): Input code point. Returns: True if printable, False otherwise. """ letters = ('LC', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu') numbers = ('Nd', 'Nl', 'No') punctuation = ('Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps') symbol = ('Sc', 'Sk', 'Sm', 'So') printable = letters + numbers + punctuation + symbol return unicodedata.category(char) in printable def make_printable(char: str) -> str: """ Takes a Unicode code point and return a printable representation of it. Args: char (str): Input code point Returns: Either the original code point, the name of the code point if it is a combining mark, whitespace etc., or the hex code if it is a control symbol. """ if not char or is_printable(char): return char elif unicodedata.category(char) in ('Cc', 'Cs', 'Co'): return '0x{:x}'.format(ord(char)) else: return unicodedata.name(char)
Python
0.000001
4d5889e87399e940ebb08c7513f24466c0a93eaf
Remove useless space.
plugins/ChannelStats/config.py
plugins/ChannelStats/config.py
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions, and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
#   contributors to this software may be used to endorse or promote products
#   derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###

import re

import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('ChannelStats')

def configure(advanced):
    # This will be called by supybot to configure this module. advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not. You should effect your configuration by manipulating the
    # registry as appropriate.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('ChannelStats', True)

class Smileys(registry.Value):
    def set(self, s):
        L = s.split()
        self.setValue(L)

    def setValue(self, v):
        self.s = ' '.join(v)
        self.value = re.compile('|'.join(map(re.escape, v)))

    def __str__(self):
        return self.s

ChannelStats = conf.registerPlugin('ChannelStats')
conf.registerChannelValue(ChannelStats, 'selfStats',
    registry.Boolean(True, _("""Determines whether the bot will keep channel
    statistics on itself, possibly skewing the channel stats (especially in
    cases where the bot is relaying between channels on a network).""")))
conf.registerChannelValue(ChannelStats, 'smileys',
    Smileys(':) ;) ;] :-) :-D :D :P :p (= =)'.split(), _("""Determines what
    words (i.e., pieces of text with no spaces in them) are considered
    'smileys' for the purposes of stats-keeping.""")))
conf.registerChannelValue(ChannelStats, 'frowns',
    Smileys(':| :-/ :-\\ :\\ :/ :( :-( :\'('.split(), _("""Determines what
    words (i.e., pieces of text with no spaces in them) are considered
    'frowns' for the purposes of stats-keeping.""")))

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions, and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
#   contributors to this software may be used to endorse or promote products
#   derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###

import re

import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('ChannelStats')

def configure(advanced):
    # This will be called by supybot to configure this module. advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not. You should effect your configuration by manipulating the
    # registry as appropriate.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('ChannelStats', True)

class Smileys(registry.Value):
    def set(self, s):
        L = s.split()
        self.setValue(L)

    def setValue(self, v):
        self.s = ' '.join(v)
        self.value = re.compile('|'.join(map(re.escape, v)))

    def __str__(self):
        return self.s

ChannelStats = conf.registerPlugin('ChannelStats')
conf.registerChannelValue(ChannelStats, 'selfStats',
    registry.Boolean(True, _("""Determines whether the bot will keep channel
    statistics on itself, possibly skewing the channel stats (especially in
    cases where the bot is relaying between channels on a network).""")))
conf.registerChannelValue(ChannelStats, 'smileys',
    Smileys(':) ;) ;] :-) :-D :D :P :p (= =)'.split(), _("""Determines what
    words (i.e., pieces of text with no spaces in them) are considered
    'smileys' for the purposes of stats-keeping.""")))
conf.registerChannelValue(ChannelStats, 'frowns',
    Smileys(':| :-/ :-\\ :\\ :/ :( :-( :\'('.split(), _("""Determines what
    words (i.e., pieces of text with no spaces in them ) are considered
    'frowns' for the purposes of stats-keeping.""")))

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
Python
0
eaac4e45928b7008e6c561e28e9b5ed5dc427587
fix redis storage
labDNS/storages.py
labDNS/storages.py
try:
    import redis
except ImportError:
    redis = None


class BaseStorage:
    DEFAULT_CONFIG = dict()

    def __init__(self, config):
        self.config = self.DEFAULT_CONFIG
        self._configure(config)

    def get(self, key):
        raise NotImplementedError

    def _configure(self, config):
        self.config.update(config)


class DictStorage(BaseStorage):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dictionary = self.config

    def get(self, key, default=None):
        return self.dictionary.get(key, default)


class RedisStorage(BaseStorage):
    DEFAULT_SETTINGS = dict(host='localhost', port=6379, db=0)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.redis = redis.StrictRedis(**self.config)

    def get(self, key, default=None):
        return self.redis.get(key).decode("utf-8") or default
try:
    import redis
except ImportError:
    redis = None


class BaseStorage:
    DEFAULT_CONFIG = dict()

    def __init__(self, config):
        self.config = self.DEFAULT_CONFIG
        self._configure(config)

    def get(self, key):
        raise NotImplementedError

    def _configure(self, config):
        self.config.update(config)


class DictStorage(BaseStorage):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dictionary = self.config

    def get(self, key, default=None):
        return self.dictionary.get(key, default)


class RedisStorage(BaseStorage):
    DEFAULT_SETTINGS = dict(host='localhost', port=6379, db=0)

    def __init__(self, config):
        self.redis = redis.StrictRedis(**self.config)

    def get(self, key, default=None):
        return self.redis.get(key, default)
Python
0
13ffa4113341c13e635896f94a29df5cff5c0348
Build objects in JSON generator tool
test/generate-json.py
test/generate-json.py
#!/usr/bin/env python

import argparse
import random


def random_array_element():
    return random.choice(['123', 'true', 'false', 'null', '3.1415', '"foo"'])


def main():
    parser = argparse.ArgumentParser(description="Generate a large JSON document.")
    parser.add_argument('--array-size', nargs=1, type=int, default=[100000])
    parser.add_argument('--array-type', choices=['int', 'array', 'object'],
                        default='object')
    parser.add_argument('--array-elements', nargs=1, type=int, default=[3])
    parser.add_argument('--object-size', nargs=1, type=int, default=None)
    args = parser.parse_args()

    if args.object_size:
        print('{')
        for i in range(args.object_size[0] - 1):
            print(' "x%d": %s,' % (i, random_array_element()))
        print(' "no": "comma"')
        print('}')
    else:
        n = args.array_size[0]
        type = args.array_type

        print('{"x": [')
        if type == 'int':
            elem_format = "%d%s"
            need_i = True
        elif type == 'object':
            elem_format = '{"a": %d}%s'
            need_i = True
        elif type == 'array':
            nelems = args.array_elements[0]
            arr = []
            if nelems > 0:
                arr.append('%s')
            if nelems > 1:
                arr.extend([random_array_element() for _ in range(nelems-1)])
            elem_format = '[%s]%%s' % ", ".join(arr)
            need_i = nelems > 0
        else:
            raise Exception("Unknown array type %s" % type)

        for i in range(n):
            semicolon = "," if i < n-1 else ""
            if need_i:
                print(elem_format % (i, semicolon))
            else:
                print(elem_format % semicolon)
        print(']}')


if __name__ == "__main__":
    main()
#!/usr/bin/env python

import argparse
import random


def random_array_element():
    return random.choice(['123', 'true', 'false', 'null', '3.1415', '"foo"'])


def main():
    parser = argparse.ArgumentParser(description="Generate a large JSON document.")
    parser.add_argument('--array-size', nargs=1, type=int, default=[100000])
    parser.add_argument('--array-type', choices=['int', 'array', 'object'],
                        default='object')
    parser.add_argument('--array-elements', nargs=1, type=int, default=[3])
    args = parser.parse_args()

    n = args.array_size[0]
    type = args.array_type

    print('{"x": [')
    if type == 'int':
        elem_format = "%d%s"
        need_i = True
    elif type == 'object':
        elem_format = '{"a": %d}%s'
        need_i = True
    elif type == 'array':
        nelems = args.array_elements[0]
        arr = []
        if nelems > 0:
            arr.append('%s')
        if nelems > 1:
            arr.extend([random_array_element() for _ in range(nelems-1)])
        elem_format = '[%s]%%s' % ", ".join(arr)
        need_i = nelems > 0
    else:
        raise Exception("Unknown array type %s" % type)

    for i in range(n):
        semicolon = "," if i < n-1 else ""
        if need_i:
            print(elem_format % (i, semicolon))
        else:
            print(elem_format % semicolon)
    print(']}')


if __name__ == "__main__":
    main()
Python
0.000002
c48b0ae4331d1d039cb6bc29ef25fc7c4a5df8da
Bump version to 0.2.7
approvaltests/version.py
approvaltests/version.py
version_number = "0.2.7"
version_number = "0.2.6"
Python
0.000001
903d9b000c4d7b333b5d3000aeb38b7e4d818c27
add "Partly Cloudy" to color_icons
i3pystatus/weather.py
i3pystatus/weather.py
from i3pystatus import IntervalModule
import pywapi
from i3pystatus.core.util import internet, require


class Weather(IntervalModule):
    """
    This module gets the weather from weather.com using pywapi module
    First, you need to get the code for the location from the www.weather.com

    Available formatters:

    * {current_temp}
    * {humidity}

    Requires pywapi from PyPI.
    """

    interval = 20

    settings = (
        "location_code",
        ("colorize", "Enable color with temperature and UTF-8 icons."),
        ("units", "Celsius (metric) or Fahrenheit (imperial)"),
        "format",
    )
    required = ("location_code",)

    units = "metric"
    format = "{current_temp}"
    colorize = None
    color_icons = {'Fair': (u'\u2600', '#FFCC00'),
                   'Cloudy': (u'\u2601', '#F8F8FF'),
                   'Partly Cloudy': (u'\u2601', '#F8F8FF'),  # \u26c5 is not in many fonts
                   'Rainy': (u'\u2614', '#CBD2C0'),
                   'Sunny': (u'\u263C', '#FFFF00'),
                   'Snow': (u'\u2603', '#FFFFFF'),
                   'default': ('', None),
                   }

    @require(internet)
    def run(self):
        result = pywapi.get_weather_from_weather_com(self.location_code,
                                                     self.units)
        conditions = result['current_conditions']
        temperature = conditions['temperature']
        humidity = conditions['humidity']
        units = result['units']
        color = None
        current_temp = '{t}°{d} '.format(t=temperature,
                                         d=units['temperature'])

        if self.colorize:
            icon, color = self.color_icons.get(conditions['text'],
                                               self.color_icons['default'])
            current_temp = '{t}°{d} {i}'.format(t=temperature,
                                                d=units['temperature'],
                                                i=icon)
            color = color

        self.output = {
            "full_text": self.format.format(current_temp=current_temp,
                                            humidity=humidity),
            "color": color
        }
from i3pystatus import IntervalModule
import pywapi
from i3pystatus.core.util import internet, require


class Weather(IntervalModule):
    """
    This module gets the weather from weather.com using pywapi module
    First, you need to get the code for the location from the www.weather.com

    Available formatters:

    * {current_temp}
    * {humidity}

    Requires pywapi from PyPI.
    """

    interval = 20

    settings = (
        "location_code",
        ("colorize", "Enable color with temperature and UTF-8 icons."),
        ("units", "Celsius (metric) or Fahrenheit (imperial)"),
        "format",
    )
    required = ("location_code",)

    units = "metric"
    format = "{current_temp}"
    colorize = None
    color_icons = {'Fair': (u'\u2600', '#FFCC00'),
                   'Cloudy': (u'\u2601', '#F8F8FF'),
                   'Rainy': (u'\u2614', '#CBD2C0'),
                   'Sunny': (u'\u263C', '#FFFF00'),
                   'Snow': (u'\u2603', '#FFFFFF'),
                   'default': ('', None),
                   }

    @require(internet)
    def run(self):
        result = pywapi.get_weather_from_weather_com(self.location_code,
                                                     self.units)
        conditions = result['current_conditions']
        temperature = conditions['temperature']
        humidity = conditions['humidity']
        units = result['units']
        color = None
        current_temp = '{t}°{d} '.format(t=temperature,
                                         d=units['temperature'])

        if self.colorize:
            icon, color = self.color_icons.get(conditions['text'],
                                               self.color_icons['default'])
            current_temp = '{t}°{d} {i}'.format(t=temperature,
                                                d=units['temperature'],
                                                i=icon)
            color = color

        self.output = {
            "full_text": self.format.format(current_temp=current_temp,
                                            humidity=humidity),
            "color": color
        }
Python
0.999821
f4a39adc6513f41dc33c4ecf597f4a80dd846dd9
rename LdapConnection to DatabaseWrapper and accept configuration as a dict
ldapdb/__init__.py
ldapdb/__init__.py
# -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2010, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Bolloré telecom nor the names of its contributors
#    may be used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import ldap

from django.conf import settings
from django.db.backends import BaseDatabaseFeatures, BaseDatabaseOperations


def escape_ldap_filter(value):
    value = unicode(value)
    return value.replace('\\', '\\5c') \
                .replace('*', '\\2a') \
                .replace('(', '\\28') \
                .replace(')', '\\29') \
                .replace('\0', '\\00')


class DatabaseCursor(object):
    def __init__(self, ldap_connection):
        self.connection = ldap_connection


class DatabaseFeatures(BaseDatabaseFeatures):
    def __init__(self, connection):
        self.connection = connection


class DatabaseOperations(BaseDatabaseOperations):
    def quote_name(self, name):
        return name


class DatabaseWrapper(object):
    def __init__(self, settings_dict={}, alias='ldap'):
        self.settings_dict = settings_dict
        self.connection = None
        self.charset = "utf-8"
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations()

    def close(self):
        pass

    def _cursor(self):
        if self.connection is None:
            self.connection = ldap.initialize(self.settings_dict['NAME'])
            self.connection.simple_bind_s(
                self.settings_dict['USER'],
                self.settings_dict['PASSWORD'])
        return DatabaseCursor(self.connection)

    def add_s(self, dn, modlist):
        cursor = self._cursor()
        return cursor.connection.add_s(dn.encode(self.charset), modlist)

    def delete_s(self, dn):
        cursor = self._cursor()
        return cursor.connection.delete_s(dn.encode(self.charset))

    def modify_s(self, dn, modlist):
        cursor = self._cursor()
        return cursor.connection.modify_s(dn.encode(self.charset), modlist)

    def rename_s(self, dn, newrdn):
        cursor = self._cursor()
        return cursor.connection.rename_s(dn.encode(self.charset),
                                          newrdn.encode(self.charset))

    def search_s(self, base, scope, filterstr, attrlist):
        cursor = self._cursor()
        results = cursor.connection.search_s(base, scope,
                                             filterstr.encode(self.charset),
                                             attrlist)
        output = []
        for dn, attrs in results:
            output.append((dn.decode(self.charset), attrs))
        return output


# FIXME: is this the right place to initialize the LDAP connection?
connection = DatabaseWrapper({
    'NAME': settings.LDAPDB_SERVER_URI,
    'USER': settings.LDAPDB_BIND_DN,
    'PASSWORD': settings.LDAPDB_BIND_PASSWORD})
# -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2010, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Bolloré telecom nor the names of its contributors
#    may be used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import ldap

from django.conf import settings
from django.db.backends import BaseDatabaseFeatures, BaseDatabaseOperations


def escape_ldap_filter(value):
    value = unicode(value)
    return value.replace('\\', '\\5c') \
                .replace('*', '\\2a') \
                .replace('(', '\\28') \
                .replace(')', '\\29') \
                .replace('\0', '\\00')


class DatabaseCursor(object):
    def __init__(self, ldap_connection):
        self.connection = ldap_connection


class DatabaseFeatures(BaseDatabaseFeatures):
    def __init__(self, connection):
        self.connection = connection


class DatabaseOperations(BaseDatabaseOperations):
    def quote_name(self, name):
        return name


class LdapConnection(object):
    def __init__(self):
        self.connection = None
        self.charset = "utf-8"
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations()

    def _cursor(self):
        if self.connection is None:
            self.connection = ldap.initialize(settings.LDAPDB_SERVER_URI)
            self.connection.simple_bind_s(
                settings.LDAPDB_BIND_DN,
                settings.LDAPDB_BIND_PASSWORD)
        return DatabaseCursor(self.connection)

    def add_s(self, dn, modlist):
        cursor = self._cursor()
        return cursor.connection.add_s(dn.encode(self.charset), modlist)

    def delete_s(self, dn):
        cursor = self._cursor()
        return cursor.connection.delete_s(dn.encode(self.charset))

    def modify_s(self, dn, modlist):
        cursor = self._cursor()
        return cursor.connection.modify_s(dn.encode(self.charset), modlist)

    def rename_s(self, dn, newrdn):
        cursor = self._cursor()
        return cursor.connection.rename_s(dn.encode(self.charset),
                                          newrdn.encode(self.charset))

    def search_s(self, base, scope, filterstr, attrlist):
        cursor = self._cursor()
        results = cursor.connection.search_s(base, scope,
                                             filterstr.encode(self.charset),
                                             attrlist)
        output = []
        for dn, attrs in results:
            output.append((dn.decode(self.charset), attrs))
        return output


# FIXME: is this the right place to initialize the LDAP connection?
connection = LdapConnection()
Python
0.000002
faebe4928b4bef33efd6183f97f1ff1396a701ee
fix missing urls.
blackgate/cli.py
blackgate/cli.py
# -*- coding: utf-8 -*-

import click

from blackgate.core import component
from blackgate.config import parse_yaml_config
from blackgate.config import read_yaml_config
from blackgate.config import read_default_config
from blackgate.server import run


@click.group()
@click.option('-c', '--config', default='')
@click.pass_context
def main(ctx, config):
    if not config:
        config = read_default_config()
    else:
        config = read_yaml_config(config)

    if not config:
        ctx.fail('config not found.')

    try:
        config = parse_yaml_config(config)
    except ValueError:
        ctx.fail('config is not valid yaml.')

    ctx.obj = {}
    ctx.obj['config'] = config


@main.command()
@click.pass_context
def start(ctx):
    config = ctx.obj['config']
    component.configurations = config
    component.install()
    run(config.get('port', 9654))


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-

import click

from blackgate.core import component
from blackgate.config import parse_yaml_config
from blackgate.config import read_yaml_config
from blackgate.config import read_default_config
from blackgate.server import run


@click.group()
@click.option('-c', '--config', default='')
@click.pass_context
def main(ctx, config):
    if not config:
        config = read_default_config()
    else:
        config = read_yaml_config(config)

    if not config:
        ctx.fail('config not found.')

    try:
        config = parse_yaml_config(config)
    except ValueError:
        ctx.fail('config is not valid yaml.')

    ctx.obj['config'] = config


@main.command()
@click.pass_context
def start(ctx):
    config = ctx.obj['config']
    component.configurations = config
    component.install()
    run(config.get('port', 9654))


if __name__ == '__main__':
    main()
Python
0.00013
66b95e2e0b89470993c52998eeb179d9e4926713
test public list
project/test_project.py
project/test_project.py
#from django.contrib.auth.models import User
from .models import *
from django.test import TestCase
from django.db import transaction
import reversion
#from reversion.models import Version

TEST_USER_NAME_CREATOR = 'test project creator'
TEST_USER_NAME_NOT_MEMBER = 'user is not a member'
TEST_PROJECT_PUBLIC_NAME = 'test project name public'
TEST_PROJECT_PRIVATE_NAME = 'test project name private'

def get_public_project():
    return Project.objects.get(fullname=TEST_PROJECT_PUBLIC_NAME)

def get_private_project():
    return Project.objects.get(fullname=TEST_PROJECT_PRIVATE_NAME)

def get_creator_user():
    return User.objects.get( username = TEST_USER_NAME_CREATOR )

def get_user_not_member():
    return User.objects.get( username = TEST_USER_NAME_NOT_MEMBER )

class Project_Test(TestCase):
    def setUp(self):
        user_creator = User.objects.create_user( username = TEST_USER_NAME_CREATOR, password = '-' )
        user_creator.save()

        user_not_member = User.objects.create_user( username = TEST_USER_NAME_NOT_MEMBER, password = '-' )
        user_not_member.save()

        test_project_public = Project(fullname=TEST_PROJECT_PUBLIC_NAME)
        test_project_public.set_change_user(user_creator)
        test_project_public.save()

        test_project_private = Project(fullname=TEST_PROJECT_PRIVATE_NAME)
        test_project_private.set_change_user(user_creator)
        test_project_private.private_flag = True
        test_project_private.save()

    def test_have_repo_false(self):
        test_project = get_public_project()
        self.assertEqual( test_project.have_repo(), False )

    def test_creator_is_member(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.is_member(user_creator), True )

    def test_creator_is_member_False(self):
        test_project = get_public_project()
        user_not_member = get_user_not_member()
        self.assertEqual( test_project.is_member(user_not_member), False )

    def test_creator_is_member_None(self):
        test_project = get_public_project()
        self.assertEqual( test_project.is_member(None), False )

    def test_creator_is_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.is_admin(user_creator), True )

    def test_creator_can_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.can_admin(user_creator), True )

    def test_creator_acl_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.user_access_level(user_creator), PROJECT_ACCESS_ADMIN )

    def test_none_user_acl_admin_public(self):
        test_project = get_public_project()
        self.assertEqual( test_project.user_access_level( None ), PROJECT_ACCESS_VIEW )

    def test_none_user_acl_admin_private(self):
        test_project = get_private_project()
        self.assertEqual( test_project.user_access_level( None ), PROJECT_ACCESS_NONE )

    def test_public_project_list(self):
        pl = GetAllPublicProjectList()
        self.assertEqual( get_public_project() in pl, True )
        self.assertEqual( pl.count(), 1 )
#from django.contrib.auth.models import User
from .models import *
from django.test import TestCase
from django.db import transaction
import reversion
#from reversion.models import Version

TEST_USER_NAME_CREATOR = 'test project creator'
TEST_USER_NAME_NOT_MEMBER = 'user is not a member'
TEST_PROJECT_PUBLIC_NAME = 'test project name public'
TEST_PROJECT_PRIVATE_NAME = 'test project name private'

def get_public_project():
    return Project.objects.get(fullname=TEST_PROJECT_PUBLIC_NAME)

def get_private_project():
    return Project.objects.get(fullname=TEST_PROJECT_PRIVATE_NAME)

def get_creator_user():
    return User.objects.get( username = TEST_USER_NAME_CREATOR )

def get_user_not_member():
    return User.objects.get( username = TEST_USER_NAME_NOT_MEMBER )

class Project_Test(TestCase):
    def setUp(self):
        user_creator = User.objects.create_user( username = TEST_USER_NAME_CREATOR, password = '-' )
        user_creator.save()

        user_not_member = User.objects.create_user( username = TEST_USER_NAME_NOT_MEMBER, password = '-' )
        user_not_member.save()

        test_project_public = Project(fullname=TEST_PROJECT_PUBLIC_NAME)
        test_project_public.set_change_user(user_creator)
        test_project_public.save()

        test_project_private = Project(fullname=TEST_PROJECT_PRIVATE_NAME)
        test_project_private.set_change_user(user_creator)
        test_project_private.private_flag = True
        test_project_private.save()

    def test_have_repo_false(self):
        test_project = get_public_project()
        self.assertEqual( test_project.have_repo(), False )

    def test_creator_is_member(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.is_member(user_creator), True )

    def test_creator_is_member_False(self):
        test_project = get_public_project()
        user_not_member = get_user_not_member()
        self.assertEqual( test_project.is_member(user_not_member), False )

    def test_creator_is_member_None(self):
        test_project = get_public_project()
        self.assertEqual( test_project.is_member(None), False )

    def test_creator_is_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.is_admin(user_creator), True )

    def test_creator_can_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.can_admin(user_creator), True )

    def test_creator_acl_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.user_access_level(user_creator), PROJECT_ACCESS_ADMIN )

    def test_none_user_acl_admin_public(self):
        test_project = get_public_project()
        self.assertEqual( test_project.user_access_level( None ), PROJECT_ACCESS_VIEW )

    def test_none_user_acl_admin_private(self):
        test_project = get_private_project()
        self.assertEqual( test_project.user_access_level( None ), PROJECT_ACCESS_NONE )
Python
0.000001
1408f2a7e782c0ca059b04cab2526cef558312b6
add comment to explain not extending BaseGoAccountCommand
go/base/management/commands/go_generate_export_conversations_urls.py
go/base/management/commands/go_generate_export_conversations_urls.py
""" Dump URLs that can be used by cURL for downloading conversation data """ from optparse import make_option from django.core.management.base import CommandError from django.utils.text import slugify from go.base.command_utils import BaseGoCommand # We don't extend BaseGoAccountCommand since the '--email' option is used # instead of '--email-address' class Command(BaseGoCommand): help = "Dump URLs for use with cURL for downloading message data." DEFAULT_TEMPLATE = ( 'curl -o {file_name}-{created_at}-{status}-{direction}.json ' '{base_url}{key}/{direction}.json?concurrency=100\n') option_list = BaseGoCommand.option_list + ( make_option( '--email', dest='email', default=None, help='The user to generate export URLs for.'), make_option( '--base-url', dest='base_url', default=None, help='http://export-host:export-port/message_store_exporter/'), make_option( '--template', dest='template', default=DEFAULT_TEMPLATE, help='The template for generating the cURL.') ) def handle(self, *args, **kwargs): self.email = kwargs['email'] if self.email is None: raise CommandError('--email is mandatory.') self.base_url = kwargs['base_url'] if self.base_url is None: raise CommandError('--base-url is mandatory.') self.template = kwargs['template'] self.user, self.user_api = self.mk_user_api(self.email) conversation_store = self.user_api.conversation_store conversation_keys = conversation_store.list_conversations() for conversation_key in conversation_keys: conversation = self.user_api.get_wrapped_conversation( conversation_key) for direction in ['inbound', 'outbound']: self.stdout.write( self.template.format( file_name=slugify(conversation.name), created_at=conversation.created_at.isoformat(), base_url=self.base_url, key=conversation.batch.key, direction=direction, status=(conversation.archive_status if conversation.archived() else conversation.get_status()), ) )
""" Dump URLs that can be used by cURL for downloading conversation data """ from optparse import make_option from django.core.management.base import CommandError from django.utils.text import slugify from go.base.command_utils import BaseGoCommand class Command(BaseGoCommand): help = "Dump URLs for use with cURL for downloading message data." DEFAULT_TEMPLATE = ( 'curl -o {file_name}-{created_at}-{status}-{direction}.json ' '{base_url}{key}/{direction}.json?concurrency=100\n') option_list = BaseGoCommand.option_list + ( make_option( '--email', dest='email', default=None, help='The user to generate export URLs for.'), make_option( '--base-url', dest='base_url', default=None, help='http://export-host:export-port/message_store_exporter/'), make_option( '--template', dest='template', default=DEFAULT_TEMPLATE, help='The template for generating the cURL.') ) def handle(self, *args, **kwargs): self.email = kwargs['email'] if self.email is None: raise CommandError('--email is mandatory.') self.base_url = kwargs['base_url'] if self.base_url is None: raise CommandError('--base-url is mandatory.') self.template = kwargs['template'] self.user, self.user_api = self.mk_user_api(self.email) conversation_store = self.user_api.conversation_store conversation_keys = conversation_store.list_conversations() for conversation_key in conversation_keys: conversation = self.user_api.get_wrapped_conversation( conversation_key) for direction in ['inbound', 'outbound']: self.stdout.write( self.template.format( file_name=slugify(conversation.name), created_at=conversation.created_at.isoformat(), base_url=self.base_url, key=conversation.batch.key, direction=direction, status=(conversation.archive_status if conversation.archived() else conversation.get_status()), ) )
Python
0
bdef12745f5d91bf196139b444b34810b529c38d
Fix #37: Make subclassing of btuple work for __add__ and __radd__.
blist/_btuple.py
blist/_btuple.py
from blist._blist import blist
from ctypes import c_int
import collections

class btuple(collections.Sequence):
    def __init__(self, seq=None):
        if isinstance(seq, btuple):
            self._blist = seq._blist
        elif seq is not None:
            self._blist = blist(seq)
        else:
            self._blist = blist()
        self._hash = -1

    def _btuple_or_tuple(self, other, f):
        if isinstance(other, btuple):
            rv = f(self._blist, other._blist)
        elif isinstance(other, tuple):
            rv = f(self._blist, blist(other))
        else:
            return NotImplemented
        if isinstance(rv, blist):
            rv = btuple(rv)
        return rv

    def __hash__(self):
        # Based on tuplehash from tupleobject.c
        if self._hash != -1:
            return self._hash

        n = len(self)
        mult = c_int(1000003)
        x = c_int(0x345678)
        for ob in self:
            n -= 1
            y = c_int(hash(ob))
            x = (x ^ y) * mult
            mult += c_int(82520) + n + n
        x += c_int(97531)
        if x == -1:
            x = -2;
        self._hash = x.value
        return self._hash

    def __add__(self, other):
        return self._btuple_or_tuple(other, blist.__add__)

    def __radd__(self, other):
        return self._btuple_or_tuple(other, blist.__radd__)

    def __contains__(self, item):
        return item in self._blist

    def __eq__(self, other):
        return self._btuple_or_tuple(other, blist.__eq__)

    def __ge__(self, other):
        return self._btuple_or_tuple(other, blist.__ge__)

    def __gt__(self, other):
        return self._btuple_or_tuple(other, blist.__gt__)

    def __le__(self, other):
        return self._btuple_or_tuple(other, blist.__le__)

    def __lt__(self, other):
        return self._btuple_or_tuple(other, blist.__lt__)

    def __ne__(self, other):
        return self._btuple_or_tuple(other, blist.__ne__)

    def __iter__(self):
        return iter(self._blist)

    def __len__(self):
        return len(self._blist)

    def __getitem__(self, key):
        if isinstance(key, slice):
            return btuple(self._blist[key])
        return self._blist[key]

    def __getslice__(self, i, j):
        return btuple(self._blist[i:j])

    def __repr__(self):
        return 'btuple((' + repr(self._blist)[7:-2] + '))'

    def __str__(self):
        return repr(self)

    def __mul__(self, i):
        return btuple(self._blist * i)

    def __rmul__(self, i):
        return btuple(i * self._blist)

    def count(self, item):
        return self._blist.count(item)

    def index(self, item):
        return self._blist.index(item)

del c_int
del collections
from blist._blist import blist
from ctypes import c_int
import collections

class btuple(collections.Sequence):
    def __init__(self, seq=None):
        if isinstance(seq, btuple):
            self._blist = seq._blist
        elif seq is not None:
            self._blist = blist(seq)
        else:
            self._blist = blist()
        self._hash = -1

    def _btuple_or_tuple(self, other, f):
        if isinstance(other, btuple):
            rv = f(self._blist, other._blist)
        elif isinstance(other, tuple):
            rv = f(self._blist, blist(other))
        else:
            return NotImplemented
        if isinstance(rv, blist):
            rv = btuple(rv)
        return rv

    def __hash__(self):
        # Based on tuplehash from tupleobject.c
        if self._hash != -1:
            return self._hash

        n = len(self)
        mult = c_int(1000003)
        x = c_int(0x345678)
        for ob in self:
            n -= 1
            y = c_int(hash(ob))
            x = (x ^ y) * mult
            mult += c_int(82520) + n + n
        x += c_int(97531)
        if x == -1:
            x = -2;
        self._hash = x.value
        return self._hash

    def __add__(self, other):
        rv = self._btuple_or_tuple(other, blist.__add__)
        if rv is NotImplemented:
            raise TypeError
        return rv

    def __radd__(self, other):
        rv = self._btuple_or_tuple(other, blist.__radd__)
        if rv is NotImplemented:
            raise TypeError
        return rv

    def __contains__(self, item):
        return item in self._blist

    def __eq__(self, other):
        return self._btuple_or_tuple(other, blist.__eq__)

    def __ge__(self, other):
        return self._btuple_or_tuple(other, blist.__ge__)

    def __gt__(self, other):
        return self._btuple_or_tuple(other, blist.__gt__)

    def __le__(self, other):
        return self._btuple_or_tuple(other, blist.__le__)

    def __lt__(self, other):
        return self._btuple_or_tuple(other, blist.__lt__)

    def __ne__(self, other):
        return self._btuple_or_tuple(other, blist.__ne__)

    def __iter__(self):
        return iter(self._blist)

    def __len__(self):
        return len(self._blist)

    def __getitem__(self, key):
        if isinstance(key, slice):
            return btuple(self._blist[key])
        return self._blist[key]

    def __getslice__(self, i, j):
        return btuple(self._blist[i:j])

    def __repr__(self):
        return 'btuple((' + repr(self._blist)[7:-2] + '))'

    def __str__(self):
        return repr(self)

    def __mul__(self, i):
        return btuple(self._blist * i)

    def __rmul__(self, i):
        return btuple(i * self._blist)

    def count(self, item):
        return self._blist.count(item)

    def index(self, item):
        return self._blist.index(item)

del c_int
del collections
Python
0
8abcd25ccd36d614ad650e0983385fbeb5a1777c
Add more search engines ping
blog/__init__.py
blog/__init__.py
# -*- coding: UTF-8 -*-
# YaBlog
# (c) Regis FLORET
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
#   names of its contributors may be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Regis FLORET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import unicodedata
import re

# Code from internet
def ping_all(sitemap_url='http://www.regisblog.fr/sitemap.xml'):
    """
    Pings the popular search engines, Google, Yahoo, ASK, and
    Bing, to let them know that you have updated your site's
    sitemap. Returns successfully pinged servers.
    """
    from django.contrib.sitemaps import ping_google

    SEARCH_ENGINE_PING_URLS = (
        ('google', 'http://www.google.com/webmasters/tools/ping'),
        ('technorati', 'http://rpc.technorati.com/rpc/ping'),
        ('pigomatic', 'http://rpc.pingomatic.com'),
        ('googleblog', 'http://blogsearch.google.com/ping/RPC2'),
        ('yahoo', 'http://search.yahooapis.com/SiteExplorerService/V1/ping'),
        ('ask', 'http://submissions.ask.com/ping'),
        ('bing', 'http://www.bing.com/webmaster/ping.aspx'),
    )

    successfully_pinged = []
    for (site, url) in SEARCH_ENGINE_PING_URLS:
        try:
            ping_google(sitemap_url=sitemap_url, ping_url=url)
            pinged = True
        except:
            pinged = False
        if pinged:
            successfully_pinged.append(site)

    return successfully_pinged


def sanitize_name(name):
    """ Ensure to remove all non-alphanum characters """
    name = unicodedata.normalize('NFKD', name).encode('ascii','ignore')
    for c in "&\"'()'ç=²¹~#{}[]+°$£^*µ%!§:/;.,?":
        name = name.replace(c,"")
    name = name.lower().strip()
    name = re.sub("\s","-",re.sub("\s+$","",name))
    return name
# -*- coding: UTF-8 -*-
# YaBlog
# (c) Regis FLORET
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
#   names of its contributors may be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Regis FLORET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import unicodedata
import re

# Code from internet
def ping_all(sitemap_url='http://www.regisblog.fr/sitemap.xml'):
    """
    Pings the popular search engines, Google, Yahoo, ASK, and
    Bing, to let them know that you have updated your site's
    sitemap. Returns successfully pinged servers.
    """
    from django.contrib.sitemaps import ping_google

    SEARCH_ENGINE_PING_URLS = (
        ('google', 'http://www.google.com/webmasters/tools/ping'),
        ('yahoo', 'http://search.yahooapis.com/SiteExplorerService/V1/ping'),
        ('ask', 'http://submissions.ask.com/ping'),
        ('bing', 'http://www.bing.com/webmaster/ping.aspx'),
    )

    successfully_pinged = []
    for (site, url) in SEARCH_ENGINE_PING_URLS:
        try:
            ping_google(sitemap_url=sitemap_url, ping_url=url)
            pinged = True
        except:
            pinged = False
        if pinged:
            successfully_pinged.append(site)

    return successfully_pinged


def sanitize_name(name):
    """ Ensure to remove all non-alphanum characters """
    name = unicodedata.normalize('NFKD', name).encode('ascii','ignore')
    for c in "&\"'()'ç=²¹~#{}[]+°$£^*µ%!§:/;.,?":
        name = name.replace(c,"")
    name = name.lower().strip()
    name = re.sub("\s","-",re.sub("\s+$","",name))
    return name
Python
0
3154f0098f9696cd48536599413659e47747491f
Add api [2]
blue/__init__.py
blue/__init__.py
from flask import Flask

app = Flask(__name__)

from blue.site.routes import mod
from blue.api.routes import mod

app.register_blueprint(site.routes.mod)
app.register_blueprint(api.routes.mod, url_prefix='/api')
from flask import Flask

app = Flask(__name__)

from blue.site.routes import mod
from blue.api.routes import mod

app.register_blueprint(site.routes.mod)
app.register_blueprint(api.routes.mod)
Python
0
20d47877bf426bc2ec9ac1b8a99ec887faec31c5
Fix minor problems with mux function
boolexpr/misc.py
boolexpr/misc.py
# Copyright 2016 Chris Drake
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Miscellaneous features not implemented in C++ API
"""

import functools
import itertools
import operator

from .util import clog2
from .util import iter_space
from .wrap import array
from .wrap import not_
from .wrap import or_
from .wrap import and_
from .wrap import _expect_array


def nhot(n, *args):
    """
    Return a CNF expression that means "exactly N input functions are true".
    """
    if not 0 <= n <= len(args):
        fstr = "expected 0 <= n <= {}, got {}"
        raise ValueError(fstr.format(len(args), n))

    clauses = list()
    for xs in itertools.combinations(args, n+1):
        clauses.append(or_(*[not_(x) for x in xs]))
    for xs in itertools.combinations(args, (len(args)+1)-n):
        clauses.append(or_(*xs))
    return and_(*clauses)


def majority(*args, conj=False):
    """
    Return an expression that means
    "the majority of input functions are true".

    If *conj* is ``True``, return a CNF.
    Otherwise, return a DNF.
    """
    clauses = list()
    if conj:
        for xs in itertools.combinations(args, (len(args) + 1) // 2):
            clauses.append(or_(*xs))
        return and_(*clauses)
    else:
        for xs in itertools.combinations(args, len(args) // 2 + 1):
            clauses.append(and_(*xs))
        return or_(*clauses)


def achilles_heel(*args):
    r"""
    Return the Achille's Heel function, defined as:
    :math:`\prod_{i=0}^{n/2-1}{X_{2i} + X_{2i+1}}`.
    """
    num = len(args)
    if num & 1:
        fstr = "expected an even number of arguments, got {}"
        raise ValueError(fstr.format(num))
    return and_(*[or_(args[2*i], args[2*i+1]) for i in range(num // 2)])


def mux(a, sel):
    """
    Return an expression that multiplexes an input array over a
    select array.
    """
    a = _expect_array(a)
    sel = _expect_array(sel)

    if sel.size < clog2(a.size):
        fstr = "expected at least {} select bits, got {}"
        raise ValueError(fstr.format(clog2(a.size), sel.size))

    terms = (tuple(sel[i] if vertex[i] else ~sel[i] for i in range(sel.size))
             for vertex in iter_space(sel.size))
    return or_(*[and_(x, *term) for (x, term) in zip(a.flat, terms)])


def exists(xs, f):
    """
    Return an expression that means
    "there exists a variable in *xs* such that *f* true."

    This is identical to ``f.smoothing(xs)``.
    """
    return f.smoothing(xs)


def forall(xs, f):
    """
    Return an expression that means
    "for all variables in *xs*, *f* is true."

    This is identical to ``f.consensus(xs)``.
    """
    return f.consensus(xs)


def cat(*xs):
    """Concatenate a sequence of expressions."""
    return functools.reduce(operator.add, xs, array([]))
# Copyright 2016 Chris Drake
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Miscellaneous features not implemented in C++ API
"""

import functools
import itertools
import operator

from .util import clog2
from .util import iter_space
from .wrap import array
from .wrap import not_
from .wrap import or_
from .wrap import and_
from .wrap import _expect_array


def nhot(n, *args):
    """
    Return a CNF expression that means "exactly N input functions are true".
    """
    if not 0 <= n <= len(args):
        fstr = "expected 0 <= n <= {}, got {}"
        raise ValueError(fstr.format(len(args), n))

    clauses = list()
    for xs in itertools.combinations(args, n+1):
        clauses.append(or_(*[not_(x) for x in xs]))
    for xs in itertools.combinations(args, (len(args)+1)-n):
        clauses.append(or_(*xs))
    return and_(*clauses)


def majority(*args, conj=False):
    """
    Return an expression that means
    "the majority of input functions are true".

    If *conj* is ``True``, return a CNF.
    Otherwise, return a DNF.
    """
    clauses = list()
    if conj:
        for xs in itertools.combinations(args, (len(args) + 1) // 2):
            clauses.append(or_(*xs))
        return and_(*clauses)
    else:
        for xs in itertools.combinations(args, len(args) // 2 + 1):
            clauses.append(and_(*xs))
        return or_(*clauses)


def achilles_heel(*args):
    r"""
    Return the Achille's Heel function, defined as:
    :math:`\prod_{i=0}^{n/2-1}{X_{2i} + X_{2i+1}}`.
    """
    num = len(args)
    if num & 1:
        fstr = "expected an even number of arguments, got {}"
        raise ValueError(fstr.format(num))
    return and_(*[or_(args[2*i], args[2*i+1]) for i in range(num // 2)])


def mux(xs, sel):
    """
    Return an expression that multiplexes a sequence of input functions over
    a sequence of select functions.
    """
    xs = _expect_array(xs)
    sel = _expect_array(sel)

    if sel.size < clog2(xs.size):
        fstr = "expected at least {} select bits, got {}"
        raise ValueError(fstr.format(clog2(xs.size), sel.size))

    terms = (tuple(sel[i] if vertex[i] else ~sel[i] for i in range(sel.size))
             for vertex in iter_space(sel.size))
    return or_(*[and_(x, *term) for (x, term) in zip(xs, terms)])


def exists(xs, f):
    """
    Return an expression that means
    "there exists a variable in *xs* such that *f* true."

    This is identical to ``f.smoothing(xs)``.
    """
    return f.smoothing(xs)


def forall(xs, f):
    """
    Return an expression that means
    "for all variables in *xs*, *f* is true."

    This is identical to ``f.consensus(xs)``.
    """
    return f.consensus(xs)


def cat(*xs):
    """Concatenate a sequence of expressions."""
    return functools.reduce(operator.add, xs, array([]))
Python
0.000018
8da750eddfecb2c7162e1a33c7c830fd083944bd
Change allowed exception raising
intelmq/bots/parsers/abusech/parser_ip.py
intelmq/bots/parsers/abusech/parser_ip.py
# -*- coding: utf-8 -*-
"""
Parses simple newline separated list of IPs.

Docs:
- https://feodotracker.abuse.ch/blocklist/
- https://zeustracker.abuse.ch/blocklist.php
"""

import re

import dateutil

from intelmq.lib.bot import ParserBot
from intelmq.lib import utils
from intelmq.lib.exceptions import PipelineError

FEEDS = {
    'https://feodotracker.abuse.ch/downloads/ipblocklist.csv': {
        'format': [
            'time.source',
            'source.ip',
            'source.port',
            'malware.name'
        ],
        'malware': 'Cridex'
    },
    'https://zeustracker.abuse.ch/blocklist.php?download=ipblocklist': {
        'format': [
            'source.ip'
        ],
        'malware': 'Zeus'
    },
    'https://zeustracker.abuse.ch/blocklist.php?download=badips': {
        'format': [
            'source.ip'
        ],
        'malware': 'Zeus'
    }
}


class AbusechIPParserBot(ParserBot):
    __last_generated_date = None
    __is_comment_line_regex = re.compile(r'^#+.*')
    __date_regex = re.compile(r'[0-9]{4}.[0-9]{2}.[0-9]{2}.[0-9]{2}.[0-9]{2}.[0-9]{2}( UTC)?')

    def parse(self, report: dict):
        feed = report['feed.url']
        raw_lines = utils.base64_decode(report.get("raw")).splitlines()

        comments = list(r for r in raw_lines if self.__is_comment_line_regex.search(r))
        fields = comments[-1].split(',')
        if len(fields) is not len(FEEDS[feed]['format']):
            self.logger.warning("Feed '{}' has not the expected fields: {} != {}"
                                .format(feed, len(fields), len(FEEDS[feed]['format'])))
            raise ValueError("Abusech ip parser is not up to date with the format online")

        for line in comments:
            if 'Last updated' in line:
                self.__last_generated_date = dateutil.parser.parse(
                    self.__date_regex.search(line).group(0)).isoformat()

        lines = (l for l in raw_lines if not self.__is_comment_line_regex.search(l))

        for line in lines:
            line = line.strip()
            if not any([line.startswith(prefix) for prefix in self.ignore_lines_starting]):
                yield line

    def parse_line(self, line, report):
        event = self.new_event(report)

        self.__process_defaults(event, line, report['feed.url'])
        self.__process_fields(event, line, report['feed.url'])

        yield event

    def __process_defaults(self, event, line, feed_url):
        defaults = {
            ('malware.name', FEEDS[feed_url]['malware']),
            ('raw', line),
            ('classification.type', 'c&c'),
            ('classification.taxonomy', 'malicious code'),
            ('time.observation', self.__last_generated_date)
        }

        for i in defaults:
            if i[0] not in FEEDS[feed_url]['format']:
                if i[1] is None:
                    continue
                else:
                    event.add(i[0], i[1], overwrite=True)

    @staticmethod
    def __process_fields(event, line, feed_url):
        for field, value in zip(FEEDS[feed_url]['format'], line.split(',')):
            if field == 'time.source':
                ts = dateutil.parser.parse(value + ' UTC').isoformat() if not value.endswith(' UTC') else value
                event.add(field, ts)
            else:
                event.add(field, value)

    def recover_line(self, line):
        return '\n'.join(self.tempdata + [line])


BOT = AbusechIPParserBot
# -*- coding: utf-8 -*-
"""
Parses simple newline separated list of IPs.

Docs:
- https://feodotracker.abuse.ch/blocklist/
- https://zeustracker.abuse.ch/blocklist.php
"""

import re

import dateutil

from intelmq.lib.bot import ParserBot
from intelmq.lib import utils
from intelmq.lib.exceptions import PipelineError

FEEDS = {
    'https://feodotracker.abuse.ch/downloads/ipblocklist.csv': {
        'format': [
            'time.source',
            'source.ip',
            'source.port',
            'malware.name'
        ],
        'malware': 'Cridex'
    },
    'https://zeustracker.abuse.ch/blocklist.php?download=ipblocklist': {
        'format': [
            'source.ip'
        ],
        'malware': 'Zeus'
    },
    'https://zeustracker.abuse.ch/blocklist.php?download=badips': {
        'format': [
            'source.ip'
        ],
        'malware': 'Zeus'
    }
}


class AbusechIPParserBot(ParserBot):
    __last_generated_date = None
    __is_comment_line_regex = re.compile(r'^#+.*')
    __date_regex = re.compile(r'[0-9]{4}.[0-9]{2}.[0-9]{2}.[0-9]{2}.[0-9]{2}.[0-9]{2}( UTC)?')

    def parse(self, report: dict):
        feed = report['feed.url']
        raw_lines = utils.base64_decode(report.get("raw")).splitlines()

        comments = list(r for r in raw_lines if self.__is_comment_line_regex.search(r))
        fields = comments[-1].split(',')
        if len(fields) is not len(FEEDS[feed]['format']):
            self.logger.warning("Feed '{}' has not the expected fields: {} != {}"
                                .format(feed, len(fields), len(FEEDS[feed]['format'])))
            raise PipelineError("Abusech ip parser is not up to date with the format online")

        for line in comments:
            if 'Last updated' in line:
                self.__last_generated_date = dateutil.parser.parse(
                    self.__date_regex.search(line).group(0)).isoformat()

        lines = (l for l in raw_lines if not self.__is_comment_line_regex.search(l))

        for line in lines:
            line = line.strip()
            if not any([line.startswith(prefix) for prefix in self.ignore_lines_starting]):
                yield line

    def parse_line(self, line, report):
        event = self.new_event(report)

        self.__process_defaults(event, line, report['feed.url'])
        self.__process_fields(event, line, report['feed.url'])

        yield event

    def __process_defaults(self, event, line, feed_url):
        defaults = {
            ('malware.name', FEEDS[feed_url]['malware']),
            ('raw', line),
            ('classification.type', 'c&c'),
            ('classification.taxonomy', 'malicious code'),
            ('time.observation', self.__last_generated_date)
        }

        for i in defaults:
            if i[0] not in FEEDS[feed_url]['format']:
                if i[1] is None:
                    continue
                else:
                    event.add(i[0], i[1], overwrite=True)

    @staticmethod
    def __process_fields(event, line, feed_url):
        for field, value in zip(FEEDS[feed_url]['format'], line.split(',')):
            if field == 'time.source':
                ts = dateutil.parser.parse(value + ' UTC').isoformat() if not value.endswith(' UTC') else value
                event.add(field, ts)
            else:
                event.add(field, value)

    def recover_line(self, line):
        return '\n'.join(self.tempdata + [line])


BOT = AbusechIPParserBot
Python
0
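The commit above swaps a PipelineError for a ValueError when the feed header no longer matches the expected column list. A minimal sketch of that header-validation idea, detached from intelmq; the feed text and EXPECTED_FIELDS below are illustrative, not part of the original bot:

```python
# Validate a CSV feed header against an expected column list before parsing.
import csv
import io

EXPECTED_FIELDS = ['time.source', 'source.ip', 'source.port', 'malware.name']

def parse_feed(text):
    reader = csv.reader(io.StringIO(text))
    header = next(reader)
    if len(header) != len(EXPECTED_FIELDS):
        # Fail loudly when the upstream format drifts.
        raise ValueError('unexpected feed format: %r' % header)
    return [dict(zip(EXPECTED_FIELDS, row)) for row in reader]

rows = parse_feed('a,b,c,d\n2019-01-01 00:00:00,1.2.3.4,447,Cridex\n')
assert rows[0]['source.ip'] == '1.2.3.4'
```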
ff9e3e99e7a5bda1eefdd925960b6b6153a9e10d
Update messenger.py
bot/messenger.py
bot/messenger.py
import logging import os import random logger = logging.getLogger(__name__) class Messenger(object): def __init__(self, slack_clients): self.clients = slack_clients def send_message(self, channel_id, msg): # in the case of Group and Private channels, RTM channel payload is a complex dictionary if isinstance(channel_id, dict): channel_id = channel_id['id'] logger.debug('Sending msg: {} to channel: {}'.format(msg, channel_id)) channel = self.clients.rtm.server.channels.find(channel_id) channel.send_message("{}".format(msg.encode('ascii', 'ignore'))) def write_help_message(self, channel_id): bot_uid = self.clients.bot_user_id() txt = '{}\n{}\n{}\n{}'.format( "I'm your friendly Slack bot written in Python. I'll *_respond_* to the following commands:", "> `hi <@" + bot_uid + ">` - I'll respond with a randomized greeting mentioning your user. :wave:", "> `<@" + bot_uid + "> joke` - I'll tell you one of my finest jokes, with a typing pause for effect. :laughing:", "> `<@" + bot_uid + "> attachment` - I'll demo a post with an attachment using the Web API. :paperclip:") self.send_message(channel_id, txt) def write_greeting(self, channel_id, user_id): greetings = ['Hi', 'Hello', 'Nice to meet you', 'Howdy', 'Salutations'] txt = '{}, <@{}>!'.format(random.choice(greetings), user_id) self.send_message(channel_id, txt) def write_prompt(self, channel_id): bot_uid = self.clients.bot_user_id() txt = "I'm sorry, I didn't quite understand... Can I help you? (e.g. `<@" + bot_uid + "> help`)" self.send_message(channel_id, txt) def write_joke(self, channel_id): question = "Why did the python cross the road?" self.send_message(channel_id, question) self.clients.send_user_typing_pause(channel_id) answer = "To eat the chicken on the other side! :laughing:" self.send_message(channel_id, answer) def write_error(self, channel_id, err_msg): txt = ":face_with_head_bandage: my maker didn't handle this error very well:\n>```{}```".format(err_msg) self.send_message(channel_id, txt) def demo_attachment(self, channel_id): txt = "Beep Beep Boop is a ridiculously simple hosting platform for your Slackbots." attachment = { "pretext": "We bring bots to life. :sunglasses: :thumbsup:", "title": "Host, deploy and share your bot in seconds.", "title_link": "https://beepboophq.com/", "text": txt, "fallback": txt, "image_url": "https://storage.googleapis.com/beepboophq/_assets/bot-1.22f6fb.png", "color": "#7CD197", } self.clients.web.chat.post_message(channel_id, txt, attachments=[attachment], as_user='true') def write_task_link(self, channel_id, task): fs_url = os.getenv("FLYSPRAY_URL", "") txt = fs_url+task self.send_message(channel_id, txt)
import logging import random logger = logging.getLogger(__name__) class Messenger(object): def __init__(self, slack_clients): self.clients = slack_clients def send_message(self, channel_id, msg): # in the case of Group and Private channels, RTM channel payload is a complex dictionary if isinstance(channel_id, dict): channel_id = channel_id['id'] logger.debug('Sending msg: {} to channel: {}'.format(msg, channel_id)) channel = self.clients.rtm.server.channels.find(channel_id) channel.send_message("{}".format(msg.encode('ascii', 'ignore'))) def write_help_message(self, channel_id): bot_uid = self.clients.bot_user_id() txt = '{}\n{}\n{}\n{}'.format( "I'm your friendly Slack bot written in Python. I'll *_respond_* to the following commands:", "> `hi <@" + bot_uid + ">` - I'll respond with a randomized greeting mentioning your user. :wave:", "> `<@" + bot_uid + "> joke` - I'll tell you one of my finest jokes, with a typing pause for effect. :laughing:", "> `<@" + bot_uid + "> attachment` - I'll demo a post with an attachment using the Web API. :paperclip:") self.send_message(channel_id, txt) def write_greeting(self, channel_id, user_id): greetings = ['Hi', 'Hello', 'Nice to meet you', 'Howdy', 'Salutations'] txt = '{}, <@{}>!'.format(random.choice(greetings), user_id) self.send_message(channel_id, txt) def write_prompt(self, channel_id): bot_uid = self.clients.bot_user_id() txt = "I'm sorry, I didn't quite understand... Can I help you? (e.g. `<@" + bot_uid + "> help`)" self.send_message(channel_id, txt) def write_joke(self, channel_id): question = "Why did the python cross the road?" self.send_message(channel_id, question) self.clients.send_user_typing_pause(channel_id) answer = "To eat the chicken on the other side! :laughing:" self.send_message(channel_id, answer) def write_error(self, channel_id, err_msg): txt = ":face_with_head_bandage: my maker didn't handle this error very well:\n>```{}```".format(err_msg) self.send_message(channel_id, txt) def demo_attachment(self, channel_id): txt = "Beep Beep Boop is a ridiculously simple hosting platform for your Slackbots." attachment = { "pretext": "We bring bots to life. :sunglasses: :thumbsup:", "title": "Host, deploy and share your bot in seconds.", "title_link": "https://beepboophq.com/", "text": txt, "fallback": txt, "image_url": "https://storage.googleapis.com/beepboophq/_assets/bot-1.22f6fb.png", "color": "#7CD197", } self.clients.web.chat.post_message(channel_id, txt, attachments=[attachment], as_user='true') def write_task_link(self, channel_id, task): txt = "task # " + task self.send_message(channel_id, txt)
Python
0.000001
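The new write_task_link builds the URL from the FLYSPRAY_URL environment variable via os.getenv. A minimal sketch of that pattern, assuming only the variable name from the diff; the default base URL is made up:

```python
import os

def task_url(task_id, default_base='https://flyspray.example.invalid/task/'):
    # Fall back to a placeholder base when FLYSPRAY_URL is unset.
    base = os.getenv('FLYSPRAY_URL', default_base)
    return base + str(task_id)

print(task_url(42))
```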
1e930adbfb1714670ad04717401b36b59bf12558
Bump version to 0.0.2
bqdm/__init__.py
bqdm/__init__.py
# -*- coding: utf-8 -*- from __future__ import absolute_import __version__ = '0.0.2' CONTEXT_SETTINGS = dict( help_option_names=['-h', '--help'], max_content_width=120, )
# -*- coding: utf-8 -*- from __future__ import absolute_import __version__ = '0.0.1' CONTEXT_SETTINGS = dict( help_option_names=['-h', '--help'], max_content_width=120, )
Python
0.000001
6c556f6c5e4aa70173a84f6e6854390241231021
Update the Jinja2Templates() constructor to allow PathLike (#1292)
starlette/templating.py
starlette/templating.py
import typing from os import PathLike from starlette.background import BackgroundTask from starlette.responses import Response from starlette.types import Receive, Scope, Send try: import jinja2 # @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1 if hasattr(jinja2, "pass_context"): pass_context = jinja2.pass_context else: # pragma: nocover pass_context = jinja2.contextfunction except ImportError: # pragma: nocover jinja2 = None # type: ignore class _TemplateResponse(Response): media_type = "text/html" def __init__( self, template: typing.Any, context: dict, status_code: int = 200, headers: dict = None, media_type: str = None, background: BackgroundTask = None, ): self.template = template self.context = context content = template.render(context) super().__init__(content, status_code, headers, media_type, background) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: request = self.context.get("request", {}) extensions = request.get("extensions", {}) if "http.response.template" in extensions: await send( { "type": "http.response.template", "template": self.template, "context": self.context, } ) await super().__call__(scope, receive, send) class Jinja2Templates: """ templates = Jinja2Templates("templates") return templates.TemplateResponse("index.html", {"request": request}) """ def __init__(self, directory: typing.Union[str, PathLike]) -> None: assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates" self.env = self._create_env(directory) def _create_env( self, directory: typing.Union[str, PathLike] ) -> "jinja2.Environment": @pass_context def url_for(context: dict, name: str, **path_params: typing.Any) -> str: request = context["request"] return request.url_for(name, **path_params) loader = jinja2.FileSystemLoader(directory) env = jinja2.Environment(loader=loader, autoescape=True) env.globals["url_for"] = url_for return env def get_template(self, name: str) -> "jinja2.Template": return self.env.get_template(name) def TemplateResponse( self, name: str, context: dict, status_code: int = 200, headers: dict = None, media_type: str = None, background: BackgroundTask = None, ) -> _TemplateResponse: if "request" not in context: raise ValueError('context must include a "request" key') template = self.get_template(name) return _TemplateResponse( template, context, status_code=status_code, headers=headers, media_type=media_type, background=background, )
import typing from starlette.background import BackgroundTask from starlette.responses import Response from starlette.types import Receive, Scope, Send try: import jinja2 # @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1 if hasattr(jinja2, "pass_context"): pass_context = jinja2.pass_context else: # pragma: nocover pass_context = jinja2.contextfunction except ImportError: # pragma: nocover jinja2 = None # type: ignore class _TemplateResponse(Response): media_type = "text/html" def __init__( self, template: typing.Any, context: dict, status_code: int = 200, headers: dict = None, media_type: str = None, background: BackgroundTask = None, ): self.template = template self.context = context content = template.render(context) super().__init__(content, status_code, headers, media_type, background) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: request = self.context.get("request", {}) extensions = request.get("extensions", {}) if "http.response.template" in extensions: await send( { "type": "http.response.template", "template": self.template, "context": self.context, } ) await super().__call__(scope, receive, send) class Jinja2Templates: """ templates = Jinja2Templates("templates") return templates.TemplateResponse("index.html", {"request": request}) """ def __init__(self, directory: str) -> None: assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates" self.env = self._create_env(directory) def _create_env(self, directory: str) -> "jinja2.Environment": @pass_context def url_for(context: dict, name: str, **path_params: typing.Any) -> str: request = context["request"] return request.url_for(name, **path_params) loader = jinja2.FileSystemLoader(directory) env = jinja2.Environment(loader=loader, autoescape=True) env.globals["url_for"] = url_for return env def get_template(self, name: str) -> "jinja2.Template": return self.env.get_template(name) def TemplateResponse( self, name: str, context: dict, status_code: int = 200, headers: dict = None, media_type: str = None, background: BackgroundTask = None, ) -> _TemplateResponse: if "request" not in context: raise ValueError('context must include a "request" key') template = self.get_template(name) return _TemplateResponse( template, context, status_code=status_code, headers=headers, media_type=media_type, background=background, )
Python
0
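The starlette change widens the constructor to accept either str or os.PathLike. A small sketch of the same signature pattern, assuming nothing beyond the standard library; os.fspath() is the usual way to normalise both forms:

```python
import os
import typing
from pathlib import Path

def open_templates(directory: typing.Union[str, os.PathLike]) -> str:
    # fspath() returns a str as-is and converts Path objects to str.
    return os.fspath(directory)

assert open_templates('templates') == open_templates(Path('templates'))
```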
a0dfb1ce1a72880da34ad817c8021e54e2ce0e5d
add fields.
lib/acli/output.py
lib/acli/output.py
# from tabulate import tabulate from terminaltables import AsciiTable def output_ec2(output_type=None, instances=None): if output_type == 'console': heading = ['id', 'state', 'type', 'image', 'public ip', 'private ip'] table_data = [heading] for instance in instances: instance_id = instance[0].id instance_state = instance[0].state instance_type = instance[0].instance_type image_id = instance[0].image_id public_ip = instance[0].ip_address private_ip = instance[0].private_ip_address table_data.append([instance_id, instance_state, instance_type, image_id, public_ip if public_ip else '-', private_ip if private_ip else '-']) table = AsciiTable(table_data) print(table.table) def output_elb(output_type=None, elbs=None): if output_type == 'console': heading = ['id', 'name'] table_data = [heading] for elb in elbs: elb_id = elb.name elb_name = elb.name table_data.append([elb_id, elb_name]) table = AsciiTable(table_data) print(table.table)
# from tabulate import tabulate from terminaltables import AsciiTable def output_ec2(output_type=None, instances=None): if output_type == 'console': heading = ['id', 'state'] table_data = [heading] for instance in instances: instance_id = instance[0].id instance_state = instance[0].state table_data.append([instance_id, instance_state]) table = AsciiTable(table_data) print(table.table) def output_elb(output_type=None, elbs=None): if output_type == 'console': heading = ['id', 'name'] table_data = [heading] for elb in elbs: elb_id = elb.name elb_name = elb.name table_data.append([elb_id, elb_name]) table = AsciiTable(table_data) print(table.table) # def console_table(collection_type, collection_data): # pass
Python
0
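The acli diff extends the console table with extra columns and substitutes '-' for missing values. A sketch of that row-building pattern, assuming the terminaltables package is installed; the instance tuples are invented stand-ins for boto objects:

```python
from terminaltables import AsciiTable

instances = [('i-123', 'running', None), ('i-456', 'stopped', '10.0.0.5')]
table_data = [['id', 'state', 'private ip']]  # heading row first
for instance_id, state, private_ip in instances:
    table_data.append([instance_id, state, private_ip if private_ip else '-'])
print(AsciiTable(table_data).table)
```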
2d7b3afaca97a3e6a115c077586d0a9fb9daf8b2
Fix imap connection lost (#380)
i3pystatus/mail/imap.py
i3pystatus/mail/imap.py
import imaplib import socket from i3pystatus.mail import Backend class IMAP(Backend): """ Checks for mail on a IMAP server """ settings = ( "host", "port", "username", "password", ('keyring_backend', 'alternative keyring backend for retrieving credentials'), "ssl", "mailbox", ) required = ("host", "username", "password") keyring_backend = None port = 993 ssl = True mailbox = "INBOX" imap_class = imaplib.IMAP4 connection = None last = 0 def init(self): if self.ssl: self.imap_class = imaplib.IMAP4_SSL def get_connection(self): if self.connection: try: self.connection.select(self.mailbox) except socket.error: # NOTE(sileht): retry just once if the connection have been # broken to ensure this is not a sporadic connection lost. # Like wifi reconnect, sleep wake up try: self.connection.logout() except socket.error: pass self.connection = None if not self.connection: self.connection = self.imap_class(self.host, self.port) self.connection.login(self.username, self.password) self.connection.select(self.mailbox) return self.connection @property def unread(self): conn = self.get_connection() self.last = len(conn.search(None, "UnSeen")[1][0].split()) return self.last Backend = IMAP
import sys import imaplib from i3pystatus.mail import Backend from i3pystatus.core.util import internet class IMAP(Backend): """ Checks for mail on a IMAP server """ settings = ( "host", "port", "username", "password", ('keyring_backend', 'alternative keyring backend for retrieving credentials'), "ssl", "mailbox", ) required = ("host", "username", "password") keyring_backend = None port = 993 ssl = True mailbox = "INBOX" imap_class = imaplib.IMAP4 connection = None last = 0 def init(self): if self.ssl: self.imap_class = imaplib.IMAP4_SSL def get_connection(self): if not self.connection: self.connection = self.imap_class(self.host, self.port) self.connection.login(self.username, self.password) self.connection.select(self.mailbox) self.connection.select(self.mailbox) return self.connection @property def unread(self): if internet(): conn = self.get_connection() self.last = len(conn.search(None, "UnSeen")[1][0].split()) return self.last Backend = IMAP
Python
0.000001
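The IMAP fix probes a cached connection and retries exactly once when the socket has gone away. A sketch of that retry-once shape with the IMAP specifics abstracted out; connect and probe are placeholder callables, not imaplib API:

```python
import socket

def get_connection(connect, probe, cached=None):
    if cached is not None:
        try:
            probe(cached)      # e.g. conn.select(mailbox)
            return cached
        except socket.error:   # connection lost (wifi reconnect, sleep/wake)
            cached = None
    fresh = connect()
    probe(fresh)
    return fresh
```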
c0f959446731b8ce2677c56afd5456c2e047cabb
Change the webapp tests to not interfere with instance level connections.
ichnaea/webapp/tests.py
ichnaea/webapp/tests.py
from ichnaea.config import DummyConfig from ichnaea.tests.base import ( _make_app, _make_db, DBTestCase, RedisIsolation, REDIS_URI, SQLURI, ) class TestApp(RedisIsolation, DBTestCase): def test_db_config(self): app_config = DummyConfig({'ichnaea': { 'db_master': SQLURI, 'db_slave': SQLURI, }}) app = _make_app(app_config=app_config, _raven_client=self.raven_client, _redis_client=self.redis_client, _stats_client=self.stats_client, ) db_rw = app.app.registry.db_rw db_ro = app.app.registry.db_ro # the configured databases are working try: self.assertTrue(db_rw.ping()) self.assertTrue(db_ro.ping()) finally: # clean up the new db engine's _make_app created db_rw.engine.pool.dispose() db_ro.engine.pool.dispose() def test_db_hooks(self): db_rw = _make_db() db_ro = _make_db() app = _make_app(_db_rw=db_rw, _db_ro=db_ro, _raven_client=self.raven_client, _redis_client=self.redis_client, _stats_client=self.stats_client, ) # check that our _db hooks are passed through self.assertTrue(app.app.registry.db_rw is db_rw) self.assertTrue(app.app.registry.db_ro is db_ro) def test_redis_config(self): app_config = DummyConfig({'ichnaea': { 'redis_url': REDIS_URI, }}) app = _make_app(app_config=app_config, _db_rw=self.db_rw, _db_ro=self.db_ro, _raven_client=self.raven_client, _stats_client=self.stats_client) redis_client = app.app.registry.redis_client self.assertTrue(redis_client is not None) self.assertEqual( redis_client.connection_pool.connection_kwargs['db'], 1)
from ichnaea.config import DummyConfig from ichnaea.tests.base import ( _make_app, _make_db, DBTestCase, RedisIsolation, REDIS_URI, SQLURI, ) class TestApp(RedisIsolation, DBTestCase): def test_db_hooks(self): app_config = DummyConfig({'ichnaea': { 'db_master': SQLURI, 'db_slave': SQLURI, }}) app = _make_app(app_config=app_config, _raven_client=self.raven_client, _redis_client=self.redis_client, _stats_client=self.stats_client, ) self.db_rw = app.app.registry.db_rw self.db_ro = app.app.registry.db_ro app.get('/stats_wifi.json', status=200) def test_db_config(self): self.db_rw = _make_db() self.db_ro = _make_db() app = _make_app(_db_rw=self.db_rw, _db_ro=self.db_ro, _raven_client=self.raven_client, _redis_client=self.redis_client, _stats_client=self.stats_client, ) app.get('/stats_wifi.json', status=200) def test_redis_config(self): app_config = DummyConfig({'ichnaea': { 'db_master': SQLURI, 'db_slave': SQLURI, 'redis_url': REDIS_URI, }}) app = _make_app(app_config=app_config, _raven_client=self.raven_client, _stats_client=self.stats_client) self.assertTrue(app.app.registry.redis_client is not None)
Python
0
17f1c210c9c8b410cb6888a51ea1d863b74c14be
Use has_module check in _can_read
imageio/plugins/gdal.py
imageio/plugins/gdal.py
# -*- coding: utf-8 -*- # Copyright (c) 2015, imageio contributors # imageio is distributed under the terms of the (new) BSD License. """ Plugin for reading gdal files. """ from __future__ import absolute_import, print_function, division from .. import formats from ..core import Format, has_module _gdal = None # lazily loaded in load_lib() def load_lib(): global _gdal try: import osgeo.gdal as _gdal except ImportError: raise ImportError("The GDAL format relies on the GDAL package." "Please refer to http://www.gdal.org/" "for further instructions.") return _gdal GDAL_FORMATS = ('.tiff', ' .tif', '.img', '.ecw', '.jpg', '.jpeg') class GdalFormat(Format): """ Parameters for reading ---------------------- None """ def _can_read(self, request): if request.filename.lower().endswith('.ecw'): return True if has_module('osgeo.gdal'): return request.filename.lower().endswith(self.extensions) def _can_write(self, request): return False # -- class Reader(Format.Reader): def _open(self): if not _gdal: load_lib() self._ds = _gdal.Open(self.request.get_local_filename()) def _close(self): del self._ds def _get_length(self): return 1 def _get_data(self, index): if index != 0: raise IndexError('Gdal file contains only one dataset') return self._ds.ReadAsArray(), self._get_meta_data(index) def _get_meta_data(self, index): return self._ds.GetMetadata() # Add this format formats.add_format(GdalFormat( 'gdal', 'Geospatial Data Abstraction Library', ' '.join(GDAL_FORMATS), 'iIvV'))
# -*- coding: utf-8 -*- # Copyright (c) 2015, imageio contributors # imageio is distributed under the terms of the (new) BSD License. """ Plugin for reading gdal files. """ from __future__ import absolute_import, print_function, division from .. import formats from ..core import Format _gdal = None # lazily loaded in load_lib() def load_lib(): global _gdal try: import osgeo.gdal as _gdal except ImportError: raise ImportError("The GDAL format relies on the GDAL package." "Please refer to http://www.gdal.org/" "for further instructions.") return _gdal GDAL_FORMATS = ('.tiff', ' .tif', '.img', '.ecw', '.jpg', '.jpeg') class GdalFormat(Format): """ Parameters for reading ---------------------- None """ def _can_read(self, request): return request.filename.lower().endswith(GDAL_FORMATS) def _can_write(self, request): return False # -- class Reader(Format.Reader): def _open(self): if not _gdal: load_lib() self._ds = _gdal.Open(self.request.get_local_filename()) def _close(self): del self._ds def _get_length(self): return 1 def _get_data(self, index): if index != 0: raise IndexError('Gdal file contains only one dataset') return self._ds.ReadAsArray(), self._get_meta_data(index) def _get_meta_data(self, index): return self._ds.GetMetadata() # Add this format formats.add_format(GdalFormat( 'gdal', 'Geospatial Data Abstraction Library', ' '.join(GDAL_FORMATS), 'iIvV'))
Python
0
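The gdal plugin now gates _can_read on imageio's has_module check. A rough standard-library equivalent; this approximates, rather than reproduces, the imageio helper:

```python
import importlib.util

def has_module(name):
    # True when the module could be imported, without actually importing it.
    try:
        return importlib.util.find_spec(name) is not None
    except (ImportError, ValueError):
        return False

print(has_module('json'), has_module('definitely_not_installed'))
```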
70f1838951460c16b7eb4b8220621c198d4634a5
remove pdb
inferelator_ng/tfa.py
inferelator_ng/tfa.py
import numpy as np import pandas as pd from scipy import linalg class TFA: """ TFA calculates transcription factor activity using matrix pseudoinverse Parameters -------- prior: pd.dataframe binary or numeric g by t matrix stating existence of gene-TF interactions. g--gene, t--TF. exp.mat: pd.dataframe normalized expression g by c matrix. g--gene, c--conditions exp.mat.halftau: pd.dataframe normalized expression matrix for time series. dup_self=True: boolean If dup_slef (duplicate self) is True, TFs that other TFs with the exact same set of interactions in the prior are kept and will have the same activities """ def __init__(self, prior, exp_mat, exp_mat_halftau): self.prior = prior self.exp_mat = exp_mat self.exp_mat_halftau = exp_mat_halftau def tfa(self, allow_self_interactions_for_duplicate_prior_columns = True): # Create activity dataframe with default values set to the expression activity = pd.DataFrame(self.exp_mat.loc[self.prior.columns,:].values, index = self.prior.columns, columns = self.exp_mat.columns) # Finds tfs that have non-zero regulation # TODO: Remove as some form of pre-processing??? non_zero_tfs = self.prior.loc[:, (self.prior != 0).any(axis=0)].columns.values.tolist() # dup_tfs: duplicated TFs dup_tfs = [] if allow_self_interactions_for_duplicate_prior_columns: # Everything up til now is useless if the prior is well-made. # could replace with checks: check the TF list is duplicates = self.prior[non_zero_tfs].transpose().duplicated(keep=False) # mark duplicates as true dup_tfs = duplicates[duplicates].index.tolist() # find non-duplicated TFs that are also present in target gene list ndup_tfs = list(set(non_zero_tfs).difference(dup_tfs)) self_tfs = list(set(ndup_tfs).intersection(self.prior.index.values.tolist())) # Set the diagonal of the self-interaction tfs to zero subset = self.prior.loc[self_tfs, self_tfs].values np.fill_diagonal(subset, 0) self.prior.set_value(self_tfs, self_tfs, subset) if non_zero_tfs: activity.loc[non_zero_tfs,:] = np.matrix(linalg.pinv2(self.prior[non_zero_tfs])) * np.matrix(self.exp_mat_halftau) return activity
import numpy as np import pandas as pd from scipy import linalg class TFA: """ TFA calculates transcription factor activity using matrix pseudoinverse Parameters -------- prior: pd.dataframe binary or numeric g by t matrix stating existence of gene-TF interactions. g--gene, t--TF. exp.mat: pd.dataframe normalized expression g by c matrix. g--gene, c--conditions exp.mat.halftau: pd.dataframe normalized expression matrix for time series. dup_self=True: boolean If dup_slef (duplicate self) is True, TFs that other TFs with the exact same set of interactions in the prior are kept and will have the same activities """ def __init__(self, prior, exp_mat, exp_mat_halftau): self.prior = prior self.exp_mat = exp_mat self.exp_mat_halftau = exp_mat_halftau def tfa(self, allow_self_interactions_for_duplicate_prior_columns = True): import pdb; pdb.set_trace() # Create activity dataframe with default values set to the expression activity = pd.DataFrame(self.exp_mat.loc[self.prior.columns,:].values, index = self.prior.columns, columns = self.exp_mat.columns) # Finds tfs that have non-zero regulation # TODO: Remove as some form of pre-processing??? non_zero_tfs = self.prior.loc[:, (self.prior != 0).any(axis=0)].columns.values.tolist() # dup_tfs: duplicated TFs dup_tfs = [] if allow_self_interactions_for_duplicate_prior_columns: # Everything up til now is useless if the prior is well-made. # could replace with checks: check the TF list is duplicates = self.prior[non_zero_tfs].transpose().duplicated(keep=False) # mark duplicates as true dup_tfs = duplicates[duplicates].index.tolist() # find non-duplicated TFs that are also present in target gene list ndup_tfs = list(set(non_zero_tfs).difference(dup_tfs)) self_tfs = list(set(ndup_tfs).intersection(self.prior.index.values.tolist())) # Set the diagonal of the self-interaction tfs to zero subset = self.prior.loc[self_tfs, self_tfs].values np.fill_diagonal(subset, 0) self.prior.set_value(self_tfs, self_tfs, subset) if non_zero_tfs: activity.loc[non_zero_tfs,:] = np.matrix(linalg.pinv2(self.prior[non_zero_tfs])) * np.matrix(self.exp_mat_halftau) return activity
Python
0.000024
ebfc7969fc2559d7f67eae628f00e0465b85e0c5
Add blur filter to supported URLs
imboclient/url/image.py
imboclient/url/image.py
from imboclient.url import accesstoken from imboclient.url import url class UrlImage (url.Url): def __init__(self, base_url, public_key, private_key, image_identifier): url.Url.__init__(self, base_url, public_key, private_key) self._image_identifier = image_identifier def resource_url(self): return self._base_url + '/users/' + self._public_key + '/images/' + self._image_identifier def border(self, color = '000000', width = 1, height = 1): self.add_query_param('t[]', "border:color={},width={},height={}".format(color, width, height)) return self def compress(self, quality = 75): self.add_query_param('t[]', "compress:quality={}".format(quality)) return self def convert(self, ctype): self._image_identifier = self._image_identifier[:32] + '.' + ctype return self def gif(self): self.convert('gif') return self def jpg(self): self.convert('jpg') return self def png(self): self.convert('png') return self def crop(self, x, y, width, height): self.add_query_param('t[]', "crop:x={},y={},width={},height={}".format(x, y, width, height)) return self def flip_horizontally(self): self.add_query_param('t[]', 'flipHorizontally') return self def flip_vertically(self): self.add_query_param('t[]', 'flipVertically') return self def resize(self, width = None, height = None): params = [] if (width): params.append('width='+str(width)) if (height): params.append('height='+str(height)) self.add_query_param('t[]', 'resize:' + ",".join(params)) return self def max_size(self, max_width = None, max_height = None): params = [] if (max_width): params.append('width='+str(max_width)) if (max_height): params.append('height='+str(max_height)) self.add_query_param('t[]', 'maxSize:' + ",".join(params)) return self def rotate(self, angle, bg = '000000'): self.add_query_param('t[]', "rotate:angle={},bg={}".format(angle, bg)) return self def thumbnail(self, width = 50, height = 50, fit = 'outbound'): self.add_query_param('t[]', "thumbnail:width={},height={},fit={}".format(width, height, fit)) return self def canvas(self, width, height, mode = None, x = None, y = None, bg = None): self.add_query_param('t[]', "canvas:width={},height={},mode={},x={},y={},bg={}".format(width, height, mode, x, y, bg)) return self def transpose(self): self.add_query_param('t[]', "transpose") return self def transverse(self): self.add_query_param('t[]', "transverse") return self def desaturate(self): self.add_query_param('t[]', "desaturate") return self def sepia(self, threshold = 80): self.add_query_param('t[]', "sepia:threshold={}".format(threshold)) return self def blur(self, type='gaussian', radius=5, sigma=2): self.add_query_param('t[]', "blur:type={},radius={},sigma={}".format(type, radius, sigma)) return self def reset(self): url.Url.reset() self._image_identifier = self._image_identifier[:32] return self
from imboclient.url import accesstoken from imboclient.url import url class UrlImage (url.Url): def __init__(self, base_url, public_key, private_key, image_identifier): url.Url.__init__(self, base_url, public_key, private_key) self._image_identifier = image_identifier def resource_url(self): return self._base_url + '/users/' + self._public_key + '/images/' + self._image_identifier def border(self, color = '000000', width = 1, height = 1): self.add_query_param('t[]', "border:color={},width={},height={}".format(color, width, height)) return self def compress(self, quality = 75): self.add_query_param('t[]', "compress:quality={}".format(quality)) return self def convert(self, ctype): self._image_identifier = self._image_identifier[:32] + '.' + ctype return self def gif(self): self.convert('gif') return self def jpg(self): self.convert('jpg') return self def png(self): self.convert('png') return self def crop(self, x, y, width, height): self.add_query_param('t[]', "crop:x={},y={},width={},height={}".format(x, y, width, height)) return self def flip_horizontally(self): self.add_query_param('t[]', 'flipHorizontally') return self def flip_vertically(self): self.add_query_param('t[]', 'flipVertically') return self def resize(self, width = None, height = None): params = [] if (width): params.append('width='+str(width)) if (height): params.append('height='+str(height)) self.add_query_param('t[]', 'resize:' + ",".join(params)) return self def max_size(self, max_width = None, max_height = None): params = [] if (max_width): params.append('width='+str(max_width)) if (max_height): params.append('height='+str(max_height)) self.add_query_param('t[]', 'maxSize:' + ",".join(params)) return self def rotate(self, angle, bg = '000000'): self.add_query_param('t[]', "rotate:angle={},bg={}".format(angle, bg)) return self def thumbnail(self, width = 50, height = 50, fit = 'outbound'): self.add_query_param('t[]', "thumbnail:width={},height={},fit={}".format(width, height, fit)) return self def canvas(self, width, height, mode = None, x = None, y = None, bg = None): self.add_query_param('t[]', "canvas:width={},height={},mode={},x={},y={},bg={}".format(width, height, mode, x, y, bg)) return self def transpose(self): self.add_query_param('t[]', "transpose") return self def transverse(self): self.add_query_param('t[]', "transverse") return self def desaturate(self): self.add_query_param('t[]', "desaturate") return self def sepia(self, threshold = 80): self.add_query_param('t[]', "sepia:threshold={}".format(threshold)) return self def reset(self): url.Url.reset() self._image_identifier = self._image_identifier[:32] return self
Python
0
f1d76611b6b7c2f1b1a15c72976e5c1029f3b4a8
Use the executable bit on AWS callback.
scripts/aws.py
scripts/aws.py
import logging import requests from requests.exceptions import RequestException import sys import boto.ec2 logger = logging.getLogger(__name__) class AWSConnection: def __init__(self, config): self.available = False self.config = config if 'cluster_name' in config: self.cluster_name = config.get('cluster_name') elif 'etcd' in config and isinstance(config['etcd'], dict): self.cluster_name = config['etcd'].get('scope', 'unknown') else: self.cluster_name = 'unknown' try: # get the instance id r = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document', timeout=0.1) except RequestException: logger.info("cannot query AWS meta-data") return if r.ok: try: content = r.json() self.instance_id = content['instanceId'] self.region = content['region'] except Exception as e: logger.info('unable to fetch instance id and region from AWS meta-data: {}'.format(e)) return self.available = True def aws_available(self): return self.available def _tag_ebs(self, role): """ set tags, carrying the cluster name, instance role and instance id for the EBS storage """ if not self.available: return False tags = {'Name': 'spilo_'+self.cluster_name, 'Role': role, 'Instance': self.instance_id} try: conn = boto.ec2.connect_to_region(self.region) volumes = conn.get_all_volumes(filters={'attachment.instance-id': self.instance_id}) conn.create_tags([v.id for v in volumes], tags) except Exception as e: logger.info('could not set tags for EBS storage devices attached: {}'.format(e)) return False return True def _tag_ec2(self, role): """ tag the current EC2 instance with a cluster role """ if not self.available: return False tags = {'Role': role} try: conn = boto.ec2.connect_to_region(self.region) conn.create_tags([self.instance_id], tags) except Exception as e: logger.info("could not set tags for EC2 instance {}: {}".format(self.instance_id, e)) return False return True def on_role_change(self, new_role): ret = self._tag_ec2(new_role) return self._tag_ebs(new_role) and ret if __name__ == '__main__': if len(sys.argv) != 4: print ("Usage: {0} action role name".format(sys.argv[0])) sys.exit(1) action, role, name = sys.argv[1:] if action in ('on_start', 'on_stop', 'on_role_change'): aws = AWSConnection({'cluster_name': name}) aws.on_role_change(role) sys.exit(0) sys.exit(2)
import logging import requests from requests.exceptions import RequestException import sys import boto.ec2 logger = logging.getLogger(__name__) class AWSConnection: def __init__(self, config): self.available = False self.config = config if 'cluster_name' in config: self.cluster_name = config.get('cluster_name') elif 'etcd' in config and isinstance(config['etcd'], dict): self.cluster_name = config['etcd'].get('scope', 'unknown') else: self.cluster_name = 'unknown' try: # get the instance id r = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document', timeout=0.1) except RequestException: logger.info("cannot query AWS meta-data") return if r.ok: try: content = r.json() self.instance_id = content['instanceId'] self.region = content['region'] except Exception as e: logger.info('unable to fetch instance id and region from AWS meta-data: {}'.format(e)) return self.available = True def aws_available(self): return self.available def _tag_ebs(self, role): """ set tags, carrying the cluster name, instance role and instance id for the EBS storage """ if not self.available: return False tags = {'Name': 'spilo_'+self.cluster_name, 'Role': role, 'Instance': self.instance_id} try: conn = boto.ec2.connect_to_region(self.region) volumes = conn.get_all_volumes(filters={'attachment.instance-id': self.instance_id}) conn.create_tags([v.id for v in volumes], tags) except Exception as e: logger.info('could not set tags for EBS storage devices attached: {}'.format(e)) return False return True def _tag_ec2(self, role): """ tag the current EC2 instance with a cluster role """ if not self.available: return False tags = {'Role': role} try: conn = boto.ec2.connect_to_region(self.region) conn.create_tags([self.instance_id], tags) except Exception as e: logger.info("could not set tags for EC2 instance {}: {}".format(self.instance_id, e)) return False return True def on_role_change(self, new_role): ret = self._tag_ec2(new_role) return self._tag_ebs(new_role) and ret if __name__ == '__main__': if len(sys.argv) != 4: print ("Usage: {0} action role name".format(sys.argv[0])) sys.exit(1) action, role, name = sys.argv[1:] if action in ('on_start', 'on_stop', 'on_role_change'): aws = AWSConnection({'cluster_name': name}) aws.on_role_change(role) sys.exit(0) sys.exit(2)
Python
0
c8bdf3b95b2ff8e4049a109f65728619a55a927c
Add parallelism to generate_departures (that was easy)
busstops/management/commands/generate_departures.py
busstops/management/commands/generate_departures.py
from multiprocessing import Pool from datetime import date, timedelta from django.core.management.base import BaseCommand from django.db import transaction from txc import txc from ...models import Region, Service, Journey, StopUsageUsage, StopPoint from ...utils import get_files_from_zipfile ONE_DAY = timedelta(days=1) def handle_timetable(service, timetable, day): if hasattr(timetable, 'operating_profile') and day.weekday() not in timetable.operating_profile.regular_days: return if not timetable.operating_period.contains(day): return # if not hasattr(timetable, 'groupings'): # return for grouping in timetable.groupings: stops = {row.part.stop.atco_code for row in grouping.rows} existent_stops = StopPoint.objects.filter(atco_code__in=stops).values_list('atco_code', flat=True) for vj in grouping.journeys: if not vj.should_show(day): continue date = day previous_time = None stopusageusages = [] journey = Journey(service=service, datetime='{} {}'.format(date, vj.departure_time)) for i, (su, time) in enumerate(vj.get_times()): if previous_time and previous_time > time: date += ONE_DAY if su.stop.atco_code in existent_stops: if not su.activity or su.activity.startswith('pickUp'): stopusageusages.append( StopUsageUsage(datetime='{} {}'.format(date, time), order=i, stop_id=su.stop.atco_code) ) journey.destination_id = su.stop.atco_code previous_time = time if journey.destination_id: journey.save() for suu in stopusageusages: suu.journey = journey StopUsageUsage.objects.bulk_create(stopusageusages) @transaction.atomic def handle_region(region): print(region) today = date.today() NEXT_WEEK = today + ONE_DAY * 7 # delete journeys before today print('deleting journeys before', today) print(Journey.objects.filter(service__region=region, datetime__date__lt=today).delete()) # get the date of the last generated journey last_journey = Journey.objects.filter(service__region=region).order_by('datetime').last() if last_journey: today = last_journey.datetime.date() + ONE_DAY if today > NEXT_WEEK: return for service in Service.objects.filter(region=region, current=True): # print(service) for i, xml_file in enumerate(get_files_from_zipfile(service)): timetable = txc.Timetable(xml_file, None) day = today while day <= NEXT_WEEK: # print('generating departures for', day) handle_timetable(service, timetable, day) day += ONE_DAY class Command(BaseCommand): def handle(self, *args, **options): pool = Pool(processes=4) pool.map(handle_region, Region.objects.all().exclude(id__in=('L', 'Y', 'NI')))
from datetime import date, timedelta from django.core.management.base import BaseCommand from django.db import transaction from txc import txc from ...models import Region, Service, Journey, StopUsageUsage, StopPoint from ...utils import get_files_from_zipfile ONE_DAY = timedelta(days=1) def handle_timetable(service, timetable, day): if hasattr(timetable, 'operating_profile') and day.weekday() not in timetable.operating_profile.regular_days: return if not timetable.operating_period.contains(day): return # if not hasattr(timetable, 'groupings'): # return for grouping in timetable.groupings: stops = {row.part.stop.atco_code for row in grouping.rows} existent_stops = StopPoint.objects.filter(atco_code__in=stops).values_list('atco_code', flat=True) for vj in grouping.journeys: if not vj.should_show(day): continue date = day previous_time = None stopusageusages = [] journey = Journey(service=service, datetime='{} {}'.format(date, vj.departure_time)) for i, (su, time) in enumerate(vj.get_times()): if previous_time and previous_time > time: date += ONE_DAY if su.stop.atco_code in existent_stops: if not su.activity or su.activity.startswith('pickUp'): stopusageusages.append( StopUsageUsage(datetime='{} {}'.format(date, time), order=i, stop_id=su.stop.atco_code) ) journey.destination_id = su.stop.atco_code previous_time = time if journey.destination_id: journey.save() for suu in stopusageusages: suu.journey = journey StopUsageUsage.objects.bulk_create(stopusageusages) @transaction.atomic def handle_region(region): today = date.today() NEXT_WEEK = today + ONE_DAY * 7 # delete journeys before today print('deleting journeys before', today) print(Journey.objects.filter(service__region=region, datetime__date__lt=today).delete()) # get the date of the last generated journey last_journey = Journey.objects.filter(service__region=region).order_by('datetime').last() if last_journey: today = last_journey.datetime.date() + ONE_DAY if today > NEXT_WEEK: return for service in Service.objects.filter(region=region, current=True): # print(service) for i, xml_file in enumerate(get_files_from_zipfile(service)): timetable = txc.Timetable(xml_file, None) day = today while day <= NEXT_WEEK: # print('generating departures for', day) handle_timetable(service, timetable, day) day += ONE_DAY class Command(BaseCommand): def handle(self, *args, **options): for region in Region.objects.all().exclude(id__in=('L', 'Y', 'NI')): print(region) handle_region(region)
Python
0.000002
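The departures command replaces a sequential region loop with Pool.map. A minimal sketch of that change in isolation; handle_region here is a stand-in for the real per-region worker, not the Django command:

```python
from multiprocessing import Pool

def handle_region(region):
    return region.lower()  # placeholder for the real per-region work

if __name__ == '__main__':
    # Each region is processed in one of four worker processes.
    with Pool(processes=4) as pool:
        print(pool.map(handle_region, ['NE', 'NW', 'SE', 'Y']))
```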
eaa062840c0b56bbd7c47986b77f08528bb39eb7
Fix typo leading to misinterpretation of the doc in the implementation.
ppp_datamodel/communication.py
ppp_datamodel/communication.py
"""Contains the classes representing a request to and a response of a module.""" import json from .abstractnode import register, AbstractNode class Request: """Represents a request. https://github.com/ProjetPP/Documentation/blob/master/module-communication.md#request """ __slots__ = ('language', 'sentence', 'tree') def __init__(self, language, tree_or_sentence, is_sentence=False): if is_sentence: self.sentence = tree_or_sentence else: tree = tree_or_sentence if isinstance(tree, dict) or isinstance(tree, str): tree = AbstractNode.from_json(tree) self.tree = tree self.language = language def __repr__(self): return '<PPP request language=%r, tree=%r>' % \ (self.language, self.tree) def __eq__(self, other): if isinstance(other, dict) or isinstance(other, str): other = Request.from_json(other) return self.language == other.language and \ self.tree == other.tree @staticmethod def from_json(data): if isinstance(data, str): data = json.loads(data) return Request(data['language'], data['tree']) def as_dict(self): return {'language': self.language, 'tree': self.tree.as_dict()} def as_json(self): return json.dumps(self.as_dict()) class Response: """Represents a response. https://github.com/ProjetPP/Documentation/blob/master/module-communication.md#response """ __slots__ = ('language', 'pertinence', 'tree') def __init__(self, language, pertinence, tree): if isinstance(tree, dict) or isinstance(tree, str): tree = AbstractNode.from_json(tree) self.language = language self.pertinence = pertinence self.tree = tree def __repr__(self): return '<PPP response language=%r, pertinence=%r, tree=%r>' % \ (self.language, self.pertinence, self.tree) def __eq__(self, other): if isinstance(other, dict) or isinstance(other, str): other = Response.from_json(other) return self.language == other.language and \ self.pertinence == other.pertinence and \ self.tree == other.tree @staticmethod def from_json(data): if isinstance(data, str): data = json.loads(data) return Response(data['language'], data['pertinence'], data['tree']) def as_dict(self): return {'language': self.language, 'pertinence': self.pertinence, 'tree': self.tree.as_dict()} def as_json(self): return json.dumps(self.as_dict())
"""Contains the classes representing a request to and a response of a module.""" import json from .abstractnode import register, AbstractNode class Request: """Represents a request. https://github.com/ProjetPP/Documentation/blob/master/module-communication.md#request """ __slots__ = ('language', 'pertinence', 'tree') def __init__(self, language, tree): if isinstance(tree, dict) or isinstance(tree, str): tree = AbstractNode.from_json(tree) self.language = language self.tree = tree def __repr__(self): return '<PPP request language=%r, tree=%r>' % \ (self.language, self.tree) def __eq__(self, other): if isinstance(other, dict) or isinstance(other, str): other = Request.from_json(other) return self.language == other.language and \ self.tree == other.tree @staticmethod def from_json(data): if isinstance(data, str): data = json.loads(data) return Request(data['language'], data['tree']) def as_dict(self): return {'language': self.language, 'tree': self.tree.as_dict()} def as_json(self): return json.dumps(self.as_dict()) class Response: """Represents a response. https://github.com/ProjetPP/Documentation/blob/master/module-communication.md#response """ __slots__ = ('language', 'pertinence', 'tree') def __init__(self, language, pertinence, tree): if isinstance(tree, dict) or isinstance(tree, str): tree = AbstractNode.from_json(tree) self.language = language self.pertinence = pertinence self.tree = tree def __repr__(self): return '<PPP response language=%r, pertinence=%r, tree=%r>' % \ (self.language, self.pertinence, self.tree) def __eq__(self, other): if isinstance(other, dict) or isinstance(other, str): other = Response.from_json(other) return self.language == other.language and \ self.pertinence == other.pertinence and \ self.tree == other.tree @staticmethod def from_json(data): if isinstance(data, str): data = json.loads(data) return Response(data['language'], data['pertinence'], data['tree']) def as_dict(self): return {'language': self.language, 'pertinence': self.pertinence, 'tree': self.tree.as_dict()} def as_json(self): return json.dumps(self.as_dict())
Python
0
c2d543a3de566443a2c61761f9a190e915426fec
Return stream_client instead of binding it inside method (tests now passing)
stream_django/client.py
stream_django/client.py
from stream_django import conf import os import stream from stream_django.conf import DJANGO_MAJOR_VERSION from django.core.exceptions import ImproperlyConfigured def init_client(raise_config_error=False): if conf.API_KEY and conf.API_SECRET: return stream.connect(conf.API_KEY, conf.API_SECRET, location=conf.LOCATION, timeout=conf.TIMEOUT) elif os.environ.get('STREAM_URL') is not None: return stream.connect() elif raise_config_error: raise ImproperlyConfigured('Stream credentials are not set in your settings') stream_client = init_client(raise_config_error=DJANGO_MAJOR_VERSION<1.7)
from stream_django import conf import os import stream from stream_django.conf import DJANGO_MAJOR_VERSION from django.core.exceptions import ImproperlyConfigured def init_client(mayRaise=False): if conf.API_KEY and conf.API_SECRET: stream_client = stream.connect( conf.API_KEY, conf.API_SECRET, location=conf.LOCATION, timeout=conf.TIMEOUT) elif os.environ.get('STREAM_URL') is not None: stream_client = stream.connect() else: stream_client = None if mayRaise: raise ImproperlyConfigured('Stream credentials are not set in your settings') stream_client = init_client(mayRaise=DJANGO_MAJOR_VERSION<1.7)
Python
0
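The stream_django refactor makes init_client return the client rather than assign it inside the function, so the module-level binding stays in one place and tests can call the factory directly. A sketch of the shape, with connect as a placeholder for the real SDK call:

```python
def connect(key, secret):
    return ('client', key, secret)  # placeholder for the real SDK call

def init_client(key=None, secret=None, raise_config_error=False):
    if key and secret:
        return connect(key, secret)
    if raise_config_error:
        raise RuntimeError('credentials are not configured')
    return None

assert init_client('k', 's') is not None
assert init_client() is None
```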
c895a8b62754f5df32aba06cd2231ba43acc9576
Update algo.py
server/algo.py
server/algo.py
from __future__ import division import math import itertools SPACING = 5 def iter_to_runs(visibles, pixels): cur_val = 6666666 start_idx = None out = [] for i, val in enumerate(itertools.chain(visibles, [None])): if cur_val != val: if cur_val is True: # we just ended a run of "True" values out.append((pixels[start_idx], pixels[i - 1])) cur_val = val start_idx = i return out def generate_line_segments(radius, center): """Generate radii of a circle that are a fixed width apart on the circle. Args: radius: radius of the circle, in pixels center: center of the circle (x, y) as tuple Returns: iterator of points (center, point on circle) """ ang_step = SPACING / radius # angle step in radians ang = 0 while ang < 2 * math.pi: ang += ang_step yield (center, (center[0] + radius * math.cos(ang), center[1] + radius * math.sin(ang))) def generate_visible(tower_height, heightmap): """Trace a ray and determine if a region is viewable. Args: tower_height: the elevation in meters above sea level of your antenna heightmap: an enumerable of heights in a given direction Returns: an enumerable of True/False for visibility """ min_angle = -10000 for i, height in enumerate(heightmap): if tower_height - height == 0: angle_to_point = 0 elif tower_height > height: angle_to_point = math.atan(i / (tower_height - height)) else: angle_to_point = math.atan((height - tower_height) / i) + math.pi / 2 if angle_to_point >= min_angle: min_angle = angle_to_point yield True else: yield False if __name__ == '__main__': assert iter_to_runs([False, False, True, True, False, True, False, True, True], list(range(9))) == [(2, 3), (5, 5), (7, 8)] assert iter_to_runs([True], [0]) == [(0, 0)] assert iter_to_runs([True, True, True, True, False, True, True], list(range(7))) == [(0, 3), (5, 6)] import matplotlib.pyplot as plt heightmap = [math.sin(x/15.0) * x for x in xrange(360)] tower_height = 100.0 # meters above MSL filt = generate_visible(tower_height, heightmap) fhm = [h if fl else 0 for (h, fl) in zip(heightmap, filt)] plt.scatter(range(len(heightmap)), fhm) plt.scatter([0], [tower_height], color='red') plt.plot(heightmap) plt.show()
from __future__ import division import math import itertools SPACING = 15 def iter_to_runs(visibles, pixels): cur_val = 6666666 start_idx = None out = [] for i, val in enumerate(itertools.chain(visibles, [None])): if cur_val != val: if cur_val is True: # we just ended a run of "True" values out.append((pixels[start_idx], pixels[i - 1])) cur_val = val start_idx = i return out def generate_line_segments(radius, center): """Generate radii of a circle that are a fixed width apart on the circle. Args: radius: radius of the circle, in pixels center: center of the circle (x, y) as tuple Returns: iterator of points (center, point on circle) """ ang_step = SPACING / radius # angle step in radians ang = 0 while ang < 2 * math.pi: ang += ang_step yield (center, (center[0] + radius * math.cos(ang), center[1] + radius * math.sin(ang))) def generate_visible(tower_height, heightmap): """Trace a ray and determine if a region is viewable. Args: tower_height: the elevation in meters above sea level of your antenna heightmap: an enumerable of heights in a given direction Returns: an enumerable of True/False for visibility """ min_angle = -10000 for i, height in enumerate(heightmap): if tower_height - height == 0: angle_to_point = 0 elif tower_height > height: angle_to_point = math.atan(i / (tower_height - height)) else: angle_to_point = math.atan((height - tower_height) / i) + math.pi / 2 if angle_to_point >= min_angle: min_angle = angle_to_point yield True else: yield False if __name__ == '__main__': assert iter_to_runs([False, False, True, True, False, True, False, True, True], list(range(9))) == [(2, 3), (5, 5), (7, 8)] assert iter_to_runs([True], [0]) == [(0, 0)] assert iter_to_runs([True, True, True, True, False, True, True], list(range(7))) == [(0, 3), (5, 6)] import matplotlib.pyplot as plt heightmap = [math.sin(x/15.0) * x for x in xrange(360)] tower_height = 100.0 # meters above MSL filt = generate_visible(tower_height, heightmap) fhm = [h if fl else 0 for (h, fl) in zip(heightmap, filt)] plt.scatter(range(len(heightmap)), fhm) plt.scatter([0], [tower_height], color='red') plt.plot(heightmap) plt.show()
Python
0.000007
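generate_visible above is a horizon scan: a sample stays visible while its elevation angle beats the running maximum along the ray. The same idea in its usual slope form, which avoids the atan() branching; this is a restatement of the technique, not the module's exact code:

```python
def visible(tower_height, heights):
    best = float('-inf')
    for distance, h in enumerate(heights, start=1):
        slope = (h - tower_height) / distance  # monotone in elevation angle
        yield slope >= best
        best = max(best, slope)

print(list(visible(100.0, [50, 80, 60, 120, 110])))
# -> [True, True, False, True, False]
```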
315a8ea99240d0eebe2335f25269154475dda679
Fix broken svn_export test
py/desimodel/install.py
py/desimodel/install.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- """ desimodel.install ================= Install data files not handled by pip install. """ def default_install_dir(): """Return the default install directory. Assumes this file lives in a 'site-packages' directory. Returns ------- :class:`str` The path to the install directory. """ from os.path import dirname return dirname(dirname(dirname(dirname(dirname(__file__))))) def svn_export(desimodel_version=None): """Create a :command:`svn export` command suitable for downloading a particular desimodel version. Parameters ---------- desimodel_version : :class:`str`, optional The version X.Y.Z to download, trunk, or something of the form branches/... Defaults to trunk. Returns ------- :class:`list` A :command:`svn` command in list form, suitable for passing to :class:`subprocess.Popen`. """ from . import __version__ as this_version if desimodel_version is None: export_version = 'trunk' elif desimodel_version == 'trunk' or 'branches/' in desimodel_version: export_version = desimodel_version else: export_version = 'tags/' + desimodel_version return ["svn", "export", ("https://desi.lbl.gov/svn/code/desimodel/" + "{0}/data").format(export_version)] def install(desimodel=None, version=None): """Primary workhorse function. Parameters ---------- desimodel : :class:`str`, optional Allows the install directory to be explicitly set. version : :class:`str`, optional Allows the desimodel version to be explicitly set. Raises ------ :class:`RuntimeError` Standard error output from svn export command when status is non-zero. """ from os import chdir, environ from os.path import exists, join from subprocess import Popen, PIPE try: install_dir = environ['DESIMODEL'] except KeyError: if desimodel is not None: install_dir = desimodel else: install_dir = default_install_dir() if exists(join(install_dir, 'data')): raise ValueError("{0} already exists!".format(join(install_dir, 'data'))) chdir(install_dir) command = svn_export(version) # print(' '.join(command)) proc = Popen(command, stdout=PIPE, stderr=PIPE) out, err = proc.communicate() status = proc.returncode if status != 0: raise RuntimeError(err.rstrip()) def main(): """Entry point for the :command:`install_desimodel_data` script. Returns ------- :class:`int` An integer suitable for passing to :func:`sys.exit`. """ from sys import argv from argparse import ArgumentParser desc = """Install desimodel data. This script will attempt to download and install the desimodel data/ directory. The script will attempt to install the data in the following locations, in order of preference: 1. :envvar:`DESIMODEL`, that is, the directory specified by the environment variable. 2. The value set with the -d option on the command line. 3. A directory relative to the file containing this script. This directory is currently {0}. If the data directory already exists, this script will not do anything. """.format(default_install_dir()) parser = ArgumentParser(description=desc, prog=argv[0]) parser.add_argument('-d', '--desimodel', action='store', dest='desimodel', metavar='DESIMODEL', help=('Place the data/ directory in this directory. ' + 'In other words, the environment variable ' + 'DESIMODEL should be set to this directory.')) parser.add_argument('-D', '--desimodel-version', action='store', dest='desimodel_version', metavar='VERSION', help='Explicitly set the version to download.') options = parser.parse_args() try: install(options.desimodel, options.desimodel_version) except (ValueError, RuntimeError) as e: print(e) return 1 return 0
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- """ desimodel.install ================= Install data files not handled by pip install. """ def default_install_dir(): """Return the default install directory. Assumes this file lives in a 'site-packages' directory. Returns ------- :class:`str` The path to the install directory. """ from os.path import dirname return dirname(dirname(dirname(dirname(dirname(__file__))))) def svn_export(desimodel_version=None): """Create a :command:`svn export` command suitable for downloading a particular desimodel version. Parameters ---------- desimodel_version : :class:`str`, optional The version X.Y.Z to download, trunk, or something of the form branches/... Defaults to trunk. Returns ------- :class:`list` A :command:`svn` command in list form, suitable for passing to :class:`subprocess.Popen`. """ from . import __version__ as this_version if desimodel_version is None: export_version = 'trunk' elif 'branches/' in desimodel_version: export_version = desimodel_version else: export_version = 'tags/' + desimodel_version return ["svn", "export", ("https://desi.lbl.gov/svn/code/desimodel/" + "{0}/data").format(export_version)] def install(desimodel=None, version=None): """Primary workhorse function. Parameters ---------- desimodel : :class:`str`, optional Allows the install directory to be explicitly set. version : :class:`str`, optional Allows the desimodel version to be explicitly set. Raises ------ :class:`RuntimeError` Standard error output from svn export command when status is non-zero. """ from os import chdir, environ from os.path import exists, join from subprocess import Popen, PIPE try: install_dir = environ['DESIMODEL'] except KeyError: if desimodel is not None: install_dir = desimodel else: install_dir = default_install_dir() if exists(join(install_dir, 'data')): raise ValueError("{0} already exists!".format(join(install_dir, 'data'))) chdir(install_dir) command = svn_export(version) # print(' '.join(command)) proc = Popen(command, stdout=PIPE, stderr=PIPE) out, err = proc.communicate() status = proc.returncode if status != 0: raise RuntimeError(err.rstrip()) def main(): """Entry point for the :command:`install_desimodel_data` script. Returns ------- :class:`int` An integer suitable for passing to :func:`sys.exit`. """ from sys import argv from argparse import ArgumentParser desc = """Install desimodel data. This script will attempt to download and install the desimodel data/ directory. The script will attempt to install the data in the following locations, in order of preference: 1. :envvar:`DESIMODEL`, that is, the directory specified by the environment variable. 2. The value set with the -d option on the command line. 3. A directory relative to the file containing this script. This directory is currently {0}. If the data directory already exists, this script will not do anything. """.format(default_install_dir()) parser = ArgumentParser(description=desc, prog=argv[0]) parser.add_argument('-d', '--desimodel', action='store', dest='desimodel', metavar='DESIMODEL', help=('Place the data/ directory in this directory. ' + 'In other words, the environment variable ' + 'DESIMODEL should be set to this directory.')) parser.add_argument('-D', '--desimodel-version', action='store', dest='desimodel_version', metavar='VERSION', help='Explicitly set the version to download.') options = parser.parse_args() try: install(options.desimodel, options.desimodel_version) except (ValueError, RuntimeError) as e: print(e) return 1 return 0
Python
0.000007
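For illustration, a minimal runnable sketch of the install-directory resolution order this desimodel installer documents -- environment variable first, then an explicit argument, then a package-relative default. The function name and fallback path below are hypothetical, not part of the record:

import os

def resolve_install_dir(explicit=None, default='/opt/desimodel'):
    # Preference order: $DESIMODEL, then the caller-supplied path,
    # then the fallback default.
    return os.environ.get('DESIMODEL') or explicit or default

print(resolve_install_dir(explicit='/tmp/desimodel'))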
066e60897aa931b22ce92776b896912dbec3ccf6
bump dev version
py/desispec/_version.py
py/desispec/_version.py
__version__ = '0.47.1.dev6182'
__version__ = '0.47.1.dev6104'
Python
0
54c48073dfb8ffd418efe234c0c107f7a5c303a9
Fix failing imports in Python 2
svg/templatetags/svg.py
svg/templatetags/svg.py
from __future__ import absolute_import import logging import os from django import template from django.conf import settings from django.contrib.staticfiles import finders from django.utils.safestring import mark_safe from svg.exceptions import SVGNotFound logger = logging.getLogger(__name__) register = template.Library() @register.simple_tag def svg(filename): path = finders.find(os.path.join('svg', '%s.svg' % filename), all=True) if not path: message = "SVG 'svg/%s.svg' not found" % filename if settings.DEBUG: raise SVGNotFound(message) else: logger.warning(message) return '' if isinstance(path, (list, tuple)): path = path[0] with open(path) as svg_file: svg = mark_safe(svg_file.read()) return svg
import logging import os from django import template from django.conf import settings from django.contrib.staticfiles import finders from django.utils.safestring import mark_safe from svg.exceptions import SVGNotFound logger = logging.getLogger(__name__) register = template.Library() @register.simple_tag def svg(filename): path = finders.find(os.path.join('svg', '%s.svg' % filename), all=True) if not path: message = "SVG 'svg/%s.svg' not found" % filename if settings.DEBUG: raise SVGNotFound(message) else: logger.warning(message) return '' if isinstance(path, (list, tuple)): path = path[0] with open(path) as svg_file: svg = mark_safe(svg_file.read()) return svg
Python
0.000196
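A hedged sketch of the Python 2 pitfall this commit guards against; 'json' below stands in for the real clash (a module named svg.py inside the svg package importing the top-level svg package), since the shadowing itself needs two files to reproduce:

from __future__ import absolute_import

# With absolute imports in force (the Python 3 default), 'import json'
# inside a package always targets the top-level module, even if the
# package ships its own json.py. Without it, Python 2 resolves the name
# relatively and picks the sibling file -- the failure mode hit by
# svg/templatetags/svg.py importing the top-level 'svg' package.
import json
print(json.dumps({'ok': True}))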
b71ef8c05a9afa9eb3614c863650c12df0967fae
document methods
svtools/vcf/genotype.py
svtools/vcf/genotype.py
import sys class Genotype(object): ''' This class stores information about each sample. ''' def __init__(self, variant, gt): ''' Initialize the class. All instances have a GT field. ''' self.format = dict() self.variant = variant self.set_format('GT', gt) def set_formats(self, fields, values): ''' Set many format fields for this instance. Updates format information in the owning Variant class. ''' format_set = self.variant.format_set add_to_active = self.variant.active_formats.add active_formats = self.variant.active_formats format_dict = self.format for field, value in zip(fields, values): if field in format_set: format_dict[field] = value if field not in active_formats: add_to_active(field) else: sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n') sys.exit(1) def set_format(self, field, value, update_active=True): ''' Set information for an individual format field. ''' if field in self.variant.format_set: self.format[field] = value if field not in self.variant.active_formats: self.variant.active_formats.add(field) self.variant.update_active_format_list() else: sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n') sys.exit(1) def get_format(self, field): ''' Get value of particular field key ''' return self.format[field] def get_gt_string(self): ''' Convert object back to string. If some values are missing (at the end for example) they are printed out as all format fields present in any Genotype instance in the Variant line are tracked. ''' g_list = list() for f in self.variant.active_format_list: if f in self.format: if type(self.format[f]) == float: g_list.append('%0.2f' % self.format[f]) else: g_list.append(str(self.format[f])) else: g_list.append('.') return ':'.join(g_list)
import sys class Genotype(object): def __init__(self, variant, gt): self.format = dict() self.variant = variant self.set_format('GT', gt) def set_formats(self, fields, values): format_set = self.variant.format_set add_to_active = self.variant.active_formats.add active_formats = self.variant.active_formats format_dict = self.format for field, value in zip(fields, values): if field in format_set: format_dict[field] = value if field not in active_formats: add_to_active(field) else: sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n') sys.exit(1) def set_format(self, field, value, update_active=True): if field in self.variant.format_set: self.format[field] = value if field not in self.variant.active_formats: self.variant.active_formats.add(field) self.variant.update_active_format_list() else: sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n') sys.exit(1) def get_format(self, field): return self.format[field] def get_gt_string(self): g_list = list() for f in self.variant.active_format_list: if f in self.format: if type(self.format[f]) == float: g_list.append('%0.2f' % self.format[f]) else: g_list.append(str(self.format[f])) else: g_list.append('.') return ':'.join(g_list)
Python
0.000002
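The serialization trick the new docstrings describe -- every Genotype prints one value per globally active format field, emitting '.' where a sample has no value -- can be shown standalone. Field names and values below are hypothetical:

active_format_list = ['GT', 'DP', 'AB']      # fields seen anywhere on the line
sample = {'GT': '0/1', 'AB': 0.256789}       # this sample lacks DP

def gt_string(fmt, active):
    parts = []
    for f in active:
        if f not in fmt:
            parts.append('.')                # placeholder for a missing field
        elif isinstance(fmt[f], float):
            parts.append('%0.2f' % fmt[f])   # floats rounded as in the record
        else:
            parts.append(str(fmt[f]))
    return ':'.join(parts)

print(gt_string(sample, active_format_list))  # 0/1:.:0.26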
e2a0fb602c9de9f988d733a30b466dc400cd9503
update issue 84
test/test_issue084.py
test/test_issue084.py
from codecs import getreader from StringIO import StringIO from rdflib.term import URIRef from rdflib.graph import Graph rdf = u"""@prefix skos: <http://www.w3.org/2004/02/skos/core#> . @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . @prefix : <http://www.test.org/#> . :world rdf:type skos:Concept; skos:prefLabel "World"@en. :africa rdf:type skos:Concept; skos:prefLabel "Africa"@en; skos:broaderTransitive :world. :CI rdf:type skos:Concept; skos:prefLabel "C\u00f4te d'Ivoire"@en; skos:broaderTransitive :africa. """ rdf_utf8 = rdf.encode('utf-8') rdf_reader = getreader('utf-8')(StringIO(rdf.encode('utf-8'))) def test_a(): g = Graph() g.parse(data=rdf, format='n3') v = g.value(subject=URIRef("http://www.test.org/#CI"), predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel")) assert v==u"C\u00f4te d'Ivoire" def test_b(): g = Graph() g.parse(data=rdf_utf8, format='n3') v = g.value(subject=URIRef("http://www.test.org/#CI"), predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel")) assert v==u"C\u00f4te d'Ivoire" def test_c(): g = Graph() g.parse(source=rdf_reader, format='n3') v = g.value(subject=URIRef("http://www.test.org/#CI"), predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel")) assert v==u"C\u00f4te d'Ivoire"
from rdflib.term import URIRef from rdflib.graph import Graph rdf = u"""@prefix skos: <http://www.w3.org/2004/02/skos/core#> . @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . @prefix : <http://www.test.org/#> . :world rdf:type skos:Concept; skos:prefLabel "World"@en. :africa rdf:type skos:Concept; skos:prefLabel "Africa"@en; skos:broaderTransitive :world. :CI rdf:type skos:Concept; skos:prefLabel "C\u00f4te d'Ivoire"@en; skos:broaderTransitive :africa. """.encode('utf-8') def test_issue(): g = Graph() g.parse(data=rdf, format='n3') v = g.value(subject=URIRef("http://www.test.org/#CI"), predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel")) assert v==u"C\u00f4te d'Ivoire"
Python
0
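The point of the expanded test is that the same N3 payload parses identically from three input shapes: a unicode string, its UTF-8 bytes, and a decoding stream reader. A standalone round-trip sketch of those shapes, with io.BytesIO standing in for the Python 2 StringIO used in the record:

from codecs import getreader
from io import BytesIO

text = u"C\u00f4te d'Ivoire"
raw = text.encode('utf-8')                  # the bytes form fed to test_b
reader = getreader('utf-8')(BytesIO(raw))   # the stream form fed to test_c
assert reader.read() == text                # all three shapes carry one payload
print('round trip ok')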
f486280a264c195c989d59f0b3fa631d9e165a18
Fix comment
servo_write.py
servo_write.py
from tamproxy import Sketch, SyncedSketch, Timer from tamproxy.devices import Servo class ServoWrite(Sketch): """Cycles a servo back and forth between 1050us and 1950us pulse widths (most servos are 1000-2000)""" def setup(self): self.servo = Servo(self.tamp, 9) self.servo.write(1050) self.timer = Timer() self.end = False def loop(self): if (self.timer.millis() > 2000): self.timer.reset() if self.end: self.servo.write(1050) else: self.servo.write(1950) self.end = not self.end if __name__ == "__main__": sketch = ServoWrite() sketch.run()
from tamproxy import Sketch, SyncedSketch, Timer from tamproxy.devices import Servo # Cycles a motor back and forth between -255 and 255 PWM every ~5 seconds class ServoWrite(Sketch): def setup(self): self.servo = Servo(self.tamp, 9) self.servo.write(1050) self.timer = Timer() self.end = False def loop(self): if (self.timer.millis() > 2000): self.timer.reset() if self.end: self.servo.write(1050) else: self.servo.write(1950) self.end = not self.end if __name__ == "__main__": sketch = ServoWrite() sketch.run()
Python
0
a0a2810e52ba27bb2b6eba5d13d8a3bc88bca266
Complete overhaul because I hated the ConfigParser module.
camoco/Config.py
camoco/Config.py
#!/usr/env/python3 import os import configparser import yaml import pprint global cf default_config = '''--- # YAML Camoco Configuration File options: basedir: ~/.camoco/ testdir: ~/.camoco/tests/ logging: log_level: verbose test: force: RefGen: True COB: True Ontology: True refgen: Zm5bFGS cob: NewRoot ontology: ZmIonome term: Fe57 gene: GRMZM2G000014 ''' class Level(dict): ''' Ha! Take that config parser! I am accessing everything like an object. ''' def __init__(self,*args,**kwargs): super().__init__(*args,**kwargs) def __getattr__(self,item): if isinstance(self[item],dict): return Level(self[item]) else: if 'dir' in item and '~' in self[item]: return os.path.expanduser(self[item]) return self[item] class Config(object): def __init__(self,filename): filename = os.path.expanduser(filename) self.data = Level(yaml.load(open(filename,'r'))) def __getattr__(self,item): return Level(self.data[item]) def __repr__(self): return pprint.pformat(self.data) ''' ------------------------------------------------------------------------- Program Logic ''' cf_file = os.path.expanduser('~/.camoco.conf') # Check to see if there is a config file available if not os.path.isfile(cf_file): with open(cf_file, 'w') as CF: print(default_config, file=CF) else: cf = Config(cf_file)
#!/usr/env/python3 import os import configparser global cf cf = configparser.ConfigParser() cf._interpolation = configparser.ExtendedInterpolation() cf_file = os.path.expanduser('~/.camoco.conf') default_config = ''' [options] basedir = ~/.camoco/ testdir = ~/.camoco/tests/ [logging] log_level = verbose [test] force = True refgen = Zm5bFGS cob = NewRoot ontology = ZmIonome term = Fe57 gene = GRMZM2G000014 ''' # Check to see if if not os.path.isfile(cf_file): with open(cf_file, 'w') as CF: print(default_config, file=CF) cf.read(os.path.expanduser('~/.camoco.conf'))
Python
0
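The Level class is the heart of the overhaul: attribute access that recurses into nested dicts. A trimmed standalone version (without the record's home-directory expansion) behaves like this:

class Level(dict):
    # Attribute lookup falls through to dict keys; nested dicts are
    # re-wrapped so dotted access chains keep working. Missing keys raise
    # KeyError rather than AttributeError, matching the record.
    def __getattr__(self, item):
        value = self[item]
        return Level(value) if isinstance(value, dict) else value

cfg = Level({'options': {'basedir': '~/.camoco/'},
             'logging': {'log_level': 'verbose'}})
print(cfg.options.basedir)     # ~/.camoco/
print(cfg.logging.log_level)   # verbose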
648de375f5e9ae1620bc836e5d647688b541690c
Add atom package
test/test_packages.py
test/test_packages.py
import pytest @pytest.mark.parametrize("name", [ ("apt-file"), ("apt-transport-https"), ("atom"), ("blktrace"), ("ca-certificates"), ("chromium-browser"), ("cron"), ("curl"), ("diod"), ("docker-ce"), ("fonts-font-awesome"), ("git"), ("gnupg"), ("handbrake"), ("handbrake-cli"), ("haveged"), ("htop"), ("i3"), ("iotop"), ("language-pack-en-base"), ("laptop-mode-tools"), ("nfs-common"), ("ntop"), ("ntp"), ("openssh-client"), ("openssh-server"), ("openssh-sftp-server"), ("openssl"), ("python"), ("python-pip"), ("software-properties-common"), ("suckless-tools"), ("sysstat"), ("tree"), ("vagrant"), ("vim"), ("virtualbox"), ("vlc"), ("wget"), ("whois"), ("x264"), ("xfce4-terminal"), ("xfonts-terminus"), ("xinit"), ]) def test_packages(Package, name): assert Package(name).is_installed
import pytest @pytest.mark.parametrize("name", [ ("apt-file"), ("apt-transport-https"), ("blktrace"), ("ca-certificates"), ("chromium-browser"), ("cron"), ("curl"), ("diod"), ("docker-ce"), ("fonts-font-awesome"), ("git"), ("gnupg"), ("handbrake"), ("handbrake-cli"), ("haveged"), ("htop"), ("i3"), ("iotop"), ("language-pack-en-base"), ("laptop-mode-tools"), ("nfs-common"), ("ntop"), ("ntp"), ("openssh-client"), ("openssh-server"), ("openssh-sftp-server"), ("openssl"), ("python"), ("python-pip"), ("software-properties-common"), ("suckless-tools"), ("sysstat"), ("tree"), ("vagrant"), ("vim"), ("virtualbox"), ("vlc"), ("wget"), ("whois"), ("x264"), ("xfce4-terminal"), ("xfonts-terminus"), ("xinit"), ]) def test_packages(Package, name): assert Package(name).is_installed
Python
0.000002
89664ec37036553534c07d65f2df2b9fa07bfe80
Check total weights remain correct.
test/test_priority.py
test/test_priority.py
# -*- coding: utf-8 -*- """ test_priority ~~~~~~~~~~~~~ Tests for the Priority trees """ from hypothesis import given from hypothesis.strategies import integers, lists, tuples import priority STREAMS_AND_WEIGHTS = lists( elements=tuples( integers(min_value=1), integers(min_value=1, max_value=255) ), unique_by=lambda x: x[0], ) class TestPriorityTree(object): def test_priority_tree_one_stream(self): """ When only one stream is in the PriorityTree, priorities are easy. """ p = priority.PriorityTree() p.insert_stream(stream_id=1) priorities = p.priorities() assert len(priorities) == 1 priorities.total_weight == 16 @given(lists(elements=integers(min_value=0))) def test_priority_tree_single_level(self, weights): """ If lots of elements are added to the tree all at the top level, their weights are summed properly and the priorities object has the correct length. """ p = priority.PriorityTree() stream_id = 1 for weight in weights: p.insert_stream(stream_id=stream_id, weight=weight) stream_id += 1 priorities = p.priorities() assert len(priorities) == len(weights) assert priorities.total_weight == sum(weights) @given(STREAMS_AND_WEIGHTS) def test_priorities_stream_weights(self, stream_data): """ For a given set of priorities, we can index by ID and find the weight of the stream. """ p = priority.PriorityTree() for stream_id, weight in stream_data: p.insert_stream(stream_id=stream_id, weight=weight) priorities = p.priorities() for stream_id, weight in stream_data: assert weight == priorities.stream_weight(stream_id) def test_drilling_down(self, readme_tree): """ We can drill down each layer of the tree by stream ID. """ top_level = readme_tree.priorities() assert 7 in top_level dependents = top_level[7] assert len(dependents) == 1 assert 11 in dependents assert dependents.total_weight == 16 second_level_dependents = dependents[11] assert len(second_level_dependents) == 1 assert 9 in second_level_dependents assert second_level_dependents.total_weight == 8
# -*- coding: utf-8 -*- """ test_priority ~~~~~~~~~~~~~ Tests for the Priority trees """ from hypothesis import given from hypothesis.strategies import integers, lists, tuples import priority STREAMS_AND_WEIGHTS = lists( elements=tuples( integers(min_value=1), integers(min_value=1, max_value=255) ), unique_by=lambda x: x[0], ) class TestPriorityTree(object): def test_priority_tree_one_stream(self): """ When only one stream is in the PriorityTree, priorities are easy. """ p = priority.PriorityTree() p.insert_stream(stream_id=1) priorities = p.priorities() assert len(priorities) == 1 priorities.total_weight == 16 @given(lists(elements=integers(min_value=0))) def test_priority_tree_single_level(self, weights): """ If lots of elements are added to the tree all at the top level, their weights are summed properly and the priorities object has the correct length. """ p = priority.PriorityTree() stream_id = 1 for weight in weights: p.insert_stream(stream_id=stream_id, weight=weight) stream_id += 1 priorities = p.priorities() assert len(priorities) == len(weights) assert priorities.total_weight == sum(weights) @given(STREAMS_AND_WEIGHTS) def test_priorities_stream_weights(self, stream_data): """ For a given set of priorities, we can index by ID and find the weight of the stream. """ p = priority.PriorityTree() for stream_id, weight in stream_data: p.insert_stream(stream_id=stream_id, weight=weight) priorities = p.priorities() for stream_id, weight in stream_data: assert weight == priorities.stream_weight(stream_id) def test_drilling_down(self, readme_tree): """ We can drill down each layer of the tree by stream ID. """ top_level = readme_tree.priorities() assert 7 in top_level dependents = top_level[7] assert len(dependents) == 1 assert 11 in dependents second_level_dependents = dependents[11] assert len(second_level_dependents) == 1 assert 9 in second_level_dependents
Python
0
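The unique_by strategy is what keeps the generated stream IDs distinct while letting weights repeat. A minimal self-check in the same style (assumes the hypothesis package is installed; the test body is a new illustration, not from the record):

from hypothesis import given
from hypothesis.strategies import integers, lists, tuples

STREAMS_AND_WEIGHTS = lists(
    tuples(integers(min_value=1), integers(min_value=1, max_value=255)),
    unique_by=lambda x: x[0],   # dedupe on the stream ID, not the weight
)

@given(STREAMS_AND_WEIGHTS)
def test_stream_ids_are_unique(data):
    ids = [stream_id for stream_id, _ in data]
    assert len(ids) == len(set(ids))

test_stream_ids_are_unique()    # hypothesis runs many generated cases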
4530eea92e37c087b6f25fe3a0e48e54b949b68b
allow setup.py to work without django
cart/__init__.py
cart/__init__.py
__version__ = '1.1' VERSION = tuple(map(int, __version__.split('.'))) + ('dev',) def get_helper_module(): '''Get the helper module as defined in the settings.''' # need to be able to import file without importing django, so these can't go # at the top from django.utils.importlib import import_module from django.core.exceptions import ImproperlyConfigured import settings as cart_settings if cart_settings.HELPER_MODULE: try: package = import_module(cart_settings.HELPER_MODULE) except ImportError, e: raise ImproperlyConfigured(u'The CART_HELPER_MODULE setting refers to a ' \ 'non-existent package, or the import failed ' \ 'due to an error. Error details: %s' % e) return package else: return None
from django.utils.importlib import import_module from django.core.exceptions import ImproperlyConfigured __version__ = '1.1' VERSION = tuple(map(int, __version__.split('.'))) + ('dev',) def get_helper_module(): '''Get the helper module as defined in the settings.''' import settings as cart_settings if cart_settings.HELPER_MODULE: try: package = import_module(cart_settings.HELPER_MODULE) except ImportError, e: raise ImproperlyConfigured(u'The CART_HELPER_MODULE setting refers to a ' \ 'non-existent package, or the import failed ' \ 'due to an error. Error details: %s' % e) return package else: return None
Python
0.000001
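The pattern applied here -- deferring framework imports into the function body so the package can be imported (for example by setup.py) without Django installed -- in a stripped, hypothetical form using stdlib modules only:

__version__ = '1.1'   # importable metadata with no heavy dependencies

def get_helper_module(path):
    # Import machinery is pulled in only when the helper is actually
    # requested, so merely importing this module stays dependency-free.
    from importlib import import_module
    return import_module(path)

print(get_helper_module('json').dumps({'lazy': True}))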
a3b6306b2288b6dc4a9ec6e04a5962c7fb94699e
Update addition.py: s/stop/N
pyeda/logic/addition.py
pyeda/logic/addition.py
""" Logic functions for addition Interface Functions: ripple_carry_add kogge_stone_add brent_kung_add """ # Disable "invalid variable name" # pylint: disable=C0103 from math import floor, log from pyeda.boolalg.expr import Xor, Majority from pyeda.boolalg.vexpr import BitVector from pyeda.util import clog2 def ripple_carry_add(A, B, cin=0): """Return symbolic logic for an N-bit ripple carry adder.""" assert len(A) == len(B) s, c = list(), list() for i, ai in enumerate(A, A.start): carry = (cin if i == 0 else c[i-1]) s.append(Xor(ai, B[i], carry)) c.append(Majority(ai, B[i], carry)) return BitVector(s), BitVector(c) def kogge_stone_add(A, B, cin=0): """Return symbolic logic for an N-bit Kogge-Stone adder.""" assert len(A) == len(B) N = len(A) # generate/propagate logic g = [A[i] * B[i] for i in range(N)] p = [Xor(A[i], B[i]) for i in range(N)] for i in range(clog2(N)): start = 1 << i for j in range(start, N): g[j] = g[j] + p[j] * g[j-start] p[j] = p[j] * p[j-start] # sum logic s = [Xor(A[i], B[i], (cin if i == 0 else g[i-1])) for i in range(N)] return BitVector(s), BitVector(g) def brent_kung_add(A, B, cin=0): """Return symbolic logic for an N-bit Brent-Kung adder.""" assert len(A) == len(B) N = len(A) # generate/propagate logic g = [A[i] * B[i] for i in range(N)] p = [Xor(A[i], B[i]) for i in range(N)] # carry tree for i in range(floor(log(N, 2))): step = 2**i for start in range(2**(i+1)-1, N, 2**(i+1)): g[start] = g[start] + p[start] * g[start-step] p[start] = p[start] * p[start-step] # inverse carry tree for i in range(floor(log(N, 2))-2, -1, -1): start = 2**(i+1)-1 step = 2**i while start + step < N: g[start+step] = g[start+step] + p[start+step] * g[start] p[start+step] = p[start+step] * p[start] start += step # sum logic s = [Xor(A[i], B[i], (cin if i == 0 else g[i-1])) for i in range(N)] return BitVector(s), BitVector(g)
""" Logic functions for addition Interface Functions: ripple_carry_add kogge_stone_add brent_kung_add """ # Disable "invalid variable name" # pylint: disable=C0103 from math import floor, log from pyeda.boolalg.expr import Xor, Majority from pyeda.boolalg.vexpr import BitVector from pyeda.util import clog2 def ripple_carry_add(A, B, cin=0): """Return symbolic logic for an N-bit ripple carry adder.""" assert len(A) == len(B) s, c = list(), list() for i, ai in enumerate(A, A.start): carry = (cin if i == 0 else c[i-1]) s.append(Xor(ai, B[i], carry)) c.append(Majority(ai, B[i], carry)) return BitVector(s), BitVector(c) def kogge_stone_add(A, B, cin=0): """Return symbolic logic for an N-bit Kogge-Stone adder.""" assert len(A) == len(B) stop = len(A) # generate/propagate logic g = [A[i] * B[i] for i in range(stop)] p = [Xor(A[i], B[i]) for i in range(stop)] for i in range(clog2(stop)): start = 1 << i for j in range(start, stop): g[j] = g[j] + p[j] * g[j-start] p[j] = p[j] * p[j-start] # sum logic s = [Xor(A[i], B[i], (cin if i == 0 else g[i-1])) for i in range(stop)] return BitVector(s), BitVector(g) def brent_kung_add(A, B, cin=0): """Return symbolic logic for an N-bit Brent-Kung adder.""" assert len(A) == len(B) N = len(A) # generate/propagate logic g = [A[i] * B[i] for i in range(N)] p = [Xor(A[i], B[i]) for i in range(N)] # carry tree for i in range(floor(log(N, 2))): step = 2**i for start in range(2**(i+1)-1, N, 2**(i+1)): g[start] = g[start] + p[start] * g[start-step] p[start] = p[start] * p[start-step] # inverse carry tree for i in range(floor(log(N, 2))-2, -1, -1): start = 2**(i+1)-1 step = 2**i while start + step < N: g[start+step] = g[start+step] + p[start+step] * g[start] p[start+step] = p[start+step] * p[start] start += step # sum logic s = [Xor(A[i], B[i], (cin if i == 0 else g[i-1])) for i in range(N)] return BitVector(s), BitVector(g)
Python
0.000001
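The renamed loop is the classic Kogge-Stone doubling scan: about log2(N) passes, each combining position j with position j - 2**i. An arithmetic analogue (prefix sums instead of generate/propagate logic, a new illustration rather than pyeda code) shows the indexing:

from math import ceil, log2

g = [1, 2, 3, 4, 5, 6, 7, 8]
N = len(g)
for i in range(ceil(log2(N))):
    step = 1 << i
    # Combine with the neighbour 'step' positions back, one pass per level.
    g = [g[j] + (g[j - step] if j >= step else 0) for j in range(N)]
print(g)   # [1, 3, 6, 10, 15, 21, 28, 36] -- running prefix sums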
99d76458256da781fc4b25a75d68a7a6d8c9379d
Correcting a typo in entries URLConf
urls/entries.py
urls/entries.py
""" URLs for entries in a weblog. """ from django.conf.urls.defaults import * from django.views.generic import date_based from coltrane.models import Entry entry_info_dict = { 'queryset': Entry.live.all(), 'date_field': 'pub_date', } urlpatterns = patterns('', url(r'^$', date_based.archive_index, entry_info_dict, name='coltrane_entry_archive_index'), url(r'^(?P<year>\d{4})/$', date_based.archive_year, entry_info_dict, name='coltrane_entry_archive_year'), url(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', date_based.archive_month, entry_info_dict, name='coltrane_entry_archive_month'), url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', date_based.archive_day, entry_info_dict, name='coltrane_entry_archive_day'), url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', date_based.object_detail, dict(entry_info_dict, slug_field='slug'), name='coltrane_entry_detail'), )
""" URLs for entries in a weblog. """ from django.conf.urls.defaults import * from django.views.generic import date_based from coltrane.models import Entry entry_info_dict = { 'queryset': Entry.live.all(), 'date_field': 'pub_date', } urlpatterns = patterns('', url(r'^$', date_based.archive_index, entry_info_dict, name='coltrane_entry_archive_index'), url(r'^(?P<year>\d{4})/$', date_based.archive_year, entry_info_dict, name='coltrane_entry_archive_year'), url(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', date_based.archive_month, entry_info_dict, name='coltrane_entry_archive_month'), url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', date_based.archive_day, entry_info_dict, name='coltrane.entry_archive_day'), url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', date_based.object_detail, dict(entry_info_dict, slug_field='slug'), name='coltrane_entry_detail'), )
Python
0.998607
f89bc55aebeba0cbf3c8423c97599aa0d334d9c9
Fix lint error (#113)
synthtool/gcp/common.py
synthtool/gcp/common.py
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from synthtool.languages import node from synthtool.sources import templates from synthtool import _tracked_paths _TEMPLATES_DIR = Path(__file__).parent / "templates" class CommonTemplates: def __init__(self): self._templates = templates.Templates(_TEMPLATES_DIR) def py_library(self) -> Path: raise NotImplementedError() def node_library(self, **kwargs) -> Path: kwargs["metadata"] = node.read_metadata() t = templates.TemplateGroup(_TEMPLATES_DIR / "node_library") result = t.render(**kwargs) _tracked_paths.add(result) return result def php_library(self, **kwargs) -> Path: t = templates.TemplateGroup(_TEMPLATES_DIR / "php_library") result = t.render(**kwargs) _tracked_paths.add(result) return result def render(self, template_name: str, **kwargs) -> Path: return self._templates.render(template_name, **kwargs)
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from synthtool.languages import node from synthtool.sources import templates from synthtool import _tracked_paths _TEMPLATES_DIR = Path(__file__).parent / "templates" class CommonTemplates: def __init__(self): self._templates = templates.Templates(_TEMPLATES_DIR) def py_library(self) -> Path: raise NotImplemented() def node_library(self, **kwargs) -> Path: kwargs["metadata"] = node.read_metadata() t = templates.TemplateGroup(_TEMPLATES_DIR / "node_library") result = t.render(**kwargs) _tracked_paths.add(result) return result def php_library(self, **kwargs) -> Path: t = templates.TemplateGroup(_TEMPLATES_DIR / "php_library") result = t.render(**kwargs) _tracked_paths.add(result) return result def render(self, template_name: str, **kwargs) -> Path: return self._templates.render(template_name, **kwargs)
Python
0.000001
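The one-token diff fixes a real bug, not just a lint nit: NotImplemented is a sentinel value for binary-operator methods, not an exception class, and attempting to raise it fails with a TypeError on Python 3. A standalone demonstration:

def stub():
    raise NotImplementedError()   # correct: an actual exception class

try:
    raise NotImplemented()        # wrong: NotImplemented is not callable
except TypeError as e:
    print('raising NotImplemented fails:', e)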
7b4531ec867982ba2f660a2a08e85dbae457083e
Fix new line stripping in admin site
users/models.py
users/models.py
import hashlib import urllib.parse as urllib from django.contrib.auth.models import User from django.db import models # extension to django's User class which has authentication details # as well as some basic info such as name class Member(models.Model): def gravatar(self, size=128): default = "https://pbs.twimg.com/media/Civ9AUkVAAAwihS.jpg" h = hashlib.md5( self.equiv_user.email.encode('utf8').lower() ).hexdigest() q = urllib.urlencode({ # 'd':default, 'd': 'identicon', 's': str(size), }) return 'https://www.gravatar.com/avatar/{}?{}'.format(h, q) equiv_user = models.OneToOneField(User, on_delete=models.CASCADE) def __str__(self): return self.equiv_user.username bio = models.TextField(max_length=4096, blank=True) signature = models.TextField(max_length=1024, blank=True) def notification_count(self): return len(self.notifications_owned.filter(is_unread=True)) official_photo_url = models.CharField(max_length=512, null=True, blank=True) def is_exec(self): return len(self.execrole_set.all()) > 0
import hashlib import urllib.parse as urllib from django.contrib.auth.models import User from django.db import models # extension to django's User class which has authentication details # as well as some basic info such as name class Member(models.Model): def gravatar(self, size=128): default = "https://pbs.twimg.com/media/Civ9AUkVAAAwihS.jpg" h = hashlib.md5( self.equiv_user.email.encode('utf8').lower() ).hexdigest() q = urllib.urlencode({ # 'd':default, 'd': 'identicon', 's': str(size), }) return 'https://www.gravatar.com/avatar/{}?{}'.format(h, q) equiv_user = models.OneToOneField(User, on_delete=models.CASCADE) def __str__(self): return self.equiv_user.username bio = models.CharField(max_length=4096, blank=True) signature = models.CharField(max_length=1024, blank=True) def notification_count(self): return len(self.notifications_owned.filter(is_unread=True)) official_photo_url = models.CharField(max_length=512, null=True, blank=True) def is_exec(self): return len(self.execrole_set.all()) > 0
Python
0
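Why the field-type swap fixes newline stripping: the Django admin renders a CharField as a single-line <input>, which cannot hold line breaks, while a TextField renders as a <textarea>. A hedged model fragment illustrating the choice -- it needs a configured Django project (settings and app registry) to actually import:

from django.db import models

class Member(models.Model):
    # <textarea> in admin forms, so entered newlines survive saving;
    # a CharField here would render as <input type="text"> instead.
    bio = models.TextField(max_length=4096, blank=True)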
57cec2b03eaa6857bcb1b3780c4de00c3165b281
Return early if owner
utils/checks.py
utils/checks.py
from discord.ext import commands def is_owner_or(**perms): async def predicate(ctx): if await ctx.bot.is_owner(ctx.author): return True permissions = ctx.channel.permissions_for(ctx.author) return all(getattr(permissions, perm, None) == value for perm, value in perms.items()) return commands.check(predicate)
from discord.ext import commands def is_owner_or(**perms): async def predicate(ctx): owner = await ctx.bot.is_owner(ctx.author) permissions = ctx.channel.permissions_for(ctx.author) return all(getattr(permissions, perm, None) == value for perm, value in perms.items()) or owner return commands.check(predicate)
Python
0.000006
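A minimal synchronous sketch of the refactor's shape -- check ownership first and return before the permission scan is even attempted. The names below are hypothetical stand-ins for the discord.py objects:

def allowed(is_owner, permissions, required):
    if is_owner:          # early exit: owners skip the permission check
        return True
    return all(permissions.get(p) == v for p, v in required.items())

print(allowed(True, {}, {'manage_messages': True}))    # True
print(allowed(False, {'manage_messages': True},
              {'manage_messages': True}))              # True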
28c314e98ec88586b8c423b0941d8f029e4946e9
fix function which has obviously never been tested
lib/xdg_secret.py
lib/xdg_secret.py
import subprocess def xdg_secret_store(label, secret, attrs): with subprocess.Popen(["secret-tool", "store", "--label", label] + attrs, stdin=subprocess.PIPE) as proc: proc.communicate(secret.encode("utf-8")) return proc.wait() == 0 def xdg_secret_lookup_secret(attrs): with subprocess.Popen(["secret-tool", "lookup"] + attrs, stdout=subprocess.PIPE) as proc: return proc.stdout.read().rstrip(b"\n") def xdg_secret_search_stdout(attrs): return subprocess.call(["secret-tool", "search"] + attrs) == 0 def xdg_secret_clear(attrs): return subprocess.call(["secret-tool", "clear"] + attrs) == 0
import subprocess def xdg_secret_store(label, secret, attrs): with subprocess.Popen(["secret-tool", "store", "--label", label] + attrs, stdin=subprocess.PIPE) as proc: proc.communicate(secret.encode("utf-8")) return proc.wait() == 0 def xdg_secret_lookup_secret(attrs): with subprocess.Popen(["secret-tool", "lookup"] + attrs, stdout=subprocess.PIPE) as proc: return proc.stdout.read().rstrip("\n") def xdg_secret_search_stdout(attrs): return subprocess.call(["secret-tool", "search"] + attrs) == 0 def xdg_secret_clear(attrs): return subprocess.call(["secret-tool", "clear"] + attrs) == 0
Python
0.000001
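The never-tested bug in one line: subprocess pipes yield bytes, and in Python 3 bytes.rstrip() refuses a str argument. A standalone reproduction (the secret value is made up):

out = b'hunter2\n'             # what proc.stdout.read() returns
print(out.rstrip(b'\n'))       # b'hunter2' -- the fixed call
try:
    out.rstrip('\n')           # the original call
except TypeError as e:
    print('str pattern on bytes:', e)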
2d3016ce69e9a40dc5e428a0aa6ea75775c7d84a
Fix subscribe merge_vars.
pybossa/newsletter/__init__.py
pybossa/newsletter/__init__.py
# -*- coding: utf8 -*- # This file is part of PyBossa. # # Copyright (C) 2014 SF Isle of Man Limited # # PyBossa is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyBossa is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with PyBossa. If not, see <http://www.gnu.org/licenses/>. """PyBossa module for subscribing users to Mailchimp lists.""" import mailchimp class Newsletter(object): """Newsletter class to handle mailchimp subscriptions.""" def __init__(self, app=None): """Init method for flask extensions.""" self.app = app if app is not None: # pragma: no cover self.init_app(app) def init_app(self, app): """Configure newsletter Mailchimp client.""" self.app = app self.client = mailchimp.Mailchimp(app.config.get('MAILCHIMP_API_KEY')) self.list_id = app.config.get('MAILCHIMP_LIST_ID') def is_user_subscribed(self, email, list_id=None): """Check if user is subscribed or not.""" try: if list_id is None: list_id = self.list_id res = self.client.lists.member_info(list_id, [{'email': email}]) if (res.get('success_count') == 1 and res['data'][0]['email'] == email): return True else: return False except mailchimp.Error, e: msg = 'MAILCHIMP: An error occurred: %s - %s' % (e.__class__, e) self.app.logger.error(msg) def subscribe_user(self, user, list_id=None, old_email=None): """Subscribe, update a user of a mailchimp list.""" try: update_existing = False if list_id is None: list_id = self.list_id merge_vars = {'FNAME': user.fullname} if old_email: email = {'email': old_email} merge_vars['new-email'] = user.email_addr update_existing = self.is_user_subscribed(old_email) else: email = {'email': user.email_addr} self.client.lists.subscribe(list_id, email, merge_vars, update_existing=update_existing) except mailchimp.Error, e: msg = 'MAILCHIMP: An error occurred: %s - %s' % (e.__class__, e) self.app.logger.error(msg)
# -*- coding: utf8 -*- # This file is part of PyBossa. # # Copyright (C) 2014 SF Isle of Man Limited # # PyBossa is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyBossa is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with PyBossa. If not, see <http://www.gnu.org/licenses/>. """PyBossa module for subscribing users to Mailchimp lists.""" import mailchimp class Newsletter(object): """Newsletter class to handle mailchimp subscriptions.""" def __init__(self, app=None): """Init method for flask extensions.""" self.app = app if app is not None: # pragma: no cover self.init_app(app) def init_app(self, app): """Configure newsletter Mailchimp client.""" self.app = app self.client = mailchimp.Mailchimp(app.config.get('MAILCHIMP_API_KEY')) self.list_id = app.config.get('MAILCHIMP_LIST_ID') def is_user_subscribed(self, email, list_id=None): """Check if user is subscribed or not.""" try: if list_id is None: list_id = self.list_id res = self.client.lists.member_info(list_id, [{'email': email}]) if (res.get('success_count') == 1 and res['data'][0]['email'] == email): return True else: return False except mailchimp.Error, e: msg = 'MAILCHIMP: An error occurred: %s - %s' % (e.__class__, e) self.app.logger.error(msg) def subscribe_user(self, user, list_id=None, old_email=None): """Subscribe, update a user of a mailchimp list.""" try: update_existing = False if list_id is None: list_id = self.list_id merge_vars = {'FNAME': user.fullname} if old_email: email = {'email': old_email} merge_vars['new-email'] = user.email_addr update_existing = self.is_user_subscribed(old_email) else: email = {'email': user.email_addr} merge_vars['email'] = user.email_addr self.client.lists.subscribe(list_id, email, merge_vars, update_existing=update_existing) except mailchimp.Error, e: msg = 'MAILCHIMP: An error occurred: %s - %s' % (e.__class__, e) self.app.logger.error(msg)
Python
0
e1a4b0d7f7d9e860dce794e07aadedea193d470e
Set version to v2.0.18.dev1
spacy/about.py
spacy/about.py
# inspired from: # https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/ # https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py __title__ = 'spacy' __version__ = '2.0.18.dev1' __summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython' __uri__ = 'https://spacy.io' __author__ = 'Explosion AI' __email__ = 'contact@explosion.ai' __license__ = 'MIT' __release__ = False __download_url__ = 'https://github.com/explosion/spacy-models/releases/download' __compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json' __shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
# inspired from: # https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/ # https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py __title__ = 'spacy' __version__ = '2.0.18' __summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython' __uri__ = 'https://spacy.io' __author__ = 'Explosion AI' __email__ = 'contact@explosion.ai' __license__ = 'MIT' __release__ = True __download_url__ = 'https://github.com/explosion/spacy-models/releases/download' __compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json' __shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
Python
0.000011
d5c8d2f5fd4177b6f4980689ae972352563c28e5
Update about.py and increment version
spacy/about.py
spacy/about.py
# inspired from: # https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/ # https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py __title__ = 'spacy' __version__ = '2.0.0' __summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython' __uri__ = 'https://spacy.io' __author__ = 'Explosion AI' __email__ = 'contact@explosion.ai' __license__ = 'MIT' __docs_models__ = 'https://spacy.io/docs/usage/models' __download_url__ = 'https://github.com/explosion/spacy-models/releases/download' __compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json' __shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts.json' __model_files__ = 'https://raw.githubusercontent.com/explosion/spacy-dev-resources/v2/templates/model/'
# inspired from: # https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/ # https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py __title__ = 'spacy' __version__ = '1.8.2' __summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython' __uri__ = 'https://spacy.io' __author__ = 'Matthew Honnibal' __email__ = 'matt@explosion.ai' __license__ = 'MIT' __docs_models__ = 'https://spacy.io/docs/usage/models' __download_url__ = 'https://github.com/explosion/spacy-models/releases/download' __compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json' __shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts.json' __model_files__ = 'https://raw.githubusercontent.com/explosion/spacy-dev-resources/v2/templates/model/'
Python
0
845be624ed0dfb8d942b240034af8b58f7a32e13
Fix the benchmark warm-up code to make sure that the graph is not re-optimized during the timed run.
tensorflow/python/data/benchmarks/benchmark_base.py
tensorflow/python/data/benchmarks/benchmark_base.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test utilities for tf.data benchmarking functionality.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from tensorflow.python.client import session from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.platform import test # TODO(b/119837791): Add eager benchmarks. class DatasetBenchmarkBase(test.Benchmark): """Base class for dataset benchmarks.""" def run_benchmark(self, dataset, num_elements, iters=1, warmup=True, apply_default_optimizations=False): """Benchmarks the dataset. Runs the dataset `iters` times. In each iteration, the benchmark measures the time it takes to go through `num_elements` elements of the dataset. Args: dataset: Dataset to benchmark. num_elements: Number of dataset elements to iterate through each benchmark iteration. iters: Number of times to repeat the timing. warmup: If true, warms up the session caches by running an untimed run. apply_default_optimizations: Determines whether default optimizations should be applied. Returns: A float, representing the per-element wall time of the dataset in seconds. This is the median time (with respect to `iters`) it takes for the dataset to go through `num_elements` elements, divided by `num_elements.` """ options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = ( apply_default_optimizations) dataset = dataset.with_options(options) # NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding # the overhead of multiple `session.run()` calls. Note that this relies on # the underlying implementation of `skip`: if it is optimized in the future, # we will have to change this code. dataset = dataset.skip(num_elements - 1) iterator = dataset_ops.make_initializable_iterator(dataset) next_element = iterator.get_next() next_element = nest.flatten(next_element)[0] deltas = [] for _ in range(iters): with session.Session() as sess: if warmup: # Run once to warm up the session caches. sess.run(iterator.initializer) sess.run(next_element.op) sess.run(iterator.initializer) start = time.time() sess.run(next_element.op) end = time.time() deltas.append(end - start) return np.median(deltas) / float(num_elements) def run_and_report_benchmark(self, dataset, num_elements, name, iters=5, extras=None, warmup=True, apply_default_optimizations=False): # Measure the per-element wall time. wall_time = self.run_benchmark(dataset, num_elements, iters, warmup, apply_default_optimizations) if extras is None: extras = {} extras["num_elements"] = num_elements self.report_benchmark( wall_time=wall_time, iters=iters, name=name, extras=extras)
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test utilities for tf.data benchmarking functionality.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from tensorflow.python.client import session from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.platform import test # TODO(b/119837791): Add eager benchmarks. class DatasetBenchmarkBase(test.Benchmark): """Base class for dataset benchmarks.""" def run_benchmark(self, dataset, num_elements, iters=1, warmup=True, apply_default_optimizations=False): """Benchmarks the dataset. Runs the dataset `iters` times. In each iteration, the benchmark measures the time it takes to go through `num_elements` elements of the dataset. Args: dataset: Dataset to benchmark. num_elements: Number of dataset elements to iterate through each benchmark iteration. iters: Number of times to repeat the timing. warmup: If true, warms up the session caches by running an untimed run. apply_default_optimizations: Determines whether default optimizations should be applied. Returns: A float, representing the per-element wall time of the dataset in seconds. This is the median time (with respect to `iters`) it takes for the dataset to go through `num_elements` elements, divided by `num_elements.` """ options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = ( apply_default_optimizations) dataset = dataset.with_options(options) # NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding # the overhead of multiple `session.run()` calls. Note that this relies on # the underlying implementation of `skip`: if it is optimized in the future, # we will have to change this code. dataset = dataset.skip(num_elements - 1) iterator = dataset_ops.make_initializable_iterator(dataset) next_element = iterator.get_next() next_element = nest.flatten(next_element)[0] deltas = [] for _ in range(iters): with session.Session() as sess: if warmup: # Run once to warm up the session caches. sess.run(iterator.initializer) sess.run(next_element) sess.run(iterator.initializer) start = time.time() sess.run(next_element.op) end = time.time() deltas.append(end - start) return np.median(deltas) / float(num_elements) def run_and_report_benchmark(self, dataset, num_elements, name, iters=5, extras=None, warmup=True, apply_default_optimizations=False): # Measure the per-element wall time. wall_time = self.run_benchmark(dataset, num_elements, iters, warmup, apply_default_optimizations) if extras is None: extras = {} extras["num_elements"] = num_elements self.report_benchmark( wall_time=wall_time, iters=iters, name=name, extras=extras)
Python
0.000087
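The principle behind the fix generalizes: a warm-up must issue the same call as the measured run, or the caches it populates (here, TensorFlow's pruned-and-optimized graph per fetch signature) are not the ones the timed call hits. A framework-free sketch of the pattern, with a toy workload:

import time

def benchmark(fn, warmup=True):
    if warmup:
        fn()                 # identical invocation, so any caching or
                             # compilation triggered here is reused below
    start = time.time()
    fn()
    return time.time() - start

print('%.6fs' % benchmark(lambda: sum(range(100000))))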
4e2f5c79b67a86fce622c486a0ea28fca0130015
clean up default arguments in strip_training_tags()
taggertester/testing.py
taggertester/testing.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from nltk.tag.stanford import StanfordPOSTagger from .config import DATA_DIR_NAME, PATH_TO_DATA_DIR from .files import TrainingFile, write_to_directory from .tag import FilePair class TaggerTester(object): """Collection of files for training/testing part-of-speech taggers. """ def __init__(self): """Initialize the test suite.""" pass class SentencePair(object): """Pair of sentences: one tagged by hand, one by a POS tagger.""" def __init__(self, hand_tagged_sentence, auto_tagged_sentence, separator='_'): """Initialize the object. Parameters ---------- hand_tagged_sentence (unicode / str) : a sentence which has been tagged by hand (i.e., it belongs to part of the original training file which was set aside to serve as a test set) auto_tagged_sentence (list) : a sentence which has been tagged automatically by a part-of-speech tagger separator (str) : the character which serves to separate words from their part-of-speech tags (likely '_' or '/') """ # split the hand-tagged sentence on whitespace, since the auto-tagged # sentence will already be split and we want them to match self.hand_tagged = hand_tagged_sentence.split() self.auto_tagged = auto_tagged_sentence self.sep = separator def strip_training_tags(self, sentence=None, sep=None): """Remove the part-of-speech tags from a test sentence.""" if sentence == None: sentence = self.hand_tagged if sep == None: sep = self.sep return [w.split(sep, 1)[0] for w in sentence]
#!/usr/bin/env python # -*- coding: utf-8 -*- from nltk.tag.stanford import StanfordPOSTagger from .config import DATA_DIR_NAME, PATH_TO_DATA_DIR from .files import TrainingFile, write_to_directory from .tag import FilePair class TaggerTester(object): """Collection of files for training/testing part-of-speech taggers. """ def __init__(self): """Initialize the test suite.""" pass class SentencePair(object): """Pair of sentences: one tagged by hand, one by a POS tagger.""" def __init__(self, hand_tagged_sentence, auto_tagged_sentence, separator='_'): """Initialize the object. Parameters ---------- hand_tagged_sentence (unicode / str) : a sentence which has been tagged by hand (i.e., it belongs to part of the original training file which was set aside to serve as a test set) auto_tagged_sentence (list) : a sentence which has been tagged automatically by a part-of-speech tagger separator (str) : the character which serves to separate words from their part-of-speech tags (likely '_' or '/') """ # split the hand-tagged sentence on whitespace, since the auto-tagged # sentence will already be split and we want them to match self.hand_tagged = hand_tagged_sentence.split() self.auto_tagged = auto_tagged_sentence self.sep = separator def strip_training_tags(self, hand_tagged_sentence): """Remove the part-of-speech tags from a test sentence.""" return [w.split(self.sep, 1)[0] for w in self.hand_tagged]
Python
0
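The cleaned-up method reduces to one list comprehension; a standalone run on a toy tagged sentence, with the separator convention matching the record:

sep = '_'
hand_tagged = 'The_DT dog_NN barks_VBZ'.split()
# split each token once on the separator and keep the word half
words = [w.split(sep, 1)[0] for w in hand_tagged]
print(words)   # ['The', 'dog', 'barks']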