Dataset schema (eight fields per record; each record below lists them in this order, one value per line, with file contents flattened to a single line):

commit: string, fixed length 40 (git commit SHA)
subject: string, length 1 to 1.49k (commit message subject)
old_file: string, length 4 to 311 (file path before the commit)
new_file: string, length 4 to 311 (file path after the commit)
new_contents: string, length 1 to 29.8k (file contents after the commit)
old_contents: string, length 0 to 9.9k (file contents before the commit)
lang: string, 3 classes (source language of the file)
proba: float64, range 0 to 1
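For working with records like these programmatically, here is a minimal sketch using the Hugging Face `datasets` library. The repository id `user/commit-diffs` is a placeholder, not this dataset's actual name; the column names and the `proba` threshold are taken from the schema above.

```python
# Minimal sketch: load a commit-diff dataset shaped like the schema above
# and keep only high-confidence Python records.
from datasets import load_dataset

# "user/commit-diffs" is a hypothetical Hub id; substitute the real one.
ds = load_dataset("user/commit-diffs", split="train")

# Filter on the `lang` and `proba` columns from the schema.
filtered = ds.filter(lambda row: row["lang"] == "Python" and row["proba"] > 0.5)

# Peek at a few records: short SHA plus commit subject.
for row in filtered.select(range(3)):
    print(row["commit"][:8], row["subject"])
```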
44db9815aeaad8b23eecebd3e761466ffdf854f4
Normalize target dir, ensure we have target path.
scaffolder/vcs.py
scaffolder/vcs.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import traceback import subprocess import os import sys import shutil def normalize_path(file_path, mkdir=False): file_path = os.path.realpath(os.path.expanduser(file_path)) if mkdir and not os.path.isdir(file_path): os.makedirs(file_path) return file_path class VCS(): def __init__(self, url=''): """ https://github.com/CG-TTDET/Platform.git git@github.com:goliatone/minions.git https://goliatone@bitbucket.org/goliatone/tty2gif ssh://hg@bitbucket.org/goliatone/personaldetection @param url: @return: """ self.url = url def get_handler(self, url): #TODO: Make this for realz if '.git' in url: return 'git' elif 'hg@' or 'bitbucket.org' in url: return 'hg' else: raise Exception def get_repo_name(self, url, target_dir): tail = url.rpartition('/')[2] tail = tail.replace('.git', '') return os.path.normpath(os.path.join(target_dir, tail)) def notify_existing_repo(self, repo_path): if not os.path.isdir(repo_path): return question = "Repo '{0}' exists, want to delete and clone?".format(repo_path) if self.prompt_question(question): print "Removing '{0}'...".format(repo_path) shutil.rmtree(repo_path) os.mkdir(repo_path) else: print "You don't want to overwrite. Bye!" sys.exit(0) def prompt_question(self, question, default=True): valid = {'yes':True, 'y':True, 'no':False, 'n':False} prompt = '[y/n]' if default == True: prompt = '[Y/n]' elif default == False: prompt = '[y/N]' while True: sys.stdout.write("{0} {1} ".format(question, prompt)) choice = raw_input().lower() if default is not None and choice == '': return default elif choice in valid: return valid[choice] else: sys.stdout.write("Please respond with 'yes' or 'no'"\ "(or 'y' or 'n')") def clone(self, url=None, checkout_branch=None, target_dir='.'): if url: self.url = url url = self.url #let's check target dir: target_dir = normalize_path(target_dir, mkdir=True) #did we get a git or hg repo? vcs = self.get_handler(url) print vcs repo_path = self.get_repo_name(url, target_dir) print repo_path if os.path.isdir(repo_path): self.notify_existing_repo(repo_path) else: os.mkdir(repo_path) try: subprocess.check_call([vcs, 'clone', url], cwd=target_dir) except Exception, e: print e exit() if checkout_branch: subprocess.check_call([vcs, 'checkout', checkout_branch], cwd=target_dir) return repo_path
#!/usr/bin/env python # -*- coding: utf-8 -*- import traceback import subprocess import os import sys import shutil def ensure_path(path): pass class VCS(): def __init__(self, url=''): """ https://github.com/CG-TTDET/Platform.git git@github.com:goliatone/minions.git https://goliatone@bitbucket.org/goliatone/tty2gif ssh://hg@bitbucket.org/goliatone/personaldetection @param url: @return: """ self.url = url def get_handler(self, url): #TODO: Make this for realz if '.git' in url: return 'git' elif 'hg@' or 'bitbucket.org' in url: return 'hg' else: raise Exception def get_repo_name(self, url, target_dir): tail = url.rpartition('/')[2] tail = tail.replace('.git', '') return os.path.normpath(os.path.join(target_dir, tail)) def notify_existing_repo(self, repo_path): if not os.path.isdir(repo_path): return question = "Repo '{0}' exists, want to delete and clone?".format(repo_path) if self.prompt_question(question): print "Removing '{0}'...".format(repo_path) shutil.rmtree(repo_path) else: print "You don't want to overwrite. Bye!" sys.exit(0) def prompt_question(self, question, default=True): valid = {'yes':True, 'y':True, 'no':False, 'n':False} prompt = '[y/n]' if default == True: prompt = '[Y/n]' elif default == False: prompt = '[y/N]' while True: sys.stdout.write("{0} {1} ".format(question, prompt)) choice = raw_input().lower() if default is not None and choice == '': return default elif choice in valid: return valid[choice] else: sys.stdout.write("Please respond with 'yes' or 'no'"\ "(or 'y' or 'n')") def clone(self, url=None, checkout_branch=None, target_dir='.'): if url: self.url = url url = self.url #let's check target dir: target_dir = os.path.expanduser(target_dir) ensure_path(target_dir) #did we get a git or hg repo? vcs = self.get_handler(url) print vcs repo_path = self.get_repo_name(url, target_dir) print repo_path if os.path.isdir(repo_path): self.notify_existing_repo(repo_path) try: subprocess.check_call([vcs, 'clone', url], cwd=target_dir) except Exception, e: print e exit() if checkout_branch: subprocess.check_call([vcs, 'checkout', checkout_branch], cwd=target_dir) return repo_path
Python
0
d00d809735210f53c3da71195107f1991814eb52
fix minor bug most likely due to merge error
labonneboite/common/models/user_favorite_offices.py
labonneboite/common/models/user_favorite_offices.py
# coding: utf8 import datetime from sqlalchemy import Column, ForeignKey, UniqueConstraint from sqlalchemy import desc from sqlalchemy import Integer, String, DateTime from sqlalchemy.orm import relationship from labonneboite.common.database import Base from labonneboite.common.database import db_session from labonneboite.common.models.base import CRUDMixin from labonneboite.conf import get_current_env, ENV_LBBDEV class UserFavoriteOffice(CRUDMixin, Base): """ Stores the favorites offices of a user. Important: This model has a relation to the `etablissements` model via the `office_siret` field. But the `etablissements` table is dropped and recreated during the offices import process (remember that `etablissements` is currently excluded from the migration system). Some entries in `etablissements` may disappear during this process. Therefore the `office_siret` foreign key integrity may be broken. So the foreign key integrity must be enforced by the script of the data deployment process. """ __tablename__ = 'user_favorite_offices' __table_args__ = ( UniqueConstraint('user_id', 'office_siret', name='_user_fav_office'), ) id = Column(Integer, primary_key=True) # Set `ondelete` to `CASCADE`: when a `user` is deleted, all his `favorites` are deleted too. user_id = Column(Integer, ForeignKey('users.id', ondelete='CASCADE'), nullable=False) # Set `ondelete` to `CASCADE`: when an `office` is deleted, all related `favorites` are deleted too. office_siret = Column(String(191), ForeignKey('etablissements.siret', ondelete='CASCADE'), nullable=True) date_created = Column(DateTime, default=datetime.datetime.utcnow, nullable=False) user = relationship('User') if get_current_env() == ENV_LBBDEV: # disable relationship which mysteriously breaks on lbbdev only, not needed there anyway. pass else: office = relationship('Office', lazy='joined') __mapper_args__ = { 'order_by': desc(date_created), # Default order_by for all queries. } @classmethod def user_favs_as_sirets(cls, user): """ Returns the favorites offices of a user as a list of sirets. Useful to check if an office is already in the favorites of a user. """ if user.is_anonymous: return [] sirets = [fav.office_siret for fav in db_session.query(cls).filter_by(user_id=user.id)] return sirets
# coding: utf8 import datetime from sqlalchemy import Column, ForeignKey, UniqueConstraint from sqlalchemy import desc from sqlalchemy import Integer, String, DateTime from sqlalchemy.orm import relationship from labonneboite.common.database import Base from labonneboite.common.database import db_session from labonneboite.common.models.base import CRUDMixin from labonneboite.common import util from labonneboite.conf import get_current_env, ENV_LBBDEV class UserFavoriteOffice(CRUDMixin, Base): """ Stores the favorites offices of a user. Important: This model has a relation to the `etablissements` model via the `office_siret` field. But the `etablissements` table is dropped and recreated during the offices import process (remember that `etablissements` is currently excluded from the migration system). Some entries in `etablissements` may disappear during this process. Therefore the `office_siret` foreign key integrity may be broken. So the foreign key integrity must be enforced by the script of the data deployment process. """ __tablename__ = 'user_favorite_offices' __table_args__ = ( UniqueConstraint('user_id', 'office_siret', name='_user_fav_office'), ) id = Column(Integer, primary_key=True) # Set `ondelete` to `CASCADE`: when a `user` is deleted, all his `favorites` are deleted too. user_id = Column(Integer, ForeignKey('users.id', ondelete='CASCADE'), nullable=False) # Set `ondelete` to `CASCADE`: when an `office` is deleted, all related `favorites` are deleted too. office_siret = Column(String(191), ForeignKey('etablissements.siret', ondelete='CASCADE'), nullable=True) date_created = Column(DateTime, default=datetime.datetime.utcnow, nullable=False) user = relationship('User') if get_current_env() == ENV_LBBDEV: # disable relationship which mysteriously breaks on lbbdev only, not needed there anyway. pass else: office = relationship('Office', lazy='joined') __mapper_args__ = { 'order_by': desc(date_created), # Default order_by for all queries. } @classmethod def user_favs_as_sirets(cls, user): """ Returns the favorites offices of a user as a list of sirets. Useful to check if an office is already in the favorites of a user. """ if user.is_anonymous: return [] sirets = [fav.office_siret for fav in db_session.query(cls).filter_by(user_id=user.id)] return sirets
Python
0.000001
02d6e904fe02a4c53b1878a3f6c44c074de47d79
Add __str__ to Decorator
api/python/schwa/dr/decoration.py
api/python/schwa/dr/decoration.py
""" Utilities for managing document decoration by marking the document with the set of decorations that have been applied to it. """ from functools import wraps, partial def decorator(key=None): """ Wraps a docrep decorator, ensuring it is only executed once per document. Duplication is checked using the given key or the function object. """ def dec(fn): @wraps(fn) def wrapper(doc): try: if key in doc._decorated_by: return except AttributeError: doc._decorated_by = set() doc._decorated_by.add(key) fn(doc) return wrapper if callable(key): return dec(key) return dec class Decorator(object): """ An abstract document decorator, which wraps its decorate method to ensure it is only executed once per document. """ def __init__(self, key): # NOTE: wrapping __call__ like this didn't seem to work self.decorate = decorator(key)(self.decorate) self._key = key @classmethod def _build_key(cls, *args): return '{}-{}'.format(cls.__name__, '-'.join(repr(arg) for arg in args)) def __call__(self, doc): self.decorate(doc) def decorate(self, doc): raise NotImplementedError() def __str__(self): return self._key def requires_decoration(*decorators, **kwargs): """ Marks the document decoration dependencies for a function, where the document is found in the doc_arg positional argument (default 0) or doc_kwarg keyword argument (default 'doc'). """ doc_arg = kwargs.pop('doc_arg', 0) doc_kwarg = kwargs.pop('doc_kwarg', 'doc') if kwargs: raise ValueError("Got unexpected keyword arguments: {}".format(kwargs.keys())) def dec(fn): @wraps(fn) def wrapper(*args, **kwargs): try: doc = args[doc_arg] except IndexError: doc = kwargs[doc_kwarg] for decorate in decorators: decorate(doc) return fn(*args, **kwargs) return wrapper return dec method_requires_decoration = partial(requires_decoration, doc_arg=1)
""" Utilities for managing document decoration by marking the document with the set of decorations that have been applied to it. """ from functools import wraps, partial def decorator(key=None): """ Wraps a docrep decorator, ensuring it is only executed once per document. Duplication is checked using the given key or the function object. """ def dec(fn): @wraps(fn) def wrapper(doc): try: if key in doc._decorated_by: return except AttributeError: doc._decorated_by = set() doc._decorated_by.add(key) fn(doc) return wrapper if callable(key): return dec(key) return dec class Decorator(object): """ An abstract document decorator, which wraps its decorate method to ensure it is only executed once per document. """ def __init__(self, key): # NOTE: wrapping __call__ like this didn't seem to work self.decorate = decorator(key)(self.decorate) @classmethod def _build_key(cls, *args): return '{}-{}'.format(cls.__name__, '-'.join(repr(arg) for arg in args)) def __call__(self, doc): self.decorate(doc) def decorate(self, doc): raise NotImplementedError() def requires_decoration(*decorators, **kwargs): """ Marks the document decoration dependencies for a function, where the document is found in the doc_arg positional argument (default 0) or doc_kwarg keyword argument (default 'doc'). """ doc_arg = kwargs.pop('doc_arg', 0) doc_kwarg = kwargs.pop('doc_kwarg', 'doc') if kwargs: raise ValueError("Got unexpected keyword arguments: {}".format(kwargs.keys())) def dec(fn): @wraps(fn) def wrapper(*args, **kwargs): try: doc = args[doc_arg] except IndexError: doc = kwargs[doc_kwarg] for decorate in decorators: decorate(doc) return fn(*args, **kwargs) return wrapper return dec method_requires_decoration = partial(requires_decoration, doc_arg=1)
Python
0.999998
53829f5a65727ba5ba0b69785bd74bf77d3e2ecf
Remove bogus shebang line.
cli/hack.py
cli/hack.py
# Copyright 2011 Digg, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os import os.path from digg.dev.hackbuilder.util import get_root_of_repo_directory_tree import digg.dev.hackbuilder.cli.commands.build import digg.dev.hackbuilder.plugins def main(): logging.basicConfig(level=logging.DEBUG) parser = get_parser() args = parser.parse_args() plugins = get_plugin_modules(args.plugins) digg.dev.hackbuilder.plugins.initialize_plugins(plugins) args.func(args) def get_parser(): parser = argparse.ArgumentParser(description='Hack build tool.') parser.add_argument('--plugins', action='append', default=['debian', 'python'], help='List of plugins to load') subparsers = parser.add_subparsers(title='Subcommands') parser_help = subparsers.add_parser('help', help='Subcommand help') parser_help.add_argument( 'subcommand_name', help='Name of command to get help for', nargs='?') parser_build = digg.dev.hackbuilder.cli.commands.build.get_build_argparser( subparsers) parser_clean = subparsers.add_parser('clean', help='Clean up the mess.') parser_clean.set_defaults(func=do_clean) subcommand_parsers = { 'help': parser_help, 'build': parser_build, 'clean': parser_clean, } parser_help.set_defaults(func=get_help_parser_handler(parser, subcommand_parsers)) return parser def get_help_parser_handler(main_parser, subcommand_parsers): def do_help(args): try: subcommand_parser = subcommand_parsers[args.subcommand_name] subcommand_parser.print_help() except KeyError: main_parser.print_help() return do_help def do_clean(args): repo_root = os.path.abspath(get_root_of_repo_directory_tree()) logging.info('Repository root: %s', repo_root) normalizer = digg.dev.hackbuilder.target.Normalizer(repo_root) build = digg.dev.hackbuilder.build.Build(None, normalizer) build.remove_directories() def get_plugin_modules(requested_plugins): plugins = set() for requested_plugin in requested_plugins: plugin_name = 'digg.dev.hackbuilder.plugins.' + requested_plugin logging.info('Loading plugin module: %s', plugin_name) module = __import__(plugin_name, fromlist=['buildfile_locals'], level=0) plugins.add(module) return plugins if __name__ == '__main__': main()
# Copyright 2011 Digg, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env python import argparse import logging import os import os.path from digg.dev.hackbuilder.util import get_root_of_repo_directory_tree import digg.dev.hackbuilder.cli.commands.build import digg.dev.hackbuilder.plugins def main(): logging.basicConfig(level=logging.DEBUG) parser = get_parser() args = parser.parse_args() plugins = get_plugin_modules(args.plugins) digg.dev.hackbuilder.plugins.initialize_plugins(plugins) args.func(args) def get_parser(): parser = argparse.ArgumentParser(description='Hack build tool.') parser.add_argument('--plugins', action='append', default=['debian', 'python'], help='List of plugins to load') subparsers = parser.add_subparsers(title='Subcommands') parser_help = subparsers.add_parser('help', help='Subcommand help') parser_help.add_argument( 'subcommand_name', help='Name of command to get help for', nargs='?') parser_build = digg.dev.hackbuilder.cli.commands.build.get_build_argparser( subparsers) parser_clean = subparsers.add_parser('clean', help='Clean up the mess.') parser_clean.set_defaults(func=do_clean) subcommand_parsers = { 'help': parser_help, 'build': parser_build, 'clean': parser_clean, } parser_help.set_defaults(func=get_help_parser_handler(parser, subcommand_parsers)) return parser def get_help_parser_handler(main_parser, subcommand_parsers): def do_help(args): try: subcommand_parser = subcommand_parsers[args.subcommand_name] subcommand_parser.print_help() except KeyError: main_parser.print_help() return do_help def do_clean(args): repo_root = os.path.abspath(get_root_of_repo_directory_tree()) logging.info('Repository root: %s', repo_root) normalizer = digg.dev.hackbuilder.target.Normalizer(repo_root) build = digg.dev.hackbuilder.build.Build(None, normalizer) build.remove_directories() def get_plugin_modules(requested_plugins): plugins = set() for requested_plugin in requested_plugins: plugin_name = 'digg.dev.hackbuilder.plugins.' + requested_plugin logging.info('Loading plugin module: %s', plugin_name) module = __import__(plugin_name, fromlist=['buildfile_locals'], level=0) plugins.add(module) return plugins if __name__ == '__main__': main()
Python
0
d7e6db61a0100e69b9a18c17a906e094e91ce7b3
fix wrong keyword param (passws) to MySQLdb.connect
database.py
database.py
""" Database Manager. """ import MySQLdb import MySQLdb.cursors class DatabaseManager(object): def __init__(self, host, user, passwd, database, charset='utf8', large_scale=False): """Be careful using large_scale=True, SSDictCursor seems not reliable.""" self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd, db=database, charset=charset) self.large_scale = large_scale def close(self): self.conn.close() # put here for better understandability cursor_types = { True: { True: MySQLdb.cursors.SSDictCursor, False: MySQLdb.cursors.SSCursor, }, False: { True: MySQLdb.cursors.DictCursor, False: MySQLdb.cursors.Cursor, }, } def __get_cursor_type(self, use_dict): return self.cursor_types[self.large_scale][use_dict] def __query(self, sql, values=(), use_dict=True): """Execute any SQL. You can use %s placeholder in sql and fill with values. return cursor""" cursor = self.conn.cursor(self.__get_cursor_type(use_dict)) cursor.execute(sql, values) return cursor def query(self, sql, values=()): """Execute any SQL and return affected rows.""" cursor = self.__query(sql, values) return cursor.rowcount def insert(self, sql, values=()): """Insert a row and return insert id.""" cursor = self.__query(sql, values) return cursor.lastrowid def get_rows(self, sql, values=()): """[Generator]Get rows of SELECT query.""" cursor = self.__query(sql, values) for i in xrange(cursor.rowcount): yield cursor.fetchone() def get_value(self, sql, idx=0): """Get value of the first row. This is handy if you want to retrive COUNT(*).""" cursor = self.__query(sql, use_dict=False) row = cursor.fetchone() return row[idx]
""" Database Manager. """ import MySQLdb import MySQLdb.cursors class DatabaseManager(object): def __init__(self, host, user, passwd, database, charset='utf8', large_scale=False): """Be careful using large_scale=True, SSDictCursor seems not reliable.""" self.conn = MySQLdb.connect(host=host, user=user, passws=passwd, db=database, charset=charset) self.large_scale = large_scale def close(self): self.conn.close() # put here for better understandability cursor_types = { True: { True: MySQLdb.cursors.SSDictCursor, False: MySQLdb.cursors.SSCursor, }, False: { True: MySQLdb.cursors.DictCursor, False: MySQLdb.cursors.Cursor, }, } def __get_cursor_type(self, use_dict): return self.cursor_types[self.large_scale][use_dict] def __query(self, sql, values=(), use_dict=True): """Execute any SQL. You can use %s placeholder in sql and fill with values. return cursor""" cursor = self.conn.cursor(self.__get_cursor_type(use_dict)) cursor.execute(sql, values) return cursor def query(self, sql, values=()): """Execute any SQL and return affected rows.""" cursor = self.__query(sql, values) return cursor.rowcount def insert(self, sql, values=()): """Insert a row and return insert id.""" cursor = self.__query(sql, values) return cursor.lastrowid def get_rows(self, sql, values=()): """[Generator]Get rows of SELECT query.""" cursor = self.__query(sql, values) for i in xrange(cursor.rowcount): yield cursor.fetchone() def get_value(self, sql, idx=0): """Get value of the first row. This is handy if you want to retrive COUNT(*).""" cursor = self.__query(sql, use_dict=False) row = cursor.fetchone() return row[idx]
Python
0.000026
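Because each record pairs `old_contents` with `new_contents`, the change a commit made can be recovered as a unified diff. Below is a sketch using Python's standard `difflib`; the toy record mirrors the `passws`-to-`passwd` fix in the row above, assuming the two content fields have been read into ordinary strings.

```python
import difflib


def commit_diff(old_contents: str, new_contents: str, path: str) -> str:
    """Render the change captured by one record as a unified diff."""
    diff = difflib.unified_diff(
        old_contents.splitlines(keepends=True),
        new_contents.splitlines(keepends=True),
        fromfile="a/" + path,
        tofile="b/" + path,
    )
    return "".join(diff)


# Toy record shaped like the rows in this dump.
record = {
    "old_file": "database.py",
    "old_contents": "conn = MySQLdb.connect(host=host, passws=passwd)\n",
    "new_contents": "conn = MySQLdb.connect(host=host, passwd=passwd)\n",
}
print(commit_diff(record["old_contents"], record["new_contents"], record["old_file"]))
```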
4baba777801765b0ce9025c9ef170d3465d874fc
Add state class measurement to SwitchBot signal strength sensors (#79886)
homeassistant/components/switchbot/sensor.py
homeassistant/components/switchbot/sensor.py
"""Support for SwitchBot sensors.""" from __future__ import annotations from homeassistant.components.sensor import ( SensorDeviceClass, SensorEntity, SensorEntityDescription, SensorStateClass, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( PERCENTAGE, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, TEMP_CELSIUS, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity import EntityCategory from homeassistant.helpers.entity_platform import AddEntitiesCallback from .const import DOMAIN from .coordinator import SwitchbotDataUpdateCoordinator from .entity import SwitchbotEntity PARALLEL_UPDATES = 0 SENSOR_TYPES: dict[str, SensorEntityDescription] = { "rssi": SensorEntityDescription( key="rssi", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, device_class=SensorDeviceClass.SIGNAL_STRENGTH, state_class=SensorStateClass.MEASUREMENT, entity_registry_enabled_default=False, entity_category=EntityCategory.DIAGNOSTIC, ), "wifi_rssi": SensorEntityDescription( key="wifi_rssi", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, device_class=SensorDeviceClass.SIGNAL_STRENGTH, state_class=SensorStateClass.MEASUREMENT, entity_registry_enabled_default=False, entity_category=EntityCategory.DIAGNOSTIC, ), "battery": SensorEntityDescription( key="battery", native_unit_of_measurement=PERCENTAGE, device_class=SensorDeviceClass.BATTERY, state_class=SensorStateClass.MEASUREMENT, entity_category=EntityCategory.DIAGNOSTIC, ), "lightLevel": SensorEntityDescription( key="lightLevel", native_unit_of_measurement="Level", state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.ILLUMINANCE, ), "humidity": SensorEntityDescription( key="humidity", native_unit_of_measurement=PERCENTAGE, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.HUMIDITY, ), "temperature": SensorEntityDescription( key="temperature", native_unit_of_measurement=TEMP_CELSIUS, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.TEMPERATURE, ), } async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up Switchbot sensor based on a config entry.""" coordinator: SwitchbotDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] entities = [ SwitchBotSensor( coordinator, sensor, ) for sensor in coordinator.data["data"] if sensor in SENSOR_TYPES ] entities.append(SwitchbotRSSISensor(coordinator, "rssi")) async_add_entities(entities) class SwitchBotSensor(SwitchbotEntity, SensorEntity): """Representation of a Switchbot sensor.""" def __init__( self, coordinator: SwitchbotDataUpdateCoordinator, sensor: str, ) -> None: """Initialize the Switchbot sensor.""" super().__init__(coordinator) self._sensor = sensor self._attr_unique_id = f"{coordinator.base_unique_id}-{sensor}" name = coordinator.device_name self._attr_name = f"{name} {sensor.replace('_', ' ').title()}" self.entity_description = SENSOR_TYPES[sensor] @property def native_value(self) -> str | int: """Return the state of the sensor.""" return self.data["data"][self._sensor] class SwitchbotRSSISensor(SwitchBotSensor): """Representation of a Switchbot RSSI sensor.""" @property def native_value(self) -> str | int: """Return the state of the sensor.""" return self.coordinator.ble_device.rssi
"""Support for SwitchBot sensors.""" from __future__ import annotations from homeassistant.components.sensor import ( SensorDeviceClass, SensorEntity, SensorEntityDescription, SensorStateClass, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( PERCENTAGE, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, TEMP_CELSIUS, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity import EntityCategory from homeassistant.helpers.entity_platform import AddEntitiesCallback from .const import DOMAIN from .coordinator import SwitchbotDataUpdateCoordinator from .entity import SwitchbotEntity PARALLEL_UPDATES = 0 SENSOR_TYPES: dict[str, SensorEntityDescription] = { "rssi": SensorEntityDescription( key="rssi", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, device_class=SensorDeviceClass.SIGNAL_STRENGTH, entity_registry_enabled_default=False, entity_category=EntityCategory.DIAGNOSTIC, ), "wifi_rssi": SensorEntityDescription( key="wifi_rssi", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, device_class=SensorDeviceClass.SIGNAL_STRENGTH, entity_registry_enabled_default=False, entity_category=EntityCategory.DIAGNOSTIC, ), "battery": SensorEntityDescription( key="battery", native_unit_of_measurement=PERCENTAGE, device_class=SensorDeviceClass.BATTERY, state_class=SensorStateClass.MEASUREMENT, entity_category=EntityCategory.DIAGNOSTIC, ), "lightLevel": SensorEntityDescription( key="lightLevel", native_unit_of_measurement="Level", state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.ILLUMINANCE, ), "humidity": SensorEntityDescription( key="humidity", native_unit_of_measurement=PERCENTAGE, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.HUMIDITY, ), "temperature": SensorEntityDescription( key="temperature", native_unit_of_measurement=TEMP_CELSIUS, state_class=SensorStateClass.MEASUREMENT, device_class=SensorDeviceClass.TEMPERATURE, ), } async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up Switchbot sensor based on a config entry.""" coordinator: SwitchbotDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] entities = [ SwitchBotSensor( coordinator, sensor, ) for sensor in coordinator.data["data"] if sensor in SENSOR_TYPES ] entities.append(SwitchbotRSSISensor(coordinator, "rssi")) async_add_entities(entities) class SwitchBotSensor(SwitchbotEntity, SensorEntity): """Representation of a Switchbot sensor.""" def __init__( self, coordinator: SwitchbotDataUpdateCoordinator, sensor: str, ) -> None: """Initialize the Switchbot sensor.""" super().__init__(coordinator) self._sensor = sensor self._attr_unique_id = f"{coordinator.base_unique_id}-{sensor}" name = coordinator.device_name self._attr_name = f"{name} {sensor.replace('_', ' ').title()}" self.entity_description = SENSOR_TYPES[sensor] @property def native_value(self) -> str | int: """Return the state of the sensor.""" return self.data["data"][self._sensor] class SwitchbotRSSISensor(SwitchBotSensor): """Representation of a Switchbot RSSI sensor.""" @property def native_value(self) -> str | int: """Return the state of the sensor.""" return self.coordinator.ble_device.rssi
Python
0
2e164c5fe2e3a208dbdcbc51f287a9e5b7cc34a8
Add package_data entry in setup.py
setup.py
setup.py
from setuptools import setup from klink import __version__ setup( name='klink', version=__version__, url='https://github.com/pmorissette/klink', description='Klink is a simple and clean theme for creating Sphinx docs, inspired by jrnl', license='MIT', author='Philippe Morissette', author_email='morissette.philippe@gmail.com', packages=['klink'], package_data = {'klink': [ 'theme.conf', 'layout.html', 'static/css/klink.css', 'static/fonts/*.*', ]}, )
from setuptools import setup from klink import __version__ setup( name='klink', version=__version__, url='https://github.com/pmorissette/klink', description='Klink is a simple and clean theme for creating Sphinx docs, inspired by jrnl', license='MIT', author='Philippe Morissette', author_email='morissette.philippe@gmail.com', packages=['klink'] )
Python
0.000001
64c50a273c3e113affdb700f137bda78fd1a684d
update examples/progressbar.by
examples/progressbar.py
examples/progressbar.py
#!/usr/bin/env python # Tai Sakuma <sakuma@fnal.gov> from AlphaTwirl.ProgressBar import ProgressBar, ProgressBar2, MPProgressMonitor, ProgressReport from AlphaTwirl.EventReader import MPEventLoopRunner import time, random ##____________________________________________________________________________|| class EventLoop(object): def __init__(self, name): self.name = name self.readers = [ ] def __call__(self, progressReporter = None): n = random.randint(5, 50) time.sleep(random.randint(0, 3)) for i in xrange(n): time.sleep(0.1) report = ProgressReport(name = self.name, done = i + 1, total = n) progressReporter.report(report) return self.readers ##____________________________________________________________________________|| progressBar = ProgressBar() progressMonitor = MPProgressMonitor(presentation = progressBar) runner = MPEventLoopRunner(progressMonitor = progressMonitor) runner.begin() runner.run(EventLoop("loop")) runner.run(EventLoop("another loop")) runner.run(EventLoop("more loop")) runner.run(EventLoop("loop loop loop")) runner.run(EventLoop("l")) runner.run(EventLoop("loop6")) runner.run(EventLoop("loop7")) runner.run(EventLoop("loop8")) runner.end() ##____________________________________________________________________________||
#!/usr/bin/env python # Tai Sakuma <sakuma@fnal.gov> from AlphaTwirl.ProgressBar import ProgressBar, MPProgressMonitor, ProgressReport from AlphaTwirl.EventReader import MPEventLoopRunner import time, random ##____________________________________________________________________________|| class EventLoop(object): def __init__(self, name): self.name = name self.readers = [ ] def __call__(self, progressReporter = None): n = random.randint(5, 50) for i in xrange(n): time.sleep(0.1) report = ProgressReport(name = self.name, done = i + 1, total = n) progressReporter.report(report) return self.readers ##____________________________________________________________________________|| progressBar = ProgressBar() progressMonitor = MPProgressMonitor(presentation = progressBar) runner = MPEventLoopRunner(progressMonitor = progressMonitor) runner.begin() runner.run(EventLoop("loop1")) runner.run(EventLoop("loop2")) runner.run(EventLoop("loop3")) runner.run(EventLoop("loop4")) runner.run(EventLoop("loop5")) runner.run(EventLoop("loop6")) runner.run(EventLoop("loop7")) runner.run(EventLoop("loop8")) runner.end() ##____________________________________________________________________________||
Python
0
f8bd4073beb50f9fb750170e79804d13ea50db0b
update example
examples/raster_mesh.py
examples/raster_mesh.py
from bluesky.examples import Mover, SynGauss, Syn2DGauss import bluesky.plans as bp import bluesky.spec_api as bsa import bluesky.callbacks from bluesky.standard_config import gs import bluesky.qt_kicker bluesky.qt_kicker.install_qt_kicker() # motors theta = Mover('theta', ['theta']) gamma = Mover('gamma', ['gamma']) # synthetic detectors coupled to one motor theta_det = SynGauss('theta_det', theta, 'theta', center=0, Imax=1, sigma=1) gamma_det = SynGauss('gamma_det', gamma, 'gamma', center=0, Imax=1, sigma=1) # synthetic detector coupled to two detectors tgd = Syn2DGauss('theta_gamma_det', theta, 'theta', gamma, 'gamma', center=(0, 0), Imax=1) # set up the default detectors gs.DETS = [theta_det, gamma_det, tgd] ysteps = 25 xsteps = 20 # hook up the live raster callback cb = bluesky.callbacks.LiveRaster((ysteps, xsteps), 'theta_gamma_det', clim=[0, 1]) lt = bluesky.callbacks.LiveTable([theta, gamma, tgd]) gs.MASTER_DET_FIELD = 'theta_gamma_det' mesha = bp.OuterProductAbsScanPlan(gs.DETS, theta, -2.5, 2.5, ysteps, gamma, -2, 2, xsteps, True) gs.RE(mesha, [cb, lt])
from bluesky.examples import Mover, SynGauss, Syn2DGauss import bluesky.simple_scans as bss import bluesky.spec_api as bsa import bluesky.callbacks from bluesky.standard_config import gs import bluesky.qt_kicker # motors theta = Mover('theta', ['theta']) gamma = Mover('gamma', ['gamma']) # synthetic detectors coupled to one motor theta_det = SynGauss('theta_det', theta, 'theta', center=0, Imax=1, sigma=1) gamma_det = SynGauss('gamma_det', gamma, 'gamma', center=0, Imax=1, sigma=1) # synthetic detector coupled to two detectors tgd = Syn2DGauss('theta_gamma_det', theta, 'theta', gamma, 'gamma', center=(0, 0), Imax=1) # set up the default detectors gs.DETS = [theta_det, gamma_det, tgd] ysteps = 25 xsteps = 20 # hook up the live raster callback #cb = bluesky.callbacks.LiveRaster((ysteps + 1, xsteps + 1), # 'theta_gamma_det', clim=[0, 1]) mesha = bss.OuterProductAbsScanPlan() # run a mesh scan gs.MASTER_DET_FIELD = 'theta_gamma_det' bsa.mesh(theta, -2.5, 2.5, ysteps, gamma, -2, 2, xsteps, False)
Python
0
2fe555e71d0b428a85c63c39dcfeecb30420f9b1
Handle SIGTERM by raising SystemExit
src/main/python/afp_alppaca/main.py
src/main/python/afp_alppaca/main.py
from __future__ import print_function, absolute_import, unicode_literals, division import argparse import signal import sys import threading from afp_alppaca.assume_role import AssumedRoleCredentialsProvider from afp_alppaca.ims_interface import IMSCredentialsProvider from afp_alppaca.scheduler import Scheduler from afp_alppaca.webapp import WebApp from afp_alppaca.util import setup_logging, load_config from afp_alppaca.compat import OrderedDict from succubus import Daemon def sigterm_handler(*args): raise SystemExit("SIGTERM was received") class AlppacaDaemon(Daemon): def run(self): self.logger.warn("Alppaca starting.") try: # Handle SIGTERM by raising SystemExit to make the "finally:" work. signal.signal(signal.SIGTERM, sigterm_handler) # Credentials is a shared object that connects the scheduler and the # bottle_app. The scheduler writes into it and the bottle_app reads # from it. self.credentials = OrderedDict() self.launch_scheduler() self.run_webapp() except Exception: self.logger.exception("Error in Alppaca") finally: self.logger.warn("Alppaca shutting down.") def parse_arguments(self): parser = argparse.ArgumentParser() parser.add_argument( '-c', '--config', help="Alppaca YAML config directory", type=str, default='/etc/alppaca') return parser.parse_args() def load_configuration(self): args = self.parse_arguments() self.config = load_config(args.config) self.setup_logging() def setup_logging(self): try: self.logger = setup_logging(self.config) except Exception: print("Could not setup logging with config '{0}'".format(self.config), file=sys.stderr) raise else: self.logger.debug("Alppaca logging was set up") def run_webapp(self): bind_ip = self.config.get('bind_ip', '127.0.0.1') bind_port = self.config.get('bind_port', '25772') self.logger.debug("Starting webserver on %s:%s", bind_ip, bind_port) webapp = WebApp(self.credentials) webapp.run(host=bind_ip, port=bind_port, quiet=True) def get_credentials_provider(self): # initialize the credentials provider ims_host_port = '%s:%s' % (self.config['ims_host'], self.config['ims_port']) ims_protocol = self.config.get('ims_protocol', 'https') self.logger.info("Will get credentials from '%s' using '%s'", ims_host_port, ims_protocol) credentials_provider = IMSCredentialsProvider(ims_host_port, ims_protocol=ims_protocol) role_to_assume = self.config.get('assume_role') if role_to_assume: self.logger.info("Option assume_role set to '%s'", role_to_assume) credentials_provider = AssumedRoleCredentialsProvider( credentials_provider, role_to_assume, self.config.get('aws_proxy_host'), self.config.get('aws_proxy_port'), self.config.get('aws_region') ) return credentials_provider def launch_scheduler(self): credentials_provider = self.get_credentials_provider() scheduler = Scheduler(self.credentials, credentials_provider) scheduler_thread = threading.Thread(target=scheduler.refresh_credentials) scheduler_thread.daemon = True scheduler_thread.start()
from __future__ import print_function, absolute_import, unicode_literals, division import argparse import signal import sys import threading from afp_alppaca.assume_role import AssumedRoleCredentialsProvider from afp_alppaca.ims_interface import IMSCredentialsProvider from afp_alppaca.scheduler import Scheduler from afp_alppaca.webapp import WebApp from afp_alppaca.util import setup_logging, load_config from afp_alppaca.compat import OrderedDict from succubus import Daemon class AlppacaDaemon(Daemon): def run(self): self.logger.warn("Alppaca starting.") try: # Handle SIGTERM the same way SIGINT is handled, i.e. throw a # KeyboardInterrupt exception. This makes the "finally:" work. sigint_handler = signal.getsignal(signal.SIGINT) signal.signal(signal.SIGTERM, sigint_handler) # Credentials is a shared object that connects the scheduler and the # bottle_app. The scheduler writes into it and the bottle_app reads # from it. self.credentials = OrderedDict() self.launch_scheduler() self.run_webapp() except Exception: self.logger.exception("Error in Alppaca") finally: self.logger.warn("Alppaca shutting down.") def parse_arguments(self): parser = argparse.ArgumentParser() parser.add_argument( '-c', '--config', help="Alppaca YAML config directory", type=str, default='/etc/alppaca') return parser.parse_args() def load_configuration(self): args = self.parse_arguments() self.config = load_config(args.config) self.setup_logging() def setup_logging(self): try: self.logger = setup_logging(self.config) except Exception: print("Could not setup logging with config '{0}'".format(self.config), file=sys.stderr) raise else: self.logger.debug("Alppaca logging was set up") def run_webapp(self): bind_ip = self.config.get('bind_ip', '127.0.0.1') bind_port = self.config.get('bind_port', '25772') self.logger.debug("Starting webserver on %s:%s", bind_ip, bind_port) webapp = WebApp(self.credentials) webapp.run(host=bind_ip, port=bind_port, quiet=True) def get_credentials_provider(self): # initialize the credentials provider ims_host_port = '%s:%s' % (self.config['ims_host'], self.config['ims_port']) ims_protocol = self.config.get('ims_protocol', 'https') self.logger.info("Will get credentials from '%s' using '%s'", ims_host_port, ims_protocol) credentials_provider = IMSCredentialsProvider(ims_host_port, ims_protocol=ims_protocol) role_to_assume = self.config.get('assume_role') if role_to_assume: self.logger.info("Option assume_role set to '%s'", role_to_assume) credentials_provider = AssumedRoleCredentialsProvider( credentials_provider, role_to_assume, self.config.get('aws_proxy_host'), self.config.get('aws_proxy_port'), self.config.get('aws_region') ) return credentials_provider def launch_scheduler(self): credentials_provider = self.get_credentials_provider() scheduler = Scheduler(self.credentials, credentials_provider) scheduler_thread = threading.Thread(target=scheduler.refresh_credentials) scheduler_thread.daemon = True scheduler_thread.start()
Python
0
5da88c648e338d21b782a8a36a69e873da6c04ae
use --http-socket rather than --http for uwsgi
gnocchi/cli/api.py
gnocchi/cli/api.py
# Copyright (c) 2013 Mirantis Inc. # Copyright (c) 2015-2017 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from distutils import spawn import math import os import sys import daiquiri from oslo_config import cfg from oslo_policy import opts as policy_opts from gnocchi import opts from gnocchi import service from gnocchi import utils LOG = daiquiri.getLogger(__name__) def prepare_service(conf=None): if conf is None: conf = cfg.ConfigOpts() opts.set_defaults() policy_opts.set_defaults(conf) conf = service.prepare_service(conf=conf) cfg_path = conf.oslo_policy.policy_file if not os.path.isabs(cfg_path): cfg_path = conf.find_file(cfg_path) if cfg_path is None or not os.path.exists(cfg_path): cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'rest', 'policy.json')) conf.set_default('policy_file', cfg_path, group='oslo_policy') return conf def api(): # Compat with previous pbr script try: double_dash = sys.argv.index("--") except ValueError: double_dash = None else: sys.argv.pop(double_dash) conf = cfg.ConfigOpts() for opt in opts.API_OPTS: # NOTE(jd) Register the API options without a default, so they are only # used to override the one in the config file c = copy.copy(opt) c.default = None conf.register_cli_opt(c) conf = prepare_service(conf) if double_dash is not None: # NOTE(jd) Wait to this stage to log so we're sure the logging system # is in place LOG.warning( "No need to pass `--' in gnocchi-api command line anymore, " "please remove") uwsgi = spawn.find_executable("uwsgi") if not uwsgi: LOG.error("Unable to find `uwsgi'.\n" "Be sure it is installed and in $PATH.") return 1 workers = utils.get_default_workers() args = [ "--if-not-plugin", "python", "--plugin", "python", "--endif", "--http-socket", "%s:%d" % (conf.host or conf.api.host, conf.port or conf.api.port), "--master", "--enable-threads", "--die-on-term", # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 "--add-header", "Connection: close", "--processes", str(math.floor(workers * 1.5)), "--threads", str(workers), "--lazy-apps", "--chdir", "/", "--wsgi", "gnocchi.rest.wsgi", "--pyargv", " ".join(sys.argv[1:]), ] virtual_env = os.getenv("VIRTUAL_ENV") if virtual_env is not None: args.extend(["-H", os.getenv("VIRTUAL_ENV", ".")]) return os.execl(uwsgi, uwsgi, *args)
# Copyright (c) 2013 Mirantis Inc. # Copyright (c) 2015-2017 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from distutils import spawn import math import os import sys import daiquiri from oslo_config import cfg from oslo_policy import opts as policy_opts from gnocchi import opts from gnocchi import service from gnocchi import utils LOG = daiquiri.getLogger(__name__) def prepare_service(conf=None): if conf is None: conf = cfg.ConfigOpts() opts.set_defaults() policy_opts.set_defaults(conf) conf = service.prepare_service(conf=conf) cfg_path = conf.oslo_policy.policy_file if not os.path.isabs(cfg_path): cfg_path = conf.find_file(cfg_path) if cfg_path is None or not os.path.exists(cfg_path): cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'rest', 'policy.json')) conf.set_default('policy_file', cfg_path, group='oslo_policy') return conf def api(): # Compat with previous pbr script try: double_dash = sys.argv.index("--") except ValueError: double_dash = None else: sys.argv.pop(double_dash) conf = cfg.ConfigOpts() for opt in opts.API_OPTS: # NOTE(jd) Register the API options without a default, so they are only # used to override the one in the config file c = copy.copy(opt) c.default = None conf.register_cli_opt(c) conf = prepare_service(conf) if double_dash is not None: # NOTE(jd) Wait to this stage to log so we're sure the logging system # is in place LOG.warning( "No need to pass `--' in gnocchi-api command line anymore, " "please remove") uwsgi = spawn.find_executable("uwsgi") if not uwsgi: LOG.error("Unable to find `uwsgi'.\n" "Be sure it is installed and in $PATH.") return 1 workers = utils.get_default_workers() args = [ "--if-not-plugin", "python", "--plugin", "python", "--endif", "--if-not-plugin", "http", "--plugin", "http", "--endif", "--http", "%s:%d" % (conf.host or conf.api.host, conf.port or conf.api.port), "--master", "--enable-threads", "--die-on-term", # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 "--add-header", "Connection: close", "--processes", str(math.floor(workers * 1.5)), "--threads", str(workers), "--lazy-apps", "--chdir", "/", "--wsgi", "gnocchi.rest.wsgi", "--pyargv", " ".join(sys.argv[1:]), ] virtual_env = os.getenv("VIRTUAL_ENV") if virtual_env is not None: args.extend(["-H", os.getenv("VIRTUAL_ENV", ".")]) return os.execl(uwsgi, uwsgi, *args)
Python
0
f8bb295bf1d10410d36a8a8880ff96303bbda451
Update announcements.py
announcements.py
announcements.py
import sys import icalendar import requests import pytz from datetime import datetime, timedelta from libs import post_text from icalendar import Calendar from database import find_bot_nname import re r = requests.get(sys.argv[2]) icsData = r.text cal = Calendar.from_ical(icsData) for evt in cal.subcomponents: print(evt.items()) print(evt.subcomponents) start = evt.decoded('DTSTART') now = datetime.now(tz=pytz.utc) time_left = start - now if timedelta(minutes=0) < time_left < timedelta(minutes=10): raw_text = str(evt.decoded('SUMMARY')) search = re.search(r"([^ ]+)\s(.+)", raw_text) (nname, message) = search.groups('1') nname = nname[2:] message = message[:-1] print(nname) print(message) bot_id = find_bot_nname(nname) if not bot_id: bot_id = sys.argv[1] post_text("I was supposed to post '" + message + "' to " + nname, bot_id) else: bot_id = bot_id[0][0] post_text(message, bot_id)
import sys import icalendar import requests import pytz from datetime import datetime, timedelta from libs import post_text from icalendar import Calendar from database import find_bot_nname import re r = requests.get(sys.argv[2]) icsData = r.text cal = Calendar.from_ical(icsData) for evt in cal.subcomponents: print(evt.items()) print(evt.subcomponents start = evt.decoded('DTSTART') now = datetime.now(tz=pytz.utc) time_left = start - now if timedelta(minutes=0) < time_left < timedelta(minutes=10): raw_text = str(evt.decoded('SUMMARY')) search = re.search(r"([^ ]+)\s(.+)", raw_text) (nname, message) = search.groups('1') nname = nname[2:] message = message[:-1] print(nname) print(message) bot_id = find_bot_nname(nname) if not bot_id: bot_id = sys.argv[1] post_text("I was supposed to post '" + message + "' to " + nname, bot_id) else: bot_id = bot_id[0][0] post_text(message, bot_id)
Python
0
d7a4948b8ee015ad918dac473114b728c65418f8
add total number of assignments to progress API (AA-816)
lms/djangoapps/course_home_api/progress/v1/serializers.py
lms/djangoapps/course_home_api/progress/v1/serializers.py
""" Progress Tab Serializers """ from rest_framework import serializers from rest_framework.reverse import reverse from lms.djangoapps.course_home_api.mixins import VerifiedModeSerializerMixin class CourseGradeSerializer(serializers.Serializer): """ Serializer for course grade """ letter_grade = serializers.CharField() percent = serializers.FloatField() is_passing = serializers.BooleanField(source='passed') class SubsectionScoresSerializer(serializers.Serializer): """ Serializer for subsections in section_scores """ assignment_type = serializers.CharField(source='format') display_name = serializers.CharField() has_graded_assignment = serializers.BooleanField(source='graded') num_points_earned = serializers.IntegerField(source='graded_total.earned') num_points_possible = serializers.IntegerField(source='graded_total.possible') percent_graded = serializers.FloatField() show_correctness = serializers.CharField() show_grades = serializers.SerializerMethodField() url = serializers.SerializerMethodField() def get_url(self, subsection): relative_path = reverse('jump_to', args=[self.context['course_key'], subsection.location]) request = self.context['request'] return request.build_absolute_uri(relative_path) def get_show_grades(self, subsection): return subsection.show_grades(self.context['staff_access']) class SectionScoresSerializer(serializers.Serializer): """ Serializer for sections in section_scores """ display_name = serializers.CharField() subsections = SubsectionScoresSerializer(source='sections', many=True) class GradingPolicySerializer(serializers.Serializer): """ Serializer for grading policy """ assignment_policies = serializers.SerializerMethodField() grade_range = serializers.DictField(source='GRADE_CUTOFFS') def get_assignment_policies(self, grading_policy): return [{ 'num_droppable': assignment_policy['drop_count'], 'num_total': assignment_policy['min_count'], 'short_label': assignment_policy.get('short_label', ''), 'type': assignment_policy['type'], 'weight': assignment_policy['weight'], } for assignment_policy in grading_policy['GRADER']] class CertificateDataSerializer(serializers.Serializer): """ Serializer for certificate data """ cert_status = serializers.CharField() cert_web_view_url = serializers.CharField() download_url = serializers.CharField() class VerificationDataSerializer(serializers.Serializer): """ Serializer for verification data object """ link = serializers.URLField() status = serializers.CharField() status_date = serializers.DateTimeField() class ProgressTabSerializer(VerifiedModeSerializerMixin): """ Serializer for progress tab """ certificate_data = CertificateDataSerializer() completion_summary = serializers.DictField() course_grade = CourseGradeSerializer() end = serializers.DateTimeField() user_has_passing_grade = serializers.BooleanField() has_scheduled_content = serializers.BooleanField() section_scores = SectionScoresSerializer(many=True) enrollment_mode = serializers.CharField() grading_policy = GradingPolicySerializer() studio_url = serializers.CharField() verification_data = VerificationDataSerializer()
""" Progress Tab Serializers """ from rest_framework import serializers from rest_framework.reverse import reverse from lms.djangoapps.course_home_api.mixins import VerifiedModeSerializerMixin class CourseGradeSerializer(serializers.Serializer): """ Serializer for course grade """ letter_grade = serializers.CharField() percent = serializers.FloatField() is_passing = serializers.BooleanField(source='passed') class SubsectionScoresSerializer(serializers.Serializer): """ Serializer for subsections in section_scores """ assignment_type = serializers.CharField(source='format') display_name = serializers.CharField() has_graded_assignment = serializers.BooleanField(source='graded') num_points_earned = serializers.IntegerField(source='graded_total.earned') num_points_possible = serializers.IntegerField(source='graded_total.possible') percent_graded = serializers.FloatField() show_correctness = serializers.CharField() show_grades = serializers.SerializerMethodField() url = serializers.SerializerMethodField() def get_url(self, subsection): relative_path = reverse('jump_to', args=[self.context['course_key'], subsection.location]) request = self.context['request'] return request.build_absolute_uri(relative_path) def get_show_grades(self, subsection): return subsection.show_grades(self.context['staff_access']) class SectionScoresSerializer(serializers.Serializer): """ Serializer for sections in section_scores """ display_name = serializers.CharField() subsections = SubsectionScoresSerializer(source='sections', many=True) class GradingPolicySerializer(serializers.Serializer): """ Serializer for grading policy """ assignment_policies = serializers.SerializerMethodField() grade_range = serializers.DictField(source='GRADE_CUTOFFS') def get_assignment_policies(self, grading_policy): return [{ 'num_droppable': assignment_policy['drop_count'], 'short_label': assignment_policy.get('short_label', ''), 'type': assignment_policy['type'], 'weight': assignment_policy['weight'], } for assignment_policy in grading_policy['GRADER']] class CertificateDataSerializer(serializers.Serializer): """ Serializer for certificate data """ cert_status = serializers.CharField() cert_web_view_url = serializers.CharField() download_url = serializers.CharField() class VerificationDataSerializer(serializers.Serializer): """ Serializer for verification data object """ link = serializers.URLField() status = serializers.CharField() status_date = serializers.DateTimeField() class ProgressTabSerializer(VerifiedModeSerializerMixin): """ Serializer for progress tab """ certificate_data = CertificateDataSerializer() completion_summary = serializers.DictField() course_grade = CourseGradeSerializer() end = serializers.DateTimeField() user_has_passing_grade = serializers.BooleanField() has_scheduled_content = serializers.BooleanField() section_scores = SectionScoresSerializer(many=True) enrollment_mode = serializers.CharField() grading_policy = GradingPolicySerializer() studio_url = serializers.CharField() verification_data = VerificationDataSerializer()
Python
0
c5e13436d7d453bd851e39591f82e2ef0d740d92
Fix typo
pyfarm/scheduler/celery_app.py
pyfarm/scheduler/celery_app.py
# No shebang line, this module is meant to be imported # # Copyright 2014 Ambient Entertainment GmbH & Co. KG # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import timedelta from pyfarm.core.config import read_env_int from celery import Celery celery_app = Celery('pyfarm.tasks', broker='redis://', include=['pyfarm.scheduler.tasks']) celery_app.conf.CELERYBEAT_SCHEDULE = { "periodically_poll_agents": { "task": "pyfarm.scheduler.tasks.poll_agents", "schedule": timedelta( seconds=read_env_int("AGENTS_POLL_INTERVAL", 30))}, "periodical_scheduler": { "task": "pyfarm.scheduler.tasks.assign_tasks", "schedule": timedelta(seconds=read_env_int("SCHEDULER_INTERVAL", 30))}} if __name__ == '__main__': celery_app.start()
# No shebang line, this module is meant to be imported # # Copyright 2014 Ambient Entertainment GmbH & Co. KG # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import timedelta from pyfarm.core.config import read_env_int from celery import Celery celery_app = Celery('pyfarm.tasks', broker='redis://', include=['pyfarm.scheduler.tasks']) celery_app.conf.CELERYBEAT_SCHEDULE = { "periodically_poll_agents": { "task": "pyfarm.scheduler.tasks.poll_agents", "schedule": timedelta( seconds=read_env_int("AGENTS_POLL_INTERVALE", 30))}, "periodical_scheduler": { "task": "pyfarm.scheduler.tasks.assign_tasks", "schedule": timedelta(seconds=read_env_int("SCHEDULER_INTERVAL", 30))}} if __name__ == '__main__': celery_app.start()
Python
0.999999
f755f9857020cfceaeb3cf9607e96cef66ccb048
update dev version after 0.21.1 tag [skip ci]
py/desitarget/_version.py
py/desitarget/_version.py
__version__ = '0.21.1.dev2037'
__version__ = '0.21.1'
Python
0
47527996fe967d8ef713ff8814f71d49ab539fd8
update version
grizli/version.py
grizli/version.py
# git describe --tags
__version__ = "0.6.0-109-g647e4b4"
# git describe --tags
__version__ = "0.6.0-86-g140db75"
Python
0
b493082352de19ed8d3d52c8eda838064957bbc2
bump version to 1.2-BETA2
libnamebench/version.py
libnamebench/version.py
VERSION = '1.2-BETA2'
VERSION = '1.2-BETA1'
Python
0
19cfe70c69b026429454fb8361ec3e8d6f1a0505
add show/hide requested signals
pyqode/core/widgets/preview.py
pyqode/core/widgets/preview.py
""" This module contains a widget that can show the html preview of an editor. """ from weakref import proxy from pyqode.qt import QtCore, QtWebWidgets from pyqode.core.api import DelayJobRunner class HtmlPreviewWidget(QtWebWidgets.QWebView): hide_requested = QtCore.Signal() show_requested = QtCore.Signal() def __init__(self, parent=None): super(HtmlPreviewWidget, self).__init__(parent) self._editor = None self._timer = DelayJobRunner(delay=1000) try: # prevent opening internal links when using QtWebKit self.page().setLinkDelegationPolicy( QtWebWidgets.QWebPage.DelegateAllLinks) except (TypeError, AttributeError): # no needed with QtWebEngine, internal links are properly handled # by the default implementation pass def set_editor(self, editor): url = QtCore.QUrl('') if editor is not None: url = QtCore.QUrl.fromLocalFile(editor.file.path) try: self.setHtml(editor.to_html(), url) except (TypeError, AttributeError): self.setHtml('<center>No preview available...</center>', url) self._editor = None self.hide_requested.emit() else: if self._editor is not None and editor != self._editor: try: self._editor.textChanged.disconnect(self._on_text_changed) except TypeError: pass editor.textChanged.connect(self._on_text_changed) self._editor = proxy(editor) self.show_requested.emit() def _on_text_changed(self, *_): self._timer.request_job(self._update_preview) def _update_preview(self): url = QtCore.QUrl('') if self._editor is not None: url = QtCore.QUrl.fromLocalFile(self._editor.file.path) try: pos = self.page().mainFrame().scrollBarValue(QtCore.Qt.Vertical) self.setHtml(self._editor.to_html(), url) self.page().mainFrame().setScrollBarValue(QtCore.Qt.Vertical, pos) except AttributeError: # Not possible with QtWebEngine??? # self._scroll_pos = self.page().mainFrame().scrollBarValue( # QtCore.Qt.Vertical) self.setHtml(self._editor.to_html(), url)
""" This module contains a widget that can show the html preview of an editor. """ from weakref import proxy from pyqode.qt import QtCore, QtWebWidgets from pyqode.core.api import DelayJobRunner class HtmlPreviewWidget(QtWebWidgets.QWebView): def __init__(self, parent=None): super(HtmlPreviewWidget, self).__init__(parent) self._editor = None self._timer = DelayJobRunner(delay=1000) try: # prevent opening internal links when using QtWebKit self.page().setLinkDelegationPolicy( QtWebWidgets.QWebPage.DelegateAllLinks) except (TypeError, AttributeError): # no needed with QtWebEngine, internal links are properly handled # by the default implementation pass def set_editor(self, editor): try: self.setHtml(editor.to_html()) except (TypeError, AttributeError): self.setHtml('<center>No preview available...</center>') self._editor = None else: if self._editor is not None and editor != self._editor: try: self._editor.textChanged.disconnect(self._on_text_changed) except TypeError: pass editor.textChanged.connect(self._on_text_changed) self._editor = proxy(editor) def _on_text_changed(self, *_): self._timer.request_job(self._update_preview) def _update_preview(self): try: pos = self.page().mainFrame().scrollBarValue(QtCore.Qt.Vertical) self.setHtml(self._editor.to_html()) self.page().mainFrame().setScrollBarValue(QtCore.Qt.Vertical, pos) except AttributeError: # Not possible with QtWebEngine??? # self._scroll_pos = self.page().mainFrame().scrollBarValue( # QtCore.Qt.Vertical) self.setHtml(self._editor.to_html())
Python
0
d428bb582c6fe71e39bdedfbed1b355421f48139
Fix that
src/mysql_proto/com/stmt/prepare.py
src/mysql_proto/com/stmt/prepare.py
#!/usr/bin/env python
# coding=utf-8

from packet import Packet
from proto import Proto
from flags import Flags


class Prepare(Packet):
    query = ""

    def getPayload(self):
        payload = bytearray()

        payload.extend(Proto.build_byte(Flags.COM_STMT_PREPARE))
        payload.extend(Proto.build_eop_str(self.query))

        return payload

    @staticmethod
    def loadFromPacket(packet):
        obj = Prepare()
        proto = Proto(packet, 3)

        obj.sequenceId = proto.get_fixed_int(1)
        proto.get_filler(1)
        obj.query = proto.get_eop_str()

        return obj

if __name__ == "__main__":
    import doctest
    doctest.testmod()
#!/usr/bin/env python
# coding=utf-8

from packet import Packet
from proto import Proto
from flags import Flags


class Prepare(Packet):
    query = ""

    def getPayload(self):
        payload = bytearray()

        payload.extend(Proto.build_byte(Flags.COM_STMT_PREPARE))
        payload.extend(Proto.build_eop_str(self.query))

        return payload

    @staticmethod
    def loadFromPacket(packet):
        obj = Statistics()
        proto = Proto(packet, 3)

        obj.sequenceId = proto.get_fixed_int(1)
        proto.get_filler(1)
        obj.query = proto.get_eop_str()

        return obj

if __name__ == "__main__":
    import doctest
    doctest.testmod()
Python
0.999999
76611b7e6e97089b93626b472f91c04f16644034
Fix up some comments
channels/management/commands/runserver.py
channels/management/commands/runserver.py
import threading

from django.core.management.commands.runserver import \
    Command as RunserverCommand

from channels import DEFAULT_CHANNEL_LAYER, channel_layers
from channels.handler import ViewConsumer
from channels.log import setup_logger
from channels.worker import Worker


class Command(RunserverCommand):

    def handle(self, *args, **options):
        self.verbosity = options.get("verbosity", 1)
        self.logger = setup_logger('django.channels', self.verbosity)
        super(Command, self).handle(*args, **options)

    def inner_run(self, *args, **options):
        # Check a handler is registered for http reqs; if not, add default one
        self.channel_layer = channel_layers[DEFAULT_CHANNEL_LAYER]
        if not self.channel_layer.registry.consumer_for_channel("http.request"):
            self.channel_layer.registry.add_consumer(ViewConsumer(), ["http.request"])
        # Launch worker as subthread
        worker = WorkerThread(self.channel_layer, self.logger)
        worker.daemon = True
        worker.start()
        # Launch server in 'main' thread. Signals are disabled as it's still
        # actually a subthread under the autoreloader.
        self.logger.info("Daphne running, listening on %s:%s", self.addr, self.port)
        from daphne.server import Server
        Server(
            channel_layer=self.channel_layer,
            host=self.addr,
            port=int(self.port),
            signal_handlers=False,
        ).run()


class WorkerThread(threading.Thread):
    """
    Class that runs a worker
    """

    def __init__(self, channel_layer, logger):
        super(WorkerThread, self).__init__()
        self.channel_layer = channel_layer
        self.logger = logger

    def run(self):
        self.logger.info("Worker thread running")
        worker = Worker(channel_layer=self.channel_layer)
        worker.run()
import threading

from django.core.management.commands.runserver import \
    Command as RunserverCommand

from channels import DEFAULT_CHANNEL_LAYER, channel_layers
from channels.handler import ViewConsumer
from channels.log import setup_logger
from channels.worker import Worker


class Command(RunserverCommand):

    def handle(self, *args, **options):
        self.verbosity = options.get("verbosity", 1)
        self.logger = setup_logger('django.channels', self.verbosity)
        super(Command, self).handle(*args, **options)

    def inner_run(self, *args, **options):
        # Check a handler is registered for http reqs; if not, add default one
        self.channel_layer = channel_layers[DEFAULT_CHANNEL_LAYER]
        if not self.channel_layer.registry.consumer_for_channel("http.request"):
            self.channel_layer.registry.add_consumer(ViewConsumer(), ["http.request"])
        # Report starting up
        # Launch worker as subthread (including autoreload logic)
        worker = WorkerThread(self.channel_layer, self.logger)
        worker.daemon = True
        worker.start()
        # Launch server in main thread (Twisted doesn't like being in a
        # subthread, and it doesn't need to autoreload as there's no user code)
        self.logger.info("Daphne running, listening on %s:%s", self.addr, self.port)
        from daphne.server import Server
        Server(
            channel_layer=self.channel_layer,
            host=self.addr,
            port=int(self.port),
            signal_handlers=False,
        ).run()


class WorkerThread(threading.Thread):
    """
    Class that runs a worker
    """

    def __init__(self, channel_layer, logger):
        super(WorkerThread, self).__init__()
        self.channel_layer = channel_layer
        self.logger = logger

    def run(self):
        self.logger.info("Worker thread running")
        worker = Worker(channel_layer=self.channel_layer)
        worker.run()
Python
0.000153
e451ea4d698450813bd11fed6b501b839cd477a6
Reformat runworker a bit
channels/management/commands/runworker.py
channels/management/commands/runworker.py
from __future__ import unicode_literals

from django.core.management import BaseCommand, CommandError

from channels import DEFAULT_CHANNEL_LAYER, channel_layers
from channels.log import setup_logger
from channels.worker import Worker


class Command(BaseCommand):

    leave_locale_alone = True

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--layer', action='store', dest='layer', default=DEFAULT_CHANNEL_LAYER,
            help='Channel layer alias to use, if not the default.',
        )
        parser.add_argument(
            '--only-channels', action='append', dest='only_channels',
            help='Limits this worker to only listening on the provided channels (supports globbing).',
        )
        parser.add_argument(
            '--exclude-channels', action='append', dest='exclude_channels',
            help='Prevents this worker from listening on the provided channels (supports globbing).',
        )

    def handle(self, *args, **options):
        # Get the backend to use
        self.verbosity = options.get("verbosity", 1)
        self.logger = setup_logger('django.channels', self.verbosity)
        self.channel_layer = channel_layers[options.get("layer", DEFAULT_CHANNEL_LAYER)]
        # Check that handler isn't inmemory
        if self.channel_layer.local_only():
            raise CommandError(
                "You cannot span multiple processes with the in-memory layer. " +
                "Change your settings to use a cross-process channel layer."
            )
        # Check a handler is registered for http reqs
        self.channel_layer.router.check_default()
        # Launch a worker
        self.logger.info("Running worker against channel layer %s", self.channel_layer)
        # Optionally provide an output callback
        callback = None
        if self.verbosity > 1:
            callback = self.consumer_called
        # Run the worker
        try:
            Worker(
                channel_layer=self.channel_layer,
                callback=callback,
                only_channels=options.get("only_channels", None),
                exclude_channels=options.get("exclude_channels", None),
            ).run()
        except KeyboardInterrupt:
            pass

    def consumer_called(self, channel, message):
        self.logger.debug("%s", channel)
from __future__ import unicode_literals

from django.core.management import BaseCommand, CommandError

from channels import DEFAULT_CHANNEL_LAYER, channel_layers
from channels.log import setup_logger
from channels.worker import Worker


class Command(BaseCommand):

    leave_locale_alone = True

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument('--layer', action='store', dest='layer', default=DEFAULT_CHANNEL_LAYER,
                            help='Channel layer alias to use, if not the default.')
        parser.add_argument('--only-channels', action='append', dest='only_channels',
                            help='Limits this worker to only listening on the provided channels (supports globbing).')
        parser.add_argument('--exclude-channels', action='append', dest='exclude_channels',
                            help='Prevents this worker from listening on the provided channels (supports globbing).')

    def handle(self, *args, **options):
        # Get the backend to use
        self.verbosity = options.get("verbosity", 1)
        self.logger = setup_logger('django.channels', self.verbosity)
        self.channel_layer = channel_layers[options.get("layer", DEFAULT_CHANNEL_LAYER)]
        # Check that handler isn't inmemory
        if self.channel_layer.local_only():
            raise CommandError(
                "You cannot span multiple processes with the in-memory layer. " +
                "Change your settings to use a cross-process channel layer."
            )
        # Check a handler is registered for http reqs
        self.channel_layer.router.check_default()
        # Launch a worker
        self.logger.info("Running worker against channel layer %s", self.channel_layer)
        # Optionally provide an output callback
        callback = None
        if self.verbosity > 1:
            callback = self.consumer_called
        # Run the worker
        try:
            Worker(
                channel_layer=self.channel_layer,
                callback=callback,
                only_channels=options.get("only_channels", None),
                exclude_channels=options.get("exclude_channels", None),
            ).run()
        except KeyboardInterrupt:
            pass

    def consumer_called(self, channel, message):
        self.logger.debug("%s", channel)
Python
0
f980f4b557df7cb4984cb428dd4bebcfe7ca7bc6
use urgent when you got mails
py3status/modules/imap.py
py3status/modules/imap.py
# -*- coding: utf-8 -*-
"""
Display number of unread messages from IMAP account.

Configuration parameters:
    allow_urgent: display urgency on unread messages (default False)
    cache_timeout: refresh interval for this module (default 60)
    criterion: status of emails to check for (default 'UNSEEN')
    format: display format for this module (default 'Mail: {unseen}')
    hide_if_zero: hide this module when no new mail (default False)
    mailbox: name of the mailbox to check (default 'INBOX')
    password: login password (default None)
    port: number to use (default '993')
    security: login authentication method: 'ssl' or 'starttls'
        (startssl needs python 3.2 or later) (default 'ssl')
    server: server to connect (default None)
    user: login user (default None)

Format placeholders:
    {unseen} number of unread emails

Color options:
    color_new_mail: use color when new mail arrives, default to color_good

@author obb
"""
import imaplib
from ssl import create_default_context

STRING_UNAVAILABLE = 'N/A'


class Py3status:
    """
    """
    # available configuration parameters
    allow_urgent = False
    cache_timeout = 60
    criterion = 'UNSEEN'
    format = 'Mail: {unseen}'
    hide_if_zero = False
    mailbox = 'INBOX'
    password = None
    port = '993'
    security = 'ssl'
    server = None
    user = None

    class Meta:
        deprecated = {
            'rename': [
                {
                    'param': 'new_mail_color',
                    'new': 'color_new_mail',
                    'msg': 'obsolete parameter use `color_new_mail`',
                },
                {
                    'param': 'imap_server',
                    'new': 'server',
                    'msg': 'obsolete parameter use `server`',
                },
            ],
        }

    def post_config_hook(self):
        if self.security not in ["ssl", "starttls"]:
            raise ValueError("Unknown security protocol")

    def check_mail(self):
        mail_count = self._get_mail_count()
        response = {'cached_until': self.py3.time_in(self.cache_timeout)}
        if mail_count is None:
            response['color'] = self.py3.COLOR_BAD,
            response['full_text'] = self.py3.safe_format(
                self.format, {'unseen': STRING_UNAVAILABLE})
        elif mail_count > 0:
            response['color'] = self.py3.COLOR_NEW_MAIL or self.py3.COLOR_GOOD
            if self.allow_urgent:
                response['urgent'] = True

        if mail_count == 0 and self.hide_if_zero:
            response['full_text'] = ''
        else:
            response['full_text'] = self.py3.safe_format(self.format, {'unseen': mail_count})

        return response

    def _connection_ssl(self):
        connection = imaplib.IMAP4_SSL(self.server, self.port)
        return connection

    def _connection_starttls(self):
        connection = imaplib.IMAP4(self.server, self.port)
        connection.starttls(create_default_context())
        return connection

    def _get_mail_count(self):
        try:
            mail_count = 0
            directories = self.mailbox.split(',')

            if self.security == "ssl":
                connection = self._connection_ssl()
            elif self.security == "starttls":
                connection = self._connection_starttls()

            connection.login(self.user, self.password)
            for directory in directories:
                connection.select(directory)
                unseen_response = connection.search(None, self.criterion)
                mails = unseen_response[1][0].split()
                mail_count += len(mails)

            connection.close()
            return mail_count
        except:
            return None


if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
# -*- coding: utf-8 -*-
"""
Display number of unread messages from IMAP account.

Configuration parameters:
    cache_timeout: refresh interval for this module (default 60)
    criterion: status of emails to check for (default 'UNSEEN')
    format: display format for this module (default 'Mail: {unseen}')
    hide_if_zero: hide this module when no new mail (default False)
    mailbox: name of the mailbox to check (default 'INBOX')
    password: login password (default None)
    port: number to use (default '993')
    security: login authentication method: 'ssl' or 'starttls'
        (startssl needs python 3.2 or later) (default 'ssl')
    server: server to connect (default None)
    user: login user (default None)

Format placeholders:
    {unseen} number of unread emails

Color options:
    color_new_mail: use color when new mail arrives, default to color_good

@author obb
"""
import imaplib
from ssl import create_default_context

STRING_UNAVAILABLE = 'N/A'


class Py3status:
    """
    """
    # available configuration parameters
    cache_timeout = 60
    criterion = 'UNSEEN'
    format = 'Mail: {unseen}'
    hide_if_zero = False
    mailbox = 'INBOX'
    password = None
    port = '993'
    security = 'ssl'
    server = None
    user = None

    class Meta:
        deprecated = {
            'rename': [
                {
                    'param': 'new_mail_color',
                    'new': 'color_new_mail',
                    'msg': 'obsolete parameter use `color_new_mail`',
                },
                {
                    'param': 'imap_server',
                    'new': 'server',
                    'msg': 'obsolete parameter use `server`',
                },
            ],
        }

    def post_config_hook(self):
        if self.security not in ["ssl", "starttls"]:
            raise ValueError("Unknown security protocol")

    def check_mail(self):
        mail_count = self._get_mail_count()
        response = {'cached_until': self.py3.time_in(self.cache_timeout)}
        if mail_count is None:
            response['color'] = self.py3.COLOR_BAD,
            response['full_text'] = self.py3.safe_format(
                self.format, {'unseen': STRING_UNAVAILABLE})
        elif mail_count > 0:
            response['color'] = self.py3.COLOR_NEW_MAIL or self.py3.COLOR_GOOD

        if mail_count == 0 and self.hide_if_zero:
            response['full_text'] = ''
        else:
            response['full_text'] = self.py3.safe_format(self.format, {'unseen': mail_count})

        return response

    def _connection_ssl(self):
        connection = imaplib.IMAP4_SSL(self.server, self.port)
        return connection

    def _connection_starttls(self):
        connection = imaplib.IMAP4(self.server, self.port)
        connection.starttls(create_default_context())
        return connection

    def _get_mail_count(self):
        try:
            mail_count = 0
            directories = self.mailbox.split(',')

            if self.security == "ssl":
                connection = self._connection_ssl()
            elif self.security == "starttls":
                connection = self._connection_starttls()

            connection.login(self.user, self.password)
            for directory in directories:
                connection.select(directory)
                unseen_response = connection.search(None, self.criterion)
                mails = unseen_response[1][0].split()
                mail_count += len(mails)

            connection.close()
            return mail_count
        except:
            return None


if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
Python
0
596f9752a7956c259217b0528bed924812d0631f
Add admin filter to filter attendees with children.
pyconde/accounts/admin.py
pyconde/accounts/admin.py
from django.contrib import admin
from django.contrib.admin import SimpleListFilter

from . import models


class WithChildrenFilter(SimpleListFilter):
    title = 'Anzahl Kinder'
    parameter_name = 'children'

    def lookups(self, request, model_admin):
        return (('y', 'mit Kindern'),
                ('n', 'ohne Kinder'))

    def queryset(self, request, queryset):
        if self.value() == 'y':
            queryset = queryset.filter(num_accompanying_children__gt=0)
        elif self.value() == 'n':
            queryset = queryset.filter(num_accompanying_children=0)
        return queryset


class ProfileAdmin(admin.ModelAdmin):
    list_display = ('pk', 'user', 'num_accompanying_children')
    list_display_links = ('pk', 'user')
    list_filter = (WithChildrenFilter,)


admin.site.register(models.Profile, ProfileAdmin)
from django.contrib import admin

from . import models

admin.site.register(models.Profile, list_display=['user'])
Python
0
1b5b43542fe3ba8f85076c6b6cb1e98a4614a0c6
reformat JobGroup to match other tables
pyfarm/models/jobgroup.py
pyfarm/models/jobgroup.py
# No shebang line, this module is meant to be imported
#
# Copyright 2015 Ambient Entertainment GmbH & Co. KG
# Copyright 2015 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Job Group Model
===============

Model for job groups
"""

from pyfarm.master.application import db
from pyfarm.models.core.cfg import (
    TABLE_JOB_GROUP, TABLE_JOB_TYPE, TABLE_USER, MAX_JOBGROUP_NAME_LENGTH)
from pyfarm.models.core.mixins import UtilityMixins
from pyfarm.models.core.types import id_column, IDTypeWork


class JobGroup(db.Model, UtilityMixins):
    """
    Used to group jobs together for better presentation in the UI
    """
    __tablename__ = TABLE_JOB_GROUP

    id = id_column(IDTypeWork)
    title = db.Column(
        db.String(MAX_JOBGROUP_NAME_LENGTH),
        nullable=False,
        doc="The title of the job group's name"
    )
    main_jobtype_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % TABLE_JOB_TYPE),
        nullable=False,
        doc="ID of the jobtype of the main job in this "
            "group. Purely for display and filtering.")
    user_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % TABLE_USER),
        doc="The id of the user who owns these jobs"
    )

    #
    # Relationships
    #
    main_jobtype = db.relationship(
        "JobType",
        backref=db.backref("jobgroups", lazy="dynamic"),
        doc="The jobtype of the main job in this group")
    user = db.relationship(
        "User",
        backref=db.backref("jobgroups", lazy="dynamic"),
        doc="The user who owns these jobs"
    )
# No shebang line, this module is meant to be imported
#
# Copyright 2015 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Job Group Model
===============

Model for job groups
"""

from pyfarm.master.application import db
from pyfarm.models.core.cfg import (
    TABLE_JOB_GROUP, TABLE_JOB_TYPE, TABLE_USER, MAX_JOBGROUP_NAME_LENGTH)
from pyfarm.models.core.mixins import UtilityMixins
from pyfarm.models.core.types import id_column, IDTypeWork


class JobGroup(db.Model, UtilityMixins):
    """
    Used to group jobs together for better presentation in the UI
    """
    __tablename__ = TABLE_JOB_GROUP

    id = id_column(IDTypeWork)
    title = db.Column(db.String(MAX_JOBGROUP_NAME_LENGTH), nullable=False)
    main_jobtype_id = db.Column(IDTypeWork,
                                db.ForeignKey("%s.id" % TABLE_JOB_TYPE),
                                nullable=False,
                                doc="ID of the jobtype of the main job in this "
                                    "group. Purely for display and "
                                    "filtering.")
    user_id = db.Column(db.Integer,
                        db.ForeignKey("%s.id" % TABLE_USER),
                        doc="The id of the user who owns these jobs")
    main_jobtype = db.relationship("JobType",
                                   backref=db.backref("jobgroups",
                                                      lazy="dynamic"),
                                   doc="The jobtype of the main job in this "
                                       "group")
    user = db.relationship("User",
                           backref=db.backref("jobgroups", lazy="dynamic"),
                           doc="The user who owns these jobs")
Python
0.000001
853878cbf218728608a783260ae74c408ef4b8a2
fix the wrong format
python/paddle/fluid/average.py
python/paddle/fluid/average.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import warnings
"""
    Class of all kinds of Average.

    All Averages are accomplished via Python totally.
    They do not change Paddle's Program, nor do anything to
    modify NN model's configuration. They are completely
    wrappers of Python functions.
"""

__all__ = ["WeightedAverage"]


def _is_number_(var):
    return isinstance(var, int) or isinstance(var, float) or (isinstance(
        var, np.ndarray) and var.shape == (1, ))


def _is_number_or_matrix_(var):
    return _is_number_(var) or isinstance(var, np.ndarray)


class WeightedAverage(object):
    """
    Calculate weighted average.

    The average calculating is accomplished via Python totally.
    They do not change Paddle's Program, nor do anything to
    modify NN model's configuration. They are completely
    wrappers of Python functions.

    Examples:
        .. code-block:: python

            avg = fluid.average.WeightedAverage()
            avg.add(value=2.0, weight=1)
            avg.add(value=4.0, weight=2)
            avg.eval()

            # The result is 3.333333333.
            # For (2.0 * 1 + 4.0 * 2) / (1 + 2) = 3.333333333
    """

    def __init__(self):
        warnings.warn(
            "The %s is deprecated, please use fluid.metrics.Accuracy instead."
            % (self.__class__.__name__), Warning)
        self.reset()

    def reset(self):
        self.numerator = None
        self.denominator = None

    def add(self, value, weight):
        if not _is_number_or_matrix_(value):
            raise ValueError(
                "The 'value' must be a number(int, float) or a numpy ndarray.")
        if not _is_number_(weight):
            raise ValueError("The 'weight' must be a number(int, float).")

        if self.numerator is None or self.denominator is None:
            self.numerator = value * weight
            self.denominator = weight
        else:
            self.numerator += value * weight
            self.denominator += weight

    def eval(self):
        if self.numerator is None or self.denominator is None:
            raise ValueError(
                "There is no data to be averaged in WeightedAverage.")
        return self.numerator / self.denominator
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import warnings
"""
    Class of all kinds of Average.

    All Averages are accomplished via Python totally.
    They do not change Paddle's Program, nor do anything to
    modify NN model's configuration. They are completely
    wrappers of Python functions.
"""

__all__ = ["WeightedAverage"]


def _is_number_(var):
    return isinstance(var, int) or isinstance(var, float) or (isinstance(
        var, np.ndarray) and var.shape == (1, ))


def _is_number_or_matrix_(var):
    return _is_number_(var) or isinstance(var, np.ndarray)


class WeightedAverage(object):
    """
    Calculate weighted average.

    The average calculating is accomplished via Python totally.
    They do not change Paddle's Program, nor do anything to
    modify NN model's configuration. They are completely
    wrappers of Python functions.

    Examples:
      .. code-block:: python

        avg = fluid.average.WeightedAverage()
        avg.add(value=2.0, weight=1)
        avg.add(value=4.0, weight=2)
        avg.eval()

        # The result is 3.333333333.
        # For (2.0 * 1 + 4.0 * 2) / (1 + 2) = 3.333333333
    """

    def __init__(self):
        warnings.warn(
            "The %s is deprecated, please use fluid.metrics.Accuracy instead."
            % (self.__class__.__name__), Warning)
        self.reset()

    def reset(self):
        self.numerator = None
        self.denominator = None

    def add(self, value, weight):
        if not _is_number_or_matrix_(value):
            raise ValueError(
                "The 'value' must be a number(int, float) or a numpy ndarray.")
        if not _is_number_(weight):
            raise ValueError("The 'weight' must be a number(int, float).")

        if self.numerator is None or self.denominator is None:
            self.numerator = value * weight
            self.denominator = weight
        else:
            self.numerator += value * weight
            self.denominator += weight

    def eval(self):
        if self.numerator is None or self.denominator is None:
            raise ValueError(
                "There is no data to be averaged in WeightedAverage.")
        return self.numerator / self.denominator
Python
0.999964
662aaa79305cbbbceeba8d46f9a7e543621f45a3
Add harvest edit view
Seeder/harvests/views.py
Seeder/harvests/views.py
import time
import models
import source
import forms
import datetime

from django.http.response import Http404, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateView
from django.views.generic import DetailView, FormView
from urljects import U, URLView, pk

from core import generic_views
from comments.views import CommentViewGeneric
from core.generic_views import EditView


def timestamp_to_datetime(ms_string):
    """
    :param ms_string: string representing milliseconds since the famous day
    :return: datetime or None
    """
    try:
        return datetime.datetime.fromtimestamp(
            float(ms_string) / 1000
        )
    except ValueError:
        return None


def timestamp(dtm_object):
    """
    :param dtm_object: datetime
    :return: int with epoch timestamp in milliseconds
    """
    return time.mktime(dtm_object.timetuple()) * 1000


class HarvestView(generic_views.LoginMixin):
    view_name = 'harvests'
    model = models.Harvest
    title = _('Harvests')


class CalendarView(HarvestView, URLView, TemplateView):
    template_name = 'calendar.html'
    url = U
    url_name = 'calendar'

    def get_context_data(self, **kwargs):
        context = super(CalendarView, self).get_context_data(**kwargs)
        context['harvest_form'] = forms.HarvestCreateForm()
        return context


class CalendarJsonView(generic_views.JSONView, URLView):
    url = U / 'json'
    url_name = 'json_calendar'

    def get_data(self, context):
        date_from = timestamp_to_datetime(self.request.GET.get('from', ''))
        date_to = timestamp_to_datetime(self.request.GET.get('to', ''))
        if not (date_from and date_to):
            raise Http404('Invalid format')

        harvests = models.Harvest.objects.filter(
            scheduled_on__gte=date_from,
            scheduled_on__lte=date_to
        )
        return {
            "success": 1,
            "result": [
                {
                    "id": harvest.id,
                    "title": harvest.repr(),
                    "url": harvest.get_absolute_url(),
                    "class": harvest.get_calendar_style(),
                    "start": timestamp(harvest.scheduled_on),
                    "end": timestamp(harvest.scheduled_on) + 3600 * 1000
                } for harvest in harvests
            ]
        }


class AddView(HarvestView, FormView, URLView):
    url = U / 'add'
    url_name = 'add'
    form_class = forms.HarvestCreateForm
    template_name = 'add_form.html'

    def form_valid(self, form):
        harvest = form.save(commit=False)
        harvest.status = models.Harvest.STATE_INITIAL
        harvest.save()
        harvest.pair_custom_seeds()
        return HttpResponseRedirect(harvest.get_absolute_url())


class Detail(HarvestView, DetailView, CommentViewGeneric, URLView):
    template_name = 'harvest.html'
    url = U / pk / 'detail'
    url_name = 'detail'


class Edit(HarvestView, EditView, URLView):
    url = U / pk / 'edit'
    url_name = 'edit'
    form_class = forms.HarvestEditForm
import time
import models
import forms
import datetime

from django.http.response import Http404, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateView
from django.views.generic import DetailView, FormView
from urljects import U, URLView, pk

from core import generic_views
from comments.views import CommentViewGeneric


def timestamp_to_datetime(ms_string):
    """
    :param ms_string: string representing milliseconds since the famous day
    :return: datetime or None
    """
    try:
        return datetime.datetime.fromtimestamp(
            float(ms_string) / 1000
        )
    except ValueError:
        return None


def timestamp(dtm_object):
    """
    :param dtm_object: datetime
    :return: int with epoch timestamp in milliseconds
    """
    return time.mktime(dtm_object.timetuple()) * 1000


class HarvestView(generic_views.LoginMixin):
    view_name = 'harvests'
    model = models.Harvest
    title = _('Harvests')


class CalendarView(HarvestView, URLView, TemplateView):
    template_name = 'calendar.html'
    url = U
    url_name = 'calendar'

    def get_context_data(self, **kwargs):
        context = super(CalendarView, self).get_context_data(**kwargs)
        context['harvest_form'] = forms.HarvestCreateForm()
        return context


class CalendarJsonView(generic_views.JSONView, URLView):
    url = U / 'json'
    url_name = 'json_calendar'

    def get_data(self, context):
        date_from = timestamp_to_datetime(self.request.GET.get('from', ''))
        date_to = timestamp_to_datetime(self.request.GET.get('to', ''))
        if not (date_from and date_to):
            raise Http404('Invalid format')

        harvests = models.Harvest.objects.filter(
            scheduled_on__gte=date_from,
            scheduled_on__lte=date_to
        )
        return {
            "success": 1,
            "result": [
                {
                    "id": harvest.id,
                    "title": harvest.repr(),
                    "url": harvest.get_absolute_url(),
                    "class": harvest.get_calendar_style(),
                    "start": timestamp(harvest.scheduled_on),
                    "end": timestamp(harvest.scheduled_on) + 3600 * 1000
                } for harvest in harvests
            ]
        }


class AddView(HarvestView, FormView, URLView):
    url = U / 'add'
    url_name = 'add'
    form_class = forms.HarvestCreateForm
    template_name = 'add_form.html'

    def form_valid(self, form):
        harvest = form.save(commit=False)
        harvest.status = models.Harvest.STATE_INITIAL
        harvest.save()
        return HttpResponseRedirect(harvest.get_absolute_url())


class Detail(HarvestView, DetailView, CommentViewGeneric, URLView):
    template_name = 'harvest.html'
    url = U / pk / 'detail'
    url_name = 'detail'
Python
0
f4106e3025c5dbb3136db94081b9998a052c8e70
Bump version to 2.0.0-alpha2
pyqode/python/__init__.py
pyqode/python/__init__.py
# -*- coding: utf-8 -*-
"""
pyqode.python is an extension of pyqode.core that brings support
for the python programming language. It does so by providing a set
of additional modes and panels for the frontend and by supplying
dedicated workers for the backend.
"""
__version__ = "2.0.0-alpha2"
# -*- coding: utf-8 -*-
"""
pyqode.python is an extension of pyqode.core that brings support
for the python programming language. It does so by providing a set
of additional modes and panels for the frontend and by supplying
dedicated workers for the backend.
"""
__version__ = "2.0.0-alpha1"
Python
0.000001
d96aac74b32a166ec724234540dc93a8ea526a3f
fix test error in windows
pythainlp/tag/__init__.py
pythainlp/tag/__init__.py
# -*- coding: utf-8 -*-
# TODO ปรับ API ให้เหมือน nltk
from __future__ import absolute_import,division,print_function,unicode_literals
import sys

def pos_tag(text,engine='old'):
    """
    ระบบ postaggers

    pos_tag(text,engine='old')
    engine ที่รองรับ
    * old เป็น UnigramTagger
    * artagger เป็น RDR POS Tagger
    """
    if engine=='old':
        from .old import tag
    elif engine=='artagger':
        if sys.version_info < (3,4):
            sys.exit('Sorry, Python < 3.4 is not supported')
        def tag(text1):
            try:
                from artagger import Tagger
            except ImportError:
                import pip
                pip.main(['install','https://github.com/wannaphongcom/artagger/archive/master.zip'])
                try:
                    from artagger import Tagger
                except ImportError:
                    print("Error ! using 'pip install https://github.com/wannaphongcom/artagger/archive/master.zip'")
                    sys.exit(0)
            tagger = Tagger()
            words = tagger.tag(' '.join(text1))
            totag=[]
            for word in words:
                totag.append((word.word, word.tag))
            return totag
    return tag(text)
# -*- coding: utf-8 -*-
# TODO ปรับ API ให้เหมือน nltk
from __future__ import absolute_import,division,print_function,unicode_literals
import sys

def pos_tag(text,engine='old'):
    """
    ระบบ postaggers

    pos_tag(text,engine='old')
    engine ที่รองรับ
    * old เป็น UnigramTagger
    * artagger เป็น RDR POS Tagger
    """
    if engine=='old':
        from .old import tag
    elif engine=='artagger':
        if sys.version_info < (3,4):
            sys.exit('Sorry, Python < 3.4 is not supported')
        def tag(text1):
            try:
                from artagger import Tagger
            except ImportError:
                import pip
                pip.main(['install','https://github.com/franziz/artagger/archive/master.zip'])
                try:
                    from artagger import Tagger
                except ImportError:
                    print("Error ! using 'pip install https://github.com/franziz/artagger/archive/master.zip'")
                    sys.exit(0)
            tagger = Tagger()
            words = tagger.tag(' '.join(text1))
            totag=[]
            for word in words:
                totag.append((word.word, word.tag))
            return totag
    return tag(text)
Python
0.000001
6d6ba9e84c0b53cc05cec36047c8e701493d826e
Update rules
pythainlp/tokenize/tcc.py
pythainlp/tokenize/tcc.py
# -*- coding: utf-8 -*-
"""
The implementation of tokenizer accorinding to Thai Character Clusters (TCCs)
rules purposed by `Theeramunkong et al. 2000. \
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.59.2548>`_

Credits:
    * TCC: Jakkrit TeCho
    * Grammar: Wittawat Jitkrittum (`link to the source file \
<https://github.com/wittawatj/jtcc/blob/master/TCC.g>`_)
    * Python code: Korakot Chaovavanich
"""
import re
from typing import List, Set

_RE_TCC = (
    """\
เc็ck
เcctาะk
เccีtยะk
เccีtย(?=[เ-ไก-ฮ]|$)k
เcc็ck
เcิc์ck
เcิtck
เcีtยะ?k
เcืtอะ?k
เc[ิีุู]tย(?=[เ-ไก-ฮ]|$)
เctา?ะ?k
cัtวะk
c[ัื]tc[ุิะ]?k
c[ิุู]์
c[ะ-ู]tk
c็
ct[ะาำ]?(์?)
ck
แc็c
แcc์
แctะ
แcc็c
แccc์
โctะ
[เ-ไ]ct
ก็
อึ
หึ
""".replace(
        "c", "[ก-ฮ]"
    )
    .replace("t", "[่-๋]?")
    .replace("k","(cc?[d|ิ]?[์])?")
    .replace("d","ุ")  # DSara: lower vowel
    .split()
)

_PAT_TCC = re.compile("|".join(_RE_TCC))


def tcc(text: str) -> str:
    """
    TCC generator, generates Thai Character Clusters

    :param str text: text to be tokenized to character clusters
    :return: subwords (character clusters)
    :rtype: Iterator[str]
    """
    if not text or not isinstance(text, str):
        return ""

    len_text = len(text)
    p = 0
    while p < len_text:
        m = _PAT_TCC.match(text[p:])
        if m:
            n = m.span()[1]
        else:
            n = 1
        yield text[p : p + n]
        p += n


def tcc_pos(text: str) -> Set[int]:
    """
    TCC positions

    :param str text: text to be tokenized to character clusters
    :return: list of the end position of subwords
    :rtype: set[int]
    """
    if not text or not isinstance(text, str):
        return set()

    p_set = set()
    p = 0
    for w in tcc(text):
        p += len(w)
        p_set.add(p)

    return p_set


def segment(text: str) -> List[str]:
    """
    Subword segmentation

    :param str text: text to be tokenized to character clusters
    :return: list of subwords (character clusters), tokenized from the text
    :rtype: list[str]
    """
    return list(tcc(text))
# -*- coding: utf-8 -*-
"""
The implementation of tokenizer accorinding to Thai Character Clusters (TCCs)
rules purposed by `Theeramunkong et al. 2000. \
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.59.2548>`_

Credits:
    * TCC: Jakkrit TeCho
    * Grammar: Wittawat Jitkrittum (`link to the source file \
<https://github.com/wittawatj/jtcc/blob/master/TCC.g>`_)
    * Python code: Korakot Chaovavanich
"""
import re
from typing import List, Set

_RE_TCC = (
    """\
เc็ck
เcctาะk
เccีtยะk
เccีtย(?=[เ-ไก-ฮ]|$)k
เcc็ck
เcิc์ck
เcิtck
เcีtยะ?k
เcืtอะ?k
เc[ิีุู]tย(?=[เ-ไก-ฮ]|$)
เctา?ะ?k
cัtวะk
c[ัื]tc[ุิะ]?k
c[ิุู]์
c[ะ-ู]tk
c็
ck
ct[ะาำ]?(์?)
แc็c
แcc์
แctะ
แcc็c
แccc์
โctะ
[เ-ไ]ct
ก็
อึ
หึ
""".replace(
        "c", "[ก-ฮ]"
    )
    .replace("t", "[่-๋]?")
    .replace("k","((cc|c)?[ะ]?[์])?")
    .split()
)

_PAT_TCC = re.compile("|".join(_RE_TCC))


def tcc(text: str) -> str:
    """
    TCC generator, generates Thai Character Clusters

    :param str text: text to be tokenized to character clusters
    :return: subwords (character clusters)
    :rtype: Iterator[str]
    """
    if not text or not isinstance(text, str):
        return ""

    len_text = len(text)
    p = 0
    while p < len_text:
        m = _PAT_TCC.match(text[p:])
        if m:
            n = m.span()[1]
        else:
            n = 1
        yield text[p : p + n]
        p += n


def tcc_pos(text: str) -> Set[int]:
    """
    TCC positions

    :param str text: text to be tokenized to character clusters
    :return: list of the end position of subwords
    :rtype: set[int]
    """
    if not text or not isinstance(text, str):
        return set()

    p_set = set()
    p = 0
    for w in tcc(text):
        p += len(w)
        p_set.add(p)

    return p_set


def segment(text: str) -> List[str]:
    """
    Subword segmentation

    :param str text: text to be tokenized to character clusters
    :return: list of subwords (character clusters), tokenized from the text
    :rtype: list[str]
    """
    return list(tcc(text))
Python
0.000001
8dc08d3733461ebe0ea770d0af07fdd4cfa00b64
Use mujoco function instead.
python/mujoco/__init__.py
python/mujoco/__init__.py
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python bindings for MuJoCo."""

import ctypes
import os
import platform
import subprocess

HEADERS_DIR = os.path.join(os.path.dirname(__file__), 'include')

_MUJOCO_GL_ENABLE = ('enable', 'enabled', 'on', 'true', '1' , '')
_MUJOCO_GL_DISABLE = ('disable', 'disabled', 'off', 'false', '0')
_MUJOCO_GL = os.environ.get('MUJOCO_GL', '').lower().strip()
_MUJOCO_GL_IS_VALID = True

_SYSTEM = platform.system()
if _SYSTEM == 'Linux':
  libglew_name = None
  if _MUJOCO_GL in _MUJOCO_GL_ENABLE + ('glfw', 'glx'):
    libglew_name = 'libglew.so'
  elif _MUJOCO_GL == 'egl':
    libglew_name = 'libglewegl.so'
  elif _MUJOCO_GL == 'osmesa':
    libglew_name = 'libglewosmesa.so'
  elif _MUJOCO_GL not in _MUJOCO_GL_DISABLE:
    _MUJOCO_GL_IS_VALID = False
  if libglew_name is not None:
    ctypes.CDLL(os.path.join(os.path.dirname(__file__), libglew_name),
                ctypes.RTLD_GLOBAL)
    ctypes.CDLL(
        os.path.join(os.path.dirname(__file__), 'libmujoco.so.2.1.3'),
        ctypes.RTLD_GLOBAL)
  else:
    ctypes.CDLL(
        os.path.join(os.path.dirname(__file__), 'libmujoco_nogl.so.2.1.3'),
        ctypes.RTLD_GLOBAL)
elif _SYSTEM == 'Windows':
  if _MUJOCO_GL in _MUJOCO_GL_ENABLE + ('glfw', 'wgl'):
    ctypes.WinDLL(os.path.join(os.path.dirname(__file__), 'mujoco.dll'))
  elif _MUJOCO_GL in _MUJOCO_GL_DISABLE:
    ctypes.WinDLL(os.path.join(os.path.dirname(__file__), 'mujoco_nogl.dll'))
  else:
    _MUJOCO_GL_IS_VALID = False

if not _MUJOCO_GL_IS_VALID:
  raise RuntimeError(
      f'invalid value for environment variable MUJOCO_GL: {_MUJOCO_GL}')

from mujoco._callbacks import *
from mujoco._constants import *
from mujoco._enums import *
from mujoco._errors import *
from mujoco._functions import *
from mujoco._structs import *

# pylint: disable=g-import-not-at-top
if _MUJOCO_GL not in _MUJOCO_GL_DISABLE:
  from mujoco._render import *
  if _SYSTEM != 'Linux':
    from mujoco.glfw import GLContext
  else:
    _dl_handle = ctypes.CDLL(None)
    if hasattr(_dl_handle, 'OSMesaCreateContextExt'):
      from mujoco.osmesa import GLContext
    elif hasattr(_dl_handle, 'eglCreateContext'):
      from mujoco.egl import GLContext
    else:
      from mujoco.glfw import GLContext

__version__ = mj_versionString()
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python bindings for MuJoCo."""

import ctypes
import os
import platform
import subprocess

HEADERS_DIR = os.path.join(os.path.dirname(__file__), 'include')

_MUJOCO_GL_ENABLE = ('enable', 'enabled', 'on', 'true', '1' , '')
_MUJOCO_GL_DISABLE = ('disable', 'disabled', 'off', 'false', '0')
_MUJOCO_GL = os.environ.get('MUJOCO_GL', '').lower().strip()
_MUJOCO_GL_IS_VALID = True

_SYSTEM = platform.system()
if _SYSTEM == 'Linux':
  libglew_name = None
  if _MUJOCO_GL in _MUJOCO_GL_ENABLE + ('glfw', 'glx'):
    libglew_name = 'libglew.so'
  elif _MUJOCO_GL == 'egl':
    libglew_name = 'libglewegl.so'
  elif _MUJOCO_GL == 'osmesa':
    libglew_name = 'libglewosmesa.so'
  elif _MUJOCO_GL not in _MUJOCO_GL_DISABLE:
    _MUJOCO_GL_IS_VALID = False
  if libglew_name is not None:
    ctypes.CDLL(os.path.join(os.path.dirname(__file__), libglew_name),
                ctypes.RTLD_GLOBAL)
    ctypes.CDLL(
        os.path.join(os.path.dirname(__file__), 'libmujoco.so.2.1.3'),
        ctypes.RTLD_GLOBAL)
  else:
    ctypes.CDLL(
        os.path.join(os.path.dirname(__file__), 'libmujoco_nogl.so.2.1.3'),
        ctypes.RTLD_GLOBAL)
elif _SYSTEM == 'Windows':
  if _MUJOCO_GL in _MUJOCO_GL_ENABLE + ('glfw', 'wgl'):
    ctypes.WinDLL(os.path.join(os.path.dirname(__file__), 'mujoco.dll'))
  elif _MUJOCO_GL in _MUJOCO_GL_DISABLE:
    ctypes.WinDLL(os.path.join(os.path.dirname(__file__), 'mujoco_nogl.dll'))
  else:
    _MUJOCO_GL_IS_VALID = False

if not _MUJOCO_GL_IS_VALID:
  raise RuntimeError(
      f'invalid value for environment variable MUJOCO_GL: {_MUJOCO_GL}')

from mujoco._callbacks import *
from mujoco._constants import *
from mujoco._enums import *
from mujoco._errors import *
from mujoco._functions import *
from mujoco._structs import *

# pylint: disable=g-import-not-at-top
if _MUJOCO_GL not in _MUJOCO_GL_DISABLE:
  from mujoco._render import *
  if _SYSTEM != 'Linux':
    from mujoco.glfw import GLContext
  else:
    _dl_handle = ctypes.CDLL(None)
    if hasattr(_dl_handle, 'OSMesaCreateContextExt'):
      from mujoco.osmesa import GLContext
    elif hasattr(_dl_handle, 'eglCreateContext'):
      from mujoco.egl import GLContext
    else:
      from mujoco.glfw import GLContext


def _get_version() -> str:
  with open(os.path.join(HEADERS_DIR, 'mujoco.h'), 'r') as f:
    for line in f:
      if line.startswith('#define mjVERSION_HEADER'):
        version = line.split()[2]
        break
  return '.'.join([d for d in str(version)])

__version__ = _get_version()
Python
0
adb0c2bd97c6c4ca7272d764b669cef90f81a5bb
Allow non-dev logins to dev builds
handlers/login.py
handlers/login.py
from rorn.Box import LoginBox, ErrorBox, WarningBox, SuccessBox
from rorn.Session import delay

from User import User
from Button import Button
from LoadValues import isDevMode
from Event import Event
from utils import *

@get('login')
def login(handler, request):
	handler.title('Login')

	if handler.session['user']:
		print WarningBox('Logged In', 'You are already logged in as %s' % handler.session['user'])
	else:
		print LoginBox()

@post('login')
def loginPost(handler, request, p_username, p_password):
	handler.title('Login')
	user = User.load(username = p_username, password = User.crypt(p_username, p_password))
	if user:
		if not user.hasPrivilege('User'):
			Event.login(handler, user, False, "Account disabled")
			delay(handler, ErrorBox("Login Failed", "Your account has been disabled"))
			redirect('/')

		if user.resetkey:
			user.resetkey = None
			user.save()

		handler.session['user'] = user
		Event.login(handler, user, True)
		delay(handler, SuccessBox("Login Complete", "Logged in as %s" % user, close = True))
		redirect('/')
	else:
		Event.login(handler, None, False, "Failed login for %s" % p_username)
		delay(handler, ErrorBox("Login Failed", "Invalid username/password combination"))
		redirect('/')

@get('logout')
def logout(handler, request):
	print "<form method=\"post\" action=\"/logout\">"
	print Button('Logout', type = 'submit').negative()
	print "</form>"

@post('logout')
def logoutPost(handler, request):
	if handler.session['user']:
		del handler.session['user']
		if 'impersonator' in handler.session:
			del handler.session['impersonator']
		redirect('/')
	else:
		print ErrorBox("Logout Failed", "You are not logged in")
from rorn.Box import LoginBox, ErrorBox, WarningBox, SuccessBox
from rorn.Session import delay

from User import User
from Button import Button
from LoadValues import isDevMode
from Event import Event
from utils import *

@get('login')
def login(handler, request):
	handler.title('Login')

	if handler.session['user']:
		print WarningBox('Logged In', 'You are already logged in as %s' % handler.session['user'])
	else:
		print LoginBox()

@post('login')
def loginPost(handler, request, p_username, p_password):
	handler.title('Login')
	user = User.load(username = p_username, password = User.crypt(p_username, p_password))
	if user:
		if not user.hasPrivilege('User'):
			Event.login(handler, user, False, "Account disabled")
			delay(handler, ErrorBox("Login Failed", "Your account has been disabled"))
			redirect('/')
		elif isDevMode() and not user.hasPrivilege('Dev'):
			Event.login(handler, user, False, "Non-dev login blocked")
			delay(handler, ErrorBox("Login Failed", "This is a development build"))
			redirect('/')

		if user.resetkey:
			user.resetkey = None
			user.save()

		handler.session['user'] = user
		Event.login(handler, user, True)
		delay(handler, SuccessBox("Login Complete", "Logged in as %s" % user, close = True))
		redirect('/')
	else:
		Event.login(handler, None, False, "Failed login for %s" % p_username)
		delay(handler, ErrorBox("Login Failed", "Invalid username/password combination"))
		redirect('/')

@get('logout')
def logout(handler, request):
	print "<form method=\"post\" action=\"/logout\">"
	print Button('Logout', type = 'submit').negative()
	print "</form>"

@post('logout')
def logoutPost(handler, request):
	if handler.session['user']:
		del handler.session['user']
		if 'impersonator' in handler.session:
			del handler.session['impersonator']
		redirect('/')
	else:
		print ErrorBox("Logout Failed", "You are not logged in")
Python
0
64713296cf4f4f3772a1ac23248d4fb930ee23ff
Bump to 0.3
python_gdrive/__init__.py
python_gdrive/__init__.py
from client import GoogleDrive

__version__ = '0.3'
from client import GoogleDrive

__version__ = '0.3-dev'
Python
0.000198
dcd84fec03daee62f05a70b93753d88cb356f196
add skipping of empty lines
catdumps.py
catdumps.py
""" Concatenates dumps from a LAMMPS script. All dumps in the given LAMMPS script will be concatenated into single files separately, which are to be written in the current working directory. """ import argparse import re import os.path import glob def main(): """Drive the script.""" parser = argparse.ArgumentParser(description=globals()['__doc__']) parser.add_argument( 'input', type=argparse.FileType(mode='r'), metavar='INPUT', help='The LAMMPS input file whose dumps are to be concatenated.' ) args = parser.parse_args() dump_cater = DumpCater(args.input) args.input.close() dump_cater.cat_dumps() return 0 class DumpCater(object): """Concatenator of LAMMPS dump files.""" __slots__ = [ 'base_path', 'vars', 'dumps' ] def __init__(self, input_fp): """Initialize the concatenator from the input file object.""" self.base_path = os.path.dirname(input_fp.name) self.vars = {} self.dumps = [] for line in input_fp: fields = line.split() if len(fields) == 0: continue cmd = fields[0] if cmd == 'variable': self.vars[fields[1]] = fields[-1] elif cmd == 'dump': self.dumps.append( self.subst_vars(fields[-1]) ) else: pass # Skip all other lines. return def subst_vars(self, inp_str): """Substitute all variable references in the given string.""" var_ref = re.compile(r'\$\{(?P<name>\w*)\}') # The string is going to be substituted for variable reference # repeatedly. curr = inp_str while True: match = var_ref.search(curr) if not match: break else: var_name = match.group('name') try: curr = curr.replace( ''.join(['${', var_name, '}']), self.vars[var_name] ) except KeyError: print('Undefined variable {} in script!'.format(var_name)) continue return curr def cat_dumps(self): """Concatenates all the dumps in the input script.""" for dump in self.dumps: # Get all the file names and sort according to step number. file_names = sorted(glob.glob( os.path.join(self.base_path, dump) ), key=self.form_step_getter(dump)) with open(dump.replace('*', ''), 'w') as out_fp: for name in file_names: with open(name, 'r') as inp_fp: out_fp.write(inp_fp.read()) continue continue return @staticmethod def form_step_getter(dump): """Form the function to get the step number from a file name.""" patt = re.compile( dump.replace('.', r'\.').replace('*', r'(?P<step>\d+)') ) def get_step(name): """Get the step number from the file name.""" match = patt.search(os.path.basename(name)) return int(match.group('step')) return get_step if __name__ == '__main__': main()
""" Concatenates dumps from a LAMMPS script. All dumps in the given LAMMPS script will be concatenated into single files separately, which are to be written in the current working directory. """ import argparse import re import os.path import glob def main(): """Drive the script.""" parser = argparse.ArgumentParser(description=globals()['__doc__']) parser.add_argument( 'input', type=argparse.FileType(mode='r'), metavar='INPUT', help='The LAMMPS input file whose dumps are to be concatenated.' ) args = parser.parse_args() dump_cater = DumpCater(args.input) args.input.close() dump_cater.cat_dumps() return 0 class DumpCater(object): """Concatenator of LAMMPS dump files.""" __slots__ = [ 'base_path', 'vars', 'dumps' ] def __init__(self, input_fp): """Initialize the concatenator from the input file object.""" self.base_path = os.path.dirname(input_fp.name) self.vars = {} self.dumps = [] for line in input_fp: fields = line.split() cmd = fields[0] if cmd == 'variable': self.vars[fields[1]] = fields[-1] elif cmd == 'dump': self.dumps.append( self.subst_vars(fields[-1]) ) else: pass # Skip all other lines. return def subst_vars(self, inp_str): """Substitute all variable references in the given string.""" var_ref = re.compile(r'\$\{(?P<name>\w*)\}') # The string is going to be substituted for variable reference # repeatedly. curr = inp_str while True: match = var_ref.search(curr) if not match: break else: var_name = match.group('name') try: curr = curr.replace( ''.join(['${', var_name, '}']), self.vars[var_name] ) except KeyError: print('Undefined variable {} in script!'.format(var_name)) continue return curr def cat_dumps(self): """Concatenates all the dumps in the input script.""" for dump in self.dumps: # Get all the file names and sort according to step number. file_names = sorted(glob.glob( os.path.join(self.base_path, dump) ), key=self.form_step_getter(dump)) with open(dump.replace('*', ''), 'w') as out_fp: for name in file_names: with open(name, 'r') as inp_fp: out_fp.write(inp_fp.read()) continue continue return @staticmethod def form_step_getter(dump): """Form the function to get the step number from a file name.""" patt = re.compile( dump.replace('.', r'\.').replace('*', r'(?P<step>\d+)') ) def get_step(name): """Get the step number from the file name.""" match = patt.search(os.path.basename(name)) return int(match.group('step')) return get_step if __name__ == '__main__': main()
Python
0.000035
3475aee89ef5b22a92a674400ea37430f8255924
handle Appengine Datastore Key Type
huTools/hujson.py
huTools/hujson.py
#!/usr/bin/env python
# encoding: utf-8
"""
hujson.py - extended json - tries to be compatible with simplejson

hujson can encode additional types like decimal and datetime into valid json.
All the heavy lifting is done by John Millikin's `jsonlib`, see
https://launchpad.net/jsonlib

Created by Maximillian Dornseif on 2010-09-10.
Copyright (c) 2010 HUDORA. All rights reserved.
"""

from _jsonlib import UnknownSerializerError
import _jsonlib
import datetime


def _unknown_handler(value):
    if isinstance(value, datetime.date):
        return str(value)
    elif isinstance(value, datetime.datetime):
        return value.isoformat() + 'Z'
    elif hasattr(value, 'properties'):
        return dict([(key, getattr(value, key)) for key in value.properties().keys()])
    elif 'google.appengine.api.users.User' in str(type(value)):
        return "%s/%s" % (value.user_id(), value.email())
    elif 'google.appengine.api.datastore_types.Key' in str(type(value)):
        return str(value)
    raise UnknownSerializerError("%s(%s)" % (type(value), value))


def dumps(val):
    return _jsonlib.write(val, on_unknown=_unknown_handler, indent=' ')


def loads(data):
    return _jsonlib.read(data)
#!/usr/bin/env python
# encoding: utf-8
"""
hujson.py - extended json - tries to be compatible with simplejson

hujson can encode additional types like decimal and datetime into valid json.
All the heavy lifting is done by John Millikin's `jsonlib`, see
https://launchpad.net/jsonlib

Created by Maximillian Dornseif on 2010-09-10.
Copyright (c) 2010 HUDORA. All rights reserved.
"""

from _jsonlib import UnknownSerializerError
import _jsonlib
import datetime


def _unknown_handler(value):
    if isinstance(value, datetime.date):
        return str(value)
    elif isinstance(value, datetime.datetime):
        return value.isoformat() + 'Z'
    elif hasattr(value, 'properties'):
        return dict([(key, getattr(value, key)) for key in value.properties().keys()])
    elif 'google.appengine.api.users.User' in str(type(value)):
        return "%s/%s" % (value.user_id(), value.email())
    raise UnknownSerializerError("%s(%s)" % (type(value), value))


def dumps(val):
    return _jsonlib.write(val, on_unknown=_unknown_handler, indent=' ')


def loads(data):
    return _jsonlib.read(data)
Python
0
530844a16a573ab49850a22631f97d8ad89465c9
Clean Up NLU state
sara_flexbe_states/src/sara_flexbe_states/sara_nlu_spr.py
sara_flexbe_states/src/sara_flexbe_states/sara_nlu_spr.py
#!/usr/bin/env python
# encoding=utf8
from __future__ import print_function

from flexbe_core import EventState, Logger
import rospy
from wm_nlu.srv import AnswerQuestion
from std_msgs.msg import String


class SaraNLUspr(EventState):
    '''
    Use wm_nlu to parse a sentence and return the answer.

    ># sentence         string      sentence to parse
    #> answer           string      answer

    <= understood       Finished job.
    <= not_understood   Finished job but no commands detected.
    <= fail             service unavailable.
    '''

    def __init__(self):
        # See example_state.py for basic explanations.
        super(SaraNLUspr, self).__init__(outcomes=['understood', 'not_understood', 'fail'],
                                         input_keys=['sentence'], output_keys=['answer'])

        serviceName = "/answer_question"

        Logger.loginfo("waiting forservice: " + serviceName)

        rospy.wait_for_service(serviceName)

        self.service = rospy.ServiceProxy(serviceName, AnswerQuestion)

    def execute(self, userdata):
        # Call the NLU service
        response = self.service(String(userdata.sentence))

        # Checking the validity of the response
        if response.str.data is "":
            userdata.answer = response.str.data
            return "fail"

        userdata.answer = response.str.data
        return "understood"
#!/usr/bin/env python
# encoding=utf8
from __future__ import print_function

from flexbe_core import EventState, Logger
import rospy
import re
from wm_nlu.srv import AnswerQuestion
from std_msgs.msg import String


class SaraNLUspr(EventState):
    '''
    Use wm_nlu to parse a sentence and return the detected actions in a standard format (ActionForm)

    ># sentence         string      sentence to parse
    #> ActionForms      string[]    list of ActionForms

    <= understood       Finished job.
    <= not_understood   Finished job but no commands detected.
    <= fail             service unavailable.
    '''

    def __init__(self):
        # See example_state.py for basic explanations.
        super(SaraNLUspr, self).__init__(outcomes=['understood', 'not_understood', 'fail'],
                                         input_keys=['sentence'], output_keys=['answer'])
        self.RecurentSubject = None
        self.Person = None
        self.serviceName = "/answer_question"

        Logger.loginfo("waiting forservice: " + self.serviceName)

        rospy.wait_for_service(self.serviceName)

    def execute(self, userdata):
        # Call the NLU service
        serv = rospy.ServiceProxy(self.serviceName, AnswerQuestion)
        Resp = serv(String(userdata.sentence))

        # Checking the validity of the responce
        if Resp.str.data is "":
            userdata.answer = Resp.str
            return "fail"

        userdata.answer = Resp.str
        return "understood"

    def on_enter(self, userdata):
        Logger.loginfo('Enter SaraNLU')
Python
0
142d3ebf66e31aad2363fc0c421dc573dc9b1157
Simplify current_service() function
ci/utils.py
ci/utils.py
# -*- coding: utf-8 -*-

"""This module defines functions generally useful in scikit-ci."""

import os

try:
    from .constants import SERVICES, SERVICES_ENV_VAR
except (SystemError, ValueError):
    from constants import SERVICES, SERVICES_ENV_VAR


def current_service():
    for service, env_var in SERVICES_ENV_VAR.items():
        if os.environ.get(env_var, 'false').lower() == 'true':
            return service
    raise LookupError(
        "unknown service: None of the environment variables {} are set "
        "to 'true' or 'True'".format(", ".join(SERVICES_ENV_VAR.values()))
    )


def current_operating_system(service):
    return os.environ[SERVICES[service]] if SERVICES[service] else None
# -*- coding: utf-8 -*-

"""This module defines functions generally useful in scikit-ci."""

import os

try:
    from .constants import SERVICES, SERVICES_ENV_VAR
except (SystemError, ValueError):
    from constants import SERVICES, SERVICES_ENV_VAR


def current_service():
    for service in SERVICES.keys():
        if os.environ.get(
                SERVICES_ENV_VAR[service], 'false').lower() == 'true':
            return service
    raise LookupError(
        "unknown service: None of the environment variables {} are set "
        "to 'true' or 'True'".format(", ".join(SERVICES_ENV_VAR.values()))
    )


def current_operating_system(service):
    return os.environ[SERVICES[service]] if SERVICES[service] else None
Python
0.000006
9a7c0a07d14b81b134963a9459326ffdb53cf28d
Disable fe build
ci/zoeci.py
ci/zoeci.py
#!/usr/bin/env python3

# Copyright (c) 2016, Quang-Nhat Hoang-Xuan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""ZOE CI entry point."""

import yaml
import sys
from typing import Iterable, Callable, Dict, Any, Union

import docker
from docker import Client

from utils.DockerContainerParameter import DockerContainerParameter
from deploy.frontenddeploy import ZoeFrontendDeploy
from deploy.backenddeploy import ZoeBackendDeploy


class ZoeDeploy():
    def __init__(self, dockerUrl, dockerComposePath, image):
        self.currentImage = image
        self.typeDeploy = 1 if 'prod' in dockerComposePath else 0
        self.backend = ZoeBackendDeploy(dockerUrl, dockerComposePath)
        self.frontend = ZoeFrontendDeploy(dockerUrl, 'apache2')

    def deploy(self):
        """ Deploy zoe backend and frontend """
        try:
            retBE = self.backend.deploy(self.currentImage)
            print('Deployed BE with latest image...')
            if self.typeDeploy == 1 and retBE == 0:
                print('Redeploy BE with previous image')
                self.backend.deploy(self.backend.previousImage)

            retFE = 1
            if self.typeDeploy == 1:
                #retFE = self.frontend.deploy()
                print('Deployed FE with latest codes...')
                if retFE == 0 or retBE == 0:
                    retFE = self.frontend.fallback()
        except Exception as ex:
            print(ex)
            retBE = 0
        return (retBE and retFE)


class ZoeImage():
    def __init__(self, dockerUrl, tag):
        self.cli = Client(base_url=dockerUrl)
        self.tag = tag

    def build(self):
        """ Build docker image """
        ret = 1
        for line in self.cli.build(path='.', tag=self.tag, rm=True):
            print(line)
            if 'error' in str(line):
                ret = 0
        return ret

    def push(self):
        """ Push docker image """
        ret = 1
        for line in self.cli.push(self.tag, stream=True):
            print(line)
            if 'error' in str(line):
                ret = 0
        return ret


if __name__ == '__main__':
    if len(sys.argv) < 4:
        sys.exit(1)
    else:
        if sys.argv[1] == '0':
            deployer = ZoeDeploy(sys.argv[2], sys.argv[3], sys.argv[4])
            ret = deployer.deploy()
            if ret == 0:
                sys.exit(1)
        elif sys.argv[1] == '1':
            imghandler = ZoeImage(sys.argv[2], sys.argv[3])
            ret = imghandler.build()
            if ret == 0:
                sys.exit(1)
        elif sys.argv[1] == '2':
            imghandler = ZoeImage(sys.argv[2], sys.argv[3])
            ret = imghandler.push()
            if ret == 0:
                sys.exit(1)
#!/usr/bin/env python3

# Copyright (c) 2016, Quang-Nhat Hoang-Xuan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""ZOE CI entry point."""

import yaml
import sys
from typing import Iterable, Callable, Dict, Any, Union

import docker
from docker import Client

from utils.DockerContainerParameter import DockerContainerParameter
from deploy.frontenddeploy import ZoeFrontendDeploy
from deploy.backenddeploy import ZoeBackendDeploy


class ZoeDeploy():
    def __init__(self, dockerUrl, dockerComposePath, image):
        self.currentImage = image
        self.typeDeploy = 1 if 'prod' in dockerComposePath else 0
        self.backend = ZoeBackendDeploy(dockerUrl, dockerComposePath)
        self.frontend = ZoeFrontendDeploy(dockerUrl, 'apache2')

    def deploy(self):
        """ Deploy zoe backend and frontend """
        try:
            retBE = self.backend.deploy(self.currentImage)
            print('Deployed BE with latest image...')
            if self.typeDeploy == 1 and retBE == 0:
                print('Redeploy BE with previous image')
                self.backend.deploy(self.backend.previousImage)

            retFE = 1
            if self.typeDeploy == 1:
                retFE = self.frontend.deploy()
                print('Deployed FE with latest codes...')
                if retFE == 0 or retBE == 0:
                    retFE = self.frontend.fallback()
        except Exception as ex:
            print(ex)
            retBE = 0
        return (retBE and retFE)


class ZoeImage():
    def __init__(self, dockerUrl, tag):
        self.cli = Client(base_url=dockerUrl)
        self.tag = tag

    def build(self):
        """ Build docker image """
        ret = 1
        for line in self.cli.build(path='.', tag=self.tag, rm=True):
            print(line)
            if 'error' in str(line):
                ret = 0
        return ret

    def push(self):
        """ Push docker image """
        ret = 1
        for line in self.cli.push(self.tag, stream=True):
            print(line)
            if 'error' in str(line):
                ret = 0
        return ret


if __name__ == '__main__':
    if len(sys.argv) < 4:
        sys.exit(1)
    else:
        if sys.argv[1] == '0':
            deployer = ZoeDeploy(sys.argv[2], sys.argv[3], sys.argv[4])
            ret = deployer.deploy()
            if ret == 0:
                sys.exit(1)
        elif sys.argv[1] == '1':
            imghandler = ZoeImage(sys.argv[2], sys.argv[3])
            ret = imghandler.build()
            if ret == 0:
                sys.exit(1)
        elif sys.argv[1] == '2':
            imghandler = ZoeImage(sys.argv[2], sys.argv[3])
            ret = imghandler.push()
            if ret == 0:
                sys.exit(1)
Python
0.000001
89766874e7ef17bdce4cfa7cae9898336928c19e
Remove satellites from JSON
modules/gy-gps6mv1/core/get.py
modules/gy-gps6mv1/core/get.py
#! /usr/bin/python
# Written by Dan Mandle http://dan.mandle.me September 2012
# Modified by Broda Noel @brodanoel (in all social networks)
# License: GPL 2.0

from gps import *
from time import *
import time
import threading
import sys

gpsd = None #seting the global variable

class GpsPoller(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        global gpsd #bring it in scope
        gpsd = gps(mode=WATCH_ENABLE) #starting the stream of info
        self.current_value = None
        self.running = True #setting the thread running to true

    def run(self):
        global gpsd
        while gpsp.running:
            gpsd.next() #this will continue to loop and grab EACH set of gpsd info to clear the buffer

if __name__ == '__main__':
    gpsp = GpsPoller() # create the thread
    try:
        gpsp.start() # start it up
        attempts = 0
        gotData = False
        while gotData == False and attempts < 3:
            #It may take a second or two to get good data
            if gpsd.fix.latitude != 0 or gpsd.fix.longitude != 0:
                gotData = True
                attempts += 1
                print '{'
                print 'latitude:', gpsd.fix.latitude, ','
                print 'longitude:', gpsd.fix.longitude, ','
                print 'time:', gpsd.fix.time, ','
                print 'utcTime:', gpsd.utc, ','
                print 'altitude:', gpsd.fix.altitude, ','
                print 'eps:', gpsd.fix.eps, ','
                print 'epx:', gpsd.fix.epx, ','
                print 'epv:', gpsd.fix.epv, ','
                print 'ept:', gpsd.fix.ept, ','
                print 'speed:', gpsd.fix.speed, ','
                print 'climb:', gpsd.fix.climb, ','
                print 'track:', gpsd.fix.track, ','
                print 'mode:', gpsd.fix.mode, ','
                #print 'satellites:', gpsd.satellites
                print '}'
                sys.exit()
            else:
                time.sleep(1) #set to whatever
    except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
        gpsp.running = False
        gpsp.join() # wait for the thread to finish what it's doing
#! /usr/bin/python
# Written by Dan Mandle http://dan.mandle.me September 2012
# Modified by Broda Noel @brodanoel (in all social networks)
# License: GPL 2.0

from gps import *
from time import *
import time
import threading
import sys

gpsd = None #seting the global variable

class GpsPoller(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        global gpsd #bring it in scope
        gpsd = gps(mode=WATCH_ENABLE) #starting the stream of info
        self.current_value = None
        self.running = True #setting the thread running to true

    def run(self):
        global gpsd
        while gpsp.running:
            gpsd.next() #this will continue to loop and grab EACH set of gpsd info to clear the buffer

if __name__ == '__main__':
    gpsp = GpsPoller() # create the thread
    try:
        gpsp.start() # start it up
        attempts = 0
        gotData = False
        while gotData == False and attempts < 3:
            #It may take a second or two to get good data
            if gpsd.fix.latitude != 0 or gpsd.fix.longitude != 0:
                gotData = True
                attempts += 1
                print '{'
                print 'latitude:', gpsd.fix.latitude, ','
                print 'longitude:', gpsd.fix.longitude, ','
                print 'time:', gpsd.fix.time, ','
                print 'utcTime:', gpsd.utc, ','
                print 'altitude:', gpsd.fix.altitude, ','
                print 'eps:', gpsd.fix.eps, ','
                print 'epx:', gpsd.fix.epx, ','
                print 'epv:', gpsd.fix.epv, ','
                print 'ept:', gpsd.fix.ept, ','
                print 'speed:', gpsd.fix.speed, ','
                print 'climb:', gpsd.fix.climb, ','
                print 'track:', gpsd.fix.track, ','
                print 'mode:', gpsd.fix.mode, ','
                print 'satellites:', gpsd.satellites
                print '}'
                sys.exit()
            else:
                time.sleep(1) #set to whatever
    except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
        gpsp.running = False
        gpsp.join() # wait for the thread to finish what it's doing
Python
0.000003
3685715cd260f4f5ca392caddf7fb0c01af9ebcc
Add in comments for orgs and places too, remove limit
mzalendo/comments2/feeds.py
mzalendo/comments2/feeds.py
from disqus.wxr_feed import ContribCommentsWxrFeed

# from comments2.models import Comment
from core.models import Person, Place, Organisation

# http://help.disqus.com/customer/portal/articles/472150-custom-xml-import-format

class CommentWxrFeed(ContribCommentsWxrFeed):
    link = "/"

    def items(self):
        list = []
        list.extend( Person.objects.all() )
        list.extend( Organisation.objects.all() )
        list.extend( Place.objects.all() )
        return list

    def item_pubdate(self, item):
        return item.created

    def item_description(self, item):
        return str(item)

    def item_guid(self, item):
        # set to none so that the output dsq:thread_identifier is empty
        return None

    def item_comments(self, item):
        return item.comments.all()

    def comment_user_name(self, comment):
        return str(comment.user)

    def comment_user_email(self, comment):
        return comment.user.email or str(comment.id) + '@bogus-email-address.com'

    def comment_user_url(self, comment):
        return None

    def comment_is_approved(self, comment):
        return 1
from disqus.wxr_feed import ContribCommentsWxrFeed

# from comments2.models import Comment
from core.models import Person

# http://help.disqus.com/customer/portal/articles/472150-custom-xml-import-format

class CommentWxrFeed(ContribCommentsWxrFeed):
    link = "/"

    def items(self):
        return Person.objects.all()[:5] # remove [:5] before generating full dump

    def item_pubdate(self, item):
        return item.created

    def item_description(self, item):
        return str(item)

    def item_guid(self, item):
        # set to none so that the output dsq:thread_identifier is empty
        return None

    def item_comments(self, item):
        return item.comments.all()

    def comment_user_name(self, comment):
        return str(comment.user)

    def comment_user_email(self, comment):
        return comment.user.email or str(comment.id) + '@bogus-email-address.com'

    def comment_user_url(self, comment):
        return None

    def comment_is_approved(self, comment):
        return 1
Python
0
fe998a48be769f6a957611584145706b71385cc9
Fix airflow jobs check cmd for TriggererJob (#19185)
airflow/jobs/__init__.py
airflow/jobs/__init__.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import airflow.jobs.backfill_job
import airflow.jobs.base_job
import airflow.jobs.local_task_job
import airflow.jobs.scheduler_job
import airflow.jobs.triggerer_job  # noqa
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import airflow.jobs.backfill_job
import airflow.jobs.base_job
import airflow.jobs.local_task_job
import airflow.jobs.scheduler_job  # noqa
Python
0
d266de64cbcc7ed8672e9bb61cdb966870fccfdc
Use random.choice() & reduce len() duplication
alg_percentile_select.py
alg_percentile_select.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random


def percentile_select(ls, k):
    """Kth percentile selection algorithm.

    Just select the kth element, without caring about
    the relative ordering of the rest of them.

    The algorithm performs in place without allocating
    new memory for the three sublists using three pointers.

    Time complexity: O(n).
    """
    v = random.choice(ls)
    idx_eq_v = [i for i, a in enumerate(ls) if a == v]
    idx_le_v = [i for i, a in enumerate(ls) if a < v]
    idx_ge_v = [i for i, a in enumerate(ls) if a > v]

    n_le = len(idx_le_v)
    n_eq = len(idx_eq_v)

    if k <= n_le:
        le_v_ls = [ls[idx] for idx in idx_le_v]
        return percentile_select(le_v_ls, k)
    elif n_le < k <= n_le + n_eq:
        return v
    elif k > n_le + n_eq:
        ge_v_ls = [ls[idx] for idx in idx_ge_v]
        return percentile_select(ge_v_ls, k - n_le - n_eq)


def main():
    n = 100
    ls = range(n)
    random.shuffle(ls)

    print('Get median by selection:')
    print(percentile_select(ls, n // 2))

    print('Get min by selection:')
    print(percentile_select(ls, 1))

    print('Get max by selection:')
    print(percentile_select(ls, n))


if __name__ == '__main__':
    main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random


def percentile_select(ls, k):
    """Kth percentile selection algorithm.

    Just select the kth element, without caring about
    the relative ordering of the rest of them.

    The algorithm performs in place without allocating
    new memory for the three sublists using three pointers.

    Time complexity: O(n).
    """
    v = random.sample(ls, 1)[0]
    idx_eq_v = [i for i, a in enumerate(ls) if a == v]
    idx_le_v = [i for i, a in enumerate(ls) if a < v]
    idx_ge_v = [i for i, a in enumerate(ls) if a > v]

    if k <= len(idx_le_v):
        le_v_ls = [ls[idx] for idx in idx_le_v]
        return percentile_select(le_v_ls, k)
    elif len(idx_le_v) < k <= len(idx_le_v) + len(idx_eq_v):
        return v
    elif k > len(idx_le_v) + len(idx_eq_v):
        ge_v_ls = [ls[idx] for idx in idx_ge_v]
        return percentile_select(ge_v_ls, k - len(idx_le_v) - len(idx_eq_v))


def main():
    n = 100
    ls = range(n)
    random.shuffle(ls)

    print('List: {}'.format(ls))

    print('Get median by selection:')
    print(percentile_select(ls, n // 2))

    print('Get min by selection:')
    print(percentile_select(ls, 1))

    print('Get max by selection:')
    print(percentile_select(ls, n))


if __name__ == '__main__':
    main()
Python
0.000677
d1c16f90ca86bc1bd11a81f021d8317a82902a69
print annotation
ui/app/models.py
ui/app/models.py
from . import db


class Spans(db.Model):
    __tablename__ = 'zipkin_spans'

    span_id = db.Column(db.Integer)
    parent_id = db.Column(db.Integer)
    trace_id = db.Column(db.Integer)
    span_name = db.Column(db.String(255))
    debug = db.Column(db.Integer)
    duration = db.Column(db.Integer)
    created_ts = db.Column(db.Integer)

    def __repr__(self):
        return '<Span %r>' % self.span_name


class Annotations(db.Model):
    __tablename__ = 'zipkin_annotations'

    span_id = db.Column(db.Integer)
    trace_id = db.Column(db.Integer)
    span_name = db.Column(db.String(255))
    service_name = db.Column(db.String(255))
    value = db.Column(db.Text)
    ipv4 = db.Column(db.Integer)
    port = db.Column(db.Integer)
    a_timestamp = db.Column(db.Integer)
    duration = db.Column(db.Integer)

    def __repr__(self):
        return '<Annotation %r - %r>' % (self.span_name, self.service_name)
from . import db


class Spans(db.Model):
    __tablename__ = 'zipkin_spans'

    span_id = db.Column(db.Integer)
    parent_id = db.Column(db.Integer)
    trace_id = db.Column(db.Integer)
    span_name = db.Column(db.String(255))
    debug = db.Column(db.Integer)
    duration = db.Column(db.Integer)
    created_ts = db.Column(db.Integer)

    def __repr__(self):
        return '<Span %r>' % self.span_name


class Annotations(db.Model):
    __tablename__ = 'zipkin_annotations'

    span_id = db.Column(db.Integer)
    trace_id = db.Column(db.Integer)
    span_name = db.Column(db.String(255))
    service_name = db.Column(db.String(255))
    value = db.Column(db.Text)
    ipv4 = db.Column(db.Integer)
    port = db.Column(db.Integer)
    a_timestamp = db.Column(db.Integer)
    duration = db.Column(db.Integer)
Python
0.000009
b38555ff465f59333f32c2bb556f6b7a236e288b
disable traceview for now
seabus/web/web.py
seabus/web/web.py
from flask import Flask
import oboe
from oboeware import OboeMiddleware

from seabus.web.blueprint import blueprint
from seabus.common.database import db
from seabus.web.socketio import socketio


def create_app(config=None):
    app = Flask(__name__)

    if config is not None:
        app.config.from_object('seabus.web.config.{}'.format(config))
    else:
        app.config.from_object('seabus.web.config.Dev')

    socketio.init_app(app)
    app.register_blueprint(blueprint)
    db.init_app(app)
    tv_app = OboeMiddleware(app)
    return app
from flask import Flask
import oboe
from oboeware import OboeMiddleware

from seabus.web.blueprint import blueprint
from seabus.common.database import db
from seabus.web.socketio import socketio


def create_app(config=None):
    app = Flask(__name__)

    if config is not None:
        app.config.from_object('seabus.web.config.{}'.format(config))
    else:
        app.config.from_object('seabus.web.config.Dev')

    socketio.init_app(app)
    app.register_blueprint(blueprint)
    db.init_app(app)
    #TODO: tv_app = OboeMiddleware(app)
    return app
Python
0
b5241e62cb7cc09b5d469f1cf3908fa1d7cedc21
Tweak the settings.
gobble/settings.py
gobble/settings.py
"""User configurable settings""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from future import standard_library standard_library.install_aliases() from os import getenv from logging import DEBUG, INFO from os.path import expanduser, join, abspath _home = abspath(join(expanduser('~'))) class Production(object): JSON_INDENT = 4 EXPANDED_LOG_STYLE = True CONSOLE_LOG_LEVEL = DEBUG FILE_LOG_LEVEL = DEBUG FILE_LOG_FORMAT = '[%(asctime)s] [%(module)s] [%(levelname)s] %(message)s' CONSOLE_LOG_FORMAT = '[%(name)s] [%(levelname)s] %(message)s' OS_URL = 'http://next.openspending.org' DATAPACKAGE_DETECTION_THRESHOLD = 1 VALIDATION_FEEDBACK_OPTIONS = ['message'] DATAFILE_HASHING_BLOCK_SIZE = 65536 CONFIG_DIR = join(_home, '.gobble') CONFIG_FILE = join(_home, '.gobble', 'settings.json') TOKEN_FILE = join(_home, '.gobble', 'token.json') LOG_FILE = join(_home, '.gobble', 'user.log') MOCK_REQUESTS = False LOCALHOST = ('127.0.0.1', 8001) class Development(Production): CONSOLE_LOG_LEVEL = DEBUG FILE_LOG_LEVEL = None LOG_FILE = None OS_URL = 'http://dev.openspending.org' CONFIG_DIR = join(_home, '.gobble.dev') CONFIG_FILE = join(_home, '.gobble.dev', 'config.json') TOKEN_FILE = join(_home, '.gobble.dev', 'token.json') MOCK_REQUESTS = bool(getenv('GOBBLE_MOCK_REQUESTS', False)) CONSOLE_LOG_FORMAT = ('[%(name)s] ' '[%(asctime)s] ' '[%(module)s] ' '[%(funcName)s] ' '[%(lineno)d] ' '[%(levelname)s] ' '%(message)s') class Testing(Production): MOCK_REQUESTS = True
"""User configurable settings""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from future import standard_library standard_library.install_aliases() from os import getenv from logging import DEBUG, INFO from os.path import expanduser, join, abspath _home = abspath(join(expanduser('~'))) class Production(object): CONSOLE_LOG_LEVEL = INFO FILE_LOG_LEVEL = DEBUG FILE_LOG_FORMAT = '[%(asctime)s] [%(module)s] [%(levelname)s] %(message)s' CONSOLE_LOG_FORMAT = '[%(name)s] [%(module)s] [%(levelname)s] %(message)s' OS_URL = 'http://next.openspending.org' DATAPACKAGE_DETECTION_THRESHOLD = 1 VALIDATION_FEEDBACK_OPTIONS = ['message'] DATAFILE_HASHING_BLOCK_SIZE = 65536 CONFIG_DIR = join(_home, '.gobble') CONFIG_FILE = join(_home, '.gobble', 'settings.json') TOKEN_FILE = join(_home, '.gobble', 'token.json') LOG_FILE = join(_home, '.gobble', 'user.log') MOCK_REQUESTS = False LOCALHOST = ('127.0.0.1', 8001) class Development(Production): CONSOLE_LOG_LEVEL = DEBUG FILE_LOG_LEVEL = None LOG_FILE = None OS_URL = 'http://dev.openspending.org' CONFIG_DIR = join(_home, '.gobble.dev') CONFIG_FILE = join(_home, '.gobble.dev', 'config.json') TOKEN_FILE = join(_home, '.gobble.dev', 'token.json') MOCK_REQUESTS = bool(getenv('GOBBLE_MOCK_REQUESTS', False)) class Testing(Production): MOCK_REQUESTS = True
Python
0
2050017ced613f5c0282dcfaf07494b8dbcc8e41
Update ipc_lista2.05.py
lista2/ipc_lista2.05.py
lista2/ipc_lista2.05.py
#ipc_lista2.05
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa para a leitura de duas notas parciais de um aluno. O programa deve calcular a média alcançada por aluno e apresentar:
#--A mensagem "Aprovado", se a média alcançada for maior ou igual a sete;
#--A mensagem "Reprovado", se a média for menor que sete;
#--A mensagem "Aprovado com Distincao", se a média for igual a dez.

n1 = int(input("Insira a primeira nota: "))
n2 = int(input("Insira a segunda nota: "))
media = (n1+n2)/2
#ipc_lista2.05
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa para a leitura de duas notas parciais de um aluno. O programa deve calcular a média alcançada por aluno e apresentar:
#--A mensagem "Aprovado", se a média alcançada for maior ou igual a sete;
#--A mensagem "Reprovado", se a média for menor que sete;
#--A mensagem "Aprovado com Distincao", se a média for igual a dez.

n1 = int(input("Insira a primeira nota: "))
n2 = int(input("Insira a segunda nota: "))
media = (n1+n2)
Python
0
e1fb17476770620546d0bd244b35591b99ba6ea7
Revert 7392f01f for pkg_resources/extern. 3.3 is the right signal there.
pkg_resources/extern/__init__.py
pkg_resources/extern/__init__.py
import sys


class VendorImporter:
    """
    A PEP 302 meta path importer for finding optionally-vendored
    or otherwise naturally-installed packages from root_name.
    """

    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
        self.root_name = root_name
        self.vendored_names = set(vendored_names)
        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')

    @property
    def search_path(self):
        """
        Search first the vendor package then as a natural package.
        """
        yield self.vendor_pkg + '.'
        yield ''

    def find_module(self, fullname, path=None):
        """
        Return self when fullname starts with root_name and the
        target module is one vendored through this importer.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        if root:
            return
        if not any(map(target.startswith, self.vendored_names)):
            return
        return self

    def load_module(self, fullname):
        """
        Iterate over the search path to locate and load fullname.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        for prefix in self.search_path:
            try:
                extant = prefix + target
                __import__(extant)
                mod = sys.modules[extant]
                sys.modules[fullname] = mod
                # mysterious hack:
                # Remove the reference to the extant package/module
                # on later Python versions to cause relative imports
                # in the vendor package to resolve the same modules
                # as those going through this importer.
                if sys.version_info > (3, 3):
                    del sys.modules[extant]
                return mod
            except ImportError:
                pass
        else:
            raise ImportError(
                "The '{target}' package is required; "
                "normally this is bundled with this package so if you get "
                "this warning, consult the packager of your "
                "distribution.".format(**locals())
            )

    def install(self):
        """
        Install this importer into sys.meta_path if not already present.
        """
        if self not in sys.meta_path:
            sys.meta_path.append(self)


names = 'packaging', 'pyparsing', 'six', 'appdirs'
VendorImporter(__name__, names).install()
import sys


class VendorImporter:
    """
    A PEP 302 meta path importer for finding optionally-vendored
    or otherwise naturally-installed packages from root_name.
    """

    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
        self.root_name = root_name
        self.vendored_names = set(vendored_names)
        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')

    @property
    def search_path(self):
        """
        Search first the vendor package then as a natural package.
        """
        yield self.vendor_pkg + '.'
        yield ''

    def find_module(self, fullname, path=None):
        """
        Return self when fullname starts with root_name and the
        target module is one vendored through this importer.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        if root:
            return
        if not any(map(target.startswith, self.vendored_names)):
            return
        return self

    def load_module(self, fullname):
        """
        Iterate over the search path to locate and load fullname.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        for prefix in self.search_path:
            try:
                extant = prefix + target
                __import__(extant)
                mod = sys.modules[extant]
                sys.modules[fullname] = mod
                # mysterious hack:
                # Remove the reference to the extant package/module
                # on later Python versions to cause relative imports
                # in the vendor package to resolve the same modules
                # as those going through this importer.
                if sys.version_info.major >= 3:
                    del sys.modules[extant]
                return mod
            except ImportError:
                pass
        else:
            raise ImportError(
                "The '{target}' package is required; "
                "normally this is bundled with this package so if you get "
                "this warning, consult the packager of your "
                "distribution.".format(**locals())
            )

    def install(self):
        """
        Install this importer into sys.meta_path if not already present.
        """
        if self not in sys.meta_path:
            sys.meta_path.append(self)


names = 'packaging', 'pyparsing', 'six', 'appdirs'
VendorImporter(__name__, names).install()
Python
0
f3da1fab9af2279182a09922aae00fcee73a92ee
Fix imports for Django >= 1.6
goog/middleware.py
goog/middleware.py
from django.conf import settings
try:
    from django.conf.urls.defaults import patterns, include
except ImportError:
    # Django >= 1.6
    from django.conf.urls import patterns, include

import goog.urls
from goog import utils


class GoogDevelopmentMiddleware(object):

    def devmode_enabled(self, request):
        """Returns True iff the devmode is enabled."""
        return utils.is_devmode()

    def process_request(self, request):
        # This urlconf patching is inspired by debug_toolbar.
        # https://github.com/robhudson/django-debug-toolbar
        if self.devmode_enabled(request):
            original_urlconf = getattr(request, 'urlconf',
                                       settings.ROOT_URLCONF)
            if original_urlconf != 'goog.urls':
                goog.urls.urlpatterns += patterns(
                    '',
                    ('', include(original_urlconf)),
                )
                request.urlconf = 'goog.urls'
from django.conf import settings
from django.conf.urls.defaults import patterns, include

import goog.urls
from goog import utils


class GoogDevelopmentMiddleware(object):

    def devmode_enabled(self, request):
        """Returns True iff the devmode is enabled."""
        return utils.is_devmode()

    def process_request(self, request):
        # This urlconf patching is inspired by debug_toolbar.
        # https://github.com/robhudson/django-debug-toolbar
        if self.devmode_enabled(request):
            original_urlconf = getattr(request, 'urlconf',
                                       settings.ROOT_URLCONF)
            if original_urlconf != 'goog.urls':
                goog.urls.urlpatterns += patterns(
                    '',
                    ('', include(original_urlconf)),
                )
                request.urlconf = 'goog.urls'
Python
0
91178909bab31e9db42d86d5783152890f65795d
update cms
cms/urls.py
cms/urls.py
from django.conf.urls import url
from cms import views
from django.contrib.auth import views as auth_views

urlpatterns = [
    # 一覧
    url(r'^dailyreport/$', views.daily_list, name='daily_list'),

    # 日報操作
    url(r'^dailyreport/add/$', views.daily_edit, name='daily_add'),  # 登録
    url(r'^dailyreport/mod/(?P<daily_id>\d+)/$', views.daily_edit, name='daily_mod'),  # 修正
    url(r'^dailyreport/del/(?P<daily_id>\d+)/$', views.daily_del, name='daily_del'),  # 削除

    # コメント操作
    url(r'^dailyreport/comment/add/(?P<daily_id>\d+)/$', views.comment_edit, name='comment_add'),  # 登録
    url(r'^dailyreport/comment/mod/(?P<daily_id>\d+)/(?P<comment_id>\d+)/$', views.comment_edit, name='comment_mod'),  # 修正

    # 詳細
    url(r'^dailyreport/detail/(?P<pk>\d+)/$', views.daily_detail.as_view(), name='daily_detail'),  # 削除
]
from django.conf.urls import url
from cms import views
from django.contrib.auth import views as auth_views

urlpatterns = [
    # 一覧
    url(r'^dailyreport/$', views.daily_list, name='daily_list'),

    # 日報操作
    url(r'^dailyreport/add/$', views.daily_edit, name='daily_add'),  # 登録
    url(r'^dailyreport/mod/(?P<daily_id>\d+)/$', views.daily_edit, name='daily_mod'),  # 修正
    url(r'^dailyreport/del/(?P<daily_id>\d+)/$', views.daily_del, name='daily_del'),  # 削除

    # コメント操作
    url(r'^dailyreport/comment/add/(?P<daily_id>\d+)/$', views.comment_edit, name='comment_add'),  # 登録
    url(r'^dailyreport/comment/mod/(?P<daily_id>\d+)/(?P<impression_id>\d+)/$', views.comment_edit, name='comment_mod'),  # 修正

    # 詳細
    url(r'^dailyreport/detail/(?P<daily_id>\d+)/$', views.daily_detail.as_view, name='daily_detail'),  # 削除
]
Python
0
b7d29e2a67c314b5d1aff343eef1a9ca2c3b0cbe
add dbl integration
cogs/dbl.py
cogs/dbl.py
import dbl
from cogs.cog import Cog
import logging
import asyncio
from threading import Thread

logger = logging.getLogger('debug')


class DBApi(Cog):
    def __init__(self, bot):
        super().__init__(bot)
        self._token = self.bot.config.dbl_token
        self.dbl = dbl.Client(self.bot, self._token)
        if not self.bot.test_mode:
            self.update_task = self.bot.loop.create_task(self.update_stats())

        try:
            from sanic import Sanic
            from sanic.response import json
        except ImportError:
            return

        self.server = Thread(target=self.run_webhook_server, args=(self.bot.loop,))
        self.server.start()

    async def update_stats(self):
        while True:
            logger.info('Posting server count')
            try:
                await self.dbl.post_server_count()
                logger.info(f'Posted server count {len(self.bot.guilds)}')
            except Exception as e:
                logger.exception(f'Failed to post server count\n{e}')

            await asyncio.sleep(3600)

    def run_webhook_server(self, main_loop):
        asyncio.new_event_loop()
        app = Sanic()

        @app.route("/webhook", methods=["POST", ])
        async def webhook(request):
            if request.headers.get('Authorization') != self.bot.config.dbl_auth:
                logger.warning('Unauthorized webhook access')
                return

            js = request.json
            main_loop.create_task(self.on_vote(int(js['bot']), int(js['user']),
                                               js['type'], js['isWeekend']))
            return json({'a': 'a'}, status=200)

        if __name__ == "__main__":
            app.run(host=self.bot.config.dbl_server, port=self.bot.config.dbl_port)

    async def on_vote(self, bot: int, user: int, type: str, is_weekend: bool):
        print(f'{user} voted on bot {bot}')


def setup(bot):
    bot.add_cog(DBApi(bot))
import dbl
from cogs.cog import Cog
import logging
import asyncio
from threading import Thread

logger = logging.getLogger('debug')


class DBApi(Cog):
    def __init__(self, bot):
        super().__init__(bot)
        self._token = self.bot.config.dbl_token
        self.dbl = dbl.Client(self.bot, self._token)
        if not self.bot.test_mode:
            self.update_task = self.bot.loop.create_task(self.update_stats())

        try:
            from sanic import Sanic
            from sanic.response import json
        except ImportError:
            return

        self.server = Thread()

    async def update_stats(self):
        while True:
            logger.info('Posting server count')
            try:
                await self.dbl.post_server_count()
                logger.info(f'Posted server count {len(self.bot.guilds)}')
            except Exception as e:
                logger.exception(f'Failed to post server count\n{e}')

            await asyncio.sleep(3600)

    def run_webhook_server(self, main_loop):
        asyncio.new_event_loop()
        app = Sanic()

        @app.route("/webhook", methods=["POST", ])
        async def webhook(request):
            if request.headers.get('Authorization') != self.bot.config.dbl_auth:
                logger.warning('Unauthorized webhook access')
                return

            js = request.json
            main_loop.create_task(self.on_vote(int(js['bot']), int(js['user']),
                                               js['type'], js['isWeekend']))
            return json({'a': 'a'}, status=200)

        if __name__ == "__main__":
            app.run(host=self.bot.config.dbl_server, port=self.bot.config.dbl_port)

    async def on_vote(self, bot: int, user: int, type: str, is_weekend: bool):
        print(f'{user} voted on bot {bot}')


def setup(bot):
    bot.add_cog(DBApi(bot))
Python
0
1212966326eb096e10b52277b0c6b53126262e3b
Improve messages in example
examples/basic_usage.py
examples/basic_usage.py
import os

from twilio.twiml import Response
from twilio.rest import Client

ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')


def example():
    """
    Some example usage of different twilio resources.
    """
    client = Client(ACCOUNT_SID, AUTH_TOKEN)

    # Get all messages
    all_messages = client.messages.list()
    print('There are {} messages in your account.'.format(len(all_messages)))

    # Get only last 10 messages...
    some_messages = client.messages.list(limit=10)
    print('Here are the last 10 messages in your account:')
    for m in some_messages:
        print(m)

    # Get messages in smaller pages...
    all_messages = client.messages.list(page_size=10)
    print('There are {} messages in your account.'.format(len(all_messages)))

    print('Sending a message...')
    new_message = client.messages.create(to='XXXX', from_='YYYY', body='Twilio rocks!')

    print('Making a call...')
    new_call = client.calls.create(to='XXXX', from_='YYYY', method='GET')

    print('Serving TwiML')
    twiml_response = Response()
    twiml_response.say('Hello!')
    twiml_response.hangup()
    twiml_xml = twiml_response.toxml()
    print('Generated twiml: {}'.format(twiml_xml))


if __name__ == '__main__':
    example()
import os

from twilio.twiml import Response
from twilio.rest import Client

ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')


def example():
    """
    Some example usage of different twilio resources.
    """
    client = Client(ACCOUNT_SID, AUTH_TOKEN)

    print('Get all the messages...')
    all_messages = client.messages.list()
    print('There are {} messages in your account.'.format(len(all_messages)))

    print('Get only last 10 messages...')
    some_messages = client.messages.list(limit=10)

    print('Get messages in smaller pages...')
    some_messages = client.messages.list(page_size=10)

    print('Sending a message...')
    new_message = client.messages.create(to='XXXX', from_='YYYY', body='Twilio rocks!')

    print('Making a call...')
    new_call = client.calls.create(to='XXXX', from_='YYYY', method='GET')

    print('Serving TwiML')
    twiml_response = Response()
    twiml_response.say('Hello!')
    twiml_response.hangup()
    twiml_xml = twiml_response.toxml()
    print('Generated twiml: {}'.format(twiml_xml))


if __name__ == '__main__':
    example()
Python
0.000028
9b5dc2f9998d374263b2e1d35d6b5cfc7a831b1e
undo setuid on return
univention-openvpn/openvpn-master2.py
univention-openvpn/openvpn-master2.py
#
# Univention OpenVPN integration -- openvpn-master2.py
#

__package__ = ''  # workaround for PEP 366

import listener
from univention import debug as ud
import univention.uldap as ul
from datetime import date
from M2Crypto import RSA, BIO
from base64 import b64decode

name = 'openvpn-master2'
description = 'create user openvpn package with updated config'
filter = '(&(objectClass=univentionOpenvpn)(univentionOpenvpnActive=1))'
attributes = ['univentionOpenvpnPort', 'univentionOpenvpnAddress']
modrdn = 1

pubbio = BIO.MemoryBuffer('''
-----BEGIN PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAN0VVx22Oou8UTDsrug/UnZLiX2UcXeE
GvQ6kWcXBhqvSUl0cVavYL5Su45RXz7CeoImotwUzrVB8JnsIcrPYw8CAwEAAQ==
-----END PUBLIC KEY-----
''')
pub = RSA.load_pub_key_bio(pubbio)
pbs = pub.__len__() / 8


def license(key):
    try:
        enc = b64decode(key)
        raw = ''
        while len(enc) > pbs:
            d, key = (enc[:pbs], enc[pbs:])
            raw = raw + pub.public_decrypt(d, 1)
        if len(enc) != pbs:
            return None  # invalid license
        raw = raw + pub.public_decrypt(enc, 1)
        #
        items = raw.rstrip().split('\n')
        if not items:
            return None  # invalid license
        vdate = int(items.pop(0))
        if date.today().toordinal() > vdate:
            return None  # expired
        l = {'valid': True}  # at least one feature returned
        while items:
            kv = items.pop(0).split('=', 1)
            kv.append(True)
            l[kv[0]] = kv[1]
        return l  # valid license
    except:
        return None  # invalid license


def maxvpnusers(key):
    mnlu = 5
    try:
        return max(int(license(key)['u']), mnlu)
    except:
        return mnlu  # invalid license


# called to create (update) bundle for user when openvpn is activated
def handler(dn, new, old, cmd):
    ud.debug(ud.LISTENER, ud.INFO, 'openvpn-master2.handler() invoked')

    if cmd == 'n':
        return

    name = new.get('cn', [None])[0]
    port = new.get('univentionOpenvpnPort', [None])[0]
    addr = new.get('univentionOpenvpnAddress', [None])[0]

    if not name or not port or not addr:
        return

    listener.setuid(0)
    lo = ul.getAdminConnection()

    vpnusers = lo.search('(univentionOpenvpnAccount=1)')
    vpnuc = len(vpnusers)
    licuc = 5
    maxu = maxvpnusers(new.get('univentionOpenvpnLicense', [None])[0])
    ud.debug(ud.LISTENER, ud.INFO, 'openvpn/handler: found %u active openvpn users (%u allowed)' % (vpnuc, maxu))
    if vpnuc > maxu:
        listener.unsetuid()
        return  # do nothing

    for user in vpnusers:
        uid = user[1].get('uid', [None])[0]
        home = user[1].get('homeDirectory', [None])[0]
        ud.debug(ud.LISTENER, ud.INFO, 'openvpn/handler: create new certificate for %s in %s' % (uid, home))

        if uid and home:
            # update bundle for this openvpn server with new config
            try:
                listener.run('/usr/lib/openvpn-int/create-bundle',
                             ['create-bundle', 'no', uid, home, name, addr, port], uid=0)
            finally:
                listener.unsetuid()

    listener.unsetuid()


### end ###
#
# Univention OpenVPN integration -- openvpn-master2.py
#

__package__ = ''  # workaround for PEP 366

import listener
from univention import debug as ud
import univention.uldap as ul
from datetime import date
from M2Crypto import RSA, BIO
from base64 import b64decode

name = 'openvpn-master2'
description = 'create user openvpn package with updated config'
filter = '(&(objectClass=univentionOpenvpn)(univentionOpenvpnActive=1))'
attributes = ['univentionOpenvpnPort', 'univentionOpenvpnAddress']
modrdn = 1

pubbio = BIO.MemoryBuffer('''
-----BEGIN PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAN0VVx22Oou8UTDsrug/UnZLiX2UcXeE
GvQ6kWcXBhqvSUl0cVavYL5Su45RXz7CeoImotwUzrVB8JnsIcrPYw8CAwEAAQ==
-----END PUBLIC KEY-----
''')
pub = RSA.load_pub_key_bio(pubbio)
pbs = pub.__len__() / 8


def license(key):
    try:
        enc = b64decode(key)
        raw = ''
        while len(enc) > pbs:
            d, key = (enc[:pbs], enc[pbs:])
            raw = raw + pub.public_decrypt(d, 1)
        if len(enc) != pbs:
            return None  # invalid license
        raw = raw + pub.public_decrypt(enc, 1)
        #
        items = raw.rstrip().split('\n')
        if not items:
            return None  # invalid license
        vdate = int(items.pop(0))
        if date.today().toordinal() > vdate:
            return None  # expired
        l = {'valid': True}  # at least one feature returned
        while items:
            kv = items.pop(0).split('=', 1)
            kv.append(True)
            l[kv[0]] = kv[1]
        return l  # valid license
    except:
        return None  # invalid license


def maxvpnusers(key):
    mnlu = 5
    try:
        return max(int(license(key)['u']), mnlu)
    except:
        return mnlu  # invalid license


# called to create (update) bundle for user when openvpn is activated
def handler(dn, new, old, cmd):
    ud.debug(ud.LISTENER, ud.INFO, 'openvpn-master2.handler() invoked')

    if cmd == 'n':
        return

    name = new.get('cn', [None])[0]
    port = new.get('univentionOpenvpnPort', [None])[0]
    addr = new.get('univentionOpenvpnAddress', [None])[0]

    if not name or not port or not addr:
        return

    listener.setuid(0)
    lo = ul.getAdminConnection()

    vpnusers = lo.search('(univentionOpenvpnAccount=1)')
    vpnuc = len(vpnusers)
    licuc = 5
    maxu = maxvpnusers(new.get('univentionOpenvpnLicense', [None])[0])
    ud.debug(ud.LISTENER, ud.INFO, 'openvpn/handler: found %u active openvpn users (%u allowed)' % (vpnuc, maxu))
    if vpnuc > maxu:
        return  # do nothing

    for user in vpnusers:
        uid = user[1].get('uid', [None])[0]
        home = user[1].get('homeDirectory', [None])[0]
        ud.debug(ud.LISTENER, ud.INFO, 'openvpn/handler: create new certificate for %s in %s' % (uid, home))

        if uid and home:
            # update bundle for this openvpn server with new config
            try:
                listener.run('/usr/lib/openvpn-int/create-bundle',
                             ['create-bundle', 'no', uid, home, name, addr, port], uid=0)
            finally:
                listener.unsetuid()

    listener.unsetuid()


### end ###
Python
0.000002
5ab3f3d06216381b697781d80069354745110de1
make yaml put out unicode
plexlibrary/utils.py
plexlibrary/utils.py
# -*- coding: utf-8 -*-
import yaml
from yaml import Loader, SafeLoader


class Colors(object):
    RED = "\033[1;31m"
    BLUE = "\033[1;34m"
    CYAN = "\033[1;36m"
    GREEN = "\033[0;32m"
    RESET = "\033[0;0m"
    BOLD = "\033[;1m"
    REVERSE = "\033[;7m"


class YAMLBase(object):
    def __init__(self, filename):
        # Make sure pyyaml always returns unicode
        def construct_yaml_str(self, node):
            return self.construct_scalar(node)
        Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
        SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)

        with open(filename, 'r') as f:
            try:
                self.data = yaml.safe_load(f)
            except yaml.YAMLError as e:
                raise e

    def __getitem__(self, k):
        return self.data[k]

    def __iter__(self, k):
        return self.data.itervalues()
# -*- coding: utf-8 -*-
import yaml


class Colors(object):
    RED = "\033[1;31m"
    BLUE = "\033[1;34m"
    CYAN = "\033[1;36m"
    GREEN = "\033[0;32m"
    RESET = "\033[0;0m"
    BOLD = "\033[;1m"
    REVERSE = "\033[;7m"


class YAMLBase(object):
    def __init__(self, filename):
        with open(filename, 'r') as f:
            try:
                self.data = yaml.safe_load(f)
            except yaml.YAMLError as e:
                raise e

    def __getitem__(self, k):
        return self.data[k]

    def __iter__(self, k):
        return self.data.itervalues()
Python
0.000063
16bec17e7337fd1cbaef12934cfeae05a563719f
fix var scoping bug
inbox/util/url.py
inbox/util/url.py
from dns.resolver import Resolver
from dns.resolver import NoNameservers, NXDOMAIN, Timeout, NoAnswer
from urllib import urlencode

from inbox.log import get_logger
import re
log = get_logger('inbox.util.url')

from inbox.providers import providers

# http://www.regular-expressions.info/email.html
EMAIL_REGEX = re.compile(r'[A-Z0-9._%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,4}',
                         re.IGNORECASE)

# Use Google's Public DNS server (8.8.8.8)
dns_resolver = Resolver()
dns_resolver.nameservers = ['8.8.8.8']


class InvalidEmailAddressError(Exception):
    pass


def provider_from_address(email_address):
    if not EMAIL_REGEX.match(email_address):
        raise InvalidEmailAddressError('Invalid email address')

    domain = email_address.split('@')[1].lower()

    mx_records = []
    try:
        mx_records = dns_resolver.query(domain, 'MX')
    except NoNameservers:
        log.error("NoMXservers error", domain=domain)
    except NXDOMAIN:
        log.error("No such domain", domain=domain)
    except Timeout:
        log.error("Timed out while resolving", domain=domain)
    except NoAnswer:
        log.error("Provider didn't answer", domain=domain)

    ns_records = []
    try:
        ns_records = dns_resolver.query(domain, 'NS')
    except NoNameservers:
        log.error("NoNameservers error", domain=domain)
    except NXDOMAIN:
        log.error("No such domain", domain=domain)
    except Timeout:
        log.error("Timed out while resolving", domain=domain)
    except NoAnswer:
        log.error("Provider didn't answer", domain=domain)

    for (p_name, p) in providers.iteritems():
        mx_servers = p.get('mx_servers', [])
        ns_servers = p.get('ns_servers', [])
        domains = p.get('domains', [])
        if domain in domains:
            return p_name

        valid = True
        for rdata in mx_records:
            mx_domain = str(rdata.exchange).lower()
            # Depending on how the MX server is configured, domain may
            # refer to a relative name or to an absolute one.
            # FIXME @karim: maybe resolve the server instead.
            if mx_domain[-1] == '.':
                mx_domain = mx_domain[:-1]
            if mx_domain not in mx_servers:
                valid = False
                break

        if valid:
            return p_name

        for rdata in ns_records:
            if str(rdata).lower() not in ns_servers:
                valid = False
                break

        if valid:
            return p_name

    return 'unknown'


# From tornado.httputil
def url_concat(url, args, fragments=None):
    """Concatenate url and argument dictionary regardless of whether
    url has existing query parameters.

    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    """
    if not args and not fragments:
        return url

    # Strip off hashes
    while url[-1] == '#':
        url = url[:-1]

    fragment_tail = ''
    if fragments:
        fragment_tail = '#' + urlencode(fragments)

    args_tail = ''
    if args:
        if url[-1] not in ('?', '&'):
            args_tail += '&' if ('?' in url) else '?'
        args_tail += urlencode(args)

    return url + args_tail + fragment_tail
from dns.resolver import Resolver
from dns.resolver import NoNameservers, NXDOMAIN, Timeout, NoAnswer
from urllib import urlencode

from inbox.log import get_logger
import re
log = get_logger('inbox.util.url')

from inbox.providers import providers

# http://www.regular-expressions.info/email.html
EMAIL_REGEX = re.compile(r'[A-Z0-9._%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,4}',
                         re.IGNORECASE)

# Use Google's Public DNS server (8.8.8.8)
dns_resolver = Resolver()
dns_resolver.nameservers = ['8.8.8.8']


class InvalidEmailAddressError(Exception):
    pass


def provider_from_address(email_address):
    if not EMAIL_REGEX.match(email_address):
        raise InvalidEmailAddressError('Invalid email address')

    domain = email_address.split('@')[1].lower()

    mx_records = []
    try:
        mx_records = dns_resolver.query(domain, 'MX')
    except NoNameservers:
        log.error("NoMXservers error", domain=domain)
    except NXDOMAIN:
        log.error("No such domain", domain=domain)
    except Timeout:
        log.error("Timed out while resolving", domain=domain)
    except NoAnswer:
        log.error("Provider didn't answer", domain=domain)

    ns_records = []
    try:
        ns_records = dns_resolver.query(domain, 'NS')
    except NoNameservers:
        log.error("NoNameservers error", domain=domain)
    except NXDOMAIN:
        log.error("No such domain", domain=domain)
    except Timeout:
        log.error("Timed out while resolving", domain=domain)
    except NoAnswer:
        log.error("Provider didn't answer", domain=domain)

    for (p_name, p) in providers.iteritems():
        mx_servers = p.get('mx_servers', [])
        ns_servers = p.get('ns_servers', [])
        domains = p.get('domains', [])
        if domain in domains:
            return p_name

        valid = True
        for rdata in mx_records:
            domain = str(rdata.exchange).lower()
            # Depending on how the MX server is configured, domain may
            # refer to a relative name or to an absolute one.
            # FIXME @karim: maybe resolve the server instead.
            if domain[-1] == '.':
                domain = domain[:-1]
            if domain not in mx_servers:
                valid = False
                break

        if valid:
            return p_name

        for rdata in ns_records:
            if str(rdata).lower() not in ns_servers:
                valid = False
                break

        if valid:
            return p_name

    return 'unknown'


# From tornado.httputil
def url_concat(url, args, fragments=None):
    """Concatenate url and argument dictionary regardless of whether
    url has existing query parameters.

    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    """
    if not args and not fragments:
        return url

    # Strip off hashes
    while url[-1] == '#':
        url = url[:-1]

    fragment_tail = ''
    if fragments:
        fragment_tail = '#' + urlencode(fragments)

    args_tail = ''
    if args:
        if url[-1] not in ('?', '&'):
            args_tail += '&' if ('?' in url) else '?'
        args_tail += urlencode(args)

    return url + args_tail + fragment_tail
Python
0
201a9d75e9c4a2c84372fe58a674977f2435130f
update fastapi example.
examples/fastapi/app.py
examples/fastapi/app.py
from fastapi import FastAPI, HTTPException, APIRouter
from honeybadger import honeybadger, contrib
import pydantic

honeybadger.configure(api_key='c10787cf')

app = FastAPI(title="Honeybadger - FastAPI.")
app.add_middleware(contrib.ASGIHoneybadger, params_filters=["client"])


@app.get("/raise_some_error", tags=["Notify"])
def raise_some_error(a: str = "foo"):
    """Raises an error."""
    raise Exception(f"SomeError Occurred (a = {a})")


class DivideRequest(pydantic.BaseModel):
    a: int
    b: int = 0


@app.post("/divide", response_model=int, tags=["Notify"])
def divide(req: DivideRequest):
    """Divides `a` by `b`."""
    return req.a / req.b


@app.post("/raise_status_code", tags=["Don't Notify"])
def raise_status_code(status_code: int = 404, detail: str = "Forced 404."):
    """This exception is raised on purpose, so will not be notified."""
    raise HTTPException(status_code=404, detail=detail)


some_router = APIRouter()


@some_router.get("/some_router/endpoint", tags=["Notify"])
def some_router_endpoint():
    """Try raising an error from a router."""
    raise Exception("Exception Raised by some router endpoint.")


app.include_router(some_router)
from fastapi import FastAPI, HTTPException, APIRouter
from honeybadger import honeybadger, contrib
from honeybadger.contrib import asgi
from honeybadger.contrib import fastapi
import pydantic

honeybadger.configure(api_key='c10787cf')

app = FastAPI()
# contrib.FastAPIHoneybadger(app)
app.add_middleware(asgi.ASGIHoneybadger, params_filters=["user-agent", "host", "url", "query_string", "client"])


@app.get("/raise_some_error")
def raise_some_error(a: str):
    """Raises an error."""
    raise Exception(f"SomeError Occurred (a = {a})")


class DivideRequest(pydantic.BaseModel):
    a: int
    b: int = 0


@app.post("/divide")
def divide(req: DivideRequest):
    """Divides `a` by `b`."""
    return req.a / req.b


@app.post("/raise_404")
def raise_404(req: DivideRequest, a: bool = True):
    raise HTTPException(status_code=404, detail="Raising on purpose.")


some_router = APIRouter()


@some_router.get("/some_router_endpoint")
def some_router_endpoint():
    raise Exception("Exception Raised by some router endpoint.")


app.include_router(some_router)
Python
0
ba084db6c16e5dee9e9ff06a3bee02f4dbfb5c82
Add environment variable to control use of UNIX socket proxying
powerstrip.tac
powerstrip.tac
import os

from twisted.application import service, internet
#from twisted.protocols.policies import TrafficLoggingFactory
from urlparse import urlparse

from powerstrip.powerstrip import ServerProtocolFactory

application = service.Application("Powerstrip")

DOCKER_HOST = os.environ.get('DOCKER_HOST')
ENABLE_UNIX_SOCKET = os.environ.get('POWERSTRIP_UNIX_SOCKET', "")
if DOCKER_HOST is None:
    # Default to assuming we've got a Docker socket bind-mounted into a
    # container we're running in.
    if "YES" in ENABLE_UNIX_SOCKET:
        DOCKER_HOST = "unix:///host-var-run/docker.real.sock"
    else:
        DOCKER_HOST = "unix:///host-var-run/docker.sock"
if "://" not in DOCKER_HOST:
    DOCKER_HOST = "tcp://" + DOCKER_HOST
if DOCKER_HOST.startswith("tcp://"):
    parsed = urlparse(DOCKER_HOST)
    dockerAPI = ServerProtocolFactory(dockerAddr=parsed.hostname,
                                      dockerPort=parsed.port)
elif DOCKER_HOST.startswith("unix://"):
    socketPath = DOCKER_HOST[len("unix://"):]
    dockerAPI = ServerProtocolFactory(dockerSocket=socketPath)
#logged = TrafficLoggingFactory(dockerAPI, "api-")

# Refuse to listen on a TCP port, until
# https://github.com/ClusterHQ/powerstrip/issues/56 is resolved.
# TODO: maybe allow to specify a numberic Docker group (gid) as environment
# variable, and also (optionally) the name of the socket file it creates...
if "YES" in ENABLE_UNIX_SOCKET:
    dockerServer = internet.UNIXServer("/host-var-run/docker.sock", dockerAPI, mode=0660)
    dockerServer.setServiceParent(application)
import os

from twisted.application import service, internet
#from twisted.protocols.policies import TrafficLoggingFactory
from urlparse import urlparse

from powerstrip.powerstrip import ServerProtocolFactory

application = service.Application("Powerstrip")

DOCKER_HOST = os.environ.get('DOCKER_HOST')
if DOCKER_HOST is None:
    # Default to assuming we've got a Docker socket bind-mounted into a
    # container we're running in.
    DOCKER_HOST = "unix:///host-var-run/docker.real.sock"
if "://" not in DOCKER_HOST:
    DOCKER_HOST = "tcp://" + DOCKER_HOST
if DOCKER_HOST.startswith("tcp://"):
    parsed = urlparse(DOCKER_HOST)
    dockerAPI = ServerProtocolFactory(dockerAddr=parsed.hostname,
                                      dockerPort=parsed.port)
elif DOCKER_HOST.startswith("unix://"):
    socketPath = DOCKER_HOST[len("unix://"):]
    dockerAPI = ServerProtocolFactory(dockerSocket=socketPath)
#logged = TrafficLoggingFactory(dockerAPI, "api-")

# Refuse to listen on a TCP port, until
# https://github.com/ClusterHQ/powerstrip/issues/56 is resolved.
# TODO: maybe allow to specify a numberic Docker group (gid) as environment
# variable, and also (optionally) the name of the socket file it creates...
dockerServer = internet.UNIXServer("/host-var-run/docker.sock", dockerAPI, mode=0660)
dockerServer.setServiceParent(application)
Python
0
33e693337ab646eaccb724b9c4b3eb3352c6e412
fix pagination
mapentity/pagination.py
mapentity/pagination.py
from rest_framework_datatables.pagination import DatatablesPageNumberPagination


class MapentityDatatablePagination(DatatablesPageNumberPagination):
    """ Custom datatable pagination for Mapentity list views. """
    pass

    # def get_count_and_total_count(self, queryset, view):
    #     """ Handle count for all filters """
    #     count, total_count = super().get_count_and_total_count(queryset, view)
    #     count = queryset.count()  # replace count by real count - not only drf-datatables count
    #     return count, total_count
from rest_framework_datatables.pagination import DatatablesPageNumberPagination class MapentityDatatablePagination(DatatablesPageNumberPagination): """ Custom datatable pagination for Mapentity list views. """ def get_count_and_total_count(self, queryset, view): """ Handle count for all filters """ count, total_count = super().get_count_and_total_count(queryset, view) count = queryset.count() # replace count by real count - not only drf-datatables count return count, total_count
Python
0.998471
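For the pagination record above, the subclass only takes effect once REST framework is pointed at it. A sketch of the usual wiring, assuming standard Django REST framework settings; the snippet is illustrative, not part of the record:

# settings.py (sketch)
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'mapentity.pagination.MapentityDatatablePagination',
    'PAGE_SIZE': 50,
}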
c7679393ae11766cc9da4474f4db1d0dbe50ac91
Bump to 0.11.0
watchman/__init__.py
watchman/__init__.py
__version__ = '0.11.0'
__version__ = '0.10.1'
Python
0.000042
815fecf36f9c0114a9aa8594b58226ead223b313
fix type bug
app/app.py
app/app.py
"""Do work""" import argparse import logging import os import sys from cameracontroller.cameracontroller import CameraController from storage.cloudstorage import CloudStorage logger = logging.getLogger('pypic') log_dir = os.path.expanduser('~/log') if not os.path.exists(log_dir): os.makedirs(log_dir) logging.basicConfig( filename=os.path.join(log_dir, 'pypiclog'), format='%(asctime)s :: %(levelname)s :: %(message)s', level=logging.DEBUG ) def exception_handler(exception_type, exception, traceback): logger.error(str(exception)) sys.excepthook = exception_handler def main(): """Main script execution""" parser = argparse.ArgumentParser() parser.add_argument( '-c', '--continuous', action='store_true', help='If set, run the video feed continuously' ) parser.add_argument( '-d', '--duration', default=10, type=float, help='Duration (in seconds) to run the video loop' ) args = parser.parse_args() camera_controller = CameraController( os.path.expanduser('~/pypic_output'), CloudStorage( os.environ.get('AZSTORAGE_ACCOUNT_NAME'), os.environ.get('AZSTORAGE_ACCOUNT_KEY') ) ) camera_controller.record_video( continuous=args.continuous, duration=args.duration ) if __name__ == '__main__': main()
"""Do work""" import argparse import logging import os import sys from cameracontroller.cameracontroller import CameraController from storage.cloudstorage import CloudStorage logger = logging.getLogger('pypic') log_dir = os.path.expanduser('~/log') if not os.path.exists(log_dir): os.makedirs(log_dir) logging.basicConfig( filename=os.path.join(log_dir, 'pypiclog'), format='%(asctime)s :: %(levelname)s :: %(message)s', level=logging.DEBUG ) def exception_handler(exception_type, exception, traceback): logger.error(str(exception)) sys.excepthook = exception_handler def main(): """Main script execution""" parser = argparse.ArgumentParser() parser.add_argument( '-c', '--continuous', action='store_true', help='If set, run the video feed continuously' ) parser.add_argument( '-d', '--duration', default=10, help='Duration (in seconds) to run the video loop' ) args = parser.parse_args() camera_controller = CameraController( os.path.expanduser('~/pypic_output'), CloudStorage( os.environ.get('AZSTORAGE_ACCOUNT_NAME'), os.environ.get('AZSTORAGE_ACCOUNT_KEY') ) ) camera_controller.record_video( continuous=args.continuous, duration=args.duration ) if __name__ == '__main__': main()
Python
0.000001
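The type bug fixed above is argparse's default behavior: values parsed from the command line arrive as strings unless a type is given, while the Python-side default stays an int, so --duration 2.5 previously flowed downstream as the string '2.5'. A self-contained sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-d', '--duration', default=10, type=float)

args = parser.parse_args(['-d', '2.5'])
# With type=float the value is coerced; without it this would be the
# string '2.5' and arithmetic on the duration would raise TypeError.
assert args.duration == 2.5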
b0dd7879fbf2000c86a2f77995495d480c890713
Add search by location
usecases/events/search_by_location.py
usecases/events/search_by_location.py
from predicthq import Client # Please copy paste your access token here # or read our Quickstart documentation if you don't have a token yet # https://developer.predicthq.com/guides/quickstart/ ACCESS_TOKEN = 'abc123' phq = Client(access_token=ACCESS_TOKEN) # The events endpoint supports three types of search by location: # - by area # - by fuzzy location search around # - by geoname place ID (see places endpoint for more details) # The within parameter allows you to search for events within # a specified area. It expects a string in the form # {radius}{unit}@{latitude},{longitude} # where the radius unit can be one of: m, km, ft, mi. # https://developer.predicthq.com/resources/events/#param-within # Please note that the within parameter uses the lat, lon order # but the location field in the event response uses the lon, lat GeoJSON order. for event in phq.events.search(within='10km@-36.844480,174.768368'): print(event.rank, event.category, event.title, event.location) # The fuzzy location search around doesn't restrict search results # to the specified latitude, longitude and offset. # In most cases, you only need to use the `origin` key, # e.g. {'origin': '{lat},{lon}'} # Please note that this affects the relevance of your search results. # https://developer.predicthq.com/resources/events/#param-loc-around for event in phq.events.search(location_around={'origin': '-36.844480,174.768368'}): print(event.rank, event.category, event.title, event.location, event.relevance) # Finally, you can specify a geoname place ID or a list of place IDs or # airport codes (see https://developer.predicthq.com/csv/airport_codes.csv) # The 'scope' suffix (includes events having children or parents of the place ID) # or the 'exact' suffix (only events with the specified place ID) can be used. # https://developer.predicthq.com/resources/events/#param-place for event in phq.events.search(place={'scope': '5128638'}): # place ID print(event.rank, event.category, event.title, event.place_hierarchies) for event in phq.events.search(place={'scope': 'SFO'}): # airport code print(event.rank, event.category, event.title, event.place_hierarchies)
from predicthq import Client # Please copy paste your access token here # or read our Quickstart documentation if you don't have a token yet # https://developer.predicthq.com/guides/quickstart/ ACCESS_TOKEN = 'abc123' phq = Client(access_token=ACCESS_TOKEN)
Python
0
997cd53d1d045840118876227b9c5588e153195b
fix not equal override. thanks @hodgestar
cms/models.py
cms/models.py
import re import unicodedata RE_NUMERICAL_SUFFIX = re.compile(r'^[\w-]*-(\d+)+$') from gitmodel import fields, models class FilterMixin(object): @classmethod def filter(cls, **fields): items = list(cls.all()) for field, value in fields.items(): if hasattr(cls, field): items = [a for a in items if getattr(a, field) == value] else: raise Exception('invalid field %s' % field) return items class SlugifyMixin(object): def slugify(self, value): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. """ value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') value = unicode(re.sub('[^\w\s-]', '', value).strip().lower()) return re.sub('[-\s]+', '-', value) def generate_slug(self): if hasattr(self, 'title') and self.title: if hasattr(self, 'slug') and not self.slug: self.slug = self.slugify(unicode(self.title))[:40] def save(self, *args, **kwargs): self.generate_slug() return super(SlugifyMixin, self).save(*args, **kwargs) class Category(FilterMixin, SlugifyMixin, models.GitModel): slug = fields.SlugField(required=True, id=True) title = fields.CharField(required=True) def __eq__(self, other): return self.slug == other.slug def __ne__(self, other): return self.slug != other.slug class Page(FilterMixin, SlugifyMixin, models.GitModel): slug = fields.SlugField(required=True, id=True) title = fields.CharField(required=True) content = fields.CharField(required=False) published = fields.BooleanField(default=True) primary_category = fields.RelatedField(Category, required=False)
import re import unicodedata RE_NUMERICAL_SUFFIX = re.compile(r'^[\w-]*-(\d+)+$') from gitmodel import fields, models class FilterMixin(object): @classmethod def filter(cls, **fields): items = list(cls.all()) for field, value in fields.items(): if hasattr(cls, field): items = [a for a in items if getattr(a, field) == value] else: raise Exception('invalid field %s' % field) return items class SlugifyMixin(object): def slugify(self, value): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. """ value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') value = unicode(re.sub('[^\w\s-]', '', value).strip().lower()) return re.sub('[-\s]+', '-', value) def generate_slug(self): if hasattr(self, 'title') and self.title: if hasattr(self, 'slug') and not self.slug: self.slug = self.slugify(unicode(self.title))[:40] def save(self, *args, **kwargs): self.generate_slug() return super(SlugifyMixin, self).save(*args, **kwargs) class Category(FilterMixin, SlugifyMixin, models.GitModel): slug = fields.SlugField(required=True, id=True) title = fields.CharField(required=True) def __eq__(self, other): return self.slug == other.slug def __ne__(self, other): return self.slug == other.slug class Page(FilterMixin, SlugifyMixin, models.GitModel): slug = fields.SlugField(required=True, id=True) title = fields.CharField(required=True) content = fields.CharField(required=False) published = fields.BooleanField(default=True) primary_category = fields.RelatedField(Category, required=False)
Python
0.000002
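The one-character fix above matters because Python 2 never derives __ne__ from __eq__: the old __ne__ returned self.slug == other.slug, so a != b was true exactly when the slugs matched. A minimal standalone sketch of the corrected pair:

class Slugged(object):
    def __init__(self, slug):
        self.slug = slug

    def __eq__(self, other):
        return self.slug == other.slug

    def __ne__(self, other):
        # must negate __eq__; Python 2 will not infer this
        return self.slug != other.slug

assert Slugged('a') == Slugged('a')
assert Slugged('a') != Slugged('b')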
dbfb095f6b90c2517416652d53b6db6b5ee919a4
Bump version
fabdeploy/__init__.py
fabdeploy/__init__.py
VERSION = (0, 3, 4, 'final', 0) def get_version(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2]: version = '%s.%s' % (version, VERSION[2]) if VERSION[3:] == ('alpha', 0): version = '%s pre-alpha' % version else: if VERSION[3] != 'final': version = '%s %s %s' % (version, VERSION[3], VERSION[4]) return version
VERSION = (0, 3, 3, 'final', 0) def get_version(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2]: version = '%s.%s' % (version, VERSION[2]) if VERSION[3:] == ('alpha', 0): version = '%s pre-alpha' % version else: if VERSION[3] != 'final': version = '%s %s %s' % (version, VERSION[3], VERSION[4]) return version
Python
0
dc49ce292d4e0669598abb7f45ba389efde0dabc
Fix testTeleopPanel
src/python/tests/testTeleopPanel.py
src/python/tests/testTeleopPanel.py
from director import robotsystem from director.consoleapp import ConsoleApp from director import transformUtils from director import visualization as vis from director import objectmodel as om from director import teleoppanel from director import playbackpanel from director import planningutils from PythonQt import QtCore, QtGui import numpy as np def checkGraspFrame(inputGraspFrame, side): ''' Return True if the given grasp frame matches the grasp frame of the teleop robot model's current pose, else False. ''' pose = teleopJointController.q teleopGraspFrame = ikPlanner.newGraspToWorldFrame(pose, side, ikPlanner.newGraspToHandFrame(side)) p1, q1 = transformUtils.poseFromTransform(inputGraspFrame) p2, q2 = transformUtils.poseFromTransform(teleopGraspFrame) try: np.testing.assert_allclose(p1, p2, rtol=1e-3) np.testing.assert_allclose(q1, q2, rtol=1e-3) return True except AssertionError: return False def onIkStartup(ikServer, startSuccess): side = 'left' goalFrame = transformUtils.frameFromPositionAndRPY([0.5, 0.5, 1.2], [0, 90, -90]) assert not checkGraspFrame(goalFrame, side) frame = teleopPanel.endEffectorTeleop.newReachTeleop(goalFrame, side) assert checkGraspFrame(goalFrame, side) teleopPanel.ui.planButton.click() assert playbackPanel.plan is not None teleopPanel.ikPlanner.useCollision = True; teleopPanel.ui.planButton.click() assert playbackPanel.plan is not None frame.setProperty('Edit', True) app.startTestingModeQuitTimer() app = ConsoleApp() app.setupGlobals(globals()) view = app.createView() robotsystem.create(view, globals()) playbackPanel = playbackpanel.PlaybackPanel(planPlayback, playbackRobotModel, playbackJointController, robotStateModel, robotStateJointController, manipPlanner) planningUtils = planningutils.PlanningUtils(robotStateModel, robotStateJointController) teleopPanel = teleoppanel.TeleopPanel(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, affordanceManager, playbackPanel.setPlan, playbackPanel.hidePlan, planningUtils) manipPlanner.connectPlanReceived(playbackPanel.setPlan) ikServer.connectStartupCompleted(onIkStartup) startIkServer() w = QtGui.QWidget() l = QtGui.QGridLayout(w) l.addWidget(view, 0, 0) l.addWidget(playbackPanel.widget, 1, 0) l.addWidget(teleopPanel.widget, 0, 1, 2, 1) l.setMargin(0) l.setSpacing(0) w.show() w.resize(1600, 900) app.start(enableAutomaticQuit=False)
from director import robotsystem from director.consoleapp import ConsoleApp from director import transformUtils from director import visualization as vis from director import objectmodel as om from director import teleoppanel from director import playbackpanel from PythonQt import QtCore, QtGui import numpy as np def checkGraspFrame(inputGraspFrame, side): ''' Return True if the given grasp frame matches the grasp frame of the teleop robot model's current pose, else False. ''' pose = teleopJointController.q teleopGraspFrame = ikPlanner.newGraspToWorldFrame(pose, side, ikPlanner.newGraspToHandFrame(side)) p1, q1 = transformUtils.poseFromTransform(inputGraspFrame) p2, q2 = transformUtils.poseFromTransform(teleopGraspFrame) try: np.testing.assert_allclose(p1, p2, rtol=1e-3) np.testing.assert_allclose(q1, q2, rtol=1e-3) return True except AssertionError: return False def onIkStartup(ikServer, startSuccess): side = 'left' goalFrame = transformUtils.frameFromPositionAndRPY([0.5, 0.5, 1.2], [0, 90, -90]) assert not checkGraspFrame(goalFrame, side) frame = teleopPanel.endEffectorTeleop.newReachTeleop(goalFrame, side) assert checkGraspFrame(goalFrame, side) teleopPanel.ui.planButton.click() assert playbackPanel.plan is not None teleopPanel.ikPlanner.useCollision = True; teleopPanel.ui.planButton.click() assert playbackPanel.plan is not None frame.setProperty('Edit', True) app.startTestingModeQuitTimer() app = ConsoleApp() app.setupGlobals(globals()) view = app.createView() robotsystem.create(view, globals()) playbackPanel = playbackpanel.PlaybackPanel(planPlayback, playbackRobotModel, playbackJointController, robotStateModel, robotStateJointController, manipPlanner) teleopPanel = teleoppanel.TeleopPanel(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, affordanceManager, playbackPanel.setPlan, playbackPanel.hidePlan) manipPlanner.connectPlanReceived(playbackPanel.setPlan) ikServer.connectStartupCompleted(onIkStartup) startIkServer() w = QtGui.QWidget() l = QtGui.QGridLayout(w) l.addWidget(view, 0, 0) l.addWidget(playbackPanel.widget, 1, 0) l.addWidget(teleopPanel.widget, 0, 1, 2, 1) l.setMargin(0) l.setSpacing(0) w.show() w.resize(1600, 900) app.start(enableAutomaticQuit=False)
Python
0.000001
0d056e041f141391b115aef1f1cc5aa684876535
save signature saliency
view_saliency.py
view_saliency.py
#!/usr/bin/env python import cv2 import numpy import sys import salienpy.frequency_tuned import salienpy.signature def main(img): cv2.imshow('Original Image', img) ftuned = salienpy.frequency_tuned.frequency_tuned_saliency(img) cv2.imshow('Frequency Tuned', ftuned) signa = salienpy.signature.signature_saliency(img) cv2.imshow('Signature Saliency', signa) cv2.imwrite('signature.png', signa) cv2.waitKey() if __name__ == '__main__': if len(sys.argv) > 1: img = cv2.imread(sys.argv[1]) else: cam = cv2.VideoCapture(0) status, img = cam.read() main(img)
#!/usr/bin/env python import cv2 import numpy import sys import salienpy.frequency_tuned import salienpy.signature def main(img): cv2.imshow('Original Image', img) ftuned = salienpy.frequency_tuned.frequency_tuned_saliency(img) cv2.imshow('Frequency Tuned', ftuned) signa = salienpy.signature.signature_saliency(img) cv2.imshow('Signature Saliency', signa) cv2.waitKey() if __name__ == '__main__': if len(sys.argv) > 1: img = cv2.imread(sys.argv[1]) else: cam = cv2.VideoCapture(0) status, img = cam.read() main(img)
Python
0
28968ca117fc18dfe513c06ce4ead2295830fd94
remove redundant parentheses
plugins/BasePlugin.py
plugins/BasePlugin.py
__author__ = 'marcusmorgenstern' __mail__ = '' from abc import ABCMeta, abstractmethod class BasePlugin: """ Abstract base class guaranteeing the plugin interface. Each plugin must provide initialisation taking an optional configuration, and an invoke method taking data """ __metaclass__ = ABCMeta def __init__(self, config=None): """ initialisation :param config (dict): configuration params for plugin :return: void """ self.dep = [] @abstractmethod def invoke(self, data): """ Entry for plugin execution :param data (dict): input data :return: void """ pass
__author__ = 'marcusmorgenstern' __mail__ = '' from abc import ABCMeta, abstractmethod class BasePlugin(): """ Metaclass for guarantee of interface. Each plugin must provide initialisation taking optional configuration and invoke method taking data """ __metaclass__ = ABCMeta def __init__(self, config=None): """ initialisation :param config (dict): configuration params for plugin :return: void """ self.dep = [] @abstractmethod def invoke(self, data): """ Entry for plugin execution :param data (dict): input data :return: void """ pass
Python
0.999999
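A note on the record above: in Python 2, class BasePlugin: and class BasePlugin(): are equivalent, so the change is purely stylistic; the abstract-method enforcement comes from the __metaclass__ = ABCMeta assignment, not the parentheses. A sketch of that enforcement, assuming the BasePlugin from the record:

class EchoPlugin(BasePlugin):
    def invoke(self, data):
        return data

EchoPlugin().invoke({'x': 1})  # concrete subclass: fine
try:
    BasePlugin()  # abstract invoke() makes direct instantiation fail
except TypeError as e:
    print(e)  # Can't instantiate abstract class BasePlugin ...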
b754ee143ed0a022706bfeed287e392e11dd0e28
Update to work with python3
external/stacktracer.py
external/stacktracer.py
"""Stack tracer for multi-threaded applications. Usage: import stacktracer stacktracer.start_trace("trace.html",interval=5,auto=True) # Set auto flag to always update file! .... stacktracer.stop_trace() """ # Source: http://code.activestate.com/recipes/577334-how-to-debug-deadlocked-multi-threaded-programs/ import sys import traceback from pygments import highlight from pygments.lexers import PythonLexer from pygments.formatters import HtmlFormatter # Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/ def stacktraces(): code = [] for threadId, stack in sys._current_frames().items(): code.append("\n# ThreadID: %s" % threadId) for filename, lineno, name, line in traceback.extract_stack(stack): code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) return highlight("\n".join(code), PythonLexer(), HtmlFormatter( full=False, # style="native", noclasses=True, )) # This part was made by nagylzs import os import time import threading class TraceDumper(threading.Thread): """Dump stack traces into a given file periodically.""" def __init__(self,fpath,interval,auto): """ @param fpath: File path to output HTML (stack trace file) @param auto: Set flag (True) to update trace continuously. Clear flag (False) to update only if file not exists. (Then delete the file to force update.) @param interval: In seconds: how often to update the trace file. """ assert(interval>0.1) self.auto = auto self.interval = interval self.fpath = os.path.abspath(fpath) self.stop_requested = threading.Event() threading.Thread.__init__(self) def run(self): while not self.stop_requested.isSet(): time.sleep(self.interval) if self.auto or not os.path.isfile(self.fpath): self.stacktraces() def stop(self): self.stop_requested.set() self.join() try: if os.path.isfile(self.fpath): os.unlink(self.fpath) except: pass def stacktraces(self): fout = open(self.fpath,"wb+") try: fout.write(bytes(stacktraces(), "UTF-8")) finally: fout.close() _tracer = None def trace_start(fpath,interval=5,auto=True): """Start tracing into the given file.""" global _tracer if _tracer is None: _tracer = TraceDumper(fpath,interval,auto) _tracer.setDaemon(True) _tracer.start() else: raise Exception("Already tracing to %s"%_tracer.fpath) def trace_stop(): """Stop tracing.""" global _tracer if _tracer is None: raise Exception("Not tracing, cannot stop.") else: _trace.stop() _trace = None
"""Stack tracer for multi-threaded applications. Usage: import stacktracer stacktracer.start_trace("trace.html",interval=5,auto=True) # Set auto flag to always update file! .... stacktracer.stop_trace() """ # Source: http://code.activestate.com/recipes/577334-how-to-debug-deadlocked-multi-threaded-programs/ import sys import traceback from pygments import highlight from pygments.lexers import PythonLexer from pygments.formatters import HtmlFormatter # Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/ def stacktraces(): code = [] for threadId, stack in sys._current_frames().items(): code.append("\n# ThreadID: %s" % threadId) for filename, lineno, name, line in traceback.extract_stack(stack): code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) return highlight("\n".join(code), PythonLexer(), HtmlFormatter( full=False, # style="native", noclasses=True, )) # This part was made by nagylzs import os import time import threading class TraceDumper(threading.Thread): """Dump stack traces into a given file periodically.""" def __init__(self,fpath,interval,auto): """ @param fpath: File path to output HTML (stack trace file) @param auto: Set flag (True) to update trace continuously. Clear flag (False) to update only if file not exists. (Then delete the file to force update.) @param interval: In seconds: how often to update the trace file. """ assert(interval>0.1) self.auto = auto self.interval = interval self.fpath = os.path.abspath(fpath) self.stop_requested = threading.Event() threading.Thread.__init__(self) def run(self): while not self.stop_requested.isSet(): time.sleep(self.interval) if self.auto or not os.path.isfile(self.fpath): self.stacktraces() def stop(self): self.stop_requested.set() self.join() try: if os.path.isfile(self.fpath): os.unlink(self.fpath) except: pass def stacktraces(self): fout = file(self.fpath,"wb+") try: fout.write(stacktraces()) finally: fout.close() _tracer = None def trace_start(fpath,interval=5,auto=True): """Start tracing into the given file.""" global _tracer if _tracer is None: _tracer = TraceDumper(fpath,interval,auto) _tracer.setDaemon(True) _tracer.start() else: raise Exception("Already tracing to %s"%_tracer.fpath) def trace_stop(): """Stop tracing.""" global _tracer if _tracer is None: raise Exception("Not tracing, cannot stop.") else: _trace.stop() _trace = None
Python
0
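The python3 port above touches the one write site: file() is gone in Python 3, and a file opened in binary mode accepts only bytes. A standalone sketch of the before/after write pattern:

html = "<html>trace</html>"

# Python 2: fout = file(path, "wb+"); fout.write(html) worked with str.
# Python 3: use open() and encode explicitly for binary mode.
with open("trace.html", "wb+") as fout:
    fout.write(bytes(html, "UTF-8"))  # equivalently html.encode("UTF-8")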
79e2044380d2d5a9568b76777bc7b1950dcaaeb8
Bump version to 14.1.0
recipe_scrapers/__version__.py
recipe_scrapers/__version__.py
__version__ = "14.1.0"
__version__ = "14.0.0"
Python
0
725785c59ca6aca23338b0f727dd2c492cb166df
fix a silly bug
process/LDA.py
process/LDA.py
# -*- coding: utf-8 -*- import jieba import time import json import pickle import os from sklearn.feature_extraction.text import CountVectorizer from sklearn.decomposition import LatentDirichletAllocation from util import RAW_DATA_DIR from util import STOP_WORDS from util import LDA_MODEL_PATH from util import DOC_PATH from util import TOPIC_PATH TOPICS_NUMBER = 4 TOPICS_WORD = 6 def __get_row_data(): """ build bag of words model """ raw_data = [] files_info = [] t0 = time.time() print('reading raw data') for parent, _, file_names in os.walk(RAW_DATA_DIR): for filename in file_names: full_file_name = os.path.join(parent, filename) with open(full_file_name, 'r', encoding='utf-8') as file_data: raw_data.append(file_data.read()) file_info = filename.split(':') files_info.append({'date':file_info[0], 'filename': file_info[1][:-4]}) print(f'got {len(raw_data)} files in {time.time()-t0}s') return files_info, raw_data def vectorizer(): print('extracting tf features') t0 = time.time() vectorized = CountVectorizer(max_df=0.8, min_df=0.01, stop_words=STOP_WORDS, analyzer='word', tokenizer=jieba.cut) print(f'finish in {time.time()-t0}s') return vectorized def __build_lda_model(tf): lda = LatentDirichletAllocation(n_topics=TOPICS_NUMBER, max_iter=5, learning_method='online', learning_offset=50., random_state=0) t0 = time.time() print('building lda model') lda.fit(tf) print(f'done in {time.time() - t0}') return lda def __topic_list(lda, feature_names): topic_list = [] for topic_idx, topic in enumerate(lda.components_): topic_list.append([feature_names[i] for i in topic.argsort()[:-TOPICS_WORD - 1:-1]]) return topic_list def __set_lda_info_to_file_info(file_info, lda_model): for index, item in enumerate(file_info): item['lda'] = lda_model[index].tolist() if __name__ == '__main__': file_info, raw_data = __get_row_data() vectorized = vectorizer() tf = vectorized.fit_transform(raw_data) lda = __build_lda_model(tf) topic_list = __topic_list(lda, vectorized.get_feature_names()) __set_lda_info_to_file_info(file_info, lda.transform(tf)) print('saving model') pickle.dump(lda, open(LDA_MODEL_PATH, 'wb')) json.dump(topic_list, open(TOPIC_PATH, 'w'), ensure_ascii=False) json.dump(file_info, open(DOC_PATH, 'w'), ensure_ascii=False)
# -*- coding: utf-8 -*- import jieba import time import json import pickle import os from sklearn.feature_extraction.text import CountVectorizer from sklearn.decomposition import LatentDirichletAllocation from util import RAW_DATA_DIR from util import STOP_WORDS from util import LDA_MODEL_PATH from util import DOC_PATH from util import TOPIC_PATH TOPICS_NUMBER = 4 TOPICS_WORD = 6 def __get_row_data(): """ build bag of words model """ raw_data = [] files_info = [] t0 = time.time() print('reading raw data') for parent, _, file_names in os.walk(RAW_DATA_DIR): for filename in file_names: full_file_name = os.path.join(parent, filename) with open(full_file_name, 'r', encoding='utf-8') as file_data: raw_data.append(file_data.read()) file_info = filename.split(':') files_info.append({'date':file_info[0], 'filename': file_info[1][:-4]}) print(f'got {len(raw_data)} files in {time.time()-t0}s') return files_info, raw_data def vectorizer(): print('extracting tf features') t0 = time.time() vectorized = CountVectorizer(max_df=0.8, min_df=0.01, stop_words=STOP_WORDS, analyzer='word', tokenizer=jieba.cut) print(f'finish in {time.time()-t0}s') return vectorized def __build_lda_model(tf): lda = LatentDirichletAllocation(n_topics=TOPICS_NUMBER, max_iter=5, learning_method='online', learning_offset=50., random_state=0) t0 = time.time() print('building lda model') lda.fit(tf) print(f'done in {time.time() - t0}') return lda def __topic_list(lda, feature_names): topic_list = [] for topic_idx, topic in enumerate(lda.components_): topic_list.append([feature_names[i] for i in topic.argsort()[:-TOPICS_WORD - 1:-1]]) return topic_list def __set_lda_info_to_file_info(file_info, lda_model): for index, item in enumerate(file_info): item['lda'] = lda_model[index].tolist() if __name__ == '__main__': file_info, raw_data = __get_row_data() vectorized = vectorizer() tf = vectorized.fit_transform(raw_data) lda = __build_lda_model(tf) topic_list = __topic_list(lda, vectorized.get_feature_names()) __set_lda_info_to_file_info(file_info, lda.transform(tf)) print('saving model') pickle.dump(pickle.dump, open(LDA_MODEL_PATH, 'wb')) json.dump(topic_list, open(TOPIC_PATH, 'w'), ensure_ascii=False) json.dump(file_info, open(DOC_PATH, 'w'), ensure_ascii=False)
Python
0.000753
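The silly bug fixed above, pickle.dump(pickle.dump, ...), serialized a reference to the dump function itself rather than the fitted model, and did so silently, since functions pickle by qualified name. A standalone sketch of the intended round-trip, with a plain dict standing in for the LDA model:

import pickle

model = {'topics': 4}  # stand-in for the fitted LDA model

with open('model.pkl', 'wb') as f:
    pickle.dump(model, f)  # the object itself, not pickle.dump

with open('model.pkl', 'rb') as f:
    assert pickle.load(f) == model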
7009e1f0b316da5f17247786810676f70d282f93
Add assertion.__all__
extenteten/assertion.py
extenteten/assertion.py
import collections import numpy import tensorflow as tf from .util import func_scope __all__ = [ 'is_int', 'is_natural_num', 'is_natural_num_sequence', 'is_sequence', 'assert_no_nan', ] def is_int(num): return (isinstance(num, int) or isinstance(num, numpy.integer) or (isinstance(num, numpy.ndarray) and num.ndim == 0 and issubclass(num.dtype.type, numpy.integer))) def is_natural_num(num): return is_int(num) and num > 0 def is_natural_num_sequence(num_list, length=None): return (is_sequence(num_list) and all(is_natural_num(num) for num in num_list) and (length == None or len(num_list) == length)) def is_sequence(obj): return isinstance(obj, collections.Sequence) @func_scope() def assert_no_nan(tensor): return tf.assert_equal(tf.reduce_any(tf.is_nan(tensor)), False)
import collections import numpy import tensorflow as tf from .util import func_scope def is_int(num): return (isinstance(num, int) or isinstance(num, numpy.integer) or (isinstance(num, numpy.ndarray) and num.ndim == 0 and issubclass(num.dtype.type, numpy.integer))) def is_natural_num(num): return is_int(num) and num > 0 def is_natural_num_sequence(num_list, length=None): return (is_sequence(num_list) and all(is_natural_num(num) for num in num_list) and (length == None or len(num_list) == length)) def is_sequence(obj): return isinstance(obj, collections.Sequence) @func_scope() def assert_no_nan(tensor): return tf.assert_equal(tf.reduce_any(tf.is_nan(tensor)), False)
Python
0.002638
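Adding __all__ above pins the module's public surface for star imports. A short sketch of its effect, assuming a hypothetical consumer module (not part of the record):

# consumer.py (sketch)
from extenteten.assertion import *

is_int(3)          # exported: listed in __all__
# func_scope(...)  # imported inside assertion.py but not listed, so it
#                  # no longer leaks through star imports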
3876130a94f3a43a6b34dd3be22ef963238bda3b
fix migration
mygpo/usersettings/migrations/0002_move_existing.py
mygpo/usersettings/migrations/0002_move_existing.py
import json from django.db import migrations def move_podcastsettings(apps, schema_editor): PodcastConfig = apps.get_model("subscriptions", "PodcastConfig") UserSettings = apps.get_model("usersettings", "UserSettings") ContentType = apps.get_model('contenttypes', 'ContentType') for cfg in PodcastConfig.objects.all(): if not json.loads(cfg.settings): continue setting, created = UserSettings.objects.update_or_create( user=cfg.user, # we can't get the contenttype from cfg.podcast as it would be a # different model content_type=ContentType.objects.get(app_label='podcasts', model='podcast'), object_id=cfg.podcast.pk, defaults={ 'settings': cfg.settings, } ) def move_usersettings(apps, schema_editor): UserProfile = apps.get_model("users", "UserProfile") UserSettings = apps.get_model("usersettings", "UserSettings") for profile in UserProfile.objects.all(): if not json.loads(profile.settings): continue setting, created = UserSettings.objects.update_or_create( user=profile.user, content_type=None, object_id=None, defaults={ 'settings': profile.settings, } ) class Migration(migrations.Migration): dependencies = [ ('usersettings', '0001_initial'), ('subscriptions', '0002_unique_constraint'), ('users', '0011_syncgroup_blank'), ] operations = [ migrations.RunPython(move_podcastsettings), migrations.RunPython(move_usersettings), ]
import json from django.db import migrations from django.contrib.contenttypes.models import ContentType def move_podcastsettings(apps, schema_editor): PodcastConfig = apps.get_model("subscriptions", "PodcastConfig") UserSettings = apps.get_model("usersettings", "UserSettings") for cfg in PodcastConfig.objects.all(): if not json.loads(cfg.settings): continue setting, created = UserSettings.objects.update_or_create( user=cfg.user, # we can't get the contenttype from cfg.podcast as it would be a # different model content_type=ContentType.objects.filter(app_label='podcasts', model='podcast'), object_id=cfg.podcast.pk, defaults={ 'settings': cfg.settings, } ) def move_usersettings(apps, schema_editor): UserProfile = apps.get_model("users", "UserProfile") UserSettings = apps.get_model("usersettings", "UserSettings") for profile in UserProfile.objects.all(): if not json.loads(profile.settings): continue setting, created = UserSettings.objects.update_or_create( user=profile.user, content_type=None, object_id=None, defaults={ 'settings': profile.settings, } ) class Migration(migrations.Migration): dependencies = [ ('usersettings', '0001_initial'), ('subscriptions', '0002_unique_constraint'), ('users', '0011_syncgroup_blank'), ] operations = [ migrations.RunPython(move_podcastsettings), migrations.RunPython(move_usersettings), ]
Python
0
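Two things changed in the migration above: ContentType is resolved through the frozen app registry (apps.get_model), as data migrations require, and .filter() became .get(). The second fix matters because a QuerySet is not a model instance and cannot stand in for a foreign key value. A short sketch of the distinction, assuming the ContentType model from the record:

# .filter() returns a QuerySet (zero, one, or many rows); passing it as
# content_type=... fails when Django validates the foreign key.
qs = ContentType.objects.filter(app_label='podcasts', model='podcast')

# .get() returns exactly one instance (or raises DoesNotExist /
# MultipleObjectsReturned), which is the shape update_or_create needs.
ct = ContentType.objects.get(app_label='podcasts', model='podcast')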
5c50d3fcda08da468b2f6b5e61fa1777cc08b17b
Fix test.
kolibri/content/test/test_downloadcontent.py
kolibri/content/test/test_downloadcontent.py
import os import tempfile import hashlib import mimetypes from django.test import TestCase, Client from django.test.utils import override_settings from kolibri.auth.models import DeviceOwner from kolibri.content.models import File, ContentNode from kolibri.content.utils.paths import get_content_storage_file_path from le_utils.constants import file_formats, format_presets CONTENT_STORAGE_DIR_TEMP = tempfile.mkdtemp() @override_settings( CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP, ) class DownloadContentTestCase(TestCase): """ Test case for the downloadcontent endpoint. """ def setUp(self): # create DeviceOwner to pass the setup_wizard middleware check DeviceOwner.objects.create(username='test-device-owner', password=123) self.client = Client() self.hash = hashlib.md5("DUMMYDATA".encode()).hexdigest() self.extension = file_formats.PDF self.filename = "{}.{}".format(self.hash, self.extension) self.title = "abc123!@#$%^&*();'[],./?><" self.contentnode = ContentNode(title=self.title) self.available = True self.preset = format_presets.DOCUMENT self.file = File(checksum=self.hash, extension=self.extension, available=self.available, contentnode=self.contentnode, preset=self.preset) self.path = get_content_storage_file_path(self.filename) path_dir = os.path.dirname(self.path) if not os.path.exists(path_dir): os.makedirs(path_dir) tempfile = open(self.path, "w") tempfile.write("test") tempfile.close() def test_generate_download_filename(self): self.assertEqual(self.file.get_download_filename(), "abc123._Document.{}".format(self.extension)) def test_generate_download_url(self): self.assertEqual(self.file.get_download_url(), "/downloadcontent/{}/{}".format(self.filename, self.file.get_download_filename())) def test_download_existing_file(self): response = self.client.get(self.file.get_download_url()) self.assertEqual(response.status_code, 200) def test_download_non_existing_file(self): bad_download_url = self.file.get_download_url().replace(self.file.get_download_url()[25:25], "aaaaa") response = self.client.get(bad_download_url) self.assertEqual(response.status_code, 404) def test_download_headers(self): response = self.client.get(self.file.get_download_url()) self.assertEqual(response['Content-Type'], mimetypes.guess_type(self.filename)[0]) self.assertEqual(response['Content-Disposition'], 'attachment;') self.assertEqual(response['Content-Length'], str(os.path.getsize(self.path)))
import os import tempfile import hashlib import mimetypes from django.test import TestCase, Client from django.test.utils import override_settings from kolibri.auth.models import DeviceOwner from kolibri.content.models import File, ContentNode from kolibri.content.utils.paths import get_content_storage_file_path from le_utils.constants import file_formats, format_presets CONTENT_STORAGE_DIR_TEMP = tempfile.mkdtemp() @override_settings( CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP, ) class DownloadContentTestCase(TestCase): """ Test case for the downloadcontent endpoint. """ def setUp(self): # create DeviceOwner to pass the setup_wizard middleware check DeviceOwner.objects.create(username='test-device-owner', password=123) self.client = Client() self.hash = hashlib.md5("DUMMYDATA".encode()).hexdigest() self.extension = dict(file_formats.choices).get("pdf") self.filename = "{}.{}".format(self.hash, self.extension) self.title = "abc123!@#$%^&*();'[],./?><" self.contentnode = ContentNode(title=self.title) self.available = True self.preset = format_presets.DOCUMENT self.file = File(checksum=self.hash, extension=self.extension, available=self.available, contentnode=self.contentnode, preset=self.preset) self.path = get_content_storage_file_path(self.filename) path_dir = os.path.dirname(self.path) if not os.path.exists(path_dir): os.makedirs(path_dir) tempfile = open(self.path, "w") tempfile.write("test") tempfile.close() def test_generate_download_filename(self): self.assertEqual(self.file.get_download_filename(), "abc123._Document.{}".format(self.extension)) def test_generate_download_url(self): self.assertEqual(self.file.get_download_url(), "/downloadcontent/{}/{}".format(self.filename, self.file.get_download_filename())) def test_download_existing_file(self): response = self.client.get(self.file.get_download_url()) self.assertEqual(response.status_code, 200) def test_download_non_existing_file(self): bad_download_url = self.file.get_download_url().replace(self.file.get_download_url()[25:25], "aaaaa") response = self.client.get(bad_download_url) self.assertEqual(response.status_code, 404) def test_download_headers(self): response = self.client.get(self.file.get_download_url()) self.assertEqual(response['Content-Type'], mimetypes.guess_type(self.filename)[0]) self.assertEqual(response['Content-Disposition'], 'attachment;') self.assertEqual(response['Content-Length'], str(os.path.getsize(self.path)))
Python
0
b410cbc1d58c5dce85b1bdff85fa881de58bf299
fix BadArgument
cogs/error.py
cogs/error.py
#!/bin/env python from discord.ext.commands import errors import sys import traceback class ErrorHandler: def __init__(self, bot): self.bot = bot async def on_command_error(self, ctx, error): """ Handle command errors more gracefully """ if isinstance(error, errors.CommandNotFound): return if isinstance(error, errors.NotOwner): return await ctx.send('Sorry, only the owner of qtbot may run this command.') if isinstance(error, errors.CommandOnCooldown): return await ctx.send(f'This command is on cooldown. Please retry in `{error.retry_after:.0f}` second(s).') if isinstance(error, errors.MissingRequiredArgument): return await ctx.send(f'Command missing required argument `{error.param}`.') if isinstance(error, errors.MissingPermissions): return await ctx.send(f'Sorry you need permissions: `{",".join(error.missing_perms)}` to do that.') if isinstance(error, errors.BotMissingPermissions): return await ctx.send(f'Sorry I need permissions: `{",".join(error.missing_perms)}` to do that.') if isinstance(error, errors.BadArgument): return await ctx.send(error.__traceback__) print(f'Ignoring exception in command {ctx.command}:', file=sys.stderr) traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr) def setup(bot): bot.add_cog(ErrorHandler(bot))
#!/bin/env python from discord.ext.commands import errors import sys import traceback class ErrorHandler: def __init__(self, bot): self.bot = bot async def on_command_error(self, ctx, error): """ Handle command errors more gracefully """ if isinstance(error, errors.CommandNotFound): return if isinstance(error, errors.NotOwner): return await ctx.send('Sorry, only the owner of qtbot may run this command.') if isinstance(error, errors.CommandOnCooldown): return await ctx.send(f'This command is on cooldown. Please retry in `{error.retry_after:.0f}` second(s).') if isinstance(error, errors.MissingRequiredArgument): return await ctx.send(f'Command missing required argument `{error.param}`.') if isinstance(error, errors.MissingPermissions): return await ctx.send(f'Sorry you need permissions: `{",".join(error.missing_perms)}` to do that.') if isinstance(error, errors.BotMissingPermissions): return await ctx.send(f'Sorry I need permissions: `{",".join(error.missing_perms)}` to do that.') if isinstance(error, errors.BadArgument): return await ctx.send(f'{error.message}') print(f'Ignoring exception in command {ctx.command}:', file=sys.stderr) traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr) def setup(bot): bot.add_cog(ErrorHandler(bot))
Python
0.998609
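The original handler above used error.message, which no longer exists on exceptions in Python 3 (hence the bug); the commit swaps in the traceback attribute. A hedged aside: a common discord.py alternative is to send the stringified error, sketched below as an alternative, not what the commit does:

# inside on_command_error(self, ctx, error) - alternative sketch:
if isinstance(error, errors.BadArgument):
    # str(error) yields the human-readable conversion-failure message
    return await ctx.send(str(error))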
164f8e665dd2a292dbfe44ba98989725c209990d
Update radio.py
cogs/radio.py
cogs/radio.py
from .utils import config, checks, formats import discord from discord.ext import commands import discord.utils from .utils.api.pycopy import Copy from urllib.parse import unquote import random, json, asyncio class Radio: """The radio-bot related commands.""" def __init__(self, bot): self.bot = bot self.player = None self.stopped = True self.q = asyncio.Queue() self.play_next_song = asyncio.Event() self.current_song = None copy_creds = self.load_copy_creds() self.copycom = Copy(copy_creds['login'], copy_creds['passwd']) self.songs = [] self.update_song_list() if not discord.opus.is_loaded(): discord.opus.load_opus('/usr/local/lib/libopus.so') #FreeBSD path def load_copy_creds(self): with open('../copy_creds.json') as f: return json.load(f) @property def is_playing(self): return self.player is not None and self.player.is_playing() and not self.stopped def toggle_next_song(self): if not self.stopped: self.bot.loop.call_soon_threadsafe(self.play_next_song.set) def update_song_list(self): self.files = self.copycom.list_files(settings.copy_radio_path) @commands.command() async def join(self, *, channel : discord.Channel = None): """Join voice channel. """ if channel is None or channel.type != discord.ChannelType.voice: await self.bot.say('Cannot find a voice channel by that name.') await self.bot.join_voice_channel(channel) @commands.command() async def leave(self): """Leave voice channel. """ await self.stop().invoke(ctx) await self.bot.voice.disconnect() @commands.command() async def pause(self): """Pause. """ if self.player is not None: self.player.pause() @commands.command() async def resume(self): """Resume playing. """ if self.player is not None and not self.is_playing(): self.player.resume() @commands.command() async def skip(self): """Skip song and play next. """ if self.player is not None and self.is_playing(): self.player.stop() self.toggle_next_song() @commands.command() async def stop(self): """Stop playing song. """ if self.is_playing(): self.stopped = True self.player.stop() @commands.command(pass_context=True) async def play(self, ctx): """Start playing song from queue. """ if self.player is not None: if not self.is_playing(): await self.resume().invoke(ctx) return else: await self.bot.say('Already playing a song') return while True: if not self.bot.is_voice_connected(): await self.join(channel=ctx.message.author.voice_channel).invoke(ctx) continue if self.q.empty(): await self.q.put(random.choice(self.songs)) self.play_next_song.clear() self.current = await self.q.get() self.player = self.bot.voice.create_ffmpeg_player( self.copycom.direct_link(settings.copy_radio_path + self.current), after=self.toggle_next_song, #options="-loglevel debug -report", headers = dict(self.copycom.session.headers)) self.stopped = False self.player.start() fmt = 'Playing song "{0}"' song_name = unquote(self.current.split('/')[-1]) await self.bot.say(fmt.format(song_name)) self.bot.change_status(discord.Game(name=song_name)) await self.play_next_song.wait() def setup(bot): bot.add_cog(Radio(bot))
from .utils import config, checks, formats import discord from discord.ext import commands import discord.utils from .utils.api.pycopy import Copy import random, json, asyncio class Radio: """The radio-bot related commands.""" def __init__(self, bot): self.bot = bot self.player = None self.stopped = True self.q = asyncio.Queue() self.play_next_song = asyncio.Event() self.current_song = None copy_creds = self.load_copy_creds() self.copycom = Copy(copy_creds['login'], copy_creds['passwd']) self.songs = [] self.update_song_list() if not discord.opus.is_loaded(): discord.opus.load_opus('/usr/local/lib/libopus.so') #FreeBSD path def load_copy_creds(): with open('../copy_creds.json') as f: return json.load(f) @property def is_playing(self): return self.player is not None and self.player.is_playing() and not self.stopped def toggle_next_song(self): if not self.stopped: self.bot.loop.call_soon_threadsafe(self.play_next_song.set) def update_song_list(self): self.files = self.copycom.list_files(settings.copy_radio_path) @commands.command() async def join(self, *, channel : discord.Channel = None): """Join voice channel. """ if channel is None or channel != discord.ChannelType.voice: await self.bot.say('Cannot find a voice channel by that name.') await self.bot.join_voice_channel(channel) @commands.command() async def leave(self): """Leave voice channel. """ await self.stop().invoke(ctx) await self.bot.voice.disconnect() @commands.command() async def pause(self): """Pause. """ if self.player is not None: self.player.pause() @commands.command() async def resume(self): """Resume playing. """ if self.player is not None and not self.is_playing(): self.player.resume() @commands.command() async def skip(self): """Skip song and play next. """ if self.player is not None and self.is_playing(): self.player.stop() self.toggle_next_song() @commands.command() async def stop(): """Stop playing song. """ if self.is_playing(): self.stopped = True self.player.stop() @commands.command(pass_context=True) async def play(self, ctx): """Start playing song from queue. """ if self.player is not None: if not self.is_playing(): await self.resume().invoke(ctx) return else: await self.bot.say('Already playing a song') return while True: if not selfbot.is_voice_connected(): await self.join(channel=ctx.message.author.voice_channel).invoke(ctx) continue if self.q.empty(): await self.q.put(random.choice(self.songs)) self.play_next_song.clear() self.current = await self.q.get() self.player = self.bot.voice.create_ffmpeg_player( self.copycom.direct_link(settings.copy_radio_path + self.current), after=self.toggle_next_song, #options="-loglevel debug -report", headers = dict(self.copycom.session.headers)) self.stopped = False self.player.start() fmt = 'Playing song "{0}"' song_name = unquote(self.current.split('/')[-1]) await bot.say(fmt.format(song_name)) self.bot.change_status(discord.Game(name=song_name)) await self.play_next_song.wait() def setup(bot): bot.add_cog(Radio(bot))
Python
0.000001
1a83696454d5be09b07d1e1e6a23ea76c77012a9
Fix global imports
src/rnaseq_lib/__init__.py
src/rnaseq_lib/__init__.py
import rnaseq_lib.civic import rnaseq_lib.data import rnaseq_lib.diff_exp import rnaseq_lib.dim_red import rnaseq_lib.docker import rnaseq_lib.drugs import rnaseq_lib.graphs import rnaseq_lib.gtf import rnaseq_lib.images import rnaseq_lib.plot import rnaseq_lib.plot.dr import rnaseq_lib.plot.hview import rnaseq_lib.tissues import rnaseq_lib.tissues.plots import rnaseq_lib.utils import rnaseq_lib.web import rnaseq_lib.web.openfda import rnaseq_lib.web.synapse
import rnaseq_lib.R import rnaseq_lib.civic import rnaseq_lib.data import rnaseq_lib.de import rnaseq_lib.dim_red import rnaseq_lib.docker import rnaseq_lib.drugs import rnaseq_lib.graphs import rnaseq_lib.gtf import rnaseq_lib.images import rnaseq_lib.plotting import rnaseq_lib.tissues import rnaseq_lib.utils import rnaseq_lib.web import rnaseq_lib.web.openfda import rnaseq_lib.web.synapse
Python
0.005989
1a6516765f7d95d8a3d89449dc181a9de27cb868
Shove the input into the main method
files/create_project.py
files/create_project.py
# # This script checks to see if a project exists for the given # app_env/team. # import os import sys from optparse import OptionParser from urllib import quote def build_parser(): parser = OptionParser() parser.add_option("-p", "--project", dest="project", help="Application/Project name.", type="string") parser.add_option("-l", "--platform", dest="platform", help="Application Language/Platform.", type="string") parser.add_option("-o", "--org", dest="org", help="Organization to own this project", type="string") parser.add_option("-t", "--team", dest="team", help="Team to own this project", type="string") parser.add_option("-v", "--verbose", dest="verbose", help="Verbose output", action="store_true") parser.add_option("-s", "--sentry-path", dest="sentry_path", help="Path to sentry project", type="string") return parser def main(): parser = build_parser() options, _args = parser.parse_args() os.environ['SENTRY_CONF'] = options.sentry_path from sentry.utils.runner import configure configure() from django.conf import settings # Add in the sentry object models from sentry.models import Organization, Project, ProjectKey, Team, User admin_email = settings.SENTRY_OPTIONS['system.admin-email'] if not options.project: parser.error("Project name required") if not options.platform: parser.error("Platform is required") try: o = Organization.objects.get(name=options.org) except Organization.DoesNotExist: print "Organization not found: %s" % options.org sys.exit(1) try: u = User.objects.get(email=admin_email) except User.DoesNotExist: print "Admin user not found: %s" % admin_email sys.exit(1) # try to load the requested team try: t = Team.objects.get(name=options.team, organization_id=o.id) except Team.DoesNotExist: # this team does not yet exist. Create it. t = Team() t.name = options.team t.organization_id = o.id t.owner_id = u.id t.save() # reload the object t = Team.objects.get(name=options.team, organization_id=o.id) try: p = Project.objects.get(name=options.project, team_id=t.id) except: # the project doesn't exist. Create it! p = Project() # ensure all project names are in lowercase p.name = options.project.lower() p.team_id = t.id p.organization_id = o.id p.platform = options.platform try: p.save() except: print "Project save failed for %s" % (options.project) sys.exit(1) # create a static file containing this application's DSN k = ProjectKey.objects.get(project_id=p.id).get_dsn() prefix = quote(o.name.lower() + "-" + t.name.lower() + "-") dsn_path = "%s/dsn/%s%s" % (options.sentry_path, prefix, p.name) dsn = open(dsn_path, 'w') dsn.write(k) dsn.close() if options.verbose: print "Project %s created in team %s." % (options.project, t.name) if __name__ == "__main__": main()
# # This script checks to see if a project exists for the given # app_env/team. # import os import sys from optparse import OptionParser from urllib import quote from sentry.utils.runner import configure configure() from django.conf import settings # Add in the sentry object models from sentry.models import Organization, Project, ProjectKey, Team, User def build_parser(): parser = OptionParser() parser.add_option("-p", "--project", dest="project", help="Application/Project name.", type="string") parser.add_option("-l", "--platform", dest="platform", help="Application Language/Platform.", type="string") parser.add_option("-o", "--org", dest="org", help="Organization to own this project", type="string") parser.add_option("-t", "--team", dest="team", help="Team to own this project", type="string") parser.add_option("-v", "--verbose", dest="verbose", help="Verbose output", action="store_true") parser.add_option("-s", "--sentry-path", dest="sentry_path", help="Path to sentry project", action="store_true") return parser def main(): parser = build_parser() options, _args = parser.parse_args() os.environ['SENTRY_CONF'] = options.sentry_path admin_email = settings.SENTRY_OPTIONS['system.admin-email'] if not options.project: parser.error("Project name required") if not options.platform: parser.error("Platform is required") try: o = Organization.objects.get(name=options.org) except Organization.DoesNotExist: print "Organization not found: %s" % options.org sys.exit(1) try: u = User.objects.get(email=admin_email) except User.DoesNotExist: print "Admin user not found: %s" % admin_email sys.exit(1) # try to load the requested team try: t = Team.objects.get(name=options.team, organization_id=o.id) except Team.DoesNotExist: # this team does not yet exist. Create it. t = Team() t.name = options.team t.organization_id = o.id t.owner_id = u.id t.save() # reload the object t = Team.objects.get(name=options.team, organization_id=o.id) try: p = Project.objects.get(name=options.project, team_id=t.id) except: # the project doesn't exist. Create it! p = Project() # ensure all project names are in lowercase p.name = options.project.lower() p.team_id = t.id p.organization_id = o.id p.platform = options.platform try: p.save() except: print "Project save failed for %s" % (options.project) sys.exit(1) # create a static file containing this application's DSN k = ProjectKey.objects.get(project_id=p.id).get_dsn() prefix = quote(o.name.lower() + "-" + t.name.lower() + "-") dsn_path = "%s/dsn/%s%s" % (options.sentry_path, prefix, p.name) dsn = open(dsn_path, 'w') dsn.write(k) dsn.close() if options.verbose: print "Project %s created in team %s." % (options.project, t.name) if __name__ == "__main__": main()
Python
0.999874
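main() in the new_contents above sets SENTRY_CONF before importing anything from sentry, because sentry.utils.runner.configure() reads that variable; at module import time (as in the old_contents) the option value is not yet parsed. A minimal sketch of the ordering rule; the path is a placeholder:

import os

os.environ['SENTRY_CONF'] = '/path/to/sentry'  # must be set first

# only now is it safe to configure Sentry and touch Django settings
from sentry.utils.runner import configure
configure()
from django.conf import settings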
918a168b53e9f026393aaa17347fc855f7e4a70a
add background task, remove extra roles code, use .format
files/devops/fabfile.py
files/devops/fabfile.py
# Fabfile from Quickstart # qkst.io/devops/fabfile from fabric.api import ( task, parallel, roles, run, local, sudo, put, env, settings ) from fabric.contrib.project import rsync_project from fabric.context_managers import cd, prefix from fabric.tasks import execute env.hosts = ['root@localhost:22'] @task def bootstrap(): sudo('apt-get update') sudo('apt-get install -y sysstat wget unzip htop dtach') @task def start(): execute('service', 'cron') @task def service(name, action='start'): sudo('service {0} {1} || true'.format(name, action)) @task def background(process, name='bgprocess'): run('dtach -n `mktemp -u /tmp/{0}.XXXXX` {1}'.format(name, process)) @task def install_deb(url): sudo('wget {0} -O /tmp/download.deb'.format(url)) sudo('dpkg -i /tmp/download.deb && rm /tmp/download.deb') @task def status(): run('service --status-all') run('vmstat') run('df -h') run('iostat') @task def upload(local='./', remote='/tmp'): rsync_project( local_dir=local, remote_dir=remote, exclude=['.git', '*.pyc', '.DS_Store'], extra_opts='-lp' # preserve symlinks and permissions ) @task def put_as_user(file, remote, user): with settings(user=user): put(file, remote)
# Fabfile from Quickstart # qkst.io/devops/fabfile from fabric.api import ( task, parallel, roles, run, local, sudo, put, env, settings ) from fabric.contrib.project import rsync_project from fabric.context_managers import cd, prefix from fabric.tasks import execute env.user = 'root' env.roledefs = { 'local': ['localhost:22'] } env.roledefs['all'] = [host for role in env.roledefs.values() for host in role] @task @roles('local') def setup(): sudo('apt-get update') sudo('apt-get install -y python python-pip python-virtualenv') run('pip install fabric') @task @parallel def install_deb(url): sudo('wget %s -O /tmp/download.deb' % url) sudo('dpkg -i /tmp/download.deb && rm /tmp/download.deb') @task def upload(local='./', remote='/tmp'): rsync_project( local_dir=local, remote_dir=remote, exclude=['.git'], extra_opts='-lp' # preserve symlinks and permissions ) @task def put_as_user(file, remote, user): with settings(user=user): put(file, remote) @task def context_demo(): with cd('/tmp'): run('touch testfile') with prefix('cd /tmp') run('rm testfile')
Python
0.000001
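The background task in the fabfile above launches a process under dtach so it outlives the session; note the format arguments: the socket temp file is named after the task, and the process is what gets executed. A local sketch of the command string it emits, with hypothetical values:

process, name = 'sleep 60', 'napper'
cmd = 'dtach -n `mktemp -u /tmp/{0}.XXXXX` {1}'.format(name, process)
print(cmd)  # dtach -n `mktemp -u /tmp/napper.XXXXX` sleep 60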
a4f69decb2b22822660033265a6517510c8a2eb5
clean up: convert some strings to f-strings, use fewer imports
cogs/utils.py
cogs/utils.py
# -*- coding: utf-8 -*- from discord.ext import commands from datetime import datetime from cogs.cog import Cog import discord class Utils(Cog): """The description for Utils goes here.""" @commands.command(name='reload', hidden=True) @commands.is_owner() async def cog_reload(self, ctx, *, cog: str): """Command which Reloads a Module. Remember to use dot path. e.g: cogs.owner""" try: self.bot.unload_extension(cog) self.bot.load_extension(cog) except Exception as e: await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}') else: await ctx.send('**`SUCCESS`**') @commands.command() async def ping(self, ctx): await ctx.send(f"Pong! time is {ctx.bot.latency * 1000:.2f} ms") @commands.command() async def time(self,ctx): time = datetime.now().strftime("%a, %e %b %Y %H:%M:%S (%-I:%M %p)") await ctx.send(f'the time in alaska is {time}') @commands.command() @commands.is_owner() async def upload(self, ctx, file): with open(file, 'rb') as f: try: await ctx.send(file = discord.File(f, file)) except FileNotFoundError: await ctx.send(f"no such file: {file}") def setup(bot): bot.add_cog(Utils(bot))
# -*- coding: utf-8 -*- from discord.ext import commands from datetime import datetime from cogs.cog import Cog import discord class Utils(Cog): """The description for Utils goes here.""" @commands.command(name='reload', hidden=True) @commands.is_owner() async def cog_reload(self, ctx, *, cog: str): """Command which Reloads a Module. Remember to use dot path. e.g: cogs.owner""" try: self.bot.unload_extension(cog) self.bot.load_extension(cog) except Exception as e: await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}') else: await ctx.send('**`SUCCESS`**') @commands.command() async def ping(self, ctx): await ctx.send(f"Pong! time is {ctx.bot.latency * 1000:.2f)} ms") @commands.command() async def time(self,ctx): time = datetime.now().strftime("%a, %e %b %Y %H:%M:%S (%-I:%M %p)") await ctx.send(f'the time in alaska is {time}') @commands.command() @commands.is_owner() async def upload(self, ctx, file): with open(file, 'rb') as f: try: await ctx.send(file = discord.File(f, file)) except FileNotFoundError: await ctx.send(f"no such file: {file}") def setup(bot): bot.add_cog(Utils(bot))
Python
0.000523
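The stray parenthesis fixed above lived inside the f-string's format spec, f'{...:.2f)}'; the literal compiles, but float.__format__ rejects the spec '.2f)' with a ValueError the moment the command runs. A tiny sketch of the valid spec:

latency_s = 0.04237
print(f"Pong! time is {latency_s * 1000:.2f} ms")  # Pong! time is 42.37 ms
# f"{latency_s * 1000:.2f)}" would raise ValueError: Invalid format specifier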
ba81222c33b4b80c5148c21bb30c60412c85847b
Fix search query
files/kernel-cleanup.py
files/kernel-cleanup.py
#!/usr/bin/env python2.7 """ kernel-cleanup.py Find all installed kernel-related packages and mark them as automatically installed. Then, purge those of these packages that APT now considers auto-removable. Ubuntu APT has logic that prevents us from removing all kernels this way. As an additional safeguard, we always avoid purging the currently running kernel from this script. """ import apt import os os.environ["PATH"] = "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin" class SourcePackageFilter(apt.cache.Filter): def __init__(self, source_packages): self.spkgs = source_packages def apply(self, pkg): if pkg.is_installed: if pkg.installed.source_name in self.spkgs: return True return False class SignedKernelFilter(apt.cache.Filter): def apply(self, pkg): return bool(pkg.is_installed and pkg.section in ("kernel", "utils") and pkg.name.startswith("linux-signed")) class KernelCleaner(object): def __init__(self): self.c = apt.cache.Cache() def get_kernels(self): return self.c.get_providing_packages("linux-image") def get_tracks(self): return set([(pkg.installed or pkg.candidate).source_name for pkg in self.get_kernels()]) def get_kernel_packages(self): packages = apt.cache.FilteredCache(self.c) packages.set_filter(SourcePackageFilter(self.get_tracks())) return packages def get_signed_kernel_packages(self): packages = apt.cache.FilteredCache(self.c) packages.set_filter(SignedKernelFilter()) return packages def mark_kernels_auto(self): for pkg in self.get_kernel_packages(): pkg.mark_auto() self.c.commit() def purge_signed_kernels(self): for pkg in self.get_signed_kernel_packages(): pkg.mark_delete(auto_fix=False, purge=True) self.c.commit() def purge_old_kernels(self): release = os.uname()[2] for pkg in self.get_kernel_packages(): if release not in pkg.name: if pkg.is_auto_removable: pkg.mark_delete(auto_fix=False, purge=True) self.c.commit() def main(): kc = KernelCleaner() kc.purge_signed_kernels() kc.mark_kernels_auto() kc.purge_old_kernels() if __name__ == "__main__": main()
#!/usr/bin/env python2.7 """ kernel-cleanup.py Find all installed kernel-related packages and mark them as automatically installed. Then, purge those of these packages that APT now considers auto-removable. Ubuntu APT has logic that prevents us from removing all kernels this way. As an additional safeguard, we always avoid purging the currently running kernel from this script. """ import apt import os os.environ["PATH"] = "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin" class SourcePackageFilter(apt.cache.Filter): def __init__(self, source_packages): self.spkgs = source_packages def apply(self, pkg): if pkg.is_installed: if pkg.installed.source_name in self.spkgs: return True return False class SignedKernelFilter(apt.cache.Filter): def apply(self, pkg): return bool(pkg.is_installed and pkg.section == "kernel" and pkg.name.startswith("linux-signed")) class KernelCleaner(object): def __init__(self): self.c = apt.cache.Cache() def get_kernels(self): return self.c.get_providing_packages("linux-image") def get_tracks(self): return set([(pkg.installed or pkg.candidate).source_name for pkg in self.get_kernels()]) def get_kernel_packages(self): packages = apt.cache.FilteredCache(self.c) packages.set_filter(SourcePackageFilter(self.get_tracks())) return packages def get_signed_kernel_packages(self): packages = apt.cache.FilteredCache(self.c) packages.set_filter(SignedKernelFilter()) return packages def mark_kernels_auto(self): for pkg in self.get_kernel_packages(): pkg.mark_auto() self.c.commit() def purge_signed_kernels(self): for pkg in self.get_signed_kernel_packages(): pkg.mark_delete(auto_fix=False, purge=True) self.c.commit() def purge_old_kernels(self): release = os.uname()[2] for pkg in self.get_kernel_packages(): if release not in pkg.name: if pkg.is_auto_removable: pkg.mark_delete(auto_fix=False, purge=True) self.c.commit() def main(): kc = KernelCleaner() kc.purge_signed_kernels() kc.mark_kernels_auto() kc.purge_old_kernels() if __name__ == "__main__": main()
Python
0.999382
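The "Fix search query" diff widens SignedKernelFilter from pkg.section == "kernel" to a membership test over ("kernel", "utils"), since signed kernel packages can be filed under either section. A dependency-free sketch of the same predicate, with a namedtuple standing in for python-apt's package objects (an assumption for illustration only):

from collections import namedtuple

# Stand-in for apt.package.Package; the real filter runs inside python-apt.
Pkg = namedtuple("Pkg", "name section is_installed")

def is_signed_kernel(pkg):
    # Mirrors the fixed filter: match either section, not just "kernel".
    return bool(pkg.is_installed
                and pkg.section in ("kernel", "utils")
                and pkg.name.startswith("linux-signed"))

pkgs = [
    Pkg("linux-signed-image-generic", "kernel", True),
    Pkg("linux-signed-generic", "utils", True),   # missed by the old '==' test
    Pkg("linux-image-generic", "kernel", True),
]
print([p.name for p in pkgs if is_signed_kernel(p)])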
1c17b4b10374129d9e26f7023a93ea587dfe7fc7
update version number to 1.0.10-pre as prep for staging/release
findingaids/__init__.py
findingaids/__init__.py
__version_info__ = (1, 0, 10, 'pre') # Dot-connect all but the last. Last is dash-connected if not None. __version__ = '.'.join(str(i) for i in __version_info__[:-1]) if __version_info__[-1] is not None: __version__ += ('-%s' % (__version_info__[-1],)) #THIS IS DUPLICATE CODE FROM DWRANGLER AND SHOULD EVENTUALLY BE MOVED INTO EULCORE # Extends the normal render_to_response to include RequestContext objects. # Taken from http://www.djangosnippets.org/snippets/3/ # Other similar implementations and adaptations http://lincolnloop.com/blog/2008/may/10/getting-requestcontext-your-templates/ # I also added the SCRIPT_NAME to dictionary so it would be available to templates # Since I always uset his for this application it makes sense for this app but # I'm unsure this is the best way overall. # TODO: update to use new render shortcut provided in newer versions of django def render_with_context(req, *args, **kwargs): kwargs['context_instance'] = RequestContext(req, {'script_name': req.META['SCRIPT_NAME']}) # Line below was an attempt to add script name to the context so I could # deal with template paths for the SITE_URL in a way that handled # apps being installed in a site subURL. # args[1]['script_name'] = req.META['SCRIPT_NAME'] return render_to_response(*args, **kwargs)
__version_info__ = (1, 0, 9, None) # Dot-connect all but the last. Last is dash-connected if not None. __version__ = '.'.join(str(i) for i in __version_info__[:-1]) if __version_info__[-1] is not None: __version__ += ('-%s' % (__version_info__[-1],)) #THIS IS DUPLICATE CODE FROM DWRANGLER AND SHOULD EVENTUALLY BE MOVED INTO EULCORE # Extends the normal render_to_response to include RequestContext objects. # Taken from http://www.djangosnippets.org/snippets/3/ # Other similar implementations and adaptations http://lincolnloop.com/blog/2008/may/10/getting-requestcontext-your-templates/ # I also added the SCRIPT_NAME to dictionary so it would be available to templates # Since I always uset his for this application it makes sense for this app but # I'm unsure this is the best way overall. def render_with_context(req, *args, **kwargs): kwargs['context_instance'] = RequestContext(req, {'script_name': req.META['SCRIPT_NAME']}) # Line below was an attempt to add script name to the context so I could # deal with template paths for the SITE_URL in a way that handled # apps being installed in a site subURL. # args[1]['script_name'] = req.META['SCRIPT_NAME'] return render_to_response(*args, **kwargs)
Python
0
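The version bump only edits the tuple; the string is derived from it. A self-contained sketch of the same dot/dash logic, showing both the pre-release and final cases:

def version_string(version_info):
    # Dot-join all but the last element; dash-append the last if not None.
    version = '.'.join(str(i) for i in version_info[:-1])
    if version_info[-1] is not None:
        version += '-%s' % (version_info[-1],)
    return version

print(version_string((1, 0, 10, 'pre')))  # -> '1.0.10-pre'
print(version_string((1, 0, 9, None)))    # -> '1.0.9'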
8700bcaabc2470849a47383c991c37a886da1b4a
add profiler
corehq/apps/data_interfaces/dispatcher.py
corehq/apps/data_interfaces/dispatcher.py
from django.utils.decorators import method_decorator from corehq import privileges from corehq.apps.accounting.decorators import requires_privilege_with_fallback from corehq.apps.reports.dispatcher import ReportDispatcher, ProjectReportDispatcher, datespan_default from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import Permissions from django_prbac.exceptions import PermissionDenied from django_prbac.utils import ensure_request_has_privilege from dimagi.utils.decorators.profile import * require_can_edit_data = require_permission(Permissions.edit_data) class DataInterfaceDispatcher(ProjectReportDispatcher): prefix = 'data_interface' map_name = 'DATA_INTERFACES' @profile("/home/sravfeyn/src/hotshot-logfiles/users.prof") def dispatch(self, request, *args, **kwargs): from corehq.apps.reports.standard.export import DeidExportReport if kwargs['report_slug'] in [DeidExportReport.slug]: return self.deid_dispatch(request, *args, **kwargs) return super(DataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) @method_decorator(requires_privilege_with_fallback(privileges.DEIDENTIFIED_DATA)) def deid_dispatch(self, request, *args, **kwargs): return super(DataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) def permissions_check(self, report, request, domain=None, is_navigation_check=False): if is_navigation_check: from corehq.apps.reports.standard.export import DeidExportReport if report.split('.')[-1] in [DeidExportReport.__name__]: try: ensure_request_has_privilege(request, privileges.DEIDENTIFIED_DATA) except PermissionDenied: return False return super(DataInterfaceDispatcher, self).permissions_check(report, request, domain) class EditDataInterfaceDispatcher(ReportDispatcher): prefix = 'edit_data_interface' map_name = 'EDIT_DATA_INTERFACES' @method_decorator(require_can_edit_data) @datespan_default def dispatch(self, request, *args, **kwargs): from corehq.apps.importer.base import ImportCases if kwargs['report_slug'] in [ImportCases.slug]: return self.bulk_dispatch(request, *args, **kwargs) return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) @method_decorator(requires_privilege_with_fallback(privileges.BULK_CASE_MANAGEMENT)) def bulk_dispatch(self, request, *args, **kwargs): return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) def permissions_check(self, report, request, domain=None, is_navigation_check=False): if is_navigation_check: from corehq.apps.importer.base import ImportCases if report.split('.')[-1] in [ImportCases.__name__]: try: ensure_request_has_privilege(request, privileges.BULK_CASE_MANAGEMENT) except PermissionDenied: return False return request.couch_user.can_edit_data(domain)
from django.utils.decorators import method_decorator from corehq import privileges from corehq.apps.accounting.decorators import requires_privilege_with_fallback from corehq.apps.reports.dispatcher import ReportDispatcher, ProjectReportDispatcher, datespan_default from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import Permissions from django_prbac.exceptions import PermissionDenied from django_prbac.utils import ensure_request_has_privilege require_can_edit_data = require_permission(Permissions.edit_data) class DataInterfaceDispatcher(ProjectReportDispatcher): prefix = 'data_interface' map_name = 'DATA_INTERFACES' def dispatch(self, request, *args, **kwargs): from corehq.apps.reports.standard.export import DeidExportReport if kwargs['report_slug'] in [DeidExportReport.slug]: return self.deid_dispatch(request, *args, **kwargs) return super(DataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) @method_decorator(requires_privilege_with_fallback(privileges.DEIDENTIFIED_DATA)) def deid_dispatch(self, request, *args, **kwargs): return super(DataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) def permissions_check(self, report, request, domain=None, is_navigation_check=False): if is_navigation_check: from corehq.apps.reports.standard.export import DeidExportReport if report.split('.')[-1] in [DeidExportReport.__name__]: try: ensure_request_has_privilege(request, privileges.DEIDENTIFIED_DATA) except PermissionDenied: return False return super(DataInterfaceDispatcher, self).permissions_check(report, request, domain) class EditDataInterfaceDispatcher(ReportDispatcher): prefix = 'edit_data_interface' map_name = 'EDIT_DATA_INTERFACES' @method_decorator(require_can_edit_data) @datespan_default def dispatch(self, request, *args, **kwargs): from corehq.apps.importer.base import ImportCases if kwargs['report_slug'] in [ImportCases.slug]: return self.bulk_dispatch(request, *args, **kwargs) return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) @method_decorator(requires_privilege_with_fallback(privileges.BULK_CASE_MANAGEMENT)) def bulk_dispatch(self, request, *args, **kwargs): return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) def permissions_check(self, report, request, domain=None, is_navigation_check=False): if is_navigation_check: from corehq.apps.importer.base import ImportCases if report.split('.')[-1] in [ImportCases.__name__]: try: ensure_request_has_privilege(request, privileges.BULK_CASE_MANAGEMENT) except PermissionDenied: return False return request.couch_user.can_edit_data(domain)
Python
0.000002
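The commit wraps dispatch in dimagi's @profile decorator, which dumps hotshot stats to a hard-coded file path. hotshot is Python-2-only, so as a rough modern analogue (an assumption, not the dimagi implementation) the same idea can be sketched with cProfile:

import cProfile
import functools

def profile(logfile):
    """Decorator that writes cProfile stats for each call to `logfile`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            profiler = cProfile.Profile()
            try:
                return profiler.runcall(func, *args, **kwargs)
            finally:
                profiler.dump_stats(logfile)
        return wrapper
    return decorator

@profile("/tmp/users.prof")  # hypothetical path, mirroring the hard-coded one above
def dispatch():
    return sum(range(10000))

dispatch()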
61e67ed5740148f74e67aef09afc65ef1c3fd6a8
Handle commands in a very trivial way
hackday_bot/bot.py
hackday_bot/bot.py
"""hackday_bot.bot module.""" import logging import re import time from prawcore.exceptions import PrawcoreException AVAILABLE_COMMANDS = ('help', 'interested', 'join', 'leave', 'uninterested') COMMAND_RE = re.compile(r'(?:\A|\s)!({})(?=\s|\Z)' .format('|'.join(AVAILABLE_COMMANDS))) logger = logging.getLogger(__package__) class Bot(object): """Bot manages comments made to the specified subreddit.""" def __init__(self, subreddit): """Initialize an instance of Bot. :param subreddit: The subreddit to monitor for new comments. """ self.subreddit = subreddit def _command_help(self, comment): comment.reply('help text will go here') def _command_interested(self, comment): comment.reply('soon I will record your interest') def _command_join(self, comment): comment.reply('soon I will record your sign up') def _command_leave(self, comment): comment.reply('soon I will record your abdication') def _command_uninterested(self, comment): comment.reply('soon I will record your uninterest') def _handle_comment(self, comment): commands = set(COMMAND_RE.findall(comment.body)) if len(commands) > 1: comment.reply('Please provide only a single command.') elif len(commands) == 1: command = commands.pop() getattr(self, '_command_{}'.format(command))(comment) logger.debug('Handled {} by {}'.format(command, comment.author)) def run(self): """Run the bot indefinitely.""" running = True subreddit_url = '{}{}'.format(self.subreddit._reddit.config.reddit_url, self.subreddit.url) logger.info('Watching for comments on: {}'.format(subreddit_url)) while running: try: for comment in self.subreddit.stream.comments(): self._handle_comment(comment) except KeyboardInterrupt: logger.info('Termination received. Goodbye!') running = False except PrawcoreException: logger.exception('run loop') time.sleep(10) return 0
"""hackday_bot.bot module.""" import logging import time from prawcore.exceptions import PrawcoreException logger = logging.getLogger(__package__) class Bot(object): """Bot manages comments made to the specified subreddit.""" def __init__(self, subreddit): """Initialize an instance of Bot. :param subreddit: The subreddit to monitor for new comments. """ self.subreddit = subreddit def _handle_comment(self, comment): logger.info(comment) def run(self): """Run the bot indefinitely.""" running = True subreddit_url = '{}{}'.format(self.subreddit._reddit.config.reddit_url, self.subreddit.url) logger.info('Watching for comments on: {}'.format(subreddit_url)) while running: try: for comment in self.subreddit.stream.comments(): self._handle_comment(comment) except KeyboardInterrupt: logger.info('Termination received. Goodbye!') running = False except PrawcoreException: logger.exception('run loop') time.sleep(10) return 0
Python
0.00022
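The "very trivial" command handling hinges on COMMAND_RE: a '!' prefix, one of the known command words, bounded by whitespace or the string edges. A standalone check of how that regex tokenizes comment bodies:

import re

AVAILABLE_COMMANDS = ('help', 'interested', 'join', 'leave', 'uninterested')
COMMAND_RE = re.compile(r'(?:\A|\s)!({})(?=\s|\Z)'
                        .format('|'.join(AVAILABLE_COMMANDS)))

print(COMMAND_RE.findall('!join please'))            # ['join']
print(COMMAND_RE.findall('I am !interested !join'))  # ['interested', 'join']
print(COMMAND_RE.findall('email!join'))              # [] -- needs a leading boundary
print(COMMAND_RE.findall('!jazz'))                   # [] -- unknown command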
8c519c3d91e7bb9acf7f2bfedbf97c7b2a911a14
add host and port params to Emulator
anom/testing/emulator.py
anom/testing/emulator.py
import logging import os import re import signal import shlex import subprocess from queue import Empty, Queue from threading import Thread #: The command to run in order to start the emulator. _emulator_command = "gcloud beta emulators datastore start --consistency={consistency:0.2f} --host-port={host}:{port} --no-store-on-disk" # noqa #: The regexp that is used to search for env vars in the emulator output. _env_var_re = re.compile(r"export ([^=]+)=(.+)") #: The string that is used to determine when the Emulator has finished starting up. _log_marker = "Dev App Server is now running" class Emulator: """Runs the Cloud Datastore emulator in a subprocess for testing purposes. Parameters: host(str): The host name the emulator should bind to. port(int): The port on which the emulator should listen on. consistency(float): A value between 0.0 and 1.0 representing the percentage of datastore requests that should succeed. Example:: from anom.testing import Emulator @pytest.fixture(scope="session") def emulator(): emulator = Emulator() emulator.start(inject=True) yield emulator.stop() """ def __init__(self, *, host="127.0.0.1", port=9898, consistency=1): self._emulator_command = shlex.split(_emulator_command.format( host=host, port=port, consistency=consistency )) self._logger = logging.getLogger("Emulator") self._proc = None self._queue = Queue() self._thread = Thread(target=self._run, daemon=True) def start(self, *, timeout=15, inject=False): """Start the emulator process and wait for it to initialize. Parameters: timeout(int): The maximum number of seconds to wait for the Emulator to start up. inject(bool): Whether or not to inject the emulator env vars into the current process. Returns: dict: A dictionary of env vars that can be used to access the Datastore emulator. """ try: self._thread.start() env_vars = self._queue.get(block=True, timeout=timeout) if inject: os.environ.update(env_vars) return env_vars except Empty: # pragma: no cover raise RuntimeError("Timed out while waiting for Emulator to start up.") def stop(self): """Stop the emulator process. Returns: int: The process return code or None if the process isn't currently running. """ if self._proc is not None: if self._proc.poll() is None: try: os.killpg(self._proc.pid, signal.SIGTERM) _, returncode = os.waitpid(self._proc.pid, 0) self._logger.debug("Emulator process exited with code %d.", returncode) return returncode except ChildProcessError: # pragma: no cover return self._proc.returncode return self._proc.returncode # pragma: no cover return None # pragma: no cover def _run(self): self._proc = subprocess.Popen( self._emulator_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid, ) env_vars = {} while self._proc.poll() is None: line = self._proc.stdout.readline().strip().decode("utf-8") self._logger.debug(line) match = _env_var_re.search(line) if match: name, value = match.groups() env_vars[name] = value # If no env vars were found this will eventually cause # `start` to time out which is what we want since running # tests w/o the env vars set up could prove dangerous. if _log_marker in line and env_vars: self._queue.put(env_vars)
import logging import os import re import signal import shlex import subprocess from queue import Empty, Queue from threading import Thread #: The command to run in order to start the emulator. _emulator_command = "gcloud beta emulators datastore start --consistency={consistency:0.2f} --no-store-on-disk" #: The regexp that is used to search for env vars in the emulator output. _env_var_re = re.compile(r"export ([^=]+)=(.+)") #: The string that is used to determine when the Emulator has finished starting up. _log_marker = "Dev App Server is now running" class Emulator: """Runs the Cloud Datastore emulator in a subprocess for testing purposes. Parameters: consistency(float): A value between 0.0 and 1.0 representing the percentage of datastore requests that should succeed. Example:: from anom.testing import Emulator @pytest.fixture(scope="session") def emulator(): emulator = Emulator() emulator.start(inject=True) yield emulator.stop() """ def __init__(self, *, consistency=1): self._emulator_command = shlex.split(_emulator_command.format( consistency=consistency )) self._logger = logging.getLogger("Emulator") self._proc = None self._queue = Queue() self._thread = Thread(target=self._run, daemon=True) def start(self, *, timeout=15, inject=False): """Start the emulator process and wait for it to initialize. Parameters: timeout(int): The maximum number of seconds to wait for the Emulator to start up. inject(bool): Whether or not to inject the emulator env vars into the current process. Returns: dict: A dictionary of env vars that can be used to access the Datastore emulator. """ try: self._thread.start() env_vars = self._queue.get(block=True, timeout=timeout) if inject: os.environ.update(env_vars) return env_vars except Empty: # pragma: no cover raise RuntimeError("Timed out while waiting for Emulator to start up.") def stop(self): """Stop the emulator process. Returns: int: The process return code or None if the process isn't currently running. """ if self._proc is not None: if self._proc.poll() is None: try: os.killpg(self._proc.pid, signal.SIGTERM) _, returncode = os.waitpid(self._proc.pid, 0) self._logger.debug("Emulator process exited with code %d.", returncode) return returncode except ChildProcessError: # pragma: no cover return self._proc.returncode return self._proc.returncode # pragma: no cover return None # pragma: no cover def _run(self): self._proc = subprocess.Popen( self._emulator_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid, ) env_vars = {} while self._proc.poll() is None: line = self._proc.stdout.readline().strip().decode("utf-8") self._logger.debug(line) match = _env_var_re.search(line) if match: name, value = match.groups() env_vars[name] = value # If no env vars were found this will eventually cause # `start` to time out which is what we want since running # tests w/o the env vars set up could prove dangerous. if _log_marker in line and env_vars: self._queue.put(env_vars)
Python
0
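The new host/port keywords are spliced into the gcloud command template before shlex splits it for subprocess. A quick sketch of just that formatting step, using the same template and the class defaults:

import shlex

_emulator_command = ("gcloud beta emulators datastore start "
                     "--consistency={consistency:0.2f} "
                     "--host-port={host}:{port} --no-store-on-disk")

args = shlex.split(_emulator_command.format(
    host="127.0.0.1", port=9898, consistency=1,
))
print(args)
# ['gcloud', 'beta', 'emulators', 'datastore', 'start',
#  '--consistency=1.00', '--host-port=127.0.0.1:9898', '--no-store-on-disk']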
61cf4e2feb3d8920179e28719822c7fb34ea6550
Add defaults to the ibm RNG
3/ibm_rng.py
3/ibm_rng.py
def ibm_rng(x1, a=65539, c=0, m=2**31): x = x1 while True: x = (a * x + c) % m yield x / (m-1) def main(): rng = ibm_rng(1, 65539, 0, 2**31) while True: x = next(rng) print(x) if __name__ == '__main__': main()
def ibm_rng(x1, a, c, m): x = x1 while True: x = (a * x + c) % m yield x / (m-1) def main(): rng = ibm_rng(1, 65539, 0, 2**31) while True: x = next(rng) print(x) if __name__ == '__main__': main()
Python
0.000002
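With the new defaults, ibm_rng() reproduces the classic RANDU parameters without spelling them out at the call site. A self-contained usage sketch that takes a few draws with itertools.islice:

from itertools import islice

def ibm_rng(x1, a=65539, c=0, m=2**31):
    # Linear congruential generator; the defaults are the RANDU constants.
    x = x1
    while True:
        x = (a * x + c) % m
        yield x / (m - 1)

rng = ibm_rng(1)  # equivalent to ibm_rng(1, 65539, 0, 2**31)
print(list(islice(rng, 3)))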
776c2992b64911f86740cdf0af4f05c7587430c7
Bump version
hbmqtt/__init__.py
hbmqtt/__init__.py
# Copyright (c) 2015 Nicolas JOUANIN # # See the file license.txt for copying permission. VERSION = (0, 9, 5, 'alpha', 0)
# Copyright (c) 2015 Nicolas JOUANIN # # See the file license.txt for copying permission. VERSION = (0, 9, 4, 'final', 0)
Python
0
0202eeed429149cbfafd53d9ba6281a0926ea9df
Add labels to account forms and add a NewUserWithPasswordForm that adds password inputs to the new user form.
froide/account/forms.py
froide/account/forms.py
from django import forms from django.utils.translation import ugettext as _ from django.utils.safestring import mark_safe from django.core.urlresolvers import reverse from django.contrib.auth.models import User from helper.widgets import EmailInput class NewUserForm(forms.Form): first_name = forms.CharField(max_length=30, label=_('First name'), widget=forms.TextInput(attrs={'placeholder': _('First Name'), 'class': 'inline'})) last_name = forms.CharField(max_length=30, label=_('Last name'), widget=forms.TextInput(attrs={'placeholder': _('Last Name'), 'class': 'inline'})) user_email = forms.EmailField(label=_('Email address'), widget=EmailInput(attrs={'placeholder': _('mail@ddress.net')})) def clean_first_name(self): return self.cleaned_data['first_name'].strip() def clean_last_name(self): return self.cleaned_data['last_name'].strip() def clean_user_email(self): email = self.cleaned_data['user_email'] try: User.objects.get(email=email) except User.DoesNotExist: pass else: raise forms.ValidationError(mark_safe( _('This email address already has an account. <a href="%s?simple" class="target-small">Please login using that email address.</a>') % reverse("account-login"))) return email class NewUserWithPasswordForm(NewUserForm): password = forms.CharField(widget=forms.PasswordInput, label=_('Password')) password2 = forms.CharField(widget=forms.PasswordInput, label=_('Password (repeat)')) def clean(self): cleaned = super(NewUserWithPasswordForm, self).clean() if cleaned['password'] != cleaned['password2']: raise forms.ValidationError(_("Passwords do not match!")) return cleaned class UserLoginForm(forms.Form): email = forms.EmailField(widget=EmailInput( attrs={'placeholder': _('mail@ddress.net')}), label=_('Email address')) password = forms.CharField(widget=forms.PasswordInput, label=_('Password'))
from django import forms from django.utils.translation import ugettext as _ from django.utils.safestring import mark_safe from django.core.urlresolvers import reverse from django.contrib.auth.models import User from helper.widgets import EmailInput class NewUserForm(forms.Form): first_name = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'placeholder': _('First Name'), 'class': 'inline'})) last_name = forms.CharField(max_length=30, widget=forms.TextInput( attrs={'placeholder': _('Last Name'), 'class': 'inline'})) user_email = forms.EmailField(widget=EmailInput( attrs={'placeholder': _('mail@ddress.net')})) def clean_first_name(self): return self.cleaned_data['first_name'].strip() def clean_last_name(self): return self.cleaned_data['last_name'].strip() def clean_user_email(self): email = self.cleaned_data['user_email'] try: User.objects.get(email=email) except User.DoesNotExist: pass else: raise forms.ValidationError(mark_safe( _('This email address already has an account. <a href="%s?simple" class="target-small">Please login using that email address.</a>') % reverse("account-login"))) return email class UserLoginForm(forms.Form): email = forms.EmailField(widget=EmailInput( attrs={'placeholder': _('mail@ddress.net')})) password = forms.CharField(widget=forms.PasswordInput)
Python
0
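NewUserWithPasswordForm compares the two password fields in clean(). One caveat worth noting (an observation about Django forms in general, not part of this diff): cleaned_data omits any field that failed its own validation, so cleaned['password'] can raise KeyError, which is why form-level clean() methods usually read values with .get(). A framework-free sketch of the safer comparison:

def check_passwords(cleaned):
    """Return an error message if the two password entries disagree."""
    password = cleaned.get('password')
    password2 = cleaned.get('password2')
    # Only compare when both survived field-level validation.
    if password and password2 and password != password2:
        return "Passwords do not match!"
    return None

print(check_passwords({'password': 'a', 'password2': 'b'}))  # mismatch message
print(check_passwords({'password': 'a', 'password2': 'a'}))  # None
print(check_passwords({'password2': 'a'}))                   # None (field missing)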
2db84e6c94fdc8de821a98442ce928db9dd73441
Sponsored event should dump title
src/remotedb/dumpers.py
src/remotedb/dumpers.py
import collections import functools import urllib.parse from django.core.serializers.json import DjangoJSONEncoder SITE_PREFIX = 'https://tw.pycon.org/2016/media/' USER_DUMP_KEYS = [ 'bio', 'email', 'speaker_name', 'facebook_profile_url', 'github_id', 'twitter_id', ] PROPOSAL_DUMP_KEYS = SPONSORED_EVENT_DUMP_KEYS = [ 'abstract', 'category', 'detailed_description', 'language', 'python_level', 'recording_policy', 'slide_link', 'title', ] def dump_user(user): data = {key: getattr(user, key) for key in USER_DUMP_KEYS} if user.photo: data['photo_url'] = urllib.parse.urljoin(SITE_PREFIX, user.photo.url) return data def dump_proposal(proposal): data = {key: getattr(proposal, key) for key in PROPOSAL_DUMP_KEYS} data['speakers'] = [dump_user(info.user) for info in proposal.speakers] return data def dump_sponsored_event_detail(event): data = {key: getattr(event, key) for key in SPONSORED_EVENT_DUMP_KEYS} data['speakers'] = [dump_user(event.host)] return data json_encoder = DjangoJSONEncoder() def event_dumper(f): """Decorator to provide dumping of common event fields. """ @functools.wraps(f) def inner(obj): data = { 'begin_time': json_encoder.encode(obj.begin_time.value).strip('"'), 'end_time': json_encoder.encode(obj.end_time.value).strip('"'), 'location': obj.location, } data.update(f(obj)) return data return inner @event_dumper def dump_keynote_event(event): return { 'type': 'keynote', 'speakers': [event.speaker_name], } @event_dumper def dump_custom_event(event): return { 'type': 'custom', 'title': event.title, } @event_dumper def dump_sponsored_event(event): return { 'type': 'sponsored_talk', 'title': event.title, 'speakers': [event.host.speaker_name], 'detail_id': 'sponsored_{}'.format(event.pk) } @event_dumper def dump_proposed_talk_event(event): return { 'type': 'talk', 'title': event.proposal.title, 'speakers': [ speaker.user.speaker_name for speaker in event.proposal.speakers ], 'detail_id': str(event.proposal.pk), } EVENT_LOADERS = { 'keynoteevent': dump_keynote_event, 'customevent': dump_custom_event, 'sponsoredevent': dump_sponsored_event, 'proposedtalkevent': dump_proposed_talk_event, } def dump_schedule(event_iter): schedule_data_lists = collections.defaultdict(list) for event in event_iter: loader = EVENT_LOADERS[event._meta.model_name] data = loader(event) key = data['begin_time'].split('T', 1)[0] schedule_data_lists[key].append(data) for data_list in schedule_data_lists.values(): data_list.sort(key=lambda data: (data['begin_time'], data['location'])) return schedule_data_lists
import collections import functools import urllib.parse from django.core.serializers.json import DjangoJSONEncoder SITE_PREFIX = 'https://tw.pycon.org/2016/media/' USER_DUMP_KEYS = [ 'bio', 'email', 'speaker_name', 'facebook_profile_url', 'github_id', 'twitter_id', ] PROPOSAL_DUMP_KEYS = SPONSORED_EVENT_DUMP_KEYS = [ 'abstract', 'category', 'detailed_description', 'language', 'python_level', 'recording_policy', 'slide_link', 'title', ] def dump_user(user): data = {key: getattr(user, key) for key in USER_DUMP_KEYS} if user.photo: data['photo_url'] = urllib.parse.urljoin(SITE_PREFIX, user.photo.url) return data def dump_proposal(proposal): data = {key: getattr(proposal, key) for key in PROPOSAL_DUMP_KEYS} data['speakers'] = [dump_user(info.user) for info in proposal.speakers] return data def dump_sponsored_event_detail(event): data = {key: getattr(event, key) for key in SPONSORED_EVENT_DUMP_KEYS} data['speakers'] = [dump_user(event.host)] return data json_encoder = DjangoJSONEncoder() def event_dumper(f): """Decorator to provide dumping of common event fields. """ @functools.wraps(f) def inner(obj): data = { 'begin_time': json_encoder.encode(obj.begin_time.value).strip('"'), 'end_time': json_encoder.encode(obj.end_time.value).strip('"'), 'location': obj.location, } data.update(f(obj)) return data return inner @event_dumper def dump_keynote_event(event): return { 'type': 'keynote', 'speakers': [event.speaker_name], } @event_dumper def dump_custom_event(event): return { 'type': 'custom', 'title': event.title, } @event_dumper def dump_sponsored_event(event): return { 'type': 'sponsored_talk', 'speakers': [event.host.speaker_name], 'detail_id': 'sponsored_{}'.format(event.pk) } @event_dumper def dump_proposed_talk_event(event): return { 'type': 'talk', 'title': event.proposal.title, 'speakers': [ speaker.user.speaker_name for speaker in event.proposal.speakers ], 'detail_id': str(event.proposal.pk), } EVENT_LOADERS = { 'keynoteevent': dump_keynote_event, 'customevent': dump_custom_event, 'sponsoredevent': dump_sponsored_event, 'proposedtalkevent': dump_proposed_talk_event, } def dump_schedule(event_iter): schedule_data_lists = collections.defaultdict(list) for event in event_iter: loader = EVENT_LOADERS[event._meta.model_name] data = loader(event) key = data['begin_time'].split('T', 1)[0] schedule_data_lists[key].append(data) for data_list in schedule_data_lists.values(): data_list.sort(key=lambda data: (data['begin_time'], data['location'])) return schedule_data_lists
Python
0.999635
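The one-line fix adds 'title' to the sponsored-event payload; the shared begin/end/location fields come from the event_dumper decorator. A self-contained sketch of that decorator pattern, with types.SimpleNamespace standing in for the Django model and the time fields omitted for brevity:

import functools
from types import SimpleNamespace

def event_dumper(f):
    """Merge fields common to every event into each dumper's payload."""
    @functools.wraps(f)
    def inner(obj):
        data = {'location': obj.location}  # begin/end times omitted here
        data.update(f(obj))
        return data
    return inner

@event_dumper
def dump_sponsored_event(event):
    return {
        'type': 'sponsored_talk',
        'title': event.title,  # the field this commit adds
        'speakers': [event.host.speaker_name],
    }

event = SimpleNamespace(location='R0', title='Sponsored talk',
                        host=SimpleNamespace(speaker_name='Host'))
print(dump_sponsored_event(event))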
3c4e65f123dc56255262e38a934b9cacd03c0bfe
remove debug prints
django_babel/management/commands/babel.py
django_babel/management/commands/babel.py
#-*- coding: utf-8 -*- import os from distutils.dist import Distribution from optparse import make_option from subprocess import call from django.core.management.base import LabelCommand, CommandError from django.conf import settings class Command(LabelCommand): args = '[makemessages] [compilemessages]' option_list = LabelCommand.option_list + ( make_option('--locale', '-l', default=None, dest='locale', action='append', help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). ' 'Can be used multiple times.'), make_option('--domain', '-d', default='django', dest='domain', help='The domain of the message files (default: "django").'), make_option('--mapping-file', '-F', default=None, dest='mapping_file', help='Mapping file') ) def handle_label(self, command, **options): if command not in ('makemessages', 'compilemessages'): raise CommandError("You must either apply 'makemessages' or 'compilemessages'") if command == 'makemessages': self.handle_makemessages(**options) if command == 'compilemessages': self.handle_compilemessages(**options) def handle_makemessages(self, **options): locale_paths = list(settings.LOCALE_PATHS) domain = options.pop('domain') locales = options.pop('locale') # support for mapping file specification via setup.cfg # TODO: Try to support all possible options. distribution = Distribution() distribution.parse_config_files(distribution.find_config_files()) mapping_file = options.pop('mapping_file', None) if mapping_file is None and 'extract_messages' in distribution.command_options: opts = distribution.command_options['extract_messages'] try: mapping_file = opts.get('mapping_file', ())[1] except IndexError: mapping_file = None for path in locale_paths: potfile = os.path.join(path, '%s.pot' % domain) if not os.path.exists(potfile): continue cmd = ['pybabel', 'extract', '-o', os.path.join(path, '%s.pot' % domain)] if mapping_file is not None: cmd.extend(['-F', mapping_file]) cmd.append(os.path.dirname(path)) call(cmd) for locale in locales: cmd = ['pybabel', 'update', '-D', domain, '-i', os.path.join(path, '%s.pot' % domain), '-d', path, '-l', locale] call(cmd) def handle_compilemessages(self, **options): locale_paths = list(settings.LOCALE_PATHS) domain = options.pop('domain') locales = options.pop('locale') for path in locale_paths: for locale in locales: po_file = os.path.join(path, locale, 'LC_MESSAGES', domain + '.po') if os.path.exists(po_file): cmd = ['pybabel', 'compile', '-D', domain, '-d', path, '-l', locale] call(cmd)
#-*- coding: utf-8 -*- import os from distutils.dist import Distribution from optparse import make_option from subprocess import call from django.core.management.base import LabelCommand, CommandError from django.conf import settings class Command(LabelCommand): args = '[makemessages] [compilemessages]' option_list = LabelCommand.option_list + ( make_option('--locale', '-l', default=None, dest='locale', action='append', help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). ' 'Can be used multiple times.'), make_option('--domain', '-d', default='django', dest='domain', help='The domain of the message files (default: "django").'), make_option('--mapping-file', '-F', default=None, dest='mapping_file', help='Mapping file') ) def handle_label(self, command, **options): if command not in ('makemessages', 'compilemessages'): raise CommandError("You must either apply 'makemessages' or 'compilemessages'") if command == 'makemessages': self.handle_makemessages(**options) if command == 'compilemessages': self.handle_compilemessages(**options) def handle_makemessages(self, **options): locale_paths = list(settings.LOCALE_PATHS) domain = options.pop('domain') locales = options.pop('locale') # support for mapping file specification via setup.cfg # TODO: Try to support all possible options. distribution = Distribution() distribution.parse_config_files(distribution.find_config_files()) mapping_file = options.pop('mapping_file', None) if mapping_file is None and 'extract_messages' in distribution.command_options: opts = distribution.command_options['extract_messages'] try: mapping_file = opts.get('mapping_file', ())[1] except IndexError: mapping_file = None for path in locale_paths: potfile = os.path.join(path, '%s.pot' % domain) if not os.path.exists(potfile): continue cmd = ['pybabel', 'extract', '-o', os.path.join(path, '%s.pot' % domain)] if mapping_file is not None: cmd.extend(['-F', mapping_file]) cmd.append(os.path.dirname(path)) print cmd call(cmd) for locale in locales: cmd = ['pybabel', 'update', '-D', domain, '-i', os.path.join(path, '%s.pot' % domain), '-d', path, '-l', locale] print cmd call(cmd) def handle_compilemessages(self, **options): locale_paths = list(settings.LOCALE_PATHS) domain = options.pop('domain') locales = options.pop('locale') for path in locale_paths: for locale in locales: po_file = os.path.join(path, locale, 'LC_MESSAGES', domain + '.po') if os.path.exists(po_file): cmd = ['pybabel', 'compile', '-D', domain, '-d', path, '-l', locale] call(cmd)
Python
0.000001
294254aad0d798ffcfca6e34b48b4ed704bb5cd0
Simplify CachingManager logic
django_prices_openexchangerates/models.py
django_prices_openexchangerates/models.py
from __future__ import unicode_literals from decimal import Decimal from django.conf import settings from django.core.exceptions import ValidationError from django.core.cache import cache from django.db import models from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import python_2_unicode_compatible from .currencies import CURRENCIES BASE_CURRENCY = getattr(settings, 'OPENEXCHANGERATES_BASE_CURRENCY', 'USD') CACHE_KEY = getattr(settings, 'OPENEXCHANGERATES_CACHE_KEY', 'conversion_rates') CACHE_TIME = getattr(settings, 'OPENEXCHANGERATES_CACHE_TTL', 60*60) def get_rates(qs): conversion_rates = cache.get(CACHE_KEY) if not conversion_rates: conversion_rates = {rate.to_currency: rate for rate in qs} cache.set(CACHE_KEY, conversion_rates, CACHE_TIME) return conversion_rates class CachingManager(models.Manager): def get_rate(self, to_currency): # noqa all_rates = get_rates(self.all()) try: return all_rates[to_currency] except KeyError: msg = 'ConversionRate for %s does not exist' % to_currency raise ConversionRate.DoesNotExist(msg) @python_2_unicode_compatible class ConversionRate(models.Model): base_currency = BASE_CURRENCY to_currency = models.CharField( _('To'), max_length=3, db_index=True, choices=CURRENCIES.items(), unique=True) rate = models.DecimalField( _('Conversion rate'), max_digits=20, decimal_places=12) modified_at = models.DateTimeField(auto_now=True) objects = CachingManager() class Meta: ordering = ['to_currency'] def save(self, *args, **kwargs): # noqa """ Save the model instance but only on successful validation. """ self.full_clean() super(ConversionRate, self).save(*args, **kwargs) def clean(self): # noqa if self.rate <= Decimal(0): raise ValidationError('Conversion rate has to be positive') if self.base_currency == self.to_currency: raise ValidationError( 'Can\'t set a conversion rate for the same currency') super(ConversionRate, self).clean() def __str__(self): # noqa return '1 %s = %.04f %s' % (self.base_currency, self.rate, self.to_currency) def __repr__(self): # noqa return ( 'ConversionRate(pk=%r, base_currency=%r, to_currency=%r, rate=%r)' % ( self.pk, self.base_currency, self.to_currency, self.rate))
from __future__ import unicode_literals from decimal import Decimal from django.conf import settings from django.core.exceptions import ValidationError from django.core.cache import cache from django.db import models from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import python_2_unicode_compatible from .currencies import CURRENCIES BASE_CURRENCY = getattr(settings, 'OPENEXCHANGERATES_BASE_CURRENCY', 'USD') CACHE_KEY = getattr(settings, 'OPENEXCHANGERATES_CACHE_KEY', 'conversion_rates') CACHE_TIME = getattr(settings, 'OPENEXCHANGERATES_CACHE_TTL', 60*60) class CachingManager(models.Manager): def get_rate(self, to_currency): # noqa conversion_rates = cache.get(CACHE_KEY) update_cache = False if not conversion_rates: conversion_rates = {} update_cache = True if to_currency not in conversion_rates: rates = self.all() for rate in rates: conversion_rates[rate.to_currency] = rate try: rate = conversion_rates[to_currency] except KeyError: rate = self.get(to_currency=to_currency) conversion_rates[to_currency] = rate update_cache = True if update_cache: cache.set(CACHE_KEY, conversion_rates, CACHE_TIME) return rate @python_2_unicode_compatible class ConversionRate(models.Model): base_currency = BASE_CURRENCY to_currency = models.CharField( _('To'), max_length=3, db_index=True, choices=CURRENCIES.items(), unique=True) rate = models.DecimalField( _('Conversion rate'), max_digits=20, decimal_places=12) modified_at = models.DateTimeField(auto_now=True) objects = CachingManager() class Meta: ordering = ['to_currency'] def save(self, *args, **kwargs): # noqa """ Save the model instance but only on successful validation. """ self.full_clean() super(ConversionRate, self).save(*args, **kwargs) def clean(self): # noqa if self.rate <= Decimal(0): raise ValidationError('Conversion rate has to be positive') if self.base_currency == self.to_currency: raise ValidationError( 'Can\'t set a conversion rate for the same currency') super(ConversionRate, self).clean() def __str__(self): # noqa return '1 %s = %.04f %s' % (self.base_currency, self.rate, self.to_currency) def __repr__(self): # noqa return ( 'ConversionRate(pk=%r, base_currency=%r, to_currency=%r, rate=%r)' % ( self.pk, self.base_currency, self.to_currency, self.rate))
Python
0.000007
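The simplification splits the branch-heavy get_rate into a small get_rates helper: build the whole {currency: rate} map once, cache it, and let a plain KeyError signal a missing rate. The same shape, sketched against a plain dict instead of Django's cache and ORM (an illustrative stand-in, not the Django code):

_cache = {}  # stand-in for django.core.cache

def get_rates(rows, cache_key="conversion_rates"):
    rates = _cache.get(cache_key)
    if not rates:
        rates = {row["to_currency"]: row for row in rows}
        _cache[cache_key] = rates
    return rates

def get_rate(rows, to_currency):
    try:
        return get_rates(rows)[to_currency]
    except KeyError:
        raise LookupError("ConversionRate for %s does not exist" % to_currency)

rows = [{"to_currency": "EUR", "rate": 0.9}, {"to_currency": "GBP", "rate": 0.8}]
print(get_rate(rows, "EUR"))
print(get_rate(rows, "GBP"))  # second lookup is served from the cached map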
a21b9588002013c5efff895e63f29fe362110656
Spell checker: identify multiple positions of mispelled word - precision : 0.05457300369812355 - recall : 0.6653793967226803
Spell checker: identify multiple positions of misspelled word - precision : 0.05457300369812355 - recall : 0.6653793967226803
src/righter/__init__.py
src/righter/__init__.py
""" Identifies common English writing mistakes """ import re import unicodedata from righter import dictionary from righter import utils def findall(sub, string): """ >>> text = "Allowed Hello Hollow" >>> tuple(findall('ll', text)) (1, 10, 16) """ index = 0 - len(sub) try: while True: index = string.index(sub, index + len(sub)) yield index except ValueError: pass def check_spelling(original_text): """ Check if a text has spelling errors. Return a list with objects: { "selection": <wrong-spelled-word>, "start": <position-of-the-first-character-in-string> } """ text = original_text.lower() text = utils.remove_punctuation(text) words = text.split() response = [] for word in words: if not dictionary.is_english_word(word) and\ not utils.contains_digit(word): for pos in findall(word, text): item = { "selection": original_text[pos: (pos + len(word))], "start": pos } if item not in response: response.append(item) return response def check_capitalization(text): """ Check if a text has spelling errors. Return a list with objects: { "selection": <wrong-capitalized-word>, "start": <position-of-the-first-character-in-string> } """ response = [] sentences = re.split('[!?.]', text) # TODO: add \n pos = 0 for sentence in sentences: clean_sentence = sentence.strip() if not clean_sentence: continue # Check if first character is capital if clean_sentence[0].islower(): first_word = clean_sentence.split()[0] first_word_position = pos + sentence.find(first_word) item = { "selection": first_word, "start": first_word_position } response.append(item) else: # check if a common English word in the middle of the text is # wrongly capitalized words = clean_sentence.split() for word in words[1:]: if word[0].isupper() and\ dictionary.is_english_word(word.lower()): item = { "selection": word, "start": text.find(word) } response.append(item) pos += len(sentence) + 1 return response def check(text): changes = [] for change in check_capitalization(text): change['symbol'] = 'C' changes.append(change) for change in check_spelling(text): change['symbol'] = 'SP' changes.append(change) return changes
""" Identifies common English writing mistakes """ import re import unicodedata from righter import dictionary from righter import utils def check_spelling(text): """ Check if a text has spelling errors. Return a list with objects: { "selection": <wrong-spelled-word>, "start": <position-of-the-first-character-in-string> } """ text = text.lower() text = utils.remove_punctuation(text) words = text.split() response = [] for word in words: if not dictionary.is_english_word(word) and\ not utils.contains_digit(word): item = { "selection": word, "start": text.find(word) } response.append(item) return response def check_capitalization(text): """ Check if a text has spelling errors. Return a list with objects: { "selection": <wrong-capitalized-word>, "start": <position-of-the-first-character-in-string> } """ response = [] sentences = re.split('[!?.]', text) # TODO: add \n pos = 0 for sentence in sentences: clean_sentence = sentence.strip() if not clean_sentence: continue # Check if first character is capital if clean_sentence[0].islower(): first_word = clean_sentence.split()[0] first_word_position = pos + sentence.find(first_word) item = { "selection": first_word, "start": first_word_position } response.append(item) else: # check if a common English word in the middle of the text is # wrongly capitalized words = clean_sentence.split() for word in words[1:]: if word[0].isupper() and\ dictionary.is_english_word(word.lower()): item = { "selection": word, "start": text.find(word) } response.append(item) pos += len(sentence) + 1 return response def check(text): changes = [] for change in check_capitalization(text): change['symbol'] = 'C' changes.append(change) for change in check_spelling(text): change['symbol'] = 'SP' changes.append(change) return changes
Python
0.999581
2a5fbcd2e3da01150c2690c145100270d3f0ec81
fix clipnorm
model/lang_model_sgd.py
model/lang_model_sgd.py
import copy import numpy as np import tensorflow as tf from keras import backend as K from keras.optimizers import Optimizer from keras.callbacks import LearningRateScheduler from model.setting import Setting class LangModelSGD(Optimizer): def __init__(self, setting, verbose=True): super(LangModelSGD, self).__init__(clipnorm=setting.norm_clipping) self.iterations = K.variable(0., name="iterations") self.lr = K.variable(1.0, name="lr") self.epoch_interval = K.variable(setting.epoch_interval) self.decay = K.variable(setting.decay) self.verbose = verbose def get_updates(self, params, constraints, loss): grads = self.get_gradients(loss, params) self.updates = [] self.updates.append(K.update_add(self.iterations, 1)) for p, g in zip(params, grads): self.updates.append((p, p - self.lr * g)) return self.updates def get_config(self): config = {"iterations": float(K.get_value(self.iterations)), "lr": float(K.get_value(self.lr)) } base_config = super(LangModelSGD, self).get_config() return dict(list(base_config.items()) + list(config.items())) def get_lr_scheduler(self): def scheduler(epoch): epoch_interval = K.get_value(self.epoch_interval) if epoch != 0 and (epoch + 1) % epoch_interval == 0: lr = K.get_value(self.lr) decay = K.get_value(self.decay) K.set_value(self.lr, lr * decay) if self.verbose: print(self.get_config()) return K.get_value(self.lr) return LearningRateScheduler(scheduler)
import copy import numpy as np import tensorflow as tf from keras import backend as K from keras.optimizers import Optimizer from keras.callbacks import LearningRateScheduler from model.setting import Setting class LangModelSGD(Optimizer): def __init__(self, setting, verbose=True): super(LangModelSGD, self).__init__() self.iterations = K.variable(0., name="iterations") self.lr = K.variable(1.0, name="lr") self.epoch_interval = K.variable(setting.epoch_interval) self.decay = K.variable(setting.decay) self._clipnorm = setting.norm_clipping self.verbose = verbose def get_updates(self, params, constraints, loss): grads = self.get_gradients(loss, params) norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads])) grads = [clip_norm(g, self._clipnorm, norm) for g in grads] self.updates = [] self.updates.append(K.update_add(self.iterations, 1)) for p, g in zip(params, grads): self.updates.append((p, p - self.lr * g)) return self.updates def get_config(self): config = {"iterations": float(K.get_value(self.iterations)), "lr": float(K.get_value(self.lr)) } base_config = super(LangModelSGD, self).get_config() return dict(list(base_config.items()) + list(config.items())) def get_lr_scheduler(self): def scheduler(epoch): epoch_interval = K.get_value(self.epoch_interval) if epoch != 0 and (epoch + 1) % epoch_interval == 0: lr = K.get_value(self.lr) decay = K.get_value(self.decay) K.set_value(self.lr, lr * decay) if self.verbose: print(self.get_config()) return K.get_value(self.lr) return LearningRateScheduler(scheduler)
Python
0.000001
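The fix routes norm clipping through the base Optimizer (clipnorm=setting.norm_clipping) instead of the hand-rolled clip_norm call, which referenced a helper that was never imported. For reference, global-norm clipping itself is just a rescale; a small numpy sketch of the operation being delegated:

import numpy as np

def clip_by_global_norm(grads, clipnorm):
    """Rescale gradients so their joint L2 norm is at most clipnorm."""
    norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads))
    if norm <= clipnorm:
        return grads
    return [g * (clipnorm / norm) for g in grads]

grads = [np.array([3.0, 4.0]), np.array([0.0])]  # global norm = 5
for g in clip_by_global_norm(grads, clipnorm=1.0):
    print(g)  # each gradient scaled down by 1/5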
db3cee63baf64d00b2d2ac4fcf726f287b6d7af2
Update call to proxy fix to use new method signature
app/proxy_fix.py
app/proxy_fix.py
from werkzeug.middleware.proxy_fix import ProxyFix class CustomProxyFix(object): def __init__(self, app, forwarded_proto): self.app = ProxyFix(app, x_for=1, x_proto=1, x_host=1, x_port=0, x_prefix=0) self.forwarded_proto = forwarded_proto def __call__(self, environ, start_response): environ.update({ "HTTP_X_FORWARDED_PROTO": self.forwarded_proto }) return self.app(environ, start_response) def init_app(app): app.wsgi_app = CustomProxyFix(app.wsgi_app, app.config.get('HTTP_PROTOCOL', 'http'))
from werkzeug.middleware.proxy_fix import ProxyFix class CustomProxyFix(object): def __init__(self, app, forwarded_proto): self.app = ProxyFix(app) self.forwarded_proto = forwarded_proto def __call__(self, environ, start_response): environ.update({ "HTTP_X_FORWARDED_PROTO": self.forwarded_proto }) return self.app(environ, start_response) def init_app(app): app.wsgi_app = CustomProxyFix(app.wsgi_app, app.config.get('HTTP_PROTOCOL', 'http'))
Python
0
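Newer Werkzeug replaced ProxyFix's single num_proxies argument with per-header counts (x_for, x_proto, x_host, x_port, x_prefix), each saying how many proxy hops to trust for that header. A minimal standalone demo wrapping a toy WSGI app (assumes werkzeug>=0.15 is installed; the toy app and header value are made up):

from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.test import Client
from werkzeug.wrappers import Response

def app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [environ["wsgi.url_scheme"].encode()]

wrapped = ProxyFix(app, x_for=1, x_proto=1, x_host=1, x_port=0, x_prefix=0)
client = Client(wrapped, Response)
resp = client.get("/", headers={"X-Forwarded-Proto": "https"})
print(resp.get_data())  # b'https' -- scheme rewritten from the trusted header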
fee4ec26f52c584faa0aa5e35de955972b7c56bd
return a sorted list so tests can be deterministic
lms/djangoapps/bulk_user_retirement/views.py
lms/djangoapps/bulk_user_retirement/views.py
""" An API for retiring user accounts. """ import logging from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication from django.contrib.auth import get_user_model from django.db import transaction from rest_framework import permissions, status from rest_framework.response import Response from rest_framework.views import APIView from openedx.core.djangoapps.user_api.accounts.permissions import CanRetireUser from openedx.core.djangoapps.user_api.accounts.utils import create_retirement_request_and_deactivate_account log = logging.getLogger(__name__) class BulkUsersRetirementView(APIView): """ **Use Case** Implementation for Bulk User Retirement API. Creates a retirement request for one or more users. **Example Request** POST /v1/accounts/bulk_retire_users { "usernames": "test_user1, test_user2" } **POST Parameters** A POST request can include the following parameter. * usernames: Comma separated strings of usernames that should be retired. """ authentication_classes = (JwtAuthentication, ) permission_classes = (permissions.IsAuthenticated, CanRetireUser) def post(self, request, **kwargs): # pylint: disable=unused-argument """ Initiates the bulk retirement process for the given users. """ request_usernames = request.data.get('usernames') if request_usernames: usernames_to_retire = [each_username.strip() for each_username in request_usernames.split(',')] else: usernames_to_retire = [] User = get_user_model() successful_user_retirements, failed_user_retirements = [], [] for username in usernames_to_retire: try: user_to_retire = User.objects.get(username=username) with transaction.atomic(): create_retirement_request_and_deactivate_account(user_to_retire) except User.DoesNotExist: log.exception(f'The user "{username}" does not exist.') failed_user_retirements.append(username) except Exception as exc: # pylint: disable=broad-except log.exception(f'500 error retiring account {exc}') failed_user_retirements.append(username) successful_user_retirements = sorted(set(usernames_to_retire).difference(failed_user_retirements)) return Response( status=status.HTTP_200_OK, data={ "successful_user_retirements": successful_user_retirements, "failed_user_retirements": failed_user_retirements } )
""" An API for retiring user accounts. """ import logging from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication from django.contrib.auth import get_user_model from django.db import transaction from rest_framework import permissions, status from rest_framework.response import Response from rest_framework.views import APIView from openedx.core.djangoapps.user_api.accounts.permissions import CanRetireUser from openedx.core.djangoapps.user_api.accounts.utils import create_retirement_request_and_deactivate_account log = logging.getLogger(__name__) class BulkUsersRetirementView(APIView): """ **Use Case** Implementation for Bulk User Retirement API. Creates a retirement request for one or more users. **Example Request** POST /v1/accounts/bulk_retire_users { "usernames": "test_user1, test_user2" } **POST Parameters** A POST request can include the following parameter. * usernames: Comma separated strings of usernames that should be retired. """ authentication_classes = (JwtAuthentication, ) permission_classes = (permissions.IsAuthenticated, CanRetireUser) def post(self, request, **kwargs): # pylint: disable=unused-argument """ Initiates the bulk retirement process for the given users. """ request_usernames = request.data.get('usernames') if request_usernames: usernames_to_retire = [each_username.strip() for each_username in request_usernames.split(',')] else: usernames_to_retire = [] User = get_user_model() successful_user_retirements, failed_user_retirements = [], [] for username in usernames_to_retire: try: user_to_retire = User.objects.get(username=username) with transaction.atomic(): create_retirement_request_and_deactivate_account(user_to_retire) except User.DoesNotExist: log.exception(f'The user "{username}" does not exist.') failed_user_retirements.append(username) except Exception as exc: # pylint: disable=broad-except log.exception(f'500 error retiring account {exc}') failed_user_retirements.append(username) successful_user_retirements = list(set(usernames_to_retire).difference(failed_user_retirements)) return Response( status=status.HTTP_200_OK, data={ "successful_user_retirements": successful_user_retirements, "failed_user_retirements": failed_user_retirements } )
Python
0.99995
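The determinism fix swaps list(set(...)) for sorted(set(...)): set iteration order can vary between runs and processes (hash randomization), so the response payload could reorder arbitrarily. A tiny demonstration of the property the tests rely on:

usernames_to_retire = ["u3", "u1", "u2", "u1"]
failed = ["u2"]

# list(set(...)) has no guaranteed order across runs; sorted(...) is stable.
successful = sorted(set(usernames_to_retire).difference(failed))
print(successful)  # always ['u1', 'u3']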
d56cfbf87c01ac496200341a723ddcee88798a01
Add setup of default translator object so doctests can run when using _. Fixes #509.
pylons/test.py
pylons/test.py
"""Test related functionality Adds a Pylons plugin to `nose <http://www.somethingaboutorange.com/mrl/projects/nose/>`_ that loads the Pylons app *before* scanning for doc tests. This can be configured in the projects :file:`setup.cfg` under a ``[nosetests]`` block: .. code-block:: ini [nosetests] with-pylons=development.ini Alternate ini files may be specified if the app should be loaded using a different configuration. """ import os import sys import nose.plugins import pkg_resources from paste.deploy import loadapp import pylons from pylons.i18n.translation import _get_translator pylonsapp = None class PylonsPlugin(nose.plugins.Plugin): """Nose plugin extension For use with nose to allow a project to be configured before nose proceeds to scan the project for doc tests and unit tests. This prevents modules from being loaded without a configured Pylons environment. """ enabled = False enableOpt = 'pylons_config' name = 'pylons' def add_options(self, parser, env=os.environ): """Add command-line options for this plugin""" env_opt = 'NOSE_WITH_%s' % self.name.upper() env_opt.replace('-', '_') parser.add_option("--with-%s" % self.name, dest=self.enableOpt, type="string", default="", help="Setup Pylons environment with the config file" " specified by ATTR [NOSE_ATTR]") def configure(self, options, conf): """Configure the plugin""" self.config_file = None self.conf = conf if hasattr(options, self.enableOpt): self.enabled = bool(getattr(options, self.enableOpt)) self.config_file = getattr(options, self.enableOpt) def begin(self): """Called before any tests are collected or run Loads the application, and in turn its configuration. """ global pylonsapp path = os.getcwd() sys.path.insert(0, path) pkg_resources.working_set.add_entry(path) self.app = pylonsapp = loadapp('config:' + self.config_file, relative_to=path) # For tests that utilize the i18n _ object, initialize a NullTranslator pylons.translator._push_object(_get_translator(pylons.config.get('lang')))
"""Test related functionality Adds a Pylons plugin to `nose <http://www.somethingaboutorange.com/mrl/projects/nose/>`_ that loads the Pylons app *before* scanning for doc tests. This can be configured in the projects :file:`setup.cfg` under a ``[nosetests]`` block: .. code-block:: ini [nosetests] with-pylons=development.ini Alternate ini files may be specified if the app should be loaded using a different configuration. """ import os import sys import nose.plugins import pkg_resources from paste.deploy import loadapp import pylons from pylons.i18n.translation import _get_translator pylonsapp = None class PylonsPlugin(nose.plugins.Plugin): """Nose plugin extension For use with nose to allow a project to be configured before nose proceeds to scan the project for doc tests and unit tests. This prevents modules from being loaded without a configured Pylons environment. """ enabled = False enableOpt = 'pylons_config' name = 'pylons' def add_options(self, parser, env=os.environ): """Add command-line options for this plugin""" env_opt = 'NOSE_WITH_%s' % self.name.upper() env_opt.replace('-', '_') parser.add_option("--with-%s" % self.name, dest=self.enableOpt, type="string", default="", help="Setup Pylons environment with the config file" " specified by ATTR [NOSE_ATTR]") def configure(self, options, conf): """Configure the plugin""" self.config_file = None self.conf = conf if hasattr(options, self.enableOpt): self.enabled = bool(getattr(options, self.enableOpt)) self.config_file = getattr(options, self.enableOpt) def begin(self): """Called before any tests are collected or run Loads the application, and in turn its configuration. """ global pylonsapp path = os.getcwd() sys.path.insert(0, path) pkg_resources.working_set.add_entry(path) self.app = pylonsapp = loadapp('config:' + self.config_file, relative_to=path)
Python
0
4db28d9f8ae0c3ad22121226c1ec0b59f4258759
Update pylsy.py
pylsy/pylsy.py
pylsy/pylsy.py
# -*- coding: utf-8 -*- from __future__ import print_function class PylsyTable(object): def __init__(self, attributes): self.Attributes = attributes self.Table = [] self.AttributesLength = [] self.cols_num = len(self.Attributes) self.lines_num = 0 for attribute in self.Attributes: col = dict() col[attribute] = "" self.Table.append(col) def print_divide(self): for space in self.AttributesLength: print("+ ", end='') for sign in range(space): print("- ", end='') print("+") def add_data(self, attribute, values): for col in self.Table: if attribute in col: dict_values = [str(value) for value in values] col[attribute] = dict_values def create_table(self): for col in self.Table: values = list(col.values())[0] if self.lines_num < len(values): self.lines_num = len(values) # find the length of longest word in current column key_length = len(list(col.keys())[0]) for value in values: length = len(value) if length > key_length: key_length = length self.AttributesLength.append(key_length) self.print_head() self.print_value() def print_head(self): self.print_divide() print("| ", end='') for spaces, attr in zip(self.AttributesLength, self.Attributes): space_num = spaces * 2 - 1 start = (space_num - len(attr)) // 2 for space in range(start): print(" ", end='') print(attr + ' ', end='') end = space_num - start - len(attr) for space in range(end): print(" ", end='') print("| ", end='') print("") self.print_divide() def print_value(self): for line in range(self.lines_num): for col, length in zip(self.Table, self.AttributesLength): print("| ", end='') value_length = length * 2 - 1 value = list(col.values())[0] if len(value) != 0: start = (value_length - len(value[line])) // 2 for space in range(start): print(" ", end='') print(value[line] + ' ', end='') end = value_length - start - len(value[line]) for space in range(end): print(" ", end='') else: start = 0 end = value_length - start + 1 for space in range(end): print(" ", end='') print("|") self.print_divide()
# -*- coding: utf-8 -*-

from __future__ import print_function


class PylsyTable(object):

    def __init__(self, attributes):
        self.Attributes = attributes
        self.Table = []
        self.AttributesLength = []
        self.cols_num = len(self.Attributes)
        self.lines_num = 0
        for attribute in self.Attributes:
            col = dict()
            col[attribute] = ""
            self.Table.append(col)

    def print_divide(self):
        for space in self.AttributesLength:
            print("+ ", end='')
            for sign in range(space):
                print("- ", end='')
        print("+")

    def add_data(self, attribute, values):
        for col in self.Table:
            if attribute in col:
                dict_values = [str(value) for value in values]
                col[attribute] = dict_values

    def create_table(self):
        for col in self.Table:
            values = list(col.values())[0]
            if self.lines_num < len(values):
                self.lines_num = len(values)
            # find the length of longest word in current column
            key_length = len(list(col.keys())[0])
            for value in values:
                length = len(value)
                if length > key_length:
                    key_length = length
            self.AttributesLength.append(key_length)
        self.print_head()
        self.print_value()

    def print_head(self):
        self.print_divide()
        print("| ", end='')
        for spaces, attr in zip(self.AttributesLength, self.Attributes):
            space_num = spaces * 2 - 1
            start = (space_num - len(attr)) // 2
            for space in range(start):
                print(" ", end='')
            print(attr + ' ', end='')
            end = space_num - start - len(attr)
            for space in range(end):
                print(" ", end='')
            print("| ", end='')
        print("")
        self.print_divide()

    def print_value(self):
        for line in range(self.lines_num):
            for col, length in zip(self.Table, self.AttributesLength):
                print("| ", end='')
                value_length = length * 2 - 1
                value = list(col.values())[0]
                if len(value) != 0:
                    start = (value_length - len(value[line])) // 2
                    for space in range(start):
                        print(" ", end='')
                    print(value[line] + ' ', end='')
                    end = value_length - start - len(value[line])
                    for space in range(end):
                        print(" ", end='')
                else:
                    start = 0
                    end = value_length - start + 1
                    for space in range(end):
                        print(" ", end='')
            print("|")
            self.print_divide()
Python
0
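With the class above in scope, a minimal sketch of driving PylsyTable (attribute names and values are illustrative):

table = PylsyTable(["name", "age"])
table.add_data("name", ["monica", "sam"])
table.add_data("age", [28, 32])
table.create_table()  # prints an ASCII grid of the two columns to stdout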
ad78e28d4537054a0d19643bb7efb1572dd4702c
Encode topic heading as UTF8
app/utils/pdf.py
app/utils/pdf.py
import pdftotext
from PIL import Image
from wand.image import Image
import os
import io

TOPICS = [
    'Philosophy',
    'Society',
    'Esoterica',
    'Art',
    'Culture',
    'Science & Nature',
    'Gods & Heroes',
    'Myths Of The World'
]


def extract_first_page(blob):
    pdf = Image(blob=blob, resolution=200)
    image = Image(
        width=pdf.width,
        height=pdf.height
    )
    image.composite(
        pdf.sequence[0],
        top=0,
        left=0
    )
    return image.make_blob('png')


def extract_topics(pdf_binary):
    pdf = pdftotext.PDF(io.BytesIO(pdf_binary))
    topic_headings = ''
    for n in range(4, len(pdf)):
        topic = ''
        topic_heading = ''
        for line_no, l in enumerate(pdf[n].split('\n')):
            words = [w.capitalize() for w in l.strip().split(' ') if w.strip()]
            if not words:
                continue
            if not topic and len(words) < 5:
                heading = ' '.join(words)
                if heading in TOPICS:
                    topic = heading
                continue
            if topic:
                line = ' '.join(words)
                if len(line) < 30 and u'\u201c' not in line:
                    topic_heading += line + ' '
                if line_no > 2:
                    break
        if topic_heading:
            topic_headings += '{}: {}\n'.format(topic, topic_heading.encode("utf8")[:-1])
    return topic_headings[:-1] if topic_headings else ''
import pdftotext
from PIL import Image
from wand.image import Image
import os
import io

TOPICS = [
    'Philosophy',
    'Society',
    'Esoterica',
    'Art',
    'Culture',
    'Science & Nature',
    'Gods & Heroes',
    'Myths Of The World'
]


def extract_first_page(blob):
    pdf = Image(blob=blob, resolution=200)
    image = Image(
        width=pdf.width,
        height=pdf.height
    )
    image.composite(
        pdf.sequence[0],
        top=0,
        left=0
    )
    return image.make_blob('png')


def extract_topics(pdf_binary):
    pdf = pdftotext.PDF(io.BytesIO(pdf_binary))
    topic_headings = ''
    for n in range(4, len(pdf)):
        topic = ''
        topic_heading = ''
        for line_no, l in enumerate(pdf[n].split('\n')):
            words = [w.capitalize() for w in l.strip().split(' ') if w.strip()]
            if not words:
                continue
            if not topic and len(words) < 5:
                heading = ' '.join(words)
                if heading in TOPICS:
                    topic = heading
                continue
            if topic:
                line = ' '.join(words)
                if len(line) < 30 and u'\u201c' not in line:
                    topic_heading += line + ' '
                if line_no > 2:
                    break
        if topic_heading:
            topic_headings += '{}: {}\n'.format(topic, topic_heading[:-1])
    return topic_headings[:-1] if topic_headings else ''
Python
0.999996
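The diff here is one call: .encode("utf8") is applied to the heading before formatting, presumably to avoid a UnicodeEncodeError downstream under Python 2, which this code appears to target. A Python 2 sketch with a hypothetical heading:

# -*- coding: utf-8 -*-
# Hypothetical heading value; encoding up front yields a byte string that
# formats and prints safely regardless of the output stream's encoding.
topic_heading = u'Myths Of The World \u2014 origins '
print('{}: {}'.format('Culture', topic_heading.encode('utf8')[:-1]))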
747d2563fd566a70420a04d3db209fffc813f147
fix docs/hash-tree.py for python 3
docs/hash-tree.py
docs/hash-tree.py
#!/usr/bin/env python

# Write a directory to the Git index.
# Prints the directory's SHA-1 to stdout.
#
# Copyright 2013 Lars Buitinck / University of Amsterdam.
# License: MIT (http://opensource.org/licenses/MIT)

# Based on:
# https://github.com/larsmans/seqlearn/blob/d7a3d82c/doc/hash-tree.py

import os
from os.path import split
from posixpath import join
from subprocess import check_output, Popen, PIPE
import sys


def hash_file(path):
    """Write file at path to Git index, return its SHA1 as a string."""
    return check_output(["git", "hash-object", "-w", "--", path]).decode().strip()


def _lstree(files, dirs):
    """Make git ls-tree like output."""
    for f, sha1 in files:
        yield "100644 blob {}\t{}\0".format(sha1, f)
    for d, sha1 in dirs:
        yield "040000 tree {}\t{}\0".format(sha1, d)


def _mktree(files, dirs):
    mkt = Popen(["git", "mktree", "-z"], stdin=PIPE, stdout=PIPE)
    inp = "".join(_lstree(files, dirs)).encode('ascii')
    return mkt.communicate(inp)[0].strip().decode()


def hash_dir(path):
    """Write directory at path to Git index, return its SHA1 as a string."""
    dir_hash = {}

    for root, dirs, files in os.walk(path, topdown=False):
        f_hash = ((f, hash_file(join(root, f))) for f in files)
        d_hash = ((d, dir_hash[join(root, d)]) for d in dirs)
        # split+join normalizes paths on Windows (note the imports)
        dir_hash[join(*split(root))] = _mktree(f_hash, d_hash)

    return dir_hash[path]


if __name__ == "__main__":
    print(hash_dir(sys.argv[1]))
#!/usr/bin/env python

# Write a directory to the Git index.
# Prints the directory's SHA-1 to stdout.
#
# Copyright 2013 Lars Buitinck / University of Amsterdam.
# License: MIT (http://opensource.org/licenses/MIT)
# https://github.com/larsmans/seqlearn/blob/d7a3d82c/doc/hash-tree.py

import os
from os.path import split
from posixpath import join
from subprocess import check_output, Popen, PIPE
import sys


def hash_file(path):
    """Write file at path to Git index, return its SHA1 as a string."""
    return check_output(["git", "hash-object", "-w", "--", path]).strip()


def _lstree(files, dirs):
    """Make git ls-tree like output."""
    for f, sha1 in files:
        yield "100644 blob {}\t{}\0".format(sha1, f)
    for d, sha1 in dirs:
        yield "040000 tree {}\t{}\0".format(sha1, d)


def _mktree(files, dirs):
    mkt = Popen(["git", "mktree", "-z"], stdin=PIPE, stdout=PIPE)
    return mkt.communicate("".join(_lstree(files, dirs)))[0].strip()


def hash_dir(path):
    """Write directory at path to Git index, return its SHA1 as a string."""
    dir_hash = {}

    for root, dirs, files in os.walk(path, topdown=False):
        f_hash = ((f, hash_file(join(root, f))) for f in files)
        d_hash = ((d, dir_hash[join(root, d)]) for d in dirs)
        # split+join normalizes paths on Windows (note the imports)
        dir_hash[join(*split(root))] = _mktree(f_hash, d_hash)

    return dir_hash[path]


if __name__ == "__main__":
    print(hash_dir(sys.argv[1]))
Python
0.00011
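The Python 3 fix is the classic bytes-versus-str split: check_output and Popen.communicate deal in bytes, so values are now decoded on the way out and encoded on the way in. A sketch of what the added .decode() avoids, assuming a git checkout containing a README file:

from subprocess import check_output

out = check_output(["git", "hash-object", "README"])
print(type(out))             # <class 'bytes'> on Python 3
sha1 = out.decode().strip()  # a str, so format() embeds it without b'...' noise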
34c0c6c73a65da3120aa52600254afc909e9a3bc
Remove unused main and unused imports
pytach/wsgi.py
pytach/wsgi.py
import bottle

import config
from web import web

app = application = bottle.Bottle()
app.merge(web.app)

config.arguments['--verbose'] = True
import bottle
from bottle import route, run

from web import web
import config

app = application = bottle.Bottle()
app.merge(web.app)

config.arguments['--verbose'] = True

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8082, debug=True)
Python
0
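With the __main__ block removed, the module is import-only and some WSGI container is expected to serve `application`. A hypothetical way to run it for local testing (the import path is assumed from the file location):

import bottle
from pytach import wsgi

# Serves the same merged app the deleted __main__ block used to run.
bottle.run(app=wsgi.application, host='0.0.0.0', port=8082, debug=True)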
bea0ead3dfcc055d219966c64437652c0eb2cf84
Update demo.py
python/demo.py
python/demo.py
#! /usr/bin/env python
import serial
import time

# Serial port
N = "/dev/ttyUSB0"


def ints2str(lst):
    '''
    Taking a list of notes/lengths, convert it to a string
    '''
    s = ""
    for i in lst:
        if i < 0 or i > 255:
            raise Exception
        s = s + str(chr(i))
    return s

# do some initialization magic
s = serial.Serial(N, 57600, timeout=4)

# start code
s.write(ints2str([128]))
# Full mode
s.write(ints2str([132]))

# Drive
s.write(ints2str([137, 1, 44, 128, 0]))
# wait
s.write(ints2str([156, 1, 144]))
# Turn
s.write(ints2str([137, 1, 44, 0, 1]))
#wait
s.write(ints2str([157, 0, 90]))

quit()
#! /usr/bin/env python
import serial
import time
import sys

# Serial port
N = "/dev/ttyUSB0"


def ints2str(lst):
    '''
    Taking a list of notes/lengths, convert it to a string
    '''
    s = ""
    for i in lst:
        if i < 0 or i > 255:
            raise Exception
        s = s + str(chr(i))
    return s

# do some initialization magic
s = serial.Serial(N, 57600, timeout=4)

# start code
s.write(ints2str([128]))
# Full mode
s.write(ints2str([132]))

# Drive
s.write(ints2str([137, 1, 44, 128, 0]))
# wait
s.write(ints2str([156, 1, 144]))
# Turn
s.write(ints2str([137, 1, 44, 0, 1]))
#wait
s.write(ints2str([157, 0, 90]))

sys.exit()
Python
0.000001
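Curiously, this diff goes the other way: it swaps sys.exit() for quit(). quit() is injected by the site module for interactive sessions and can be absent (for example under python -S), while sys.exit() is always available and raises a clean SystemExit, so the script-safe form is:

import sys

# Same intent as quit(), but guaranteed to exist in non-interactive scripts.
sys.exit()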
1ca6ccb50992836720e86a7c3c766a5497cf7588
Remove unused import
mint/django_rest/rbuilder/querysets/views.py
mint/django_rest/rbuilder/querysets/views.py
#!/usr/bin/python
#
# Copyright (c) 2011 rPath, Inc.
#
# All rights reserved.
#

from mint.django_rest.deco import return_xml, requires
from mint.django_rest.rbuilder import service


class BaseQuerySetService(service.BaseService):
    pass


class QuerySetService(BaseQuerySetService):

    @return_xml
    def rest_GET(self, request, querySetId=None):
        return self.get(querySetId)

    def get(self, querySetId):
        if querySetId:
            return self.mgr.getQuerySet(querySetId)
        else:
            return self.mgr.getQuerySets()

    @requires('query_set')
    @return_xml
    def rest_POST(self, request, query_set):
        return self.mgr.addQuerySet(query_set)
#!/usr/bin/python
#
# Copyright (c) 2011 rPath, Inc.
#
# All rights reserved.
#

from mint.django_rest.deco import return_xml, requires
from mint.django_rest.rbuilder import service
from mint.django_rest.rbuilder.querysets import manager


class BaseQuerySetService(service.BaseService):
    pass


class QuerySetService(BaseQuerySetService):

    @return_xml
    def rest_GET(self, request, querySetId=None):
        return self.get(querySetId)

    def get(self, querySetId):
        if querySetId:
            return self.mgr.getQuerySet(querySetId)
        else:
            return self.mgr.getQuerySets()

    @requires('query_set')
    @return_xml
    def rest_POST(self, request, query_set):
        return self.mgr.addQuerySet(query_set)
Python
0.000001
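Dead imports like this one are usually caught mechanically rather than by eye; a sketch using pyflakes (the API call exists in current pyflakes, though its report wording varies by version):

from pyflakes.api import checkPath

# Prints warnings such as "'manager' imported but unused" for the module.
checkPath('mint/django_rest/rbuilder/querysets/views.py')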
aa6a72c419846bc9d1ae5d8f114d214cbc2be60c
Fix randomize without cache
fake_useragent/utils.py
fake_useragent/utils.py
import re
import os

try:
    from urllib import urlopen, quote_plus
except ImportError:  # Python 3
    from urllib.request import urlopen
    from urllib.parse import quote_plus

try:
    import json
except ImportError:
    import simplejson as json

from fake_useragent import settings


def get(url, annex=None):
    if not annex is None:
        url = url % (quote_plus(annex), )
    return urlopen(url).read()


def get_browsers():
    '''
    very very hardcoded/dirty re/split stuff, but no dependencies
    '''
    html = get(settings.BROWSERS_STATS_PAGE)
    html = html.decode('windows-1252')
    html = html.split('<table class="reference">')[1]
    html = html.split('<td>&nbsp;</td>')[0]

    browsers = re.findall(r'\.asp">(.+?)<', html, re.UNICODE)
    browsers_statistics = re.findall(r'"right">(.+?)\s', html, re.UNICODE)

    # TODO: unsure encoding
    # browsers = list(map(
    #     lambda stat: stat.encode('utf-8', 'ignore'), browsers)
    # )
    # browsers_statistics = list(
    #     map(
    #         lambda stat: stat.encode('utf-8', 'ignore'),
    #         browsers_statistics
    #     )
    # )

    return list(zip(browsers, browsers_statistics))


def get_browser_versions(browser):
    '''
    very very hardcoded/dirty re/split stuff, but no dependencies
    '''
    html = get(settings.BROWSER_BASE_PAGE, browser)
    html = html.decode('iso-8859-1')
    html = html.split('<div id=\'liste\'>')[1]
    html = html.split('</div>')[0]

    browsers_iter = re.finditer(r'\.php\'>(.+?)</a', html, re.UNICODE)

    count = 0
    browsers = []

    for browser in browsers_iter:
        if 'more' in browser.group(1).lower():
            continue

        # TODO: ensure encoding
        # browser.group(1).encode('utf-8', 'ignore')
        browsers.append(browser.group(1))

        count += 1

        if count == settings.BROWSERS_COUNT_LIMIT:
            break

    return browsers


def load():
    browsers_dict = {}
    randomize_dict = {}

    for item in get_browsers():
        browser, percent = item
        clear_browser = browser.replace(' ', '').lower()
        browsers_dict[clear_browser] = get_browser_versions(browser)

        for counter in range(int(float(percent))):
            randomize_dict[str(len(randomize_dict))] = clear_browser

    db = {}
    db['browsers'] = browsers_dict
    db['randomize'] = randomize_dict

    return db


def write(data):
    data = json.dumps(data)

    # no codecs\with for python 2.5
    f = open(settings.DB, 'w+')
    f.write(data)
    f.close()


def read():
    # no codecs\with for python 2.5
    f = open(settings.DB, 'r')
    data = f.read()
    f.close()

    return json.loads(data, 'utf-8')


def exist():
    return os.path.isfile(settings.DB)


def rm():
    if exist():
        os.remove(settings.DB)


def refresh():
    if exist():
        rm()

    write(load())


def load_cached():
    if not exist():
        refresh()

    return read()
import re
import os

try:
    from urllib import urlopen, quote_plus
except ImportError:  # Python 3
    from urllib.request import urlopen
    from urllib.parse import quote_plus

try:
    import json
except ImportError:
    import simplejson as json

from fake_useragent import settings


def get(url, annex=None):
    if not annex is None:
        url = url % (quote_plus(annex), )
    return urlopen(url).read()


def get_browsers():
    '''
    very very hardcoded/dirty re/split stuff, but no dependencies
    '''
    html = get(settings.BROWSERS_STATS_PAGE)
    html = html.decode('windows-1252')
    html = html.split('<table class="reference">')[1]
    html = html.split('<td>&nbsp;</td>')[0]

    browsers = re.findall(r'\.asp">(.+?)<', html, re.UNICODE)
    browsers_statistics = re.findall(r'"right">(.+?)\s', html, re.UNICODE)

    # TODO: unsure encoding
    # browsers = list(map(
    #     lambda stat: stat.encode('utf-8', 'ignore'), browsers)
    # )
    # browsers_statistics = list(
    #     map(
    #         lambda stat: stat.encode('utf-8', 'ignore'),
    #         browsers_statistics
    #     )
    # )

    return list(zip(browsers, browsers_statistics))


def get_browser_versions(browser):
    '''
    very very hardcoded/dirty re/split stuff, but no dependencies
    '''
    html = get(settings.BROWSER_BASE_PAGE, browser)
    html = html.decode('iso-8859-1')
    html = html.split('<div id=\'liste\'>')[1]
    html = html.split('</div>')[0]

    browsers_iter = re.finditer(r'\.php\'>(.+?)</a', html, re.UNICODE)

    count = 0
    browsers = []

    for browser in browsers_iter:
        if 'more' in browser.group(1).lower():
            continue

        # TODO: ensure encoding
        # browser.group(1).encode('utf-8', 'ignore')
        browsers.append(browser.group(1))

        count += 1

        if count == settings.BROWSERS_COUNT_LIMIT:
            break

    return browsers


def load():
    browsers_dict = {}
    randomize_dict = {}

    for item in get_browsers():
        browser, percent = item
        clear_browser = browser.replace(' ', '').lower()
        browsers_dict[clear_browser] = get_browser_versions(browser)

        for counter in range(int(float(percent))):
            randomize_dict[len(randomize_dict)] = clear_browser

    db = {}
    db['browsers'] = browsers_dict
    db['randomize'] = randomize_dict

    return db


def write(data):
    data = json.dumps(data)

    # no codecs\with for python 2.5
    f = open(settings.DB, 'w+')
    f.write(data)
    f.close()


def read():
    # no codecs\with for python 2.5
    f = open(settings.DB, 'r')
    data = f.read()
    f.close()

    return json.loads(data, 'utf-8')


def exist():
    return os.path.isfile(settings.DB)


def rm():
    if exist():
        os.remove(settings.DB)


def refresh():
    if exist():
        rm()

    write(load())


def load_cached():
    if not exist():
        refresh()

    return read()
Python
0.000005
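The str() wrapper matters because the randomize table is persisted as JSON, and JSON object keys are always strings: a table loaded from cache had str keys while a freshly built one had int keys, so lookups that worked in one mode failed in the other. A minimal sketch of the mismatch:

import json

randomize = {0: 'chrome', 1: 'firefox'}      # pre-fix, built without cache
cached = json.loads(json.dumps(randomize))   # after a cache round-trip
print(0 in randomize)   # True
print(0 in cached)      # False -- the key came back as the string '0'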
aa8e51fc8ad969cd04098a5714ff78092b35f58f
Remove unused import
polyaxon/libs/http.py
polyaxon/libs/http.py
import os
import requests
import tarfile

from urllib.parse import parse_qs, urlencode, urljoin, urlparse, urlunparse

from hestia.auth import AuthenticationTypes
from hestia.fs import move_recursively

from django.conf import settings

from libs.api import get_http_api_url


def absolute_uri(url):
    if not url:
        return None
    if not settings.API_HOST:
        return url
    url = urljoin(settings.API_HOST.rstrip('/') + '/', url.lstrip('/'))
    return '{}://{}'.format(settings.PROTOCOL, url)


def add_notification_referrer_param(url, provider, is_absolute=True):
    if not is_absolute:
        url = absolute_uri(url)
    if not url:
        return None
    parsed_url = urlparse(url)
    query = parse_qs(parsed_url.query)
    query['referrer'] = provider
    url_list = list(parsed_url)
    url_list[4] = urlencode(query, doseq=True)
    return urlunparse(url_list)


def download(url,
             filename,
             logger,
             authentication_type=None,
             access_token=None,
             headers=None,
             internal=True,
             timeout=60):
    """Get download url from the internal api."""
    if internal:
        authentication_type = authentication_type or AuthenticationTypes.INTERNAL_TOKEN
    else:
        authentication_type = AuthenticationTypes.TOKEN

    if authentication_type == AuthenticationTypes.INTERNAL_TOKEN and not access_token:
        access_token = settings.SECRET_INTERNAL_TOKEN

    # Auth headers if access_token is present
    request_headers = {}
    if access_token:
        request_headers["Authorization"] = "{} {}".format(authentication_type, access_token)
    # Add any additional headers
    if headers:
        request_headers.update(headers)

    try:
        if internal:
            api_url = get_http_api_url()
            url = '{}/{}'.format(api_url, url)
        logger.info("Downloading file from %s using %s" % (url, authentication_type))
        response = requests.get(url,
                                headers=request_headers,
                                timeout=timeout,
                                stream=True)

        if response.status_code != 200:
            logger.error("Failed to download file from %s: %s" % (url, response.status_code),
                         extra={'stack': True})
            return None

        with open(filename, 'wb') as f:
            logger.info("Processing file %s" % filename)
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)

        return filename
    except requests.exceptions.RequestException:
        logger.error("Download exception", exc_info=True)
        return None


def untar_file(build_path, filename, logger, delete_tar=False, internal=False, tar_suffix=None):
    extract_path = build_path if internal else '/tmp'
    if filename and os.path.exists(filename):
        logger.info("Untarring the contents of the file ...")
        tar = tarfile.open(filename)
        tar.extractall(extract_path)
        tar.close()
        if delete_tar:
            logger.info("Cleaning up the tar file ...")
            os.remove(filename)

        if not internal:
            tarf = [f for f in os.listdir(extract_path) if tar_suffix in f]
            if tarf:
                src = os.path.join(extract_path, tarf[0])
                move_recursively(src, build_path)
        return filename
    else:
        logger.info("File was not found, build_path: %s" % os.listdir(build_path))
        return None
import os
import requests
import shutil
import tarfile

from urllib.parse import parse_qs, urlencode, urljoin, urlparse, urlunparse

from hestia.auth import AuthenticationTypes
from hestia.fs import move_recursively

from django.conf import settings

from libs.api import get_http_api_url


def absolute_uri(url):
    if not url:
        return None
    if not settings.API_HOST:
        return url
    url = urljoin(settings.API_HOST.rstrip('/') + '/', url.lstrip('/'))
    return '{}://{}'.format(settings.PROTOCOL, url)


def add_notification_referrer_param(url, provider, is_absolute=True):
    if not is_absolute:
        url = absolute_uri(url)
    if not url:
        return None
    parsed_url = urlparse(url)
    query = parse_qs(parsed_url.query)
    query['referrer'] = provider
    url_list = list(parsed_url)
    url_list[4] = urlencode(query, doseq=True)
    return urlunparse(url_list)


def download(url,
             filename,
             logger,
             authentication_type=None,
             access_token=None,
             headers=None,
             internal=True,
             timeout=60):
    """Get download url from the internal api."""
    if internal:
        authentication_type = authentication_type or AuthenticationTypes.INTERNAL_TOKEN
    else:
        authentication_type = AuthenticationTypes.TOKEN

    if authentication_type == AuthenticationTypes.INTERNAL_TOKEN and not access_token:
        access_token = settings.SECRET_INTERNAL_TOKEN

    # Auth headers if access_token is present
    request_headers = {}
    if access_token:
        request_headers["Authorization"] = "{} {}".format(authentication_type, access_token)
    # Add any additional headers
    if headers:
        request_headers.update(headers)

    try:
        if internal:
            api_url = get_http_api_url()
            url = '{}/{}'.format(api_url, url)
        logger.info("Downloading file from %s using %s" % (url, authentication_type))
        response = requests.get(url,
                                headers=request_headers,
                                timeout=timeout,
                                stream=True)

        if response.status_code != 200:
            logger.error("Failed to download file from %s: %s" % (url, response.status_code),
                         extra={'stack': True})
            return None

        with open(filename, 'wb') as f:
            logger.info("Processing file %s" % filename)
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)

        return filename
    except requests.exceptions.RequestException:
        logger.error("Download exception", exc_info=True)
        return None


def untar_file(build_path, filename, logger, delete_tar=False, internal=False, tar_suffix=None):
    extract_path = build_path if internal else '/tmp'
    if filename and os.path.exists(filename):
        logger.info("Untarring the contents of the file ...")
        tar = tarfile.open(filename)
        tar.extractall(extract_path)
        tar.close()
        if delete_tar:
            logger.info("Cleaning up the tar file ...")
            os.remove(filename)

        if not internal:
            tarf = [f for f in os.listdir(extract_path) if tar_suffix in f]
            if tarf:
                src = os.path.join(extract_path, tarf[0])
                move_recursively(src, build_path)
        return filename
    else:
        logger.info("File was not found, build_path: %s" % os.listdir(build_path))
        return None
Python
0.000001
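Here shutil became dead because directory moves go through hestia's helper; the call site in untar_file uses move_recursively, which (as an assumption, not hestia's documented contract) behaves roughly like:

import shutil

def move_recursively_sketch(src, dst):
    # Hypothetical stand-in for hestia.fs.move_recursively: relocate a
    # directory tree from src to dst, creating dst's parent as needed.
    shutil.move(src, dst)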