code | repo_name | path | language | license | size
---|---|---|---|---|---|
from rapidsms.contrib.locations.models import Location
from survey.models import Household
class BatchCompletionRates:
def __init__(self, batch):
self.batch = batch
def calculate_percent(self, numerator, denominator):
try:
return numerator * 100 / denominator
except ZeroDivisionError:
return 0
def percent_completed_households(self, location, survey, ea=None):
all_households = Household.all_households_in(location, survey, ea)
return self.percentage_completed(all_households)
def percentage_completed(self, all_households):
completed_households = filter(lambda household: household.has_completed_batch(self.batch), all_households)
return self.calculate_percent(len(completed_households), all_households.count())
class BatchLocationCompletionRates(BatchCompletionRates):
def __init__(self, batch, location, ea=None):
self.batch = batch
self.ea = ea
self.location = location
self.all_households = Household.all_households_in(self.location, batch.survey, ea)
def percent_completed_households(self):
all_households = self.all_households
completed_households = filter(lambda household: household.has_completed_batch(self.batch), all_households)
return self.calculate_percent(len(completed_households), all_households.count())
def interviewed_households(self):
_interviewed_households = []
for household in self.all_households:
attributes = {'household': household,
'date_interviewed': household.date_interviewed_for(self.batch),
'number_of_member_interviewed': len(household.members_interviewed(self.batch))}
_interviewed_households.append(attributes)
return _interviewed_households
class BatchHighLevelLocationsCompletionRates(BatchCompletionRates):
def __init__(self, batch, locations, ea=None):
self.batch = batch
self.locations = locations
self.ea = ea
def attributes(self):
_completion_rates =[]
for location in self.locations:
attribute = {'location': location,
'total_households': Household.all_households_in(location, self.batch.survey, self.ea).count(),
'completed_households_percent': self.percent_completed_households(location, self.batch.survey, self.ea)}
_completion_rates.append(attribute)
return _completion_rates
class BatchSurveyCompletionRates:
def __init__(self, location_type):
self.location_type = location_type
self.locations = Location.objects.filter(type=location_type)
def get_completion_formatted_for_json(self, survey):
all_batches = survey.batch.all()
completion_rates_dict = {}
number_of_batches = len(all_batches)
for location in self.locations:
percent_completed = 0.0
percent_completed = reduce(lambda percent_completed, rate: percent_completed + rate,
map(lambda batch: BatchLocationCompletionRates(batch, location).percent_completed_households(), all_batches))
completion_rates_dict[location.name] = percent_completed/number_of_batches if survey.is_open_for(location) else -1
return completion_rates_dict
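# Illustrative usage (a sketch; `open_batch`, `district` and `district_type` are
# hypothetical objects: a Batch whose survey is running, a Location, and its
# LocationType):
#
#   rates = BatchLocationCompletionRates(open_batch, district)
#   rates.percent_completed_households()   # e.g. 42
#   rates.interviewed_households()         # one dict per household in the location
#
#   BatchSurveyCompletionRates(district_type).get_completion_formatted_for_json(open_batch.survey)
#   # -> {'District A': 37.5, 'District B': -1, ...}  (-1 marks locations where the survey is closed)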
| antsmc2/mics | survey/services/completion_rates_calculator.py | Python | bsd-3-clause | 3,385 |
# Enable the use of cinje templates.
__import__('cinje') # Doing it this way prevents an "imported but unused" warning.
# Get a reference to the Application class.
from web.core import Application
# Get references to web framework extensions.
from web.ext.annotation import AnnotationExtension # Built-in to WebCore.
from web.ext.debug import DebugExtension
from web.ext.serialize import SerializationExtension # New in 2.0.3!
from web.ext.db import DatabaseExtension # From external dependency: web.db
# Get a reference to our database connection adapter.
from web.db.mongo import MongoDBConnection # From external dependency: marrow.mongo
# Get a reference to our Wiki root object.
from web.app.wiki.root import Wiki
# This is our WSGI application instance.
app = Application(Wiki, extensions=[
# Extensions that are always enabled.
AnnotationExtension(), # Allows us to use Python 3 function annotations.
SerializationExtension(), # Allows the return of mappings from endpoints, transformed to JSON.
DatabaseExtension(MongoDBConnection("mongodb://localhost/test")),
] + ([
# Extensions that are only enabled in development or testing environments.
DebugExtension() # Interactive traceback debugger, but gives remote code execution access.
] if __debug__ else []))
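# Note: __debug__ is False when Python runs with the -O flag, so the
# DebugExtension (and the remote code execution access it grants) is left out
# automatically in optimized/production invocations.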
# If we're run as the "main script", serve our application over HTTP.
if __name__ == "__main__":
app.serve('wsgiref')
| amcgregor/WebCore-Tutorial | web/app/wiki/__main__.py | Python | mit | 1,417 |
import traceback
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
import tmdb3
log = CPLog(__name__)
autoload = 'TheMovieDb'
class TheMovieDb(MovieProvider):
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 1)
addEvent('info.search', self.search, priority = 1)
addEvent('movie.search', self.search, priority = 1)
addEvent('movie.info_by_tmdb', self.getInfo)
# Configure TMDB settings
tmdb3.set_key(self.conf('api_key'))
tmdb3.set_cache('null')
def search(self, q, limit = 12):
""" Find movie by name """
if self.isDisabled():
return False
search_string = simplifyString(q)
cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
results = self.getCache(cache_key)
if not results:
log.debug('Searching for movie: %s', q)
raw = None
try:
raw = tmdb3.searchMovie(search_string)
except:
log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc()))
results = []
if raw:
try:
nr = 0
for movie in raw:
results.append(self.parseMovie(movie, extended = False))
nr += 1
if nr == limit:
break
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
self.setCache(cache_key, results)
return results
except SyntaxError as e:
log.error('Failed to parse XML response: %s', e)
return False
return results
def getInfo(self, identifier = None, extended = True):
if not identifier:
return {}
cache_key = 'tmdb.cache.%s%s' % (identifier, '.ex' if extended else '')
result = self.getCache(cache_key)
if not result:
try:
log.debug('Getting info: %s', cache_key)
# noinspection PyArgumentList
movie = tmdb3.Movie(identifier)
try: exists = movie.title is not None
except: exists = False
if exists:
result = self.parseMovie(movie, extended = extended)
self.setCache(cache_key, result)
else:
result = {}
except:
log.error('Failed getting info for %s: %s', (identifier, traceback.format_exc()))
return result
def parseMovie(self, movie, extended = True):
cache_key = 'tmdb.cache.%s%s' % (movie.id, '.ex' if extended else '')
movie_data = self.getCache(cache_key)
if not movie_data:
# Images
poster = self.getImage(movie, type = 'poster', size = 'poster')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {}
}
# Genres
try:
genres = [genre.name for genre in movie.genres]
except:
genres = []
# 1900 is the same as None
year = str(movie.releasedate or '')[:4]
if not movie.releasedate or year == '1900' or year.lower() == 'none':
year = None
# Gather actors data
actors = {}
if extended:
for cast_item in movie.cast:
try:
actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character)
images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.id,
'titles': [toUnicode(movie.title)],
'original_title': movie.originaltitle,
'images': images,
'imdb': movie.imdb,
'runtime': movie.runtime,
'released': str(movie.releasedate),
'year': tryInt(year, None),
'plot': movie.overview,
'genres': genres,
'collection': getattr(movie.collection, 'name', None),
'actor_roles': actors
}
movie_data = dict((k, v) for k, v in movie_data.items() if v)
# Add alternative names
if extended:
movie_data['titles'].append(movie.originaltitle)
for alt in movie.alternate_titles:
alt_name = alt.title
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name)
# Cache movie parsed
self.setCache(cache_key, movie_data)
return movie_data
def getImage(self, movie, type = 'poster', size = 'poster'):
image_url = ''
try:
image_url = getattr(movie, type).geturl(size = 'original')
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_url
def isDisabled(self):
if self.conf('api_key') == '':
log.error('No API key provided.')
return True
return False
config = [{
'name': 'themoviedb',
'groups': [
{
'tab': 'providers',
'name': 'tmdb',
'label': 'TheMovieDB',
'hidden': True,
'description': 'Used for all calls to TheMovieDB.',
'options': [
{
'name': 'api_key',
'default': '9b939aee0aaafc12a65bf448e4af9543',
'label': 'Api Key',
},
],
},
],
}]
| koomik/CouchPotatoServer | couchpotato/core/media/movie/providers/info/themoviedb.py | Python | gpl-3.0 | 6,809 |
#!/usr/bin/env python
# Copyright (C) 2012 Tianyang Li
# tmy1018@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
import getopt
import sys
from Bio import SeqIO
def main(args):
fmt = None
prefix = ""
suffix = ""
try:
opts, args = getopt.getopt(args, 'f:p:s:')
except getopt.GetoptError as err:
print >> sys.stderr, str(err)
sys.exit(1)
for opt, arg in opts:
if opt == '-s':
suffix = arg
if opt == '-p':
prefix = arg
if opt == '-f':
fmt = arg
if fmt == None:
print >> sys.stderr, "missing options"
sys.exit(1)
for fin in args:
for rec in SeqIO.parse(fin, fmt):
rec.id = "%s%s%s" % (prefix, rec.id, suffix)
print rec.format(fmt),
if __name__ == '__main__':
main(sys.argv[1:])
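# Illustrative invocation (hypothetical file names):
#   python add_to_read_name.py -f fastq -p sample1_ -s _trimmed reads.fastq > renamed.fastq
# -f selects the Bio.SeqIO format, -p/-s prepend/append text to every record id,
# and the renamed records are written to stdout.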
| tianyang-li/de-novo-metatranscriptome-analysis--the-uniform-model | misc/add_to_read_name.py | Python | gpl-3.0 | 1,385 |
'''
Created on 02.05.2012
:author: tobiasweigel
Copyright (c) 2012, Tobias Weigel, Deutsches Klimarechenzentrum GmbH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors.
'''
from lapis.model.do import DigitalObject, REFERENCE_SUBELEMENT_OF
class DigitalObjectSet(DigitalObject):
'''
A set (unsorted collection) of Digital Objects (or further sub-DO-sets), realized through a Hashmap.
The set does not impose specific semantics. It may be used both for arbitrary collections of largely
unrelated objects as well as hierarchical structures of data objects that are strongly connected.
'''
RESOURCE_TYPE = "DIGITAL_OBJECT_SET"
CHARACTERISTIC_SEGMENT_NUMBER = 3
class SetIterator(object):
def __init__(self, doset, hashiter):
self.__doset = doset
self.__hashiter = hashiter
def next(self):
index, vt, v = self.__hashiter.next()
dobj = self.__doset.infrastructure.resolve_pid(v)
return dobj
def __init__(self, do_infrastructure, identifier, references = None, alias_identifiers = None):
super(DigitalObjectSet, self).__init__(do_infrastructure, identifier, references = references, alias_identifiers=alias_identifiers)
self._resource_type = DigitalObjectSet.RESOURCE_TYPE
self.__hashmap = self._do_infra.manufacture_hashmap(self._id, self.CHARACTERISTIC_SEGMENT_NUMBER)
def add_do(self, dobj):
"""
Adds one or more Digital Objects to the set.
:param dobj: Either a DO instance or a list of DO instances.
"""
if isinstance(dobj, list):
for x in dobj:
if not isinstance(x, DigitalObject):
raise ValueError("The given list contains objects that are no Digital Object instances!")
self.__hashmap.set(x.identifier, x.identifier)
x._write_parent_info(self)
else:
if not isinstance(dobj, DigitalObject):
raise ValueError("The given object is not a Digital Object instance: %s" % dobj)
self.__hashmap.set(dobj.identifier, dobj.identifier)
dobj._write_parent_info(self)
def remove_do(self, dobj_or_index):
"""
Removes the given Digital Object(s) from the set.
:param dobj_or_index: Either a DO instance or a list of DO instances.
"""
if isinstance(dobj_or_index, list):
for x in dobj_or_index:
if not isinstance(x, DigitalObject):
raise ValueError("The given list contains objects that are no Digital Object instances!")
self.__hashmap.remove(x.identifier)
x._remove_parent_info(self)
else:
if not isinstance(dobj_or_index, DigitalObject):
raise ValueError("The given object is not a Digital Object instance: %s" % dobj_or_index)
self.__hashmap.remove(dobj_or_index.identifier)
dobj_or_index._remove_parent_info(self)
def contains_do(self, dobj):
"""
Check if the set contains the given Digital Object(s).
:param dobj: A DO instance or a list of DO instances.
:return: True if all given Digital Objects are contained in this set.
"""
if isinstance(dobj, list):
for x in dobj:
if not isinstance(x, DigitalObject):
raise ValueError("The given list contains objects that are no Digital Object instances!")
if not self.__hashmap.contains(x.identifier):
return False
return True
else:
if not isinstance(dobj, DigitalObject):
raise ValueError("The given object is not a Digital Object instance: %s" % dobj)
return self.__hashmap.contains(dobj.identifier)
def iter_set_elements(self):
"""
Iterate over the _elements in the Digital Object set.
:return: an iterator object
"""
for idx, v in self.__hashmap:
dobj = self._do_infra.lookup_pid(v[1])
yield dobj
def num_set_elements(self):
"""
Returns the number of set member elements.
:return: a non-negative int
"""
return self.__hashmap.size()
def __iter__(self):
return self.iter_set_elements()
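# Illustrative usage (a sketch; `doset` is assumed to be a DigitalObjectSet
# obtained from the infrastructure's factory, and `obj_a`/`obj_b` are existing
# DigitalObject instances):
#
#   doset.add_do([obj_a, obj_b])
#   doset.contains_do(obj_a)      # True
#   doset.num_set_elements()      # 2
#   for element in doset:         # delegates to iter_set_elements()
#       print element.identifier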
| TobiasWeigel/lapis | lapis/model/doset.py | Python | bsd-2-clause | 5,798 |
import sys, time
sys.path.insert(0, sys.argv[1])
from cffi import FFI
def _run_callback_in_thread():
ffi = FFI()
ffi.cdef("""
typedef int (*mycallback_func_t)(int, int);
int threaded_ballback_test(mycallback_func_t mycb);
""")
lib = ffi.verify("""
#include <pthread.h>
typedef int (*mycallback_func_t)(int, int);
void *my_wait_function(void *ptr) {
mycallback_func_t cbfunc = (mycallback_func_t)ptr;
cbfunc(10, 10);
cbfunc(12, 15);
return NULL;
}
int threaded_ballback_test(mycallback_func_t mycb) {
pthread_t thread;
pthread_create(&thread, NULL, my_wait_function, (void*)mycb);
return 0;
}
""", extra_compile_args=['-pthread'])
seen = []
@ffi.callback('int(*)(int,int)')
def mycallback(x, y):
time.sleep(0.022)
seen.append((x, y))
return 0
lib.threaded_ballback_test(mycallback)
count = 300
while len(seen) != 2:
time.sleep(0.01)
count -= 1
assert count > 0, "timeout"
assert seen == [(10, 10), (12, 15)]
print('STARTING')
_run_callback_in_thread()
print('DONE')
| chevah/python-cffi | testing/cffi0/callback_in_thread.py | Python | mit | 1,211 |
from setuptools import setup, find_packages
VERSION = (1, 0, 8)
# Dynamically calculate the version based on VERSION tuple
if len(VERSION)>2 and VERSION[2] is not None:
str_version = "%d.%d_%s" % VERSION[:3]
else:
str_version = "%d.%d" % VERSION[:2]
version= str_version + '_lieryan1'
setup(
name = 'django-chargify',
version = version,
description = "chargify",
long_description = """This is a generic SDK for hooking up with the Chargify API""",
author = 'Greg Doermann',
author_email = 'gdoermann@snirk.com',
url = 'http://github.com/gdoermann/django-chargify',
license = 'GNU General Public License',
platforms = ['any'],
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django'],
packages = find_packages(),
include_package_data = True,
)
| iron-io/django-chargify | setup.py | Python | gpl-2.0 | 1,136 |
import hashlib
import json
import os
import posixpath
import re
import unicodedata
import uuid
from urllib.parse import urljoin
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.db import models
from django.dispatch import receiver
from django.template.defaultfilters import slugify
from django.utils.crypto import get_random_string
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
from django_extensions.db.fields.json import JSONField
from django_statsd.clients import statsd
import olympia.core.logger
from olympia import amo
from olympia.amo.decorators import use_primary_db
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase, OnChangeMixin
from olympia.amo.storage_utils import copy_stored_file, move_stored_file
from olympia.amo.templatetags.jinja_helpers import (
urlparams, user_media_path, user_media_url)
from olympia.amo.urlresolvers import reverse
from olympia.applications.models import AppVersion
from olympia.files.utils import get_sha256, write_crx_as_xpi
log = olympia.core.logger.getLogger('z.files')
class File(OnChangeMixin, ModelBase):
id = PositiveAutoField(primary_key=True)
STATUS_CHOICES = amo.STATUS_CHOICES_FILE
version = models.ForeignKey(
'versions.Version', related_name='files',
on_delete=models.CASCADE)
platform = models.PositiveIntegerField(
choices=amo.SUPPORTED_PLATFORMS_CHOICES,
default=amo.PLATFORM_ALL.id,
db_column="platform_id"
)
filename = models.CharField(max_length=255, default='')
size = models.PositiveIntegerField(default=0) # In bytes.
hash = models.CharField(max_length=255, default='')
# The original hash of the file, before we sign it, or repackage it in
# any other way.
original_hash = models.CharField(max_length=255, default='')
status = models.PositiveSmallIntegerField(
choices=STATUS_CHOICES.items(), default=amo.STATUS_AWAITING_REVIEW)
datestatuschanged = models.DateTimeField(null=True, auto_now_add=True)
is_restart_required = models.BooleanField(default=False)
strict_compatibility = models.BooleanField(default=False)
reviewed = models.DateTimeField(null=True, blank=True)
# The `binary` field is used to store the flags from amo-validator when it
# finds files with binary extensions or files that may contain binary
# content.
binary = models.BooleanField(default=False)
# The `binary_components` field is used to store the flag from
# amo-validator when it finds "binary-components" in the chrome manifest
# file, used for default to compatible.
binary_components = models.BooleanField(default=False)
# Serial number of the certificate use for the signature.
cert_serial_num = models.TextField(blank=True)
# Is the file signed by Mozilla?
is_signed = models.BooleanField(default=False)
# Is the file an experiment (see bug 1220097)?
is_experiment = models.BooleanField(default=False)
# Is the file a WebExtension?
is_webextension = models.BooleanField(default=False)
# Is the file a special "Mozilla Signed Extension"
# see https://wiki.mozilla.org/Add-ons/InternalSigning
is_mozilla_signed_extension = models.BooleanField(default=False)
# The user has disabled this file and this was its status.
# STATUS_NULL means the user didn't disable the File - i.e. Mozilla did.
original_status = models.PositiveSmallIntegerField(
default=amo.STATUS_NULL)
class Meta(ModelBase.Meta):
db_table = 'files'
indexes = [
models.Index(fields=('created', 'version'),
name='created_idx'),
models.Index(fields=('binary_components',), name='files_cedd2560'),
models.Index(fields=('datestatuschanged', 'version'),
name='statuschanged_idx'),
models.Index(fields=('platform',), name='platform_id'),
models.Index(fields=('status',), name='status'),
]
def __str__(self):
return str(self.id)
def get_platform_display(self):
return force_text(amo.PLATFORMS[self.platform].name)
@property
def has_been_validated(self):
try:
self.validation
except FileValidation.DoesNotExist:
return False
else:
return True
@property
def automated_signing(self):
"""True if this file is eligible for automated signing. This currently
means that either its version is unlisted."""
return self.version.channel == amo.RELEASE_CHANNEL_UNLISTED
def get_file_cdn_url(self, attachment=False):
"""Return the URL for the file corresponding to this instance
on the CDN."""
if attachment:
host = posixpath.join(user_media_url('addons'), '_attachments')
else:
host = user_media_url('addons')
return posixpath.join(
*map(force_bytes, [host, self.version.addon.id, self.filename]))
def get_url_path(self, src, attachment=False):
return self._make_download_url(
'downloads.file', src, attachment=attachment)
def _make_download_url(self, view_name, src, attachment=False):
kwargs = {
'file_id': self.pk
}
if attachment:
kwargs['type'] = 'attachment'
url = os.path.join(reverse(view_name, kwargs=kwargs), self.filename)
return urlparams(url, src=src)
@classmethod
def from_upload(cls, upload, version, platform, parsed_data=None):
"""
Create a File instance from a FileUpload, a Version, a platform id
and the parsed_data generated by parse_addon().
        Note that it's the caller's responsibility to ensure the file is valid.
We can't check for that here because an admin may have overridden the
validation results."""
assert parsed_data is not None
file_ = cls(version=version, platform=platform)
upload_path = force_text(nfd_str(upload.path))
ext = force_text(os.path.splitext(upload_path)[1])
file_.filename = file_.generate_filename(extension=ext or '.xpi')
# Size in bytes.
file_.size = storage.size(upload_path)
file_.is_restart_required = parsed_data.get(
'is_restart_required', False)
file_.strict_compatibility = parsed_data.get(
'strict_compatibility', False)
file_.is_experiment = parsed_data.get('is_experiment', False)
file_.is_webextension = parsed_data.get('is_webextension', False)
file_.is_mozilla_signed_extension = parsed_data.get(
'is_mozilla_signed_extension', False)
file_.hash = file_.generate_hash(upload_path)
file_.original_hash = file_.hash
file_.save()
if file_.is_webextension:
permissions = list(parsed_data.get('permissions', []))
# Add content_scripts host matches too.
for script in parsed_data.get('content_scripts', []):
permissions.extend(script.get('matches', []))
if permissions:
WebextPermission.objects.create(permissions=permissions,
file=file_)
log.debug('New file: %r from %r' % (file_, upload))
# Move the uploaded file from the temp location.
copy_stored_file(upload_path, file_.current_file_path)
if upload.validation:
validation = json.loads(upload.validation)
FileValidation.from_json(file_, validation)
return file_
def generate_hash(self, filename=None):
"""Generate a hash for a file."""
with open(filename or self.current_file_path, 'rb') as fobj:
return 'sha256:{}'.format(get_sha256(fobj))
def generate_filename(self, extension=None):
"""
Files are in the format of:
{addon_name}-{version}-{apps}-{platform}
"""
parts = []
addon = self.version.addon
# slugify drops unicode so we may end up with an empty string.
# Apache did not like serving unicode filenames (bug 626587).
extension = extension or '.xpi'
name = slugify(addon.name).replace('-', '_') or 'addon'
parts.append(name)
parts.append(self.version.version)
if addon.type not in amo.NO_COMPAT and self.version.compatible_apps:
apps = '+'.join(sorted([a.shortername for a in
self.version.compatible_apps]))
parts.append(apps)
if self.platform and self.platform != amo.PLATFORM_ALL.id:
parts.append(amo.PLATFORMS[self.platform].shortname)
self.filename = '-'.join(parts) + extension
return self.filename
_pretty_filename = re.compile(r'(?P<slug>[a-z0-7_]+)(?P<suffix>.*)')
def pretty_filename(self, maxlen=20):
"""Displayable filename.
Truncates filename so that the slug part fits maxlen.
"""
m = self._pretty_filename.match(self.filename)
if not m:
return self.filename
if len(m.group('slug')) < maxlen:
return self.filename
return u'%s...%s' % (m.group('slug')[0:(maxlen - 3)],
m.group('suffix'))
def latest_xpi_url(self, attachment=False):
addon = self.version.addon
kw = {'addon_id': addon.slug}
if self.platform != amo.PLATFORM_ALL.id:
kw['platform'] = self.platform
if attachment:
kw['type'] = 'attachment'
return os.path.join(reverse('downloads.latest', kwargs=kw),
'addon-%s-latest%s' % (addon.pk, self.extension))
@property
def file_path(self):
return os.path.join(user_media_path('addons'),
str(self.version.addon_id),
self.filename)
@property
def addon(self):
return self.version.addon
@property
def guarded_file_path(self):
return os.path.join(user_media_path('guarded_addons'),
str(self.version.addon_id), self.filename)
@property
def current_file_path(self):
"""Returns the current path of the file, whether or not it is
guarded."""
file_disabled = self.status == amo.STATUS_DISABLED
addon_disabled = self.addon.is_disabled
if file_disabled or addon_disabled:
return self.guarded_file_path
else:
return self.file_path
@property
def extension(self):
return os.path.splitext(self.filename)[-1]
def move_file(self, source_path, destination_path, log_message):
"""Move a file from `source_path` to `destination_path` and delete the
source directory if it's empty once the file has been successfully
moved.
Meant to move files from/to the guarded file path as they are disabled
or re-enabled.
IOError and UnicodeEncodeError are caught and logged."""
log_message = force_text(log_message)
try:
if storage.exists(source_path):
source_parent_path = os.path.dirname(source_path)
log.info(log_message.format(
source=source_path, destination=destination_path))
move_stored_file(source_path, destination_path)
# Now that the file has been deleted, remove the directory if
# it exists to prevent the main directory from growing too
# much (#11464)
remaining_dirs, remaining_files = storage.listdir(
source_parent_path)
if len(remaining_dirs) == len(remaining_files) == 0:
storage.delete(source_parent_path)
except (UnicodeEncodeError, IOError):
msg = u'Move Failure: {} {}'.format(source_path, destination_path)
log.exception(msg)
def hide_disabled_file(self):
"""Move a file from the public path to the guarded file path."""
if not self.filename:
return
src, dst = self.file_path, self.guarded_file_path
self.move_file(
src, dst, 'Moving disabled file: {source} => {destination}')
def unhide_disabled_file(self):
"""Move a file from guarded file path to the public file path."""
if not self.filename:
return
src, dst = self.guarded_file_path, self.file_path
self.move_file(
src, dst, 'Moving undisabled file: {source} => {destination}')
@cached_property
def webext_permissions_list(self):
if not self.is_webextension:
return []
try:
# Filter out any errant non-strings included in the manifest JSON.
# Remove any duplicate permissions.
permissions = set()
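            # Inside the comprehension below the name `permissions` still refers
            # to this set: `p in permissions` filters repeats, and because
            # set.add() returns None (falsy) each first occurrence is recorded as
            # a side effect while being kept, so order is preserved.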
permissions = [p for p in self._webext_permissions.permissions
if isinstance(p, str) and not
(p in permissions or permissions.add(p))]
return permissions
except WebextPermission.DoesNotExist:
return []
@use_primary_db
def update_status(sender, instance, **kw):
if not kw.get('raw'):
try:
addon = instance.version.addon
if 'delete' in kw:
addon.update_status(ignore_version=instance.version)
else:
addon.update_status()
except models.ObjectDoesNotExist:
pass
def update_status_delete(sender, instance, **kw):
kw['delete'] = True
return update_status(sender, instance, **kw)
models.signals.post_save.connect(
update_status, sender=File, dispatch_uid='version_update_status')
models.signals.post_delete.connect(
update_status_delete, sender=File, dispatch_uid='version_update_status')
@receiver(models.signals.post_delete, sender=File,
dispatch_uid='cleanup_file')
def cleanup_file(sender, instance, **kw):
""" On delete of the file object from the database, unlink the file from
the file system """
if kw.get('raw') or not instance.filename:
return
# Use getattr so the paths are accessed inside the try block.
for path in ('file_path', 'guarded_file_path'):
try:
filename = getattr(instance, path)
except models.ObjectDoesNotExist:
return
if storage.exists(filename):
log.info('Removing filename: %s for file: %s'
% (filename, instance.pk))
storage.delete(filename)
@File.on_change
def check_file(old_attr, new_attr, instance, sender, **kw):
if kw.get('raw'):
return
old, new = old_attr.get('status'), instance.status
if new == amo.STATUS_DISABLED and old != amo.STATUS_DISABLED:
instance.hide_disabled_file()
elif old == amo.STATUS_DISABLED and new != amo.STATUS_DISABLED:
instance.unhide_disabled_file()
# Log that the hash has changed.
old, new = old_attr.get('hash'), instance.hash
if old != new:
try:
addon = instance.version.addon.pk
except models.ObjectDoesNotExist:
addon = 'unknown'
log.info('Hash changed for file: %s, addon: %s, from: %s to: %s' %
(instance.pk, addon, old, new))
def track_new_status(sender, instance, *args, **kw):
if kw.get('raw'):
        # The file is being loaded from a fixture.
return
if kw.get('created'):
track_file_status_change(instance)
models.signals.post_save.connect(track_new_status,
sender=File,
dispatch_uid='track_new_file_status')
@File.on_change
def track_status_change(old_attr=None, new_attr=None, **kwargs):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
new_status = new_attr.get('status')
old_status = old_attr.get('status')
if new_status != old_status:
track_file_status_change(kwargs['instance'])
def track_file_status_change(file_):
statsd.incr('file_status_change.all.status_{}'.format(file_.status))
class FileUpload(ModelBase):
"""Created when a file is uploaded for validation/submission."""
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
path = models.CharField(max_length=255, default='')
name = models.CharField(max_length=255, default='',
help_text="The user's original filename")
hash = models.CharField(max_length=255, default='')
user = models.ForeignKey(
'users.UserProfile', null=True, on_delete=models.CASCADE)
valid = models.BooleanField(default=False)
validation = models.TextField(null=True)
automated_signing = models.BooleanField(default=False)
compat_with_app = models.PositiveIntegerField(
choices=amo.APPS_CHOICES, db_column="compat_with_app_id", null=True)
compat_with_appver = models.ForeignKey(
AppVersion, null=True, related_name='uploads_compat_for_appver',
on_delete=models.CASCADE)
# Not all FileUploads will have a version and addon but it will be set
# if the file was uploaded using the new API.
version = models.CharField(max_length=255, null=True)
addon = models.ForeignKey(
'addons.Addon', null=True, on_delete=models.CASCADE)
access_token = models.CharField(max_length=40, null=True)
objects = ManagerBase()
class Meta(ModelBase.Meta):
db_table = 'file_uploads'
indexes = [
models.Index(fields=('compat_with_app',),
name='file_uploads_afe99c5e'),
]
constraints = [
models.UniqueConstraint(fields=('uuid',), name='uuid'),
]
def __str__(self):
return str(self.uuid.hex)
def save(self, *args, **kw):
if self.validation:
if self.load_validation()['errors'] == 0:
self.valid = True
if not self.access_token:
self.access_token = self.generate_access_token()
super(FileUpload, self).save(*args, **kw)
def add_file(self, chunks, filename, size):
if not self.uuid:
self.uuid = self._meta.get_field('uuid')._create_uuid()
filename = force_text(u'{0}_{1}'.format(self.uuid.hex, filename))
loc = os.path.join(user_media_path('addons'), 'temp', uuid.uuid4().hex)
base, ext = os.path.splitext(filename)
is_crx = False
# Change a ZIP to an XPI, to maintain backward compatibility
# with older versions of Firefox and to keep the rest of the XPI code
# path as consistent as possible for ZIP uploads.
# See: https://github.com/mozilla/addons-server/pull/2785
if ext == '.zip':
ext = '.xpi'
# If the extension is a CRX, we need to do some actual work to it
# before we just convert it to an XPI. We strip the header from the
# CRX, then it's good; see more about the CRX file format here:
# https://developer.chrome.com/extensions/crx
if ext == '.crx':
ext = '.xpi'
is_crx = True
if ext in amo.VALID_ADDON_FILE_EXTENSIONS:
loc += ext
log.info('UPLOAD: %r (%s bytes) to %r' % (filename, size, loc),
extra={'email': (self.user.email
if self.user and self.user.email else '')})
if is_crx:
hash_func = write_crx_as_xpi(chunks, loc)
else:
hash_func = hashlib.sha256()
with storage.open(loc, 'wb') as file_destination:
for chunk in chunks:
hash_func.update(chunk)
file_destination.write(chunk)
self.path = loc
self.name = filename
self.hash = 'sha256:%s' % hash_func.hexdigest()
self.save()
def generate_access_token(self):
"""
Returns an access token used to secure download URLs.
"""
return get_random_string(40)
def get_authenticated_download_url(self):
"""
Returns a download URL containing an access token bound to this file.
"""
absolute_url = urljoin(
settings.EXTERNAL_SITE_URL,
reverse('files.serve_file_upload', kwargs={'uuid': self.uuid.hex})
)
return '{}?access_token={}'.format(absolute_url, self.access_token)
@classmethod
def from_post(cls, chunks, filename, size, **params):
upload = FileUpload(**params)
upload.add_file(chunks, filename, size)
return upload
@property
def processed(self):
return bool(self.valid or self.validation)
@property
def validation_timeout(self):
if self.processed:
validation = self.load_validation()
messages = validation['messages']
timeout_id = ['validator',
'unexpected_exception',
'validation_timeout']
return any(msg['id'] == timeout_id for msg in messages)
else:
return False
@property
def processed_validation(self):
"""Return processed validation results as expected by the frontend."""
if self.validation:
# Import loop.
from olympia.devhub.utils import process_validation
validation = self.load_validation()
is_compatibility = self.compat_with_app is not None
return process_validation(
validation,
is_compatibility=is_compatibility,
file_hash=self.hash)
@property
def passed_all_validations(self):
return self.processed and self.valid
def load_validation(self):
return json.loads(self.validation)
@property
def pretty_name(self):
parts = self.name.split('_', 1)
if len(parts) > 1:
return parts[1]
return self.name
class FileValidation(ModelBase):
id = PositiveAutoField(primary_key=True)
file = models.OneToOneField(
File, related_name='validation', on_delete=models.CASCADE)
valid = models.BooleanField(default=False)
errors = models.IntegerField(default=0)
warnings = models.IntegerField(default=0)
notices = models.IntegerField(default=0)
validation = models.TextField()
class Meta:
db_table = 'file_validation'
@classmethod
def from_json(cls, file, validation):
if isinstance(validation, str):
validation = json.loads(validation)
if 'metadata' in validation:
if (validation['metadata'].get('contains_binary_extension') or
validation['metadata'].get('contains_binary_content')):
file.update(binary=True)
if validation['metadata'].get('binary_components'):
file.update(binary_components=True)
# Delete any past results.
# We most often wind up with duplicate results when multiple requests
# for the same validation data are POSTed at the same time, which we
# currently do not have the ability to track.
cls.objects.filter(file=file).delete()
return cls.objects.create(
file=file,
validation=json.dumps(validation),
errors=validation['errors'],
warnings=validation['warnings'],
notices=validation['notices'],
valid=validation['errors'] == 0)
@property
def processed_validation(self):
"""Return processed validation results as expected by the frontend."""
# Import loop.
from olympia.devhub.utils import process_validation
return process_validation(
json.loads(self.validation),
file_hash=self.file.original_hash,
channel=self.file.version.channel)
class WebextPermission(ModelBase):
NATIVE_MESSAGING_NAME = u'nativeMessaging'
permissions = JSONField(default={})
file = models.OneToOneField('File', related_name='_webext_permissions',
on_delete=models.CASCADE)
class Meta:
db_table = 'webext_permissions'
def nfd_str(u):
"""Uses NFD to normalize unicode strings."""
if isinstance(u, str):
return unicodedata.normalize('NFD', u).encode('utf-8')
return u
| psiinon/addons-server | src/olympia/files/models.py | Python | bsd-3-clause | 24,526 |
from __future__ import print_function, division
class ExistingTableException(Exception):
def __init__(self):
pass
def __str__(self):
return "Table already exists - use overwrite to replace existing table"
class TableException(Exception):
def __init__(self, tables, arg):
self.tables = tables
self.arg = arg
def __str__(self):
table_list = ""
for table in self.tables:
if type(table) == int:
table_list += " " + self.arg + \
"=%i : %s\n" % (table, self.tables[table])
elif type(table) == str:
table_list += " " + self.arg + \
"=%s\n" % table
else:
raise Exception("Unexpected table index type: %s" %
str(type(table)))
message = "There is more than one table in the requested file. " + \
"Please specify the table desired with the " + self.arg + \
"= argument. The available tables are:\n\n" + table_list
return message
class VectorException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return "This table contains vector columns:\n\n" + \
self.value + "\n\n" + \
"but the output format selected does not. Remove these " + \
"columns using the remove_columns() method and try again."
| atpy/atpy | atpy/exceptions.py | Python | mit | 1,471 |
#!/usr/bin/env python3
# coding: utf-8
#
# Copyright 2020 Project U-Ray Authors
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file.
#
# SPDX-License-Identifier: ISC
import re
import sys
import json
def main():
line_re = re.compile(r'F(0x[0-9A-Fa-f]+)W(\d+)B(\d+)')
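    # Example of a line this matches (hypothetical values): "F0x0003f801W45B17"
    # -> frame address 0x0003f801, word 45, bit 17 within that word.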
frames_to_tiles = {} # (start, size, tile, tile offset)
with open(sys.argv[1]) as tb_f:
tbj = json.load(tb_f)
for tilename, tiledata in tbj.items():
tile_offset = 0
for chunk in tiledata:
frame, start, size = chunk
if frame not in frames_to_tiles:
frames_to_tiles[frame] = []
frames_to_tiles[frame].append((start, size, tilename, tile_offset))
tile_offset += size
tile_bits = {}
with open(sys.argv[2]) as df:
for line in df:
m = line_re.match(line)
if not m:
continue
frame = int(m[1], 16)
if frame not in frames_to_tiles:
continue
framebit = int(m[2]) * 32 + int(m[3])
for fb in frames_to_tiles[frame]:
start, size, tile, toff = fb
if framebit > start and framebit < (start + size):
if tile not in tile_bits:
tile_bits[tile] = set()
tile_bits[tile].add(toff + (framebit - start))
for tile, bits in sorted(tile_bits.items()):
if "CLE" in tile:
if 152 not in bits:
print(tile)
if "INT" in tile:
if 3640 not in bits:
print(tile)
if __name__ == "__main__":
main()
| SymbiFlow/prjuray-tools | tools/oddtiles.py | Python | isc | 1,689 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetVMInstanceRequiredIDs(Model):
"""Specifies a list of virtual machine instance IDs from the VM scale set.
:param instance_ids: The virtual machine scale set instance ids.
:type instance_ids: list[str]
"""
_validation = {
'instance_ids': {'required': True},
}
_attribute_map = {
'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
}
def __init__(self, instance_ids):
super(VirtualMachineScaleSetVMInstanceRequiredIDs, self).__init__()
self.instance_ids = instance_ids
| AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_scale_set_vm_instance_required_ids.py | Python | mit | 1,086 |
#!/usr/bin/env python
from __future__ import print_function
from collections import defaultdict
from os import listdir
from os.path import abspath, basename, dirname, isdir, isfile, join, realpath, relpath, splitext
import re
from subprocess import Popen, PIPE
import sys
# Runs the tests.
WREN_DIR = dirname(dirname(realpath(__file__)))
WREN_APP = join(WREN_DIR, 'wren','bin','Release', 'wren')
EXPECT_PATTERN = re.compile(r'// expect: ?(.*)')
EXPECT_ERROR_PATTERN = re.compile(r'// expect error(?! line)')
EXPECT_ERROR_LINE_PATTERN = re.compile(r'// expect error line (\d+)')
EXPECT_RUNTIME_ERROR_PATTERN = re.compile(r'// expect runtime error: (.+)')
ERROR_PATTERN = re.compile(r'\[.* line (\d+)\] Error')
STACK_TRACE_PATTERN = re.compile(r'\[main line (\d+)\] in')
STDIN_PATTERN = re.compile(r'// stdin: (.*)')
SKIP_PATTERN = re.compile(r'// skip: (.*)')
NONTEST_PATTERN = re.compile(r'// nontest')
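# Annotations these patterns pick up inside .wren test files (illustrative):
#   // expect: <output line>
#   // expect error
#   // expect error line <n>
#   // expect runtime error: <message>
#   // stdin: <line fed to the program>
#   // skip: <reason>
#   // nontest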
passed = 0
failed = 0
skipped = defaultdict(int)
num_skipped = 0
expectations = 0
def color_text(text, color):
"""Converts text to a string and wraps it in the ANSI escape sequence for
color, if supported."""
# No ANSI escapes on Windows.
#if sys.platform == 'win32':
return str(text)
#return color + str(text) + '\033[0m'
def green(text): return color_text(text, '\033[32m')
def pink(text): return color_text(text, '\033[91m')
def red(text): return color_text(text, '\033[31m')
def yellow(text): return color_text(text, '\033[33m')
def walk(dir, callback, ignored=None):
"""
Walks [dir], and executes [callback] on each file unless it is [ignored].
"""
if not ignored:
ignored = []
ignored += [".",".."]
dir = abspath(dir)
for file in [file for file in listdir(dir) if not file in ignored]:
nfile = join(dir, file)
if isdir(nfile):
walk(nfile, callback)
else:
callback(nfile)
def print_line(line=None):
# Erase the line.
print('\033[2K', end='')
# Move the cursor to the beginning.
print('\r', end='')
if line:
print(line, end='')
sys.stdout.flush()
def run_script(app, path, type):
global passed
global failed
global skipped
global num_skipped
global expectations
if (splitext(path)[1] != '.wren'):
return
# Check if we are just running a subset of the tests.
if len(sys.argv) == 2:
this_test = relpath(path, join(WREN_DIR, 'test'))
if not this_test.startswith(sys.argv[1]):
return
# Make a nice short path relative to the working directory.
# Normalize it to use "/"
path = relpath(path).replace("\\", "/")
# Read the test and parse out the expectations.
expect_output = []
expect_error = []
expect_runtime_error_line = 0
expect_runtime_error = None
expect_return = 0
input_lines = []
print_line('Passed: ' + green(passed) +
' Failed: ' + red(failed) +
' Skipped: ' + yellow(num_skipped))
line_num = 1
with open(path, 'r') as file:
for line in file:
match = EXPECT_PATTERN.search(line)
if match:
expect_output.append((match.group(1), line_num))
expectations += 1
match = EXPECT_ERROR_PATTERN.search(line)
if match:
expect_error.append(line_num)
# If we expect compile errors, it should exit with 65.
expect_return = 65
expectations += 1
match = EXPECT_ERROR_LINE_PATTERN.search(line)
if match:
expect_error.append(int(match.group(1)))
                # If we expect compile errors, it should exit with 65.
expect_return = 65
expectations += 1
match = EXPECT_RUNTIME_ERROR_PATTERN.search(line)
if match:
expect_runtime_error_line = line_num
expect_runtime_error = match.group(1)
# If we expect a runtime error, it should exit with 70.
expect_return = 70
expectations += 1
match = STDIN_PATTERN.search(line)
if match:
input_lines.append(match.group(1) + '\n')
match = SKIP_PATTERN.search(line)
if match:
num_skipped += 1
skipped[match.group(1)] += 1
return
match = NONTEST_PATTERN.search(line)
if match:
# Not a test file at all, so ignore it.
return
line_num += 1
    # If any input is fed to the test in stdin, concatenate it into one string.
input_bytes = None
if len(input_lines) > 0:
input_bytes = "".join(input_lines).encode("utf-8")
# Run the test.
test_arg = path
if type == "api test":
# Just pass the suite name to API tests.
test_arg = basename(splitext(test_arg)[0])
print(test_arg)
proc = Popen([app, test_arg], stdin=PIPE, stdout=PIPE, stderr=PIPE)
(out, err) = proc.communicate(input_bytes)
fails = []
try:
out = out.decode("utf-8").replace('\r\n', '\n')
err = err.decode("utf-8").replace('\r\n', '\n')
except:
fails.append('Error decoding output.')
# Validate that no unexpected errors occurred.
if expect_return != 0 and err != '':
lines = err.split('\n')
if expect_runtime_error:
# Make sure we got the right error.
if lines[0] != expect_runtime_error:
fails.append('Expected runtime error "' + expect_runtime_error +
'" and got:')
fails.append(lines[0])
else:
lines = err.split('\n')
while len(lines) > 0:
line = lines.pop(0)
match = ERROR_PATTERN.search(line)
if match:
if float(match.group(1)) not in expect_error:
fails.append('Unexpected error:')
fails.append(line)
elif line != '':
fails.append('Unexpected output on stderr:')
fails.append(line)
else:
for line in expect_error:
fails.append('Expected error on line ' + str(line) + ' and got none.')
if expect_runtime_error:
fails.append('Expected runtime error "' + expect_runtime_error +
'" and got none.')
# Validate the exit code.
if proc.returncode != expect_return:
fails.append('Expected return code {0} and got {1}. Stderr:'
.format(expect_return, proc.returncode))
fails += err.split('\n')
else:
# Validate the output.
expect_index = 0
# Remove the trailing last empty line.
out_lines = out.split('\n')
if out_lines[-1] == '':
del out_lines[-1]
for line in out_lines:
#if sys.version_info < (3, 0):
#line = line.encode('utf-8')
if type == "example":
# Ignore output from examples.
pass
elif expect_index >= len(expect_output):
fails.append('Got output "{0}" when none was expected.'.format(line))
elif expect_output[expect_index][0] != line:
fails.append('Expected output "{0}" on line {1} and got "{2}".'.
format(expect_output[expect_index][0],
expect_output[expect_index][1], line))
expect_index += 1
while expect_index < len(expect_output):
fails.append('Missing expected output "{0}" on line {1}.'.
format(expect_output[expect_index][0],
expect_output[expect_index][1]))
expect_index += 1
# Display the results.
if len(fails) == 0:
passed += 1
else:
failed += 1
print_line(red('FAIL') + ': ' + path)
print('')
for fail in fails:
print(' ' + pink(fail))
print('')
def run_test(path, example=False):
run_script(WREN_APP, path, "test")
def run_api_test(path):
pass
def run_example(path):
run_script(WREN_APP, path, "example")
walk(join(WREN_DIR, 'test'), run_test, ignored=['api', 'benchmark'])
print_line()
if failed == 0:
print('All ' + green(passed) + ' tests passed (' + str(expectations) +
' expectations).')
else:
print(green(passed) + ' tests passed. ' + red(failed) + ' tests failed.')
for key in sorted(skipped.keys()):
print('Skipped ' + yellow(skipped[key]) + ' tests: ' + key)
if failed != 0:
sys.exit(1)
| robotii/Wren.NET | script/test.py | Python | mit | 7,862 |
import specs
from twisted.internet.protocol import ClientCreator
from twisted.internet import reactor
from txamqp.client import TwistedDelegate
from txamqp.protocol import AMQClient
import txamqp.spec
def createClient(amqp_host, amqp_vhost, amqp_port=5672):
amqp_spec = txamqp.spec.loadString(specs.v0_8)
amqp_delegate = TwistedDelegate()
client = ClientCreator(reactor,
AMQClient,
delegate=amqp_delegate,
vhost=amqp_vhost,
spec=amqp_spec,
heartbeat=30).connectTCP(amqp_host, amqp_port)
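    # Note: ClientCreator.connectTCP returns a Deferred that fires with the
    # connected AMQClient, so callers add a callback to it, e.g. (illustrative):
    #   createClient("localhost", "/").addCallback(
    #       lambda client: client.authenticate("guest", "guest"))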
    return client
| wehriam/awspider | awspider/amqp/amqp.py | Python | mit | 559 |
"""Add EmailAddress.
Revision ID: 9333436765cd
Revises: 79719ee38228
Create Date: 2020-06-11 07:31:23.089071
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9333436765cd'
down_revision = '79719ee38228'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'email_address',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column('updated_at', sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column('email', sa.Unicode(), nullable=True),
sa.Column('domain', sa.Unicode(), nullable=True),
sa.Column('blake2b160', sa.LargeBinary(), nullable=False),
sa.Column('blake2b160_canonical', sa.LargeBinary(), nullable=False),
sa.Column('delivery_state', sa.Integer(), nullable=False),
sa.Column('delivery_state_at', sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column('is_blocked', sa.Boolean(), nullable=False),
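        # The constraint below lets email/domain both be NULL, exempts punycode
        # (xn--) addresses, and otherwise requires the email to end with the
        # stored domain (with SQL LIKE wildcards in the domain escaped).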
sa.CheckConstraint(
"email IS NULL AND domain IS NULL OR"
" (email SIMILAR TO '(xn--|%.xn--)%') OR"
" email ILIKE '%' || replace(replace(domain, '_', '\\_'), '%', '\\%')",
name='email_address_email_domain_check',
),
sa.CheckConstraint('domain = lower(domain)', name='email_address_domain_check'),
sa.CheckConstraint(
'is_blocked IS NOT true OR is_blocked IS true AND email IS NULL',
name='email_address_email_is_blocked_check',
),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('blake2b160'),
)
op.create_index(
op.f('ix_email_address_blake2b160_canonical'),
'email_address',
['blake2b160_canonical'],
unique=False,
)
op.create_index(
op.f('ix_email_address_domain'), 'email_address', ['domain'], unique=False
)
def downgrade():
op.drop_index(op.f('ix_email_address_domain'), table_name='email_address')
op.drop_index(
op.f('ix_email_address_blake2b160_canonical'), table_name='email_address'
)
op.drop_table('email_address')
| hasgeek/funnel | migrations/versions/9333436765cd_add_emailaddress.py | Python | agpl-3.0 | 2,193 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
"""
VirtualBox Python Shell.
This program is a simple interactive shell for VirtualBox. You can query
information and issue commands from a simple command line.
It also provides you with examples of how to use VirtualBox's Python API.
This shell is even somewhat documented, supports TAB-completion and
history if you have Python readline installed.
Finally, shell allows arbitrary custom extensions, just create
.VirtualBox/shexts/ and drop your extensions there.
Enjoy.
P.S. Our apologies for the code quality.
"""
__copyright__ = \
"""
Copyright (C) 2009-2013 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
__version__ = "$Revision$"
import os, sys
import traceback
import shlex
import time
import re
import platform
from optparse import OptionParser
from pprint import pprint
#
# Global Variables
#
g_fBatchMode = False
g_sScriptFile = None
g_sCmd = None
g_fHasReadline = True
try:
import readline
import rlcompleter
except ImportError:
g_fHasReadline = False
g_sPrompt = "vbox> "
g_fHasColors = True
g_dTermColors = {
'red': '\033[31m',
'blue': '\033[94m',
'green': '\033[92m',
'yellow': '\033[93m',
'magenta': '\033[35m',
'cyan': '\033[36m'
}
def colored(strg, color):
"""
Translates a string to one including coloring settings, if enabled.
"""
if not g_fHasColors:
return strg
col = g_dTermColors.get(color, None)
if col:
return col+str(strg)+'\033[0m'
return strg
if g_fHasReadline:
class CompleterNG(rlcompleter.Completer):
def __init__(self, dic, ctx):
self.ctx = ctx
rlcompleter.Completer.__init__(self, dic)
def complete(self, text, state):
"""
taken from:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496812
"""
if False and text == "":
return ['\t', None][state]
else:
return rlcompleter.Completer.complete(self, text, state)
def canBePath(self, _phrase, word):
return word.startswith('/')
def canBeCommand(self, phrase, _word):
spaceIdx = phrase.find(" ")
begIdx = readline.get_begidx()
firstWord = (spaceIdx == -1 or begIdx < spaceIdx)
if firstWord:
return True
if phrase.startswith('help'):
return True
return False
def canBeMachine(self, phrase, word):
return not self.canBePath(phrase, word) and not self.canBeCommand(phrase, word)
def global_matches(self, text):
"""
Compute matches when text is a simple name.
Return a list of all names currently defined
in self.namespace that match.
"""
matches = []
phrase = readline.get_line_buffer()
try:
if self.canBePath(phrase, text):
(directory, rest) = os.path.split(text)
c = len(rest)
for word in os.listdir(directory):
if c == 0 or word[:c] == rest:
matches.append(os.path.join(directory, word))
if self.canBeCommand(phrase, text):
c = len(text)
for lst in [ self.namespace ]:
for word in lst:
if word[:c] == text:
matches.append(word)
if self.canBeMachine(phrase, text):
c = len(text)
for mach in getMachines(self.ctx, False, True):
# although it has autoconversion, we need to cast
# explicitly for subscripts to work
word = re.sub("(?<!\\\\) ", "\\ ", str(mach.name))
if word[:c] == text:
matches.append(word)
word = str(mach.id)
if word[:c] == text:
matches.append(word)
except Exception, e:
printErr(self.ctx, e)
if g_fVerbose:
traceback.print_exc()
return matches
def autoCompletion(cmds, ctx):
if not g_fHasReadline:
return
comps = {}
for (key, _value) in cmds.items():
comps[key] = None
completer = CompleterNG(comps, ctx)
readline.set_completer(completer.complete)
delims = readline.get_completer_delims()
readline.set_completer_delims(re.sub("[\\./-]", "", delims)) # remove some of the delimiters
readline.parse_and_bind("set editing-mode emacs")
# OSX need it
if platform.system() == 'Darwin':
# see http://www.certif.com/spec_help/readline.html
readline.parse_and_bind ("bind ^I rl_complete")
readline.parse_and_bind ("bind ^W ed-delete-prev-word")
# Doesn't work well
# readline.parse_and_bind ("bind ^R em-inc-search-prev")
readline.parse_and_bind("tab: complete")
g_fVerbose = False
def split_no_quotes(s):
return shlex.split(s)
def progressBar(ctx, progress, wait=1000):
try:
while not progress.completed:
print "%s %%\r" % (colored(str(progress.percent), 'red')),
sys.stdout.flush()
progress.waitForCompletion(wait)
ctx['global'].waitForEvents(0)
if int(progress.resultCode) != 0:
reportError(ctx, progress)
return 1
except KeyboardInterrupt:
print "Interrupted."
ctx['interrupt'] = True
if progress.cancelable:
print "Canceling task..."
progress.cancel()
return 0
def printErr(_ctx, e):
oVBoxMgr = _ctx['global'];
if oVBoxMgr.errIsOurXcptKind(e):
print colored('%s: %s' % (oVBoxMgr.xcptToString(e), oVBoxMgr.xcptGetMessage(e)), 'red');
else:
print colored(str(e), 'red')
def reportError(_ctx, progress):
errorinfo = progress.errorInfo
if errorinfo:
print colored("Error in module '%s': %s" % (errorinfo.component, errorinfo.text), 'red')
def colCat(_ctx, strg):
return colored(strg, 'magenta')
def colVm(_ctx, vmname):
return colored(vmname, 'blue')
def colPath(_ctx, path):
return colored(path, 'green')
def colSize(_ctx, byte):
return colored(byte, 'red')
def colPci(_ctx, pcidev):
return colored(pcidev, 'green')
def colDev(_ctx, pcidev):
return colored(pcidev, 'cyan')
def colSizeM(_ctx, mbyte):
return colored(str(mbyte)+'M', 'red')
def createVm(ctx, name, kind):
vbox = ctx['vb']
mach = vbox.createMachine("", name, [], kind, "")
mach.saveSettings()
print "created machine with UUID", mach.id
vbox.registerMachine(mach)
# update cache
getMachines(ctx, True)
def removeVm(ctx, mach):
uuid = mach.id
print "removing machine ", mach.name, "with UUID", uuid
cmdClosedVm(ctx, mach, detachVmDevice, ["ALL"])
mach = mach.unregister(ctx['global'].constants.CleanupMode_Full)
if mach:
mach.deleteSettings()
# update cache
getMachines(ctx, True)
def startVm(ctx, mach, vmtype):
vbox = ctx['vb']
perf = ctx['perf']
session = ctx['global'].getSessionObject(vbox)
progress = mach.launchVMProcess(session, vmtype, "")
if progressBar(ctx, progress, 100) and int(progress.resultCode) == 0:
# we ignore exceptions to allow starting VM even if
# perf collector cannot be started
if perf:
try:
perf.setup(['*'], [mach], 10, 15)
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
session.unlockMachine()
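# Lightweight cache of a machine's name and id so listings and tab completion
# don't have to go back to the COM/XPCOM object for every lookup; inaccessible
# machines show up as '<inaccessible>'.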
class CachedMach:
def __init__(self, mach):
if mach.accessible:
self.name = mach.name
else:
self.name = '<inaccessible>'
self.id = mach.id
def cacheMachines(_ctx, lst):
result = []
for mach in lst:
elem = CachedMach(mach)
result.append(elem)
return result
def getMachines(ctx, invalidate = False, simple=False):
if ctx['vb'] is not None:
if ctx['_machlist'] is None or invalidate:
ctx['_machlist'] = ctx['global'].getArray(ctx['vb'], 'machines')
ctx['_machlistsimple'] = cacheMachines(ctx, ctx['_machlist'])
if simple:
return ctx['_machlistsimple']
else:
return ctx['_machlist']
else:
return []
def asState(var):
if var:
return colored('on', 'green')
else:
return colored('off', 'green')
def asFlag(var):
if var:
return 'yes'
else:
return 'no'
def getFacilityStatus(ctx, guest, facilityType):
(status, _timestamp) = guest.getFacilityStatus(facilityType)
return asEnumElem(ctx, 'AdditionsFacilityStatus', status)
def perfStats(ctx, mach):
if not ctx['perf']:
return
for metric in ctx['perf'].query(["*"], [mach]):
print metric['name'], metric['values_as_string']
def guestExec(ctx, machine, console, cmds):
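    # evaluate the given Python code with ctx, machine and console in scope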
exec cmds
def printMouseEvent(_ctx, mev):
print "Mouse : mode=%d x=%d y=%d z=%d w=%d buttons=%x" % (mev.mode, mev.x, mev.y, mev.z, mev.w, mev.buttons)
def printKbdEvent(ctx, kev):
print "Kbd: ", ctx['global'].getArray(kev, 'scancodes')
def printMultiTouchEvent(ctx, mtev):
print "MultiTouch : contacts=%d time=%d" % (mtev.contactCount, mtev.scanTime)
xPositions = ctx['global'].getArray(mtev, 'xPositions')
yPositions = ctx['global'].getArray(mtev, 'yPositions')
contactIds = ctx['global'].getArray(mtev, 'contactIds')
contactFlags = ctx['global'].getArray(mtev, 'contactFlags')
for i in range(0, mtev.contactCount):
print " [%d] %d,%d %d %d" % (i, xPositions[i], yPositions[i], contactIds[i], contactFlags[i])
def monitorSource(ctx, eventSource, active, dur):
def handleEventImpl(event):
evtype = event.type
print "got event: %s %s" % (str(evtype), asEnumElem(ctx, 'VBoxEventType', evtype))
if evtype == ctx['global'].constants.VBoxEventType_OnMachineStateChanged:
scev = ctx['global'].queryInterface(event, 'IMachineStateChangedEvent')
if scev:
print "machine state event: mach=%s state=%s" % (scev.machineId, scev.state)
elif evtype == ctx['global'].constants.VBoxEventType_OnSnapshotTaken:
stev = ctx['global'].queryInterface(event, 'ISnapshotTakenEvent')
if stev:
print "snapshot taken event: mach=%s snap=%s" % (stev.machineId, stev.snapshotId)
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestPropertyChanged:
gpcev = ctx['global'].queryInterface(event, 'IGuestPropertyChangedEvent')
if gpcev:
print "guest property change: name=%s value=%s" % (gpcev.name, gpcev.value)
elif evtype == ctx['global'].constants.VBoxEventType_OnMousePointerShapeChanged:
psev = ctx['global'].queryInterface(event, 'IMousePointerShapeChangedEvent')
if psev:
shape = ctx['global'].getArray(psev, 'shape')
if shape is None:
print "pointer shape event - empty shape"
else:
print "pointer shape event: w=%d h=%d shape len=%d" % (psev.width, psev.height, len(shape))
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestMouse:
mev = ctx['global'].queryInterface(event, 'IGuestMouseEvent')
if mev:
printMouseEvent(ctx, mev)
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestKeyboard:
kev = ctx['global'].queryInterface(event, 'IGuestKeyboardEvent')
if kev:
printKbdEvent(ctx, kev)
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestMultiTouch:
mtev = ctx['global'].queryInterface(event, 'IGuestMultiTouchEvent')
if mtev:
printMultiTouchEvent(ctx, mtev)
class EventListener(object):
def __init__(self, arg):
pass
def handleEvent(self, event):
try:
# a bit convoluted QI to make it work with MS COM
handleEventImpl(ctx['global'].queryInterface(event, 'IEvent'))
except:
traceback.print_exc()
pass
if active:
listener = ctx['global'].createListener(EventListener)
else:
listener = eventSource.createListener()
registered = False
if dur == -1:
# not infinity, but close enough
dur = 100000
try:
eventSource.registerListener(listener, [ctx['global'].constants.VBoxEventType_Any], active)
registered = True
end = time.time() + dur
while time.time() < end:
if active:
ctx['global'].waitForEvents(500)
else:
event = eventSource.getEvent(listener, 500)
if event:
handleEventImpl(event)
# otherwise waitable events will leak (active listeners ACK automatically)
eventSource.eventProcessed(listener, event)
# We need to catch all exceptions here, otherwise listener will never be unregistered
except:
traceback.print_exc()
pass
if listener and registered:
eventSource.unregisterListener(listener)
g_tsLast = 0
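# Record guest keyboard and mouse events to a simple text file. After a "VM=" header,
# each line is "<ms since previous event>: m <mode> <x> <y> <z> <w> <buttons>" for
# mouse events or "<ms since previous event>: k <scancodes>" for keyboard events.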
def recordDemo(ctx, console, filename, dur):
demo = open(filename, 'w')
header = "VM=" + console.machine.name + "\n"
demo.write(header)
global g_tsLast
g_tsLast = time.time()
def stamp():
global g_tsLast
tsCur = time.time()
timePassed = int((tsCur-g_tsLast)*1000)
g_tsLast = tsCur
return timePassed
def handleEventImpl(event):
evtype = event.type
#print "got event: %s %s" % (str(evtype), asEnumElem(ctx, 'VBoxEventType', evtype))
if evtype == ctx['global'].constants.VBoxEventType_OnGuestMouse:
mev = ctx['global'].queryInterface(event, 'IGuestMouseEvent')
if mev:
line = "%d: m %d %d %d %d %d %d\n" % (stamp(), mev.mode, mev.x, mev.y, mev.z, mev.w, mev.buttons)
demo.write(line)
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestKeyboard:
kev = ctx['global'].queryInterface(event, 'IGuestKeyboardEvent')
if kev:
line = "%d: k %s\n" % (stamp(), str(ctx['global'].getArray(kev, 'scancodes')))
demo.write(line)
listener = console.eventSource.createListener()
registered = False
# we create an aggregated event source to listen for multiple event sources (keyboard and mouse in our case)
agg = console.eventSource.createAggregator([console.keyboard.eventSource, console.mouse.eventSource])
if dur == -1:
# not infinity, but close enough
dur = 100000
try:
agg.registerListener(listener, [ctx['global'].constants.VBoxEventType_Any], False)
registered = True
end = time.time() + dur
while time.time() < end:
event = agg.getEvent(listener, 1000)
if event:
handleEventImpl(event)
# keyboard/mouse events aren't waitable, so no need for eventProcessed
# We need to catch all exceptions here, otherwise listener will never be unregistered
except:
traceback.print_exc()
pass
demo.close()
if listener and registered:
agg.unregisterListener(listener)
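# Replay a demo file produced by recordDemo: sleep for the recorded delay before
# each event, then inject it through the console's keyboard and mouse.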
def playbackDemo(ctx, console, filename, dur):
demo = open(filename, 'r')
if dur == -1:
# not infinity, but close enough
dur = 100000
header = demo.readline()
print "Header is", header
basere = re.compile(r'(?P<s>\d+): (?P<t>[km]) (?P<p>.*)')
mre = re.compile(r'(?P<a>\d+) (?P<x>-*\d+) (?P<y>-*\d+) (?P<z>-*\d+) (?P<w>-*\d+) (?P<b>-*\d+)')
kre = re.compile(r'\d+')
kbd = console.keyboard
mouse = console.mouse
try:
end = time.time() + dur
for line in demo:
if time.time() > end:
break
match = basere.search(line)
if match is None:
continue
rdict = match.groupdict()
stamp = rdict['s']
params = rdict['p']
rtype = rdict['t']
time.sleep(float(stamp)/1000)
if rtype == 'k':
codes = kre.findall(params)
#print "KBD:", codes
kbd.putScancodes(codes)
elif rtype == 'm':
mm = mre.search(params)
if mm is not None:
mdict = mm.groupdict()
if mdict['a'] == '1':
# absolute
#print "MA: ", mdict['x'], mdict['y'], mdict['z'], mdict['b']
mouse.putMouseEventAbsolute(int(mdict['x']), int(mdict['y']), int(mdict['z']), int(mdict['w']), int(mdict['b']))
else:
#print "MR: ", mdict['x'], mdict['y'], mdict['b']
mouse.putMouseEvent(int(mdict['x']), int(mdict['y']), int(mdict['z']), int(mdict['w']), int(mdict['b']))
# We need to catch all exceptions here, to close file
except KeyboardInterrupt:
ctx['interrupt'] = True
except:
traceback.print_exc()
pass
demo.close()
def takeScreenshotOld(ctx, console, args):
from PIL import Image
display = console.display
if len(args) > 0:
f = args[0]
else:
f = "/tmp/screenshot.png"
if len(args) > 3:
screen = int(args[3])
else:
screen = 0
(fbw, fbh, _fbbpp, fbx, fby) = display.getScreenResolution(screen)
if len(args) > 1:
w = int(args[1])
else:
w = fbw
if len(args) > 2:
h = int(args[2])
else:
h = fbh
print "Saving screenshot (%d x %d) screen %d in %s..." % (w, h, screen, f)
data = display.takeScreenShotToArray(screen, w, h, ctx['const'].BitmapFormat_RGBA)
size = (w, h)
mode = "RGBA"
im = Image.frombuffer(mode, size, str(data), "raw", mode, 0, 1)
im.save(f, "PNG")
def takeScreenshot(ctx, console, args):
display = console.display
if len(args) > 0:
f = args[0]
else:
f = "/tmp/screenshot.png"
if len(args) > 3:
screen = int(args[3])
else:
screen = 0
(fbw, fbh, _fbbpp, fbx, fby) = display.getScreenResolution(screen)
if len(args) > 1:
w = int(args[1])
else:
w = fbw
if len(args) > 2:
h = int(args[2])
else:
h = fbh
print "Saving screenshot (%d x %d) screen %d in %s..." % (w, h, screen, f)
data = display.takeScreenShotToArray(screen, w, h, ctx['const'].BitmapFormat_PNG)
pngfile = open(f, 'wb')
pngfile.write(data)
pngfile.close()
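# Teleport (live-migrate) the running VM to a target listening on host:port;
# the target is typically prepared with the 'openportal' command below.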
def teleport(ctx, _session, console, args):
if args[0].find(":") == -1:
print "Use host:port format for teleport target"
return
(host, port) = args[0].split(":")
if len(args) > 1:
passwd = args[1]
else:
passwd = ""
if len(args) > 2:
maxDowntime = int(args[2])
else:
maxDowntime = 250
port = int(port)
print "Teleporting to %s:%d..." % (host, port)
progress = console.teleport(host, port, passwd, maxDowntime)
if progressBar(ctx, progress, 100) and int(progress.resultCode) == 0:
print "Success!"
else:
reportError(ctx, progress)
def guestStats(ctx, console, args):
guest = console.guest
# we need to set up guest statistics
    if len(args) > 0:
        update = int(args[0])
    else:
        update = 1
if guest.statisticsUpdateInterval != update:
guest.statisticsUpdateInterval = update
try:
time.sleep(float(update)+0.1)
except:
# to allow sleep interruption
pass
all_stats = ctx['const'].all_values('GuestStatisticType')
cpu = 0
for s in all_stats.keys():
try:
val = guest.getStatistic( cpu, all_stats[s])
print "%s: %d" % (s, val)
except:
# likely not implemented
pass
def plugCpu(_ctx, machine, _session, args):
cpu = int(args[0])
print "Adding CPU %d..." % (cpu)
machine.hotPlugCPU(cpu)
def unplugCpu(_ctx, machine, _session, args):
cpu = int(args[0])
print "Removing CPU %d..." % (cpu)
machine.hotUnplugCPU(cpu)
def mountIso(_ctx, machine, _session, args):
machine.mountMedium(args[0], args[1], args[2], args[3], args[4])
machine.saveSettings()
def cond(c, v1, v2):
if c:
return v1
else:
return v2
def printHostUsbDev(ctx, ud):
print " %s: %s (vendorId=%d productId=%d serial=%s) %s" % (ud.id, colored(ud.product, 'blue'), ud.vendorId, ud.productId, ud.serialNumber, asEnumElem(ctx, 'USBDeviceState', ud.state))
def printUsbDev(_ctx, ud):
print " %s: %s (vendorId=%d productId=%d serial=%s)" % (ud.id, colored(ud.product, 'blue'), ud.vendorId, ud.productId, ud.serialNumber)
def printSf(ctx, sf):
print " name=%s host=%s %s %s" % (sf.name, colPath(ctx, sf.hostPath), cond(sf.accessible, "accessible", "not accessible"), cond(sf.writable, "writable", "read-only"))
def ginfo(ctx, console, _args):
guest = console.guest
if guest.additionsRunLevel != ctx['const'].AdditionsRunLevelType_None:
print "Additions active, version %s" % (guest.additionsVersion)
print "Support seamless: %s" % (getFacilityStatus(ctx, guest, ctx['const'].AdditionsFacilityType_Seamless))
print "Support graphics: %s" % (getFacilityStatus(ctx, guest, ctx['const'].AdditionsFacilityType_Graphics))
print "Balloon size: %d" % (guest.memoryBalloonSize)
print "Statistic update interval: %d" % (guest.statisticsUpdateInterval)
else:
print "No additions"
usbs = ctx['global'].getArray(console, 'USBDevices')
print "Attached USB:"
for ud in usbs:
printUsbDev(ctx, ud)
rusbs = ctx['global'].getArray(console, 'remoteUSBDevices')
print "Remote USB:"
for ud in rusbs:
printHostUsbDev(ctx, ud)
print "Transient shared folders:"
    sfs = ctx['global'].getArray(console, 'sharedFolders')
for sf in sfs:
printSf(ctx, sf)
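# Run one of the predefined operations against a machine that is already running:
# take a shared lock on its session, dispatch through the 'ops' table below and
# always unlock the session when done.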
def cmdExistingVm(ctx, mach, cmd, args):
session = None
try:
vbox = ctx['vb']
session = ctx['global'].getSessionObject(vbox)
mach.lockMachine(session, ctx['global'].constants.LockType_Shared)
except Exception, e:
printErr(ctx, "Session to '%s' not open: %s" % (mach.name, str(e)))
if g_fVerbose:
traceback.print_exc()
return
if session.state != ctx['const'].SessionState_Locked:
print "Session to '%s' in wrong state: %s" % (mach.name, session.state)
session.unlockMachine()
return
# this could be an example how to handle local only (i.e. unavailable
# in Webservices) functionality
if ctx['remote'] and cmd == 'some_local_only_command':
print 'Trying to use local only functionality, ignored'
session.unlockMachine()
return
console = session.console
ops = {'pause': lambda: console.pause(),
'resume': lambda: console.resume(),
'powerdown': lambda: console.powerDown(),
'powerbutton': lambda: console.powerButton(),
'stats': lambda: perfStats(ctx, mach),
'guest': lambda: guestExec(ctx, mach, console, args),
'ginfo': lambda: ginfo(ctx, console, args),
'guestlambda': lambda: args[0](ctx, mach, console, args[1:]),
'save': lambda: progressBar(ctx, console.saveState()),
'screenshot': lambda: takeScreenshot(ctx, console, args),
'teleport': lambda: teleport(ctx, session, console, args),
'gueststats': lambda: guestStats(ctx, console, args),
'plugcpu': lambda: plugCpu(ctx, session.machine, session, args),
'unplugcpu': lambda: unplugCpu(ctx, session.machine, session, args),
'mountiso': lambda: mountIso(ctx, session.machine, session, args),
}
try:
ops[cmd]()
except KeyboardInterrupt:
ctx['interrupt'] = True
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
session.unlockMachine()
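# Open a write session on a (normally powered-off) machine, run 'cmd' against the
# mutable IMachine and save the settings unless the command raised an exception.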
def cmdClosedVm(ctx, mach, cmd, args=[], save=True):
session = ctx['global'].openMachineSession(mach, True)
mach = session.machine
try:
cmd(ctx, mach, args)
except Exception, e:
save = False
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
if save:
try:
mach.saveSettings()
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
ctx['global'].closeMachineSession(session)
def cmdAnyVm(ctx, mach, cmd, args=[], save=False):
session = ctx['global'].openMachineSession(mach)
mach = session.machine
try:
cmd(ctx, mach, session.console, args)
except Exception, e:
save = False
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
if save:
mach.saveSettings()
ctx['global'].closeMachineSession(session)
def machById(ctx, uuid):
try:
mach = ctx['vb'].getMachine(uuid)
except:
mach = ctx['vb'].findMachine(uuid)
return mach
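# Minimal XPath-like navigation over VirtualBox objects, used by eval_xpath() below.
# Paths look like //vms/vm[@name='myvm']; each node type provides enum() to list its
# children and matches() to test a single path component against itself.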
class XPathNode:
def __init__(self, parent, obj, ntype):
self.parent = parent
self.obj = obj
self.ntype = ntype
def lookup(self, subpath):
children = self.enum()
matches = []
for e in children:
if e.matches(subpath):
matches.append(e)
return matches
def enum(self):
return []
def matches(self, subexp):
if subexp == self.ntype:
return True
if not subexp.startswith(self.ntype):
return False
match = re.search(r"@(?P<a>\w+)=(?P<v>[^\'\[\]]+)", subexp)
matches = False
try:
if match is not None:
xdict = match.groupdict()
attr = xdict['a']
val = xdict['v']
matches = (str(getattr(self.obj, attr)) == val)
except:
pass
return matches
def apply(self, cmd):
exec(cmd, {'obj':self.obj, 'node':self, 'ctx':self.getCtx()}, {})
def getCtx(self):
if hasattr(self, 'ctx'):
return self.ctx
return self.parent.getCtx()
class XPathNodeHolder(XPathNode):
def __init__(self, parent, obj, attr, heldClass, xpathname):
XPathNode.__init__(self, parent, obj, 'hld '+xpathname)
self.attr = attr
self.heldClass = heldClass
self.xpathname = xpathname
def enum(self):
children = []
for node in self.getCtx()['global'].getArray(self.obj, self.attr):
nodexml = self.heldClass(self, node)
children.append(nodexml)
return children
def matches(self, subexp):
return subexp == self.xpathname
class XPathNodeValue(XPathNode):
def __init__(self, parent, obj, xpathname):
XPathNode.__init__(self, parent, obj, 'val '+xpathname)
self.xpathname = xpathname
def matches(self, subexp):
return subexp == self.xpathname
class XPathNodeHolderVM(XPathNodeHolder):
def __init__(self, parent, vbox):
XPathNodeHolder.__init__(self, parent, vbox, 'machines', XPathNodeVM, 'vms')
class XPathNodeVM(XPathNode):
def __init__(self, parent, obj):
XPathNode.__init__(self, parent, obj, 'vm')
#def matches(self, subexp):
# return subexp=='vm'
def enum(self):
return [XPathNodeHolderNIC(self, self.obj),
XPathNodeValue(self, self.obj.BIOSSettings, 'bios'), ]
class XPathNodeHolderNIC(XPathNodeHolder):
def __init__(self, parent, mach):
XPathNodeHolder.__init__(self, parent, mach, 'nics', XPathNodeVM, 'nics')
self.maxNic = self.getCtx()['vb'].systemProperties.getMaxNetworkAdapters(self.obj.chipsetType)
def enum(self):
children = []
for i in range(0, self.maxNic):
node = XPathNodeNIC(self, self.obj.getNetworkAdapter(i))
children.append(node)
return children
class XPathNodeNIC(XPathNode):
def __init__(self, parent, obj):
XPathNode.__init__(self, parent, obj, 'nic')
def matches(self, subexp):
return subexp == 'nic'
class XPathNodeRoot(XPathNode):
def __init__(self, ctx):
XPathNode.__init__(self, None, None, 'root')
self.ctx = ctx
def enum(self):
return [XPathNodeHolderVM(self, self.ctx['vb'])]
def matches(self, subexp):
return True
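# Resolve an XPath-like expression against the object tree: for every path component,
# keep the child nodes that match it and return the surviving nodes (an empty list if
# nothing matched).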
def eval_xpath(ctx, scope):
pathnames = scope.split("/")[2:]
nodes = [XPathNodeRoot(ctx)]
for path in pathnames:
seen = []
while len(nodes) > 0:
node = nodes.pop()
seen.append(node)
for s in seen:
matches = s.lookup(path)
for match in matches:
nodes.append(match)
if len(nodes) == 0:
break
return nodes
def argsToMach(ctx, args):
if len(args) < 2:
print "usage: %s [vmname|uuid]" % (args[0])
return None
uuid = args[1]
mach = machById(ctx, uuid)
if mach == None:
print "Machine '%s' is unknown, use list command to find available machines" % (uuid)
return mach
def helpSingleCmd(cmd, h, sp):
if sp != 0:
spec = " [ext from "+sp+"]"
else:
spec = ""
print " %s: %s%s" % (colored(cmd, 'blue'), h, spec)
def helpCmd(_ctx, args):
if len(args) == 1:
print "Help page:"
names = commands.keys()
names.sort()
for i in names:
helpSingleCmd(i, commands[i][0], commands[i][2])
else:
cmd = args[1]
c = commands.get(cmd)
if c == None:
print "Command '%s' not known" % (cmd)
else:
helpSingleCmd(cmd, c[0], c[2])
return 0
def asEnumElem(ctx, enum, elem):
enumVals = ctx['const'].all_values(enum)
for e in enumVals.keys():
if str(elem) == str(enumVals[e]):
return colored(e, 'green')
return colored("<unknown>", 'green')
def enumFromString(ctx, enum, strg):
enumVals = ctx['const'].all_values(enum)
return enumVals.get(strg, None)
def listCmd(ctx, _args):
for mach in getMachines(ctx, True):
try:
if mach.teleporterEnabled:
tele = "[T] "
else:
tele = " "
print "%sMachine '%s' [%s], machineState=%s, sessionState=%s" % (tele, colVm(ctx, mach.name), mach.id, asEnumElem(ctx, "MachineState", mach.state), asEnumElem(ctx, "SessionState", mach.sessionState))
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
return 0
def infoCmd(ctx, args):
if (len(args) < 2):
print "usage: info [vmname|uuid]"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
vmos = ctx['vb'].getGuestOSType(mach.OSTypeId)
print " One can use setvar <mach> <var> <value> to change variable, using name in []."
print " Name [name]: %s" % (colVm(ctx, mach.name))
print " Description [description]: %s" % (mach.description)
print " ID [n/a]: %s" % (mach.id)
print " OS Type [via OSTypeId]: %s" % (vmos.description)
print " Firmware [firmwareType]: %s (%s)" % (asEnumElem(ctx, "FirmwareType", mach.firmwareType), mach.firmwareType)
print
print " CPUs [CPUCount]: %d" % (mach.CPUCount)
print " RAM [memorySize]: %dM" % (mach.memorySize)
print " VRAM [VRAMSize]: %dM" % (mach.VRAMSize)
print " Monitors [monitorCount]: %d" % (mach.monitorCount)
print " Chipset [chipsetType]: %s (%s)" % (asEnumElem(ctx, "ChipsetType", mach.chipsetType), mach.chipsetType)
print
print " Clipboard mode [clipboardMode]: %s (%s)" % (asEnumElem(ctx, "ClipboardMode", mach.clipboardMode), mach.clipboardMode)
print " Machine status [n/a]: %s (%s)" % (asEnumElem(ctx, "SessionState", mach.sessionState), mach.sessionState)
print
if mach.teleporterEnabled:
print " Teleport target on port %d (%s)" % (mach.teleporterPort, mach.teleporterPassword)
print
bios = mach.BIOSSettings
print " ACPI [BIOSSettings.ACPIEnabled]: %s" % (asState(bios.ACPIEnabled))
print " APIC [BIOSSettings.IOAPICEnabled]: %s" % (asState(bios.IOAPICEnabled))
hwVirtEnabled = mach.getHWVirtExProperty(ctx['global'].constants.HWVirtExPropertyType_Enabled)
print " Hardware virtualization [guest win machine.setHWVirtExProperty(ctx[\\'const\\'].HWVirtExPropertyType_Enabled, value)]: " + asState(hwVirtEnabled)
hwVirtVPID = mach.getHWVirtExProperty(ctx['const'].HWVirtExPropertyType_VPID)
print " VPID support [guest win machine.setHWVirtExProperty(ctx[\\'const\\'].HWVirtExPropertyType_VPID, value)]: " + asState(hwVirtVPID)
hwVirtNestedPaging = mach.getHWVirtExProperty(ctx['const'].HWVirtExPropertyType_NestedPaging)
print " Nested paging [guest win machine.setHWVirtExProperty(ctx[\\'const\\'].HWVirtExPropertyType_NestedPaging, value)]: " + asState(hwVirtNestedPaging)
print " Hardware 3d acceleration [accelerate3DEnabled]: " + asState(mach.accelerate3DEnabled)
print " Hardware 2d video acceleration [accelerate2DVideoEnabled]: " + asState(mach.accelerate2DVideoEnabled)
print " Use universal time [RTCUseUTC]: %s" % (asState(mach.RTCUseUTC))
print " HPET [HPETEnabled]: %s" % (asState(mach.HPETEnabled))
if mach.audioAdapter.enabled:
print " Audio [via audioAdapter]: chip %s; host driver %s" % (asEnumElem(ctx, "AudioControllerType", mach.audioAdapter.audioController), asEnumElem(ctx, "AudioDriverType", mach.audioAdapter.audioDriver))
print " CPU hotplugging [CPUHotPlugEnabled]: %s" % (asState(mach.CPUHotPlugEnabled))
print " Keyboard [keyboardHIDType]: %s (%s)" % (asEnumElem(ctx, "KeyboardHIDType", mach.keyboardHIDType), mach.keyboardHIDType)
print " Pointing device [pointingHIDType]: %s (%s)" % (asEnumElem(ctx, "PointingHIDType", mach.pointingHIDType), mach.pointingHIDType)
print " Last changed [n/a]: " + time.asctime(time.localtime(long(mach.lastStateChange)/1000))
# OSE has no VRDE
try:
print " VRDE server [VRDEServer.enabled]: %s" % (asState(mach.VRDEServer.enabled))
except:
pass
print
print colCat(ctx, " USB Controllers:")
for oUsbCtrl in ctx['global'].getArray(mach, 'USBControllers'):
print " '%s': type %s standard: %#x" \
            % (oUsbCtrl.name, asEnumElem(ctx, "USBControllerType", oUsbCtrl.type), oUsbCtrl.USBStandard)
print
print colCat(ctx, " I/O subsystem info:")
print " Cache enabled [IOCacheEnabled]: %s" % (asState(mach.IOCacheEnabled))
print " Cache size [IOCacheSize]: %dM" % (mach.IOCacheSize)
controllers = ctx['global'].getArray(mach, 'storageControllers')
if controllers:
print
print colCat(ctx, " Storage Controllers:")
for controller in controllers:
print " '%s': bus %s type %s" % (controller.name, asEnumElem(ctx, "StorageBus", controller.bus), asEnumElem(ctx, "StorageControllerType", controller.controllerType))
attaches = ctx['global'].getArray(mach, 'mediumAttachments')
if attaches:
print
print colCat(ctx, " Media:")
for a in attaches:
print " Controller: '%s' port/device: %d:%d type: %s (%s):" % (a.controller, a.port, a.device, asEnumElem(ctx, "DeviceType", a.type), a.type)
medium = a.medium
if a.type == ctx['global'].constants.DeviceType_HardDisk:
print " HDD:"
print " Id: %s" % (medium.id)
print " Location: %s" % (colPath(ctx, medium.location))
print " Name: %s" % (medium.name)
print " Format: %s" % (medium.format)
if a.type == ctx['global'].constants.DeviceType_DVD:
print " DVD:"
if medium:
print " Id: %s" % (medium.id)
print " Name: %s" % (medium.name)
if medium.hostDrive:
print " Host DVD %s" % (colPath(ctx, medium.location))
if a.passthrough:
print " [passthrough mode]"
else:
print " Virtual image at %s" % (colPath(ctx, medium.location))
print " Size: %s" % (medium.size)
if a.type == ctx['global'].constants.DeviceType_Floppy:
print " Floppy:"
if medium:
print " Id: %s" % (medium.id)
print " Name: %s" % (medium.name)
if medium.hostDrive:
print " Host floppy %s" % (colPath(ctx, medium.location))
else:
print " Virtual image at %s" % (colPath(ctx, medium.location))
print " Size: %s" % (medium.size)
print
print colCat(ctx, " Shared folders:")
for sf in ctx['global'].getArray(mach, 'sharedFolders'):
printSf(ctx, sf)
return 0
def startCmd(ctx, args):
if len(args) < 2:
print "usage: start name <frontend>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if len(args) > 2:
vmtype = args[2]
else:
vmtype = "gui"
startVm(ctx, mach, vmtype)
return 0
def createVmCmd(ctx, args):
if (len(args) != 3):
print "usage: createvm name ostype"
return 0
name = args[1]
oskind = args[2]
try:
ctx['vb'].getGuestOSType(oskind)
except Exception:
print 'Unknown OS type:', oskind
return 0
createVm(ctx, name, oskind)
return 0
def ginfoCmd(ctx, args):
if (len(args) < 2):
print "usage: ginfo [vmname|uuid]"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'ginfo', '')
return 0
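# Start a program in the guest through the guest control API: create a guest session
# for the given credentials, launch the process and shuttle stdin/stdout between the
# guest and the optional host-side pipes.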
def execInGuest(ctx, console, args, env, user, passwd, tmo, inputPipe=None, outputPipe=None):
if len(args) < 1:
print "exec in guest needs at least program name"
return
guest = console.guest
guestSession = guest.createSession(user, passwd, "", "vboxshell guest exec")
# shall contain program name as argv[0]
gargs = args
print "executing %s with args %s as %s" % (args[0], gargs, user)
flags = 0
if inputPipe is not None:
flags = 1 # set WaitForProcessStartOnly
print args[0]
process = guestSession.processCreate(args[0], gargs, env, [], tmo)
print "executed with pid %d" % (process.PID)
if pid != 0:
try:
while True:
if inputPipe is not None:
indata = inputPipe(ctx)
if indata is not None:
write = len(indata)
off = 0
while write > 0:
w = guest.setProcessInput(pid, 0, 10*1000, indata[off:])
off = off + w
write = write - w
else:
# EOF
try:
guest.setProcessInput(pid, 1, 10*1000, " ")
except:
pass
data = guest.getProcessOutput(pid, 0, 10000, 4096)
if data and len(data) > 0:
sys.stdout.write(data)
continue
progress.waitForCompletion(100)
ctx['global'].waitForEvents(0)
data = guest.getProcessOutput(pid, 0, 0, 4096)
if data and len(data) > 0:
if outputPipe is not None:
outputPipe(ctx, data)
else:
sys.stdout.write(data)
continue
if progress.completed:
break
except KeyboardInterrupt:
print "Interrupted."
ctx['interrupt'] = True
if progress.cancelable:
progress.cancel()
(_reason, code, _flags) = guest.getProcessStatus(pid)
print "Exit code: %d" % (code)
return 0
else:
reportError(ctx, progress)
def copyToGuest(ctx, console, args, user, passwd):
src = args[0]
dst = args[1]
flags = 0
print "Copying host %s to guest %s" % (src, dst)
progress = console.guest.copyToGuest(src, dst, user, passwd, flags)
progressBar(ctx, progress)
def nh_raw_input(prompt=""):
stream = sys.stdout
prompt = str(prompt)
if prompt:
stream.write(prompt)
line = sys.stdin.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
def getCred(_ctx):
import getpass
user = getpass.getuser()
user_inp = nh_raw_input("User (%s): " % (user))
if len (user_inp) > 0:
user = user_inp
passwd = getpass.getpass()
return (user, passwd)
def gexecCmd(ctx, args):
if (len(args) < 2):
print "usage: gexec [vmname|uuid] command args"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
gargs = args[2:]
env = [] # ["DISPLAY=:0"]
(user, passwd) = getCred(ctx)
gargs.insert(0, lambda ctx, mach, console, args: execInGuest(ctx, console, args, env, user, passwd, 10000))
cmdExistingVm(ctx, mach, 'guestlambda', gargs)
return 0
def gcopyCmd(ctx, args):
if (len(args) < 2):
print "usage: gcopy [vmname|uuid] host_path guest_path"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
gargs = args[2:]
(user, passwd) = getCred(ctx)
gargs.insert(0, lambda ctx, mach, console, args: copyToGuest(ctx, console, args, user, passwd))
cmdExistingVm(ctx, mach, 'guestlambda', gargs)
return 0
def readCmdPipe(ctx, _hcmd):
try:
return ctx['process'].communicate()[0]
except:
return None
def gpipeCmd(ctx, args):
if (len(args) < 4):
print "usage: gpipe [vmname|uuid] hostProgram guestProgram, such as gpipe linux '/bin/uname -a' '/bin/sh -c \"/usr/bin/tee; /bin/uname -a\"'"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
hcmd = args[2]
gcmd = args[3]
(user, passwd) = getCred(ctx)
import subprocess
ctx['process'] = subprocess.Popen(split_no_quotes(hcmd), stdout=subprocess.PIPE)
gargs = split_no_quotes(gcmd)
env = []
gargs.insert(0, lambda ctx, mach, console, args: execInGuest(ctx, console, args, env, user, passwd, 10000, lambda ctx:readCmdPipe(ctx, hcmd)))
cmdExistingVm(ctx, mach, 'guestlambda', gargs)
try:
ctx['process'].terminate()
except:
pass
ctx['process'] = None
return 0
def removeVmCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
removeVm(ctx, mach)
return 0
def pauseCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'pause', '')
return 0
def powerdownCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'powerdown', '')
return 0
def powerbuttonCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'powerbutton', '')
return 0
def resumeCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'resume', '')
return 0
def saveCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'save', '')
return 0
def statsCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'stats', '')
return 0
def guestCmd(ctx, args):
if (len(args) < 3):
print "usage: guest name commands"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if mach.state != ctx['const'].MachineState_Running:
cmdClosedVm(ctx, mach, lambda ctx, mach, a: guestExec (ctx, mach, None, ' '.join(args[2:])))
else:
cmdExistingVm(ctx, mach, 'guest', ' '.join(args[2:]))
return 0
def screenshotCmd(ctx, args):
if (len(args) < 2):
print "usage: screenshot vm <file> <width> <height> <monitor>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'screenshot', args[2:])
return 0
def teleportCmd(ctx, args):
if (len(args) < 3):
print "usage: teleport name host:port <password>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'teleport', args[2:])
return 0
def portalsettings(_ctx, mach, args):
enabled = args[0]
mach.teleporterEnabled = enabled
if enabled:
port = args[1]
passwd = args[2]
mach.teleporterPort = port
mach.teleporterPassword = passwd
def openportalCmd(ctx, args):
if (len(args) < 3):
print "usage: openportal name port <password>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
port = int(args[2])
if (len(args) > 3):
passwd = args[3]
else:
passwd = ""
if not mach.teleporterEnabled or mach.teleporterPort != port or passwd:
cmdClosedVm(ctx, mach, portalsettings, [True, port, passwd])
startVm(ctx, mach, "gui")
return 0
def closeportalCmd(ctx, args):
if (len(args) < 2):
print "usage: closeportal name"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if mach.teleporterEnabled:
cmdClosedVm(ctx, mach, portalsettings, [False])
return 0
def gueststatsCmd(ctx, args):
if (len(args) < 2):
print "usage: gueststats name <check interval>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'gueststats', args[2:])
return 0
def plugcpu(_ctx, mach, args):
plug = args[0]
cpu = args[1]
if plug:
print "Adding CPU %d..." % (cpu)
mach.hotPlugCPU(cpu)
else:
print "Removing CPU %d..." % (cpu)
mach.hotUnplugCPU(cpu)
def plugcpuCmd(ctx, args):
if (len(args) < 2):
print "usage: plugcpu name cpuid"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if str(mach.sessionState) != str(ctx['const'].SessionState_Locked):
if mach.CPUHotPlugEnabled:
cmdClosedVm(ctx, mach, plugcpu, [True, int(args[2])])
else:
cmdExistingVm(ctx, mach, 'plugcpu', args[2])
return 0
def unplugcpuCmd(ctx, args):
if (len(args) < 2):
print "usage: unplugcpu name cpuid"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if str(mach.sessionState) != str(ctx['const'].SessionState_Locked):
if mach.CPUHotPlugEnabled:
cmdClosedVm(ctx, mach, plugcpu, [False, int(args[2])])
else:
cmdExistingVm(ctx, mach, 'unplugcpu', args[2])
return 0
def setvar(_ctx, _mach, args):
expr = 'mach.'+args[0]+' = '+args[1]
print "Executing", expr
exec expr
def setvarCmd(ctx, args):
if (len(args) < 4):
print "usage: setvar [vmname|uuid] expr value"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdClosedVm(ctx, mach, setvar, args[2:])
return 0
def setvmextra(_ctx, mach, args):
key = args[0]
value = args[1]
print "%s: setting %s to %s" % (mach.name, key, value)
mach.setExtraData(key, value)
def setExtraDataCmd(ctx, args):
if (len(args) < 3):
print "usage: setextra [vmname|uuid|global] key <value>"
return 0
key = args[2]
if len(args) == 4:
value = args[3]
else:
value = None
if args[1] == 'global':
ctx['vb'].setExtraData(key, value)
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdClosedVm(ctx, mach, setvmextra, [key, value])
return 0
def printExtraKey(obj, key, value):
print "%s: '%s' = '%s'" % (obj, key, value)
def getExtraDataCmd(ctx, args):
if (len(args) < 2):
print "usage: getextra [vmname|uuid|global] <key>"
return 0
if len(args) == 3:
key = args[2]
else:
key = None
if args[1] == 'global':
obj = ctx['vb']
else:
obj = argsToMach(ctx, args)
if obj == None:
return 0
if key == None:
keys = obj.getExtraDataKeys()
else:
keys = [ key ]
for k in keys:
printExtraKey(args[1], k, obj.getExtraData(k))
return 0
def quitCmd(_ctx, _args):
return 1
def aliasCmd(ctx, args):
if (len(args) == 3):
aliases[args[1]] = args[2]
return 0
for (key, value) in aliases.items():
print "'%s' is an alias for '%s'" % (key, value)
return 0
def verboseCmd(ctx, args):
global g_fVerbose
if (len(args) > 1):
g_fVerbose = (args[1]=='on')
else:
g_fVerbose = not g_fVerbose
return 0
def colorsCmd(ctx, args):
global g_fHasColors
if (len(args) > 1):
g_fHasColors = (args[1] == 'on')
else:
g_fHasColors = not g_fHasColors
return 0
def hostCmd(ctx, args):
vbox = ctx['vb']
try:
print "VirtualBox version %s" % (colored(vbox.version, 'blue'))
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
props = vbox.systemProperties
print "Machines: %s" % (colPath(ctx, props.defaultMachineFolder))
#print "Global shared folders:"
#for ud in ctx['global'].getArray(vbox, 'sharedFolders'):
# printSf(ctx, sf)
host = vbox.host
cnt = host.processorCount
print colCat(ctx, "Processors:")
print " available/online: %d/%d " % (cnt, host.processorOnlineCount)
for i in range(0, cnt):
print " processor #%d speed: %dMHz %s" % (i, host.getProcessorSpeed(i), host.getProcessorDescription(i))
print colCat(ctx, "RAM:")
print " %dM (free %dM)" % (host.memorySize, host.memoryAvailable)
print colCat(ctx, "OS:")
print " %s (%s)" % (host.operatingSystem, host.OSVersion)
if host.acceleration3DAvailable:
print colCat(ctx, "3D acceleration available")
else:
print colCat(ctx, "3D acceleration NOT available")
print colCat(ctx, "Network interfaces:")
for ni in ctx['global'].getArray(host, 'networkInterfaces'):
print " %s (%s)" % (ni.name, ni.IPAddress)
print colCat(ctx, "DVD drives:")
for dd in ctx['global'].getArray(host, 'DVDDrives'):
print " %s - %s" % (dd.name, dd.description)
print colCat(ctx, "Floppy drives:")
for dd in ctx['global'].getArray(host, 'floppyDrives'):
print " %s - %s" % (dd.name, dd.description)
print colCat(ctx, "USB devices:")
for ud in ctx['global'].getArray(host, 'USBDevices'):
printHostUsbDev(ctx, ud)
if ctx['perf']:
for metric in ctx['perf'].query(["*"], [host]):
print metric['name'], metric['values_as_string']
return 0
def monitorGuestCmd(ctx, args):
if (len(args) < 2):
print "usage: monitorGuest name (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
dur = 5
if len(args) > 2:
dur = float(args[2])
active = False
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: monitorSource(ctx, console.eventSource, active, dur)])
return 0
def monitorGuestKbdCmd(ctx, args):
if (len(args) < 2):
print "usage: monitorGuestKbd name (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
dur = 5
if len(args) > 2:
dur = float(args[2])
active = False
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: monitorSource(ctx, console.keyboard.eventSource, active, dur)])
return 0
def monitorGuestMouseCmd(ctx, args):
if (len(args) < 2):
print "usage: monitorGuestMouse name (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
dur = 5
if len(args) > 2:
dur = float(args[2])
active = False
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: monitorSource(ctx, console.mouse.eventSource, active, dur)])
return 0
def monitorGuestMultiTouchCmd(ctx, args):
if (len(args) < 2):
print "usage: monitorGuestMultiTouch name (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
dur = 5
if len(args) > 2:
dur = float(args[2])
active = False
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: monitorSource(ctx, console.mouse.eventSource, active, dur)])
return 0
def monitorVBoxCmd(ctx, args):
if (len(args) > 2):
print "usage: monitorVBox (duration)"
return 0
dur = 5
if len(args) > 1:
dur = float(args[1])
vbox = ctx['vb']
active = False
monitorSource(ctx, vbox.eventSource, active, dur)
return 0
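# Map a NetworkAdapterType enum value to the device name used in the
# VBoxInternal/Devices extradata paths (pcnet, e1000 or virtio).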
def getAdapterType(ctx, natype):
if (natype == ctx['global'].constants.NetworkAdapterType_Am79C970A or
natype == ctx['global'].constants.NetworkAdapterType_Am79C973):
return "pcnet"
elif (natype == ctx['global'].constants.NetworkAdapterType_I82540EM or
natype == ctx['global'].constants.NetworkAdapterType_I82545EM or
natype == ctx['global'].constants.NetworkAdapterType_I82543GC):
return "e1000"
elif (natype == ctx['global'].constants.NetworkAdapterType_Virtio):
return "virtio"
elif (natype == ctx['global'].constants.NetworkAdapterType_Null):
return None
else:
raise Exception("Unknown adapter type: "+natype)
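# Configure a NAT port-forwarding rule by writing the
# VBoxInternal/Devices/<driver>/<slot>/LUN#0/Config/<profile> extradata keys.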
def portForwardCmd(ctx, args):
if (len(args) != 5):
print "usage: portForward <vm> <adapter> <hostPort> <guestPort>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
adapterNum = int(args[2])
hostPort = int(args[3])
guestPort = int(args[4])
proto = "TCP"
session = ctx['global'].openMachineSession(mach)
mach = session.machine
adapter = mach.getNetworkAdapter(adapterNum)
adapterType = getAdapterType(ctx, adapter.adapterType)
profile_name = proto+"_"+str(hostPort)+"_"+str(guestPort)
config = "VBoxInternal/Devices/" + adapterType + "/"
config = config + str(adapter.slot) +"/LUN#0/Config/" + profile_name
mach.setExtraData(config + "/Protocol", proto)
mach.setExtraData(config + "/HostPort", str(hostPort))
mach.setExtraData(config + "/GuestPort", str(guestPort))
mach.saveSettings()
session.unlockMachine()
return 0
def showLogCmd(ctx, args):
if (len(args) < 2):
print "usage: showLog vm <num>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
log = 0
if (len(args) > 2):
log = args[2]
uOffset = 0
while True:
data = mach.readLog(log, uOffset, 4096)
if (len(data) == 0):
break
# print adds either NL or space to chunks not ending with a NL
sys.stdout.write(str(data))
uOffset += len(data)
return 0
def findLogCmd(ctx, args):
if (len(args) < 3):
print "usage: findLog vm pattern <num>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
log = 0
if (len(args) > 3):
log = args[3]
pattern = args[2]
uOffset = 0
while True:
# to reduce line splits on buffer boundary
data = mach.readLog(log, uOffset, 512*1024)
if (len(data) == 0):
break
d = str(data).split("\n")
for s in d:
match = re.findall(pattern, s)
if len(match) > 0:
for mt in match:
s = s.replace(mt, colored(mt, 'red'))
print s
uOffset += len(data)
return 0
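# Scan a VM log for assertion markers ('Expression:' or '!!!!!!') and print each hit
# together with the following 50 lines of context.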
def findAssertCmd(ctx, args):
if (len(args) < 2):
print "usage: findAssert vm <num>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
log = 0
if (len(args) > 2):
log = args[2]
uOffset = 0
ere = re.compile(r'(Expression:|\!\!\!\!\!\!)')
active = False
context = 0
while True:
# to reduce line splits on buffer boundary
data = mach.readLog(log, uOffset, 512*1024)
if (len(data) == 0):
break
d = str(data).split("\n")
for s in d:
if active:
print s
if context == 0:
active = False
else:
context = context - 1
continue
match = ere.findall(s)
if len(match) > 0:
active = True
context = 50
print s
uOffset += len(data)
return 0
def evalCmd(ctx, args):
expr = ' '.join(args[1:])
try:
exec expr
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
return 0
def reloadExtCmd(ctx, args):
# maybe will want more args smartness
checkUserExtensions(ctx, commands, getHomeFolder(ctx))
autoCompletion(commands, ctx)
return 0
def runScriptCmd(ctx, args):
if (len(args) != 2):
print "usage: runScript <script>"
return 0
try:
lf = open(args[1], 'r')
except IOError, e:
print "cannot open:", args[1], ":", e
return 0
try:
lines = lf.readlines()
ctx['scriptLine'] = 0
ctx['interrupt'] = False
while ctx['scriptLine'] < len(lines):
line = lines[ctx['scriptLine']]
ctx['scriptLine'] = ctx['scriptLine'] + 1
done = runCommand(ctx, line)
if done != 0 or ctx['interrupt']:
break
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
lf.close()
return 0
def sleepCmd(ctx, args):
if (len(args) != 2):
print "usage: sleep <secs>"
return 0
try:
time.sleep(float(args[1]))
except:
# to allow sleep interrupt
pass
return 0
def shellCmd(ctx, args):
if (len(args) < 2):
print "usage: shell <commands>"
return 0
cmd = ' '.join(args[1:])
try:
os.system(cmd)
except KeyboardInterrupt:
# to allow shell command interruption
pass
return 0
def connectCmd(ctx, args):
if (len(args) > 4):
print "usage: connect url <username> <passwd>"
return 0
if ctx['vb'] is not None:
print "Already connected, disconnect first..."
return 0
if (len(args) > 1):
url = args[1]
else:
url = None
if (len(args) > 2):
user = args[2]
else:
user = ""
if (len(args) > 3):
passwd = args[3]
else:
passwd = ""
ctx['wsinfo'] = [url, user, passwd]
vbox = ctx['global'].platform.connect(url, user, passwd)
ctx['vb'] = vbox
try:
print "Running VirtualBox version %s" % (vbox.version)
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
ctx['perf'] = ctx['global'].getPerfCollector(ctx['vb'])
return 0
def disconnectCmd(ctx, args):
if (len(args) != 1):
print "usage: disconnect"
return 0
if ctx['vb'] is None:
print "Not connected yet."
return 0
try:
ctx['global'].platform.disconnect()
except:
ctx['vb'] = None
raise
ctx['vb'] = None
return 0
def reconnectCmd(ctx, args):
if ctx['wsinfo'] is None:
print "Never connected..."
return 0
try:
ctx['global'].platform.disconnect()
except:
pass
[url, user, passwd] = ctx['wsinfo']
ctx['vb'] = ctx['global'].platform.connect(url, user, passwd)
try:
print "Running VirtualBox version %s" % (ctx['vb'].version)
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
return 0
def exportVMCmd(ctx, args):
if len(args) < 3:
print "usage: exportVm <machine> <path> <format> <license>"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
path = args[2]
if (len(args) > 3):
fmt = args[3]
else:
fmt = "ovf-1.0"
if (len(args) > 4):
lic = args[4]
else:
lic = "GPL"
app = ctx['vb'].createAppliance()
desc = mach.export(app)
desc.addDescription(ctx['global'].constants.VirtualSystemDescriptionType_License, lic, "")
progress = app.write(fmt, path)
if (progressBar(ctx, progress) and int(progress.resultCode) == 0):
print "Exported to %s in format %s" % (path, fmt)
else:
reportError(ctx, progress)
return 0
# PC XT scancodes
scancodes = {
'a': 0x1e,
'b': 0x30,
'c': 0x2e,
'd': 0x20,
'e': 0x12,
'f': 0x21,
'g': 0x22,
'h': 0x23,
'i': 0x17,
'j': 0x24,
'k': 0x25,
'l': 0x26,
'm': 0x32,
'n': 0x31,
'o': 0x18,
'p': 0x19,
'q': 0x10,
'r': 0x13,
's': 0x1f,
't': 0x14,
'u': 0x16,
'v': 0x2f,
'w': 0x11,
'x': 0x2d,
'y': 0x15,
'z': 0x2c,
'0': 0x0b,
'1': 0x02,
'2': 0x03,
'3': 0x04,
'4': 0x05,
'5': 0x06,
'6': 0x07,
'7': 0x08,
'8': 0x09,
'9': 0x0a,
' ': 0x39,
'-': 0xc,
'=': 0xd,
'[': 0x1a,
']': 0x1b,
';': 0x27,
'\'': 0x28,
',': 0x33,
'.': 0x34,
'/': 0x35,
'\t': 0xf,
'\n': 0x1c,
'`': 0x29
}
extScancodes = {
'ESC' : [0x01],
'BKSP': [0xe],
'SPACE': [0x39],
'TAB': [0x0f],
'CAPS': [0x3a],
'ENTER': [0x1c],
'LSHIFT': [0x2a],
'RSHIFT': [0x36],
'INS': [0xe0, 0x52],
'DEL': [0xe0, 0x53],
'END': [0xe0, 0x4f],
'HOME': [0xe0, 0x47],
'PGUP': [0xe0, 0x49],
'PGDOWN': [0xe0, 0x51],
'LGUI': [0xe0, 0x5b], # GUI, aka Win, aka Apple key
'RGUI': [0xe0, 0x5c],
'LCTR': [0x1d],
'RCTR': [0xe0, 0x1d],
'LALT': [0x38],
'RALT': [0xe0, 0x38],
'APPS': [0xe0, 0x5d],
'F1': [0x3b],
'F2': [0x3c],
'F3': [0x3d],
'F4': [0x3e],
'F5': [0x3f],
'F6': [0x40],
'F7': [0x41],
'F8': [0x42],
'F9': [0x43],
'F10': [0x44 ],
'F11': [0x57],
'F12': [0x58],
'UP': [0xe0, 0x48],
'LEFT': [0xe0, 0x4b],
'DOWN': [0xe0, 0x50],
'RIGHT': [0xe0, 0x4d],
}
def keyDown(ch):
code = scancodes.get(ch, 0x0)
if code != 0:
return [code]
extCode = extScancodes.get(ch, [])
if len(extCode) == 0:
print "bad ext", ch
return extCode
def keyUp(ch):
codes = keyDown(ch)[:] # make a copy
if len(codes) > 0:
codes[len(codes)-1] += 0x80
return codes
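# Type text into the guest via scancodes. Special characters: '^' Ctrl, '|' Shift,
# '_' Alt, '$' GUI/Win key, 'W' short pause, '{...}' press the enclosed keys together,
# '&NAME;' a named extended key, '\' escapes the next character ('\n' sends Enter).
# For example "{^_&DEL;}" sends Ctrl+Alt+Del.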
def typeInGuest(console, text, delay):
pressed = []
group = False
modGroupEnd = True
i = 0
kbd = console.keyboard
while i < len(text):
ch = text[i]
i = i+1
if ch == '{':
# start group, all keys to be pressed at the same time
group = True
continue
if ch == '}':
# end group, release all keys
for c in pressed:
kbd.putScancodes(keyUp(c))
pressed = []
group = False
continue
if ch == 'W':
# just wait a bit
time.sleep(0.3)
continue
if ch == '^' or ch == '|' or ch == '$' or ch == '_':
if ch == '^':
ch = 'LCTR'
if ch == '|':
ch = 'LSHIFT'
if ch == '_':
ch = 'LALT'
if ch == '$':
ch = 'LGUI'
if not group:
modGroupEnd = False
else:
if ch == '\\':
if i < len(text):
ch = text[i]
i = i+1
if ch == 'n':
ch = '\n'
elif ch == '&':
combo = ""
while i < len(text):
ch = text[i]
i = i+1
if ch == ';':
break
combo += ch
ch = combo
modGroupEnd = True
kbd.putScancodes(keyDown(ch))
pressed.insert(0, ch)
if not group and modGroupEnd:
for c in pressed:
kbd.putScancodes(keyUp(c))
pressed = []
modGroupEnd = True
time.sleep(delay)
def typeGuestCmd(ctx, args):
if len(args) < 3:
print "usage: typeGuest <machine> <text> <charDelay>"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
text = args[2]
if len(args) > 3:
delay = float(args[3])
else:
delay = 0.1
gargs = [lambda ctx, mach, console, args: typeInGuest(console, text, delay)]
cmdExistingVm(ctx, mach, 'guestlambda', gargs)
return 0
def optId(verbose, uuid):
if verbose:
return ": "+uuid
else:
return ""
def asSize(val, inBytes):
if inBytes:
return int(val)/(1024*1024)
else:
return int(val)
def listMediaCmd(ctx, args):
if len(args) > 1:
verbose = int(args[1])
else:
verbose = False
hdds = ctx['global'].getArray(ctx['vb'], 'hardDisks')
print colCat(ctx, "Hard disks:")
for hdd in hdds:
if hdd.state != ctx['global'].constants.MediumState_Created:
hdd.refreshState()
print " %s (%s)%s %s [logical %s]" % (colPath(ctx, hdd.location), hdd.format, optId(verbose, hdd.id), colSizeM(ctx, asSize(hdd.size, True)), colSizeM(ctx, asSize(hdd.logicalSize, True)))
dvds = ctx['global'].getArray(ctx['vb'], 'DVDImages')
print colCat(ctx, "CD/DVD disks:")
for dvd in dvds:
if dvd.state != ctx['global'].constants.MediumState_Created:
dvd.refreshState()
print " %s (%s)%s %s" % (colPath(ctx, dvd.location), dvd.format, optId(verbose, dvd.id), colSizeM(ctx, asSize(dvd.size, True)))
floppys = ctx['global'].getArray(ctx['vb'], 'floppyImages')
print colCat(ctx, "Floppy disks:")
for floppy in floppys:
if floppy.state != ctx['global'].constants.MediumState_Created:
floppy.refreshState()
print " %s (%s)%s %s" % (colPath(ctx, floppy.location), floppy.format, optId(verbose, floppy.id), colSizeM(ctx, asSize(floppy.size, True)))
return 0
def listUsbCmd(ctx, args):
if (len(args) > 1):
print "usage: listUsb"
return 0
host = ctx['vb'].host
for ud in ctx['global'].getArray(host, 'USBDevices'):
printHostUsbDev(ctx, ud)
return 0
def findDevOfType(ctx, mach, devtype):
atts = ctx['global'].getArray(mach, 'mediumAttachments')
for a in atts:
if a.type == devtype:
return [a.controller, a.port, a.device]
return [None, 0, 0]
def createHddCmd(ctx, args):
if (len(args) < 3):
print "usage: createHdd sizeM location type"
return 0
size = int(args[1])
loc = args[2]
if len(args) > 3:
fmt = args[3]
else:
fmt = "vdi"
    hdd = ctx['vb'].createHardDisk(fmt, loc)
progress = hdd.createBaseStorage(size, (ctx['global'].constants.MediumVariant_Standard, ))
if progressBar(ctx,progress) and hdd.id:
print "created HDD at %s as %s" % (colPath(ctx,hdd.location), hdd.id)
else:
print "cannot create disk (file %s exist?)" % (loc)
reportError(ctx,progress)
return 0
return 0
def registerHddCmd(ctx, args):
if (len(args) < 2):
print "usage: registerHdd location"
return 0
vbox = ctx['vb']
loc = args[1]
setImageId = False
imageId = ""
setParentId = False
parentId = ""
    hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
print "registered HDD as %s" % (hdd.id)
return 0
def controldevice(ctx, mach, args):
[ctr, port, slot, devtype, uuid] = args
mach.attachDevice(ctr, port, slot, devtype, uuid)
def attachHddCmd(ctx, args):
if (len(args) < 3):
print "usage: attachHdd vm hdd controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
except:
print "no HDD with path %s registered" % (loc)
return 0
if len(args) > 3:
ctr = args[3]
(port, slot) = args[4].split(":")
else:
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_HardDisk)
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.attachDevice(ctr, port, slot, ctx['global'].constants.DeviceType_HardDisk, hdd.id))
return 0
def detachVmDevice(ctx, mach, args):
atts = ctx['global'].getArray(mach, 'mediumAttachments')
hid = args[0]
for a in atts:
if a.medium:
if hid == "ALL" or a.medium.id == hid:
mach.detachDevice(a.controller, a.port, a.device)
def detachMedium(ctx, mid, medium):
cmdClosedVm(ctx, machById(ctx, mid), detachVmDevice, [medium])
def detachHddCmd(ctx, args):
if (len(args) < 3):
print "usage: detachHdd vm hdd"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
except:
print "no HDD with path %s registered" % (loc)
return 0
detachMedium(ctx, mach.id, hdd)
return 0
def unregisterHddCmd(ctx, args):
if (len(args) < 2):
print "usage: unregisterHdd path <vmunreg>"
return 0
vbox = ctx['vb']
loc = args[1]
if (len(args) > 2):
vmunreg = int(args[2])
else:
vmunreg = 0
try:
        hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
except:
print "no HDD with path %s registered" % (loc)
return 0
if vmunreg != 0:
machs = ctx['global'].getArray(hdd, 'machineIds')
try:
for mach in machs:
print "Trying to detach from %s" % (mach)
detachMedium(ctx, mach, hdd)
except Exception, e:
print 'failed: ', e
return 0
hdd.close()
return 0
def removeHddCmd(ctx, args):
if (len(args) != 2):
print "usage: removeHdd path"
return 0
vbox = ctx['vb']
loc = args[1]
try:
        hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
except:
print "no HDD with path %s registered" % (loc)
return 0
progress = hdd.deleteStorage()
progressBar(ctx, progress)
return 0
def registerIsoCmd(ctx, args):
if (len(args) < 2):
print "usage: registerIso location"
return 0
vbox = ctx['vb']
loc = args[1]
    iso = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
print "registered ISO as %s" % (iso.id)
return 0
def unregisterIsoCmd(ctx, args):
if (len(args) != 2):
print "usage: unregisterIso path"
return 0
vbox = ctx['vb']
loc = args[1]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
    dvd.close()
print "Unregistered ISO at %s" % (colPath(ctx, loc))
return 0
def removeIsoCmd(ctx, args):
if (len(args) != 2):
print "usage: removeIso path"
return 0
vbox = ctx['vb']
loc = args[1]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
progress = dvd.deleteStorage()
if progressBar(ctx, progress):
print "Removed ISO at %s" % (colPath(ctx, dvd.location))
else:
reportError(ctx, progress)
return 0
def attachIsoCmd(ctx, args):
if (len(args) < 3):
print "usage: attachIso vm iso controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
if len(args) > 3:
ctr = args[3]
(port, slot) = args[4].split(":")
else:
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_DVD)
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.attachDevice(ctr, port, slot, ctx['global'].constants.DeviceType_DVD, dvd))
return 0
def detachIsoCmd(ctx, args):
if (len(args) < 3):
print "usage: detachIso vm iso"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
detachMedium(ctx, mach.id, dvd)
return 0
def mountIsoCmd(ctx, args):
if (len(args) < 3):
print "usage: mountIso vm iso controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
if len(args) > 3:
ctr = args[3]
(port, slot) = args[4].split(":")
else:
# autodetect controller and location, just find first controller with media == DVD
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_DVD)
cmdExistingVm(ctx, mach, 'mountiso', [ctr, port, slot, dvd, True])
return 0
def unmountIsoCmd(ctx, args):
if (len(args) < 2):
print "usage: unmountIso vm controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
if len(args) > 3:
ctr = args[2]
(port, slot) = args[3].split(":")
else:
# autodetect controller and location, just find first controller with media == DVD
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_DVD)
cmdExistingVm(ctx, mach, 'mountiso', [ctr, port, slot, None, True])
return 0
def attachCtr(ctx, mach, args):
[name, bus, ctrltype] = args
ctr = mach.addStorageController(name, bus)
if ctrltype != None:
ctr.controllerType = ctrltype
def attachCtrCmd(ctx, args):
if (len(args) < 4):
print "usage: attachCtr vm cname bus <type>"
return 0
if len(args) > 4:
ctrltype = enumFromString(ctx, 'StorageControllerType', args[4])
if ctrltype == None:
print "Controller type %s unknown" % (args[4])
return 0
else:
ctrltype = None
mach = argsToMach(ctx, args)
if mach is None:
return 0
bus = enumFromString(ctx, 'StorageBus', args[3])
if bus is None:
print "Bus type %s unknown" % (args[3])
return 0
name = args[2]
cmdClosedVm(ctx, mach, attachCtr, [name, bus, ctrltype])
return 0
def detachCtrCmd(ctx, args):
if (len(args) < 3):
print "usage: detachCtr vm name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
ctr = args[2]
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.removeStorageController(ctr))
return 0
def usbctr(ctx, mach, console, args):
if (args[0]):
console.attachUSBDevice(args[1])
else:
console.detachUSBDevice(args[1])
def attachUsbCmd(ctx, args):
if (len(args) < 3):
print "usage: attachUsb vm deviceuid"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
dev = args[2]
cmdExistingVm(ctx, mach, 'guestlambda', [usbctr, True, dev])
return 0
def detachUsbCmd(ctx, args):
if (len(args) < 3):
print "usage: detachUsb vm deviceuid"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
dev = args[2]
cmdExistingVm(ctx, mach, 'guestlambda', [usbctr, False, dev])
return 0
def guiCmd(ctx, args):
if (len(args) > 1):
print "usage: gui"
return 0
binDir = ctx['global'].getBinDir()
vbox = os.path.join(binDir, 'VirtualBox')
try:
os.system(vbox)
except KeyboardInterrupt:
# to allow interruption
pass
return 0
def shareFolderCmd(ctx, args):
if (len(args) < 4):
print "usage: shareFolder vm path name <writable> <persistent>"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
path = args[2]
name = args[3]
writable = False
persistent = False
if len(args) > 4:
for a in args[4:]:
if a == 'writable':
writable = True
if a == 'persistent':
persistent = True
if persistent:
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.createSharedFolder(name, path, writable), [])
else:
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: console.createSharedFolder(name, path, writable)])
return 0
def unshareFolderCmd(ctx, args):
if (len(args) < 3):
print "usage: unshareFolder vm name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
name = args[2]
found = False
for sf in ctx['global'].getArray(mach, 'sharedFolders'):
if sf.name == name:
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.removeSharedFolder(name), [])
found = True
break
if not found:
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: console.removeSharedFolder(name)])
return 0
def snapshotCmd(ctx, args):
    if (len(args) < 3 or args[1] == 'help'):
print "Take snapshot: snapshot vm take name <description>"
print "Restore snapshot: snapshot vm restore name"
print "Merge snapshot: snapshot vm merge name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
cmd = args[2]
if cmd == 'take':
if (len(args) < 4):
print "usage: snapshot vm take name <description>"
return 0
name = args[3]
if (len(args) > 4):
desc = args[4]
else:
desc = ""
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.takeSnapshot(name, desc)))
return 0
if cmd == 'restore':
if (len(args) < 4):
print "usage: snapshot vm restore name"
return 0
name = args[3]
snap = mach.findSnapshot(name)
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.restoreSnapshot(snap)))
return 0
if cmd == 'restorecurrent':
        if (len(args) < 3):
print "usage: snapshot vm restorecurrent"
return 0
snap = mach.currentSnapshot()
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.restoreSnapshot(snap)))
return 0
if cmd == 'delete':
if (len(args) < 4):
print "usage: snapshot vm delete name"
return 0
name = args[3]
snap = mach.findSnapshot(name)
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.deleteSnapshot(snap.id)))
return 0
print "Command '%s' is unknown" % (cmd)
return 0
def natAlias(ctx, mach, nicnum, nat, args=[]):
"""This command shows/alters NAT's alias settings.
usage: nat <vm> <nicnum> alias [default|[log] [proxyonly] [sameports]]
default - set settings to default values
log - switch on alias logging
proxyonly - switch proxyonly mode on
sameports - enforces NAT using the same ports
"""
alias = {
'log': 0x1,
'proxyonly': 0x2,
'sameports': 0x4
}
if len(args) == 1:
first = 0
msg = ''
for aliasmode, aliaskey in alias.iteritems():
if first == 0:
first = 1
else:
msg += ', '
if int(nat.aliasMode) & aliaskey:
                msg += '%s: %s' % (aliasmode, 'on')
            else:
                msg += '%s: %s' % (aliasmode, 'off')
return (0, [msg])
else:
nat.aliasMode = 0
if 'default' not in args:
for a in range(1, len(args)):
if not alias.has_key(args[a]):
print 'Invalid alias mode: ' + args[a]
print natAlias.__doc__
return (1, None)
nat.aliasMode = int(nat.aliasMode) | alias[args[a]]
return (0, None)
def natSettings(ctx, mach, nicnum, nat, args):
"""This command shows/alters NAT settings.
usage: nat <vm> <nicnum> settings [<mtu> [[<socsndbuf> <sockrcvbuf> [<tcpsndwnd> <tcprcvwnd>]]]]
mtu - set mtu <= 16000
socksndbuf/sockrcvbuf - sets amount of kb for socket sending/receiving buffer
tcpsndwnd/tcprcvwnd - sets size of initial tcp sending/receiving window
"""
if len(args) == 1:
(mtu, socksndbuf, sockrcvbuf, tcpsndwnd, tcprcvwnd) = nat.getNetworkSettings()
if mtu == 0: mtu = 1500
if socksndbuf == 0: socksndbuf = 64
if sockrcvbuf == 0: sockrcvbuf = 64
if tcpsndwnd == 0: tcpsndwnd = 64
if tcprcvwnd == 0: tcprcvwnd = 64
msg = 'mtu:%s socket(snd:%s, rcv:%s) tcpwnd(snd:%s, rcv:%s)' % (mtu, socksndbuf, sockrcvbuf, tcpsndwnd, tcprcvwnd)
return (0, [msg])
else:
        if not args[1].isdigit() or int(args[1]) < 65 or int(args[1]) > 16000:
print 'invalid mtu value (%s not in range [65 - 16000])' % (args[1])
return (1, None)
for i in range(2, len(args)):
if not args[i].isdigit() or int(args[i]) < 8 or int(args[i]) > 1024:
                print 'invalid parameter %d (%s not in range [8-1024])' % (i, args[i])
return (1, None)
a = [args[1]]
if len(args) < 6:
for i in range(2, len(args)): a.append(args[i])
for i in range(len(args), 6): a.append(0)
else:
for i in range(2, len(args)): a.append(args[i])
#print a
nat.setNetworkSettings(int(a[0]), int(a[1]), int(a[2]), int(a[3]), int(a[4]))
return (0, None)
def natDns(ctx, mach, nicnum, nat, args):
"""This command shows/alters DNS's NAT settings
usage: nat <vm> <nicnum> dns [passdomain] [proxy] [usehostresolver]
passdomain - enforces builtin DHCP server to pass domain
proxy - switch on builtin NAT DNS proxying mechanism
usehostresolver - proxies all DNS requests to Host Resolver interface
"""
yesno = {0: 'off', 1: 'on'}
if len(args) == 1:
msg = 'passdomain:%s, proxy:%s, usehostresolver:%s' % (yesno[int(nat.DNSPassDomain)], yesno[int(nat.DNSProxy)], yesno[int(nat.DNSUseHostResolver)])
return (0, [msg])
else:
nat.DNSPassDomain = 'passdomain' in args
nat.DNSProxy = 'proxy' in args
nat.DNSUseHostResolver = 'usehostresolver' in args
return (0, None)
def natTftp(ctx, mach, nicnum, nat, args):
"""This command shows/alters TFTP settings
    usage: nat <vm> <nicnum> tftp [prefix <prefix>| bootfile <bootfile>| server <server>]
prefix - alters prefix TFTP settings
bootfile - alters bootfile TFTP settings
server - sets booting server
"""
if len(args) == 1:
server = nat.TFTPNextServer
if server is None:
server = nat.network
if server is None:
server = '10.0.%d/24' % (int(nicnum) + 2)
(server, mask) = server.split('/')
while server.count('.') != 3:
server += '.0'
(a, b, c, d) = server.split('.')
        server = '%s.%s.%s.4' % (a, b, c)
prefix = nat.TFTPPrefix
if prefix is None:
prefix = '%s/TFTP/' % (ctx['vb'].homeFolder)
bootfile = nat.TFTPBootFile
if bootfile is None:
bootfile = '%s.pxe' % (mach.name)
msg = 'server:%s, prefix:%s, bootfile:%s' % (server, prefix, bootfile)
return (0, [msg])
else:
cmd = args[1]
if len(args) != 3:
print 'invalid args:', args
print natTftp.__doc__
return (1, None)
if cmd == 'prefix': nat.TFTPPrefix = args[2]
elif cmd == 'bootfile': nat.TFTPBootFile = args[2]
elif cmd == 'server': nat.TFTPNextServer = args[2]
else:
print "invalid cmd:", cmd
return (1, None)
return (0, None)
def natPortForwarding(ctx, mach, nicnum, nat, args):
"""This command shows/manages port-forwarding settings
usage:
nat <vm> <nicnum> <pf> [ simple tcp|udp <hostport> <guestport>]
|[no_name tcp|udp <hostip> <hostport> <guestip> <guestport>]
|[ex tcp|udp <pf-name> <hostip> <hostport> <guestip> <guestport>]
|[delete <pf-name>]
"""
if len(args) == 1:
# note: keys/values are swapped in defining part of the function
proto = {0: 'udp', 1: 'tcp'}
msg = []
pfs = ctx['global'].getArray(nat, 'redirects')
for pf in pfs:
(pfnme, pfp, pfhip, pfhp, pfgip, pfgp) = str(pf).split(', ')
msg.append('%s: %s %s:%s => %s:%s' % (pfnme, proto[int(pfp)], pfhip, pfhp, pfgip, pfgp))
return (0, msg) # msg is array
else:
proto = {'udp': 0, 'tcp': 1}
pfcmd = {
'simple': {
'validate': lambda: args[1] in pfcmd.keys() and args[2] in proto.keys() and len(args) == 5,
'func':lambda: nat.addRedirect('', proto[args[2]], '', int(args[3]), '', int(args[4]))
},
'no_name': {
'validate': lambda: args[1] in pfcmd.keys() and args[2] in proto.keys() and len(args) == 7,
'func': lambda: nat.addRedirect('', proto[args[2]], args[3], int(args[4]), args[5], int(args[6]))
},
'ex': {
'validate': lambda: args[1] in pfcmd.keys() and args[2] in proto.keys() and len(args) == 8,
'func': lambda: nat.addRedirect(args[3], proto[args[2]], args[4], int(args[5]), args[6], int(args[7]))
},
'delete': {
'validate': lambda: len(args) == 3,
'func': lambda: nat.removeRedirect(args[2])
}
}
        if args[1] not in pfcmd or not pfcmd[args[1]]['validate']():
print 'invalid port-forwarding or args of sub command ', args[1]
print natPortForwarding.__doc__
return (1, None)
a = pfcmd[args[1]]['func']()
return (0, None)
def natNetwork(ctx, mach, nicnum, nat, args):
"""This command shows/alters NAT network settings
usage: nat <vm> <nicnum> network [<network>]
"""
if len(args) == 1:
if nat.network is not None and len(str(nat.network)) != 0:
msg = '\'%s\'' % (nat.network)
else:
msg = '10.0.%d.0/24' % (int(nicnum) + 2)
return (0, [msg])
else:
(addr, mask) = args[1].split('/')
if addr.count('.') > 3 or int(mask) < 0 or int(mask) > 32:
print 'Invalid arguments'
return (1, None)
nat.network = args[1]
return (0, None)
def natCmd(ctx, args):
"""This command is entry point to NAT settins management
usage: nat <vm> <nicnum> <cmd> <cmd-args>
cmd - [alias|settings|tftp|dns|pf|network]
for more information about commands:
nat help <cmd>
"""
natcommands = {
'alias' : natAlias,
'settings' : natSettings,
'tftp': natTftp,
'dns': natDns,
'pf': natPortForwarding,
'network': natNetwork
}
if len(args) < 2 or args[1] == 'help':
if len(args) > 2:
print natcommands[args[2]].__doc__
else:
print natCmd.__doc__
return 0
if len(args) == 1 or len(args) < 4 or args[3] not in natcommands:
print natCmd.__doc__
return 0
mach = ctx['argsToMach'](args)
if mach == None:
print "please specify vm"
return 0
if len(args) < 3 or not args[2].isdigit() or int(args[2]) not in range(0, ctx['vb'].systemProperties.getMaxNetworkAdapters(mach.chipsetType)):
        print 'please specify adapter num: %s isn\'t in range [0-%d]' % (args[2], ctx['vb'].systemProperties.getMaxNetworkAdapters(mach.chipsetType))
return 0
nicnum = int(args[2])
cmdargs = []
for i in range(3, len(args)):
cmdargs.append(args[i])
# @todo vvl if nicnum is missed but command is entered
# use NAT func for every adapter on machine.
func = args[3]
rosession = 1
session = None
if len(cmdargs) > 1:
rosession = 0
session = ctx['global'].openMachineSession(mach, False)
mach = session.machine
adapter = mach.getNetworkAdapter(nicnum)
natEngine = adapter.NATEngine
(rc, report) = natcommands[func](ctx, mach, nicnum, natEngine, cmdargs)
if rosession == 0:
if rc == 0:
mach.saveSettings()
session.unlockMachine()
elif report is not None:
for r in report:
msg ='%s nic%d %s: %s' % (mach.name, nicnum, func, r)
print msg
return 0
def nicSwitchOnOff(adapter, attr, args):
if len(args) == 1:
yesno = {0: 'off', 1: 'on'}
r = yesno[int(adapter.__getattr__(attr))]
return (0, r)
else:
yesno = {'off' : 0, 'on' : 1}
if args[1] not in yesno:
print '%s isn\'t acceptable, please choose %s' % (args[1], yesno.keys())
return (1, None)
adapter.__setattr__(attr, yesno[args[1]])
return (0, None)
def nicTraceSubCmd(ctx, vm, nicnum, adapter, args):
'''
usage: nic <vm> <nicnum> trace [on|off [file]]
'''
(rc, r) = nicSwitchOnOff(adapter, 'traceEnabled', args)
if len(args) == 1 and rc == 0:
r = '%s file:%s' % (r, adapter.traceFile)
return (0, r)
elif len(args) == 3 and rc == 0:
adapter.traceFile = args[2]
return (0, None)
def nicLineSpeedSubCmd(ctx, vm, nicnum, adapter, args):
if len(args) == 1:
r = '%d kbps'% (adapter.lineSpeed)
return (0, r)
else:
if not args[1].isdigit():
print '%s isn\'t a number' % (args[1])
            return (1, None)
adapter.lineSpeed = int(args[1])
return (0, None)
def nicCableSubCmd(ctx, vm, nicnum, adapter, args):
'''
usage: nic <vm> <nicnum> cable [on|off]
'''
return nicSwitchOnOff(adapter, 'cableConnected', args)
def nicEnableSubCmd(ctx, vm, nicnum, adapter, args):
'''
usage: nic <vm> <nicnum> enable [on|off]
'''
return nicSwitchOnOff(adapter, 'enabled', args)
def nicTypeSubCmd(ctx, vm, nicnum, adapter, args):
'''
    usage: nic <vm> <nicnum> type [Am79c970A|Am79c973|I82540EM|I82545EM|I82543GC|Virtio]
'''
if len(args) == 1:
nictypes = ctx['const'].all_values('NetworkAdapterType')
for key in nictypes.keys():
if str(adapter.adapterType) == str(nictypes[key]):
return (0, str(key))
return (1, None)
else:
nictypes = ctx['const'].all_values('NetworkAdapterType')
if args[1] not in nictypes.keys():
print '%s not in acceptable values (%s)' % (args[1], nictypes.keys())
return (1, None)
adapter.adapterType = nictypes[args[1]]
return (0, None)
def nicAttachmentSubCmd(ctx, vm, nicnum, adapter, args):
'''
    usage: nic <vm> <nicnum> attachment [Null|NAT|Bridged <interface>|Internal <name>|HostOnly <interface>]
'''
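    # Examples (VM and interface names assumed): "nic Win7 0 attachment NAT" needs no
    # extra argument, while "nic Win7 0 attachment Bridged eth0" also takes the host
    # interface to bridge to.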
if len(args) == 1:
nicAttachmentType = {
ctx['global'].constants.NetworkAttachmentType_Null: ('Null', ''),
ctx['global'].constants.NetworkAttachmentType_NAT: ('NAT', ''),
ctx['global'].constants.NetworkAttachmentType_Bridged: ('Bridged', adapter.bridgedInterface),
ctx['global'].constants.NetworkAttachmentType_Internal: ('Internal', adapter.internalNetwork),
ctx['global'].constants.NetworkAttachmentType_HostOnly: ('HostOnly', adapter.hostOnlyInterface),
# @todo show details of the generic network attachment type
ctx['global'].constants.NetworkAttachmentType_Generic: ('Generic', ''),
}
import types
if type(adapter.attachmentType) != types.IntType:
t = str(adapter.attachmentType)
else:
t = adapter.attachmentType
(r, p) = nicAttachmentType[t]
return (0, 'attachment:%s, name:%s' % (r, p))
else:
nicAttachmentType = {
'Null': {
'v': lambda: len(args) == 2,
'p': lambda: 'do nothing',
'f': lambda: ctx['global'].constants.NetworkAttachmentType_Null},
'NAT': {
'v': lambda: len(args) == 2,
'p': lambda: 'do nothing',
'f': lambda: ctx['global'].constants.NetworkAttachmentType_NAT},
'Bridged': {
'v': lambda: len(args) == 3,
'p': lambda: adapter.__setattr__('bridgedInterface', args[2]),
'f': lambda: ctx['global'].constants.NetworkAttachmentType_Bridged},
'Internal': {
'v': lambda: len(args) == 3,
'p': lambda: adapter.__setattr__('internalNetwork', args[2]),
'f': lambda: ctx['global'].constants.NetworkAttachmentType_Internal},
'HostOnly': {
                'v': lambda: len(args) == 3,
'p': lambda: adapter.__setattr__('hostOnlyInterface', args[2]),
'f': lambda: ctx['global'].constants.NetworkAttachmentType_HostOnly},
# @todo implement setting the properties of a generic attachment
'Generic': {
'v': lambda: len(args) == 3,
'p': lambda: 'do nothing',
'f': lambda: ctx['global'].constants.NetworkAttachmentType_Generic}
}
if args[1] not in nicAttachmentType.keys():
print '%s not in acceptable values (%s)' % (args[1], nicAttachmentType.keys())
return (1, None)
if not nicAttachmentType[args[1]]['v']():
            print nicAttachmentSubCmd.__doc__
return (1, None)
nicAttachmentType[args[1]]['p']()
adapter.attachmentType = nicAttachmentType[args[1]]['f']()
return (0, None)
def nicCmd(ctx, args):
'''
    This command manages network adapters
usage: nic <vm> <nicnum> <cmd> <cmd-args>
where cmd : attachment, trace, linespeed, cable, enable, type
'''
# 'command name':{'runtime': is_callable_at_runtime, 'op': function_name}
niccomand = {
'attachment': nicAttachmentSubCmd,
'trace': nicTraceSubCmd,
'linespeed': nicLineSpeedSubCmd,
'cable': nicCableSubCmd,
'enable': nicEnableSubCmd,
'type': nicTypeSubCmd
}
if len(args) < 2 \
or args[1] == 'help' \
        or (len(args) > 2 and (len(args) < 4 or args[3] not in niccomand)):
if len(args) == 3 \
and args[2] in niccomand:
print niccomand[args[2]].__doc__
else:
print nicCmd.__doc__
return 0
vm = ctx['argsToMach'](args)
if vm is None:
print 'please specify vm'
return 0
    if len(args) < 3 or not args[2].isdigit() \
        or int(args[2]) not in range(0, ctx['vb'].systemProperties.getMaxNetworkAdapters(vm.chipsetType)):
        print 'please specify adapter num in range [0-%d]' % (ctx['vb'].systemProperties.getMaxNetworkAdapters(vm.chipsetType))
return 0
nicnum = int(args[2])
cmdargs = args[3:]
func = args[3]
session = None
session = ctx['global'].openMachineSession(vm)
vm = session.machine
adapter = vm.getNetworkAdapter(nicnum)
(rc, report) = niccomand[func](ctx, vm, nicnum, adapter, cmdargs)
if rc == 0:
vm.saveSettings()
if report is not None:
print '%s nic %d %s: %s' % (vm.name, nicnum, args[3], report)
session.unlockMachine()
return 0
def promptCmd(ctx, args):
if len(args) < 2:
print "Current prompt: '%s'" % (ctx['prompt'])
return 0
ctx['prompt'] = args[1]
return 0
def foreachCmd(ctx, args):
if len(args) < 3:
print "usage: foreach scope command, where scope is XPath-like expression //vms/vm[@CPUCount='2']"
return 0
scope = args[1]
cmd = args[2]
elems = eval_xpath(ctx, scope)
try:
for e in elems:
e.apply(cmd)
except:
print "Error executing"
traceback.print_exc()
return 0
def foreachvmCmd(ctx, args):
if len(args) < 2:
print "foreachvm command <args>"
return 0
cmdargs = args[1:]
cmdargs.insert(1, '')
for mach in getMachines(ctx):
cmdargs[1] = mach.id
runCommandArgs(ctx, cmdargs)
return 0
def recordDemoCmd(ctx, args):
if (len(args) < 3):
print "usage: recordDemo vm filename (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
filename = args[2]
dur = 10000
if len(args) > 3:
dur = float(args[3])
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: recordDemo(ctx, console, filename, dur)])
return 0
def playbackDemoCmd(ctx, args):
if (len(args) < 3):
print "usage: playbackDemo vm filename (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
filename = args[2]
dur = 10000
if len(args) > 3:
dur = float(args[3])
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: playbackDemo(ctx, console, filename, dur)])
return 0
def pciAddr(ctx, addr):
strg = "%02x:%02x.%d" % (addr >> 8, (addr & 0xff) >> 3, addr & 7)
return colPci(ctx, strg)
def lspci(ctx, console):
assigned = ctx['global'].getArray(console.machine, 'PCIDeviceAssignments')
for a in assigned:
if a.isPhysicalDevice:
print "%s: assigned host device %s guest %s" % (colDev(ctx, a.name), pciAddr(ctx, a.hostAddress), pciAddr(ctx, a.guestAddress))
atts = ctx['global'].getArray(console, 'attachedPCIDevices')
for a in atts:
if a.isPhysicalDevice:
print "%s: physical, guest %s, host %s" % (colDev(ctx, a.name), pciAddr(ctx, a.guestAddress), pciAddr(ctx, a.hostAddress))
else:
print "%s: virtual, guest %s" % (colDev(ctx, a.name), pciAddr(ctx, a.guestAddress))
return
def parsePci(strg):
pcire = re.compile(r'(?P<b>[0-9a-fA-F]+):(?P<d>[0-9a-fA-F]+)\.(?P<f>\d)')
match = pcire.search(strg)
if match is None:
return -1
pdict = match.groupdict()
return ((int(pdict['b'], 16)) << 8) | ((int(pdict['d'], 16)) << 3) | int(pdict['f'])
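# Example: parsePci("01:02.3") packs bus 1, device 2, function 3 into
# (1 << 8) | (2 << 3) | 3 == 0x113; pciAddr() formats such a packed value back
# into the "bus:device.function" form.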
def lspciCmd(ctx, args):
if (len(args) < 2):
print "usage: lspci vm"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: lspci(ctx, console)])
return 0
def attachpciCmd(ctx, args):
if (len(args) < 3):
print "usage: attachpci vm hostpci <guestpci>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
hostaddr = parsePci(args[2])
if hostaddr == -1:
print "invalid host PCI %s, accepted format 01:02.3 for bus 1, device 2, function 3" % (args[2])
return 0
if (len(args) > 3):
guestaddr = parsePci(args[3])
if guestaddr == -1:
print "invalid guest PCI %s, accepted format 01:02.3 for bus 1, device 2, function 3" % (args[3])
return 0
else:
guestaddr = hostaddr
cmdClosedVm(ctx, mach, lambda ctx, mach, a: mach.attachHostPCIDevice(hostaddr, guestaddr, True))
return 0
def detachpciCmd(ctx, args):
if (len(args) < 3):
print "usage: detachpci vm hostpci"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
hostaddr = parsePci(args[2])
if hostaddr == -1:
print "invalid host PCI %s, accepted format 01:02.3 for bus 1, device 2, function 3" % (args[2])
return 0
cmdClosedVm(ctx, mach, lambda ctx, mach, a: mach.detachHostPCIDevice(hostaddr))
return 0
def gotoCmd(ctx, args):
if (len(args) < 2):
print "usage: goto line"
return 0
line = int(args[1])
ctx['scriptLine'] = line
return 0
aliases = {'s':'start',
'i':'info',
'l':'list',
'h':'help',
'a':'alias',
'q':'quit', 'exit':'quit',
'tg': 'typeGuest',
'v':'verbose'}
commands = {'help':['Prints help information', helpCmd, 0],
'start':['Start virtual machine by name or uuid: start Linux headless', startCmd, 0],
'createVm':['Create virtual machine: createVm macvm MacOS', createVmCmd, 0],
'removeVm':['Remove virtual machine', removeVmCmd, 0],
'pause':['Pause virtual machine', pauseCmd, 0],
'resume':['Resume virtual machine', resumeCmd, 0],
'save':['Save execution state of virtual machine', saveCmd, 0],
'stats':['Stats for virtual machine', statsCmd, 0],
'powerdown':['Power down virtual machine', powerdownCmd, 0],
'powerbutton':['Effectively press power button', powerbuttonCmd, 0],
'list':['Shows known virtual machines', listCmd, 0],
'info':['Shows info on machine', infoCmd, 0],
'ginfo':['Shows info on guest', ginfoCmd, 0],
'gexec':['Executes program in the guest', gexecCmd, 0],
'gcopy':['Copy file to the guest', gcopyCmd, 0],
'gpipe':['Pipe between host and guest', gpipeCmd, 0],
'alias':['Control aliases', aliasCmd, 0],
'verbose':['Toggle verbosity', verboseCmd, 0],
'setvar':['Set VMs variable: setvar Fedora BIOSSettings.ACPIEnabled True', setvarCmd, 0],
'eval':['Evaluate arbitrary Python construction: eval \'for m in getMachines(ctx): print m.name, "has", m.memorySize, "M"\'', evalCmd, 0],
'quit':['Exits', quitCmd, 0],
'host':['Show host information', hostCmd, 0],
'guest':['Execute command for guest: guest Win32 \'console.mouse.putMouseEvent(20, 20, 0, 0, 0)\'', guestCmd, 0],
'monitorGuest':['Monitor what happens with the guest for some time: monitorGuest Win32 10', monitorGuestCmd, 0],
'monitorGuestKbd':['Monitor guest keyboard for some time: monitorGuestKbd Win32 10', monitorGuestKbdCmd, 0],
'monitorGuestMouse':['Monitor guest mouse for some time: monitorGuestMouse Win32 10', monitorGuestMouseCmd, 0],
'monitorGuestMultiTouch':['Monitor guest touch screen for some time: monitorGuestMultiTouch Win32 10', monitorGuestMultiTouchCmd, 0],
'monitorVBox':['Monitor what happens with Virtual Box for some time: monitorVBox 10', monitorVBoxCmd, 0],
'portForward':['Setup permanent port forwarding for a VM, takes adapter number host port and guest port: portForward Win32 0 8080 80', portForwardCmd, 0],
            'showLog':['Show log file of the VM: showLog Win32', showLogCmd, 0],
            'findLog':['Show entries matching pattern in log file of the VM: findLog Win32 PDM|CPUM', findLogCmd, 0],
            'findAssert':['Find assert in log file of the VM: findAssert Win32', findAssertCmd, 0],
'reloadExt':['Reload custom extensions: reloadExt', reloadExtCmd, 0],
'runScript':['Run VBox script: runScript script.vbox', runScriptCmd, 0],
'sleep':['Sleep for specified number of seconds: sleep 3.14159', sleepCmd, 0],
'shell':['Execute external shell command: shell "ls /etc/rc*"', shellCmd, 0],
'exportVm':['Export VM in OVF format: exportVm Win /tmp/win.ovf', exportVMCmd, 0],
'screenshot':['Take VM screenshot to a file: screenshot Win /tmp/win.png 1024 768 0', screenshotCmd, 0],
'teleport':['Teleport VM to another box (see openportal): teleport Win anotherhost:8000 <passwd> <maxDowntime>', teleportCmd, 0],
'typeGuest':['Type arbitrary text in guest: typeGuest Linux "^lls\\n&UP;&BKSP;ess /etc/hosts\\nq^c" 0.7', typeGuestCmd, 0],
'openportal':['Open portal for teleportation of VM from another box (see teleport): openportal Win 8000 <passwd>', openportalCmd, 0],
'closeportal':['Close teleportation portal (see openportal, teleport): closeportal Win', closeportalCmd, 0],
'getextra':['Get extra data, empty key lists all: getextra <vm|global> <key>', getExtraDataCmd, 0],
'setextra':['Set extra data, empty value removes key: setextra <vm|global> <key> <value>', setExtraDataCmd, 0],
'gueststats':['Print available guest stats (only Windows guests with additions so far): gueststats Win32', gueststatsCmd, 0],
'plugcpu':['Add a CPU to a running VM: plugcpu Win 1', plugcpuCmd, 0],
'unplugcpu':['Remove a CPU from a running VM (additions required, Windows cannot unplug): unplugcpu Linux 1', unplugcpuCmd, 0],
'createHdd': ['Create virtual HDD: createHdd 1000 /disk.vdi ', createHddCmd, 0],
'removeHdd': ['Permanently remove virtual HDD: removeHdd /disk.vdi', removeHddCmd, 0],
'registerHdd': ['Register HDD image with VirtualBox instance: registerHdd /disk.vdi', registerHddCmd, 0],
'unregisterHdd': ['Unregister HDD image with VirtualBox instance: unregisterHdd /disk.vdi', unregisterHddCmd, 0],
'attachHdd': ['Attach HDD to the VM: attachHdd win /disk.vdi "IDE Controller" 0:1', attachHddCmd, 0],
'detachHdd': ['Detach HDD from the VM: detachHdd win /disk.vdi', detachHddCmd, 0],
'registerIso': ['Register CD/DVD image with VirtualBox instance: registerIso /os.iso', registerIsoCmd, 0],
'unregisterIso': ['Unregister CD/DVD image with VirtualBox instance: unregisterIso /os.iso', unregisterIsoCmd, 0],
'removeIso': ['Permanently remove CD/DVD image: removeIso /os.iso', removeIsoCmd, 0],
'attachIso': ['Attach CD/DVD to the VM: attachIso win /os.iso "IDE Controller" 0:1', attachIsoCmd, 0],
'detachIso': ['Detach CD/DVD from the VM: detachIso win /os.iso', detachIsoCmd, 0],
'mountIso': ['Mount CD/DVD to the running VM: mountIso win /os.iso "IDE Controller" 0:1', mountIsoCmd, 0],
'unmountIso': ['Unmount CD/DVD from running VM: unmountIso win "IDE Controller" 0:1', unmountIsoCmd, 0],
'attachCtr': ['Attach storage controller to the VM: attachCtr win Ctr0 IDE ICH6', attachCtrCmd, 0],
            'detachCtr': ['Detach storage controller from the VM: detachCtr win Ctr0', detachCtrCmd, 0],
'attachUsb': ['Attach USB device to the VM (use listUsb to show available devices): attachUsb win uuid', attachUsbCmd, 0],
'detachUsb': ['Detach USB device from the VM: detachUsb win uuid', detachUsbCmd, 0],
'listMedia': ['List media known to this VBox instance', listMediaCmd, 0],
'listUsb': ['List known USB devices', listUsbCmd, 0],
'shareFolder': ['Make host\'s folder visible to guest: shareFolder win /share share writable', shareFolderCmd, 0],
'unshareFolder': ['Remove folder sharing', unshareFolderCmd, 0],
'gui': ['Start GUI frontend', guiCmd, 0],
'colors':['Toggle colors', colorsCmd, 0],
'snapshot':['VM snapshot manipulation, snapshot help for more info', snapshotCmd, 0],
'nat':['NAT (network address translation engine) manipulation, nat help for more info', natCmd, 0],
'nic' : ['Network adapter management', nicCmd, 0],
'prompt' : ['Control shell prompt', promptCmd, 0],
'foreachvm' : ['Perform command for each VM', foreachvmCmd, 0],
'foreach' : ['Generic "for each" construction, using XPath-like notation: foreach //vms/vm[@OSTypeId=\'MacOS\'] "print obj.name"', foreachCmd, 0],
'recordDemo':['Record demo: recordDemo Win32 file.dmo 10', recordDemoCmd, 0],
'playbackDemo':['Playback demo: playbackDemo Win32 file.dmo 10', playbackDemoCmd, 0],
'lspci': ['List PCI devices attached to the VM: lspci Win32', lspciCmd, 0],
'attachpci': ['Attach host PCI device to the VM: attachpci Win32 01:00.0', attachpciCmd, 0],
'detachpci': ['Detach host PCI device from the VM: detachpci Win32 01:00.0', detachpciCmd, 0],
'goto': ['Go to line in script (script-only)', gotoCmd, 0]
}
def runCommandArgs(ctx, args):
c = args[0]
if aliases.get(c, None) != None:
c = aliases[c]
ci = commands.get(c, None)
if ci == None:
print "Unknown command: '%s', type 'help' for list of known commands" % (c)
return 0
if ctx['remote'] and ctx['vb'] is None:
if c not in ['connect', 'reconnect', 'help', 'quit']:
print "First connect to remote server with %s command." % (colored('connect', 'blue'))
return 0
return ci[1](ctx, args)
def runCommand(ctx, cmd):
if len(cmd) == 0: return 0
args = split_no_quotes(cmd)
if len(args) == 0: return 0
return runCommandArgs(ctx, args)
#
# To write your own custom commands to vboxshell, create
# file ~/.VirtualBox/shellext.py with content like
#
# def runTestCmd(ctx, args):
# print "Testy test", ctx['vb']
# return 0
#
# commands = {
# 'test': ['Test help', runTestCmd]
# }
# and issue reloadExt shell command.
# This file will also be read automatically on startup and on 'reloadExt'.
#
# Alternatively, shell extensions can be put as separate files into
# ~/.VirtualBox/shexts; they are picked up the same way, which makes it
# easy to exchange shell extensions.
def addExtsFromFile(ctx, cmds, filename):
if not os.path.isfile(filename):
return
d = {}
try:
execfile(filename, d, d)
for (k, v) in d['commands'].items():
if g_fVerbose:
print "customize: adding \"%s\" - %s" % (k, v[0])
cmds[k] = [v[0], v[1], filename]
except:
print "Error loading user extensions from %s" % (filename)
traceback.print_exc()
def checkUserExtensions(ctx, cmds, folder):
folder = str(folder)
name = os.path.join(folder, "shellext.py")
addExtsFromFile(ctx, cmds, name)
    # also check the 'shexts' directory for extension files
shextdir = os.path.join(folder, "shexts")
if not os.path.isdir(shextdir):
return
exts = os.listdir(shextdir)
for e in exts:
# not editor temporary files, please.
if e.endswith('.py'):
addExtsFromFile(ctx, cmds, os.path.join(shextdir, e))
def getHomeFolder(ctx):
if ctx['remote'] or ctx['vb'] is None:
if 'VBOX_USER_HOME' in os.environ:
return os.path.join(os.environ['VBOX_USER_HOME'])
return os.path.join(os.path.expanduser("~"), ".VirtualBox")
else:
return ctx['vb'].homeFolder
def interpret(ctx):
if ctx['remote']:
commands['connect'] = ["Connect to remote VBox instance: connect http://server:18083 user password", connectCmd, 0]
commands['disconnect'] = ["Disconnect from remote VBox instance", disconnectCmd, 0]
commands['reconnect'] = ["Reconnect to remote VBox instance", reconnectCmd, 0]
ctx['wsinfo'] = ["http://localhost:18083", "", ""]
vbox = ctx['vb']
if vbox is not None:
try:
print "Running VirtualBox version %s" % (vbox.version)
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
ctx['perf'] = None # ctx['global'].getPerfCollector(vbox)
else:
ctx['perf'] = None
home = getHomeFolder(ctx)
checkUserExtensions(ctx, commands, home)
if platform.system() in ['Windows', 'Microsoft']:
global g_fHasColors
g_fHasColors = False
hist_file = os.path.join(home, ".vboxshellhistory")
autoCompletion(commands, ctx)
if g_fHasReadline and os.path.exists(hist_file):
readline.read_history_file(hist_file)
    # To be able to print up-to-date host information we collect metrics for the
    # last 150 seconds at most (sample every 10 seconds, keep up to 15 samples).
if ctx['perf']:
try:
ctx['perf'].setup(['*'], [vbox.host], 10, 15)
except:
pass
cmds = []
if g_sCmd is not None:
cmds = g_sCmd.split(';')
it = cmds.__iter__()
while True:
try:
if g_fBatchMode:
cmd = 'runScript %s'% (g_sScriptFile)
elif g_sCmd is not None:
cmd = it.next()
else:
cmd = raw_input(ctx['prompt'])
done = runCommand(ctx, cmd)
if done != 0: break
if g_fBatchMode:
break
except KeyboardInterrupt:
print '====== You can type quit or q to leave'
except StopIteration:
break
except EOFError:
break
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
ctx['global'].waitForEvents(0)
try:
# There is no need to disable metric collection. This is just an example.
        if ctx['perf']:
ctx['perf'].disable(['*'], [vbox.host])
except:
pass
if g_fHasReadline:
readline.write_history_file(hist_file)
def runCommandCb(ctx, cmd, args):
args.insert(0, cmd)
return runCommandArgs(ctx, args)
def runGuestCommandCb(ctx, uuid, guestLambda, args):
mach = machById(ctx, uuid)
if mach == None:
return 0
args.insert(0, guestLambda)
cmdExistingVm(ctx, mach, 'guestlambda', args)
return 0
def main(argv):
#
# Parse command line arguments.
#
parse = OptionParser()
parse.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help = "switch on verbose")
parse.add_option("-a", "--autopath", dest="autopath", action="store_true", default=False, help = "switch on autopath")
parse.add_option("-w", "--webservice", dest="style", action="store_const", const="WEBSERVICE", help = "connect to webservice")
parse.add_option("-b", "--batch", dest="batch_file", help = "script file to execute")
parse.add_option("-c", dest="command_line", help = "command sequence to execute")
parse.add_option("-o", dest="opt_line", help = "option line")
global g_fVerbose, g_sScriptFile, g_fBatchMode, g_fHasColors, g_fHasReadline, g_sCmd
(options, args) = parse.parse_args()
g_fVerbose = options.verbose
style = options.style
if options.batch_file is not None:
g_fBatchMode = True
g_fHasColors = False
g_fHasReadline = False
g_sScriptFile = options.batch_file
if options.command_line is not None:
g_fHasColors = False
g_fHasReadline = False
g_sCmd = options.command_line
params = None
if options.opt_line is not None:
params = {}
strparams = options.opt_line
strparamlist = strparams.split(',')
for strparam in strparamlist:
(key, value) = strparam.split('=')
params[key] = value
if options.autopath:
asLocations = [ os.getcwd(), ];
try: sScriptDir = os.path.dirname(os.path.abspath(__file__));
except: pass; # In case __file__ isn't there.
else:
if platform.system() in [ 'SunOS', ]:
asLocations.append(os.path.join(sScriptDir, 'amd64'));
asLocations.append(sScriptDir);
sPath = os.environ.get("VBOX_PROGRAM_PATH")
if sPath is None:
for sCurLoc in asLocations:
if os.path.isfile(os.path.join(sCurLoc, "VirtualBox")) \
or os.path.isfile(os.path.join(sCurLoc, "VirtualBox.exe")):
print "Autodetected VBOX_PROGRAM_PATH as", sCurLoc
os.environ["VBOX_PROGRAM_PATH"] = sCurLoc
sPath = sCurLoc
break;
if sPath:
sys.path.append(os.path.join(sPath, "sdk", "installer"))
sPath = os.environ.get("VBOX_SDK_PATH")
if sPath is None:
for sCurLoc in asLocations:
if os.path.isfile(os.path.join(sCurLoc, "sdk", "bindings", "VirtualBox.xidl")):
sCurLoc = os.path.join(sCurLoc, "sdk");
print "Autodetected VBOX_SDK_PATH as", sCurLoc
os.environ["VBOX_SDK_PATH"] = sCurLoc
sPath = sCurLoc;
break;
if sPath:
            sTmp = os.path.join(sPath, 'bindings', 'xpcom', 'python');
if os.path.isdir(sTmp):
sys.path.append(sTmp);
del sTmp;
del sPath, asLocations;
#
    # Set up the shell interpreter context and run the interpreter.
#
from vboxapi import VirtualBoxManager
oVBoxMgr = VirtualBoxManager(style, params)
ctx = {
'global': oVBoxMgr,
'vb': oVBoxMgr.vbox,
'const': oVBoxMgr.constants,
'remote': oVBoxMgr.remote,
'type': oVBoxMgr.type,
'run': lambda cmd, args: runCommandCb(ctx, cmd, args),
'guestlambda': lambda uuid, guestLambda, args: runGuestCommandCb(ctx, uuid, guestLambda, args),
'machById': lambda uuid: machById(ctx, uuid),
'argsToMach': lambda args: argsToMach(ctx, args),
'progressBar': lambda p: progressBar(ctx, p),
'typeInGuest': typeInGuest,
'_machlist': None,
'prompt': g_sPrompt,
'scriptLine': 0,
'interrupt': False,
}
interpret(ctx)
oVBoxMgr.deinit()
del oVBoxMgr
if __name__ == '__main__':
main(sys.argv)
| Chilledheart/vbox | src/VBox/Frontends/VBoxShell/vboxshell.py | Python | gpl-2.0 | 120,927 |
# -*- coding: utf-8 -*-
u"""
Created on 2015-7-15
@author: cheng.li
"""
import unittest
import copy
import tempfile
import pickle
import os
from PyFin.DateUtilities import Date
from PyFin.DateUtilities import Schedule
from PyFin.DateUtilities import Period
from PyFin.DateUtilities import Calendar
from PyFin.Enums import TimeUnits
from PyFin.Enums import BizDayConventions
class TestSchedule(unittest.TestCase):
def checkDates(self, s, expected):
if s.size() != len(expected):
self.fail("expected {0:d} dates, found {1}".format(len(expected), s.size()))
for i in range(s.size()):
if s[i] != expected[i]:
self.fail("expected {0} at index found {1}".format(expected[i], s[i]))
def testScheduleInitialize(self):
startDate = Date(2013, 3, 31)
endDate = Date(2013, 6, 30)
tenor = Period('1m')
cal = Calendar('NullCalendar')
sch = Schedule(startDate, endDate, tenor, cal)
expected = [Date(2013, 3, 31), Date(2013, 4, 30), Date(2013, 5, 31), Date(2013, 6, 30)]
for i in range(sch.size()):
self.assertEqual(expected[i], sch[i])
def testScheduleInitializeWithYearly(self):
startDate = Date(2012, 2, 29)
endDate = Date(2013, 3, 1)
tenor = Period('1y')
cal = Calendar('NullCalendar')
sch = Schedule(startDate, endDate, tenor, cal)
expected = [Date(2012, 2, 29), Date(2013, 2, 28), Date(2013, 3, 1)]
for i in range(sch.size()):
self.assertEqual(expected[i], sch[i])
def testDailySchedule(self):
# Jan 2 and Jan 3 are skipped as New Year holiday
# Jan 7 is skipped as weekend
# Jan 8 is adjusted to Jan 9 with following convention
startDate = Date(2012, 1, 1)
s = Schedule(startDate,
startDate + 7,
Period(length=1, units=TimeUnits.Days),
Calendar("China.SSE"),
BizDayConventions.Preceding)
expected = [Date(2011, 12, 30), Date(2012, 1, 4), Date(2012, 1, 5), Date(2012, 1, 6), Date(2012, 1, 9)]
self.checkDates(s, expected)
        # The schedule should skip Saturday 21st and Sunday 22nd.
# Previously, it would adjust them to Friday 20th, resulting
# in three copies of the same date.
startDate = Date(2012, 1, 17)
s = Schedule(startDate,
startDate + 7,
Period(length=1, units=TimeUnits.Days),
Calendar("Target"),
BizDayConventions.Preceding)
expected = [Date(2012, 1, 17), Date(2012, 1, 18), Date(2012, 1, 19), Date(2012, 1, 20), Date(2012, 1, 23),
Date(2012, 1, 24)]
self.checkDates(s, expected)
def testScheduleDeepCopy(self):
startDate = Date(2013, 3, 31)
endDate = Date(2013, 6, 30)
tenor = Period('1m')
cal = Calendar('NullCalendar')
sch = Schedule(startDate, endDate, tenor, cal)
copied_sch = copy.deepcopy(sch)
self.assertEqual(sch, copied_sch)
def testSchedulePickle(self):
startDate = Date(2013, 3, 31)
endDate = Date(2013, 6, 30)
tenor = Period('1m')
cal = Calendar('NullCalendar')
sch = Schedule(startDate, endDate, tenor, cal)
f = tempfile.NamedTemporaryFile('w+b', delete=False)
pickle.dump(sch, f)
f.close()
with open(f.name, 'rb') as f2:
pickled_sch = pickle.load(f2)
self.assertEqual(sch, pickled_sch)
os.unlink(f.name)
| wegamekinglc/Finance-Python | PyFin/tests/DateUtilities/testSchedule.py | Python | mit | 3,611 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# NP 0F 2E /r
# UCOMISS xmm1, xmm2/m32
Buffer = bytes.fromhex('0f2e20')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0xf2e)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'ucomiss')
assert_equal(myDisasm.repr(), 'ucomiss xmm4, dword ptr [rax]')
# VEX.LIG.0F.WIG 2E /r
# VUCOMISS xmm1, xmm2/m32
myVEX = VEX('VEX.LIG.0F.WIG')
myVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}2e10'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x2e)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vucomiss')
assert_equal(myDisasm.repr(), 'vucomiss xmm10, dword ptr [r8]')
assert_equal(myDisasm.infos.Reserved_.VEX.vvvv, 15)
assert_equal(myDisasm.infos.Reserved_.ERROR_OPCODE, 0)
# EVEX.LIG.0F.W0 2E /r
# VUCOMISS xmm1, xmm2/m32{sae}
myEVEX = EVEX('EVEX.LIG.0F.W0')
Buffer = bytes.fromhex('{}2e16'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x2e)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vucomiss')
assert_equal(myDisasm.repr(), 'vucomiss xmm26, dword ptr [r14]')
# 66 0F 2E /r
# UCOMISD xmm1, xmm2/m64
Buffer = bytes.fromhex('660f2e20')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0xf2e)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'ucomisd')
assert_equal(myDisasm.repr(), 'ucomisd xmm4, qword ptr [rax]')
# VEX.LIG.66.0F.WIG 2E /r
# VUCOMISD xmm1, xmm2/m64
myVEX = VEX('VEX.LIG.66.0F.WIG')
myVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}2e10'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x2e)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vucomisd')
assert_equal(myDisasm.repr(), 'vucomisd xmm10, qword ptr [r8]')
assert_equal(myDisasm.infos.Reserved_.ERROR_OPCODE, 0)
# EVEX.LIG.66.0F.W1 2E /r
# VUCOMISD xmm1, xmm2/m64{sae}
myEVEX = EVEX('EVEX.LIG.66.0F.W1')
Buffer = bytes.fromhex('{}2e16'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x2e)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vucomisd')
assert_equal(myDisasm.repr(), 'vucomisd xmm26, qword ptr [r14]')
# VEX.vvvv and EVEX.vvvv are reserved and must be 1111b, otherwise instructions will #UD.
myEVEX = EVEX('EVEX.LIG.66.0F.W1')
myEVEX.vvvv = 0b1000
Buffer = bytes.fromhex('{}2e16'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x2e)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vucomisd')
assert_equal(myDisasm.infos.Reserved_.ERROR_OPCODE, UD_)
| 0vercl0k/rp | src/third_party/beaengine/tests/0f2f.py | Python | mit | 4,051 |
# Copyright 2019 Alfredo de la Fuente - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "Account Invoice Line Lot",
"version": "12.0.1.0.0",
"category": "Invoices & Payments",
"license": "AGPL-3",
"author": "AvanzOSC",
"website": "http://www.avanzosc.es",
"depends": [
"sale_management",
"stock_account"
],
"data": [
"views/account_invoice_view.xml",
],
"installable": True,
}
| oihane/odoo-addons | account_invoice_line_lot/__manifest__.py | Python | agpl-3.0 | 487 |
import string
from configparser import SafeConfigParser
import parser
import glob, os
import mysql.connector
config = SafeConfigParser()
config.read('config.ini')
xsl_config = config['dev.xsl']
db_config = config['dev.db']
os.chdir(xsl_config['files'])
sectors={}
sector_id = 0
companies = []
indicators_index = []
indicators = {}
conn = mysql.connector.connect(**db_config)
cursor = conn.cursor()
file_num = 0
for file in glob.glob("*/*.xls"):
file_num +=1
print('=>',file)
name = parser.parse_name(file)
if name[parser.SECTOR] not in sectors:
sector_id += 1
sector = { 'id':sector_id, 'name':name[parser.SECTOR] ,'companies':int(name[parser.SECTOR_COUNT]),'files':1}
sectors[name[parser.SECTOR]] = sector
cursor.execute("""INSERT INTO fa_sector
VALUES (%s,%s)""",(sector['id'],sector['name']))
else:
sector = sectors[name[parser.SECTOR]]
sector['files'] +=1
#print(name)
data = parser.read_data(os.path.join(xsl_config['files'],file),
name[parser.COMPANY_NAME],name[parser.CURRENCY],name[parser.YEAR_MIN],name[parser.YEAR_MAX])
company = {'file':file,'id':name[parser.COMPANY_ID],'name':name[parser.COMPANY_NAME],
'sector_id': sector['id'], 'sector_name':sector['name'],'count':name[parser.SECTOR_COUNT],'data':data}
if company['name'] not in companies:
companies.append(company['name'])
cursor.execute("""INSERT INTO fa_company
VALUES (%s,%s,%s)""",(company['id'],company['name'],sector['id']))
indicator_id=0
for line in data:
if line['name'] not in indicators:
indicator_id += 1
indicator = ({'id':indicator_id, 'name':line['name'],'units':[line['unit']],'quantities':[line['quantity']]})
indicators[line['name']] = indicator
indicators_index.append(indicator)
else:
indicator = indicators[line['name']]
if line['unit'] not in indicator['units']:
indicator['units'].append(line['unit'])
if line['quantity'] not in indicator['quantities']:
indicator['quantities'].append(line['quantity'])
            assert line['year'][0]=='4','Invalid quarter in period {0}'.format(line['year'])
            assert len(line['year'])==13,'Invalid period name {0}'.format(line['year'])
if line['value']:
cursor.execute("""INSERT INTO fa_value
VALUES (%s,%s,%s,%s,%s)""",(company['id'],indicator['id'],
1 if name[parser.CURRENCY] == 'R' else 2,
line['year'][6:10],line['value']))
for indicator in indicators_index:
    assert len(indicator['quantities'])==1,'Invalid number of quantities {0}'.format(indicator['quantities'])
    assert len(indicator['units'])>0 and len(indicator['units'])<=2,'Invalid number of units {0}'.format(indicator['units'])
unit1 = indicator['units'][0]
unit2 = indicator['units'][1] if len(indicator['units'])>1 else ''
quantity = indicator['quantities'][0]
cursor.execute("""INSERT INTO fa_indicator
VALUES (%s,%s,%s,%s,%s)""",(indicator['id'],indicator['name'],
unit1 if len(unit1)>0 else None,
unit2 if len(unit2)>0 else None,
quantity if len(quantity)>0 else None))
#for sector_name,sector in sectors.items():
# print(sector_name,sector)
#    assert sector['companies']*2==sector['files'],'Invalid number of files {0} in sector directory {1}'.format(sector['files'],sector_name)
#for key,value in units.items():
#if len(value)>1 and list(value)[0]!='руб.' and list(value)[1]!='$':
# print(key,value)
conn.commit()
| cayman/decision | loader/load.py | Python | mit | 3,955 |
from six.moves import range
try:
import carah.string as _string
except ImportError:
_string = None
if _string:
to_hex = _string.to_hex
else:
def to_hex(s):
return ''.join((hex(ord(c))[2:].zfill(2) for c in s))
def from_hex(s):
return ''.join((chr(int(s[2 * i:2 * (i + 1)], 16))
                    for i in range(len(s) // 2)))
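# Example: to_hex('ab') == '6162' and from_hex('6162') == 'ab' (for byte strings).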
# vim: et:sta:bs=2:sw=4:
| bwesterb/sarah | src/string.py | Python | agpl-3.0 | 385 |
#!/usr/bin/env python
from distutils.core import setup
import os,sys
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
snopt7_lib_path = os.getenv('SNOPT7LIB')
if ( snopt7_lib_path == None ):
snopt7_lib_path=os.getcwd()+'/../lib'
config = Configuration('solvers', parent_package, top_path )
# config.add_extension(name='sqic_python',
# sources=['sqic/sqic_python.pyf','sqic/sqic_python.f90'],
# library_dirs=sqic_lib_path,
# libraries=['sqic'],
# extra_f90_compile_args=[sqic_mod_path])
config.add_extension(name='snopt7_python',
sources=['f2py/snopt7_python.pyf','f2py/snopt7_python.f90'],
library_dirs=snopt7_lib_path,
libraries=['snopt7'])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| snopt/snopt-python | solvers/setup.py | Python | mit | 1,045 |
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'MS16-135.ps1',
# list of one or more authors for the module
'Author': ['FuzzySecurity by b33f'],
# more verbose multi-line description of the module
            'Description': ('PowerShell script to escalate privileges (MS16-135).'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
# list of any references/other comments
'Comments': [
'https://github.com/FuzzySecurity/PSKernel-Primitives/tree/master/Sample-Exploits/MS16-135'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
                'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# the PowerShell script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
"""
        # if you're reading in a large, external script that might be updated,
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/privesc/MS16-135.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += ""
return script
| Hackplayers/Empire-mod-Hpys-tests | lib/modules/powershell/privesc/ms16-135.py | Python | bsd-3-clause | 3,311 |
from lino.api import dd
class Companies(dd.Table):
model = 'Company'
column_names = 'name address_column *'
detail_layout = dd.DetailLayout("""
id name
addr1
street_prefix street street_no street_box
addr2""", window_size=(50, 'auto'))
| lino-framework/book | lino_book/projects/addrloc/ui.py | Python | bsd-2-clause | 266 |
# coding: utf-8
from __future__ import unicode_literals, division, print_function
"""
Error handlers for errors originating from the Submission systems.
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
__all_errors__ = ['SubmitError', 'FullQueueError', 'DiskError', 'TimeCancelError', 'MemoryCancelError',
'NodeFailureError']
import re
import abc
import six
from abc import ABCMeta, abstractproperty, abstractmethod
@six.add_metaclass(ABCMeta)
class CorrectorProtocolScheduler(object):
"""
    Abstract class to define the protocol / interface for correction operators. The client code (queue adapters /
    submission script generator / ...) should implement these methods.
"""
@abstractproperty
def name(self):
return str()
@abstractmethod
def exclude_nodes(self, nodes):
"""
        Method to exclude certain nodes from being used in the calculation. It is called when a calculation seems to
        have crashed due to a hardware failure on the specified nodes.
        nodes: list of node numbers that were found to cause problems
        returns True if the nodes could be excluded, False otherwise
"""
return bool
@abstractmethod
def increase_mem(self):
"""
        Method to increase the memory for the calculation. It is called when a calculation seems to have crashed
        due to insufficient memory.
        returns True if the memory could be increased, False otherwise
"""
return bool
@abstractmethod
def increase_time(self):
"""
        Method to increase the time for the calculation. It is called when a calculation seems to
        have crashed due to a time limit.
        returns True if the time could be increased, False otherwise
"""
return bool
@abstractmethod
def increase_cpus(self):
"""
        Method to increase the number of cpus being used in the calculation. It is called when a calculation seems to
        have crashed due to time or memory limits being broken.
        returns True if the number of cpus could be increased, False otherwise
"""
return bool
@six.add_metaclass(ABCMeta)
class CorrectorProtocolApplication(object):
"""
    Abstract class to define the protocol / interface for correction operators. The client code (queue adapters /
    submission script generator / ...) should implement these methods.
"""
@abstractproperty
def name(self):
return str()
@abstractmethod
def decrease_mem(self):
"""
        Method to decrease the memory needed by the calculation. It is called when a calculation seems to have crashed
        due to insufficient memory.
        returns True if the memory could be decreased, False otherwise
"""
return bool
@abstractmethod
def speed_up(self):
"""
        Method to speed up the calculation. It is called when a calculation seems to have broken a time limit.
        returns True if the calculation could be sped up, False otherwise
"""
return bool
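# A minimal sketch (hypothetical, not part of this module) of how client code
# could implement the application-side protocol:
#
#     class ExampleApplicationCorrector(CorrectorProtocolApplication):
#         @property
#         def name(self):
#             return 'example_application'
#
#         def decrease_mem(self):
#             # rewrite the application's input so that it needs less memory
#             return False
#
#         def speed_up(self):
#             # e.g. relax settings or increase parallelism to finish in time
#             return False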
@six.add_metaclass(ABCMeta)
class AbstractError(object):
"""
Error base class
"""
def __init__(self, errmsg, meta_data):
self.errmsg = errmsg
self.meta_data = meta_data if meta_data is not None else {}
def __str__(self):
_message = '%s %s\n' \
' error message : %s \n' \
' meta data : %s' % (self.name, self.__doc__, self.errmsg, str(self.meta_data))
return _message
@property
def name(self):
return self.__class__.__name__
@property
def scheduler_adapter_solutions(self):
"""
        to be implemented by concrete errors returning a list of tuples defining corrections. The first element of the
        tuple should be one of the methods of CorrectorProtocolScheduler, the second element should
        contain the arguments.
"""
return []
@property
def application_adapter_solutions(self):
"""
        to be implemented by concrete errors returning a list of tuples defining corrections. The first element of the
        tuple should be one of the methods of CorrectorProtocolApplication, the second element should
        contain the arguments.
"""
return []
def last_resort_solution(self):
"""
        what to do if everything else fails...
        """
        print('none of the defined solutions for %s returned success...' % self.name)
return
class SubmitError(AbstractError):
"""
Errors occurring at submission. The limits on the cluster may have changed.
"""
class FullQueueError(AbstractError):
"""
    Errors occurring at submission. Too many jobs in the queue / total cpus / ...
"""
class DiskError(AbstractError):
"""
Errors involving problems writing to disk.
"""
class TimeCancelError(AbstractError):
"""
Error due to exceeding the time limit for the job.
.limit will return a list of limits that were broken, None if it could not be determined.
"""
@property
def limit(self):
return self.meta_data.get('broken_limit')
@property
def scheduler_adapter_solutions(self):
return [(CorrectorProtocolScheduler.increase_time,)]
@property
def application_adapter_solutions(self):
return [(CorrectorProtocolApplication.speed_up,)]
class MemoryCancelError(AbstractError):
"""
Error due to exceeding the memory limit for the job.
.limit will return a list of limits that were broken, None if it could not be determined.
"""
@property
def limit(self):
return self.meta_data.get('broken_limit')
@property
def scheduler_adapter_solutions(self):
return [(CorrectorProtocolScheduler.increase_mem,)]
@property
def application_adapter_solutions(self):
return [(CorrectorProtocolApplication.decrease_mem,)]
class NodeFailureError(AbstractError):
"""
Error due the hardware failure of a specific node.
.node will return a list of problematic nodes, None if it could not be determined.
"""
@property
def nodes(self):
return self.meta_data.get('nodes')
@property
def scheduler_adapter_solutions(self):
return [(CorrectorProtocolScheduler.exclude_nodes, [self.nodes])]
@six.add_metaclass(ABCMeta)
class AbstractErrorParser(object):
"""
    Abstract class for parsing errors originating from the scheduler system and errors that are not reported by the
    program itself, e.g. segmentation faults.
    A concrete implementation of this class for a specific scheduler needs to provide the error_definitions property,
    a dictionary specifying the errors to look for:
    error_definitions = {ErrorClass: {
'file_specifier' : {
'string': "the string to be looked for",
            'meta_filter': "a dictionary specifying the regular expressions used to obtain the meta data"
}
}
"""
def __init__(self, err_file, out_file=None, run_err_file=None, batch_err_file=None):
self.files = {'err': err_file, 'out': out_file, 'run_err': run_err_file, 'batch_err': batch_err_file}
self.errors = []
return
@abc.abstractproperty
def error_definitions(self):
return {}
@staticmethod
def extract_metadata(lines, meta_filter):
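        # meta_filter maps a metadata key to [regex, group_index]; every line is
        # matched against the regex and the values of the captured group collected.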
meta_dict = {}
for key in meta_filter.keys():
values = []
for line in lines:
match = re.match(meta_filter[key][0], line)
if match is not None:
values.append(re.match(meta_filter[key][0], line).group(meta_filter[key][1]))
values = sorted(set(values))
meta_dict.update({key: values})
return meta_dict
def parse_single(self, errmsg):
"""
Parse the provided files for the corresponding strings.
"""
found = False
message = None
metadata = None
for k in errmsg.keys():
if self.files[k] is not None:
# print 'parsing ', self.files[k], ' for ', errmsg[k]['string']
try:
with open(self.files[k], mode='r') as f:
lines = f.read().split('\n')
for line in lines:
if errmsg[k]['string'] in line:
message = line
found = True
if found:
metadata = self.extract_metadata(lines, errmsg[k]['meta_filter'])
except (IOError, OSError):
print(self.files[k], 'not found')
pass
except TypeError:
                print('type error', self.files[k], ' has type ', type(self.files[k]), ' should be a string.')
pass
return found, message, metadata
def parse(self):
"""
        Parse for the occurrence of all errors defined in ERRORS
"""
for error in self.error_definitions:
result = self.parse_single(self.error_definitions[error])
if result[0]:
self.errors.append(error(result[1], result[2]))
if len(self.errors) > 0:
print('QUEUE_ERROR FOUND')
for error in self.errors:
print(error)
class SlurmErrorParser(AbstractErrorParser):
"""
Implementation of the error definitions for the Slurm scheduler
"""
@property
def error_definitions(self):
return {
SubmitError: {
'batch_err': {
'string': "Batch job submission failed",
'meta_filter': {}
}
},
FullQueueError: {
'batch_err': {
'string': "sbatch: error: Batch job submission failed: Job violates accounting/QOS policy",
'meta_filter': {}
}
},
MemoryCancelError: {
'err': {
'string': "Exceeded job memory limit",
'meta_filter': {}
}
},
TimeCancelError: {
'err': {
'string': "DUE TO TIME LIMIT",
'meta_filter': {
'time_of_cancel': [r"JOB (\d+) CANCELLED AT (\S*) DUE TO TIME LIMIT", 1]
}
}
},
NodeFailureError: {
'run_err': {
'string': "can't open /dev/ipath, network down",
'meta_filter': {
'nodes': [r"node(\d+)\.(\d+)can't open (\S*), network down \(err=26\)", 1]
}
}
},
AbstractError: {
'out': {
'string': "a string to be found",
'meta_filter': {}
}
}
}
class PBSErrorParser(AbstractErrorParser):
"""
Implementation for the PBS scheduler
"""
@property
def error_definitions(self):
return {
TimeCancelError: {
'out': {
'string': "job killed: walltime",
'meta_filter': {
'broken_limit': [r"job killed: walltime (\d+) exceeded limit (\d+) que std", 1]
}
}
},
AbstractError: {
'out': {
'string': "a string to be found",
'meta_filter': {}
}
}
}
ALL_PARSERS = {'slurm': SlurmErrorParser, 'pbs': PBSErrorParser, 'torque': PBSErrorParser}
def get_parser(scheduler, err_file, out_file=None, run_err_file=None, batch_err_file=None):
"""
    Factory function to provide the parser for the specified scheduler. If the scheduler is not implemented, None is
    returned. The file arguments are strings corresponding to the file names of the out and err files:
err_file stderr of the scheduler
out_file stdout of the scheduler
run_err_file stderr of the application
batch_err_file stderr of the submission
Returns:
None if scheduler is not supported.
"""
cls = ALL_PARSERS.get(scheduler)
return cls if cls is None else cls(err_file, out_file, run_err_file, batch_err_file)
if __name__ == "__main__":
my_parser = get_parser('pbs', err_file='queue.err', out_file='queue.out', run_err_file='run.err',
batch_err_file='sbatch.err')
my_parser.parse()
print('parser.errors', my_parser.errors)
for my_error in my_parser.errors:
print(my_error)
| Dioptas/pymatgen | pymatgen/io/abinitio/scheduler_error_parsers.py | Python | mit | 12,992 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_container_cluster
description:
- A Google Container Engine cluster.
short_description: Creates a GCP Cluster
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
name:
description:
- The name of this cluster. The name must be unique within this project and zone,
      and can be up to 40 characters. Must be lowercase letters, numbers, and hyphens
only. Must start with a letter. Must end with a number or a letter.
required: false
description:
description:
- An optional description of this cluster.
required: false
initial_node_count:
description:
- The number of nodes to create in this cluster. You must ensure that your Compute
Engine resource quota is sufficient for this number of instances. You must also
have available firewall and routes quota. For requests, this field should only
be used in lieu of a "nodePool" object, since this configuration (along with
the "nodeConfig") will be used to create a "NodePool" object with an auto-generated
name. Do not use this and a nodePool at the same time.
required: true
node_config:
description:
- Parameters used in creating the cluster's nodes.
- For requests, this field should only be used in lieu of a "nodePool" object,
since this configuration (along with the "initialNodeCount") will be used to
create a "NodePool" object with an auto-generated name. Do not use this and
a nodePool at the same time. For responses, this field will be populated with
the node configuration of the first node pool. If unspecified, the defaults
are used.
required: false
suboptions:
machine_type:
description:
- The name of a Google Compute Engine machine type (e.g.
- n1-standard-1). If unspecified, the default machine type is n1-standard-1.
required: false
disk_size_gb:
description:
- Size of the disk attached to each node, specified in GB. The smallest allowed
disk size is 10GB. If unspecified, the default disk size is 100GB.
required: false
oauth_scopes:
description:
- The set of Google API scopes to be made available on all of the node VMs
under the "default" service account.
- 'The following scopes are recommended, but not required, and by default
are not included: U(https://www.googleapis.com/auth/compute) is required
for mounting persistent storage on your nodes.'
- U(https://www.googleapis.com/auth/devstorage.read_only) is required for
communicating with gcr.io (the Google Container Registry).
- If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring
are enabled, in which case their required scopes will be added.
required: false
service_account:
description:
- The Google Cloud Platform Service Account to be used by the node VMs. If
no Service Account is specified, the "default" service account is used.
required: false
metadata:
description:
- The metadata key/value pairs assigned to instances in the cluster.
- 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with any other
metadata keys for the project or be one of the four reserved keys: "instance-template",
"kube-env", "startup-script", and "user-data" Values are free-form strings,
and only have meaning as interpreted by the image running in the instance.
The only restriction placed on them is that each value''s size must be less
than or equal to 32 KB.'
- The total size of all keys and values must be less than 512 KB.
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
required: false
image_type:
description:
- The image type to use for this node. Note that for a given image type, the
latest version of it will be used.
required: false
labels:
description:
- 'The map of Kubernetes labels (key/value pairs) to be applied to each node.
        These will be added in addition to any default label(s) that Kubernetes may
apply to the node. In case of conflict in label keys, the applied set may
differ depending on the Kubernetes version -- it''s best to assume the behavior
is undefined and conflicts should be avoided. For more information, including
usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html)
An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
required: false
local_ssd_count:
description:
- The number of local SSD disks to be attached to the node.
      - 'The limit for this value is dependent upon the maximum number of disks
available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits)
for more information.'
required: false
tags:
description:
- The list of instance tags applied to all nodes. Tags are used to identify
valid sources or targets for network firewalls and are specified by the
client during cluster or node pool creation. Each tag within the list must
comply with RFC1035.
required: false
preemptible:
description:
- 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
for more information about preemptible VM instances.'
required: false
type: bool
master_auth:
description:
- The authentication information for accessing the master endpoint.
required: false
suboptions:
username:
description:
- The username to use for HTTP basic authentication to the master endpoint.
required: false
password:
description:
- The password to use for HTTP basic authentication to the master endpoint.
Because the master endpoint is open to the Internet, you should create a
strong password.
required: false
cluster_ca_certificate:
description:
- Base64-encoded public certificate that is the root of trust for the cluster.
required: false
client_certificate:
description:
- Base64-encoded public certificate used by clients to authenticate to the
cluster endpoint.
required: false
client_key:
description:
- Base64-encoded private key used by clients to authenticate to the cluster
endpoint.
required: false
logging_service:
description:
- 'The logging service the cluster should use to write logs. Currently available
options: logging.googleapis.com - the Google Cloud Logging service.'
- none - no logs will be exported from the cluster.
    - if left as an empty string, logging.googleapis.com will be used.
required: false
choices:
- logging.googleapis.com
- none
monitoring_service:
description:
- The monitoring service the cluster should use to write metrics.
- 'Currently available options: monitoring.googleapis.com - the Google Cloud Monitoring
service.'
- none - no metrics will be exported from the cluster.
- if left as an empty string, monitoring.googleapis.com will be used.
required: false
choices:
- monitoring.googleapis.com
- none
network:
description:
- The name of the Google Compute Engine network to which the cluster is connected.
If left unspecified, the default network will be used.
required: false
cluster_ipv4_cidr:
description:
- The IP address range of the container pods in this cluster, in CIDR notation
(e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify
a /14 block in 10.0.0.0/8.
required: false
addons_config:
description:
- Configurations for the various addons available to run in the cluster.
required: false
suboptions:
http_load_balancing:
description:
- Configuration for the HTTP (L7) load balancing controller addon, which makes
it easy to set up HTTP load balancers for services in a cluster.
required: false
suboptions:
disabled:
description:
- Whether the HTTP Load Balancing controller is enabled in the cluster.
When enabled, it runs a small pod in the cluster that manages the load
balancers.
required: false
type: bool
horizontal_pod_autoscaling:
description:
- Configuration for the horizontal pod autoscaling feature, which increases
or decreases the number of replica pods a replication controller has based
on the resource usage of the existing pods.
required: false
suboptions:
disabled:
description:
- Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.
When enabled, it ensures that a Heapster pod is running in the cluster,
which is also used by the Cloud Monitoring service.
required: false
type: bool
subnetwork:
description:
- The name of the Google Compute Engine subnetwork to which the cluster is connected.
required: false
location:
description:
- The list of Google Compute Engine locations in which the cluster's nodes should
be located.
required: false
zone:
description:
- The zone where the cluster is deployed.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a cluster
gcp_container_cluster:
name: my-cluster
initial_node_count: 2
master_auth:
username: cluster_admin
password: my-secret-password
node_config:
machine_type: n1-standard-4
disk_size_gb: 500
zone: us-central1-a
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- The name of this cluster. The name must be unique within this project and zone,
  and can be up to 40 characters. Must be lowercase letters, numbers, and hyphens
only. Must start with a letter. Must end with a number or a letter.
returned: success
type: str
description:
description:
- An optional description of this cluster.
returned: success
type: str
initialNodeCount:
description:
- The number of nodes to create in this cluster. You must ensure that your Compute
Engine resource quota is sufficient for this number of instances. You must also
have available firewall and routes quota. For requests, this field should only
be used in lieu of a "nodePool" object, since this configuration (along with the
"nodeConfig") will be used to create a "NodePool" object with an auto-generated
name. Do not use this and a nodePool at the same time.
returned: success
type: int
nodeConfig:
description:
- Parameters used in creating the cluster's nodes.
- For requests, this field should only be used in lieu of a "nodePool" object, since
this configuration (along with the "initialNodeCount") will be used to create
a "NodePool" object with an auto-generated name. Do not use this and a nodePool
at the same time. For responses, this field will be populated with the node configuration
of the first node pool. If unspecified, the defaults are used.
returned: success
type: complex
contains:
machineType:
description:
- The name of a Google Compute Engine machine type (e.g.
- n1-standard-1). If unspecified, the default machine type is n1-standard-1.
returned: success
type: str
diskSizeGb:
description:
- Size of the disk attached to each node, specified in GB. The smallest allowed
disk size is 10GB. If unspecified, the default disk size is 100GB.
returned: success
type: int
oauthScopes:
description:
- The set of Google API scopes to be made available on all of the node VMs under
the "default" service account.
- 'The following scopes are recommended, but not required, and by default are
not included: U(https://www.googleapis.com/auth/compute) is required for mounting
persistent storage on your nodes.'
- U(https://www.googleapis.com/auth/devstorage.read_only) is required for communicating
with gcr.io (the Google Container Registry).
- If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring
are enabled, in which case their required scopes will be added.
returned: success
type: list
serviceAccount:
description:
- The Google Cloud Platform Service Account to be used by the node VMs. If no
Service Account is specified, the "default" service account is used.
returned: success
type: str
metadata:
description:
- The metadata key/value pairs assigned to instances in the cluster.
- 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server. Additionally,
to avoid ambiguity, keys must not conflict with any other metadata keys for
the project or be one of the four reserved keys: "instance-template", "kube-env",
"startup-script", and "user-data" Values are free-form strings, and only have
meaning as interpreted by the image running in the instance. The only restriction
placed on them is that each value''s size must be less than or equal to 32
KB.'
- The total size of all keys and values must be less than 512 KB.
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
imageType:
description:
- The image type to use for this node. Note that for a given image type, the
latest version of it will be used.
returned: success
type: str
labels:
description:
- 'The map of Kubernetes labels (key/value pairs) to be applied to each node.
        These will be added in addition to any default label(s) that Kubernetes may apply
to the node. In case of conflict in label keys, the applied set may differ
depending on the Kubernetes version -- it''s best to assume the behavior is
undefined and conflicts should be avoided. For more information, including
usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html)
An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
localSsdCount:
description:
- The number of local SSD disks to be attached to the node.
      - 'The limit for this value is dependent upon the maximum number of disks available
on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits)
for more information.'
returned: success
type: int
tags:
description:
- The list of instance tags applied to all nodes. Tags are used to identify
valid sources or targets for network firewalls and are specified by the client
during cluster or node pool creation. Each tag within the list must comply
with RFC1035.
returned: success
type: list
preemptible:
description:
- 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
for more information about preemptible VM instances.'
returned: success
type: bool
masterAuth:
description:
- The authentication information for accessing the master endpoint.
returned: success
type: complex
contains:
username:
description:
- The username to use for HTTP basic authentication to the master endpoint.
returned: success
type: str
password:
description:
- The password to use for HTTP basic authentication to the master endpoint.
Because the master endpoint is open to the Internet, you should create a strong
password.
returned: success
type: str
clusterCaCertificate:
description:
- Base64-encoded public certificate that is the root of trust for the cluster.
returned: success
type: str
clientCertificate:
description:
- Base64-encoded public certificate used by clients to authenticate to the cluster
endpoint.
returned: success
type: str
clientKey:
description:
- Base64-encoded private key used by clients to authenticate to the cluster
endpoint.
returned: success
type: str
loggingService:
description:
- 'The logging service the cluster should use to write logs. Currently available
options: logging.googleapis.com - the Google Cloud Logging service.'
- none - no logs will be exported from the cluster.
  - if left as an empty string, logging.googleapis.com will be used.
returned: success
type: str
monitoringService:
description:
- The monitoring service the cluster should use to write metrics.
- 'Currently available options: monitoring.googleapis.com - the Google Cloud Monitoring
service.'
- none - no metrics will be exported from the cluster.
- if left as an empty string, monitoring.googleapis.com will be used.
returned: success
type: str
network:
description:
- The name of the Google Compute Engine network to which the cluster is connected.
If left unspecified, the default network will be used.
returned: success
type: str
clusterIpv4Cidr:
description:
- The IP address range of the container pods in this cluster, in CIDR notation (e.g.
10.96.0.0/14). Leave blank to have one automatically chosen or specify a /14 block
in 10.0.0.0/8.
returned: success
type: str
addonsConfig:
description:
- Configurations for the various addons available to run in the cluster.
returned: success
type: complex
contains:
httpLoadBalancing:
description:
- Configuration for the HTTP (L7) load balancing controller addon, which makes
it easy to set up HTTP load balancers for services in a cluster.
returned: success
type: complex
contains:
disabled:
description:
- Whether the HTTP Load Balancing controller is enabled in the cluster.
When enabled, it runs a small pod in the cluster that manages the load
balancers.
returned: success
type: bool
horizontalPodAutoscaling:
description:
- Configuration for the horizontal pod autoscaling feature, which increases
or decreases the number of replica pods a replication controller has based
on the resource usage of the existing pods.
returned: success
type: complex
contains:
disabled:
description:
- Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.
When enabled, it ensures that a Heapster pod is running in the cluster,
which is also used by the Cloud Monitoring service.
returned: success
type: bool
subnetwork:
description:
- The name of the Google Compute Engine subnetwork to which the cluster is connected.
returned: success
type: str
location:
description:
- The list of Google Compute Engine locations in which the cluster's nodes should
be located.
returned: success
type: list
endpoint:
description:
- The IP address of this cluster's master endpoint.
- The endpoint can be accessed from the internet at https://username:password@endpoint/
See the masterAuth property of this resource for username and password information.
returned: success
type: str
initialClusterVersion:
description:
- The software version of the master endpoint and kubelets used in the cluster when
it was first created. The version can be upgraded over time.
returned: success
type: str
currentMasterVersion:
description:
- The current software version of the master endpoint.
returned: success
type: str
currentNodeVersion:
description:
- The current version of the node software components. If they are currently at
multiple versions because they're in the process of being upgraded, this reflects
the minimum version of all nodes.
returned: success
type: str
createTime:
description:
- The time the cluster was created, in RFC3339 text format.
returned: success
type: str
nodeIpv4CidrSize:
description:
- The size of the address space on each node for hosting containers.
- This is provisioned from within the container_ipv4_cidr range.
returned: success
type: int
servicesIpv4Cidr:
description:
- The IP address range of the Kubernetes services in this cluster, in CIDR notation
(e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from the
container CIDR.
returned: success
type: str
currentNodeCount:
description:
- The number of nodes currently in the cluster.
returned: success
type: int
expireTime:
description:
- The time the cluster will be automatically deleted in RFC3339 text format.
returned: success
type: str
zone:
description:
- The zone where the cluster is deployed.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(type='str'),
description=dict(type='str'),
initial_node_count=dict(required=True, type='int'),
node_config=dict(
type='dict',
options=dict(
machine_type=dict(type='str'),
disk_size_gb=dict(type='int'),
oauth_scopes=dict(type='list', elements='str'),
service_account=dict(type='str'),
metadata=dict(type='dict'),
image_type=dict(type='str'),
labels=dict(type='dict'),
local_ssd_count=dict(type='int'),
tags=dict(type='list', elements='str'),
preemptible=dict(type='bool'),
),
),
master_auth=dict(
type='dict',
options=dict(
username=dict(type='str'),
password=dict(type='str'),
cluster_ca_certificate=dict(type='str'),
client_certificate=dict(type='str'),
client_key=dict(type='str'),
),
),
logging_service=dict(type='str', choices=['logging.googleapis.com', 'none']),
monitoring_service=dict(type='str', choices=['monitoring.googleapis.com', 'none']),
network=dict(type='str'),
cluster_ipv4_cidr=dict(type='str'),
addons_config=dict(
type='dict',
options=dict(
http_load_balancing=dict(type='dict', options=dict(disabled=dict(type='bool'))),
horizontal_pod_autoscaling=dict(type='dict', options=dict(disabled=dict(type='bool'))),
),
),
subnetwork=dict(type='str'),
location=dict(type='list', elements='str'),
zone=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module))
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'container')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link):
auth = GcpSession(module, 'container')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link):
auth = GcpSession(module, 'container')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'name': module.params.get('name'),
u'description': module.params.get('description'),
u'initialNodeCount': module.params.get('initial_node_count'),
u'nodeConfig': ClusterNodeconfig(module.params.get('node_config', {}), module).to_request(),
u'masterAuth': ClusterMasterauth(module.params.get('master_auth', {}), module).to_request(),
u'loggingService': module.params.get('logging_service'),
u'monitoringService': module.params.get('monitoring_service'),
u'network': module.params.get('network'),
u'clusterIpv4Cidr': module.params.get('cluster_ipv4_cidr'),
u'addonsConfig': ClusterAddonsconfig(module.params.get('addons_config', {}), module).to_request(),
u'subnetwork': module.params.get('subnetwork'),
u'location': module.params.get('location'),
}
request = encode_request(request, module)
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'container')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
return "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/clusters/{name}".format(**module.params)
def collection(module):
return "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/clusters".format(**module.params)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': response.get(u'name'),
u'description': response.get(u'description'),
u'initialNodeCount': module.params.get('initial_node_count'),
u'nodeConfig': ClusterNodeconfig(module.params.get('node_config', {}), module).to_request(),
u'masterAuth': ClusterMasterauth(response.get(u'masterAuth', {}), module).from_response(),
u'loggingService': response.get(u'loggingService'),
u'monitoringService': response.get(u'monitoringService'),
u'network': response.get(u'network'),
u'clusterIpv4Cidr': response.get(u'clusterIpv4Cidr'),
u'addonsConfig': ClusterAddonsconfig(response.get(u'addonsConfig', {}), module).from_response(),
u'subnetwork': response.get(u'subnetwork'),
u'location': response.get(u'location'),
u'endpoint': response.get(u'endpoint'),
u'initialClusterVersion': response.get(u'initialClusterVersion'),
u'currentMasterVersion': response.get(u'currentMasterVersion'),
u'currentNodeVersion': response.get(u'currentNodeVersion'),
u'createTime': response.get(u'createTime'),
u'nodeIpv4CidrSize': response.get(u'nodeIpv4CidrSize'),
u'servicesIpv4Cidr': response.get(u'servicesIpv4Cidr'),
u'currentNodeCount': response.get(u'currentNodeCount'),
u'expireTime': response.get(u'expireTime'),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response)
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']))
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
# Google Container Engine API has its own layout for the create method,
# defined like this:
#
# {
# 'cluster': {
# ... cluster data
# }
# }
#
# Format the request to match the expected input by the API
def encode_request(resource_request, module):
return {'cluster': resource_request}
class ClusterNodeconfig(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'machineType': self.request.get('machine_type'),
u'diskSizeGb': self.request.get('disk_size_gb'),
u'oauthScopes': self.request.get('oauth_scopes'),
u'serviceAccount': self.request.get('service_account'),
u'metadata': self.request.get('metadata'),
u'imageType': self.request.get('image_type'),
u'labels': self.request.get('labels'),
u'localSsdCount': self.request.get('local_ssd_count'),
u'tags': self.request.get('tags'),
u'preemptible': self.request.get('preemptible'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'machineType': self.request.get(u'machineType'),
u'diskSizeGb': self.request.get(u'diskSizeGb'),
u'oauthScopes': self.request.get(u'oauthScopes'),
u'serviceAccount': self.request.get(u'serviceAccount'),
u'metadata': self.request.get(u'metadata'),
u'imageType': self.request.get(u'imageType'),
u'labels': self.request.get(u'labels'),
u'localSsdCount': self.request.get(u'localSsdCount'),
u'tags': self.request.get(u'tags'),
u'preemptible': self.request.get(u'preemptible'),
}
)
class ClusterMasterauth(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'username': self.request.get('username'),
u'password': self.request.get('password'),
u'clusterCaCertificate': self.request.get('cluster_ca_certificate'),
u'clientCertificate': self.request.get('client_certificate'),
u'clientKey': self.request.get('client_key'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'username': self.request.get(u'username'),
u'password': self.request.get(u'password'),
u'clusterCaCertificate': self.request.get(u'clusterCaCertificate'),
u'clientCertificate': self.request.get(u'clientCertificate'),
u'clientKey': self.request.get(u'clientKey'),
}
)
class ClusterAddonsconfig(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'httpLoadBalancing': ClusterHttploadbalancing(self.request.get('http_load_balancing', {}), self.module).to_request(),
u'horizontalPodAutoscaling': ClusterHorizontalpodautoscaling(self.request.get('horizontal_pod_autoscaling', {}), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'httpLoadBalancing': ClusterHttploadbalancing(self.request.get(u'httpLoadBalancing', {}), self.module).from_response(),
u'horizontalPodAutoscaling': ClusterHorizontalpodautoscaling(self.request.get(u'horizontalPodAutoscaling', {}), self.module).from_response(),
}
)
class ClusterHttploadbalancing(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'disabled': self.request.get('disabled')})
def from_response(self):
return remove_nones_from_dict({u'disabled': self.request.get(u'disabled')})
class ClusterHorizontalpodautoscaling(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'disabled': self.request.get('disabled')})
def from_response(self):
return remove_nones_from_dict({u'disabled': self.request.get(u'disabled')})
if __name__ == '__main__':
main()
| valentin-krasontovitsch/ansible | lib/ansible/modules/cloud/google/gcp_container_cluster.py | Python | gpl-3.0 | 37,708 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
import base64
import re
import sickbeard
from sickbeard import common, logger
from sickbeard.exceptions import ex
from sickbeard.encodingKludge import fixStupidEncodings
try:
import xml.etree.cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
class PLEXNotifier:
def __init__(self):
self.name = 'PLEX'
def log(self, msg, level=logger.MESSAGE):
logger.log(u'%s: %s' % (self.name, msg), level)
def _send_to_plex(self, command, host, username=None, password=None):
"""Handles communication to Plex hosts via HTTP API
Args:
command: Dictionary of field/data pairs, encoded via urllib and passed to the legacy xbmcCmds HTTP API
host: Plex host:port
username: Plex API username
password: Plex API password
Returns:
Returns 'OK' for successful commands or False if there was an error
"""
# fill in omitted parameters
if not username:
username = sickbeard.PLEX_USERNAME
if not password:
password = sickbeard.PLEX_PASSWORD
if not host:
self.log(u'No host specified, check your settings', logger.ERROR)
return False
for key in command:
if type(command[key]) == unicode:
command[key] = command[key].encode('utf-8')
enc_command = urllib.urlencode(command)
self.log(u'Encoded API command: ' + enc_command, logger.DEBUG)
url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command)
try:
req = urllib2.Request(url)
# if we have a password, use authentication
if password:
base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
authheader = 'Basic %s' % base64string
req.add_header('Authorization', authheader)
self.log(u'Contacting (with auth header) via url: ' + url, logger.DEBUG)
else:
self.log(u'Contacting via url: ' + url, logger.DEBUG)
response = urllib2.urlopen(req)
result = response.read().decode(sickbeard.SYS_ENCODING)
response.close()
self.log(u'HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
# could return result response = re.compile('<html><li>(.+\w)</html>').findall(result)
return 'OK'
except (urllib2.URLError, IOError) as e:
self.log(u'Couldn\'t contact Plex at ' + fixStupidEncodings(url) + ' ' + ex(e), logger.WARNING)
return False
def _notify_pmc(self, message, title='SickGear', host=None, username=None, password=None, force=False):
"""Internal wrapper for the notify_snatch and notify_download functions
Args:
message: Message body of the notice to send
title: Title of the notice to send
host: Plex Media Client(s) host:port
username: Plex username
password: Plex password
force: Used for the Test method to override config safety checks
Returns:
            Returns a list of results in the format of host:ip:result
            The result will either be 'OK' or False; this is parsed by the calling function.
"""
# suppress notifications if the notifier is disabled but the notify options are checked
if not sickbeard.USE_PLEX and not force:
return False
# fill in omitted parameters
if not host:
host = sickbeard.PLEX_HOST
if not username:
username = sickbeard.PLEX_USERNAME
if not password:
password = sickbeard.PLEX_PASSWORD
result = ''
for curHost in [x.strip() for x in host.split(',')]:
self.log(u'Sending notification to \'%s\' - %s' % (curHost, message))
command = {'command': 'ExecBuiltIn',
'parameter': 'Notification(%s,%s)' % (title.encode('utf-8'), message.encode('utf-8'))}
notify_result = self._send_to_plex(command, curHost, username, password)
if notify_result:
result += '%s:%s' % (curHost, str(notify_result))
return result
##############################################################################
# Public functions
##############################################################################
def notify_snatch(self, ep_name):
if sickbeard.PLEX_NOTIFY_ONSNATCH:
self._notify_pmc(ep_name, common.notifyStrings[common.NOTIFY_SNATCH])
def notify_download(self, ep_name):
if sickbeard.PLEX_NOTIFY_ONDOWNLOAD:
self._notify_pmc(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD])
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_pmc(ep_name + ': ' + lang, common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD])
def notify_git_update(self, new_version='??'):
if sickbeard.USE_PLEX:
update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
self._notify_pmc(update_text + new_version, title)
def test_notify(self, host, username, password, server=False):
if server:
return self.update_library(host=host, username=username, password=password, force=False, test=True)
return self._notify_pmc(
'This is a test notification from SickGear', 'Test', host, username, password, force=True)
@staticmethod
def _get_host_list(host='', enable_secure=False):
"""
Return a list of hosts from a host CSV string
"""
host_list = []
user_list = [x.strip().lower() for x in host.split(',')]
for cur_host in user_list:
if cur_host.startswith('https://'):
host_list += ([], [cur_host])[enable_secure]
else:
host_list += ([], ['https://%s' % cur_host])[enable_secure] + ['http://%s' % cur_host]
return host_list
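    # For example (illustrative values): _get_host_list('192.168.0.10:32400', enable_secure=True)
    # returns ['https://192.168.0.10:32400', 'http://192.168.0.10:32400'], while
    # enable_secure=False yields just ['http://192.168.0.10:32400'].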
def update_library(self, ep_obj=None, host=None, username=None, password=None, force=True, test=False):
"""Handles updating the Plex Media Server host via HTTP API
Plex Media Server currently only supports updating the whole video library and not a specific path.
Returns:
Returns None for no issue, else a string of host with connection issues
"""
if sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY or test:
if not test:
if not sickbeard.PLEX_SERVER_HOST:
msg = u'No Plex Media Server host specified, check your settings'
self.log(msg, logger.DEBUG)
return '%sFail: %s' % (('', '<br />')[test], msg)
if not host:
host = sickbeard.PLEX_SERVER_HOST
if not username:
username = sickbeard.PLEX_USERNAME
if not password:
password = sickbeard.PLEX_PASSWORD
# if username and password were provided, fetch the auth token from plex.tv
token_arg = None
if username and password:
self.log(u'fetching plex.tv credentials for user: ' + username, logger.DEBUG)
req = urllib2.Request('https://plex.tv/users/sign_in.xml', data='')
authheader = 'Basic %s' % base64.encodestring('%s:%s' % (username, password))[:-1]
req.add_header('Authorization', authheader)
req.add_header('X-Plex-Device-Name', 'SickGear')
req.add_header('X-Plex-Product', 'SickGear Notifier')
req.add_header('X-Plex-Client-Identifier', '5f48c063eaf379a565ff56c9bb2b401e')
req.add_header('X-Plex-Version', '1.0')
token_arg = False
try:
response = urllib2.urlopen(req)
auth_tree = etree.parse(response)
token = auth_tree.findall('.//authentication-token')[0].text
token_arg = '?X-Plex-Token=' + token
except urllib2.URLError as e:
self.log(u'Error fetching credentials from plex.tv for user %s: %s' % (username, ex(e)))
except (ValueError, IndexError) as e:
self.log(u'Error parsing plex.tv response: ' + ex(e))
file_location = '' if None is ep_obj else ep_obj.location
host_validate = self._get_host_list(host, all([token_arg]))
hosts_all = {}
hosts_match = {}
hosts_failed = []
for cur_host in host_validate:
response = sickbeard.helpers.getURL(
'%s/library/sections%s' % (cur_host, token_arg or ''), timeout=10,
mute_connect_err=True, mute_read_timeout=True, mute_connect_timeout=True)
if response:
response = sickbeard.helpers.parse_xml(response)
if not response:
hosts_failed.append(cur_host)
continue
sections = response.findall('.//Directory')
if not sections:
self.log(u'Plex Media Server not running on: ' + cur_host)
hosts_failed.append(cur_host)
continue
for section in filter(lambda x: 'show' == x.attrib['type'], sections):
if str(section.attrib['key']) in hosts_all:
continue
keyed_host = [(str(section.attrib['key']), cur_host)]
hosts_all.update(keyed_host)
if not file_location:
continue
for section_location in section.findall('.//Location'):
section_path = re.sub(r'[/\\]+', '/', section_location.attrib['path'].lower())
section_path = re.sub(r'^(.{,2})[/\\]', '', section_path)
location_path = re.sub(r'[/\\]+', '/', file_location.lower())
location_path = re.sub(r'^(.{,2})[/\\]', '', location_path)
if section_path in location_path:
hosts_match.update(keyed_host)
break
if not test:
hosts_try = (hosts_all.copy(), hosts_match.copy())[any(hosts_match)]
host_list = []
for section_key, cur_host in hosts_try.items():
refresh_result = None
if force:
refresh_result = sickbeard.helpers.getURL(
'%s/library/sections/%s/refresh%s' % (cur_host, section_key, token_arg or ''))
if (force and '' == refresh_result) or not force:
host_list.append(cur_host)
else:
hosts_failed.append(cur_host)
self.log(u'Error updating library section for Plex Media Server: %s' % cur_host, logger.ERROR)
if len(hosts_failed) == len(host_validate):
self.log(u'No successful Plex host updated')
return 'Fail no successful Plex host updated: %s' % ', '.join(host for host in hosts_failed)
else:
hosts = ', '.join(set(host_list))
if len(hosts_match):
self.log(u'Hosts updating where TV section paths match the downloaded show: %s' % hosts)
else:
self.log(u'Updating all hosts with TV sections: %s' % hosts)
return ''
hosts = [
host.replace('http://', '') for host in filter(lambda x: x.startswith('http:'), hosts_all.values())]
secured = [
host.replace('https://', '') for host in filter(lambda x: x.startswith('https:'), hosts_all.values())]
failed = [
host.replace('http://', '') for host in filter(lambda x: x.startswith('http:'), hosts_failed)]
failed_secured = ', '.join(filter(
lambda x: x not in hosts,
[host.replace('https://', '') for host in filter(lambda x: x.startswith('https:'), hosts_failed)]))
return '<br />' + '<br />'.join(result for result in [
('', 'Fail: username/password when fetching credentials from plex.tv')[False is token_arg],
('', 'OK (secure connect): %s' % ', '.join(secured))[any(secured)],
('', 'OK%s: %s' % ((' (legacy connect)', '')[None is token_arg], ', '.join(hosts)))[any(hosts)],
('', 'Fail (secure connect): %s' % failed_secured)[any(failed_secured)],
('', 'Fail%s: %s' % ((' (legacy connect)', '')[None is token_arg], failed))[any(failed)]] if result)
notifier = PLEXNotifier
| jetskijoe/SickGear | sickbeard/notifiers/plex.py | Python | gpl-3.0 | 13,857 |
# -*- coding: utf-8 -*-
import os
try:
from PIL import Image as PILImage
except ImportError:
try:
import Image as PILImage
except ImportError:
raise ImportError("The Python Imaging Library was not found.")
import logging
logger = logging.getLogger(__name__)
from django.db import models
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from filer import settings as filer_settings
from filer.models.filemodels import File
from filer.utils.filer_easy_thumbnails import FilerThumbnailer
from filer.utils.pil_exif import get_exif_for_file
class BaseImage(File):
SIDEBAR_IMAGE_WIDTH = 210
DEFAULT_THUMBNAILS = {
'admin_clipboard_icon': {'size': (32, 32), 'crop': True,
'upscale': True},
'admin_sidebar_preview': {'size': (SIDEBAR_IMAGE_WIDTH, 10000)},
'admin_directory_listing_icon': {'size': (48, 48),
'crop': True, 'upscale': True},
'admin_tiny_icon': {'size': (32, 32), 'crop': True, 'upscale': True},
}
file_type = 'Image'
_icon = "image"
_height = models.IntegerField(null=True, blank=True)
_width = models.IntegerField(null=True, blank=True)
default_alt_text = models.CharField(_('default alt text'), max_length=255, blank=True, null=True)
default_caption = models.CharField(_('default caption'), max_length=255, blank=True, null=True)
subject_location = models.CharField(_('subject location'), max_length=64, null=True, blank=True,
default=None)
@classmethod
def matches_file_type(cls, iname, ifile, request):
        # This was originally in admin/clipboardadmin.py, wrapped in a try/except.
        # It has been moved here without the try/except because it is unclear what
        # kind of exception that code could generate; the bare except was only
        # obscuring errors.
        # --Dave Butler <croepha@gmail.com>
iext = os.path.splitext(iname)[1].lower()
return iext in ['.jpg', '.jpeg', '.png', '.gif']
def save(self, *args, **kwargs):
self.has_all_mandatory_data = self._check_validity()
try:
            # do this more efficiently somehow?
self.file.seek(0)
self._width, self._height = PILImage.open(self.file).size
except Exception:
# probably the image is missing. nevermind.
pass
super(BaseImage, self).save(*args, **kwargs)
def _check_validity(self):
if not self.name:
return False
return True
def sidebar_image_ratio(self):
if self.width:
return float(self.width) / float(self.SIDEBAR_IMAGE_WIDTH)
else:
return 1.0
def _get_exif(self):
if hasattr(self, '_exif_cache'):
return self._exif_cache
else:
if self.file:
self._exif_cache = get_exif_for_file(self.file)
else:
self._exif_cache = {}
return self._exif_cache
exif = property(_get_exif)
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, permission_type):
"""
Return true if the current user has permission on this
image. Return the string 'ALL' if the user has all rights.
"""
user = request.user
if not user.is_authenticated():
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
elif self.folder:
return self.folder.has_generic_permission(request, permission_type)
else:
return False
@property
def label(self):
if self.name in ['', None]:
return self.original_filename or 'unnamed file'
else:
return self.name
@property
def width(self):
return self._width or 0
@property
def height(self):
return self._height or 0
def _generate_thumbnails(self, required_thumbnails):
_thumbnails = {}
for name, opts in six.iteritems(required_thumbnails):
try:
opts.update({'subject_location': self.subject_location})
thumb = self.file.get_thumbnail(opts)
_thumbnails[name] = thumb.url
except Exception as e:
                # Catch the exception and manage it. We can re-raise it for debugging
                # purposes and/or just log it, provided the user configured a proper
                # logging configuration.
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while generating thumbnail: %s', e)
if filer_settings.FILER_DEBUG:
raise
return _thumbnails
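    # For example, passing {'admin_tiny_icon': {'size': (32, 32), 'crop': True, 'upscale': True}}
    # (one of the DEFAULT_THUMBNAILS above) yields {'admin_tiny_icon': <thumbnail URL>} once
    # subject_location has been merged into the options.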
@property
def icons(self):
required_thumbnails = dict(
(size, {'size': (int(size), int(size)),
'crop': True,
'upscale': True,
'subject_location': self.subject_location})
for size in filer_settings.FILER_ADMIN_ICON_SIZES)
return self._generate_thumbnails(required_thumbnails)
@property
def thumbnails(self):
return self._generate_thumbnails(BaseImage.DEFAULT_THUMBNAILS)
@property
def easy_thumbnails_thumbnailer(self):
tn = FilerThumbnailer(
file=self.file, name=self.file.name,
source_storage=self.file.source_storage,
thumbnail_storage=self.file.thumbnail_storage,
thumbnail_basedir=self.file.thumbnail_basedir)
return tn
class Meta:
app_label = 'filer'
verbose_name = _('image')
verbose_name_plural = _('images')
abstract = True
| mkoistinen/django-filer | filer/models/abstract.py | Python | bsd-3-clause | 6,117 |
# -*- coding: utf-8 -*-
from sklearn.preprocessing import StandardScaler
from sklearn_pandas import DataFrameMapper
from ..base import FactorTransformer
from ..enums import FactorType
class FactorStandardizer(FactorTransformer):
def __init__(self, copy=True, out_container=False, with_mean=True, with_std=True):
super(FactorStandardizer, self).__init__(copy=copy, out_container=out_container)
self.with_mean = with_mean
self.with_std = with_std
def _build_mapper(self, factor_container):
data = factor_container.data
data_mapper = [([factor_name], self._get_mapper(factor_container.property[factor_name]['type']))
for factor_name in data.columns]
return DataFrameMapper(data_mapper)
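    # _get_mapper (below) leaves industry-code factors untouched (None mapper) and assigns
    # every other factor type a per-column StandardScaler configured by the with_mean /
    # with_std flags passed to __init__.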
def _get_mapper(self, factor_type):
if factor_type == FactorType.INDUSTY_CODE:
return None
else:
            return StandardScaler(copy=self.copy, with_mean=self.with_mean, with_std=self.with_std)
| iLampard/alphaware | alphaware/preprocess/standardizer.py | Python | apache-2.0 | 996 |
import sys
#--------------------------------------------------
def print_multiplication_table():
for x in range(1, 13):
print
for y in range(1, 13):
print '{:>4}'.format(x * y),
print
print
print_multiplication_table()
print
print
#--------------------------------------------------
def pyramid():
height = int(raw_input("\n\nPlease enter the height of the pyramid: "))
for row in xrange(1, height+1):
filler = ((height-row) * 3) * ' '
sys.stdout.write(filler)
for col in range(1, row + 1):
sys.stdout.write('* ')
print
pyramid()
print
print
print
#--------------------------------------------------
# Note there are more graceful ways to write this
# but we will address that later.
def factorial():
while True:
x = int(raw_input("\n\nFACTORIAL: Please enter a positive number. Or a negative number to stop. "))
if x < 0:
print "STOPPING"
break
elif x == 0:
print 1
else:
result = 1
while x > 0:
result = result * x
x = x -1
print result
factorial()
print
print
#--------------------------------------------------
# Note there are more graceful ways to write this
# but we will address that later.
def fibonacci():
while True:
x = int(raw_input("\n\nFIBONACCI: Please enter a positive number. Zero or a negative number to stop. "))
if x < 1:
print "STOPPING"
return
else:
a, b = 0, 1
counter = 0
while counter < x:
print a,
counter = counter + 1
a, b = b, a+b
fibonacci()
print
print
#--------------------------------------------------
def tip_calculator(bill_amt):
print "Your bill amount is $%.2f" % (round(bill_amt, 2))
    print "A 10 percent tip: $%.2f" % round(bill_amt * 0.10, 2), "totalling $%.2f" % round(bill_amt * 1.10, 2)
    print "A 15 percent tip: $%.2f" % round(bill_amt * 0.15, 2), "totalling $%.2f" % round(bill_amt * 1.15, 2)
    print "A 20 percent tip: $%.2f" % round(bill_amt * 0.20, 2), "totalling $%.2f" % round(bill_amt * 1.20, 2)
print "An excellent tip: $%.2f" % round(bill_amt, 2), "totalling $%.2f" % round(bill_amt * 2, 2)
while True:
bill = float(raw_input("\n\nPlease enter your total bill or zero to stop: "))
if bill <= 0:
break
tip_calculator(bill)
print
print
#--------------------------------------------------
def is_pythagorean(a, b, c):
    if a**2 + b**2 == c**2:
        print a, b, c, "IS PYTHAGOREAN"
    else:
        print a, b, c, "IS NOT PYTHAGOREAN"
print
print
is_pythagorean(3, 4, 6)
is_pythagorean(3, 4, 8)
print
print
#--------------------------------------------------
def print_pythagoreans_under_100():
counter = 0
for a in range (1,100):
for b in range (a+1,100):
for c in range (1, 100):
if a**2 + b**2 == c**2:
counter = counter + 1
print "{:>2} {:>2} {:>2} ----> {:>4} + {:>4} = {:>4}".format(a, b, c, a**2, b**2, c**2)
print
print
print
print
print_pythagoreans_under_100()
print
print
print
print
#--------------------------------------------------
def identify_triangle():
while True:
a = int(raw_input("\n\nPlease enter side a: "))
b = int(raw_input("Please enter side b: "))
c = int(raw_input("Please enter side c: "))
if a < 0 or b < 0 or c < 0:
print "Please enter positive values."
elif a == 0 and b == 0 and c == 0:
print "Stopping"
break
elif ( a > (b + c) ) or ( b > (a + c) ) or ( c > (a + b) ):
print "{:>3} {:>3} {:>3} {:>3} {:>12}".format(a, b, c, "NO", "")
else:
if a == b == c:
print "{:>3} {:>3} {:>3} {:>3} {}".format(a, b, c, "YES", "EQUILATERAL")
continue
if a**2 + b**2 == c**2:
print "{:>3} {:>3} {:>3} {:>3} {}".format(a, b, c, "YES", "PYTHAGOREAN")
continue
if a == b or b == c or a == c:
print "{:>3} {:>3} {:>3} {:>3} {}".format(a, b, c, "YES", "ISOSCELES")
continue
if (a == b + c) or (b == a + c) or (c == a + b):
print "{:>3} {:>3} {:>3} {:>3} {}".format(a, b, c, "YES", "DEGENERATE")
continue
            # every special case above ends with `continue`, so reaching this point
            # means the sides form a plain (non-degenerate, scalene) triangle
            print "{:>3} {:>3} {:>3} {:>3} {}".format(a, b, c, "YES", "")
print
print
print
print
identify_triangle()
print
print
print
print
#--------------------------------------------------
| wenduowang/git_home | python/MSBA/Bootcamp/problem_set_02_solution.py | Python | gpl-3.0 | 4,928 |
# -*- coding: utf-8 -*-
"""
tossi.__about__
~~~~~~~~~~~~~~~
"""
__version__ = '0.3.1'
__license__ = 'BSD'
__author__ = 'What! Studio'
__maintainer__ = 'Heungsub Lee'
__maintainer_email__ = 'sub@nexon.co.kr'
| what-studio/tossi | tossi/__about__.py | Python | bsd-3-clause | 213 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import difflib
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
# Rootdir defaults to the directory **above** the repo-infra dir.
rootdir = os.path.dirname(__file__) + "/../../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "repo-infra/verify/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh" or extension == "py":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
print('File %s is missing the year' % filename, file=verbose_out)
return False
    # Replace all occurrences of the regex "2014|2015|2016|2017" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git',
'cluster/env.sh', 'vendor', 'test/e2e/generated/bindata.go',
'repo-infra/verify/boilerplate/test', '.glide']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014, 2015 or 2016, company holder names can be anything
regexs["date"] = re.compile( '(2014|2015|2016|2017)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
| mdshuai/service-catalog | vendor/github.com/kubernetes/repo-infra/verify/boilerplate/boilerplate.py | Python | apache-2.0 | 6,302 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-02-12 19:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0026_auto_20170211_2027'),
]
operations = [
migrations.AddField(
model_name='category',
name='published_in_products',
field=models.BooleanField(default='', verbose_name='Published in all products'),
),
]
| skylifewww/pangolin-fog | product/migrations/0027_category_published_in_products.py | Python | mit | 507 |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run a group of subprocesses and then finish."""
import hashlib
import multiprocessing
import os
import random
import signal
import subprocess
import sys
import tempfile
import time
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
have_alarm = False
def alarm_handler(unused_signum, unused_frame):
global have_alarm
have_alarm = False
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
signal.signal(signal.SIGALRM, alarm_handler)
def shuffle_iteratable(it):
"""Return an iterable that randomly walks it"""
# take a random sampling from the passed in iterable
  # we take an element with probability 1/p and rapidly increase
# p as we take elements - this gives us a somewhat random set of values before
# we've seen all the values, but starts producing values without having to
# compute ALL of them at once, allowing tests to start a little earlier
nextit = []
p = 1
for val in it:
if random.randint(0, p) == 0:
p = min(p*2, 100)
yield val
else:
nextit.append(val)
# after taking a random sampling, we shuffle the rest of the elements and
# yield them
random.shuffle(nextit)
for val in nextit:
yield val
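# Note: shuffle_iteratable yields every element of the input exactly once,
# only the order is randomized. Illustrative check (hypothetical usage):
#   sorted(shuffle_iteratable(range(5))) == list(range(5))   # True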
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
_COLORS = {
'red': [ 31, 0 ],
'green': [ 32, 0 ],
'yellow': [ 33, 0 ],
'lightgray': [ 37, 0],
'gray': [ 30, 1 ],
}
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'TIMEOUT': 'red',
'PASSED': 'green',
'START': 'gray',
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
}
def message(tag, message, explanatory_text=None, do_newline=False):
try:
sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
_BEGINNING_OF_LINE,
_CLEAR_LINE,
'\n%s' % explanatory_text if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1],
_COLORS[_TAG_COLOR[tag]][0],
tag,
message,
'\n' if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
except:
pass
def which(filename):
if '/' in filename:
return filename
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, filename)):
return os.path.join(path, filename)
raise Exception('%s not found' % filename)
class JobSpec(object):
"""Specifies what to run for a job."""
def __init__(self, cmdline, shortname=None, environ={}, hash_targets=[]):
"""
Arguments:
cmdline: a list of arguments to pass as the command line
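      shortname: a short name used in status messages (defaults to cmdline[0])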
environ: a dictionary of environment variables to set in the child process
hash_targets: which files to include in the hash representing the jobs version
(or empty, indicating the job should not be hashed)
"""
self.cmdline = cmdline
self.environ = environ
self.shortname = cmdline[0] if shortname is None else shortname
self.hash_targets = hash_targets or []
def identity(self):
return '%r %r %r' % (self.cmdline, self.environ, self.hash_targets)
def __hash__(self):
return hash(self.identity())
def __cmp__(self, other):
return self.identity() == other.identity()
class Job(object):
"""Manages one job."""
def __init__(self, spec, bin_hash, newline_on_success, travis):
self._spec = spec
self._bin_hash = bin_hash
self._tempfile = tempfile.TemporaryFile()
env = os.environ.copy()
for k, v in spec.environ.iteritems():
env[k] = v
self._start = time.time()
self._process = subprocess.Popen(args=spec.cmdline,
stderr=subprocess.STDOUT,
stdout=self._tempfile,
env=env)
self._state = _RUNNING
self._newline_on_success = newline_on_success
self._travis = travis
message('START', spec.shortname, do_newline=self._travis)
def state(self, update_cache):
"""Poll current state of the job. Prints messages at completion."""
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
if self._process.returncode != 0:
self._state = _FAILURE
self._tempfile.seek(0)
stdout = self._tempfile.read()
message('FAILED', '%s [ret=%d]' % (
self._spec.shortname, self._process.returncode), stdout)
else:
self._state = _SUCCESS
message('PASSED', '%s [time=%.1fsec]' % (self._spec.shortname, elapsed),
do_newline=self._newline_on_success or self._travis)
if self._bin_hash:
update_cache.finished(self._spec.identity(), self._bin_hash)
elif self._state == _RUNNING and time.time() - self._start > 300:
message('TIMEOUT', self._spec.shortname, do_newline=self._travis)
self.kill()
return self._state
def kill(self):
if self._state == _RUNNING:
self._state = _KILLED
self._process.terminate()
class Jobset(object):
"""Manages one run of jobs."""
def __init__(self, check_cancelled, maxjobs, newline_on_success, travis, cache):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
self._failures = 0
self._completed = 0
self._maxjobs = maxjobs
self._newline_on_success = newline_on_success
self._travis = travis
self._cache = cache
def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while len(self._running) >= self._maxjobs:
if self.cancelled(): return False
self.reap()
if self.cancelled(): return False
if spec.hash_targets:
bin_hash = hashlib.sha1()
for fn in spec.hash_targets:
with open(which(fn)) as f:
bin_hash.update(f.read())
bin_hash = bin_hash.hexdigest()
should_run = self._cache.should_run(spec.identity(), bin_hash)
else:
bin_hash = None
should_run = True
if should_run:
self._running.add(Job(spec,
bin_hash,
self._newline_on_success,
self._travis))
return True
def reap(self):
"""Collect the dead jobs."""
while self._running:
dead = set()
for job in self._running:
st = job.state(self._cache)
if st == _RUNNING: continue
if st == _FAILURE: self._failures += 1
if st == _KILLED: self._failures += 1
dead.add(job)
for job in dead:
self._completed += 1
self._running.remove(job)
if dead: return
if (not self._travis):
message('WAITING', '%d jobs running, %d complete, %d failed' % (
len(self._running), self._completed, self._failures))
global have_alarm
if not have_alarm:
have_alarm = True
signal.alarm(10)
signal.pause()
def cancelled(self):
"""Poll for cancellation."""
if self._cancelled: return True
if not self._check_cancelled(): return False
for job in self._running:
job.kill()
self._cancelled = True
return True
def finish(self):
while self._running:
if self.cancelled(): pass # poll cancellation
self.reap()
return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
# cache class that caches nothing
class NoCache(object):
def should_run(self, cmdline, bin_hash):
return True
def finished(self, cmdline, bin_hash):
pass
def run(cmdlines,
check_cancelled=_never_cancelled,
maxjobs=None,
newline_on_success=False,
travis=False,
cache=None):
js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis,
cache if cache is not None else NoCache())
if not travis:
cmdlines = shuffle_iteratable(cmdlines)
else:
cmdlines = sorted(cmdlines, key=lambda x: x.shortname)
for cmdline in cmdlines:
if not js.start(cmdline):
break
return js.finish()
| chenbaihu/grpc | tools/run_tests/jobset.py | Python | bsd-3-clause | 9,808 |
# -*- coding: utf-8 -*-
#
# Based on initial version of DualMetaFix. Copyright (C) 2013 Kevin Hendricks
# Changes for KindleButler Copyright (C) 2014 Pawel Jastrzebski <pawelj@vulturis.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import struct
from io import BytesIO
class DualMetaFixException(Exception):
pass
# palm database offset constants
number_of_pdb_records = 76
first_pdb_record = 78
# important rec0 offsets
mobi_header_base = 16
mobi_header_length = 20
mobi_version = 36
title_offset = 84
def getint(data, ofs, sz='L'):
i, = struct.unpack_from('>'+sz, data, ofs)
return i
def writeint(data, ofs, n, slen='L'):
if slen == 'L':
return data[:ofs]+struct.pack('>L', n)+data[ofs+4:]
else:
return data[:ofs]+struct.pack('>H', n)+data[ofs+2:]
def getsecaddr(datain, secno):
nsec = getint(datain, number_of_pdb_records, 'H')
    if (secno < 0) or (secno >= nsec):
emsg = 'requested section number %d out of range (nsec=%d)' % (secno, nsec)
raise DualMetaFixException(emsg)
secstart = getint(datain, first_pdb_record+secno*8)
if secno == nsec-1:
secend = len(datain)
else:
secend = getint(datain, first_pdb_record+(secno+1)*8)
return secstart, secend
def readsection(datain, secno):
secstart, secend = getsecaddr(datain, secno)
return datain[secstart:secend]
# overwrite section - must be exact same length as original
def replacesection(datain, secno, secdata):
secstart, secend = getsecaddr(datain, secno)
seclen = secend - secstart
if len(secdata) != seclen:
raise DualMetaFixException('section length change in replacesection')
datalst = [datain[0:secstart], secdata, datain[secend:]]
dataout = b''.join(datalst)
return dataout
def get_exth_params(rec0):
ebase = mobi_header_base + getint(rec0, mobi_header_length)
if rec0[ebase:ebase+4] != b'EXTH':
raise DualMetaFixException('EXTH tag not found where expected')
elen = getint(rec0, ebase+4)
enum = getint(rec0, ebase+8)
rlen = len(rec0)
return ebase, elen, enum, rlen
def add_exth(rec0, exth_num, exth_bytes):
ebase, elen, enum, rlen = get_exth_params(rec0)
newrecsize = 8+len(exth_bytes)
newrec0 = rec0[0:ebase+4]+struct.pack('>L', elen+newrecsize)+struct.pack('>L', enum+1)+struct.pack('>L', exth_num)\
+ struct.pack('>L', newrecsize)+exth_bytes+rec0[ebase+12:]
newrec0 = writeint(newrec0, title_offset, getint(newrec0, title_offset)+newrecsize)
# keep constant record length by removing newrecsize null bytes from end
sectail = newrec0[-newrecsize:]
if sectail != b'\0'*newrecsize:
raise DualMetaFixException('add_exth: trimmed non-null bytes at end of section')
newrec0 = newrec0[0:rlen]
return newrec0
def read_exth(rec0, exth_num):
exth_values = []
ebase, elen, enum, rlen = get_exth_params(rec0)
ebase += 12
while enum > 0:
exth_id = getint(rec0, ebase)
if exth_id == exth_num:
# We might have multiple exths, so build a list.
exth_values.append(rec0[ebase+8:ebase+getint(rec0, ebase+4)])
enum -= 1
ebase = ebase+getint(rec0, ebase+4)
return exth_values
def del_exth(rec0, exth_num):
ebase, elen, enum, rlen = get_exth_params(rec0)
ebase_idx = ebase+12
enum_idx = 0
while enum_idx < enum:
exth_id = getint(rec0, ebase_idx)
exth_size = getint(rec0, ebase_idx+4)
if exth_id == exth_num:
newrec0 = rec0
newrec0 = writeint(newrec0, title_offset, getint(newrec0, title_offset)-exth_size)
newrec0 = newrec0[:ebase_idx]+newrec0[ebase_idx+exth_size:]
newrec0 = newrec0[0:ebase+4]+struct.pack('>L', elen-exth_size)+struct.pack('>L', enum-1)+newrec0[ebase+12:]
newrec0 += b'\0'*exth_size
if rlen != len(newrec0):
raise DualMetaFixException('del_exth: incorrect section size change')
return newrec0
enum_idx += 1
ebase_idx = ebase_idx+exth_size
return rec0
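# Illustrative flow (mirroring what DualMobiMetaFix below does): the EXTH helpers
# are chained on record 0 of the palm database, e.g.
#   rec0 = readsection(data, 0)
#   rec0 = del_exth(rec0, 501)            # drop any existing cdetype record
#   rec0 = add_exth(rec0, 501, b'EBOK')   # tag the book as a store e-book
#   rec0 = add_exth(rec0, 113, asin)      # attach the ASIN
#   data = replacesection(data, 0, rec0)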
class DualMobiMetaFix:
def __init__(self, infile, asin,cloud):
self.datain = open(infile, 'rb').read()
self.datain_rec0 = readsection(self.datain, 0)
# noinspection PyArgumentList
self.asin = asin
# in the first mobi header
# add 501 to "EBOK", add 113 as asin, DO NOT TOUCH 504 as asin
#rec0 = del_exth(rec0, 504)
# rec0 = add_exth(rec0, 504, self.asin)
rec0 = self.datain_rec0
if cloud=='no':
rec0 = del_exth(rec0, 501)
rec0 = del_exth(rec0, 113)
rec0 = add_exth(rec0, 501, b'EBOK')
rec0 = add_exth(rec0, 113, self.asin)
else:# do not modify ASIN for cloud books
rec0 = del_exth(rec0, 501)
rec0 = add_exth(rec0, 501, b'PDOC')
self.datain = replacesection(self.datain, 0, rec0)
ver = getint(self.datain_rec0, mobi_version)
self.combo = (ver != 8)
if not self.combo:
return
exth121 = read_exth(self.datain_rec0, 121)
if len(exth121) == 0:
self.combo = False
return
        else:
# only pay attention to first exth121
# (there should only be one)
datain_kf8, = struct.unpack_from('>L', exth121[0], 0)
if datain_kf8 == 0xffffffff:
self.combo = False
return
self.datain_kfrec0 = readsection(self.datain, datain_kf8)
# in the second header
# add 501 to "EBOK", add 113 as asin, DO NOT TOUCH 504 as asin
#rec0 = del_exth(rec0, 504)
#rec0 = add_exth(rec0, 504, self.asin)
rec0 = self.datain_kfrec0
if cloud=='no':
rec0 = del_exth(rec0, 501)
rec0 = del_exth(rec0, 113)
rec0 = add_exth(rec0, 501, b'EBOK')
rec0 = add_exth(rec0, 113, self.asin)
else:
rec0 = del_exth(rec0, 501)
rec0 = add_exth(rec0, 501, b'PDOC')
self.datain = replacesection(self.datain, datain_kf8, rec0)
def getresult(self):
# noinspection PyArgumentList
return BytesIO(bytes(self.datain)), sys.getsizeof(bytes(self.datain)) | knigophil/KindleWisper | KindleButler/DualMetaFix.py | Python | gpl-3.0 | 6,909 |
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='snmp2canopsis',
version='0.4',
description='Send SNMP trap to Canopsis/AMQP',
long_description=long_description,
author='Mathieu Virbel',
author_email='mat@meltingrocks.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
install_requires=["kombu", "pysnmp", "Logbook"],
packages=["snmp2canopsis"],
entry_points={
'console_scripts': [
'snmp2canopsis=snmp2canopsis.snmp2canopsis:main',
'cat-snmp2canopsis=snmp2canopsis.manage:main'
],
},
)
| tito/snmp2canopsis | setup.py | Python | mit | 1,083 |
"""Tests for dumping."""
from attr import asdict, astuple
from hypothesis import given
from hypothesis.strategies import data, lists, sampled_from
from cattr.converters import Converter, UnstructureStrategy
from . import (
dicts_of_primitives,
enums_of_primitives,
nested_classes,
seqs_of_primitives,
sets_of_primitives,
simple_classes,
)
unstruct_strats = sampled_from(
[UnstructureStrategy.AS_DICT, UnstructureStrategy.AS_TUPLE]
)
# Primitive stuff first.
@given(seqs_of_primitives, unstruct_strats)
def test_seq_unstructure(seq_and_type, dump_strat):
"""Dumping a sequence of primitives is a simple copy operation."""
converter = Converter(unstruct_strat=dump_strat)
assert converter.unstruct_strat is dump_strat
seq = seq_and_type[0]
dumped = converter.unstructure(seq)
assert dumped == seq
if not isinstance(seq, tuple):
assert dumped is not seq
assert type(dumped) is type(seq)
@given(sets_of_primitives, unstruct_strats)
def test_set_unstructure(set_and_type, dump_strat):
"""Dumping a set of primitives is a simple copy operation."""
converter = Converter(unstruct_strat=dump_strat)
assert converter.unstruct_strat is dump_strat
set = set_and_type[0]
dumped = converter.unstructure(set)
assert dumped == set
if set:
assert dumped is not set
assert type(dumped) is type(set)
@given(dicts_of_primitives, unstruct_strats)
def test_mapping_unstructure(map_and_type, dump_strat):
"""Dumping a mapping of primitives is a simple copy operation."""
converter = Converter(unstruct_strat=dump_strat)
mapping = map_and_type[0]
dumped = converter.unstructure(mapping)
assert dumped == mapping
assert dumped is not mapping
assert type(dumped) is type(mapping)
@given(enums_of_primitives(), unstruct_strats, data())
def test_enum_unstructure(enum, dump_strat, data):
"""Dumping enums of primitives converts them to their primitives."""
converter = Converter(unstruct_strat=dump_strat)
member = data.draw(sampled_from(list(enum.__members__.values())))
assert converter.unstructure(member) == member.value
@given(nested_classes)
def test_attrs_asdict_unstructure(nested_class):
"""Our dumping should be identical to `attrs`."""
converter = Converter()
instance = nested_class[0]()
assert converter.unstructure(instance) == asdict(instance)
@given(nested_classes)
def test_attrs_astuple_unstructure(nested_class):
"""Our dumping should be identical to `attrs`."""
converter = Converter(unstruct_strat=UnstructureStrategy.AS_TUPLE)
instance = nested_class[0]()
assert converter.unstructure(instance) == astuple(instance)
@given(simple_classes())
def test_unstructure_hooks(cl_and_vals):
"""
Unstructure hooks work.
"""
converter = Converter()
cl, vals = cl_and_vals
inst = cl(*vals)
converter.register_unstructure_hook(cl, lambda _: "test")
assert converter.unstructure(inst) == "test"
def test_unstructure_hook_func(converter):
"""
Unstructure hooks work.
"""
def can_handle(cls):
return cls.__name__.startswith("F")
def handle(_):
return "hi"
class Foo(object):
pass
class Bar(object):
pass
converter.register_unstructure_hook_func(can_handle, handle)
b = Bar()
assert converter.unstructure(Foo()) == "hi"
assert converter.unstructure(b) is b
@given(lists(simple_classes()), sampled_from([tuple, list]))
def test_seq_of_simple_classes_unstructure(cls_and_vals, seq_type):
"""Dumping a sequence of primitives is a simple copy operation."""
converter = Converter()
inputs = seq_type(cl(*vals) for cl, vals in cls_and_vals)
outputs = converter.unstructure(inputs)
assert type(outputs) == seq_type
assert all(type(e) is dict for e in outputs)
| Tinche/cattrs | tests/test_unstructure.py | Python | mit | 3,880 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class for managing unity versions.
This module serves as a source of truth for what constitutes a valid Unity
version, and provides a class for storing a version and accessing its parts.
It also provides a number of helpful methods related to properties of Unity
projects of particular versions, such as which .NET runtimes they support.
Usage:
>>> version = UnityVersion("5.6.3p2")
>>> print(version.major)
int: 5
>>> print(version.minor)
int: 6
>>> print(version.revision)
string: 3p2
>>> version = UnityVersion("5.6")
>>> print(version.major)
int: 5
>>> print(version.minor)
int: 6
>>> print(version.revision)
NoneType: None
>>> version = UnityVersion("5.6")
>>> version == "5.6"
bool: True
>>> version > "5.6.1f1"
bool: False
"""
import functools
import re
_RUNTIME_35 = "3.5"
_RUNTIME_46 = "4.6"
_RUNTIMES = (_RUNTIME_35, _RUNTIME_46)
# Sorting order of possible version types in the version string
# (bigger is most recent).
_VERSION_TYPE_ORDER = (None, "a", "b", "rc", "f", "p")
_RE = re.compile(
"("
"(?P<major>[0-9]+)" # Starts with digits (major version).
"\\." # Followed by a period.
"(?P<minor>[0-9]+)" # Followed by more digits (minor version).
"(?:" # Begin non-capturing group (so we can later mark it optional).
"\\." # Start with period.
"(?P<revision>" # Begin revision group.
"(?P<revision_major>[0-9]+)" # Revision starts with digits.
"(?P<version_type>p|f|rc|a|b)" # Followed by one of these version types.
"(?P<revision_minor>[0-9]+)" # Revision ends with digits.
")" # End revision group.
")" # End non-capturing group.
"?" # Mark previous group as optional.
"$" # Must reach the end of string.
")")
@functools.total_ordering
class UnityVersion(object):
"""Represents a version of the Unity game engine.
See the constructor documentation for the version string for the required
format, which is strict. Once constructed, the major, minor and revision
versions can be accessed as properties. Note that the major and minor versions
are integers, while the revision is a string (or None, if the version only
contained a major and minor version).
To check validity of a version string without passing it to the constructor,
use the module-level validate function.
"""
def __init__(self, version):
"""Construct a unity version object.
Args:
version: Must take the format a.b or a.b.c. a and b must consist
of digits, while c can consist of digits and lower case letters.
a is the major version, b is the minor version, and c is the revision.
Can also be a UnityVersion object.
Raises:
ValueError: Format for version string is not correct.
"""
# This allows version to be a UnityVersion object, which makes
# implementing string/UnityVersion comparisons much easier.
version_string = str(version)
match = _RE.match(version_string)
if not match:
raise ValueError("Invalid version string: %s" % version_string)
match_dict = match.groupdict()
self._major = int(match_dict["major"])
self._minor = int(match_dict["minor"])
# If no revision was supplied, this will be None.
self._revision = match_dict["revision"]
# The following are needed for accurate version comparison.
if self._revision:
self._revision_major = int(match_dict["revision_major"])
self._version_type = match_dict["version_type"]
self._revision_minor = int(match_dict["revision_minor"])
else:
self._revision_major = None
self._version_type = None
self._revision_minor = None
def __repr__(self):
# Note: it's important that for any Version v, we have the following
# identity: v == UnityVersion(str(v))
components = [str(self._major), str(self._minor)]
if self._revision:
components.append(self._revision)
return ".".join(components)
def __gt__(self, other):
return self.is_more_recent_than(UnityVersion(other))
def __eq__(self, other):
try:
other = UnityVersion(other)
except ValueError:
return NotImplemented
a = (self.major, self.minor, self.revision)
b = (other.major, other.minor, other.revision)
return a == b
def __ne__(self, other):
return not self == other
def __hash__(self):
# Since we treat a version object as equal to its version string,
# we also need their hashes to agree.
return hash(self.__repr__())
@property
def major(self):
"""The major version, as an integer."""
return self._major
@property
def minor(self):
"""The minor version, as an integer."""
return self._minor
@property
def revision(self):
"""The revision, as a string. Can be None."""
return self._revision
@property
def revision_major(self):
"""The first number in the revision. None if revision is None."""
return self._revision_major
@property
def version_type(self):
"""The letters in the revision (f, p, b, etc.). None if revision is None."""
return self._version_type
@property
def revision_minor(self):
"""The final number in the revision. None if revision is None."""
return self._revision_minor
@property
def generates_workspace(self):
"""Does this unity version generate an xcode workspace?
Starting with Unity 5.6, Unity will generate a workspace for xcode when
performing an iOS build. Prior to that, it only generated a project.
xcodebuild needs to be used differently through the command line based
on whether a workspace or project is being used.
Returns:
Boolean indicating whether this version will produce a workspace.
"""
return self >= "5.6"
# This is redundant with the comparison operators, but seeing as this
# is a useful operation, it can be useful to be explicit about what
# the comparison represents.
def is_more_recent_than(self, other):
"""Is this version of Unity more recent than other?
Recent means release date. 2017.3.1f1 is more recent than 5.6.3p2, for
example, and 5.6.3p1 is more recent than 5.6.3f1.
Note that a.b will be treated as being older than a.b.c for all c.
Args:
other: The other version being compared. Can be a string or
UnityVersion object.
Returns:
boolean corresponding to whether this version is more recent than other.
Raises:
ValueError: If other is not a UnityVersion object or valid
Unity version string.
"""
# This breaks the version down into a tuple of components strictly for
# lexicographical ordering purposes.
def componentize(version):
version = UnityVersion(version)
return (
version.major,
version.minor,
version.revision_major or 0,
_VERSION_TYPE_ORDER.index(version.version_type),
version.revision_minor or 0)
return componentize(self) > componentize(other)
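    # Illustrative values (derived from _VERSION_TYPE_ORDER above):
    #   componentize("5.6.3p2")    -> (5, 6, 3, 5, 2)
    #   componentize("2017.3.1f1") -> (2017, 3, 1, 4, 1)
    # so "2017.3.1f1" compares as more recent than "5.6.3p2".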
def supports_runtime(self, runtime):
"""Does this version of Unity support this .NET runtime?
Unity began supporting .NET 4.6 starting with 2017, and deprecated 3.5
with 2018.3. This method will indicate whether the given runtime is
supported by this version of Unity.
Args:
runtime: (string) .NET runtime version. Either '3.5' or '4.6'.
Returns:
(boolean) Whether the given runtime is supported by this version.
Raises:
ValueError: Unrecognized runtime.
"""
if runtime == _RUNTIME_35:
return self < "2018.3"
if runtime == _RUNTIME_46:
return self >= "2017.0"
raise ValueError(
"Runtime {0} not recognized. Must be one of {1}.".format(
runtime, str(_RUNTIMES)))
@property
def default_runtime(self):
"""Returns the default .NET runtime for this version."""
return "3.5" if self < "2018.3" else "4.6"
def validate(version_string):
"""Is this a valid Unity version?
It is recommended to use this before attempting to construct a UnityVersion
object.
Args:
version_string: Must take the format a.b or a.b.cde, where a, b, c and e
must consist of digits. d can be any version type, i.e. 'a', 'b',
'rc', 'f', or 'p', corresponding to alpha, beta, release candidate,
full and patch respectively.
a is the major version, b is the minor version, c is the
revision_major, d is the version_type, and e is the revision_minor.
Returns:
boolean, corresponding to whether the argument corresponds to a valid
Unity version.
"""
return bool(re.match(_RE, version_string))
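  # Illustrative expectations for the format described above:
  #   validate("2017.3.1f1") -> True
  #   validate("5.6")        -> True
  #   validate("2017.3.1")   -> False (the revision needs a type letter, e.g. "1f1")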
| firebase/firebase-unity-sdk | scripts/gha/integration_testing/unity_version.py | Python | apache-2.0 | 9,179 |
import unicodedata
def clean(var):
"""Removes tabs, newlines and trailing whitespace"""
if var is None:
return ''
return var.replace("\t", "").replace("\n", "").strip()
def slugify(var):
remove = ["(", ")", ";", "?", '’', "'", ".", ",", ':', "‘",]
replace = [
(" - ", '_'), (" ", "_"), ("ŋ", "ng"), ('ʝ', "j"),
('ɛ', 'e'), ('ʃ', 'sh'), ('ø', 'Y'), ('ɲ', 'nj'),
]
var = var.split("[")[0].strip()
var = var.split("/")[0].strip()
var = unicodedata.normalize('NFKD', var)
var = "".join([c for c in var if not unicodedata.combining(c)])
var = var.replace("ß", "V") # do this before casefolding
var = var.casefold()
for r in remove:
var = var.replace(r, "")
for r in replace:
var = var.replace(*r)
var = var.title()
return var
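    # Illustrative example (hypothetical input): slugify("Ata Tane [dialect]")
    # drops the bracketed part, casefolds, joins the words with underscores and
    # title-cases the result, giving "Ata_Tane".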
| SimonGreenhill/ABVDGet | abvdget/tools.py | Python | bsd-3-clause | 839 |
"""File to contain functions for controlling parts of Norc."""
from datetime import datetime
from norc.core.constants import Status
def handle(obj):
if not obj.is_alive():
obj.status = Status.HANDLED
if hasattr(obj, "ended") and obj.ended == None:
obj.ended = datetime.utcnow()
obj.save()
return True
else:
return False
| darrellsilver/norc | core/controls.py | Python | bsd-3-clause | 384 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import filecmp
import random
import textwrap
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import BranchPythonOperator, PythonOperator
from airflow.providers.qubole.operators.qubole import QuboleOperator
from airflow.providers.qubole.sensors.qubole import QuboleFileSensor, QubolePartitionSensor
from airflow.utils.dates import days_ago
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
}
with DAG(
dag_id='example_qubole_operator',
default_args=default_args,
schedule_interval=None,
start_date=days_ago(2),
tags=['example'],
) as dag:
dag.doc_md = textwrap.dedent(
"""
This is only an example DAG to highlight usage of QuboleOperator in various scenarios,
some of these tasks may or may not work based on your Qubole account setup.
Run a shell command from Qubole Analyze against your Airflow cluster with following to
trigger it manually `airflow dags trigger example_qubole_operator`.
*Note: Make sure that connection `qubole_default` is properly set before running this
example. Also be aware that it might spin up clusters to run these examples.*
"""
)
def compare_result_fn(**kwargs):
"""
Compares the results of two QuboleOperator tasks.
:param kwargs: The context of the executed task.
:type kwargs: dict
:return: True if the files are the same, False otherwise.
:rtype: bool
"""
ti = kwargs['ti']
qubole_result_1 = hive_show_table.get_results(ti)
qubole_result_2 = hive_s3_location.get_results(ti)
return filecmp.cmp(qubole_result_1, qubole_result_2)
hive_show_table = QuboleOperator(
task_id='hive_show_table',
command_type='hivecmd',
query='show tables',
cluster_label='{{ params.cluster_label }}',
fetch_logs=True,
# If `fetch_logs`=true, will fetch qubole command logs and concatenate
# them into corresponding airflow task logs
tags='airflow_example_run',
# To attach tags to qubole command, auto attach 3 tags - dag_id, task_id, run_id
qubole_conn_id='qubole_default',
# Connection id to submit commands inside QDS, if not set "qubole_default" is used
params={
'cluster_label': 'default',
},
)
hive_s3_location = QuboleOperator(
task_id='hive_s3_location',
command_type="hivecmd",
script_location="s3n://public-qubole/qbol-library/scripts/show_table.hql",
notify=True,
tags=['tag1', 'tag2'],
# If the script at s3 location has any qubole specific macros to be replaced
# macros='[{"date": "{{ ds }}"}, {"name" : "abc"}]',
trigger_rule="all_done",
)
compare_result = PythonOperator(
task_id='compare_result', python_callable=compare_result_fn, trigger_rule="all_done"
)
compare_result << [hive_show_table, hive_s3_location]
options = ['hadoop_jar_cmd', 'presto_cmd', 'db_query', 'spark_cmd']
branching = BranchPythonOperator(task_id='branching', python_callable=lambda: random.choice(options))
branching << compare_result
join = DummyOperator(task_id='join', trigger_rule='one_success')
hadoop_jar_cmd = QuboleOperator(
task_id='hadoop_jar_cmd',
command_type='hadoopcmd',
sub_command='jar s3://paid-qubole/HadoopAPIExamples/'
'jars/hadoop-0.20.1-dev-streaming.jar '
'-mapper wc '
'-numReduceTasks 0 -input s3://paid-qubole/HadoopAPITests/'
'data/3.tsv -output '
's3://paid-qubole/HadoopAPITests/data/3_wc',
cluster_label='{{ params.cluster_label }}',
fetch_logs=True,
params={
'cluster_label': 'default',
},
)
pig_cmd = QuboleOperator(
task_id='pig_cmd',
command_type="pigcmd",
script_location="s3://public-qubole/qbol-library/scripts/script1-hadoop-s3-small.pig",
parameters="key1=value1 key2=value2",
trigger_rule="all_done",
)
pig_cmd << hadoop_jar_cmd << branching
pig_cmd >> join
presto_cmd = QuboleOperator(task_id='presto_cmd', command_type='prestocmd', query='show tables')
shell_cmd = QuboleOperator(
task_id='shell_cmd',
command_type="shellcmd",
script_location="s3://public-qubole/qbol-library/scripts/shellx.sh",
parameters="param1 param2",
trigger_rule="all_done",
)
shell_cmd << presto_cmd << branching
shell_cmd >> join
db_query = QuboleOperator(
task_id='db_query', command_type='dbtapquerycmd', query='show tables', db_tap_id=2064
)
db_export = QuboleOperator(
task_id='db_export',
command_type='dbexportcmd',
mode=1,
hive_table='default_qubole_airline_origin_destination',
db_table='exported_airline_origin_destination',
partition_spec='dt=20110104-02',
dbtap_id=2064,
trigger_rule="all_done",
)
db_export << db_query << branching
db_export >> join
db_import = QuboleOperator(
task_id='db_import',
command_type='dbimportcmd',
mode=1,
hive_table='default_qubole_airline_origin_destination',
db_table='exported_airline_origin_destination',
where_clause='id < 10',
parallelism=2,
dbtap_id=2064,
trigger_rule="all_done",
)
prog = '''
import scala.math.random
import org.apache.spark._
/** Computes an approximation to pi */
object SparkPi {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("Spark Pi")
val spark = new SparkContext(conf)
val slices = if (args.length > 0) args(0).toInt else 2
val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
val count = spark.parallelize(1 until n, slices).map { i =>
val x = random * 2 - 1
val y = random * 2 - 1
if (x*x + y*y < 1) 1 else 0
}.reduce(_ + _)
println("Pi is roughly " + 4.0 * count / n)
spark.stop()
}
}
'''
spark_cmd = QuboleOperator(
task_id='spark_cmd',
command_type="sparkcmd",
program=prog,
language='scala',
arguments='--class SparkPi',
tags='airflow_example_run',
)
spark_cmd << db_import << branching
spark_cmd >> join
with DAG(
dag_id='example_qubole_sensor',
default_args=default_args,
schedule_interval=None,
start_date=days_ago(2),
doc_md=__doc__,
tags=['example'],
) as dag2:
dag2.doc_md = textwrap.dedent(
"""
This is only an example DAG to highlight usage of QuboleSensor in various scenarios,
some of these tasks may or may not work based on your QDS account setup.
Run a shell command from Qubole Analyze against your Airflow cluster with following to
trigger it manually `airflow dags trigger example_qubole_sensor`.
*Note: Make sure that connection `qubole_default` is properly set before running
this example.*
"""
)
check_s3_file = QuboleFileSensor(
task_id='check_s3_file',
qubole_conn_id='qubole_default',
poke_interval=60,
timeout=600,
data={
"files": [
"s3://paid-qubole/HadoopAPIExamples/jars/hadoop-0.20.1-dev-streaming.jar",
"s3://paid-qubole/HadoopAPITests/data/{{ ds.split('-')[2] }}.tsv",
] # will check for availability of all the files in array
},
)
check_hive_partition = QubolePartitionSensor(
task_id='check_hive_partition',
poke_interval=10,
timeout=60,
data={
"schema": "default",
"table": "my_partitioned_table",
"columns": [
{"column": "month", "values": ["{{ ds.split('-')[1] }}"]},
{"column": "day", "values": ["{{ ds.split('-')[2] }}", "{{ yesterday_ds.split('-')[2] }}"]},
], # will check for partitions like [month=12/day=12,month=12/day=13]
},
)
check_s3_file >> check_hive_partition
| mrkm4ntr/incubator-airflow | airflow/providers/qubole/example_dags/example_qubole.py | Python | apache-2.0 | 9,126 |
# -*- coding: utf-8 -*-
from flask import Flask,render_template,send_file,Response,flash,request,redirect,session
from werkzeug.utils import secure_filename
import json
import os.path
import os
import gzip
import urllib
from db import DbGetListOfDates,DbGet,DbGetComments,DbGetMulitple,DbGetNearbyPoints,DbPut,DbPutWithoutPassword,DbSearchWord,DbGetMapsOfUser,DbGetAllMaps,DbAddComment,CheckValidMapId,CheckValidFreetext,DbDelMap,DbChkPwd
import anydbm
import traceback
from progress import GetProgress,SetProgress
from users import CheckSession,Login,ActivateUser,SendActivationMail,ReserveUser,GetUserFromUserOrEmail,SendForgotPasswordMail
import sys
from orchestrator import BuildMap,ProcessTrkSegWithProgress,BuildMapFromTrack
from searchparser import SearchQueryParser
from sets import Set
from textutils import remove_accents
from log import Log
from mapparser import ParseMap
from model import Track
from options import options_default
from dem import GetEleFromLatLon
from computeprofile import ComputeProfile
from demize import Demize
from generate_id import uniqid
from config import keysnpwds, config
from flask_babel import Babel, gettext
from thumbnail import selectPointsForThumbnail, thumbnailUrlMapbox
# Create flask application
application = Flask(__name__)
application.config['UPLOAD_FOLDER'] = 'uploads'
application.secret_key = keysnpwds['secret_key']
## Internationalization (i18n)
babel = Babel(application)
LANGUAGES = {
'en': 'English',
'fr': 'Francais',
'es': 'Español'
}
@babel.localeselector
def get_locale():
# Uncomment for testing a specific language
#return 'es'
#return 'fr'
# Check if there is a lang in session
if session.has_key('lang'):
return session['lang']
# Else guess the lang from browser request
return request.accept_languages.best_match(LANGUAGES.keys())
@application.route('/i18n.js/<item>')
def i18n_js(item):
""" Translation strings for javascript """
assert(item in ('header','map','prepare')) #basic security check
return render_template('i18n_%s.js'%item)
@application.route('/<lang>/testi18n.js')
def test_i18n_js(lang):
""" To test i18n for javascript because js escaping is not well handled by jinja2 """
session['lang']=lang
return '<html><head></head><body>Press Ctrl+Maj+K and check no errors in console<script>'+render_template('i18n_header.js')+render_template('i18n_map.js')+'</script>'
## Index page
@application.route('/',defaults={'lang':None,'limit':10})
@application.route('/indexall',defaults={'lang':None,'limit':-1})
@application.route('/<lang>/',defaults={'limit':10})
@application.route('/<lang>/indexall',defaults={'limit':10})
def index(lang,limit):
if lang!=None:
session['lang']=lang
maplist = DbGetListOfDates()
cptr = 0
mapsout = []
for date in sorted(maplist.iterkeys(),reverse=True):
maps = maplist[date]
for mapid in maps:
(lat,lon) = DbGet(mapid,'startpoint').split(',')
trackdesc = DbGet(mapid,'trackdesc')
trackuser = DbGet(mapid,'trackuser')
desc=trackdesc.decode('utf8')
mapsout.append({'mapid':mapid,'lat':lat,'lon':lon,'user':trackuser,'desc':desc,'date':date})
cptr += 1
if(limit>-1) and (cptr>limit):
break
if(limit>-1) and (cptr>limit):
break
return render_template('index.html',limit=limit,maps=mapsout,GMapsApiKey=keysnpwds['GMapsApiKey'])
## GPX Export
@application.route('/togpx/<mapid>')
def togpx(mapid):
# Read map data
f=gzip.open('data/mapdata/%s.json.gz'%mapid,'rb')
mapdata=json.load(f)
f.close()
return '<?xml version="1.0" encoding="UTF-8"?>\n<gpx version="1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.topografix.com/GPX/1/0" xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd"><trk><trkseg>' + ''.join(map(lambda p:'<trkpt lat="%f" lon="%f"></trkpt>'%(p[0],p[1]),mapdata['points'])) + '</trkseg></trk></gpx>'
## Thumbnails
if not os.path.isdir('data'):
os.mkdir('data')
if not os.path.isdir('data/thumbnail_cache'):
os.mkdir('data/thumbnail_cache')
@application.route('/thumbnail/<mapid>')
@application.route('/thumbnail.php',defaults={'mapid':None})
def thumbnail(mapid):
if mapid==None:
mapid = request.args.get('mapid')
filename = 'data/thumbnail_cache/%s.png'%mapid
if os.path.isfile(filename):
# Return image in cache
return send_file(filename, mimetype='image/png')
else:
ptlist = selectPointsForThumbnail(mapid)
# Build map image url
url = thumbnailUrlMapbox(ptlist)
furl = open('data/thumbnail_cache/%s.url'%(mapid),'w')
furl.write(url)
furl.close()
# Download png, put it in cache and send it
f = urllib.urlopen(url)
fcache = open(filename,'wb')
contents = f.read()
fcache.write(contents)
fcache.close()
f.close()
return contents
## Show map
@application.route('/<lang>/showmap/<mapid>', defaults={'map_type': None})
@application.route('/<lang>/showmap/<mapid>/<map_type>')
@application.route('/<lang>/showmap-flot.php',defaults={'mapid':None,'map_type': None})
@application.route('/<lang>/showmap.php',defaults={'mapid':None,'map_type': None})
@application.route('/showmap/<mapid>', defaults={'lang':None,'map_type': None})
@application.route('/showmap/<mapid>/<map_type>',defaults={'lang':None})
@application.route('/showmap-flot.php',defaults={'lang':None,'mapid':None,'map_type': None})
@application.route('/showmap.php',defaults={'lang':None,'mapid':None,'map_type': None})
def showmap(lang,mapid,map_type):
if lang!=None:
session['lang']=lang
if mapid==None:
mapid=request.args.get('mapid')
# Read map data
f=gzip.open('data/mapdata/%s.json.gz'%mapid,'rb')
mapdata=json.load(f)
f.close()
# Read map db
mapdb = anydbm.open('data/maps/%s.db'%mapid, 'r')
if map_type==None:
map_type = mapdata['type']
# Render
_mapdb={}
for key in mapdb:
_mapdb[key] = mapdb[key].decode('utf-8') # We must convert each utf8 string into unicode for jinja2
out = render_template('showmap.html',domain=config['domain'],mapid=mapid,type=map_type,mapdb=_mapdb,mapdata=mapdata,GMapsApiKey=keysnpwds['GMapsApiKey'],GeoPortalApiKey=keysnpwds['GeoPortalApiKey'])
mapdb.close()
return out
@application.route('/mapdata/<mapid>')
def mapdata(mapid):
# Read map data
f=gzip.open('data/mapdata/%s.json.gz'%mapid,'rb')
mapfromfile=json.load(f)
f.close()
return Response(render_template('mapdata.js',mapdata=mapfromfile,chartdata=json.dumps(mapfromfile['chartdata'])), mimetype='text/javascript')
@application.route('/comments/<mapid>')
def comments(mapid):
comments = DbGetComments(mapid)
return Response('<?xml version="1.0" encoding="UTF-8"?><result>%s</result>' % ''.join(map(lambda comment: '<comment user="%s" date="%s">%s</comment>' % (comment[1],comment[0],comment[2]),comments)), mimetype='text/xml')
@application.route('/sendcomment/<mapid>/<comment>')
def sendcomment(mapid,comment):
try:
user = 'unknown'
if request.form.has_key('user'):
            user = request.form.get('user')
if not CheckValidUserName(user):
raise Exception('Invalid user name')
            sess = request.form.get('sess')
if CheckSession(user,sess):
pass
else:
raise Exception(gettext('Invalid session, please re-login'))
else:
user = request.remote_addr
if not CheckValidMapId(mapid):
raise Exception(gettext('Invalid map id'))
if not CheckValidFreetext(comment):
            raise Exception(gettext('Invalid comment'))
DbAddComment(mapid,user,comment)
result = 'OK'
except Exception, e:
result = str(e)
out = '<?xml version="1.0" encoding="UTF-8"?>\n<result>%s</result>'%result
return Response(out, mimetype='text/xml')
@application.route('/nearmaps/<mapid>')
def nearmaps(mapid):
lat,lon = map(float,DbGet(mapid,'startpoint').split(','))
return '{'+','.join(['"%s":%s' % (_mapid,json.dumps(DbGetMulitple(_mapid,('startpoint','trackdesc','trackuser','date')))) for _mapid in filter(lambda mid: mid!=mapid,DbGetNearbyPoints(lat,lon))])+'}'
@application.route('/dbget/<mapid>/<element>')
def dbget(mapid,element):
try:
val = DbGet(mapid,element.encode('ascii'))
message = 'OK'
except Exception, e:
message = 'Error: ' + str(e)+'\n'+traceback.format_exc()
val = 'Error'
out = '<?xml version="1.0" encoding="UTF-8"?>\n<answer><message>%s</message><pageelementid>%s</pageelementid><value>%s</value></answer>' % (message,element,val)
return Response(out, mimetype='text/xml')
@application.route('/dbput/<mapid>/<pwd>/<ele>/<val>',defaults={'user':None,'sess':-1})
@application.route('/dbput/<mapid>/<pwd>/<ele>/<val>/<user>/<sess>')
def dbput(mapid,pwd,ele,val,user,sess):
try:
if user!=None and sess!=-1:
if CheckSession(user,sess):
map_user = DbGet(mapid,'trackuser')
if len(map_user)>0 and map_user==user:
DbPutWithoutPassword(mapid,ele.encode('ascii'),val.encode('utf8'))
message = 'OK'
else:
raise Exception(gettext('Map %s does not belong to user %s, but to user %s') % (mapid,user,map_user))
else:
raise Exception(gettext('Invalid session, please re-login'))
else:
DbPut(mapid,pwd,ele.encode('ascii'),val.encode('utf8'))
message = 'OK'
except Exception, e:
message = 'Error: ' + str(e)
val = 'Error'
out = '<?xml version="1.0" encoding="UTF-8"?>\n<answer><message>%s</message><pageelementid>%s</pageelementid><value>%s</value></answer>' % (message,ele,val)
return Response(out, mimetype='text/xml')
## Send map
@application.route('/<lang>/submitform')
@application.route('/submitform',defaults={'lang':None})
def submitform(lang):
if lang!=None:
session['lang']=lang
return render_template('submitform.html',GMapsApiKey=keysnpwds['GMapsApiKey'])
@application.route('/upload', methods=['POST'])
def upload():
# Get submit_id
submit_id = request.form['submit_id'].encode('ascii')
if not submit_id.isalnum():
return 'Bad submitid'
# Build inputfile array
inputfile = []
i=0
for file in request.files.getlist("file[]"):
# Save each uploaded file
if not os.path.isdir(application.config['UPLOAD_FOLDER']):
os.mkdir(application.config['UPLOAD_FOLDER'])
p=os.path.join(application.config['UPLOAD_FOLDER'], secure_filename('%s_%s.gpx'%(submit_id,i)))
Log('Saving file to %s'%p,submit_id)
file.save(p)
Log('File saved',submit_id)
i+=1
inputfile.append(file)
# In case of import from URL
if request.form.has_key('fromurl') and len(request.form['fromurl'])>0:
inputfile.append(request.form.get('fromurl').encode('ascii'))
if len(inputfile)<1:
return gettext('Error while uploading file')
# Track selection in case file contains several tracks
if request.form.has_key('trk_select'):
trk_id = int(request.form['trk_select'])
else:
trk_id = 0
trk_seg_id = 0
# Get track description
Log('Get track desc',submit_id)
desc = request.form['desc'].encode('utf8')
Log('Check session',submit_id)
# Check session
user = request.form['user']
#sys.stderr.write('%s\n'%(request.form))
if user=='NoUser' or user=='':
user = 'unknown'
else:
sess = request.form['sess']
if not CheckSession(user,sess):
user = 'unknown'
# Parse options (flat,wind,maptype,...)
options = options_default
for key in options:
if request.form.has_key(key):
if type(options[key])==bool:
if request.form.get(key):
options[key]=True
else:
options[key]=False
#options[key]=(request.form[key]=='yes')
elif type(options[key])==int:
options[key]=int(request.form[key])
elif type(options[key])==str or type(options[key])==unicode:
options[key]=request.form[key]
else:
raise Exception(gettext('type %s not handled')%type(options[key]))
Log('options=%s'%options,submit_id)
Log('start BuildMap',submit_id)
try:
pwd = BuildMap(inputfile,submit_id,trk_id,trk_seg_id,submit_id,desc,user,options)
except Exception,e:
Log(str(e))
SetProgress(submit_id,str(e))
return str(e)
Log('end BuildMap',submit_id)
return '''<script type="text/javascript">
var date = new Date();
date.setTime(date.getTime()+(10*24*60*60*1000));
var expires = "; expires="+date.toGMTString();
document.cookie = "pwd%(mapid)s=%(pwd)s"+expires+"; path=/";
location.href=\'/showmap/%(mapid)s\';
</script>'''% {'mapid':submit_id,'pwd':pwd}
@application.route('/getprogress/<submitid>')
def getprogress(submitid):
return GetProgress(submitid.encode('ascii')).decode('utf8')
## Search
class MapSeach(SearchQueryParser):
def GetWord(self, word):
return Set(DbSearchWord('trackdesc',word))
def GetWordWildcard(self, word):
return Set()
def GetQuotes(self, search_string, tmp_result):
return Set()
def map_search_result(mapid):
try:
(lat,lon) = DbGet(mapid,'startpoint').split(',')
except:
(lat,lon)=(0.0,0.0)
trackdesc = DbGet(mapid,'trackdesc')
startdate = DbGet(mapid,'date')
trackuser = DbGet(mapid,'trackuser')
try:
desc = trackdesc.encode('ascii', 'xmlcharrefreplace')
except:
desc = trackdesc
    desc = desc.replace('&','&amp;')
return('<map mapid="%s" lat="%s" lon="%s" date="%s" user="%s">%s</map>' % (mapid,lat,lon,startdate,trackuser,desc))
@application.route('/search/<search_req>')
def search(search_req):
try:
req = remove_accents(search_req.encode('utf8').lower(),'utf-8')
mapids = MapSeach().Parse(req)
out='<result><maps>%s</maps></result>'%''.join(map(map_search_result,mapids))
except Exception, e:
out='<error>Error: %s</error>'%e
return Response(out, mimetype='text/xml')
## Show user
def map_retrieve_infos_showuser(mapid):
trackdesc = DbGet(mapid,'trackdesc').decode('utf8')
startdate = DbGet(mapid,'date')
return {'mapid':mapid,'desc':trackdesc,'date':startdate}
@application.route('/<lang>/showuser/<user>')
@application.route('/showuser/<user>',defaults={'lang':None})
def showuser(lang,user):
if lang!=None:
session['lang']=lang
mapids = DbGetMapsOfUser(user.encode('ascii'))
maps = map(map_retrieve_infos_showuser,mapids)
return render_template('showuser.html',user=user,maps=maps)
@application.route('/userinfo/<user>')
def userinfo(user):
mapids = DbGetMapsOfUser(user.encode('ascii'))
out = '<maps>%s</maps>'%''.join(map(map_search_result,mapids))
return Response(out, mimetype='text/xml')
## Browse maps
@application.route('/<lang>/mapofmaps')
@application.route('/mapofmaps',defaults={'lang':None})
def mapofmaps(lang):
if lang!=None:
session['lang']=lang
return render_template('mapofmaps.html',GMapsApiKey=keysnpwds['GMapsApiKey'])
def map_search_result2(lat,lon,mapid):
trackdesc = DbGet(mapid,'trackdesc')
startdate = DbGet(mapid,'date')
trackuser = DbGet(mapid,'trackuser')
try:
        desc = trackdesc.encode('ascii', 'xmlcharrefreplace').replace('<','&lt;').replace('>','&gt;')
except:
desc = trackdesc
return('<map mapid="%s" lat="%s" lon="%s" date="%s" user="%s">%s</map>' % (mapid,lat,lon,startdate,trackuser,desc))
def latlonmapids2xml(latlonmapids):
lat,lon,mapids = latlonmapids
return '<maps lat="%.4f" lon="%.4f">%s</maps>' % (lat,lon,''.join(map(lambda mapid:map_search_result2(lat,lon,mapid),mapids)))
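# Illustrative shape of the XML assembled below (hypothetical values):
#   <results>
#     <maps lat="45.1234" lon="5.6789">
#       <map mapid="abc123" lat="45.1234" lon="5.6789" date="2015-06-01" user="bob">Nice hike</map>
#     </maps>
#   </results>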
@application.route('/getmaplist')
def getmaplist():
latlonmapidss = DbGetAllMaps()
out = '<results>%s</results>' % ''.join(map(latlonmapids2xml,latlonmapidss))
return Response(out, mimetype='text/xml')
## Map Tools
def auth(mapid,pwd,user,sess):
# Check rights
if user!=None and sess!=None:
if CheckSession(user,sess):
map_user = DbGet(mapid,'trackuser')
if len(map_user)>0 and map_user==user:
pass
else:
raise Exception(gettext('Map %s does not belong to user %s, but to user %s') % (mapid,user,map_user))
else:
raise Exception(gettext('Invalid session, please re-login'))
else:
if not DbChkPwd(mapid,pwd):
raise Exception(gettext('You do not have the map\'s password in your browser\'s cookies'))
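# Illustrative use of auth() (values hypothetical):
#   auth(mapid, pwd, None, None)   -> checks the per-map password stored in the browser cookie
#   auth(mapid, None, user, sess)  -> checks the user's session and that the map belongs to the user
# It returns None on success and raises Exception with a translated message otherwise.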
@application.route('/delmap/<mapid>/<pwd>',defaults={'user':None,'sess':None})
@application.route('/delmap/<mapid>/<pwd>/<user>/<sess>')
def delmap(mapid,pwd,user,sess):
try:
auth(mapid,pwd,user,sess)
# Delete map
DbDelMap(mapid)
mapfile = 'data/mapdata/%s.json.gz' % mapid
os.remove(mapfile)
message = gettext('Map deleted')
except Exception, e:
message = str(e)
return render_template('map_deleted.html',message=message)
def modifymap(mapid,pwd,user,sess,modifyfunction):
try:
        # Authenticate
auth(mapid,pwd,user,sess)
# Parse map
options, ptlist = ParseMap(mapid)
# Apply modifications
ptlist,startpointchanged = modifyfunction(ptlist)
# Rebuild map
track = Track(ptlist)
ProcessTrkSegWithProgress(track,mapid,mapid,True,options)
# If start point has changed, then update the database
if startpointchanged:
DbPutWithoutPassword(mapid,'startpoint','%.4f,%.4f' % (track.ptlist[0].lat,track.ptlist[0].lon))
# Recompute thumbnail
previewfile = 'data/thumbnail_cache/%s.png' % mapid
if os.access(previewfile,os.F_OK):
os.remove(previewfile)
message = None
except Exception, e:
message = str(e)
if message==None:
return redirect('/showmap/%s'%mapid)
else:
return render_template('map_action_error.html',message=message,mapid=mapid)
@application.route('/map/crop/<mapid>/<pwd>/<int:pt1>/<int:pt2>',defaults={'user':None,'sess':None})
@application.route('/map/crop/<mapid>/<pwd>/<int:pt1>/<int:pt2>/<user>/<sess>')
def cropmap(mapid,pwd,pt1,pt2,user,sess):
return modifymap(mapid,pwd,user,sess,lambda ptlist: (ptlist[pt1:pt2],pt1!=0))
@application.route('/map/clear/<mapid>/<pwd>/<int:pt1>/<int:pt2>',defaults={'user':None,'sess':None})
@application.route('/map/clear/<mapid>/<pwd>/<int:pt1>/<int:pt2>/<user>/<sess>')
def clearmap(mapid,pwd,pt1,pt2,user,sess):
return modifymap(mapid,pwd,user,sess,lambda ptlist: (ptlist[:pt1]+ptlist[pt2:],pt1==0))
def removepoints(ptlist,ptidxtodel):
l=range(0,len(ptlist))
Log('removepoints: %s %s'%(ptidxtodel,len(ptlist)))
for i in ptidxtodel:
l.remove(i)
return ([ptlist[i] for i in l],0 in ptidxtodel)
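# Illustrative example: removepoints([p0, p1, p2, p3], [1, 3]) returns ([p0, p2], False);
# the flag is True only when index 0 (the start point) was among the removed indices.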
@application.route('/map/clearlist/<mapid>/<pwd>/<ptliststr>',defaults={'user':None,'sess':None})
@application.route('/map/clearlist/<mapid>/<pwd>/<ptliststr>/<user>/<sess>')
def clearmaplist(mapid,pwd,ptliststr,user,sess):
ptidxtodel = map(int,ptliststr.split(','))
return modifymap(mapid,pwd,user,sess,lambda ptlist: removepoints(ptlist,ptidxtodel))
@application.route('/map/export/<mapid>')
def exportmap(mapid):
# TODO: build it from client side
pass
@application.route('/map/demize/<int:index>/<mapid>/<pwd>',defaults={'user':None,'sess':None})
@application.route('/map/demize/<int:index>/<mapid>/<pwd>/<user>/<sess>')
def demize(index,mapid,pwd,user,sess):
try:
        # Authenticate
auth(mapid,pwd,user,sess)
# Start/continue/finish DEMization. index is current point index, l is total number of points in map
index,l = Demize(index,mapid)
# Format answer
if index==0:
answer = '<answer><result>Done</result></answer>'
else:
percent = index * 100 / l
answer = '<answer><result>OK</result><nextindex>%s</nextindex><percent>%s</percent></answer>' % (index,percent)
except Exception, e:
answer = '<answer><result>%s</result></answer>' % e
return Response('<?xml version="1.0" encoding="UTF-8"?>\n%s'%answer,mimetype='text/xml')
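# Illustrative client-side use (hypothetical): call /map/demize/0/<mapid>/<pwd> first,
# then keep calling with the returned <nextindex> until <result> is 'Done';
# <percent> can drive a progress indicator while the DEMization runs.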
## User services
def CheckHumain(humaincheck):
return ((humaincheck.strip().lower()==gettext('earth'))or(humaincheck.strip().lower()==gettext('the earth')))
@application.route('/<lang>/registerform')
@application.route('/registerform',defaults={'lang':None})
def registerform(lang):
""" Display register form """
if lang!=None:
session['lang']=lang
return render_template('register.html')
@application.route('/register', methods=['POST'])
def register():
mail = request.form['mail'].lower()
user = request.form['user'].lower()
pwd1 = request.form['pwd1']
pwd2 = request.form['pwd2']
humaincheck = request.form['humaincheck']
if not CheckHumain(humaincheck):
return render_template('register.html',error_message=gettext('Humain check error'))
if pwd1!=pwd2:
        return render_template('register.html',error_message=gettext('The two passwords you entered are different. Please enter the same password twice'))
activation_id,err_msg = ReserveUser(user.encode('ascii'),mail.encode('ascii'),pwd1.encode('utf8'))
if activation_id==None:
return render_template('register.html',error_message=err_msg)
SendActivationMail(mail,user,activation_id)
return render_template('user_registered.html',user=user)
@application.route('/activate/<user>/<activationid>')
def activate(user,activationid):
""" Activate user given it's activation_id """
try:
ActivateUser(user,activationid)
except Exception, e:
return render_template('user_activate_error.html',message=str(e))
return render_template('user_activated.html',user=user)
@application.route('/login/<user>/<pwd>')
def login(user,pwd):
""" Check login/password return sesssion_id """
user = user.lower()
try:
(user,sessid) = Login(user,pwd)
except Exception, e:
return Response('<result><user>NoUser</user><sess>-1</sess><error>%s</error></result>'%e, mimetype='text/xml')
    if user==None:
        user = 'NoUser'
        sessid = -1
    out = '<result><user>%s</user><sess>%s</sess></result>' % (user,sessid)
return Response(out, mimetype='text/xml')
@application.route('/chksess/<user>/<sess>')
def chksess(user,sess):
""" Check session_id for a given user """
try:
ret = CheckSession(user,sess)
except Exception, e:
out = '<answer><result>Error: %s</result><user>NoUser</user><sess>-1</sess></answer>' % str(e)
return Response(out, mimetype='text/xml')
if ret:
result = 'OK'
else:
result = 'Expired'
out = '<answer><result>%s</result><user>%s</user><sess>%s</sess></answer>' % (result,user,sess)
return Response(out, mimetype='text/xml')
@application.route('/<lang>/forgotpwd')
@application.route('/forgotpwd',defaults={'lang':None})
def forgotpwd(lang):
if lang!=None:
session['lang']=lang
return render_template('forgotpwd.html')
@application.route('/resendpwd', methods=['POST'])
def resendpwd():
user_mail = request.form['user_mail'].encode('ascii').lower()
humaincheck = request.form['humaincheck']
if not CheckHumain(humaincheck):
return render_template('resendpwd_error.html',error_message=gettext('Humain check error'))
user,err_str = GetUserFromUserOrEmail(user_mail)
if user==None:
return render_template('resendpwd_error.html',error_message=err_str)
mail = SendForgotPasswordMail(user)
return render_template('resendpwd_ok.html',mail=mail)
def retrievemap(mapid):
(lat,lon) = DbGet(mapid,'startpoint').split(',')
desc = DbGet(mapid,'trackdesc').decode('utf8')
startdate = DbGet(mapid,'date')
user = DbGet(mapid,'trackuser')
return {'mapid':mapid,'lat':lat,'lon':lon,'desc':desc,'date':startdate,'user':user}
@application.route('/<lang>/userhome/<user>')
@application.route('/userhome/<user>',defaults={'lang':None})
def userhome(lang,user):
if lang!=None:
session['lang']=lang
mapids = DbGetMapsOfUser(user.encode('ascii'))
return render_template('userhome.html',user=user,maps=map(retrievemap,mapids),GMapsApiKey=keysnpwds['GMapsApiKey'])
@application.route('/mergemaps/<mapidsliststr>/<user>/<sess>')
def mergemaps(mapidsliststr,user,sess):
if not CheckSession(user,sess):
message = gettext('Cannot identify user %s %s')%(user,sess)
else:
mapids = mapidsliststr.split(',')
ptlistmerged = {}
for mapid in mapids:
newmapid = uniqid()
Log("MergeCgi: parse map %s" % mapid,newmapid)
# Parse map
options,ptlist = ParseMap(mapid)
#TODO: merge options
# set right day if needed
if ptlist[0].datetime.year<=1980:
dfromdb = DbGet(mapid,'date')
if dfromdb:
d = datetime.datetime.strptime(dfromdb,'%Y-%m-%d')
for pt in ptlist:
pt.datetime = pt.datetime.replace(year=d.year,month=d.month,day=d.day)
# append to dict
for pt in ptlist:
ptlistmerged[pt.datetime] = pt
ptlistmerged = ptlistmerged.values()
ptlistmerged.sort(key=lambda pt:pt.datetime)
Log("MergeCgi: rebuild: Track len=%d" % len(ptlistmerged),newmapid)
# Rebuild map
track = Track(ptlistmerged)
pwd = BuildMapFromTrack(track,newmapid,newmapid,'Result of merge',user,options)
Log("MergeCgi: finished",newmapid)
# Redirect to map
return redirect('/showmap/%s'%newmapid)
@application.route('/delmaps/<mapidsliststr>/<user>/<sess>')
def delmaps(mapidsliststr,user,sess):
if not CheckSession(user,sess):
message = gettext('Cannot identify user %s %s')%(user,sess)
else:
try:
mapids = mapidsliststr.split(',')
message = ''
for mapid in mapids:
map_user = DbGet(mapid,'trackuser')
if len(map_user)>0 and map_user==user:
DbDelMap(mapid)
os.remove('data/mapdata/%s.json.gz'%mapid)
message += gettext('Map %s deleted. ')%mapid
else:
                    message += gettext('Map %s does not belong to you')%mapid
break
except Exception, e:
message += gettext('Error: %s')%e
return render_template('map_deleted.html',message=message)
## Prepare
@application.route('/<lang>/prepare',defaults={'map_type':'GeoPortal','pts':[],'names':[]})
@application.route('/<lang>/prepare/<map_type>',defaults={'pts':[],'names':[]})
@application.route('/<lang>/prepare/<map_type>/<pts>',defaults={'names':None})
@application.route('/<lang>/prepare/<map_type>/<pts>/<names>')
@application.route('/prepare',defaults={'lang':None,'map_type':'GeoPortal','pts':[],'names':[]})
@application.route('/prepare/<map_type>',defaults={'lang':None,'pts':[],'names':[]})
@application.route('/prepare/<map_type>/<pts>',defaults={'lang':None,'names':None})
@application.route('/prepare/<map_type>/<pts>/<names>',defaults={'lang':None})
def prepare(lang,map_type,pts,names):
if lang!=None:
session['lang']=lang
return render_template('prepare.html',domain=config['domain'],map_type=map_type,GMapsApiKey=keysnpwds['GMapsApiKey'],GeoPortalApiKey=keysnpwds['GeoPortalApiKey'])
# Backward compatibility
@application.route('/prepare.php?ptlist=<ptlist>',defaults={'lang':None})
#@application.route('/fr/prepare.php',defaults={'lang':'fr'})
def prepare_php(lang):
pts=request.args.get('ptlist')
maptype=request.args.get('maptype')
names=request.args.get('names')
return prepare(lang,maptype,pts,names)
@application.route('/ele/<float:lat>/<float:lon>')
def getele(lat,lon):
return Response('%d'%GetEleFromLatLon(lat,lon), mimetype='text/plain')
def PtStr2FloatArray(ptstr):
out = ptstr.split(',')
return (float(out[0]),float(out[1]))
@application.route('/profile/<ptliststr>/<width>/<height>')
def profile(ptliststr,width,height):
ptlist = map(PtStr2FloatArray,ptliststr.split('~'))
if(len(ptlist)<2):
return Response(gettext('Error: Cannot compute profile for only one point'), mimetype='text/plain')
nbpts = 400
return Response('\n'.join(map(str,ComputeProfile(ptlist,nbpts,width,height))), mimetype='text/plain')
@application.route('/prepare/export/<format>/<ptlist>/<names>')
def prepare_export(format,ptlist,names):
# TODO: build it from client side
pass
## Misc
@application.route('/<lang>/mobile')
@application.route('/mobile',defaults={'lang':None})
def mobile(lang):
if lang!=None:
session['lang']=lang
return render_template('mobile.html')
@application.route('/<lang>/tour')
@application.route('/tour',defaults={'lang':None})
def tour(lang):
if lang!=None:
session['lang']=lang
return render_template('tour.html')
## Add the .min suffix to scripts in all templates unless debug mode is true
@application.context_processor
def inject_min_js():
if application.debug:
return {'minify':''}
else:
return {'minify':'.min'}
## Program entry point
if __name__ == '__main__':
# Start web server
if len(sys.argv)==2:
if sys.argv[1] in ('-h','--help'):
print 'Usage: %s [bindingip]' % sys.argv[0]
exit()
else:
host = sys.argv[1]
else:
host = "127.0.0.1"
application.run(port=8080,debug=True,host=host)
| fparrel/regepe | vps/regepe_flask_server.py | Python | gpl-3.0 | 30,280 |
[{'assessment_number': '1',
'due_string': 'Multiple Weeks',
'is_group': 'No',
'name': 'Participation in practical programming tasks',
'weight': '10.00'},
{'assessment_number': '2',
'due_string': 'Multiple Weeks',
'is_group': 'No',
'name': 'Quiz',
'weight': '20.00'},
{'assessment_number': '3',
'due_string': 'Multiple Weeks',
'is_group': 'No',
'name': 'Lab Skills',
'weight': '10.00'},
{'assessment_number': '4',
'due_string': 'Exam Period',
'is_group': 'No',
'name': 'Final Exam',
'weight': '60.00'}]
| lyneca/rainbow-table | examples/example.py | Python | mit | 537 |
import time
from jaeger_client import Config
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def get_config(service_name):
return Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'local_agent': {
# 'reporting_host': '192.168.99.109',
'reporting_host': 'jaeger',
'reporting_port': '6831',
},
'logging': True,
},
service_name=service_name)
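# Usage sketch (illustrative; 'my-service' and the span name are placeholders):
#
#   tracer = get_config('my-service').initialize_tracer()  # standard jaeger_client API
#   with tracer.start_span('demo-span') as span:
#       span.set_tag('example', True)
#   wait_for_termination()  # keep the process alive so buffered spans get flushed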
def wait_for_termination():
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
pass
| dhilipsiva/talks | assets/2019-11-30/utils.py | Python | mit | 655 |
import pdb
class Config(pdb.DefaultConfig):
sticky_by_default = True
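# Note: this .pdbrc.py is read by pdb++ (the pdbpp package), which looks for a
# Config class deriving from pdb.DefaultConfig; sticky_by_default starts the
# debugger in "sticky" mode, keeping the current source context on screen.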
| mphe/dotfiles | homedir/.pdbrc.py | Python | mit | 75 |
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import datetime
import logging
import pybreaker
import pytz
import requests as requests
from jormungandr import utils
from jormungandr import app
import jormungandr.scenarios.ridesharing.ridesharing_journey as rsj
from jormungandr.scenarios.ridesharing.ridesharing_service import (
AbstractRidesharingService,
RsFeedPublisher,
RidesharingServiceError,
)
from jormungandr.utils import decode_polyline
from navitiacommon import type_pb2
DEFAULT_INSTANT_SYSTEM_FEED_PUBLISHER = {
'id': 'Instant System',
'name': 'Instant System',
'license': 'Private',
'url': 'https://instant-system.com/disclaimers/disclaimer_XX.html',
}
class InstantSystem(AbstractRidesharingService):
def __init__(
self,
instance,
service_url,
api_key,
network,
feed_publisher=DEFAULT_INSTANT_SYSTEM_FEED_PUBLISHER,
rating_scale_min=None,
rating_scale_max=None,
timeout=2,
crowfly_radius=200,
timeframe_duration=1800,
):
self.instance = instance
self.service_url = service_url
self.api_key = api_key
self.network = network
self.rating_scale_min = rating_scale_min
self.rating_scale_max = rating_scale_max
self.system_id = 'Instant System'
self.timeout = timeout
self.feed_publisher = None if feed_publisher is None else RsFeedPublisher(**feed_publisher)
self.crowfly_radius = crowfly_radius
self.timeframe_duration = timeframe_duration
self.journey_metadata = rsj.MetaData(
system_id=self.system_id,
network=self.network,
rating_scale_min=self.rating_scale_min,
rating_scale_max=self.rating_scale_max,
)
self.logger = logging.getLogger("{} {}".format(__name__, self.system_id))
self.breaker = pybreaker.CircuitBreaker(
fail_max=app.config['CIRCUIT_BREAKER_MAX_INSTANT_SYSTEM_FAIL'],
reset_timeout=app.config['CIRCUIT_BREAKER_INSTANT_SYSTEM_TIMEOUT_S'],
)
self.call_params = None
def status(self):
return {
'id': self.system_id,
'class': self.__class__.__name__,
'circuit_breaker': {
'current_state': self.breaker.current_state,
'fail_counter': self.breaker.fail_counter,
'reset_timeout': self.breaker.reset_timeout,
},
'rating_scale_min': self.rating_scale_min,
'rating_scale_max': self.rating_scale_max,
'crowfly_radius': self.crowfly_radius,
'network': self.network,
}
def _call_service(self, params):
self.logger.debug("requesting instant system")
headers = {'Authorization': 'apiKey {}'.format(self.api_key)}
try:
return self.breaker.call(
requests.get, url=self.service_url, headers=headers, params=params, timeout=self.timeout
)
except pybreaker.CircuitBreakerError as e:
logging.getLogger(__name__).error(
'Instant System service dead (error: %s)', e, extra={'ridesharing_service_id': self._get_rs_id()}
)
raise RidesharingServiceError('circuit breaker open')
except requests.Timeout as t:
logging.getLogger(__name__).error(
'Instant System service timeout (error: %s)',
t,
extra={'ridesharing_service_id': self._get_rs_id()},
)
raise RidesharingServiceError('timeout')
except Exception as e:
logging.getLogger(__name__).exception(
'Instant System service error', extra={'ridesharing_service_id': self._get_rs_id()}
)
raise RidesharingServiceError(str(e))
@staticmethod
def _get_ridesharing_journeys(raw_journeys):
"""
        This function gives us journeys that contain at least one pure ridesharing offer
:param raw_journeys:
:return:
"""
def has_ridesharing_path(j):
return next((p for p in j.get('paths', []) if p.get('mode') == 'RIDESHARINGAD'), None)
return (j for j in raw_journeys if has_ridesharing_path(j))
def _make_response(self, raw_json):
raw_journeys = raw_json.get('journeys')
if not raw_journeys:
return []
ridesharing_journeys = []
for j in self._get_ridesharing_journeys(raw_journeys):
for p in j.get('paths'):
if p.get('mode') != 'RIDESHARINGAD':
continue
res = rsj.RidesharingJourney()
res.metadata = self.journey_metadata
res.distance = j.get('distance')
res.ridesharing_ad = j.get('url')
ridesharing_ad = p['rideSharingAd']
from_data = p['from']
res.pickup_place = rsj.Place(
addr=from_data.get('name'), lat=from_data.get('lat'), lon=from_data.get('lon')
)
to_data = p['to']
res.dropoff_place = rsj.Place(
addr=to_data.get('name'), lat=to_data.get('lat'), lon=to_data.get('lon')
)
# shape is a list of type_pb2.GeographicalCoord()
res.shape = []
shape = decode_polyline(p.get('shape'), precision=5)
if not shape or res.pickup_place.lon != shape[0][0] or res.pickup_place.lat != shape[0][1]:
coord = type_pb2.GeographicalCoord()
coord.lon = res.pickup_place.lon
coord.lat = res.pickup_place.lat
res.shape.append(coord)
for c in shape:
coord = type_pb2.GeographicalCoord()
coord.lon = c[0]
coord.lat = c[1]
res.shape.append(coord)
            if not shape or res.dropoff_place.lon != shape[-1][0] or res.dropoff_place.lat != shape[-1][1]:
coord = type_pb2.GeographicalCoord()
coord.lon = res.dropoff_place.lon
coord.lat = res.dropoff_place.lat
res.shape.append(coord)
res.pickup_date_time = utils.make_timestamp_from_str(p['departureDate'])
res.dropoff_date_time = utils.make_timestamp_from_str(p['arrivalDate'])
user = ridesharing_ad['user']
gender = user.get('gender')
gender_map = {'MALE': rsj.Gender.MALE, 'FEMALE': rsj.Gender.FEMALE}
res.driver = rsj.Individual(
alias=user.get('alias'),
gender=gender_map.get(gender, rsj.Gender.UNKNOWN),
image=user.get('imageUrl'),
rate=user.get('rating', {}).get('rate'),
rate_count=user.get('rating', {}).get('count'),
)
# the usual form of the price for InstantSystem is: "170 EUR"
# which means "170 EURO cents" also equals "1.70 EURO"
# In Navitia so far prices are in "centime" so we transform it to: "170 centime"
price = ridesharing_ad['price']
res.price = price.get('amount')
if price.get('currency') == "EUR":
res.currency = "centime"
else:
res.currency = price.get('currency')
res.available_seats = ridesharing_ad['vehicle']['availableSeats']
res.total_seats = None
ridesharing_journeys.append(res)
return ridesharing_journeys
def _request_journeys(self, from_coord, to_coord, period_extremity, limit=None):
"""
:param from_coord: lat,lon ex: '48.109377,-1.682103'
:param to_coord: lat,lon ex: '48.020335,-1.743929'
:param period_extremity: a tuple of [timestamp(utc), clockwise]
:param limit: optional
:return:
"""
# format of datetime: 2017-12-25T07:00:00Z
datetime_str = datetime.datetime.fromtimestamp(period_extremity.datetime, pytz.utc).strftime(
'%Y-%m-%dT%H:%M:%SZ'
)
if period_extremity.represents_start:
datetime_str = '{}/PT{}S'.format(datetime_str, self.timeframe_duration)
else:
datetime_str = 'PT{}S/{}'.format(self.timeframe_duration, datetime_str)
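        # Illustrative result with the default timeframe_duration of 1800 s:
        #   departure search: '2017-12-25T07:00:00Z/PT1800S'
        #   arrival search:   'PT1800S/2017-12-25T07:00:00Z'
        # i.e. an ISO 8601 interval written as instant/duration or duration/instant.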
params = {
'from': from_coord,
'to': to_coord,
'fromRadius': self.crowfly_radius,
'toRadius': self.crowfly_radius,
('arrivalDate', 'departureDate')[bool(period_extremity.represents_start)]: datetime_str,
}
if limit is not None:
            params.update({'limit': limit})
# Format call_params from parameters
self.call_params = ''
for key, value in params.items():
self.call_params += '{}={}&'.format(key, value)
resp = self._call_service(params=params)
if resp.status_code != 200:
            # TODO: better error handling, the response might be 200 but still describe an error
logging.getLogger(__name__).error(
'Instant System service unavailable, impossible to query : %s',
resp.url,
extra={'ridesharing_service_id': self._get_rs_id(), 'status_code': resp.status_code},
)
raise RidesharingServiceError('non 200 response', resp.status_code, resp.reason, resp.text)
if resp:
r = self._make_response(resp.json())
self.record_additional_info('Received ridesharing offers', nb_ridesharing_offers=len(r))
logging.getLogger('stat.ridesharing.instant-system').info(
'Received ridesharing offers : %s',
len(r),
extra={'ridesharing_service_id': self._get_rs_id(), 'nb_ridesharing_offers': len(r)},
)
return r
self.record_additional_info('Received ridesharing offers', nb_ridesharing_offers=0)
logging.getLogger('stat.ridesharing.instant-system').info(
'Received ridesharing offers : 0',
extra={'ridesharing_service_id': self._get_rs_id(), 'nb_ridesharing_offers': 0},
)
return []
def _get_feed_publisher(self):
return self.feed_publisher
| xlqian/navitia | source/jormungandr/jormungandr/scenarios/ridesharing/instant_system.py | Python | agpl-3.0 | 11,739 |
from delegates.base import SystemCalcDelegate
from datetime import datetime
from time import time
ts_to_str = lambda x: datetime.fromtimestamp(x).strftime('%Y-%m-%d %H:%M:%S')
PREFIX = '/Ac/Genset'
class GensetStartStop(SystemCalcDelegate):
""" Relay a unified view of what generator start/stop is doing. This
clears up the distinction between relay/fisherpanda as well. """
def get_input(self):
return [('com.victronenergy.generator', [
'/RunningByConditionCode',
'/Runtime',
'/LastStartTime'])]
def get_output(self):
return [('{}/Runtime'.format(PREFIX), {'gettext': '%d'}),
('{}/RunningByConditionCode'.format(PREFIX), {'gettext': '%d'}),
]
def set_sources(self, dbusmonitor, settings, dbusservice):
SystemCalcDelegate.set_sources(self, dbusmonitor, settings, dbusservice)
self._dbusservice.add_path('{}/LastStartTime'.format(PREFIX), None,
gettextcallback=lambda p, v: ts_to_str(v) if v is not None else '---')
@property
def starttime(self):
try:
return self._dbusservice['{}/LastStartTime'.format(PREFIX)]
except KeyError:
return None
@starttime.setter
def starttime(self, v):
self._dbusservice['{}/LastStartTime'.format(PREFIX)] = v
def update_values(self, newvalues):
for service in sorted(self._dbusmonitor.get_service_list('com.victronenergy.generator')):
rbc = self._dbusmonitor.get_value(service, '/RunningByConditionCode')
if rbc is not None:
if self._dbusservice[PREFIX + '/RunningByConditionCode'] == 0 and rbc > 0:
# Generator was started, update LastStartTime
self.starttime = int(time())
newvalues[PREFIX + '/RunningByConditionCode'] = rbc
# Update runtime in 10 second increments, we don't need more than that
rt = self._dbusmonitor.get_value(service, '/Runtime')
newvalues[PREFIX + '/Runtime'] = None if rt is None else 10 * (rt // 10)
break
| victronenergy/dbus-systemcalc-py | delegates/genset.py | Python | mit | 1,870 |
"""Unit test for netutils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import sys
import unittest
import mock
import pkg_resources
from treadmill import netutils
def _test_data(name):
data_path = os.path.join('data', name)
with pkg_resources.resource_stream(__name__, data_path) as f:
return f.read().decode()
def _net_tcp_open(f, *args, **kwargs):
"""Mock tcp/tcp6 open."""
if f.endswith('/tcp'):
data = _test_data('proc.net.tcp.data')
return mock.mock_open(read_data=data)(f, *args, **kwargs)
if f.endswith('/tcp6'):
data = _test_data('proc.net.tcp6.data')
return mock.mock_open(read_data=data)(f, *args, **kwargs)
else:
return io.open.return_value
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
class NetutilsTest(unittest.TestCase):
"""Tests for teadmill.netutils
The test uses two mock outputs from /proc/net/tcp and /proc/net/tcp6.
On tcp6, it listens on port 1 on ::, and port 2 on ::1 - loopback.
On tcp, list on 0.0.0.0:3 and 127.0.0.1:4.
Loopback ports are ignored. Other lines - where state is not listen, are
ignored.
"""
@mock.patch('io.open', mock.mock_open())
def test_netstat(self):
"""Tests netutils.netstat"""
io.open.side_effect = _net_tcp_open
self.assertIn(1, netutils.netstat(os.getpid()))
self.assertNotIn(2, netutils.netstat(os.getpid()))
self.assertIn(3, netutils.netstat(os.getpid()))
self.assertNotIn(4, netutils.netstat(os.getpid()))
if __name__ == '__main__':
unittest.main()
| Morgan-Stanley/treadmill | lib/python/treadmill/tests/netutils_test.py | Python | apache-2.0 | 1,737 |
POLYMODEL_CLASS_ATTRIBUTE = "class"
| Ali-aqrabawi/ezclinic | lib/djangae/db/backends/appengine/__init__.py | Python | mit | 37 |
import datetime
import time as t
import json
import os
from analyzer import check_cross_against_schedule, check_training_or_testing_against_schedule
def add_param(param, program, ard_val, schedule_a):
if param == "program":
return program
if param == "datetime":
return datetime.datetime.now()
# date_handler = lambda obj: (
# obj.isoformat()
# if isinstance(obj, datetime.datetime)
# or isinstance(obj, datetime.date)
# else None)
# return json.dumps(datetime.datetime.now(), default=date_handler)
if param == 'ard_sensor':
return ard_val
if param == 'correct?':
return check_cross_against_schedule(ard_val, schedule_a)
if param == 'training?':
return check_training_or_testing_against_schedule(schedule_a)
class Data_handler():
def __init__(self, experiment, schedule_a, schedule_b):
self.save_file_name = experiment.save_file_name
self.save_model = experiment.save_model
self.program_name = experiment.program
self.line_to_save = []
self.schedule_a = schedule_a
if not os.path.exists("data/"):
os.makedirs("data/")
def save_data(self, data):
sav_f = "data/" + self.save_file_name + t.strftime('_%Y_%m_%d.txt')
with open(sav_f, 'a') as outfile:
json.dump(self.line_to_save, outfile)
outfile.write("\n")
def ard_grab_and_tag_data(self, arduino_sensor):
self.line_to_save = []
val_from_ard= arduino_sensor.read()
if val_from_ard:
for item in self.save_model:
self.line_to_save.append(add_param(item, self.program_name, val_from_ard, self.schedule_a))
self.save_data(self.line_to_save)
print self.line_to_save
t.sleep(0.1)
| npalermo10/auto_choice_assay_train-test | tagger.py | Python | gpl-3.0 | 1,883 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import NumericProperty
from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
interface = Builder.load_string('''
#:import facade plyer.compass
<CompassInterface>:
facade: facade
orientation: 'vertical'
padding: '20dp'
spacing: '10dp'
BoxLayout:
orientation: 'vertical'
BoxLayout:
orientation: 'horizontal'
size_hint: 1, .1
Button:
id: enable_button
text: 'Enable Sensor'
disabled: False
on_release:
root.enable()
disable_button.disabled = not disable_button.disabled
enable_button.disabled = not enable_button.disabled
Button:
id: disable_button
text: 'Disable Sensor'
disabled: True
on_release:
root.disable()
disable_button.disabled = not disable_button.disabled
enable_button.disabled = not enable_button.disabled
BoxLayout:
orientation: 'vertical'
Label:
text: "Earth's Magnetic Field"
Label:
text: 'including hard iron calibration'
Label:
text: '(' + str(root.x_calib) + ','
Label:
text: str(root.y_calib) + ','
Label:
text: str(root.z_calib) + ')'
Label:
text: "Earth's Magnetic Field"
Label:
text: 'w/o hard iron calibration'
Label:
text: '(' + str(root.x_field) + ','
Label:
text: str(root.y_field) + ','
Label:
text: str(root.z_field) + ')'
Label:
text: 'Hard Iron Calibration'
Label:
text: '(' + str(root.x_iron) + ','
Label:
text: str(root.y_iron) + ','
Label:
text: str(root.z_iron) + ')'
Label:
text: 'All the values are in μT'
''')
class CompassInterface(BoxLayout):
x_calib = NumericProperty(0)
y_calib = NumericProperty(0)
z_calib = NumericProperty(0)
x_field = NumericProperty(0)
y_field = NumericProperty(0)
z_field = NumericProperty(0)
x_iron = NumericProperty(0)
y_iron = NumericProperty(0)
z_iron = NumericProperty(0)
facade = ObjectProperty()
def enable(self):
self.facade.enable()
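        # Poll the sensor readings 20 times per second while enabled.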
Clock.schedule_interval(self.get_field, 1 / 20.)
Clock.schedule_interval(self.get_field_uncalib, 1 / 20.)
def disable(self):
self.facade.disable()
Clock.unschedule(self.get_field)
Clock.unschedule(self.get_field_uncalib)
def get_field(self, dt):
if self.facade.field != (None, None, None):
self.x_calib, self.y_calib, self.z_calib = self.facade.field
def get_field_uncalib(self, dt):
if self.facade.field_uncalib != (None, None, None, None, None, None):
self.x_field, self.y_field, self.z_field, self.x_iron,\
self.y_iron, self.z_iron = self.facade.field_uncalib
class CompassTestApp(App):
def build(self):
return CompassInterface()
if __name__ == '__main__':
CompassTestApp().run()
| kivy/plyer | examples/compass/main.py | Python | mit | 3,527 |
"""
Classes to cache and read specific items from github issues in a uniform way
"""
from functools import partial as Partial
import datetime
import time
import shelve
# Requires PyGithub version >= 1.13 for access to raw_data attribute
import github
# Needed to not confuse cached 'None' objects
class Nothing(object):
raw_data = None
# Needed to signal list cache, not github object
class SearchResults(object):
def __init__(self, *stuff):
self.raw_data = stuff
class GithubCache(object):
"""
Auto-refreshing github.GithubObject.GithubObject from dict
"""
cache_hits = 0
cache_misses = 0
cache_lifetimes = {
'default': datetime.timedelta(hours=2),
github.GitCommit.GitCommit: datetime.timedelta(days=30),
github.Commit.Commit: datetime.timedelta(days=30),
github.Issue.Issue: datetime.timedelta(minutes=30),
github.PullRequest.PullRequest: datetime.timedelta(hours=1),
# Special case for github.Issue.Issue
'closed': datetime.timedelta(days=30),
SearchResults: datetime.timedelta(minutes=10),
github.NamedUser.NamedUser: datetime.timedelta(hours=2),
github.GitAuthor.GitAuthor: datetime.timedelta(days=9999),
'total_issues': datetime.timedelta(days=9999)
}
def __init__(self, github_obj, cache_get_partial, cache_set_partial,
cache_del_partial, pre_fetch_partial, fetch_partial):
self.github = github_obj
self.cache_get = cache_get_partial # Returns native dict
self.cache_set = cache_set_partial # called with value=dict
self.cache_del = cache_del_partial
self.pre_fetch = pre_fetch_partial # called with nothing
self.fetch = fetch_partial # Returns github.GithubObject.GithubObject
def __call__(self):
"""
Retrieve instance from fresh or cached data
"""
# microseconds aren't useful when fetch takes ~1 second
now = datetime.datetime.utcnow()
now = datetime.datetime(year=now.year, month=now.month,
day=now.day, hour=now.hour,
minute=now.minute, second=0, microsecond=0)
try:
data = self.cached_data()
if data['expires'] < now:
raise KeyError # refresh cache
self.cache_hits += 1
except KeyError:
data = self.fetched_data(now)
self.cache_set(value=data)
self.cache_misses += 1
# Any exceptions thrown during conversion should purge cache entry
try:
# Format data for consumption
if data['klass'] == github.PaginatedList.PaginatedList:
inside_klass = data['inside_klass']
result = []
for item in data['raw_data']:
result.append(
self.github.create_from_raw_data(inside_klass,
item))
return result
elif data['klass'] == Nothing:
return None # it's a None object
elif data['klass'] == SearchResults:
return data['raw_data'] # just the contents
else:
return self.github.create_from_raw_data(data['klass'],
data['raw_data'])
except Exception:
try:
self.cache_del()
except KeyError:
pass # doesn't exist in cache, ignore
raise # original exception
@staticmethod
def format_data(klass, expires, raw_data, inside_klass=None):
"""
Enforce uniform data format for fetched data
"""
if inside_klass is None:
return {'klass': klass,
'fetched': datetime.datetime.utcnow(),
'expires': expires,
'raw_data': raw_data}
else:
return {'klass': klass,
'inside_klass': inside_klass,
'fetched': datetime.datetime.utcnow(),
'expires': expires,
'raw_data': raw_data}
def fetched_data(self, now):
"""
Return dictionary containing freshly fetched values
"""
try:
if callable(self.pre_fetch):
self.pre_fetch()
fetched_obj = self.fetch()
except github.GithubException, detail:
if detail.status == 404:
raise KeyError('Github item not-found error while calling %s '
'with args=%s and dargs=%s' % (self.fetch.func,
self.fetch.args,
self.fetch.keywords))
else:
raise
if fetched_obj is None:
fetched_obj = Nothing()
klass = fetched_obj.__class__
# github.PaginatedList.PaginatedList need special handling
if isinstance(fetched_obj, github.PaginatedList.PaginatedList):
raw_data = [item.raw_data for item in fetched_obj]
inside_klass = fetched_obj[0].__class__
expires = now + self.cache_lifetimes.get(inside_klass,
self.cache_lifetimes['default'])
return self.__class__.format_data(klass,
now + self.cache_lifetimes.get(
inside_klass,
self.cache_lifetimes[
'default']),
raw_data, inside_klass)
else:
expires = now + self.cache_lifetimes.get(klass,
# else default
self.cache_lifetimes['default'])
# closed issues/pull requests don't change much
if hasattr(fetched_obj, 'closed_at'):
if fetched_obj.closed_at is not None:
expires = now + self.cache_lifetimes['closed']
return self.__class__.format_data(klass, expires,
fetched_obj.raw_data)
def cached_data(self):
"""
Return dictionary containing cached values or raise KeyError
"""
try:
return self.cache_get() # maybe raise KeyError or TypeError
except KeyError:
raise
except Exception:
# Try to delete the entry
self.cache_del()
raise
class GithubIssuesBase(list):
"""
Base class for cached list of github issues
"""
# Force static pickle protocol version
protocol = 2
# Class to use for cache management
cache_class = GithubCache
def __init__(self, github_obj, repo_full_name, cache_filename):
"""
Initialize cache and reference github repository issues
"""
self.github = github_obj
self.repo_full_name = repo_full_name
self.shelf = shelve.open(filename=cache_filename,
protocol=self.protocol,
writeback=True)
# Avoid exceeding rate-limit per hour
requests = self.github.rate_limiting[1] # requests per hour
period = 60.0 * 60.0 # one hour in seconds
sleeptime = period / requests
self.pre_fetch_partial = Partial(time.sleep, sleeptime)
# self.pre_fetch_partial = None # cheat-mode enable (no delays)
repo_cache_key = 'repo_%s' % self.repo_full_name
# get_repo called same way throughout instance life
cache_get_partial = Partial(self.shelf.__getitem__, repo_cache_key)
cache_set_partial = Partial(self.shelf.__setitem__, repo_cache_key)
cache_del_partial = Partial(self.shelf.__delitem__, repo_cache_key)
fetch_partial = Partial(self.github.get_repo,
self.repo_full_name)
# Callable instance retrieves cached or fetched value for key
self.get_repo = self.cache_class(self.github,
cache_get_partial,
cache_set_partial,
cache_del_partial,
self.pre_fetch_partial,
fetch_partial)
super(GithubIssuesBase, self).__init__()
def __del__(self):
"""
Make sure cache is saved
"""
try:
self.shelf.close()
except AttributeError:
pass # Open must have failed
def __len__(self):
"""
        Find the largest existing issue number by stepping upward, halving the step size until it reaches zero
"""
increment = 1000
last_issue = 1
if not self.__contains__(last_issue):
return 0 # no issues
while increment > 0:
while self.__contains__(last_issue):
last_issue += increment
# Fall back to prior one
last_issue -= increment
# Chop increment in half
increment /= 2
return last_issue
def __contains__(self, key):
try:
# Must call this classes method specifically
GithubIssuesBase.__getitem__(self, key)
except KeyError:
return False
return True
def __iter__(self):
for key in self.keys():
yield self[key]
def __setitem__(self, key, value):
raise KeyError("Read only mapping while trying to set %s to %s"
% (str(key), str(value)))
def __delitem__(self, key):
raise KeyError(
"Read only mapping while trying to delete %s" % str(key))
def __getitem__(self, key):
"""
Return a standardized dict of github issue unless NoEnumerate=True
"""
repo = self.get_repo()
# Enforce uniform key string
cache_key = self.get_issue_cache_key(key)
fetch_partial = Partial(repo.get_issue, int(key))
item = self.get_gh_obj(cache_key, fetch_partial)
# No exception raised, update cache on disk
self.shelf.sync()
return item
def get_issue_cache_key(self, number):
return 'repo_%s_issue_%s' % (self.repo_full_name, str(int(number)))
def has_key(self, key):
return self.__contains__(key)
def items(self):
# Iterator comprehension
return (self[key] for key in self.keys())
def keys(self):
# Iterators are simply better
return xrange(1, self.__len__() + 1)
def values(self):
# Iterator comprehension
return (value for (key, value) in self.items())
class GithubIssues(GithubIssuesBase, object):
"""
Read-only List-like interface to cached github issues in standardized format
"""
# Marshal callables for key to github.Issue.Issue value
marshal_map = {
'number': lambda gh_obj: getattr(gh_obj, 'number'),
'summary': lambda gh_obj: getattr(gh_obj, 'title'),
'description': lambda gh_obj: getattr(gh_obj, 'body'),
'modified': lambda gh_obj: getattr(gh_obj, 'updated_at'),
'commits': NotImplementedError, # setup in __init__
'opened': lambda gh_obj: getattr(gh_obj, 'created_at'),
'closed': lambda gh_obj: getattr(gh_obj, 'closed_at'),
'assigned': lambda gh_obj: getattr(gh_obj, 'assignee'),
'author': lambda gh_obj: getattr(gh_obj, 'user').login,
'commit_authors': NotImplementedError, # setup in __init__
'comments': lambda gh_obj: getattr(gh_obj, 'comments'),
'comment_authors': NotImplementedError, # setup in __init__
'labels': lambda gh_obj: [label.name for label in gh_obj.labels],
'url': lambda gh_obj: getattr(gh_obj, 'html_url'),
'github_issue': lambda gh_obj: gh_obj
}
# Storage for property values
_cache_hits = 0 # Tracks temporary cache instances
_cache_misses = 0 # Tracks temporary cache instances
def __init__(self, github_obj, repo_full_name):
"""
Initialize cache and reference github repository issues
"""
cache_filename = self.__class__.__name__ + '.cache'
super(GithubIssues, self).__init__(github_obj,
repo_full_name,
cache_filename)
# These marshal functions require state
self.marshal_map['commits'] = self.gh_pr_commits
self.marshal_map['commit_authors'] = self.gh_pr_commit_authors
self.marshal_map['comment_authors'] = self.gh_issue_comment_authors
def __del__(self):
self.vacuum()
super(GithubIssues, self).__del__()
def vacuum(self):
"""Vacuum up all expired entries"""
# Can't modify list while iterating
keys_to_del = []
now = datetime.datetime.utcnow()
for key, value in self.shelf.items():
# no need to be precise
if value['expires'] <= now:
keys_to_del.append(key)
for key in keys_to_del:
del self.shelf[key]
@property
def cache_hits(self):
return self.get_repo.cache_hits + self._cache_hits
@property
def cache_misses(self):
return self.get_repo.cache_misses + self._cache_misses
def __getitem__(self, key):
"""
Return a standardized dict of github issue
"""
item = self.marshal_gh_obj(super(GithubIssues, self).__getitem__(key))
self.shelf.sync()
return item
def __len__(self):
"""
Return cached number of issues
"""
cache_key = 'repo_%s_total_issues' % self.repo_full_name
# seconds aren't useful when fetch takes > 1 minute
now = datetime.datetime.utcnow()
now = datetime.datetime(year=now.year, month=now.month,
day=now.day, hour=now.hour,
minute=now.minute, second=0, microsecond=0)
# Easier to do custom caching behavior here than fuss with GithubCache
try:
cache_data = self.shelf.__getitem__(cache_key)
if cache_data['expires'] < now:
raise KeyError
# Bypass search_result caching used in self.search()
searchresult = self.make_search_results(
{'since': cache_data['since']})
# about to change the number
cache_data['since'] = now
# total equal to old count plus new count since then
cache_data['raw_data'] += len(searchresult.raw_data)
except KeyError:
cache_data = {}
# doesn't expire ever
cache_data['expires'] = now + GithubCache.cache_lifetimes[
'total_issues']
cache_data['since'] = now
# This will take a while if issue cache is stale
cache_data['raw_data'] = super(GithubIssues, self).__len__()
self.shelf.__setitem__(cache_key, cache_data)
return cache_data['raw_data']
def get_gh_obj(self, cache_key, fetch_partial):
"""
Helper to get object possibly from cache and update counters
"""
cache_get_partial = Partial(self.shelf.__getitem__,
cache_key)
cache_set_partial = Partial(self.shelf.__setitem__,
cache_key)
cache_del_partial = Partial(self.shelf.__delitem__,
cache_key)
# Callable instance could change every time
get_obj = GithubCache(self.github,
cache_get_partial,
cache_set_partial,
cache_del_partial,
self.pre_fetch_partial,
fetch_partial)
result = get_obj()
self._cache_hits += get_obj.cache_hits
self._cache_misses += get_obj.cache_misses
return result # DOES NOT SYNC DATA!
def search(self, criteria):
"""
Return a list of issue-numbers that match a search criteria.
:param criteria: Dictionary of search terms
state - str - 'open', 'closed'
assignee - list of str (login), "none" or "*"
mentioned - str (login)
labels - list of str (label name)
sort - str - 'created', 'updated', 'comments'
direction - str - 'asc', 'desc'
since - datetime.datetime
"""
valid_criteria = {}
# use search dictionary to form hash for cached results
search_cache_key = 'issue_search'
# Validate & transform criteria
if criteria.has_key('state'):
state = str(criteria['state'])
if state not in ('open', 'closed'):
raise ValueError("'state' criteria must be 'open' or 'closed'")
valid_criteria['state'] = state
search_cache_key = '%s_%s' % (search_cache_key, state)
if criteria.has_key('assignee'):
assignee = str(criteria['assignee'])
search_cache_key = '%s_%s' % (search_cache_key, assignee)
if assignee in ('none', '*'):
valid_criteria['assignee'] = assignee
else:
# returns github.NamedUser.NamedUser
valid_criteria['assignee'] = self.get_gh_user(assignee)
if criteria.has_key('mentioned'):
mentioned = str(criteria['assignee'])
search_cache_key = '%s_%s' % (search_cache_key, mentioned)
if mentioned in ('none', '*'):
valid_criteria['mentioned'] = mentioned
else:
# returns github.NamedUser.NamedUser
valid_criteria['mentioned'] = self.get_gh_user(mentioned)
if criteria.has_key('labels'):
labels = criteria['labels']
if not isinstance(labels, list):
raise ValueError("'lables' criteria must be a list")
valid_criteria['labels'] = []
for name in labels:
search_cache_key = '%s_%s' % (search_cache_key, labels)
valid_criteria['labels'].append(self.get_gh_label(str(name)))
if criteria.has_key('sort'):
sort = str(criteria['sort'])
if sort not in ('created', 'updated', 'comments'):
raise ValueError("'sort' criteria must be 'created', 'updated'"
", 'comments'")
valid_criteria['sort'] = sort
search_cache_key = '%s_%s' % (search_cache_key, sort)
if criteria.has_key('direction'):
direction = str(criteria['direction'])
if direction not in ('asc', 'desc'):
raise ValueError("'direction' criteria must be 'asc', 'desc'")
valid_criteria['direction'] = direction
search_cache_key = '%s_%s' % (search_cache_key, direction)
if criteria.has_key('since'):
since = criteria['since']
if not isinstance(since, datetime.datetime):
raise ValueError("'since' criteria must be a "
"datetime.datetime")
# second and milisecond not useful to search or cache
since = datetime.datetime(year=since.year,
month=since.month,
day=since.day,
hour=since.hour,
minute=since.minute,
second=0,
microsecond=0)
search_cache_key = '%s_%s' % (search_cache_key, since.isoformat())
valid_criteria['since'] = since
# Do not perform search operation unless no cached results
# or cached results have expired
fetch_partial = Partial(self.make_search_results, valid_criteria)
# This could take an arbitrarily LONG time
return self.get_gh_obj(search_cache_key, fetch_partial)
def make_search_results(self, valid_criteria):
"""
Return a SearchResults instance from issue numbers found by search
"""
repo = self.get_repo()
result = repo.get_issues(**valid_criteria)
return SearchResults(*[issue.number for issue in result])
def clean_cache_entry(self, key):
"""
Remove an entry from cache, ignoring any KeyErrors
"""
try:
del self.shelf[key]
except KeyError:
pass
def get_gh_user(self, login):
cache_key = 'github_user_%s' % login
fetch_partial = Partial(self.github.get_user, login)
try:
return self.get_gh_obj(cache_key, fetch_partial)
except KeyError:
raise ValueError('login %s is not a valid github user' % login)
def get_gh_label(self, name):
repo = self.get_repo()
cache_key = str('repo_%s_label_%s' % (self.repo_full_name, name))
fetch_partial = Partial(repo.get_label, name)
try:
return self.get_gh_obj(cache_key, fetch_partial)
except KeyError:
raise ValueError('label %s is not valid for repo %s' % (name,
self.repo_full_name))
def marshal_gh_obj(self, gh_issue):
"""
Translate a github issue object into dictionary w/ fixed keys
"""
mkeys = self.marshal_map.keys()
return dict([(key, self.marshal_map[key](gh_issue)) for key in mkeys])
@staticmethod
def gh_issue_is_pull(gh_issue):
"""
Return True/False if gh_issue is a pull request or not
"""
pullreq = gh_issue.pull_request
if pullreq is not None:
if (pullreq.diff_url is None and
pullreq.html_url is None and
pullreq.patch_url is None):
return False
else:
return False
# pullreq not None but pullreq attributes are not None
return True
# marshal_map method
def gh_issue_comment_authors(self, gh_issue):
"""
Retrieve a list of comment author e-mail addresses
"""
if gh_issue.comments > 0:
num = gh_issue.number
cache_key = ('repo_%s_issue_%s_comments'
% (self.repo_full_name, num))
fetch_partial = Partial(gh_issue.get_comments)
authors = set()
for comment in self.get_gh_obj(cache_key, fetch_partial):
# Referencing user attribute requires a request, so cache it
user_cache_key = cache_key + '_%s_user' % comment.id
user_fetch_partial = Partial(getattr, comment, 'user')
try:
user = self.get_gh_obj(user_cache_key, user_fetch_partial)
except Exception:
# Also clean up comments cache
self.clean_cache_entry(cache_key)
raise # original exception
authors.add(user.email)
return authors
else:
return None
# marshal_map method
def gh_pr_commit_authors(self, gh_issue):
"""
Return list of commit author e-mail addresses for a pull-request
"""
if GithubIssues.gh_issue_is_pull(gh_issue):
num = gh_issue.number
repo = self.get_repo()
cache_key = 'repo_%s_pull_%s' % (self.repo_full_name, str(num))
fetch_partial = Partial(repo.get_pull, num)
pull = self.get_gh_obj(cache_key, fetch_partial)
if pull.commits is None or pull.commits < 1:
return None # No commits == no commit authors
cache_key = 'repo_%s_pull_%s_commits' % (self.repo_full_name,
str(num))
fetch_partial = Partial(pull.get_commits)
authors = set()
for commit in self.get_gh_obj(cache_key, fetch_partial):
# Referencing commit author requires a request, cache it.
author_cache_key = cache_key + '_%s_author' % str(commit.sha)
author_fetch_partial = Partial(getattr, commit, 'author')
try:
author_obj = self.get_gh_obj(author_cache_key,
author_fetch_partial)
except Exception:
# clean up commit list cache entry also
self.clean_cache_entry(cache_key)
raise # original exception
# Retrieve e-mail from git commit object
if author_obj is None:
# Referencing git commit requires a request, cache it
gitcommit_cache_key = (cache_key + '_%s_gitcommit'
% str(commit.sha))
gitcommit_fetch_partial = Partial(getattr, commit,
'commit') # git commit
try:
gitcommit = self.get_gh_obj(gitcommit_cache_key,
gitcommit_fetch_partial)
except Exception:
# Need to clean commit and gitcommit entries
self.clean_cache_entry(cache_key)
self.clean_cache_entry(gitcommit_cache_key)
raise
authors.add(gitcommit.author.email)
else: # Author is a github user
authors.add(author_obj.login)
return authors
return None # not a pull request
# marshal_map method
def gh_pr_commits(self, gh_issue):
"""
Retrieves the number of commits on a pull-request, None if not a pull.
"""
if GithubIssues.gh_issue_is_pull(gh_issue):
num = gh_issue.number
repo = self.get_repo()
cache_key = 'repo_%s_pull_%s' % (self.repo_full_name, str(num))
fetch_partial = Partial(repo.get_pull, num)
pull = self.get_gh_obj(cache_key, fetch_partial)
return pull.commits
return None # not a pull request
class MutateError(KeyError):
def __init__(self, key, number):
super(MutateError, self).__init__("Unable to modify %s on issue %d"
% (str(key), number))
class MutableIssue(dict):
"""Allow modification of some issue values"""
def __init__(self, github_issues, issue_number):
if not isinstance(github_issues, GithubIssues):
raise ValueError("github_issues %s is not a GithubIssues, it's a %s"
% (str(github_issues), str(type(github_issues))))
# make sure issue_number is valid and cached
junk = github_issues[issue_number]
del junk
# Private for private _github_issue property access
self._github_issues = github_issues
self._issue_number = issue_number
super(MutableIssue, self).__init__()
@property
def _github_issue(self):
return self._github_issues[self._issue_number]
@property
def _issue_cache_key(self):
return self.get_issue_cache_key(self._issue_number)
def _setdelitem(self, opr, key, value):
if key not in self._github_issues.marshal_map.keys():
raise MutateError(key, self._issue_number)
methodname = '%s_%s' % (opr, key)
if callable(getattr(self, methodname)):
method = getattr(self, methodname)
if opr == 'set':
method(value)
else:
method()
else:
raise MutateError(key, self._issue_number)
def __getitem__(self, key):
# Guarantees fresh/cached data for every call
return self._github_issue[key]
def __setitem__(self, key, value):
self._setdelitem('set', key, value)
def __delitem__(self, key):
self._setdelitem('del', key, None)
def set_labels(self, value):
"""
        Merge list of new labels into existing label set
"""
new_labels = set(value)
old_labels = set(self._github_issue['labels'])
change_list = list(new_labels | old_labels)
get_gh_label = self._github_issues.get_gh_label # save typing
# Raise exception if any label name is bad
gh_labels = [get_gh_label(label) for label in change_list]
# Access PyGithub object to change labels
self._github_issue['github_issue'].set_labels(*gh_labels)
# Force retrieval of changed item
self._github_issues.clean_cache_entry(self._issue_cache_key)
def del_labels(self):
"""
        Remove all labels from an issue
"""
self._github_issue['github_issue'].delete_labels()
# Force retrieval of changed item
self._github_issues.clean_cache_entry(self._issue_cache_key)
# TODO: Write get_*(), set_*(), del_*() for other dictionary keys
| CongLi/avocado-vt | scripts/github/github_issues.py | Python | gpl-2.0 | 29,593 |
"""
webhooks module URLs config
"""
from django.conf.urls import url
from webhooks import views
urlpatterns = [
url(
r'^github/$',
views.GitHubWebhookReceiverView.as_view(),
name='github',
),
url(
r'^zenhub/$',
views.ZenHubWebhookReceiverView.as_view(),
name='zenhub',
),
]
| pawelad/zenboard | src/webhooks/urls.py | Python | apache-2.0 | 341 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import ctypes
import os
import re
import subprocess
import time
try:
import pywintypes # pylint: disable=F0401
import win32api # pylint: disable=F0401
import win32con # pylint: disable=F0401
import win32process # pylint: disable=F0401
except ImportError:
pywintypes = None
win32api = None
win32con = None
win32process = None
from telemetry.core.platform import desktop_platform_backend
class WinPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
def _GetProcessHandle(self, pid):
mask = (win32con.PROCESS_QUERY_INFORMATION |
win32con.PROCESS_VM_READ)
return win32api.OpenProcess(mask, False, pid)
# pylint: disable=W0613
def StartRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def StopRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def GetRawDisplayFrameRateMeasurements(self):
raise NotImplementedError()
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
class PerformanceInfo(ctypes.Structure):
"""Struct for GetPerformanceInfo() call
http://msdn.microsoft.com/en-us/library/ms683210
"""
_fields_ = [('size', ctypes.c_ulong),
('CommitTotal', ctypes.c_size_t),
('CommitLimit', ctypes.c_size_t),
('CommitPeak', ctypes.c_size_t),
('PhysicalTotal', ctypes.c_size_t),
('PhysicalAvailable', ctypes.c_size_t),
('SystemCache', ctypes.c_size_t),
('KernelTotal', ctypes.c_size_t),
('KernelPaged', ctypes.c_size_t),
('KernelNonpaged', ctypes.c_size_t),
('PageSize', ctypes.c_size_t),
('HandleCount', ctypes.c_ulong),
('ProcessCount', ctypes.c_ulong),
('ThreadCount', ctypes.c_ulong)]
def __init__(self):
self.size = ctypes.sizeof(self)
super(PerformanceInfo, self).__init__()
performance_info = PerformanceInfo()
ctypes.windll.psapi.GetPerformanceInfo(
ctypes.byref(performance_info), performance_info.size)
return performance_info.CommitTotal * performance_info.PageSize / 1024
def GetCpuStats(self, pid):
try:
cpu_info = win32process.GetProcessTimes(
self._GetProcessHandle(pid))
except pywintypes.error, e:
errcode = e[0]
if errcode == 87: # The process may have been closed.
return {}
raise
# Convert 100 nanosecond units to seconds
cpu_time = (cpu_info['UserTime'] / 1e7 +
cpu_info['KernelTime'] / 1e7)
return {'CpuProcessTime': cpu_time}
def GetCpuTimestamp(self):
"""Return current timestamp in seconds."""
return {'TotalTime': time.time()}
def GetMemoryStats(self, pid):
try:
memory_info = win32process.GetProcessMemoryInfo(
self._GetProcessHandle(pid))
except pywintypes.error, e:
errcode = e[0]
if errcode == 87: # The process may have been closed.
return {}
raise
return {'VM': memory_info['PagefileUsage'],
'VMPeak': memory_info['PeakPagefileUsage'],
'WorkingSetSize': memory_info['WorkingSetSize'],
'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}
def GetIOStats(self, pid):
try:
io_stats = win32process.GetProcessIoCounters(
self._GetProcessHandle(pid))
except pywintypes.error, e:
errcode = e[0]
if errcode == 87: # The process may have been closed.
return {}
raise
return {'ReadOperationCount': io_stats['ReadOperationCount'],
'WriteOperationCount': io_stats['WriteOperationCount'],
'ReadTransferCount': io_stats['ReadTransferCount'],
'WriteTransferCount': io_stats['WriteTransferCount']}
def KillProcess(self, pid, kill_process_tree=False):
    # os.kill is only available on Windows from Python 2.7 on, so use taskkill instead.
cmd = ['taskkill', '/F', '/PID', str(pid)]
if kill_process_tree:
cmd.append('/T')
subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).wait()
def GetSystemProcessInfo(self):
# [3:] To skip 2 blank lines and header.
lines = subprocess.Popen(
['wmic', 'process', 'get',
'CommandLine,CreationDate,Name,ParentProcessId,ProcessId',
'/format:csv'],
stdout=subprocess.PIPE).communicate()[0].splitlines()[3:]
process_info = []
for line in lines:
if not line:
continue
parts = line.split(',')
pi = {}
pi['ProcessId'] = int(parts[-1])
pi['ParentProcessId'] = int(parts[-2])
pi['Name'] = parts[-3]
creation_date = None
if parts[-4]:
creation_date = float(re.split('[+-]', parts[-4])[0])
pi['CreationDate'] = creation_date
pi['CommandLine'] = ','.join(parts[1:-4])
process_info.append(pi)
return process_info
def GetChildPids(self, pid):
"""Retunds a list of child pids of |pid|."""
ppid_map = collections.defaultdict(list)
creation_map = {}
for pi in self.GetSystemProcessInfo():
ppid_map[pi['ParentProcessId']].append(pi['ProcessId'])
if pi['CreationDate']:
creation_map[pi['ProcessId']] = pi['CreationDate']
def _InnerGetChildPids(pid):
if not pid or pid not in ppid_map:
return []
ret = [p for p in ppid_map[pid] if creation_map[p] >= creation_map[pid]]
for child in ret:
if child == pid:
continue
ret.extend(_InnerGetChildPids(child))
return ret
return _InnerGetChildPids(pid)
def GetCommandLine(self, pid):
for pi in self.GetSystemProcessInfo():
if pid == pi['ProcessId']:
return pi['CommandLine']
raise Exception('Could not get command line for %d' % pid)
def GetOSName(self):
return 'win'
def GetOSVersionName(self):
os_version = os.uname()[2]
if os_version.startswith('5.1.'):
return 'xp'
if os_version.startswith('6.0.'):
return 'vista'
if os_version.startswith('6.1.'):
return 'win7'
if os_version.startswith('6.2.'):
return 'win8'
def CanFlushIndividualFilesFromSystemCache(self):
return True
def GetFlushUtilityName(self):
return 'clear_system_cache.exe'
| mogoweb/chromium-crosswalk | tools/telemetry/telemetry/core/platform/win_platform_backend.py | Python | bsd-3-clause | 6,574 |
__author__ = 'eponsko'
from setuptools import setup, find_packages
setup(name='doubledecker',
version='0.4',
description='DoubleDecker client module and examples',
url='http://acreo.github.io/DoubleDecker/',
author='ponsko',
author_email='ponsko@acreo.se',
license='LGPLv2.1',
scripts=['bin/ddclient.py', 'bin/ddkeys.py', ],
packages=find_packages(),
install_requires=['pyzmq', 'pynacl', 'future'])
| Acreo/DoubleDecker-py | setup.py | Python | lgpl-2.1 | 453 |
import invoke
from minchin.releaser import make_release
| MinchinWeb/colourettu | tasks.py | Python | mit | 57 |
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
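        # weights is None for uniform weighting; otherwise it holds one weight
        # per neighbor and the prediction below is the weighted mean
        # sum_i(w_i * y_i) / sum_i(w_i), computed per output column.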
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
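        # radius_neighbors returns a ragged result: each query point may have
        # a different number of neighbors, hence the per-row loop below rather
        # than a single vectorized mean.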
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| alexsavio/scikit-learn | sklearn/neighbors/regression.py | Python | bsd-3-clause | 10,998 |
import operator as op
from parsimonious.grammar import Grammar
class Mini(object):
    def __init__(self, env=None):
        # Use None as the default to avoid sharing one mutable dict between instances.
        env = {} if env is None else env
        env.update({'sum': lambda *args: sum(args)})
        self.env = env
def parse(self,source):
grammar = '\n'.join(v.__doc__ for k, v in vars(self.__class__).items()
if not k.startswith('__') and hasattr(v, '__doc__')
and getattr(v, '__doc__'))
return Grammar(grammar)['program'].parse(source)
def eval(self, source):
node = self.parse(source) if isinstance(source, str) else source
method = getattr(self, node.expr_name, lambda *a: 'error')
return method(node, [self.eval(n) for n in node])
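    # Rough usage sketch (behaviour inferred from the grammar rules below, not
    # from project documentation):
    #
    #   mini = Mini()
    #   mini.eval('sum(1 2 3)')   # -> [6]
    #   mini.eval('x = 4')        # -> [4]
    #   mini.eval('(x + 1)')      # -> [5], env persists on the instance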
def program(self, node, children):
'program = expr*'
return children
def expr(self, node, children):
'expr = call / infix / assignment / number / name'
return children[0]
def call(self, node, children):
'call = name "(" arguments ")"'
name, _, arguments, _ = children
return name(*arguments)
def arguments(self, node, children):
'arguments = argument*'
return children
def argument(self, node, children):
'argument = expr _'
return children[0]
def infix(self, node, children):
'infix = "(" _ expr _ operator _ expr _ ")" '
_, _, expr1, _, operator, _, expr2, _, _ = children
return operator(expr1, expr2)
def operator(self, node, children):
'operator = "+" / "-" / "*" / "/"'
operators = {
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.div
}
return operators[node.text]
def assignment(self, node, children):
'assignment = lvalue _ "=" _ expr'
lvalue, _, _, _, expr = children
self.env[lvalue] = expr
return expr
def lvalue(self, node, children):
'lvalue = ~"[a-z]+" _'
return node.text.strip()
    def name(self, node, children):
'name = ~"[a-z]+" _'
return self.env.get(node.text.strip(), -1)
def number(self, node, children):
'number = ~"[0-9]+" _'
return int(node.text)
def _(self, node, children):
'_ = ~"\s*"' | saru95/Mini-Interpreter | src/regex/mini.py | Python | mit | 1,942 |
import os
from PyQt5.QtCore import QObject
from ethereum.utils import denoms
from mock import patch, MagicMock, ANY, Mock
from PIL import Image
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
from golem.testutils import TempDirFixture, TestGui
from apps.core.task.coretaskstate import TaskDesc
from apps.rendering.task.renderingtaskstate import RenderingTaskDefinition
from gui.controller.mainwindowcustomizer import MainWindowCustomizer
from gui.view.tasktableelem import ItemMap
class MagicQObject(QObject):
def __init__(self, *args):
super(MagicQObject, self).__init__(*args)
self.ui = MagicMock()
@staticmethod
def setMouseTracking(*args):
pass
class TestMainWindowCustomizer(TestGui):
def test_description(self):
customizer = MainWindowCustomizer(self.gui.get_main_window(), MagicMock())
assert isinstance(customizer, MainWindowCustomizer)
customizer.set_options(MagicMock(), "ID1", "ETH_ADDR1", "DESC1")
assert customizer.gui.ui.descriptionTextEdit.toPlainText() == "DESC1"
customizer.set_options(MagicMock(), "ID1", "ETH_ADDR1", "DESC2")
assert customizer.gui.ui.descriptionTextEdit.toPlainText() == "DESC2"
assert customizer.gui.ui.editDescriptionButton.isEnabled()
assert not customizer.gui.ui.saveDescriptionButton.isEnabled()
assert not customizer.gui.ui.descriptionTextEdit.isEnabled()
QTest.mouseClick(customizer.gui.ui.editDescriptionButton, Qt.LeftButton)
assert not customizer.gui.ui.editDescriptionButton.isEnabled()
assert customizer.gui.ui.saveDescriptionButton.isEnabled()
assert customizer.gui.ui.descriptionTextEdit.isEnabled()
QTest.mouseClick(customizer.gui.ui.saveDescriptionButton, Qt.LeftButton)
assert customizer.gui.ui.editDescriptionButton.isEnabled()
assert not customizer.gui.ui.saveDescriptionButton.isEnabled()
assert not customizer.gui.ui.descriptionTextEdit.isEnabled()
def test_table(self):
customizer = MainWindowCustomizer(self.gui.get_main_window(), MagicMock())
task1 = TaskDesc()
task1.definition.task_id = "TASK ID 1"
task1.status = "Finished"
task1.definition.task_name = "TASK NAME 1"
customizer.logic.get_task.return_value = task1
customizer.add_task(task1)
assert customizer.gui.ui.taskTableWidget.item(0, ItemMap.Id).text() == "TASK ID 1"
assert customizer.gui.ui.taskTableWidget.item(0, ItemMap.Name).text() == "TASK NAME 1"
assert customizer.gui.ui.taskTableWidget.item(0, ItemMap.Status).text() == "Finished"
assert customizer.gui.ui.taskTableWidget.item(0, ItemMap.Cost).text() == "0.000000"
assert customizer.gui.ui.taskTableWidget.item(0, ItemMap.Time).text() == "00:00:00"
task2 = TaskDesc()
task2.definition.task_id = "TASK ID 2"
task2.status = "Waiting"
task2.definition.task_name = "TASK NAME 2"
customizer.logic.get_task.return_value = task2
customizer.add_task(task2)
assert customizer.gui.ui.taskTableWidget.item(1, ItemMap.Id).text() == "TASK ID 2"
assert customizer.gui.ui.taskTableWidget.item(1, ItemMap.Name).text() == "TASK NAME 2"
assert customizer.gui.ui.taskTableWidget.item(1, ItemMap.Status).text() == "Waiting"
assert customizer.gui.ui.taskTableWidget.item(1, ItemMap.Cost).text() == "0.000000"
assert customizer.gui.ui.taskTableWidget.item(1, ItemMap.Time).text() == "00:00:00"
customizer.update_time()
assert customizer.gui.ui.taskTableWidget.item(0, ItemMap.Time).text() == "00:00:00"
time_ = customizer.gui.ui.taskTableWidget.item(1, ItemMap.Time).text()
assert time_ != "00:00:00"
task1.task_state.status = "Computing"
task2.task_state.progress = 0.3
task2.task_state.status = "Paused"
task2.task_state.progress = 1.0
customizer.logic.get_cost_for_task_id.return_value = 2.342 * denoms.ether
tasks = {'TASK ID 1': task1, 'TASK ID 2': task2}
customizer.update_tasks(tasks)
customizer.update_time()
assert customizer.gui.ui.taskTableWidget.item(1, ItemMap.Cost).text() == "2.342000"
assert customizer.gui.ui.taskTableWidget.item(0, ItemMap.Time).text() != "00:00:00"
assert customizer.gui.ui.taskTableWidget.item(1, ItemMap.Time).text() == time_
customizer.remove_task("TASK ID 2")
customizer.logic.get_task.return_value = TaskDesc()
customizer.show_change_task_dialog("ABC")
customizer.change_task_dialog.close()
def test_preview(self):
obj = MagicQObject()
obj.ui.outputFile = QObject()
obj.ui.previewLabel = MagicQObject()
customizer = MainWindowCustomizer(obj, obj)
self.assertTrue(os.path.isfile(customizer.preview_controller.preview_path))
def test_folderTreeView(self):
tmp_files = self.additional_dir_content([4, [3], [2]])
customizer = MainWindowCustomizer(self.gui.get_main_window(), MagicMock())
customizer.gui.ui.showResourceButton.click()
customizer.current_task_highlighted = MagicMock()
customizer.current_task_highlighted.definition.main_scene_file = tmp_files[0]
customizer.current_task_highlighted.definition.resources = tmp_files
customizer.gui.ui.showResourceButton.click()
def test_update_preview(self):
customizer = MainWindowCustomizer(self.gui.get_main_window(), MagicMock())
rts = TaskDesc(definition_class=RenderingTaskDefinition)
rts.definition.output_file = "bla"
customizer.update_task_additional_info(rts)
assert customizer.gui.ui.outputFile.text() == "bla"
assert not customizer.gui.ui.previewsSlider.isVisible()
assert customizer.preview_controller.last_preview_path == customizer.preview_controller.preview_path
assert customizer.gui.ui.previewLabel.pixmap().width() == 298
assert customizer.gui.ui.previewLabel.pixmap().height() == 200
img = Image.new("RGB", (250, 123), "white")
img_path = os.path.join(self.path, "image1.png")
img.save(img_path)
rts.task_state.extra_data = {"result_preview": img_path}
customizer.update_task_additional_info(rts)
assert customizer.gui.ui.previewLabel.pixmap().width() == 250
assert customizer.gui.ui.previewLabel.pixmap().height() == 123
img = Image.new("RGB", (301, 206), "white")
img.save(img_path)
customizer.update_task_additional_info(rts)
assert customizer.gui.ui.previewLabel.pixmap().width() == 301
assert customizer.gui.ui.previewLabel.pixmap().height() == 206
rts.definition.task_type = u"Blender"
rts.definition.options = MagicMock()
rts.definition.options.use_frames = True
rts.definition.options.frames = range(10)
rts.task_state.outputs = ["result"] * 10
rts.task_state.extra_data = {"result_preview": [img_path]}
customizer.update_task_additional_info(rts)
@patch("gui.controller.customizer.QMessageBox")
def test_show_task_result(self, mock_messagebox):
customizer = MainWindowCustomizer(self.gui.get_main_window(), MagicMock())
td = TaskDesc()
td.definition.task_type = "Blender"
td.definition.options.use_frames = True
td.definition.output_file = os.path.join(self.path, "output.png")
td.task_state.outputs = [os.path.join(self.path, u"output0011.png"),
os.path.join(self.path, u"output0014.png"),
os.path.join(self.path, u"output0017.png")]
td.definition.options.frames = [11, 14, 17]
customizer.logic.get_task.return_value = td
customizer.current_task_highlighted = td
customizer.gui.ui.previewsSlider.setRange(1, 3)
mock_messagebox.Critical = "CRITICAL"
customizer.show_task_result("abc")
expected_file = td.task_state.outputs[0]
mock_messagebox.assert_called_with(mock_messagebox.Critical, "Error",
expected_file + u" is not a file",
ANY, ANY)
customizer.gui.ui.previewsSlider.setValue(2)
customizer.show_task_result("abc")
expected_file = td.task_state.outputs[1]
mock_messagebox.assert_called_with(mock_messagebox.Critical, "Error",
expected_file + u" is not a file",
ANY, ANY)
customizer.gui.ui.previewsSlider.setValue(3)
customizer.show_task_result("abc")
expected_file = td.task_state.outputs[2]
mock_messagebox.assert_called_with(mock_messagebox.Critical, "Error",
expected_file + u" is not a file",
ANY, ANY)
@patch("gui.controller.mainwindowcustomizer.QMessageBox")
def test_load_task(self, mock_messagebox):
mock_messagebox.return_value = mock_messagebox
customizer = MainWindowCustomizer(self.gui.get_main_window(), MagicMock())
customizer._load_new_task_from_definition = Mock()
task_path = os.path.join(self.path, "file.gt")
f = Mock()
f.read.return_value = '[{"key": "value"}]'
with patch('__builtin__.open') as mock_open:
mock_open.return_value = f
customizer._load_task(task_path)
assert mock_open.called
assert f.close.called
assert not mock_messagebox.exec_.called
assert customizer._load_new_task_from_definition.called
def _raise(*_):
raise Exception
f.read = _raise
customizer._load_new_task_from_definition.called = False
with patch('__builtin__.open') as mock_open:
mock_open.return_value = f
customizer._load_task(task_path)
assert mock_open.called
assert f.close.called
assert mock_messagebox.exec_.called
assert not customizer._load_new_task_from_definition.called
| scorpilix/Golemtest | tests/gui/controller/test_mainwindowcustomizer.py | Python | gpl-3.0 | 10,209 |
#!/usr/bin/python
# encoding: UTF-8
"""
This file is part of PenTestKit
Copyright (C) 2017-2018 @maldevel
https://github.com/maldevel/PenTestKit
PenTestKit - Useful tools for Penetration Testing.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For more see the file 'LICENSE' for copying permission.
"""
__author__ = "maldevel"
__copyright__ = "Copyright (c) 2017-2018 @maldevel"
__credits__ = ["maldevel"]
__license__ = "GPLv3"
__version__ = "0.1"
__maintainer__ = "maldevel"
################################
import argparse
import sys
import urlparse
from argparse import RawTextHelpFormatter
################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument("-i1", "--input1",
action="store",
metavar='POST_data',
dest='input1',
type=str,
default=None,
required=True,
help='POST data to compare')
parser.add_argument("-i2", "--input2",
action="store",
metavar='POST_data',
dest='input2',
type=str,
default=None,
required=True,
help='POST data to compare')
    if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
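    # Illustrative run (values made up, not taken from project docs):
    #   ./compare-post-data.py -i1 "user=bob&token=abc" -i2 "user=bob&session=xyz"
    # reports user/token/session as the unique parameter names, 'token' as
    # present only in input1 and 'session' as present only in input2.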
input1_params = urlparse.parse_qs(args.input1, True)
input1_params = set().union(input1_params.keys())
input2_params = urlparse.parse_qs(args.input2, True)
input2_params = set().union(input2_params.keys())
unique_params = input1_params.union(input2_params)
params1_not_params2 = list(input1_params - input2_params)
params2_not_params1 = list(input2_params - input1_params)
print
print "[+] Unique parameters"
print
print ', '.join(unique_params)
print
print
print "[+] Parameters in input1 and not in input2"
print
print ', '.join(params1_not_params2)
print
print "[+] Parameters in input2 and not in input1"
print
print ', '.join(params2_not_params1)
| maldevel/PenTestKit | web/compare-post-data.py | Python | gpl-3.0 | 2,917 |
d = {
'00': 'A',
'01': 'C',
'10': 'G',
'11': 'T'
}
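# Each two-bit pair decodes to one nucleotide, e.g. the input "000111"
# prints "ACT" ('00' -> A, '01' -> C, '11' -> T).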
a = input()
#res = ''
for i in range(len(a) // 2):
print(d[a[2*i:2*(i + 1)]], end='')
#res += d[a[2*i:2*(i + 1)]]
#print(res)
| nizhikebinesi/code_problems_python | stepik/data_structures/7/7.1/task_3.py | Python | gpl-2.0 | 203 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2018-08-13
git sha : $Format:%H$
copyright : (C) 2018 by Philipe Borba - Cartographic Engineer @ Brazilian Army
email : borba.philipe@eb.mil.br
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import QCoreApplication
from qgis.core import (QgsDataSourceUri, QgsFeature, QgsFeatureSink,
QgsProcessing, QgsProcessingAlgorithm,
QgsProcessingException, QgsProcessingOutputVectorLayer,
QgsProcessingParameterBoolean,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterNumber,
QgsProcessingParameterVectorLayer, QgsWkbTypes)
from .validationAlgorithm import ValidationAlgorithm
class IdentifySmallPolygonsAlgorithm(ValidationAlgorithm):
FLAGS = 'FLAGS'
INPUT = 'INPUT'
SELECTED = 'SELECTED'
TOLERANCE = 'TOLERANCE'
def initAlgorithm(self, config):
"""
Parameter setting.
"""
self.addParameter(
QgsProcessingParameterVectorLayer(
self.INPUT,
self.tr('Input layer'),
[QgsProcessing.TypeVectorPolygon ]
)
)
self.addParameter(
QgsProcessingParameterBoolean(
self.SELECTED,
self.tr('Process only selected features')
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.TOLERANCE,
self.tr('Area tolerance'),
minValue=0,
defaultValue=625
)
)
self.addParameter(
QgsProcessingParameterFeatureSink(
self.FLAGS,
self.tr('{0} Flags').format(self.displayName())
)
)
def processAlgorithm(self, parameters, context, feedback):
"""
Here is where the processing itself takes place.
"""
inputLyr = self.parameterAsVectorLayer(parameters, self.INPUT, context)
if inputLyr is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
onlySelected = self.parameterAsBool(parameters, self.SELECTED, context)
tol = self.parameterAsDouble(parameters, self.TOLERANCE, context)
self.prepareFlagSink(parameters, inputLyr, inputLyr.wkbType(), context)
# Compute the number of steps to display within the progress bar and
# get features from source
featureList, total = self.getIteratorAndFeatureCount(inputLyr, onlySelected=onlySelected)
for current, feat in enumerate(featureList):
# Stop the algorithm if cancel button has been clicked
if feedback.isCanceled():
break
if feat.geometry().area() < tol:
flagText = self.tr('Feature from layer {0} with id={1} has area of value {2:.2f}, which is lesser than the tolerance of {3} square units.').format(inputLyr.name(), feat.id(), feat.geometry().area(), tol)
self.flagFeature(feat.geometry(), flagText)
# Update the progress bar
feedback.setProgress(int(current * total))
return {self.FLAGS: self.flag_id}
def name(self):
"""
Returns the algorithm name, used for identifying the algorithm. This
string should be fixed for the algorithm, and must not be localised.
The name should be unique within each provider. Names should contain
lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'identifysmallpolygons'
def displayName(self):
"""
Returns the translated algorithm name, which should be used for any
user-visible display of the algorithm name.
"""
return self.tr('Identify Small Polygons')
def group(self):
"""
Returns the name of the group this algorithm belongs to. This string
should be localised.
"""
return self.tr('Quality Assurance Tools (Identification Processes)')
def groupId(self):
"""
Returns the unique ID of the group this algorithm belongs to. This
string should be fixed for the algorithm, and must not be localised.
The group id should be unique within each provider. Group id should
contain lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'DSGTools: Quality Assurance Tools (Identification Processes)'
def tr(self, string):
return QCoreApplication.translate('IdentifySmallPolygonsAlgorithm', string)
def createInstance(self):
return IdentifySmallPolygonsAlgorithm()
| lcoandrade/DsgTools | core/DSGToolsProcessingAlgs/Algs/ValidationAlgs/identifySmallPolygonsAlgorithm.py | Python | gpl-2.0 | 5,841 |
# vim:fileencoding=utf-8:noet
'''Dynamic configuration files tests.'''
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import os
import json
import logging
import tests.vim as vim_module
from tests.lib import Args, urllib_read, replace_attr
from tests import TestCase
from powerline import NotInterceptedError
from powerline.segments.common import wthr
VBLOCK = chr(ord('V') - 0x40)
SBLOCK = chr(ord('S') - 0x40)
class FailingLogger(logging.Logger):
def exception(self, *args, **kwargs):
super(FailingLogger, self).exception(*args, **kwargs)
raise NotInterceptedError('Unexpected exception occurred')
def get_logger(stream=None):
log_format = '%(asctime)s:%(levelname)s:%(message)s'
formatter = logging.Formatter(log_format)
level = logging.WARNING
handler = logging.StreamHandler(stream)
handler.setLevel(level)
handler.setFormatter(formatter)
logger = FailingLogger('powerline')
logger.setLevel(level)
logger.addHandler(handler)
return logger
class TestVimConfig(TestCase):
def test_vim(self):
from powerline.vim import VimPowerline
cfg_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'powerline', 'config_files')
buffers = (
(('bufoptions',), {'buftype': 'help'}),
(('bufname', '[Command Line]'), {}),
(('bufoptions',), {'buftype': 'quickfix'}),
(('bufname', 'NERD_tree_1'), {}),
(('bufname', '__Gundo__'), {}),
(('bufname', '__Gundo_Preview__'), {}),
# No Command-T tests here: requires +ruby or emulation
# No tabline here: tablines are tested separately
)
with open(os.path.join(cfg_path, 'config.json'), 'r') as f:
local_themes_raw = json.load(f)['ext']['vim']['local_themes']
# Don’t run tests on external/plugin segments
local_themes = dict((k, v) for (k, v) in local_themes_raw.items())
# See end of the buffers definition above for `- 2`
self.assertEqual(len(buffers), len(local_themes) - 2)
outputs = {}
i = 0
with vim_module._with('split'):
with VimPowerline(logger=get_logger()) as powerline:
def check_output(mode, args, kwargs):
if mode == 'nc':
window = vim_module.windows[0]
window_id = 2
else:
vim_module._start_mode(mode)
window = vim_module.current.window
window_id = 1
winnr = window.number
out = powerline.render(window, window_id, winnr)
if out in outputs:
self.fail('Duplicate in set #{0} ({1}) for mode {2!r} (previously defined in set #{3} ({4!r}) for mode {5!r})'.format(i, (args, kwargs), mode, *outputs[out]))
outputs[out] = (i, (args, kwargs), mode)
with vim_module._with('bufname', '/tmp/foo.txt'):
out = powerline.render(vim_module.current.window, 1, vim_module.current.window.number, is_tabline=True)
outputs[out] = (-1, (None, None), 'tab')
with vim_module._with('globals', powerline_config_paths=[cfg_path]):
exclude = set(('no', 'v', 'V', VBLOCK, 's', 'S', SBLOCK, 'R', 'Rv', 'c', 'cv', 'ce', 'r', 'rm', 'r?', '!'))
try:
for mode in ['n', 'nc', 'no', 'v', 'V', VBLOCK, 's', 'S', SBLOCK, 'i', 'R', 'Rv', 'c', 'cv', 'ce', 'r', 'rm', 'r?', '!']:
check_output(mode, None, None)
for args, kwargs in buffers:
i += 1
if mode in exclude:
continue
with vim_module._with(*args, **kwargs):
check_output(mode, args, kwargs)
finally:
vim_module._start_mode('n')
@classmethod
def setUpClass(cls):
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'path')))
@classmethod
def tearDownClass(cls):
sys.path.pop(0)
class TestConfig(TestCase):
def test_tmux(self):
from powerline.segments import common
from imp import reload
reload(common)
from powerline.shell import ShellPowerline
with replace_attr(common, 'urllib_read', urllib_read):
with ShellPowerline(Args(ext=['tmux']), logger=get_logger(), run_once=False) as powerline:
powerline.render()
with ShellPowerline(Args(ext=['tmux']), logger=get_logger(), run_once=False) as powerline:
powerline.render()
def test_zsh(self):
from powerline.shell import ShellPowerline
args = Args(last_pipe_status=[1, 0], jobnum=0, ext=['shell'], renderer_module='.zsh')
segment_info = {'args': args}
with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline:
powerline.render(segment_info=segment_info)
with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline:
powerline.render(segment_info=segment_info)
segment_info['local_theme'] = 'select'
with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline:
powerline.render(segment_info=segment_info)
segment_info['local_theme'] = 'continuation'
segment_info['parser_state'] = 'if cmdsubst'
with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline:
powerline.render(segment_info=segment_info)
def test_bash(self):
from powerline.shell import ShellPowerline
args = Args(last_exit_code=1, jobnum=0, ext=['shell'], renderer_module='.bash', config_override={'ext': {'shell': {'theme': 'default_leftonly'}}})
with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline:
powerline.render(segment_info={'args': args})
with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline:
powerline.render(segment_info={'args': args})
def test_ipython(self):
from powerline.ipython import IPythonPowerline
class IpyPowerline(IPythonPowerline):
config_paths = None
config_overrides = None
theme_overrides = {}
segment_info = Args(prompt_count=1)
with IpyPowerline(logger=get_logger(), renderer_module='.pre_5') as powerline:
for prompt_type in ['in', 'in2']:
powerline.render(is_prompt=True, matcher_info=prompt_type, segment_info=segment_info)
powerline.render(is_prompt=True, matcher_info=prompt_type, segment_info=segment_info)
with IpyPowerline(logger=get_logger(), renderer_module='.pre_5') as powerline:
for prompt_type in ['out', 'rewrite']:
powerline.render(is_prompt=False, matcher_info=prompt_type, segment_info=segment_info)
powerline.render(is_prompt=False, matcher_info=prompt_type, segment_info=segment_info)
def test_wm(self):
from powerline.segments import common
from imp import reload
reload(common)
from powerline import Powerline
with replace_attr(wthr, 'urllib_read', urllib_read):
Powerline(logger=get_logger(), ext='wm', renderer_module='pango_markup', run_once=True).render()
reload(common)
old_cwd = None
saved_get_config_paths = None
def setUpModule():
global old_cwd
global saved_get_config_paths
import powerline
saved_get_config_paths = powerline.get_config_paths
path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'powerline', 'config_files')
powerline.get_config_paths = lambda: [path]
old_cwd = os.getcwd()
def tearDownModule():
global old_cwd
global saved_get_config_paths
import powerline
powerline.get_config_paths = saved_get_config_paths
os.chdir(old_cwd)
old_cwd = None
if __name__ == '__main__':
from tests import main
main()
| EricSB/powerline | tests/test_provided_config_files.py | Python | mit | 7,107 |
import logging
logger = logging.getLogger('rh-nexttask')
class Renderer(object):
"""Different way to render the advised bzs.
"""
def __init__(self, bzs):
self.bzs = bzs
def list(self):
raw_names = [f for f in dir(self) if f.startswith('r_')]
return [f[2:] for f in raw_names]
def list_str(self):
header = 'Available renderers:'
content = ''
for renderer in self.list():
content += "\t- {}\n".format(renderer)
return "{}\n{}".format(header, content)
@staticmethod
def _display_bzs(bzs, sorting_key='no_change_during', url=False):
for bz in sorted(bzs, reverse=True, key=lambda bz: getattr(bz, sorting_key)):
header = "{} ({} since {} days):".format(bz, sorting_key, getattr(bz, sorting_key))
content = ''
if url:
content = "\t\t{}\n".format(bz.url)
for advice in bz.advices:
content += "\t{}".format(advice)
if not content:
content = '\tCongratulation, state is OK.\n'
print("{}\n{}".format(header, content))
print("\nGot {} bzs.\n".format(len(bzs)))
@staticmethod
def _display_bzs_url_short(bzs, sorting_key='no_change_during'):
for bz in sorted(bzs, reverse=True, key=lambda bz: getattr(bz, sorting_key)):
print("{}\n\t{}".format(bz, bz.url))
print("\nGot {} bzs.\n".format(len(bzs)))
def r_echo(self):
self._display_bzs(bzs=self.bzs, url=True)
def r_echo_under_post(self):
bzs_under_post = [bz for bz in self.bzs if bz.status in ['NEW', 'ASSIGNED', 'ON_DEV']]
self._display_bzs(bzs=bzs_under_post, url=True)
def _ask_for_review(self, title, rtype, show_bz=False):
header = title
content = ''
for bz in sorted(self.bzs, reverse=True, key=lambda bz: bz.no_change_during):
content_review = ''
for advice in bz.advices:
for review in advice.reviews:
if review and review.rtype == rtype:
logger.debug('Review {} has type {}'.format(review.id, review.rtype))
if advice.etype in ['need_merging', 'need_review']:
content_review += " - {} - {}\n".format(review, advice.etype)
content_review += " {}\n".format(review.url)
if show_bz and content_review:
content += '## BZ {} -- {}\n{}'.format(bz.id, bz.url, content_review)
else:
content += '{}'.format(content_review)
if not content:
content = 'Nothing need review.'
print("{}\n{}\n".format(header, content))
def r_tripleo_meeting(self):
self._ask_for_review("Tripleo meeting", 'upstream')
def r_daily_meeting(self):
self._ask_for_review("Daily Meeting", 'internal', show_bz=True)
def r_old_bug(self):
na_bzs = [bz for bz in self.bzs if bz._need_action]
bzs_users = {}
for bz in na_bzs:
if bz.assigned_to not in bzs_users:
bzs_users[bz.assigned_to] = []
bzs_users[bz.assigned_to].append(bz)
for (user, bzs) in bzs_users.items():
            print("{}:\n".format(user))
self._display_bzs(bzs, '_need_action_days')
def r_bz_url(self):
self._display_bzs_url_short(self.bzs)
def r_bz_url_under_post(self):
bzs_under_post = [bz for bz in self.bzs if bz.status in ['NEW', 'ASSIGNED', 'ON_DEV']]
self._display_bzs_url_short(bzs_under_post)
def r_client_bz(self):
c_bzs = [bz for bz in self.bzs if bz.is_client]
self._display_bzs_url_short(c_bzs)
| sathlan/python-rh-nexttaks | src/rh_nexttask/renderer.py | Python | bsd-2-clause | 3,745 |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
sys.path.append(os.pardir)  # allow importing files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.util import smooth_curve
from common.multi_layer_net import MultiLayerNet
from common.optimizer import SGD
# 0: Load the MNIST data ==========
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
train_size = x_train.shape[0]
batch_size = 128
max_iterations = 2000
# 1: Experiment setup ==========
weight_init_types = {'std=0.01': 0.01, 'Xavier': 'sigmoid', 'He': 'relu'}
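# 'std=0.01' draws weights from a fixed small Gaussian; 'Xavier' scales them by
# 1/sqrt(n_prev) (suited to sigmoid/tanh) and 'He' by sqrt(2/n_prev) (suited to
# ReLU). MultiLayerNet maps these string values to the corresponding scaling
# through its weight_init_std argument.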
optimizer = SGD(lr=0.01)
networks = {}
train_loss = {}
for key, weight_type in weight_init_types.items():
networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100],
output_size=10, weight_init_std=weight_type)
train_loss[key] = []
# 2: Start training ==========
for i in range(max_iterations):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
for key in weight_init_types.keys():
grads = networks[key].gradient(x_batch, t_batch)
optimizer.update(networks[key].params, grads)
loss = networks[key].loss(x_batch, t_batch)
train_loss[key].append(loss)
if i % 100 == 0:
print("===========" + "iteration:" + str(i) + "===========")
for key in weight_init_types.keys():
loss = networks[key].loss(x_batch, t_batch)
print(key + ":" + str(loss))
# 3: Draw the graph ==========
markers = {'std=0.01': 'o', 'Xavier': 's', 'He': 'D'}
x = np.arange(max_iterations)
for key in weight_init_types.keys():
plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)
plt.xlabel("iterations")
plt.ylabel("loss")
plt.ylim(0, 2.5)
plt.legend()
plt.show()
| kgsn1763/deep-learning-from-scratch | ch06/weight_init_compare.py | Python | mit | 1,963 |
# This file is part of the Simulation Manager project for VecNet.
# For copyright and licensing information about this project, see the
# NOTICE.txt and LICENSE.md files in its top-level directory; they are
# available at https://github.com/vecnet/simulation-manager
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Tests for the submit_group.py script.
"""
import random
import sys
from crc_nd.utils.test_io import WritesOutputFiles
from django.test import LiveServerTestCase
from mock import patch
from path import path
from vecnet.simulation import ExecutionRequest, sim_model, Simulation, SimulationGroup as SimGroup, submission_status
from .constants import TEST_OUTPUT_ROOT
from .mixins import UsesDatabaseApi
from sim_manager import scripts, working_dirs
from sim_manager.models import SimulationGroup
from sim_manager.scripts import api_urls, batch, input_files, submit_group
from sim_manager.scripts.batch import test_utils
from sim_manager.scripts.constants import SIMULATION_DEFINITION_FILENAME, SIMULATION_SCRIPT
class MainTests(LiveServerTestCase, UsesDatabaseApi, WritesOutputFiles):
"""
Tests for the script's main function.
"""
@classmethod
def setUpClass(cls):
super(MainTests, cls).setUpClass()
cls.setup_database_api_user()
cls.set_output_root(TEST_OUTPUT_ROOT)
working_dirs.TestingApi.use_testing_root()
# Add the scripts package's directory to the module search path so the loading of the batch system in the
# submit_group.py script works. When the script is executed at the command line, the package directory will
# automatically be added to the search path. But here in the test suite, the package is imported, so it's
# directory is not added automatically. Therefore, we explicitly add it.
scripts_dir = path(scripts.__file__).dirname()
sys.path.append(scripts_dir)
cls.simulation_script = scripts_dir / SIMULATION_SCRIPT
@classmethod
def tearDownClass(cls):
cls.remove_database_api_user()
working_dirs.TestingApi.reset_root_to_default()
sys.path.pop()
@patch('sim_manager.scripts.submit_group.BATCH_SYSTEM', batch.MOCK)
def test_run_script(self):
group = SimulationGroup.objects.create(submitter=self.test_user)
self.group_id = group.id
self.assertEqual(group.script_status, submission_status.READY_TO_RUN)
self.sim_group = SimGroup()
simulation_1 = Simulation(model=sim_model.OPEN_MALARIA, model_version='32', id_on_client='349',
output_url='http://ingestor.example.com/output-files/')
simulation_1.input_files['scenario.xml'] = 'http://www.example.com/data/scenarios/1234/scenario.xml'
simulation_2 = Simulation(model=sim_model.EMOD, model_version='1.6', cmd_line_args=['--foo', 'bar'],
id_on_client='350', output_url=simulation_1.output_url)
simulation_2.input_files['config.json'] = 'https://files.vecnet.org/4710584372'
simulation_2.input_files['campaign.json'] = 'https://files.vecnet.org/678109'
self.sim_group.simulations = [simulation_1, simulation_2]
self.execution_request = ExecutionRequest(simulation_group=self.sim_group)
group.setup_working_dir(self.execution_request)
group_url = self.live_server_url + ('/api/v1/sim-groups/%s/' % group.id)
simulations_url = self.live_server_url + '/api/v1/simulations/'
api_urls.write_for_group(group.working_dir, group_url, simulations_url)
self.check_expected_state = self.expect_script_started
group.working_dir.chdir()
self.initialize_output_dir()
stdout = self.get_output_dir() / 'stdout.txt'
with stdout.open('w') as f:
exit_status = submit_group.main('foo', 'bar', stdout=f, test_callback=self.callback)
self.assertEqual(exit_status, 0)
group = SimulationGroup.objects.get(id=group.id)
self.assertEqual(group.script_status, submission_status.SCRIPT_DONE)
def callback(self):
if self.check_expected_state:
self.check_expected_state()
else:
self.fail('callback unexpectedly called')
def expect_script_started(self):
"""
Confirm that the submission script was started.
"""
self.assertGroupScriptStatus(submission_status.STARTED_SCRIPT)
self.check_expected_state = self.expect_cached_files
def expect_cached_files(self):
"""
Confirm that the submission script cached input files.
"""
self.assertGroupScriptStatus(submission_status.CACHING_FILES)
self.assertTrue(input_files.TestingApi.add_to_cache_mock.called)
args, kwargs = input_files.TestingApi.add_to_cache_mock.call_args
self.assertEqual((self.execution_request.input_files,), args)
self.check_expected_state = self.expect_simulation_created
self.simulations_created = 0
test_utils.Mocks.submit_job.reset_mock()
test_utils.Mocks.submit_job.return_value = generate_job_id()
def expect_simulation_created(self):
"""
Confirm that the submission script has created a new simulation in the database.
"""
self.assertGroupScriptStatus(submission_status.SUBMITTING_JOBS)
group = SimulationGroup.objects.get(id=self.group_id)
self.assertEqual(group.simulation_set.count(), self.simulations_created + 1)
self.simulations_created += 1
# Check that the working directory is set up properly for the simulation that was just created
simulation = group.simulation_set.order_by('created_when').last()
self.assertTrue(simulation.working_dir.isdir())
sim_definition_path = simulation.working_dir / SIMULATION_DEFINITION_FILENAME
self.assertTrue(sim_definition_path.isfile())
sim_definition = Simulation.read_json_file(sim_definition_path)
expected_sim_definition = self.sim_group.simulations[self.simulations_created - 1]
self.assertEqual(sim_definition.model, expected_sim_definition.model)
self.assertEqual(sim_definition.model_version, expected_sim_definition.model_version)
self.assertEqual(sim_definition.input_files, expected_sim_definition.input_files)
self.assertEqual(sim_definition.cmd_line_args, expected_sim_definition.cmd_line_args)
self.assertEqual(sim_definition.id_on_client, expected_sim_definition.id_on_client)
self.assertEqual(sim_definition.output_url, expected_sim_definition.output_url)
# Check that the simulation was submitted to the batch system.
self.assertTrue(test_utils.Mocks.submit_job.called)
args, kwargs = test_utils.Mocks.submit_job.call_args
executable, working_dir, cmd_args = args[0], args[1], args[2:]
self.assertEqual(executable, sys.executable)
self.assertEqual(working_dir, simulation.working_dir)
self.assertEqual(list(cmd_args), [self.simulation_script])
self.assertEqual(simulation.batch_job_id, test_utils.Mocks.submit_job.return_value)
test_utils.Mocks.submit_job.reset_mock()
if self.simulations_created < len(self.sim_group.simulations):
test_utils.Mocks.submit_job.return_value = generate_job_id()
else:
self.check_expected_state = None
def assertGroupScriptStatus(self, expected_status):
group = SimulationGroup.objects.get(id=self.group_id)
self.assertEqual(group.script_status, expected_status)
def generate_job_id():
return str(random.randint(1, 100000)) | vecnet/simulation-manager | sim_manager/tests/test_submit_group.py | Python | mpl-2.0 | 7,814 |
import imp
am = imp.load_source( 'am', 'artifact-manager' )
| paulovn/artifact-manager | test/__init__.py | Python | gpl-2.0 | 50 |
import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
print(wd)
request.addfinalizer(wd.quit)
return wd
def test_example(driver):
driver.get("http://ivanka/admin/login.php")
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("login").click()
driver.get("http://ivanka/admin/")
elem = driver.find_elements_by_tag_name("h1")
print(elem) | IvankaK/Testing | test_tag.py | Python | apache-2.0 | 527 |
#!/usr/bin/env python
# encoding: utf-8
from base_task import BaseTask, Node
task_definitions = [
{
'name': 'first_test_runs_task',
'start': Node('A', 0, 0),
'finish': Node('Z', 500, 500),
'mid_nodes': [
Node('B', 50, 500),
Node('C', 50, 100),
Node('D', 0, 550),
Node('E', 500, 100),
Node('F', 250, 250),
Node('G', 150, 400),
Node('H', 350, 150),
],
'distances': {
'A:B': 10,
},
'timeout': 1,
},
{
'name': 'test_task_symetric_distances_only',
'start': Node('A', 0, 0),
'finish': Node('Z', 500, 500),
'mid_nodes': [
Node('B', 50, 500),
Node('C', 50, 100),
Node('D', 0, 550),
Node('E', 500, 100),
Node('F', 250, 250),
Node('G', 150, 400),
Node('H', 350, 150),
],
'distances': {
'A:B': 10,
'B:A': 10,
'B:C': 5,
'C:D': 10,
'D:E': 30,
'E:F': 17,
'F:G': 35,
'G:H': 40,
'H:C': 20,
'F:A': 15,
'F:C': 5,
'A:Z': 30,
},
'timeout': 10,
'paths_only': True,
},
{
'name': 'test_task_asymetric_distances_only',
'start': Node('A', 0, 0),
'finish': Node('Z', 500, 500),
'mid_nodes': [
Node('B', 50, 500),
Node('C', 50, 100),
Node('D', 0, 550),
Node('E', 500, 100),
Node('F', 250, 250),
Node('G', 150, 400),
Node('H', 350, 150),
],
'distances': {
'A:B': 10,
'B:C': 5,
'C:D': 10,
'D:E': 30,
'E:F': 17,
'F:G': 35,
'G:H': 40,
'H:C': 20,
'F:A': 15,
'F:C': 5,
'F:Z': 1,
},
'timeout': 1,
'paths_only': True,
'symetric': False,
},
]
tasks = {}
for task_def in task_definitions:
tasks[task_def['name']] = BaseTask(**task_def)
| Cosiek/KombiVojager | tasks.py | Python | mit | 2,187 |
#!/usr/bin/python
"""
Read the bulk converted file (not trifiltered)
and the triplicate file to identify technical replicates
and collect VAF for each mutation common in the replicates
"""
import sys
triplicates = dict()
trifile = open(sys.argv[1])
for i in trifile:
fields = i.rstrip().split()
repname = fields[0]
for name in fields[1:]:
triplicates[name] = repname
trifile.close()
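# triplicates now maps every sample name to its replicate-group name,
# e.g. a line "GroupA s1 s2 s3" maps s1, s2 and s3 to "GroupA".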
ratiofieldnum = 69 # default PM-Tum column; some file versions differ, so it is re-detected from the header below
result = dict()
for i in open(sys.argv[2]):
if i.startswith("#"):
if i.startswith("#Anal"):
colnames = i.rstrip().split("\t")
for j in range(len(colnames)):
if colnames[j] == "PM-Tum":
ratiofieldnum = j
break
continue
fields = i.rstrip().split("\t")
mtype = fields[15]
if mtype != "Sub":
continue
sample = fields[1]
chrx = fields[4]
pos = fields[5]
alt = fields[7]
ratio = fields[ratiofieldnum]
rname = triplicates[sample]
key = rname + "\t" + chrx + "_" + pos + "_" + alt
if key not in result:
result[key] = list()
result[key].append(ratio)
for r in result:
if len(result[r]) < 2:
continue
print r + "\t" + "\t".join(result[r][:2])
| TravisCG/SI_scripts | trivaf.py | Python | gpl-3.0 | 1,154 |
import os
import unittest
from datetime import datetime
from esdl import CubeConfig
from esdl.providers.country_mask import CountryMaskProvider
from esdl.util import Config
SOURCE_DIR = Config.instance().get_cube_source_path('CountryCodes')
class CountryMaskProviderTest(unittest.TestCase):
@unittest.skipIf(not os.path.exists(SOURCE_DIR), 'test data not found: ' + SOURCE_DIR)
def test_temporal_coverage(self):
provider = CountryMaskProvider(CubeConfig(end_time=datetime(2003, 1, 1)), dir=SOURCE_DIR)
provider.prepare()
self.assertEqual((datetime(2001, 1, 1, 0, 0), datetime(2003, 1, 1, 0, 0)),
provider.temporal_coverage)
@unittest.skipIf(not os.path.exists(SOURCE_DIR), 'test data not found: ' + SOURCE_DIR)
def test_get_images(self):
provider = CountryMaskProvider(CubeConfig(end_time=datetime(2003, 1, 1)), dir=SOURCE_DIR)
provider.prepare()
images = provider.compute_variable_images(datetime(2002, 7, 27), datetime(2002, 8, 4))
self.assertIsNotNone(images)
self.assertTrue('country_mask' in images)
image = images['country_mask']
self.assertEqual((720, 1440), image.shape)
@unittest.skipIf(not os.path.exists(SOURCE_DIR), 'test data not found: ' + SOURCE_DIR)
def test_get_high_res_images(self):
provider = CountryMaskProvider(CubeConfig(grid_width=4320, grid_height=2160, spatial_res=1 / 12,
end_time=datetime(2003, 1, 1)), dir=SOURCE_DIR)
provider.prepare()
images = provider.compute_variable_images(datetime(2002, 7, 27), datetime(2002, 8, 4))
self.assertIsNotNone(images)
self.assertTrue('country_mask' in images)
image = images['country_mask']
self.assertEqual((2160, 4320), image.shape)
@unittest.skipIf(not os.path.exists(SOURCE_DIR), 'test data not found: ' + SOURCE_DIR)
def test_get_images_outside_time_range(self):
provider = CountryMaskProvider(CubeConfig(end_time=datetime(2002, 1, 1)), dir=SOURCE_DIR)
provider.prepare()
images = provider.compute_variable_images(datetime(2016, 7, 27), datetime(2016, 8, 4))
self.assertIsNotNone(images)
self.assertTrue('country_mask' in images)
image = images['country_mask']
self.assertEqual((720, 1440), image.shape)
| CAB-LAB/cablab-core | test/providers/test_country_mask.py | Python | gpl-3.0 | 2,384 |
#!/usr/bin/env python3
#
# Copyright 2008-2014 Jose Fonseca
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Generate a dot graph from the output of several profilers."""
__author__ = "Jose Fonseca et al"
import sys
import math
import os.path
import re
import textwrap
import optparse
import xml.parsers.expat
import collections
import locale
import json
# Python 2.x/3.x compatibility
if sys.version_info[0] >= 3:
PYTHON_3 = True
def compat_iteritems(x):
return x.items() # No iteritems() in Python 3
def compat_itervalues(x):
return x.values() # No itervalues() in Python 3
def compat_keys(x):
return list(x.keys()) # keys() is a generator in Python 3
basestring = str # No class basestring in Python 3
unichr = chr # No unichr in Python 3
xrange = range # No xrange in Python 3
else:
PYTHON_3 = False
def compat_iteritems(x):
return x.iteritems()
def compat_itervalues(x):
return x.itervalues()
def compat_keys(x):
return x.keys()
try:
# Debugging helper module
import debug
except ImportError:
pass
########################################################################
# Model
MULTIPLICATION_SIGN = unichr(0xd7)
def times(x):
return "%u%s" % (x, MULTIPLICATION_SIGN)
def percentage(p):
return "%.02f%%" % (p * 100.0, )
def add(a, b):
return a + b
def fail(a, b):
assert False
tol = 2 ** -23
def ratio(numerator, denominator):
try:
ratio = float(numerator) / float(denominator)
except ZeroDivisionError:
# 0/0 is undefined, but 1.0 yields more useful results
return 1.0
if ratio < 0.0:
if ratio < -tol:
sys.stderr.write(
'warning: negative ratio (%s/%s)\n' % (numerator, denominator)
)
return 0.0
if ratio > 1.0:
if ratio > 1.0 + tol:
sys.stderr.write(
'warning: ratio greater than one (%s/%s)\n' %
(numerator, denominator)
)
return 1.0
return ratio
class UndefinedEvent(Exception):
"""Raised when attempting to get an event which is undefined."""
def __init__(self, event):
Exception.__init__(self)
self.event = event
def __str__(self):
return 'unspecified event %s' % self.event.name
class Event(object):
"""Describe a kind of event, and its basic operations."""
def __init__(self, name, null, aggregator, formatter=str):
self.name = name
self._null = null
self._aggregator = aggregator
self._formatter = formatter
def __eq__(self, other):
return self is other
def __hash__(self):
return id(self)
def null(self):
return self._null
def aggregate(self, val1, val2):
"""Aggregate two event values."""
assert val1 is not None
assert val2 is not None
return self._aggregator(val1, val2)
def format(self, val):
"""Format an event value."""
assert val is not None
return self._formatter(val)
CALLS = Event("Calls", 0, add, times)
SAMPLES = Event("Samples", 0, add, times)
SAMPLES2 = Event("Samples", 0, add, times)
# Count of samples where a given function was either executing or on the stack.
# This is used to calculate the total time ratio according to the
# straightforward method described in Mike Dunlavey's answer to
# stackoverflow.com/questions/1777556/alternatives-to-gprof, item 4 (the myth
# "that recursion is a tricky confusing issue"), last edited 2012-08-30: it's
# just the ratio of TOTAL_SAMPLES over the number of samples in the profile.
#
# Used only when totalMethod == callstacks
TOTAL_SAMPLES = Event("Samples", 0, add, times)
TIME = Event("Time", 0.0, add, lambda x: '(' + str(x) + ')')
TIME_RATIO = Event("Time ratio", 0.0, add, lambda x: '(' + percentage(x) + ')')
TOTAL_TIME = Event("Total time", 0.0, fail)
TOTAL_TIME_RATIO = Event("Total time ratio", 0.0, fail, percentage)
totalMethod = 'callratios'
class Object(object):
"""Base class for all objects in profile which can store events."""
def __init__(self, events=None):
if events is None:
self.events = {}
else:
self.events = events
def __hash__(self):
return id(self)
def __eq__(self, other):
return self is other
def __lt__(self, other):
return id(self) < id(other)
def __contains__(self, event):
return event in self.events
def __getitem__(self, event):
try:
return self.events[event]
except KeyError:
raise UndefinedEvent(event)
def __setitem__(self, event, value):
if value is None:
if event in self.events:
del self.events[event]
else:
self.events[event] = value
class Call(Object):
"""A call between functions.
There should be at most one call object for every pair of functions.
"""
def __init__(self, callee_id):
Object.__init__(self)
self.callee_id = callee_id
self.ratio = None
self.weight = None
class Function(Object):
"""A function."""
def __init__(self, id, name):
Object.__init__(self)
self.id = id
self.name = name
self.module = None
self.process = None
self.calls = {}
self.called = None
self.weight = None
self.cycle = None
self.filename = None
def add_call(self, call):
if call.callee_id in self.calls:
sys.stderr.write(
'warning: overwriting call from function %s to %s\n' %
(str(self.id), str(call.callee_id))
)
self.calls[call.callee_id] = call
def get_call(self, callee_id):
if not callee_id in self.calls:
call = Call(callee_id)
call[SAMPLES] = 0
call[SAMPLES2] = 0
call[CALLS] = 0
self.calls[callee_id] = call
return self.calls[callee_id]
_parenthesis_re = re.compile(r'\([^()]*\)')
_angles_re = re.compile(r'<[^<>]*>')
_const_re = re.compile(r'\s+const$')
def stripped_name(self):
"""Remove extraneous information from C++ demangled function names."""
name = self.name
# Strip function parameters from name by recursively removing paired parenthesis
while True:
name, n = self._parenthesis_re.subn('', name)
if not n:
break
# Strip const qualifier
name = self._const_re.sub('', name)
# Strip template parameters from name by recursively removing paired angles
while True:
name, n = self._angles_re.subn('', name)
if not n:
break
return name
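# Added illustrative example (the input name below is hypothetical):
#   "std::vector<int, std::allocator<int> >::push_back(int const&) const"
# is reduced by the regexes above to
#   "std::vector::push_back"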
# TODO: write utility functions
def __repr__(self):
return self.name
class Cycle(Object):
"""A cycle made from recursive function calls."""
def __init__(self):
Object.__init__(self)
self.functions = set()
def add_function(self, function):
assert function not in self.functions
self.functions.add(function)
if function.cycle is not None:
for other in function.cycle.functions:
if other not in self.functions:  # merge members of the other cycle into this one
self.add_function(other)
function.cycle = self
class Profile(Object):
"""The whole profile."""
def __init__(self):
Object.__init__(self)
self.functions = {}
self.cycles = []
def add_function(self, function):
if function.id in self.functions:
sys.stderr.write(
'warning: overwriting function %s (id %s)\n' %
(function.name, str(function.id))
)
self.functions[function.id] = function
def add_cycle(self, cycle):
self.cycles.append(cycle)
def validate(self):
"""Validate the edges."""
for function in compat_itervalues(self.functions):
for callee_id in compat_keys(function.calls):
assert function.calls[callee_id].callee_id == callee_id
if callee_id not in self.functions:
sys.stderr.write(
'warning: call to undefined function %s from function %s\n'
% (str(callee_id), function.name)
)
del function.calls[callee_id]
def find_cycles(self):
"""Find cycles using Tarjan's strongly connected components algorithm."""
# Apply the Tarjan's algorithm successively until all functions are visited
stack = []
data = {}
order = 0
for function in compat_itervalues(self.functions):
order = self._tarjan(function, order, stack, data)
cycles = []
for function in compat_itervalues(self.functions):
if function.cycle is not None and function.cycle not in cycles:
cycles.append(function.cycle)
self.cycles = cycles
if 0:
for cycle in cycles:
sys.stderr.write("Cycle:\n")
for member in cycle.functions:
sys.stderr.write("\tFunction %s\n" % member.name)
def prune_root(self, root):
visited = set()
frontier = set([root])
while len(frontier) > 0:
node = frontier.pop()
visited.add(node)
f = self.functions[node]
newNodes = f.calls.keys()
frontier = frontier.union(set(newNodes) - visited)
subtreeFunctions = {}
for n in visited:
subtreeFunctions[n] = self.functions[n]
self.functions = subtreeFunctions
def prune_leaf(self, leaf):
edgesUp = collections.defaultdict(set)
for f in self.functions.keys():
for n in self.functions[f].calls.keys():
edgesUp[n].add(f)
# build the tree up
visited = set()
frontier = set([leaf])
while len(frontier) > 0:
node = frontier.pop()
visited.add(node)
frontier = frontier.union(edgesUp[node] - visited)
downTree = set(self.functions.keys())
upTree = visited
path = downTree.intersection(upTree)
pathFunctions = {}
for n in path:
f = self.functions[n]
newCalls = {}
for c in f.calls.keys():
if c in path:
newCalls[c] = f.calls[c]
f.calls = newCalls
pathFunctions[n] = f
self.functions = pathFunctions
def getFunctionId(self, funcName):
for f in self.functions:
if self.functions[f].name == funcName:
return f
return False
class _TarjanData:
def __init__(self, order):
self.order = order
self.lowlink = order
self.onstack = False
def _tarjan(self, function, order, stack, data):
"""Tarjan's strongly connected components algorithm.
See also:
- http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm
"""
try:
func_data = data[function.id]
return order
except KeyError:
func_data = self._TarjanData(order)
data[function.id] = func_data
order += 1
pos = len(stack)
stack.append(function)
func_data.onstack = True
for call in compat_itervalues(function.calls):
try:
callee_data = data[call.callee_id]
if callee_data.onstack:
func_data.lowlink = min(
func_data.lowlink, callee_data.order
)
except KeyError:
callee = self.functions[call.callee_id]
order = self._tarjan(callee, order, stack, data)
callee_data = data[call.callee_id]
func_data.lowlink = min(func_data.lowlink, callee_data.lowlink)
if func_data.lowlink == func_data.order:
# Strongly connected component found
members = stack[pos:]
del stack[pos:]
if len(members) > 1:
cycle = Cycle()
for member in members:
cycle.add_function(member)
data[member.id].onstack = False
else:
for member in members:
data[member.id].onstack = False
return order
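# Added note: only strongly connected components with more than one member
# become Cycle objects above, so mutually recursive functions (e.g. a() calling
# b() and b() calling a()) are merged into a single Cycle, while a plainly
# self-recursive function is left without a cycle.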
def call_ratios(self, event):
# Aggregate for incoming calls
cycle_totals = {}
for cycle in self.cycles:
cycle_totals[cycle] = 0.0
function_totals = {}
for function in compat_itervalues(self.functions):
function_totals[function] = 0.0
# Pass 1: function_total gets the sum of call[event] for all
# incoming arrows. Same for cycle_total for all arrows
# that are coming into the *cycle* but are not part of it.
for function in compat_itervalues(self.functions):
for call in compat_itervalues(function.calls):
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if event in call.events:
function_totals[callee] += call[event]
if callee.cycle is not None and callee.cycle is not function.cycle:
cycle_totals[callee.cycle] += call[event]
else:
sys.stderr.write(
"call_ratios: No data for " + function.name +
" call to " + callee.name + "\n"
)
# Pass 2: Compute the ratios. Each call[event] is scaled by the
# function_total of the callee. Calls into cycles use the
# cycle_total, but not calls within cycles.
for function in compat_itervalues(self.functions):
for call in compat_itervalues(function.calls):
assert call.ratio is None
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if event in call.events:
if callee.cycle is not None and callee.cycle is not function.cycle:
total = cycle_totals[callee.cycle]
else:
total = function_totals[callee]
call.ratio = ratio(call[event], total)
else:
# Warnings here would only repeat those issued above.
call.ratio = 0.0
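# Added worked example: pass 1 sums call[event] over a callee's incoming
# arrows (excluding self-calls); pass 2 divides each arrow by that sum. For a
# callee outside any cycle with incoming values of 30 and 70, the two arrows
# get ratios of 0.3 and 0.7 respectively.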
def integrate(self, outevent, inevent):
"""Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html
"""
# Sanity checking
assert outevent not in self
for function in compat_itervalues(self.functions):
assert outevent not in function
assert inevent in function
for call in compat_itervalues(function.calls):
assert outevent not in call
if call.callee_id != function.id:
assert call.ratio is not None
# Aggregate the input for each cycle
for cycle in self.cycles:
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self[inevent] = total
# Integrate along the edges
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self._integrate_function(function, outevent, inevent)
self[outevent] = total
def _integrate_function(self, function, outevent, inevent):
if function.cycle is not None:
return self._integrate_cycle(function.cycle, outevent, inevent)
else:
if outevent not in function:
total = function[inevent]
for call in compat_itervalues(function.calls):
if call.callee_id != function.id:
total += self._integrate_call(call, outevent, inevent)
function[outevent] = total
return function[outevent]
def _integrate_call(self, call, outevent, inevent):
assert outevent not in call
assert call.ratio is not None
callee = self.functions[call.callee_id]
subtotal = call.ratio * self._integrate_function(
callee, outevent, inevent
)
call[outevent] = subtotal
return subtotal
def _integrate_cycle(self, cycle, outevent, inevent):
if outevent not in cycle:
# Compute the outevent for the whole cycle
total = inevent.null()
for member in cycle.functions:
subtotal = member[inevent]
for call in compat_itervalues(member.calls):
callee = self.functions[call.callee_id]
if callee.cycle is not cycle:
subtotal += self._integrate_call(
call, outevent, inevent
)
total += subtotal
cycle[outevent] = total
# Compute the time propagated to callers of this cycle
callees = {}
for function in compat_itervalues(self.functions):
if function.cycle is not cycle:
for call in compat_itervalues(function.calls):
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
try:
callees[callee] += call.ratio
except KeyError:
callees[callee] = call.ratio
for member in cycle.functions:
member[outevent] = outevent.null()
for callee, call_ratio in compat_iteritems(callees):
ranks = {}
call_ratios = {}
partials = {}
self._rank_cycle_function(cycle, callee, ranks)
self._call_ratios_cycle(
cycle, callee, ranks, call_ratios, set()
)
partial = self._integrate_cycle_function(
cycle, callee, call_ratio, partials, ranks, call_ratios,
outevent, inevent
)
assert partial == max(partials.values())
assert abs(
call_ratio * total - partial
) <= 0.001 * call_ratio * total
return cycle[outevent]
def _rank_cycle_function(self, cycle, function, ranks):
"""Dijkstra's shortest paths algorithm.
See also:
- http://en.wikipedia.org/wiki/Dijkstra's_algorithm
"""
import heapq
Q = []
Qd = {}
p = {}
visited = set([function])
ranks[function] = 0
for call in compat_itervalues(function.calls):
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
ranks[callee] = 1
item = [ranks[callee], function, callee]
heapq.heappush(Q, item)
Qd[callee] = item
while Q:
cost, parent, member = heapq.heappop(Q)
if member not in visited:
p[member] = parent
visited.add(member)
for call in compat_itervalues(member.calls):
if call.callee_id != member.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
member_rank = ranks[member]
rank = ranks.get(callee)
if rank is not None:
if rank > 1 + member_rank:
rank = 1 + member_rank
ranks[callee] = rank
Qd_callee = Qd[callee]
Qd_callee[0] = rank
Qd_callee[1] = member
heapq._siftdown(Q, 0, Q.index(Qd_callee))
else:
rank = 1 + member_rank
ranks[callee] = rank
item = [rank, member, callee]
heapq.heappush(Q, item)
Qd[callee] = item
def _call_ratios_cycle(self, cycle, function, ranks, call_ratios, visited):
if function not in visited:
visited.add(function)
for call in compat_itervalues(function.calls):
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
if ranks[callee] > ranks[function]:
call_ratios[callee] = call_ratios.get(
callee, 0.0
) + call.ratio
self._call_ratios_cycle(
cycle, callee, ranks, call_ratios, visited
)
def _integrate_cycle_function(
self, cycle, function, partial_ratio, partials, ranks, call_ratios,
outevent, inevent
):
if function not in partials:
partial = partial_ratio * function[inevent]
for call in compat_itervalues(function.calls):
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is not cycle:
assert outevent in call
partial += partial_ratio * call[outevent]
else:
if ranks[callee] > ranks[function]:
callee_partial = self._integrate_cycle_function(
cycle, callee, partial_ratio, partials, ranks,
call_ratios, outevent, inevent
)
call_ratio = ratio(call.ratio, call_ratios[callee])
call_partial = call_ratio * callee_partial
try:
call[outevent] += call_partial
except UndefinedEvent:
call[outevent] = call_partial
partial += call_partial
partials[function] = partial
try:
function[outevent] += partial
except UndefinedEvent:
function[outevent] = partial
return partials[function]
def aggregate(self, event):
"""Aggregate an event for the whole profile."""
total = event.null()
for function in compat_itervalues(self.functions):
try:
total = event.aggregate(total, function[event])
except UndefinedEvent:
return
self[event] = total
def ratio(self, outevent, inevent):
assert outevent not in self
assert inevent in self
for function in compat_itervalues(self.functions):
assert outevent not in function
assert inevent in function
function[outevent] = ratio(function[inevent], self[inevent])
for call in compat_itervalues(function.calls):
assert outevent not in call
if inevent in call:
call[outevent] = ratio(call[inevent], self[inevent])
self[outevent] = 1.0
def prune(self, node_thres, edge_thres):
"""Prune the profile"""
# compute the prune ratios
for function in compat_itervalues(self.functions):
try:
function.weight = function[TOTAL_TIME_RATIO]
except UndefinedEvent:
pass
for call in compat_itervalues(function.calls):
callee = self.functions[call.callee_id]
if TOTAL_TIME_RATIO in call:
# handle exact cases first
call.weight = call[TOTAL_TIME_RATIO]
else:
try:
# make a safe estimate
call.weight = min(
function[TOTAL_TIME_RATIO],
callee[TOTAL_TIME_RATIO]
)
except UndefinedEvent:
pass
# prune the nodes
for function_id in compat_keys(self.functions):
function = self.functions[function_id]
if function.weight is not None:
if function.weight < node_thres:
del self.functions[function_id]
# prune the edges
for function in compat_itervalues(self.functions):
for callee_id in compat_keys(function.calls):
call = function.calls[callee_id]
if callee_id not in self.functions or call.weight is not None and call.weight < edge_thres:
del function.calls[callee_id]
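# Added example: with the defaults used by main() below (node_thres 0.5%,
# edge_thres 0.1%), prune(0.005, 0.001) drops functions whose TOTAL_TIME_RATIO
# weight falls below 0.005 and edges whose weight falls below 0.001, plus any
# edge whose callee was removed.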
def dump(self):
for function in compat_itervalues(self.functions):
sys.stderr.write('Function %s:\n' % (function.name, ))
self._dump_events(function.events)
for call in compat_itervalues(function.calls):
callee = self.functions[call.callee_id]
sys.stderr.write(' Call %s:\n' % (callee.name, ))
self._dump_events(call.events)
for cycle in self.cycles:
sys.stderr.write('Cycle:\n')
self._dump_events(cycle.events)
for function in cycle.functions:
sys.stderr.write(' Function %s\n' % (function.name, ))
def _dump_events(self, events):
for event, value in compat_iteritems(events):
sys.stderr.write(
' %s: %s\n' % (event.name, event.format(value))
)
########################################################################
# Parsers
class Struct:
"""Masquerade a dictionary with a structure-like behavior."""
def __init__(self, attrs=None):
if attrs is None:
attrs = {}
self.__dict__['_attrs'] = attrs
def __getattr__(self, name):
try:
return self._attrs[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self._attrs[name] = value
def __str__(self):
return str(self._attrs)
def __repr__(self):
return repr(self._attrs)
class ParseError(Exception):
"""Raised when parsing to signal mismatches."""
def __init__(self, msg, line):
Exception.__init__(self)
self.msg = msg
# TODO: store more source line information
self.line = line
def __str__(self):
return '%s: %r' % (self.msg, self.line)
class Parser:
"""Parser interface."""
stdinInput = True
multipleInput = False
def __init__(self):
pass
def parse(self):
raise NotImplementedError
class AfgprofParser(Parser):
def __init__(self, stream):
Parser.__init__(self)
self.stream = stream
def parse(self):
obj = json.load(self.stream)
profile = Profile()
profile[SAMPLES] = 0
functions = dict()
fi = 0
def find_function(pc):
try:
return functions[transform(pc)]
except KeyError:
nonlocal fi
function = Function(
fi, obj['index'][str(pc)].get('symbol', '?')
)
function[SAMPLES] = 0
fi += 1
functions[transform(pc)] = function
profile.add_function(function)
return function
def transform(pc):
return obj['index'][str(pc)].get('symbol', '?')
for call in obj['calls']:
caller = find_function(call['lr'])
callee = find_function(call['pc'])
count = call['count']
callee[SAMPLES] += count
profile[SAMPLES] += count
try:
call = caller.calls[callee.id]
except KeyError:
call = Call(callee.id)
call[SAMPLES2] = count
call[CALLS] = count
caller.add_call(call)
else:
call[CALLS] += count
call[SAMPLES2] += count
if False:
profile.dump()
# compute derived data
profile.validate()
profile.find_cycles()
profile.ratio(TIME_RATIO, SAMPLES)
profile.call_ratios(SAMPLES2)
profile.aggregate(CALLS)
profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
return profile
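# Added, illustrative sketch of the input this parser appears to expect; the
# field names below come from the code above, but the sample values are
# hypothetical:
#
#   {
#     "index": {"4096": {"symbol": "main"}, "8192": {"symbol": "work"}},
#     "calls": [{"lr": 4096, "pc": 8192, "count": 3}]
#   }
#
# i.e. each call record names the caller by link register ("lr"), the callee
# by program counter ("pc"), and how many times the edge was taken ("count").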
class Theme:
def __init__(
self,
bgcolor=(0.0, 0.0, 1.0),
mincolor=(0.0, 0.0, 0.0),
maxcolor=(0.0, 0.0, 1.0),
fontname="Arial",
fontcolor="white",
nodestyle="filled",
minfontsize=10.0,
maxfontsize=10.0,
minpenwidth=0.5,
maxpenwidth=4.0,
gamma=2.2,
skew=1.0
):
self.bgcolor = bgcolor
self.mincolor = mincolor
self.maxcolor = maxcolor
self.fontname = fontname
self.fontcolor = fontcolor
self.nodestyle = nodestyle
self.minfontsize = minfontsize
self.maxfontsize = maxfontsize
self.minpenwidth = minpenwidth
self.maxpenwidth = maxpenwidth
self.gamma = gamma
self.skew = skew
def graph_bgcolor(self):
return self.hsl_to_rgb(*self.bgcolor)
def graph_fontname(self):
return self.fontname
def graph_fontcolor(self):
return self.fontcolor
def graph_fontsize(self):
return self.minfontsize
def node_bgcolor(self, weight):
return self.color(weight)
def node_fgcolor(self, weight):
if self.nodestyle == "filled":
return self.graph_bgcolor()
else:
return self.color(weight)
def node_fontsize(self, weight):
return self.fontsize(weight)
def node_style(self):
return self.nodestyle
def edge_color(self, weight):
return self.color(weight)
def edge_fontsize(self, weight):
return self.fontsize(weight)
def edge_penwidth(self, weight):
return max(weight * self.maxpenwidth, self.minpenwidth)
def edge_arrowsize(self, weight):
return 0.5 * math.sqrt(self.edge_penwidth(weight))
def fontsize(self, weight):
return max(weight**2 * self.maxfontsize, self.minfontsize)
def color(self, weight):
weight = min(max(weight, 0.0), 1.0)
hmin, smin, lmin = self.mincolor
hmax, smax, lmax = self.maxcolor
if self.skew < 0:
raise ValueError("Skew must be greater than 0")
elif self.skew == 1.0:
h = hmin + weight * (hmax - hmin)
s = smin + weight * (smax - smin)
l = lmin + weight * (lmax - lmin)
else:
base = self.skew
h = hmin + ((hmax - hmin) * (-1.0 + (base**weight)) / (base - 1.0))
s = smin + ((smax - smin) * (-1.0 + (base**weight)) / (base - 1.0))
l = lmin + ((lmax - lmin) * (-1.0 + (base**weight)) / (base - 1.0))
return self.hsl_to_rgb(h, s, l)
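# Added note on the skew formula above: for weight w and base b = skew, each
# channel is interpolated as min + (max - min) * (b**w - 1) / (b - 1), which
# passes through the endpoints at w = 0 and w = 1 and reduces to plain linear
# interpolation as b approaches 1.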
def hsl_to_rgb(self, h, s, l):
"""Convert a color from HSL color-model to RGB.
See also:
- http://www.w3.org/TR/css3-color/#hsl-color
"""
h = h % 1.0
s = min(max(s, 0.0), 1.0)
l = min(max(l, 0.0), 1.0)
if l <= 0.5:
m2 = l * (s + 1.0)
else:
m2 = l + s - l * s
m1 = l * 2.0 - m2
r = self._hue_to_rgb(m1, m2, h + 1.0 / 3.0)
g = self._hue_to_rgb(m1, m2, h)
b = self._hue_to_rgb(m1, m2, h - 1.0 / 3.0)
# Apply gamma correction
r **= self.gamma
g **= self.gamma
b **= self.gamma
return (r, g, b)
def _hue_to_rgb(self, m1, m2, h):
if h < 0.0:
h += 1.0
elif h > 1.0:
h -= 1.0
if h * 6 < 1.0:
return m1 + (m2 - m1) * h * 6.0
elif h * 2 < 1.0:
return m2
elif h * 3 < 2.0:
return m1 + (m2 - m1) * (2.0 / 3.0 - h) * 6.0
else:
return m1
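# Added worked example for hsl_to_rgb() above: with the default gamma of 2.2,
# hsl_to_rgb(0.0, 1.0, 0.5) computes m2 = 1.0, m1 = 0.0 and yields pure red
# (1.0, 0.0, 0.0); gamma correction leaves channels of 0.0 and 1.0 unchanged.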
TEMPERATURE_COLORMAP = Theme(
mincolor = (2.0/3.0, 0.80, 0.25), # dark blue
maxcolor = (0.0, 1.0, 0.5), # saturated red
gamma = 1.0
)
PINK_COLORMAP = Theme(
mincolor = (0.0, 1.0, 0.90), # pink
maxcolor = (0.0, 1.0, 0.5), # saturated red
)
GRAY_COLORMAP = Theme(
mincolor = (0.0, 0.0, 0.85), # light gray
maxcolor = (0.0, 0.0, 0.0), # black
)
BW_COLORMAP = Theme(
minfontsize = 8.0,
maxfontsize = 24.0,
mincolor = (0.0, 0.0, 0.0), # black
maxcolor = (0.0, 0.0, 0.0), # black
minpenwidth = 0.1,
maxpenwidth = 8.0,
)
PRINT_COLORMAP = Theme(
minfontsize = 18.0,
maxfontsize = 30.0,
fontcolor = "black",
nodestyle = "solid",
mincolor = (0.0, 0.0, 0.0), # black
maxcolor = (0.0, 0.0, 0.0), # black
minpenwidth = 0.1,
maxpenwidth = 8.0,
)
themes = {
"color": TEMPERATURE_COLORMAP,
"pink": PINK_COLORMAP,
"gray": GRAY_COLORMAP,
"bw": BW_COLORMAP,
"print": PRINT_COLORMAP,
}
def sorted_iteritems(d):
# Used mostly for result reproducibility (while testing).
keys = compat_keys(d)
keys.sort()
for key in keys:
value = d[key]
yield key, value
class DotWriter:
"""Writer for the DOT language.
See also:
- "The DOT Language" specification
http://www.graphviz.org/doc/info/lang.html
"""
strip = False
wrap = False
def __init__(self, fp):
self.fp = fp
def wrap_function_name(self, name):
"""Split the function name on multiple lines."""
if len(name) > 32:
ratio = 2.0 / 3.0
height = max(int(len(name) / (1.0 - ratio) + 0.5), 1)
width = max(len(name) / height, 32)
# TODO: break lines in symbols
name = textwrap.fill(name, width, break_long_words=False)
# Take away spaces
name = name.replace(", ", ",")
name = name.replace("> >", ">>")
name = name.replace("> >", ">>") # catch consecutive
return name
show_function_events = [TOTAL_TIME_RATIO, TIME_RATIO]
show_edge_events = [TOTAL_TIME_RATIO, CALLS]
def graph(self, profile, theme):
self.begin_graph()
fontname = theme.graph_fontname()
fontcolor = theme.graph_fontcolor()
nodestyle = theme.node_style()
self.attr('graph', fontname=fontname, ranksep=0.25, nodesep=0.125)
self.attr(
'node',
fontname=fontname,
shape="box",
style=nodestyle,
fontcolor=fontcolor,
width=0,
height=0
)
self.attr('edge', fontname=fontname)
for _, function in sorted_iteritems(profile.functions):
labels = []
if function.process is not None:
labels.append(function.process)
if function.module is not None:
labels.append(function.module)
if self.strip:
function_name = function.stripped_name()
else:
function_name = function.name
if self.wrap:
function_name = self.wrap_function_name(function_name)
labels.append(function_name)
for event in self.show_function_events:
if event in function.events:
label = event.format(function[event])
labels.append(label)
if function.called is not None:
labels.append("%u%s" % (function.called, MULTIPLICATION_SIGN))
if function.weight is not None:
weight = function.weight
else:
weight = 0.0
label = '\n'.join(labels)
self.node(
function.id,
label=label,
color=self.color(theme.node_bgcolor(weight)),
fontcolor=self.color(theme.node_fgcolor(weight)),
fontsize="%.2f" % theme.node_fontsize(weight),
tooltip=function.filename,
)
for _, call in sorted_iteritems(function.calls):
callee = profile.functions[call.callee_id]
labels = []
for event in self.show_edge_events:
if event in call.events:
label = event.format(call[event])
labels.append(label)
if call.weight is not None:
weight = call.weight
elif callee.weight is not None:
weight = callee.weight
else:
weight = 0.0
label = '\n'.join(labels)
self.edge(
function.id,
call.callee_id,
label=label,
color=self.color(theme.edge_color(weight)),
fontcolor=self.color(theme.edge_color(weight)),
fontsize="%.2f" % theme.edge_fontsize(weight),
penwidth="%.2f" % theme.edge_penwidth(weight),
labeldistance="%.2f" % theme.edge_penwidth(weight),
arrowsize="%.2f" % theme.edge_arrowsize(weight),
)
self.end_graph()
def begin_graph(self):
self.write('digraph {\n')
def end_graph(self):
self.write('}\n')
def attr(self, what, **attrs):
self.write("\t")
self.write(what)
self.attr_list(attrs)
self.write(";\n")
def node(self, node, **attrs):
self.write("\t")
self.id(node)
self.attr_list(attrs)
self.write(";\n")
def edge(self, src, dst, **attrs):
self.write("\t")
self.id(src)
self.write(" -> ")
self.id(dst)
self.attr_list(attrs)
self.write(";\n")
def attr_list(self, attrs):
if not attrs:
return
self.write(' [')
first = True
for name, value in sorted_iteritems(attrs):
if value is None:
continue
if first:
first = False
else:
self.write(", ")
self.id(name)
self.write('=')
self.id(value)
self.write(']')
def id(self, id):
if isinstance(id, (int, float)):
s = str(id)
elif isinstance(id, basestring):
if id.isalnum() and not id.startswith('0x'):
s = id
else:
s = self.escape(id)
else:
raise TypeError
self.write(s)
def color(self, rgb):
r, g, b = rgb
def float2int(f):
if f <= 0.0:
return 0
if f >= 1.0:
return 255
return int(255.0 * f + 0.5)
return "#" + "".join(["%02x" % float2int(c) for c in (r, g, b)])
def escape(self, s):
if not PYTHON_3:
s = s.encode('utf-8')
s = s.replace('\\', r'\\')
s = s.replace('\n', r'\n')
s = s.replace('\t', r'\t')
s = s.replace('"', r'\"')
return '"' + s + '"'
def write(self, s):
self.fp.write(s)
########################################################################
# Main program
def naturalJoin(values):
if len(values) >= 2:
return ', '.join(values[:-1]) + ' or ' + values[-1]
else:
return ''.join(values)
def main():
"""Main program."""
global totalMethod
optparser = optparse.OptionParser(usage="\n\t%prog [options] [file] ...")
optparser.add_option(
'-o',
'--output',
metavar='FILE',
type="string",
dest="output",
help="output filename [stdout]"
)
optparser.add_option(
'-n',
'--node-thres',
metavar='PERCENTAGE',
type="float",
dest="node_thres",
default=0.5,
help="eliminate nodes below this threshold [default: %default]"
)
optparser.add_option(
'-e',
'--edge-thres',
metavar='PERCENTAGE',
type="float",
dest="edge_thres",
default=0.1,
help="eliminate edges below this threshold [default: %default]"
)
optparser.add_option(
'--total',
type="choice",
choices=('callratios', 'callstacks'),
dest="totalMethod",
default=totalMethod,
help="preferred method of calculating total time: callratios or callstacks (currently affects only perf format) [default: %default]"
)
optparser.add_option(
'-c',
'--colormap',
type="choice",
choices=('color', 'pink', 'gray', 'bw', 'print'),
dest="theme",
default="color",
help="color map: color, pink, gray, bw, or print [default: %default]"
)
optparser.add_option(
'-s',
'--strip',
action="store_true",
dest="strip",
default=False,
help="strip function parameters, template parameters, and const modifiers from demangled C++ function names"
)
optparser.add_option(
'-w',
'--wrap',
action="store_true",
dest="wrap",
default=False,
help="wrap function names"
)
optparser.add_option(
'--show-samples',
action="store_true",
dest="show_samples",
default=False,
help="show function samples"
)
# add option to create subtree or show paths
optparser.add_option(
'-z',
'--root',
type="string",
dest="root",
default="",
help="prune call graph to show only descendants of specified root function"
)
optparser.add_option(
'-l',
'--leaf',
type="string",
dest="leaf",
default="",
help="prune call graph to show only ancestors of specified leaf function"
)
# add a new option to control skew of the colorization curve
optparser.add_option(
'--skew',
type="float",
dest="theme_skew",
default=1.0,
help="skew the colorization curve. Values < 1.0 give more variety to lower percentages. Values > 1.0 give less variety to lower percentages"
)
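# Added usage sketch (file names are hypothetical): a typical invocation pipes
# the generated DOT text straight into Graphviz, e.g.
#   python afgprof2dot.py -n0 -e0 calls.json | dot -Tpng -o callgraph.png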
(options, args) = optparser.parse_args(sys.argv[1:])
try:
theme = themes[options.theme]
except KeyError:
optparser.error('invalid colormap \'%s\'' % options.theme)
# set skew on the theme now that it has been picked.
if options.theme_skew:
theme.skew = options.theme_skew
totalMethod = options.totalMethod
Format = AfgprofParser
if Format.stdinInput:
if not args:
fp = sys.stdin
elif PYTHON_3:
fp = open(args[0], 'rt', encoding='UTF-8')
else:
fp = open(args[0], 'rt')
parser = Format(fp)
elif Format.multipleInput:
if not args:
optparser.error(
'at least a file must be specified for %s input' %
options.format
)
parser = Format(*args)
else:
if len(args) != 1:
optparser.error(
'exactly one file must be specified for %s input' %
options.format
)
parser = Format(args[0])
profile = parser.parse()
if options.output is None:
output = sys.stdout
else:
if PYTHON_3:
output = open(options.output, 'wt', encoding='UTF-8')
else:
output = open(options.output, 'wt')
dot = DotWriter(output)
dot.strip = options.strip
dot.wrap = options.wrap
if options.show_samples:
dot.show_function_events.append(SAMPLES)
profile.prune(options.node_thres / 100.0, options.edge_thres / 100.0)
if options.root:
rootId = profile.getFunctionId(options.root)
if not rootId:
sys.stderr.write(
'root node ' + options.root +
' not found (might already be pruned: try -e0 -n0 flags)\n'
)
sys.exit(1)
profile.prune_root(rootId)
if options.leaf:
leafId = profile.getFunctionId(options.leaf)
if not leafId:
sys.stderr.write(
'leaf node ' + options.leaf +
' not found (maybe already pruned: try -e0 -n0 flags)\n'
)
sys.exit(1)
profile.prune_leaf(leafId)
dot.graph(profile, theme)
if __name__ == '__main__':
main()
else:
# Importing this module is unsupported. It is more robust and equally easy
# to do
#
# subprocess.call([sys.executable, 'gprof2dot.py', ...])
#
raise Exception('using gprof2dot.py as a module is unsupported')
| afg984/afgprof | afgprof2dot.py | Python | mit | 46,545 |
"""
WSGI config for gameonwebapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gameonwebapp.settings")
application = get_wsgi_application()
| Nishanthnishu/GameON-VirtualCricketBetting | GameON/gameonwebapp/gameonwebapp/wsgi.py | Python | mit | 402 |
from djpcms import sites
from djpcms.http import get_http
from djpcms.template import RequestContext, loader
from djpcms.views.baseview import djpcmsview
class badview(djpcmsview):
def __init__(self, template, httphandler):
self.template = template
self.httphandler = httphandler
super(badview,self).__init__()
def response(self, request):
t = loader.get_template(self.template)
c = {'request_path': request.path,
'grid': self.grid960()}
return self.httphandler(t.render(RequestContext(request, c)))
def http404view(request, *args, **kwargs):
http = get_http(sites.settings.HTTP_LIBRARY)
return badview('404.html',
http.HttpResponseNotFound).response(request)
def http500view(request, *args, **kwargs):
http = get_http(sites.settings.HTTP_LIBRARY)
return badview('500.html',
http.HttpResponseServerError).response(request) | strogo/djpcms | djpcms/views/specials.py | Python | bsd-3-clause | 963 |
# Locking debugging code -- temporary
# Copyright (C) 2003 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from threading import Lock, currentThread
import traceback
logfile = open("/tmp/logfile", "wt")
loglock = Lock()
class DebuggingLock:
def __init__(self, name):
self.lock = Lock()
self.name = name
def acquire(self, blocking = 1):
self.print_tb("Acquire lock")
self.lock.acquire(blocking)
self.logmsg("===== %s: Thread %s acquired lock\n" % (self.name, currentThread().getName()))
def release(self):
self.print_tb("Release lock")
self.lock.release()
def logmsg(self, msg):
loglock.acquire()
logfile.write(msg + "\n")
logfile.flush()
loglock.release()
def print_tb(self, msg):
self.logmsg(".... %s: Thread %s attempting to %s\n" % \
(self.name, currentThread().getName(), msg) + \
"\n".join(traceback.format_list(traceback.extract_stack())))
| dimpase/offlineimap | offlineimap/ui/debuglock.py | Python | gpl-2.0 | 1,752 |
__author__ = 'stanley'
import json
import webapp2
from google.appengine.api import users
from domain.user import *
from util.sanity_check import *
from domain.doc_index import *
class FollowHandler(webapp2.RequestHandler):
def post(self):
usr = user_key(users.get_current_user().email()).get()
if not user_is_logged_in(usr):
return
if attr_is_not_in_request(self.request, 'target'):
return
target_email = self.request.get('target')
target = user_key(target_email)
# if the target user does not exist at all!
if target is None:
return
# no one can follow him/her self!
if target_email == usr.id:
return
target = target.get()
# already following target
if usr.id in target.followers:
return
target.append_follower(usr.id)
update_rank(target.id, 10, 'plus')
target.put()
self.response.headers['Content-Type'] = 'application/json'
result = json.dumps({'successful': True})
self.response.write(result) | nimadini/Teammate | handlers/follow.py | Python | apache-2.0 | 1,114 |
"""
=========================================
Visualize channel over epochs as an image
=========================================
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
Two images are produced, one with a good channel and one with a channel
that does not show any evoked field.
It is also demonstrated how to reorder the epochs using a 1D spectral
embedding as described in [1]_.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# Create epochs, here for gradiometers + EOG only for simplicity
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event-related fields images
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.manifold import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
###############################################################################
# References
# ----------
# .. [1] Graph-based variability estimation in single-trial event-related
# neural responses. A. Gramfort, R. Keriven, M. Clerc, 2010,
# Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
# https://ieeexplore.ieee.org/document/5406156
| olafhauk/mne-python | examples/visualization/plot_channel_epochs_image.py | Python | bsd-3-clause | 2,881 |
import hashlib
import os
import sys
import urllib
def check(path, sha):
d = hashlib.sha1()
with open(path, 'rb') as f:
while 1:
buf = f.read(1024 * 1024)
if not len(buf):
break
d.update(buf)
return d.hexdigest() == sha
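# Added note: check() streams the file in 1 MiB chunks, so verifying large
# downloads stays memory-bounded; e.g. check('/tmp/pkg.tar.gz', 'da39a3...')
# returns True only when the file's SHA-1 hexdigest matches exactly.
# (The path and digest above are placeholders.)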
def run(*args, **kwargs):
url = sys.argv[1]
sha = sys.argv[2]
path = sys.argv[3]
if os.path.isdir(path):
import urlparse
path = os.path.join(path, urlparse.urlparse(url).path.split('/')[-1])
if os.path.exists(path):
if not check(path, sha):
os.remove(path)
if not os.path.exists(path):
urllib.urlretrieve(url, path)
if not check(path, sha):
sys.exit(1)
if __name__ == '__main__':
run()
| ployground/bsdploy | bsdploy/download.py | Python | bsd-3-clause | 775 |
# -*- coding: utf8 -*-
from .ids import ID as ItemID
from .factory import new_item
from .recipe import furnace_recipe
__all__ = [
'ItemID',
'new_item',
'furnace_recipe',
] | nosix/PyCraft | src/pycraft/service/part/item/__init__.py | Python | lgpl-3.0 | 190 |
# Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf.urls import url
from ..settings import SLUG_RE
from ..views.campaigns import (CampaignCreateView, CampaignDeleteView,
CampaignListView, CampaignPublishView, CampaignResultView, CampaignSendView,
CampaignUpdateView)
from ..views.createquestion import (QuestionCreateView, QuestionDeleteView,
QuestionListView, QuestionRankView, QuestionUpdateView)
from ..views.matrix import RespondentListView
urlpatterns = [
url(r'^new/',
CampaignCreateView.as_view(), name='survey_create'),
url(r'^(?P<campaign>%s)/send/' % SLUG_RE,
CampaignSendView.as_view(), name='survey_send'),
url(r'^(?P<campaign>%s)/result/' % SLUG_RE,
CampaignResultView.as_view(), name='survey_result'),
url(r'^(?P<campaign>%s)/respondents/' % SLUG_RE,
RespondentListView.as_view(), name='survey_respondent_list'),
url(r'^(?P<campaign>%s)/publish/' % SLUG_RE,
CampaignPublishView.as_view(), name='survey_publish'),
url(r'^(?P<campaign>%s)/edit/' % SLUG_RE,
CampaignUpdateView.as_view(), name='survey_edit'),
url(r'^(?P<campaign>%s)/delete/' % SLUG_RE,
CampaignDeleteView.as_view(), name='survey_delete'),
url(r'^(?P<campaign>%s)/new/' % SLUG_RE,
QuestionCreateView.as_view(), name='survey_question_new'),
url(r'^(?P<campaign>%s)/(?P<num>\d+)/down/' % SLUG_RE,
QuestionRankView.as_view(), name='survey_question_down'),
url(r'^(?P<campaign>%s)/(?P<num>\d+)/up/' % SLUG_RE,
QuestionRankView.as_view(direction=-1), name='survey_question_up'),
url(r'^(?P<campaign>%s)/(?P<num>\d+)/delete/' % SLUG_RE,
QuestionDeleteView.as_view(), name='survey_question_delete'),
url(r'^(?P<campaign>%s)/(?P<num>\d+)/edit/' % SLUG_RE,
QuestionUpdateView.as_view(), name='survey_question_edit'),
url(r'^(?P<campaign>%s)/' % SLUG_RE,
QuestionListView.as_view(), name='survey_question_list'),
url(r'^',
CampaignListView.as_view(), name='survey_campaign_list'),
]
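# Added, illustrative examples of how these patterns resolve (the
# 'climate-2018' slug and question number are hypothetical): 'new/' maps to
# survey_create, 'climate-2018/send/' to survey_send, and
# 'climate-2018/3/edit/' to survey_question_edit.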
| djaodjin/djaodjin-survey | survey/urls/manager.py | Python | bsd-2-clause | 3,316 |
#!/usr/bin/env python
""" Package install script. """
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
f = open(os.path.join(os.path.dirname(__file__), "README.rst"))
readme = f.read()
f.close()
setup(
name="pdfjinja",
version="1.1.0",
author="Ram Mehta",
author_email="ram.mehta@gmail.com",
url="http://github.com/rammie/pdfjinja/",
description='Use jinja templates to fill and sign pdf forms.',
long_description=readme,
py_modules=["pdfjinja"],
entry_points={"console_scripts": ["pdfjinja = pdfjinja:main"]},
install_requires=[
"fdfgen>=0.13.0",
"jinja2>=2.8",
"pdfminer.six==20160202",
"Pillow>=3.2.0",
"PyPDF2>=1.25.1",
"reportlab>=3.3.0"
])
| rammie/pdfjinja | setup.py | Python | mit | 799 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-06 05:22
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ExtendedUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('qr_code_text', models.CharField(max_length=200)),
('qr_code_img', models.ImageField(upload_to=b'/media/tenant/')),
('qr_code_url_text', models.URLField(blank=True, null=True)),
('qr_code_url_img', models.ImageField(blank=True, null=True, upload_to=b'/media/tenant/')),
('mobile_number', models.IntegerField()),
('alternate_number', models.IntegerField()),
('country_code', models.IntegerField(default=91)),
('alternate_country_code', models.IntegerField(default=91)),
('dob', models.DateField()),
('gender', models.CharField(choices=[(b'M', b'Male'), (b'F', b'Female'), (b'U', b'Unspecified')], max_length=1)),
('photo', models.ImageField(upload_to=b'/media/avatars')),
('blood_group', models.CharField(choices=[(b'A1 -ve', b'A1 Negative '), (b'A1 +ve', b'A1 Positive '), (b'A1B -ve', b'A1B Negative '), (b'A1B +ve', b'A1B Positive '), (b'A2 -ve', b'A2 Negative '), (b'A2 +ve', b'A2 Positive '), (b'A2B -ve', b'A2B Negative '), (b'A2B +ve', b'A2B Positive '), (b'B -ve', b'B Negative '), (b'B +ve', b'B Positive '), (b'B1 +ve', b'B1 Positive '), (b'O -ve', b'O Negative '), (b'O +ve', b'O Positive ')], max_length=10)),
('user_type', models.CharField(choices=[(b'S', b'Student'), (b'T', b'Teacher'), (b'C', b'Contractor'), (b'E', b'Employee')], max_length=1)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_created_by', to=settings.AUTH_USER_MODEL)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_updated_by', to=settings.AUTH_USER_MODEL)),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=False)),
('line_1', models.CharField(max_length=100)),
('line_2', models.CharField(max_length=100)),
('city', models.CharField(max_length=30)),
('state', models.CharField(max_length=30)),
('country', models.CharField(max_length=30)),
('zip_code', models.IntegerField()),
('address_type', models.CharField(choices=[(b'P', b'Permanent'), (b'C', b'Current'), (b'T', b'Temporary')], max_length=1)),
('created_date_time', models.DateTimeField(auto_now_add=True)),
('updated_date_time', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='address_crd_by', to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='address_upd_by', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PhoneNumber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=False)),
('phone_connection_type', models.CharField(choices=[(b'M', b'Mobile Number'), (b'F', b'Fixed Line')], max_length=1)),
('country_code', models.IntegerField(default=91)),
('area_code', models.IntegerField()),
('phone_number', models.IntegerField()),
('phone_number_type', models.CharField(choices=[(b'P', b'Personal'), (b'E', b'Emergency'), (b'H', b'Home')], max_length=1)),
('created_date_time', models.DateTimeField(auto_now_add=True)),
('updated_date_time', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phone_crd_by', to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phone_upd_by', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ReferenceChoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('choices', django.contrib.postgres.fields.jsonb.JSONField()),
('created_date_time', models.DateTimeField(auto_now_add=True)),
('updated_date_time', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_choice_crd_by', to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_choice_upd_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ReferenceDepartment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('descr', models.CharField(max_length=200)),
('created_date_time', models.DateTimeField(auto_now_add=True)),
('updated_date_time', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_dept_crd_by', to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_dept_upd_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ReferenceDesignation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('descr', models.CharField(max_length=200)),
('created_date_time', models.DateTimeField(auto_now_add=True)),
('updated_date_time', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_designation_crd_by', to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_designation_upd_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ReferenceFacility',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('created_date_time', models.DateTimeField(auto_now_add=True)),
('updated_date_time', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_facility_crd_by', to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_facility_upd_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ReferenceItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('data', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=30), size=None)),
('created_date_time', models.DateTimeField(auto_now_add=True)),
('updated_date_time', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_item_crd_by', to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ref_item_upd_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| django-school-management/ssms | ssms/common/common/migrations/0001_initial.py | Python | lgpl-3.0 | 12,307 |
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellStateManager
"""
import time
import mock
from oslo.config import cfg
from oslo.db import exception as db_exc
import six
from nova.cells import state
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common import fileutils
from nova import test
FAKE_COMPUTES = [
('host1', 1024, 100, 0, 0),
('host2', 1024, 100, -1, -1),
('host3', 1024, 100, 1024, 100),
('host4', 1024, 100, 300, 30),
]
# NOTE(alaski): It's important to have multiple types that end up having the
# same memory and disk requirements. So two types need the same first value,
# and two need the second and third values to add up to the same thing.
FAKE_ITYPES = [
(0, 0, 0),
(50, 12, 13),
(50, 2, 4),
(10, 20, 5),
]
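# Added worked example of the note above: the two types with memory_mb 50
# share the same memory requirement, and (50, 12, 13) and (10, 20, 5) both
# need 25 GB of disk (root_gb + ephemeral_gb), which is what exercises the
# duplicate-size buckets in the capacity calculations below.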
def _fake_compute_node_get_all(context):
def _node(host, total_mem, total_disk, free_mem, free_disk):
service = {'host': host, 'disabled': False}
return {'service': service,
'memory_mb': total_mem,
'local_gb': total_disk,
'free_ram_mb': free_mem,
'free_disk_gb': free_disk}
return [_node(*fake) for fake in FAKE_COMPUTES]
def _fake_instance_type_all(context):
def _type(mem, root, eph):
return {'root_gb': root,
'ephemeral_gb': eph,
'memory_mb': mem}
return [_type(*fake) for fake in FAKE_ITYPES]
class TestCellsStateManager(test.TestCase):
def setUp(self):
super(TestCellsStateManager, self).setUp()
self.stubs.Set(db, 'compute_node_get_all', _fake_compute_node_get_all)
self.stubs.Set(db, 'flavor_get_all', _fake_instance_type_all)
def test_cells_config_not_found(self):
self.flags(cells_config='no_such_file_exists.conf', group='cells')
e = self.assertRaises(cfg.ConfigFilesNotFoundError,
state.CellStateManager)
self.assertEqual(['no_such_file_exists.conf'], e.config_files)
@mock.patch.object(cfg.ConfigOpts, 'find_file')
@mock.patch.object(fileutils, 'read_cached_file')
def test_filemanager_returned(self, mock_read_cached_file, mock_find_file):
mock_find_file.return_value = "/etc/nova/cells.json"
mock_read_cached_file.return_value = (False, six.StringIO({}))
self.flags(cells_config='cells.json', group='cells')
self.assertIsInstance(state.CellStateManager(),
state.CellStateManagerFile)
def test_dbmanager_returned(self):
self.assertIsInstance(state.CellStateManager(),
state.CellStateManagerDB)
def test_capacity_no_reserve(self):
# utilize entire cell
cap = self._capacity(0.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
units = cell_free_ram / 50
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
units = 5 # 4 on host 3, 1 on host4
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
def test_capacity_full_reserve(self):
# reserve the entire cell. (utilize zero percent)
cap = self._capacity(100.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)])
def test_capacity_part_reserve(self):
# utilize half the cell's free capacity
cap = self._capacity(50.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
units = 10 # 10 from host 3
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
units = 2 # 2 on host 3
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
def _get_state_manager(self, reserve_percent=0.0):
self.flags(reserve_percent=reserve_percent, group='cells')
return state.CellStateManager()
def _capacity(self, reserve_percent):
state_manager = self._get_state_manager(reserve_percent)
my_state = state_manager.get_my_state()
return my_state.capacities
class TestCellStateManagerException(test.TestCase):
@mock.patch.object(time, 'sleep')
def test_init_db_error(self, mock_sleep):
class TestCellStateManagerDB(state.CellStateManagerDB):
def __init__(self):
self._cell_data_sync = mock.Mock()
self._cell_data_sync.side_effect = [db_exc.DBError(), []]
super(TestCellStateManagerDB, self).__init__()
test = TestCellStateManagerDB()
mock_sleep.assert_called_once_with(30)
self.assertEqual(test._cell_data_sync.call_count, 2)
class TestCellsGetCapacity(TestCellsStateManager):
def setUp(self):
super(TestCellsGetCapacity, self).setUp()
self.capacities = {"ram_free": 1234}
self.state_manager = self._get_state_manager()
cell = models.Cell(name="cell_name")
other_cell = models.Cell(name="other_cell_name")
cell.capacities = self.capacities
other_cell.capacities = self.capacities
self.stubs.Set(self.state_manager, 'child_cells',
{"cell_name": cell,
"other_cell_name": other_cell})
def test_get_cell_capacity_for_all_cells(self):
self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
self.capacities)
capacities = self.state_manager.get_capacities()
self.assertEqual({"ram_free": 3702}, capacities)
def test_get_cell_capacity_for_the_parent_cell(self):
self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
self.capacities)
capacities = self.state_manager.\
get_capacities(self.state_manager.my_cell_state.name)
self.assertEqual({"ram_free": 3702}, capacities)
def test_get_cell_capacity_for_a_cell(self):
self.assertEqual(self.capacities,
self.state_manager.get_capacities(cell_name="cell_name"))
def test_get_cell_capacity_for_non_existing_cell(self):
self.assertRaises(exception.CellNotFound,
self.state_manager.get_capacities,
cell_name="invalid_cell_name")
class FakeCellStateManager(object):
def __init__(self):
self.called = []
def _cell_data_sync(self, force=False):
self.called.append(('_cell_data_sync', force))
class TestSyncDecorators(test.TestCase):
def test_sync_before(self):
manager = FakeCellStateManager()
def test(inst, *args, **kwargs):
self.assertEqual(inst, manager)
self.assertEqual(args, (1, 2, 3))
self.assertEqual(kwargs, dict(a=4, b=5, c=6))
return 'result'
wrapper = state.sync_before(test)
result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
self.assertEqual(result, 'result')
self.assertEqual(manager.called, [('_cell_data_sync', False)])
def test_sync_after(self):
manager = FakeCellStateManager()
def test(inst, *args, **kwargs):
self.assertEqual(inst, manager)
self.assertEqual(args, (1, 2, 3))
self.assertEqual(kwargs, dict(a=4, b=5, c=6))
return 'result'
wrapper = state.sync_after(test)
result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
self.assertEqual(result, 'result')
self.assertEqual(manager.called, [('_cell_data_sync', True)])
| jumpstarter-io/nova | nova/tests/cells/test_cells_state_manager.py | Python | apache-2.0 | 9,385 |
"""
Module containing class `OutsideClassifier`.
An `OutsideClassifier` assigns the `'Outside'` classification to a clip
if the clip's start time is outside of the interval from one hour after
sunset to one half hour before sunrise, and does nothing otherwise.
"""
import datetime
from vesper.command.annotator import Annotator
from vesper.ephem.sun_moon import SunMoonCache
_START_OFFSET = datetime.timedelta(minutes=60)
_END_OFFSET = datetime.timedelta(minutes=-30)
class OutsideClassifier(Annotator):
extension_name = 'MPG Ranch Outside Classifier 1.1'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sun_moons = SunMoonCache()
def annotate(self, clip):
station = clip.station
sun_moon = self._sun_moons.get_sun_moon(
station.latitude, station.longitude, station.tz)
clip_start_time = clip.start_time
night = station.get_night(clip_start_time)
def get_event_time(event_name):
return sun_moon.get_solar_event_time(night, event_name, day=False)
# Check if clip start time precedes analysis period.
sunset_time = get_event_time('Sunset')
if sunset_time is not None:
start_time = sunset_time + _START_OFFSET
if clip_start_time < start_time:
self._annotate(clip, 'Outside')
return True
# Check if clip start time follows analysis period.
sunrise_time = get_event_time('Sunrise')
if sunrise_time is not None:
end_time = sunrise_time + _END_OFFSET
if clip_start_time > end_time:
self._annotate(clip, 'Outside')
return True
# If we get here, the clip is not outside of the analysis period,
# so we will not annotate it.
return False
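if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original module): how the
    # two offsets above bound the nightly analysis window. The datetimes are
    # made-up example values, not real solar event times.
    example_sunset = datetime.datetime(2020, 9, 1, 20, 0)
    example_sunrise = datetime.datetime(2020, 9, 2, 6, 0)
    window_start = example_sunset + _START_OFFSET   # 21:00, one hour after sunset
    window_end = example_sunrise + _END_OFFSET      # 05:30, half an hour before sunrise
    print('Clips starting outside [%s, %s] would be annotated Outside'
          % (window_start, window_end))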
| HaroldMills/Vesper | vesper/mpg_ranch/outside_classifier.py | Python | mit | 1,908 |
from jsonrpclib import Server
"""
Specify settings
"""
username = ''
password = ''
server = Server('https://' + username + ':' + password + '@www.factweb.nl/jsonrpc/call/jsonrpc')
def creditor_group():
"""
Get creditor_group data
"""
json_data = server.get('creditor_group', 5)
print "Received JSON data from server.get(creditor_group, id):"
print json_data
"""
Add a creditor_group
"""
json_payload = [{'name': 'test api group'}]
json_data = server.post('creditor_group', json_payload)
print "Received JSON data from server.post(creditor_group, json_payload):"
print json_data
"""
Update a creditor_group
"""
json_payload = {'name': 'test api group #2'}
json_data2 = server.put('creditor_group', json_data[0]['id'], json_payload)
print "Received boolean from server.put(creditor_group, id, json_payload):"
print json_data2
"""
Delete a creditor_group
"""
json_data = server.delete('creditor_group', json_data[0]['id'])
print "Received boolean from server.delete(creditor_group, id):"
print json_data
"""
Starting the API
"""
creditor_group() | corebyte/factweb-jsonrpc-client | python/samples/creditor_group.py | Python | lgpl-3.0 | 1,170 |
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the hinge loss (standard SVM)
while 'l2' is the squared hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice as it rarely leads
        to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='l2', dual=True, tol=1e-4, C=1.0,
multi_class='ovr', fit_intercept=True, intercept_scaling=1,
class_weight=None, verbose=0, random_state=None, max_iter=1000):
self.penalty = penalty
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss
)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
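# Editor's note: an illustrative usage sketch for LinearSVC (not part of the
# original scikit-learn source). It assumes sklearn.datasets.make_classification
# is available in the same installation.
#
#     from sklearn.svm import LinearSVC
#     from sklearn.datasets import make_classification
#     X, y = make_classification(n_features=4, random_state=0)
#     clf = LinearSVC(random_state=0).fit(X, y)
#     predictions = clf.predict(X)          # class labels, one per sample
#     scores = clf.decision_function(X)     # signed distances to the hyperplane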
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the epsilon-insensitive loss
(standard SVR) while 'l2' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0, loss='l1', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0, random_state=None,
max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
loss = {'l1': 'ei', 'l2': 'se'}.get(self.loss)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, 'l2', self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
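# Editor's note: an illustrative usage sketch for LinearSVR (not part of the
# original scikit-learn source). It assumes LinearSVR is exported from
# sklearn.svm in this version and that sklearn.datasets.make_regression is
# available.
#
#     from sklearn.svm import LinearSVR
#     from sklearn.datasets import make_regression
#     X, y = make_regression(n_features=4, random_state=0)
#     reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
#     y_pred = reg.predict(X)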
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each,
see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, random_state=None):
super(SVC, self).__init__(
'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
probability, cache_size, class_weight, verbose, max_iter,
random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(NuSVC, self).__init__(
'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, cache_size=200,
verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of SVR.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken. Only available if impl='nu_svc'.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outliers Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking: boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficient of the support vector in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
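# Editor's note: an illustrative usage sketch for OneClassSVM (not part of the
# original scikit-learn source); the training data here is random placeholder
# data.
#
#     import numpy as np
#     from sklearn.svm import OneClassSVM
#     rng = np.random.RandomState(0)
#     X_train = rng.randn(100, 2)
#     clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1).fit(X_train)
#     labels = clf.predict(X_train)   # +1 for inliers, -1 for outliers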
| ashhher3/scikit-learn | sklearn/svm/classes.py | Python | bsd-3-clause | 35,052 |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pecan Controllers"""
from cdn.transport.pecan.controllers import root
from cdn.transport.pecan.controllers import services
from cdn.transport.pecan.controllers import v1
# Hoist into package namespace
Root = root.RootController
Services = services.ServicesController
V1 = v1.ControllerV1
| obulpathi/cdn1 | cdn/transport/pecan/controllers/__init__.py | Python | apache-2.0 | 879 |
# -*- coding: utf-8 -*-
"""
Scripts to manage categories.
Syntax: python g13_nudge_bot.py [-from:UNDERSCORED_CATEGORY]
"""
#
# (C) Rob W.W. Hooft, 2004
# (C) Daniel Herding, 2004
# (C) Wikipedian, 2004-2008
# (C) leogregianin, 2004-2008
# (C) Cyde, 2006-2010
# (C) Anreas J Schwab, 2007
# (C) xqt, 2009-2012
# (C) Pywikipedia team, 2008-2012
# (C) Hasteur, 2013
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import os, re, pickle, bz2, time, datetime, sys, logging
import logging.handlers
from dateutil.relativedelta import relativedelta
import pywikibot
from pywikibot import i18n, Bot, config, pagegenerators
#DB CONFIG
from db_handle import *
import pdb
afc_notify_list = []
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp
}
def nudge_drive(category_name):
logger = logging.getLogger('g13_nudge_bot')
page_text = pywikibot.Page(pywikibot.getSite(),
'User:HasteurBot/G13 OptIn Notifications').get()
afc_notify_list = re.findall('\#\[\[User\:(?P<name>.*)\]\]',page_text)
page_match = re.compile('Wikipedia talk:Articles for creation/')
page_match2 = re.compile('Draft:')
ip_regexp = re.compile(r'^(?:(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|'
r'(((?=(?=(.*?(::)))\3(?!.+\4)))\4?|[\dA-F]{1,4}:)'
r'([\dA-F]{1,4}(\4|:\b)|\2){5}'
r'(([\dA-F]{1,4}(\4|:\b|$)|\2){2}|'
r'(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4}))\Z',
re.IGNORECASE)
six_months_ago = (
datetime.datetime.now() + relativedelta(months=-5)
).timetuple()
logger.debug('Opened DB conn')
#Take this out once the full authorization has been given for this bot
potential_article = False
interested_insert = "INSERT INTO interested_notify (article,notified) VALUES (%s, %s)"
nom_cat = pywikibot.Category(
pywikibot.getSite(),
'Category:%s' % category_name
)
potential_articles = set(nom_cat.articles(recurse = True))
for article in potential_articles:
if None != page_match.match(article.title()) or \
None != page_match2.match(article.title()) :
pywikibot.output(article.title())
edit_time = time.strptime( \
article.getLatestEditors()[0]['timestamp'],
"%Y-%m-%dT%H:%M:%SZ"
)
potential_article = True
creator = article.getCreator()[0]
if edit_time < six_months_ago:
#Notify Creator
#Check for already nagged
cur = conn.cursor()
sql_string = "SELECT COUNT(*) FROM g13_records where " + \
"article = %s" + \
" and editor = %s;"
try:
cur.execute(sql_string, (article.title(), creator))
except:
logger.critical("Problem with %s" % article.title())
continue
results = cur.fetchone()
cur = None
if results[0] > 0:
#We already have notified this user
logger.info(u"Already notifified (%s,%s)" %(creator, article.title()))
continue
#Perform a null edit to get the creative Category juices flowing
logger.info('Starting to process %s' % article.title())
article.put(newtext = article.get(), comment="Null Edit", force=True)
logger.debug('Null Edit complete')
user_talk_page_title = "User talk:%s" % creator
user_talk_page = pywikibot.Page(
pywikibot.getSite(),
user_talk_page_title
)
summary = '[[User:HasteurBot]]: Notification of '+\
'[[WP:G13|CSD:G13]] potential nomination of [[%s]]' % (article.title())
notice = "==[[%s]] concern==\n" % (article.title()) +\
"Hi there, I'm [[User:HasteurBot|HasteurBot]]. I "+ \
"just wanted to let you know " + \
"that [[%s]]," %(article.title()) +\
" a page you created, has not been edited in 5 months" +\
". The Articles for Creation space is not an" + \
" indefinite storage location for content that is not " + \
"appropriate for articlespace.\n\n" + \
"If your submission is not edited soon, it could be " + \
"nominated for deletion. If you would like to attempt " + \
"to save it, you will need to improve it.\n\n"
if ip_regexp.match(creator) is None:
notice = notice + "You may request " + \
"[[WP:USERFY|Userfication]] of the content if it " + \
"meets requirements.\n\n"
notice = notice + "If the " + \
"deletion has already occured, instructions on how you " + \
"may be able to retrieve it are available at " + \
"[[WP:REFUND/G13]].\n\n" + \
"Thank you for your attention. ~~~~"
try:
user_talk_text = user_talk_page.get() +"\n"+ notice
except:
user_talk_text = notice
user_talk_page.put(newtext = user_talk_text,
comment = summary,
force=True)
logger.debug('User Notified')
cur = conn.cursor()
sql_string = "INSERT INTO g13_records (article,editor)" + \
"VALUES (%s, %s)"
cur.execute(sql_string, (article.title(),creator))
conn.commit()
logger.debug('DB stored')
cur = None
#Notify Interested parties
#Get List of Editors to the page
editor_list = []
for rev in article.getVersionHistory():
editor_list.append(rev[2])
#Now let's intersect these to see who we get to notify
intersection = set(editor_list) & set(afc_notify_list)
message = '\n==G13 Eligibility==\n[[%s]] has become eligible for G13. ~~~~' % article.title()
while intersection:
editor = intersection.pop()
cur = conn.cursor()
cur.execute(interested_insert, (article.title(),editor))
conn.commit()
#Take this out when finished
if False == potential_article:
log_page = pywikibot.Page(
pywikibot.getSite(),
'User:HasteurBot/Notices'
)
msg = "%s no longer has potential nominations\n\n" % category_name
page_text = log_page.get() + msg
log_page.put(newtext = page_text,comment="Date empty")
logger.critical(msg)
conn.close()
def main(*args):
global catDB
logger = logging.getLogger('g13_nudge_bot')
logger.setLevel(logging.DEBUG)
trfh = logging.handlers.TimedRotatingFileHandler('logs/g13_nudge', \
when='D', \
interval = 3, \
backupCount = 90, \
)
trfh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
trfh.setFormatter(formatter)
logger.addHandler(trfh)
trfh.doRollover()
fromGiven = False
toGiven = False
batchMode = False
editSummary = ''
inPlace = False
overwrite = False
showImages = False
talkPages = False
recurse = False
withHistory = False
titleRegex = None
pagesonly = False
# If this is set to true then the custom edit summary given for removing
# categories from articles will also be used as the deletion reason.
useSummaryForDeletion = True
action = None
sort_by_last_name = False
restore = False
create_pages = False
action = 'listify'
for arg in pywikibot.handleArgs(*args):
if arg.startswith('-from:'):
oldCatTitle = arg[len('-from:'):].replace('_', ' ')
fromGiven = True
nudge_drive(oldCatTitle)
if __name__ == "__main__":
main()
| hasteur/g13bot_tools_new | scripts/g13_nudge_bot.py | Python | mit | 8,119 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for basic HTTP transport implementations.
"""
from suds.transport import *
from suds.properties import Unskin
from urlparse import urlparse
from cookielib import CookieJar
from logging import getLogger
import base64
import httplib
import socket
import sys
import urllib2
log = getLogger(__name__)
class HttpTransport(Transport):
"""
HTTP transport using urllib2. Provided basic http transport
that provides for cookies, proxies but no authentication.
"""
def __init__(self, **kwargs):
"""
@param kwargs: Keyword arguments.
- B{proxy} - An http proxy to be specified on requests.
The proxy is defined as {protocol:proxy,}
- type: I{dict}
- default: {}
- B{timeout} - Set the url open timeout (seconds).
- type: I{float}
- default: 90
"""
Transport.__init__(self)
Unskin(self.options).update(kwargs)
self.cookiejar = CookieJar()
self.proxy = {}
self.urlopener = None
def open(self, request):
try:
url = request.url
log.debug('opening (%s)', url)
u2request = urllib2.Request(url)
self.proxy = self.options.proxy
return self.u2open(u2request)
except urllib2.HTTPError, e:
raise TransportError(str(e), e.code, e.fp)
def send(self, request):
result = None
url = request.url
msg = request.message
headers = request.headers
try:
u2request = urllib2.Request(url, msg, headers)
self.addcookies(u2request)
self.proxy = self.options.proxy
request.headers.update(u2request.headers)
log.debug('sending:\n%s', request)
fp = self.u2open(u2request)
self.getcookies(fp, u2request)
if sys.version_info < (3, 0):
headers = fp.headers.dict
else:
headers = fp.headers
result = Reply(httplib.OK, headers, fp.read())
log.debug('received:\n%s', result)
except urllib2.HTTPError, e:
if e.code in (httplib.ACCEPTED, httplib.NO_CONTENT):
result = None
else:
raise TransportError(e.msg, e.code, e.fp)
return result
def addcookies(self, u2request):
"""
Add cookies in the cookiejar to the request.
@param u2request: A urllib2 request.
        @type u2request: urllib2.Request.
"""
self.cookiejar.add_cookie_header(u2request)
def getcookies(self, fp, u2request):
"""
Add cookies in the request to the cookiejar.
@param u2request: A urllib2 request.
@rtype: u2request: urllib2.Requet.
"""
self.cookiejar.extract_cookies(fp, u2request)
def u2open(self, u2request):
"""
Open a connection.
@param u2request: A urllib2 request.
        @type u2request: urllib2.Request.
@return: The opened file-like urllib2 object.
@rtype: fp
"""
tm = self.options.timeout
url = self.u2opener()
if (sys.version_info < (3, 0)) and (self.u2ver() < 2.6):
socket.setdefaulttimeout(tm)
return url.open(u2request)
return url.open(u2request, timeout=tm)
def u2opener(self):
"""
Create a urllib opener.
@return: An opener.
@rtype: I{OpenerDirector}
"""
if self.urlopener is None:
return urllib2.build_opener(*self.u2handlers())
return self.urlopener
def u2handlers(self):
"""
Get a collection of urllib handlers.
@return: A list of handlers to be installed in the opener.
@rtype: [Handler,...]
"""
handlers = []
handlers.append(urllib2.ProxyHandler(self.proxy))
return handlers
def u2ver(self):
"""
Get the major/minor version of the urllib2 lib.
@return: The urllib2 version.
@rtype: float
"""
try:
part = urllib2.__version__.split('.', 1)
return float('.'.join(part))
except Exception, e:
log.exception(e)
return 0
def __deepcopy__(self, memo={}):
clone = self.__class__()
p = Unskin(self.options)
cp = Unskin(clone.options)
cp.update(p)
return clone
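# Editor's note: an illustrative sketch of wiring these transports into a suds
# client (not part of the original module; the WSDL URL, proxy host and the
# credentials below are placeholders).
#
#     from suds.client import Client
#     from suds.transport.http import HttpAuthenticated
#     t = HttpAuthenticated(username='user', password='secret')
#     client = Client('http://example.com/service?wsdl', transport=t)
#
# A plain HttpTransport can be configured with a proxy and timeout the same way:
#
#     t = HttpTransport(proxy={'http': 'proxyhost:3128'}, timeout=30)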
class HttpAuthenticated(HttpTransport):
"""
Provides basic HTTP authentication for servers that do not follow the
specified challenge/response model. This implementation appends the
I{Authorization} HTTP header with base64 encoded credentials on every HTTP
request.
"""
def open(self, request):
self.addcredentials(request)
return HttpTransport.open(self, request)
def send(self, request):
self.addcredentials(request)
return HttpTransport.send(self, request)
def addcredentials(self, request):
credentials = self.credentials()
if not (None in credentials):
credentials = ':'.join(credentials)
# Bytes and strings are different in Python 3 than in Python 2.x.
if sys.version_info < (3,0):
basic = 'Basic %s' % base64.b64encode(credentials)
else:
encodedBytes = base64.urlsafe_b64encode(credentials.encode())
encodedString = encodedBytes.decode()
basic = 'Basic %s' % encodedString
request.headers['Authorization'] = basic
def credentials(self):
return (self.options.username, self.options.password)
| sfriesel/suds | suds/transport/http.py | Python | lgpl-3.0 | 6,560 |
from Gui import *
g = Gui()
g.title('Gui Title')
# 1ST kind of TEXT
entry = g.en(text='Default text.')
# 2ND kind of TEXT
text = g.te(width=100, height=5)
text.insert(END, 'abc')
text.insert(1.1, 'xyz') # row.column
# Get function:
# text.get(0.0, END)
# text.delete(1.2, END)
g.mainloop() | flake123p/ProjectH | Python/GUI_Tkinter/03_Text/test.py | Python | gpl-3.0 | 296 |
import mdp.nodes
from mdp import numx
import scipy.signal
import numpy as np
class FeedbackNode(mdp.Node):
"""FeedbackNode creates the ability to feed back a certain part of a flow as
input to the flow. It both implements the Node API and the generator API and
can thus be used as input for a flow.
The duration that the feedback node feeds back data can be given. Prior to using
the node as data generator, it should be executed so it can store the previous
state.
When a FeedbackNode is reused several times, reset() should be called prior to
each use which resets the internal counter.
Note that this node keeps state and can thus NOT be used in parallel using threads.
"""
def __init__(self, n_timesteps=1, input_dim=None, dtype=None):
super(FeedbackNode, self).__init__(input_dim=input_dim, output_dim=input_dim, dtype=dtype)
self.n_timesteps = n_timesteps
self.last_value = None
self.current_timestep = 0
def reset(self):
self.current_timestep = 0
def is_trainable(self):
return True
def _train(self, x, y):
self.last_value = mdp.numx.atleast_2d(y[-1, :])
def __iter__(self):
while self.current_timestep < self.n_timesteps:
self.current_timestep += 1
yield self.last_value
def _execute(self, x):
self.last_value = mdp.numx.atleast_2d(x[-1, :])
return x
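# Editor's note: an illustrative sketch of the FeedbackNode generator pattern
# described in the docstring above (not part of the original module; 'flow',
# 'teacher_signal' and 'flow_output_dim' are placeholder names for a trained
# mdp.Flow, its teacher data and its output dimension).
#
#     fb = FeedbackNode(n_timesteps=50, input_dim=flow_output_dim)
#     fb.execute(teacher_signal)     # store the last value as the feedback state
#     fb.reset()                     # restart the internal timestep counter
#     for x_t in fb:                 # yields the stored value, one timestep at a time
#         y_t = flow(x_t)            # run the flow on the fed-back value
#         fb.execute(y_t)            # store the new output so it is fed back next step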
class MeanAcrossTimeNode(mdp.Node):
"""
Compute mean across time (2nd dimension)
"""
def __init__(self, input_dim=None, output_dim=None, dtype='float64'):
super(MeanAcrossTimeNode, self).__init__(input_dim, output_dim, dtype)
def is_trainable(self):
return False
def is_invertible(self):
return False
def _check_train_args(self, x, y):
# set output_dim if necessary
if self._output_dim is None:
self._set_output_dim(y.shape[1])
def _get_supported_dtypes(self):
return ['float32', 'float64']
def _execute(self, x):
e = mdp.numx.atleast_2d(mdp.numx.mean(x, axis=0, dtype=self.dtype))
return e
class WTANode(mdp.Node):
"""
Compute Winner take-all at every timestep (2nd dimension)
"""
def __init__(self, input_dim=None, output_dim=None, dtype='float64'):
super(WTANode, self).__init__(input_dim, output_dim, dtype)
def is_trainable(self):
return False
def is_invertible(self):
return False
def _check_train_args(self, x, y):
#set output_dim if necessary
if self._output_dim is None:
self._set_output_dim(y.shape[1])
def _get_supported_dtypes(self):
return ['float32', 'float64']
def _execute(self, x):
max_indices = mdp.numx.argmax(x, axis=1)
r = -mdp.numx.ones_like(x)
for i in range(r.shape[0]):
r[i, max_indices[i]] = 1
return r
class ShiftNode(mdp.Node):
"""Return input data shifted one or more time steps.
This is useful for architectures in which data from different time steps is
needed. The values that are left over are set to zero.
Negative shift values cause a shift back in time and positive ones forward in time.
"""
def __init__(self, input_dim=None, output_dim=None, n_shifts=1,
dtype='float64'):
super(ShiftNode, self).__init__(input_dim, output_dim, dtype)
self.n_shifts = n_shifts
def is_trainable(self):
        return False
def _execute(self, x):
n = x.shape
assert(n > 1)
ns = self.n_shifts
y = x.copy()
if ns < 0:
y[:ns] = x[-ns:]
y[ns:] = 0
elif ns > 0:
y[ns:] = x[:-ns]
y[:ns] = 0
else:
y = x
return y
def _set_input_dim(self, n):
self._input_dim = n
self._output_dim = n
class ResampleNode(mdp.Node):
""" Resamples the input signal. Based on scipy.signal.resample
CODE FROM: Georg Holzmann
"""
def __init__(self, input_dim=None, ratio=0.5, dtype='float64', window=None):
""" Initializes and constructs a random reservoir.
- input_dim: the number of inputs (output dimension is always the same as input dimension)
- ratio: ratio of up or down sampling (e.g. 0.5 means downsampling to half the samplingrate)
- window: see window parameter in scipy.signal.resample
"""
super(ResampleNode, self).__init__(input_dim, input_dim, dtype)
self.ratio = ratio
self.window = window
def is_trainable(self):
return False
def _get_supported_dtypes(self):
return ['float32', 'float64']
def _execute(self, x):
""" Resample input vector x.
"""
self.oldlength = len(x)
        newlength = int(self.oldlength * self.ratio)
sig = scipy.signal.resample(x, newlength, window=self.window)
return sig.copy()
def _inverse(self, y):
""" Inverse the resampling.
"""
sig = scipy.signal.resample(y, self.oldlength, window=self.window)
return sig.copy()
class TimeFramesNode2(mdp.nodes.TimeFramesNode):
""" An extension of TimeFramesNode that preserves the temporal
length of the data.
"""
def __init__(self, time_frames, input_dim=None, dtype=None):
super(TimeFramesNode2, self).__init__(input_dim=input_dim, dtype=dtype, time_frames=time_frames)
def _execute(self, x):
tf = x.shape[0] - (self.time_frames - 1)
rows = self.input_dim
cols = self.output_dim
y = mdp.numx.zeros((x.shape[0], cols), dtype=self.dtype)
for frame in range(self.time_frames):
y[-tf:, frame * rows:(frame + 1) * rows] = x[frame:frame + tf, :]
return y
def pseudo_inverse(self, y):
pass
class FeedbackShiftNode(mdp.Node):
""" Shift node that can be applied when using generators.
The node works as a delay line with the number of timesteps the lengths of the delay line.
"""
def __init__(self, input_dim=None, output_dim=None, n_shifts=1,
dtype='float64'):
super(FeedbackShiftNode, self).__init__(input_dim, output_dim, dtype)
self.n_shifts = n_shifts
self.y = None
def is_trainable(self):
        return False
def _execute(self, x):
n = x.shape
assert(n > 1)
        if self.y is None:
self.y = np.zeros((self.n_shifts, self._input_dim))
self.y = np.vstack([self.y, x.copy()])
returny = self.y[:x.shape[0], :].copy()
self.y = self.y[x.shape[0]:, :]
return returny
def _set_input_dim(self, n):
self._input_dim = n
self._output_dim = n
class RescaleZMUSNode(mdp.Node):
'''
Rescales the output with the mean and standard deviation seen during training
If 'use_var' is set, the variance is used instead of the standard deviation
Currently for 1 input only!!
'''
def __init__(self, use_var=False, input_dim=None, dtype=None):
super(RescaleZMUSNode, self).__init__(input_dim=input_dim, dtype=dtype)
self._mean = 0
self._std = 0
self._len = 0
self._use_var = use_var
def is_trainable(self):
return True
def _train(self, x):
self._mean += mdp.numx.mean(x) * len(x)
self._std += mdp.numx.sum(x ** 2) - mdp.numx.sum(x) ** 2
self._len += len(x)
def _stop_training(self):
self._mean /= self._len
self._std /= self._len
if self._use_var:
self._std = mdp.numx.sqrt(self._std)
def _execute(self, x):
return (x - self._mean) / self._std
class SupervisedLayer(mdp.hinet.Layer):
"""
An extension of the MDP Layer class that is aware of target labels. This allows for
more flexibility when using supervised techniques.
The SupervisedLayer can mimic the behaviour of both the regular MDP Layer and the
SameInputLayer, with regards to the partitioning of the input training data.
In addition, the SupervisedLayer is also aware of target labels, and can partition
them according to the output dimensions of the contained nodes, or not partition
them at all. The latter is the behaviour of the label-agnostic MDP Layer
and SameInputLayer classes.
The SupervisedLayer has two flags that toggle input and target label partitioning:
* input_partitioning (defaults to True)
* target_partitioning (defaults to False)
The defaults mimic the behaviour of the MDP Layer class. Setting 'input_partitioning'
to False causes SameInputLayer-like behaviour.
Because this class explicitly refers to target labels (second argument of the 'train'
method), it will not work properly when used with unsupervised nodes.
EXAMPLE
A layer could contain 5 regressor nodes, each of which have 4-dimensional input and
3-dimensional target labels. In that case, the input_dim of the layer is 5*4 = 20,
and the output_dim is 5*3 = 15.
A default Layer will split the input data according to the input_dims of the contained
nodes, so the 20 input channels will be split into 5 sets of 4, which is the desired
behaviour.
However, the Layer is unaware of the target labels and simply passes through additional
arguments to the 'train' function to the contained nodes. This means that each of the
regressors will receive the same set of 15-dimensional target labels. The Layer should
instead split the 15 target channels into 5 sets of 3, but it is not capable of doing
so. Replacing the Layer with a SupervisedLayer(input_partitioning=True,
target_partitioning=True) solves this problem.
Another use case is one where the regressors have the same input data, but are trained
with different target labels. In that case, a SuperivsedLayer(input_partitioning=False,
target_partitioning=True) can be used. Using the previous example, each regressor then
has an input_dim of 20 and an output_dim of 3.
"""
def __init__(self, nodes, dtype=None, input_partitioning=True, target_partitioning=False):
self.input_partitioning = input_partitioning
self.target_partitioning = target_partitioning
self.nodes = nodes
# check nodes properties and get the dtype
dtype = self._check_props(dtype)
# set the correct input/output dimensions.
# The output_dim of the Layer is always the sum of the output dims of the nodes,
# Regardless of whether target partitioning is enabled.
output_dim = self._get_output_dim_from_nodes()
# The input_dim of the Layer however depends on whether input partitioning is
# enabled. When input_partitioning is disabled, all contained nodes should have
# the same input_dim and the input_dim of the layer should be equal to it.
if self.input_partitioning:
input_dim = 0
for node in nodes:
input_dim += node.input_dim
else: # all nodes should have same input_dim, input_dim of the layer is equal to this
input_dim = nodes[0].input_dim
for node in nodes:
if not node.input_dim == input_dim:
err = "The nodes have different input dimensions."
raise mdp.NodeException(err)
# intentionally use MRO above Layer, not SupervisedLayer
super(mdp.hinet.Layer, self).__init__(input_dim=input_dim,
output_dim=output_dim,
dtype=dtype)
def is_invertible(self):
return False # inversion is theoretically possible if input partitioning is enabled.
# however, it is not implemented.
def _train(self, x, y, *args, **kwargs):
"""Perform single training step by training the internal nodes."""
x_idx, y_idx = 0, 0
for node in self.nodes:
if self.input_partitioning:
next_x_idx = x_idx + node.input_dim
x_selected = x[:, x_idx:next_x_idx] # selected input dimensions for this node
x_idx = next_x_idx
else:
x_selected = x # use all input dimensions
if self.target_partitioning:
next_y_idx = y_idx + node.output_dim
y_selected = y[:, y_idx:next_y_idx] # select target dimensions for this node
y_idx = next_y_idx
else:
y_selected = y # use all target dimensions
if node.is_training():
node.train(x_selected, y_selected, *args, **kwargs)
def _pre_execution_checks(self, x):
"""Make sure that output_dim is set and then perform normal checks."""
if self.input_partitioning: # behaviour is like Layer, so just use the method of the parent class
super(SupervisedLayer, self)._pre_execution_checks(x)
else: # behaviour is like SameInputLayer
if self.output_dim is None:
# first make sure that the output_dim is set for all nodes
for node in self.nodes:
node._pre_execution_checks(x)
self.output_dim = self._get_output_dim_from_nodes()
if self.output_dim is None:
err = "output_dim must be set at this point for all nodes"
raise mdp.NodeException(err)
# intentionally use MRO above Layer, not SupervisedLayer
super(mdp.hinet.Layer, self)._pre_execution_checks(x)
def _execute(self, x, *args, **kwargs):
"""Process the data through the internal nodes."""
if self.input_partitioning: # behaviour is like Layer, so just use the method of the parent class
return super(SupervisedLayer, self)._execute(x, *args, **kwargs)
else: # behaviour is like SameInputLayer
out_start = 0
out_stop = 0
y = None
for node in self.nodes:
out_start = out_stop
out_stop += node.output_dim
if y is None:
node_y = node.execute(x, *args, **kwargs)
                    y = mdp.numx.zeros([node_y.shape[0], self.output_dim],
                                       dtype=node_y.dtype)
y[:, out_start:out_stop] = node_y
else:
y[:, out_start:out_stop] = node.execute(x, *args, **kwargs)
return y
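# --- Usage sketch (added for illustration; not part of the original Oger code) ---
# A minimal, hedged example of the scenario described in the SupervisedLayer
# docstring: five supervised nodes, each with 4 input and 3 target channels, so
# the layer sees 20 input and 15 target channels in total. It assumes that
# mdp.nodes.LinearRegressionNode is available and accepts the standard
# input_dim/output_dim keywords; substitute whatever supervised node you
# actually use. The function is defined but never called, so importing this
# module stays free of side effects.
def _example_supervised_layer():
    import numpy
    regressors = [mdp.nodes.LinearRegressionNode(input_dim=4, output_dim=3)
                  for _ in range(5)]
    layer = SupervisedLayer(regressors,
                            input_partitioning=True,
                            target_partitioning=True)
    x = numpy.random.rand(100, 20)   # 5 nodes * 4 input channels each
    y = numpy.random.rand(100, 15)   # 5 nodes * 3 target channels each
    layer.train(x, y)
    layer.stop_training()
    return layer.execute(x)          # expected shape: (100, 15)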
class MaxVotingNode(mdp.Node):
"""
This node finds the maximum value of all input channels at each timestep,
and returns the corresponding label.
If no labels are supplied, the index of the channel is returned.
"""
def __init__(self, labels=None, input_dim=None, dtype='float64'):
super(MaxVotingNode, self).__init__(input_dim, 1, dtype) # output_dim is always 1
if labels is None:
self.labels = None
else:
self.labels = np.asarray(labels)
def is_trainable(self):
return False
def is_invertible(self):
return False
def _get_supported_dtypes(self):
return ['float32', 'float64']
def _execute(self, x):
if self.labels is None:
self.labels = np.arange(self.input_dim) # default labels = channel indices
indices = np.atleast_2d(np.argmax(x, 1)).T
return self.labels[indices]
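# --- Usage sketch (added for illustration; not part of the original Oger code) ---
# MaxVotingNode takes an argmax over the input channels at each timestep and
# maps the winning channel index to a label. The labels and score values below
# are made up for the example; the function is defined but never called.
def _example_max_voting():
    node = MaxVotingNode(labels=['a', 'b', 'c'])
    scores = np.array([[0.1, 0.7, 0.2],
                       [0.9, 0.05, 0.05]])
    return node.execute(scores)      # expected result: [['b'], ['a']]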
| npinto/Oger | Oger/nodes/utility_nodes.py | Python | gpl-3.0 | 15,581 |
from gge.GameObject import GameObject
from gge.DisplayTypes import Resolution, Fullscreen, DisplayRep
from gge.Types import Position
import pygame
from collections import defaultdict, namedtuple
PosRep = namedtuple("PosRep", "pos rep")
class PygameDisplayObject(GameObject):
"""Assumes pygame is initialized.
Attributes:
Resolution"""
def __init__(self, gge):
super(PygameDisplayObject, self).__init__(gge)
self.setAttribute(Resolution)
res = self.getAttribute(Resolution)
res.newListener(self.__handleResolution)
self.setAttribute(Fullscreen, value=False)
full = self.getAttribute(Fullscreen)
full.newListener(self.__handleFullscreen)
def update(self, dt):
reps_bgd = defaultdict(list)
reps_fgd = defaultdict(list)
reps_hud = defaultdict(list)
for game_object in self.gge.getGameObjects():
rep = game_object.getAttributeValue(DisplayRep)
if not rep:
continue
pos_rep = PosRep(game_object.getAttributeValue(Position), rep)
if rep.layer.name == "background":
reps_bgd[rep.layer.number].append(pos_rep)
elif rep.layer.name == "foreground":
reps_fgd[rep.layer.number].append(pos_rep)
elif rep.layer.name == "hud":
reps_hud[rep.layer.number].append(pos_rep)
self.__display.fill((255, 255, 255))
self.__drawLayer(reps_bgd)
self.__drawLayer(reps_fgd)
self.__drawLayer(reps_hud)
pygame.display.flip()
def getSystemFonts(self):
return pygame.font.get_fonts()
def __updateDisplay(self):
res = self.getAttributeValue(Resolution)
flags = pygame.DOUBLEBUF | pygame.HWSURFACE
if self.getAttributeValue(Fullscreen):
flags |= pygame.FULLSCREEN
self.__display = pygame.display.set_mode(res, flags)
def __handleResolution(self, value):
self.__updateDisplay()
def __handleFullscreen(self, value):
self.__updateDisplay()
def __drawLayer(self, pos_reps):
for num in sorted(pos_reps.keys()):
for pos_rep in pos_reps[num]:
self.__drawRepresentation(pos_rep)
def __drawRepresentation(self, pos_rep):
pos = pos_rep.pos
rep = pos_rep.rep
# images = rep.images
# for source, offset in images:
# self.__drawImage(source, offset)
for shape in rep.shapes:
self.__drawShape(pos, shape)
for text in rep.text:
self.__drawText(pos, text)
def __drawShape(self, pos, shape):
x = pos.x + shape.offset.x
y = pos.y + shape.offset.y
rect = pygame.Rect(x, y, shape.size.w, shape.size.h)
self.__display.fill(shape.color.fill.value, rect)
if shape.lineWidth > 0:
pygame.draw.rect(self.__display, shape.color.line.value, rect,
shape.lineWidth)
def __drawText(self, pos, text):
x = pos.x + text.offset.x
y = pos.y + text.offset.y
# TODO: cache fonts? (or image)
font = pygame.font.SysFont(text.font, text.size)
txt = font.render(text.text, True, text.color.value)
rect = txt.get_rect(topleft=(x, y))
self.__display.blit(txt, rect)
# def __drawImage(self, source, offset):
# print source, offset
# image = self.__getImage(source)
# def __getImage(self, source):
# # Retrieve or load the image
# return None
| Bredgren/GenericGameEngine | python/gge/PygameDisplayObject.py | Python | gpl-3.0 | 3,684 |
#!/usr/bin/python2.4
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
"""module describing a directory packaging object
This module contains the DirectoryAction class, which represents a
directory-type packaging object."""
import os
import errno
import stat
import generic
import pkg.portable as portable
import pkg.actions
class DirectoryAction(generic.Action):
"""Class representing a directory-type packaging object."""
name = "dir"
attributes = ("mode", "owner", "group", "path")
key_attr = "path"
def __init__(self, data=None, **attrs):
generic.Action.__init__(self, data, **attrs)
if "path" in self.attrs:
self.attrs["path"] = self.attrs["path"].lstrip(
os.path.sep)
if not self.attrs["path"]:
raise pkg.actions.InvalidActionError(
str(self), _("Empty path attribute"))
def compare(self, other):
return cmp(self.attrs["path"], other.attrs["path"])
def directory_references(self):
return [os.path.normpath(self.attrs["path"])]
def preinstall(self, pkgplan, orig):
"""Check if the referenced user and group exist."""
self.pre_get_uid_gid(pkgplan.image)
def install(self, pkgplan, orig):
"""Client-side method that installs a directory."""
path = self.attrs["path"]
mode = int(self.attrs["mode"], 8)
owner, group = self.get_uid_gid(pkgplan.image)
if orig:
omode = int(orig.attrs["mode"], 8)
oowner = pkgplan.image.get_user_by_name(
orig.attrs["owner"])
ogroup = pkgplan.image.get_group_by_name(
orig.attrs["group"])
path = os.path.normpath(os.path.sep.join(
(pkgplan.image.get_root(), path)))
# XXX Hack! (See below comment.)
if not portable.is_admin():
mode |= 0200
if not orig:
try:
self.makedirs(path, mode = mode)
except OSError, e:
if e.errno != errno.EEXIST:
raise
# The downside of chmodding the directory is that as a non-root
# user, if we set perms u-w, we won't be able to put anything in
# it, which is often not what we want at install time. We save
# the chmods for the postinstall phase, but it's always possible
# that a later package install will want to place something in
# this directory and then be unable to. So perhaps we need to
# (in all action types) chmod the parent directory to u+w on
                # failure, and chmod it back afterwards. The trick is to
# recognize failure due to missing file_dac_write in contrast to
# other failures. Or can we require that everyone simply have
# file_dac_write who wants to use the tools. Probably not.
elif mode != omode:
os.chmod(path, mode)
if not orig or oowner != owner or ogroup != group:
try:
portable.chown(path, owner, group)
except OSError, e:
if e.errno != errno.EPERM and \
e.errno != errno.ENOSYS:
raise
def verify(self, img, **args):
""" make sure directory is correctly installed"""
lstat, errors, abort = \
self.verify_fsobj_common(img, stat.S_IFDIR)
return errors
def remove(self, pkgplan):
localpath = os.path.normpath(self.attrs["path"])
path = os.path.normpath(os.path.sep.join(
(pkgplan.image.get_root(), localpath)))
try:
os.rmdir(path)
except OSError, e:
if e.errno == errno.ENOENT:
pass
elif e.errno == errno.EEXIST or \
e.errno == errno.ENOTEMPTY:
# cannot remove directory since it's
# not empty...
pkgplan.image.salvagedir(localpath)
elif e.errno != errno.EACCES: # this happens on Windows
raise
def generate_indices(self):
"""Generates the indices needed by the search dictionary. See
generic.py for a more detailed explanation."""
return [
("directory", "basename",
os.path.basename(self.attrs["path"].rstrip(os.path.sep)),
None),
("directory", "path", os.path.sep + self.attrs["path"],
None)
]
| marcellodesales/svnedge-console | ext/windows/pkg-toolkit/pkg/vendor-packages/pkg/actions/directory.py | Python | agpl-3.0 | 6,191 |
import math
import Tkinter
NODE_FILL = 'blue' # color of idle nodes
NODE_FILL_ACTIVE = 'purple' # color of active node
EDGE_FILL = 'black' # color of edges
EDGE_LEN = 30
RAD = 3 # radius of node drawing
class Tree:
def __init__(self, cv):
self.ids = [] # tkinter id
self.pos = [0] # position in self.string
self.dir = [0] # absolute direction (clockwise)
self.string = '' # L-string
self.undostack = []
self.cv = cv
# add node: n=abs direction of node, id=id of parent
def add_v(self, n, id):
self.undostack.append((self.string, self.pos, self.dir))
order = self.ids.index(id) # order of parent
relative = (n - self.dir[order]) % 24 # relative direction to parent
pos = self.pos[order] # position of parent in self.string
if relative <= 12:
nodestring = '[' + 'R' * relative + 'F]' # string representing new branch for the node
else:
nodestring = '[' + 'L' * (24 - relative) + 'F]'
# update string, pos and dir
self.string = self.string[:pos] + nodestring + self.string[pos:]
self.pos = self.pos[:order + 1] + [self.pos[order] + len(nodestring) - 1] + \
[x + len(nodestring) for x in self.pos[order + 1:]]
self.dir = self.dir[:order + 1] + [n] + self.dir[order + 1:]
def draw(self):
self.cv.delete(Tkinter.ALL) # clear canvas
dir = 0 # absolute direction of drawing
vec = (0.0, -1.0) # vector to represent direction of drawing
stack = [] # stack to aid parenthesizing
coords = (150, 250) # current position of pen
self.ids = [self.cv.create_oval(coords[0] - RAD, coords[1] - RAD,
coords[0] + RAD, coords[1] + RAD,
fill=NODE_FILL, tags='vertex')] # first node
for i in range(len(self.string)):
if self.string[i] == '[':
stack.append((coords[0], coords[1], vec[0], vec[1], dir)) # store current position
elif self.string[i] == ']':
# restore positions from stack
popped = stack.pop()
coords = popped[:2]
vec = popped[2:4]
dir = popped[4]
elif self.string[i] == 'R':
# update direction and vector of direction
dir = (dir + 1) % 24
elif self.string[i] == 'L':
# update direction and vector of direction
dir = (dir - 1) % 24
elif self.string[i] == 'F':
vec = (math.cos(((dir - 6) % 24) * math.pi / 12), math.sin(((dir - 6) % 24) * math.pi / 12))
# draw line and node
self.cv.create_line(coords[0], coords[1],
coords[0] + EDGE_LEN * vec[0], coords[1] + EDGE_LEN * vec[1],
tags='edge')
coords = (coords[0] + EDGE_LEN * vec[0], coords[1] + EDGE_LEN * vec[1])
self.ids += [self.cv.create_oval(
coords[0] - RAD, coords[1] - RAD, coords[0] + RAD, coords[1] + RAD,
fill=NODE_FILL, tags='vertex')] # add node ids to self.ids
def get_string(self, s):
if s == 0:
return ''.join(map(lambda i: self.string[i] + 'X' if (self.string[i] == 'F') else self.string[i],
range(len(self.string))))
elif s == 1:
return ''.join(map(lambda i: 'X' + self.string[i] if (i != 0 and self.string[i - 1:i+1] == 'F]')
else self.string[i],
range(len(self.string))))
def undo(self):
if self.undostack:
info = self.undostack.pop()
self.string = info[0]
self.pos = info[1]
self.dir = info[2]
self.draw()
def reset(self):
self.ids = []
self.pos = [0]
self.dir = [0]
self.string = ''
self.draw()
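# --- Worked example (added for illustration; not part of the original file) ---
# Directions live on a 24-step clock, i.e. 15 degrees per step (math.pi / 12
# radians in draw()). Suppose the parent node points in absolute direction 3
# and add_v is called with n=7: relative = (7 - 3) % 24 = 4 <= 12, so the new
# branch is encoded as '[RRRRF]'. Had the turn exceeded 12 steps, the shorter
# encoding with left turns, '[' + 'L' * (24 - relative) + 'F]', would be used.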
| mntalateyya/Shapes_Studio | Tree_Repr.py | Python | apache-2.0 | 4,054 |
from pandac.PandaModules import *
from otp.otpbase.OTPGlobals import *
from direct.gui.DirectGui import *
from MultiPageTextFrame import *
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPLocalizer
from otp.otpgui import OTPDialog
from direct.fsm import ClassicFSM, State
class PrivacyPolicyTextPanel(getGlobalDialogClass()):
notify = DirectNotifyGlobal.directNotify.newCategory('PrivacyPolicyTextPanel')
def __init__(self, doneEvent, hidePageNum = 0, pageChangeCallback = None, textList = []):
dialogClass = getGlobalDialogClass()
dialogClass.__init__(self, parent=aspect2d, dialogName='privacyPolicyTextDialog', doneEvent=doneEvent, okButtonText=OTPLocalizer.PrivacyPolicyClose, style=OTPDialog.Acknowledge, text='', topPad=1.5, sidePad=1.2, pos=(0, 0, -.55), scale=0.9)
self.privacyPolicyText = MultiPageTextFrame(parent=self, textList=textList, hidePageNum=hidePageNum, pageChangeCallback=pageChangeCallback, pos=(0, 0, 0.7), width=2.4, height=1.5)
self['image'] = self['image']
self['image_pos'] = (0, 0, 0.65)
self['image_scale'] = (2.7, 1, 1.9)
closeButton = self.getChild(0)
closeButton.setZ(-.13)
class PrivacyPolicyPanel(getGlobalDialogClass()):
notify = DirectNotifyGlobal.directNotify.newCategory('PrivacyPolicyPanel')
def __init__(self, doneEvent, hidePageNum = 0, pageChangeCallback = None, textList = 1):
dialogClass = getGlobalDialogClass()
dialogClass.__init__(self, parent=aspect2d, dialogName='privacyPolicyDialog', doneEvent=doneEvent, okButtonText=OTPLocalizer.PrivacyPolicyClose, style=OTPDialog.Acknowledge, text='', topPad=1.5, sidePad=1.2, pos=(0, 0, -.15), scale=0.6)
self.chatPrivacyPolicy = None
self.fsm = ClassicFSM.ClassicFSM('privacyPolicyPanel', [State.State('off', self.enterOff, self.exitOff),
State.State('version1Adult', self.enterVersion1Adult, self.exitPrivacyPolicy),
State.State('version1Kids', self.enterVersion1Kids, self.exitPrivacyPolicy),
State.State('version2Adult', self.enterVersion2Adult, self.exitPrivacyPolicy),
State.State('version2Kids', self.enterVersion2Kids, self.exitPrivacyPolicy)], 'off', 'off')
self.fsm.enterInitialState()
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
moreButtonImage = (guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR'))
DirectFrame(self, pos=(-0.4, 0.1, 0.4), relief=None, text=OTPLocalizer.PrivacyPolicyText_Intro, text_align=TextNode.ALeft, text_wordwrap=28, text_scale=0.09, text_pos=(-0.82, 1.0), textMayChange=0)
textScale = 0.05
buttonFrame = DirectFrame(self, pos=(0.0, 0.1, 0.0), scale=1.4, relief=None)
DirectButton(buttonFrame, image=moreButtonImage, image_scale=(1.75, 1.0, 1.0), relief=None, text=OTPLocalizer.ActivateChatPrivacyPolicy_Button1A, text_scale=textScale, text_pos=(0, -0.01), textMayChange=0, pos=(-0.45, 0.0, 0.4), command=self.__handlePrivacyPolicy, extraArgs=['version1Adult'])
DirectButton(buttonFrame, image=moreButtonImage, image_scale=(1.75, 1.0, 1.0), relief=None, text=OTPLocalizer.ActivateChatPrivacyPolicy_Button1K, text_scale=textScale, text_pos=(0, -0.01), textMayChange=0, pos=(-0.45, 0.0, 0.2), command=self.__handlePrivacyPolicy, extraArgs=['version1Kids'])
DirectButton(buttonFrame, image=moreButtonImage, image_scale=(1.75, 1.0, 1.0), relief=None, text=OTPLocalizer.ActivateChatPrivacyPolicy_Button2A, text_scale=textScale, text_pos=(0, -0.01), textMayChange=0, pos=(0.45, 0.0, 0.4), command=self.__handlePrivacyPolicy, extraArgs=['version2Adult'])
DirectButton(buttonFrame, image=moreButtonImage, image_scale=(1.75, 1.0, 1.0), relief=None, text=OTPLocalizer.ActivateChatPrivacyPolicy_Button2K, text_scale=textScale, text_pos=(0, -0.01), textMayChange=0, pos=(0.45, 0.0, 0.2), command=self.__handlePrivacyPolicy, extraArgs=['version2Kids'])
self['image'] = self['image']
self['image_pos'] = (0, 0, 0.65)
self['image_scale'] = (2.7, 1, 1.9)
closeButton = self.getChild(0)
closeButton.setZ(-.13)
return
def delete(self):
self.ignoreAll()
del self.fsm
if self.chatPrivacyPolicy:
self.chatPrivacyPolicy.destroy()
self.chatPrivacyPolicy = None
return
def __handlePrivacyPolicy(self, state, *oooo):
self.fsm.request(state)
def __privacyPolicyTextDone(self):
self.exitPrivacyPolicy()
def enterPrivacyPolicy(self, textList):
        if self.chatPrivacyPolicy is None:
self.chatPrivacyPolicy = PrivacyPolicyTextPanel('privacyPolicyTextDone', textList=textList)
self.chatPrivacyPolicy.show()
self.acceptOnce('privacyPolicyTextDone', self.__privacyPolicyTextDone)
return
def exitPrivacyPolicy(self):
self.ignore('privacyPolicyTextDone')
if self.chatPrivacyPolicy:
cleanupDialog('privacyPolicyTextDialog')
self.chatPrivacyPolicy = None
return
def enterVersion1Adult(self):
self.enterPrivacyPolicy(OTPLocalizer.PrivacyPolicyText_1A)
def enterVersion1Kids(self):
self.enterPrivacyPolicy(OTPLocalizer.PrivacyPolicyText_1K)
def enterVersion2Adult(self):
self.enterPrivacyPolicy(OTPLocalizer.PrivacyPolicyText_2A)
def enterVersion2Kids(self):
self.enterPrivacyPolicy(OTPLocalizer.PrivacyPolicyText_2K)
def enterOff(self):
self.ignoreAll()
self.exitPrivacyPolicy()
def exitOff(self):
pass
| ksmit799/Toontown-Source | otp/login/PrivacyPolicyPanel.py | Python | mit | 5,598 |
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#!/usr/bin/env python
command = oslinfo("-v test")
| imageworks/OpenShadingLanguage | testsuite/oslinfo-noparams/run.py | Python | bsd-3-clause | 223 |
import collections
from typing import List
class Solution:
def subdomainVisits(self, cpdomains: List[str]) -> List[str]:
table = collections.Counter()
for cpdomain in cpdomains:
wi = cpdomain.index(' ')
count = int(cpdomain[:wi])
names = cpdomain[wi+1:].split('.')
for i in range(len(names)):
table['.'.join(names[i:])] += count
return ['{} {}'.format(v, k) for k, v in table.items()]
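# --- Worked example (added for illustration; not part of the original solution) ---
# For cpdomains = ["9001 discuss.leetcode.com"], the entry splits into the
# count 9001 and the labels ['discuss', 'leetcode', 'com'], so 9001 visits are
# credited to "discuss.leetcode.com", "leetcode.com" and "com":
#   Solution().subdomainVisits(["9001 discuss.leetcode.com"])
#   -> ['9001 discuss.leetcode.com', '9001 leetcode.com', '9001 com']
# (the relative order of the output entries is not significant)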
| jiadaizhao/LeetCode | 0801-0900/0811-Subdomain Visit Count/0811-Subdomain Visit Count.py | Python | mit | 453 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para cinemax_rs
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "cinemax_rs"
__category__ = "F,S"
__type__ = "generic"
__title__ = "Filme-noi.com"
__language__ = "ES"
__creationdate__ = "20131223"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("pelisalacarta.channels.cinemax_rs mainlist")
item.url="http://www.cinemaxx.ro/newvideos.html";
return novedades(item)
def novedades(item):
logger.info("pelisalacarta.channels.cinemax_rs novedades")
itemlist = []
# Download page
data = scrapertools.cachePage(item.url)
'''
<li>
<a href="http://www.cinemaxx.ro/tomorrowland-2015_d908479d6.html">
<span class="dummy"></span>
<span class="title">Tomorrowland (2015)</span>
<img src="http://www.cinemaxx.ro/uploads/thumbs/d908479d6-1.jpg" alt="Tomorrowland (2015)"/>
</a>
<div class="meta">Adaugat <time datetime="2015-06-22T16:17:58-0400" title="Monday, June 22, 2015 4:17 PM">1 luna in urma</time></div>
</li>
'''
patron = '<li[^<]+<a href="([^"]+)"[^<]+'
patron += '<span class="dummy[^<]+</span[^<]+'
patron += '<span class="title"[^<]+</span[^<]+'
patron += '<img src="([^"]+)" alt="([^"]+)"'
# Extract elements
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot=""
if (DEBUG): logger.info("url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], title=["+scrapedtitle+"]")
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , viewmode="movie", folder=True) )
# Next page
    next_page_url = scrapertools.find_single_match(data,'<li[^<]+<a href="([^"]+)">\&raquo\;</a>')
if next_page_url!="":
itemlist.append( Item(channel=__channel__, action="novedades", title=">> Next page" , url=next_page_url , folder=True) )
return itemlist
# Verificación automática de canales: Esta función debe devolver "True" si está ok el canal.
def test():
# mainlist
novedades_items = mainlist(Item())
# Da por bueno el canal si alguno de los vídeos de "Novedades" devuelve mirrors
bien = False
for singleitem in novedades_items:
mirrors_items = findvideos( item=singleitem )
for mirror_item in mirrors_items:
video_items = play(mirror_item)
if len(video_items)>0:
return True
return False
| Zanzibar82/pelisalacarta | python/main-classic/channels/cinemax_rs.py | Python | gpl-3.0 | 2,975 |
from blaze.datashape import *
w = TypeVar('w')
x = TypeVar('x')
y = TypeVar('y')
z = TypeVar('z')
n = TypeVar('n')
Quaternion = complex64*(z*y*x*w)
RGBA = Record(R=int16, G=int16, B=int16, A=int8)
File = string*n
def setUp():
Type.register('Quaternion', Quaternion)
Type.register('RGBA', RGBA)
Type.register('File', File)
def test_custom_type():
p1 = datashape('800, 600, RGBA')
assert p1[2] is RGBA
# We want to build records out of custom type aliases
p2 = datashape('Record(x=Quaternion, y=Quaternion)')
def test_custom_stream():
p1 = datashape('Stream, RGBA')
def test_custom_csv_like():
# A csv-like file is a variable-length strings
p1 = datashape('n, string')
p2 = datashape('File')
assert p1._equal(p2)
| davidcoallier/blaze | blaze/datashape/tests/test_custom.py | Python | bsd-2-clause | 767 |
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd
# Copyright (C) 2005 Jody Belka
#============================================================================
# This code based on tools/python/xen/xend/server/iopif.py and modified
# to handle interrupts
#============================================================================
import types
import xen.lowlevel.xc
from xen.xend import sxp
from xen.xend.XendError import VmError
from xen.xend.server.DevController import DevController
xc = xen.lowlevel.xc.xc()
class IRQController(DevController):
def __init__(self, vm):
DevController.__init__(self, vm)
def getDeviceDetails(self, config):
"""@see DevController.getDeviceDetails"""
def get_param(field):
try:
val = config.get(field)
if not val:
raise VmError('irq: Missing %s config setting' % field)
                if isinstance(val, types.StringType):
                    return int(val, 10)
                else:
                    return val
except:
raise VmError('irq: Invalid config setting %s: %s' %
(field, val))
pirq = get_param('irq')
rc = xc.domain_irq_permission(domid = self.getDomid(),
pirq = pirq,
allow_access = True)
if rc < 0:
#todo non-fatal
raise VmError(
'irq: Failed to configure irq: %d' % (pirq))
return (None, {}, {})
| mikesun/xen-cow-checkpointing | tools/python/xen/xend/server/irqif.py | Python | gpl-2.0 | 2,481 |
__author__ = 'mworden'
# A regex used to match any characters
ANY_CHARS_REGEX = r'.*'
# A regex used to match any characters
ANY_NON_SPACE_CHARS_REGEX = r'([^\s]*)'
# A regex used to match a single space
SPACE_REGEX = ' '
# A regex used to match the end of a line
END_OF_LINE_REGEX = r'(?:\r\n|\n)'
# A regex used to match a float value
FLOAT_REGEX = r'(?:[+-]?[0-9]|[1-9][0-9])+\.[0-9]+'
# A regex used to match a value in scientific notation
SCIENTIFIC_REGEX = r'([+-]?[0-9]\.[0-9]+)e([+-][0-9][0-9])'
# A regex used to match an int value
INT_REGEX = r'[+-]?[0-9]+'
# A regex used to match an unsigned int value
UNSIGNED_INT_REGEX = r'[0-9]+'
# A regex used to match against one or more tab characters
MULTIPLE_TAB_REGEX = r'\t+'
# A regex used to match against one or more whitespace characters
ONE_OR_MORE_WHITESPACE_REGEX = r'\s+'
# A regex to match ASCII-HEX characters
ASCII_HEX_CHAR_REGEX = r'[0-9A-Fa-f]'
# A regex used to match a date in the format YYYY/MM/DD
DATE_YYYY_MM_DD_REGEX = r'(\d{4})\/(\d{2})\/(\d{2})'
# A regex used to match a date in the format YYYY-MM-DD, YYYY/MM/DD, YYYYMMDD and YYYY-MM, YYYY/MM and YYYYMM
DATE2_YYYY_MM_DD_REGEX = r'(\d{4})[-\/]?(\d{2})[-\/]?(\d{2})?'
# A regex used to match time in the format of HH:MM:SS.mmm
TIME_HR_MIN_SEC_MSEC_REGEX = r'(\d{2}):(\d{2}):(\d{2})\.(\d{3})'
# A regex used to match a date in the format MM/DD/YYYY
DATE_MM_DD_YYYY_REGEX = r'(\d{2})/(\d{2})/(\d{4})'
# A regex used to match time in the format of HH:MM:SS
TIME_HR_MIN_SEC_REGEX = r'(\d{2}):(\d{2}):(\d{2})'
# A regex for a common three character month abbreviation
THREE_CHAR_MONTH_REGEX = r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)'
# A regex for a common three character day of week abbreviation
THREE_CHAR_DAY_OF_WEEK_REGEX = r'(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)'
# Date related regex patterns
DATE_DAY_REGEX = r'\d{2}'
DATE_YEAR_REGEX = r'\d{4}'
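# --- Usage sketch (added for illustration; not part of the original module) ---
# The constants above are plain pattern fragments meant to be composed and
# compiled by callers. The sample line and the way the groups are returned
# here are assumptions made for the example, not taken from any real parser
# in this package. The function is defined but never called.
def _example_timestamp_match():
    import re
    pattern = re.compile(DATE_YYYY_MM_DD_REGEX + SPACE_REGEX + TIME_HR_MIN_SEC_REGEX)
    match = pattern.match('2014/07/01 12:30:45 sample record')
    if match is not None:
        return match.groups()        # ('2014', '07', '01', '12', '30', '45')
    return None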
| danmergens/mi-instrument | mi/dataset/parser/common_regexes.py | Python | bsd-2-clause | 1,902 |