repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable)
---|---|---|---|---
maljac/odoomrp-utils | refs/heads/8.0 | sale_order_line_form_button/__init__.py | 379 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import models
|
kenwang815/KodiPlugins | refs/heads/master | script.module.oceanktv/lib/youtube_dl/extractor/cultureunplugged.py | 17 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class CultureUnpluggedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cultureunplugged\.com/documentary/watch-online/play/(?P<id>\d+)(?:/(?P<display_id>[^/]+))?'
_TESTS = [{
'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662/The-Next--Best-West',
'md5': 'ac6c093b089f7d05e79934dcb3d228fc',
'info_dict': {
'id': '53662',
'display_id': 'The-Next--Best-West',
'ext': 'mp4',
'title': 'The Next, Best West',
'description': 'md5:0423cd00833dea1519cf014e9d0903b1',
            'thumbnail': r're:^https?://.*\.jpg$',
'creator': 'Coldstream Creative',
'duration': 2203,
'view_count': int,
}
}, {
'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id') or video_id
movie_data = self._download_json(
'http://www.cultureunplugged.com/movie-data/cu-%s.json' % video_id, display_id)
video_url = movie_data['url']
title = movie_data['title']
description = movie_data.get('synopsis')
creator = movie_data.get('producer')
duration = int_or_none(movie_data.get('duration'))
view_count = int_or_none(movie_data.get('views'))
thumbnails = [{
'url': movie_data['%s_thumb' % size],
'id': size,
'preference': preference,
} for preference, size in enumerate((
'small', 'large')) if movie_data.get('%s_thumb' % size)]
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'description': description,
'creator': creator,
'duration': duration,
'view_count': view_count,
'thumbnails': thumbnails,
}
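def _demo_extract():
    # Added illustrative sketch (not part of the original extractor): this
    # extractor is reached through youtube_dl's public API. Assumes youtube_dl
    # is installed and the site is reachable.
    import youtube_dl
    with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
        info = ydl.extract_info(
            'http://www.cultureunplugged.com/documentary/watch-online/play/53662',
            download=False)
        return info.get('title')  # e.g. 'The Next, Best West'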
|
synasius/django | refs/heads/master | django/contrib/sessions/backends/cache.py | 227 | from django.conf import settings
from django.contrib.sessions.backends.base import CreateError, SessionBase
from django.core.cache import caches
from django.utils.six.moves import range
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
cache_key_prefix = KEY_PREFIX
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return self.cache_key_prefix + self._get_or_create_session_key()
def load(self):
try:
session_data = self._cache.get(self.cache_key)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
session_data = None
if session_data is not None:
return session_data
self._session_key = None
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in range(10000):
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError(
"Unable to create a new session key. "
"It is likely that the cache is unavailable.")
def save(self, must_create=False):
if self.session_key is None:
return self.create()
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.cache_key,
self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
return session_key and (self.cache_key_prefix + session_key) in self._cache
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(self.cache_key_prefix + session_key)
@classmethod
def clear_expired(cls):
pass
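def _demo_roundtrip():
    # Added illustrative sketch (not part of the original file): round-trip a
    # value through the cache-backed store. Assumes a configured Django
    # settings module whose SESSION_CACHE_ALIAS points at a working cache.
    session = SessionStore()
    session['user_id'] = 42
    session.save()
    restored = SessionStore(session_key=session.session_key)
    assert restored.load().get('user_id') == 42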
|
ted-gould/nova | refs/heads/master | nova/compute/monitors/__init__.py | 18 | # Copyright 2013 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource monitor API specification.
"""
from oslo_config import cfg
from oslo_log import log as logging
from stevedore import enabled
from nova.i18n import _LW
compute_monitors_opts = [
cfg.MultiStrOpt('compute_available_monitors',
deprecated_for_removal=True,
default=None,
help='Monitor classes available to the compute which may '
'be specified more than once. This option is '
'DEPRECATED and no longer used. Use setuptools entry '
'points to list available monitor plugins.'),
cfg.ListOpt('compute_monitors',
default=[],
help='A list of monitors that can be used for getting '
'compute metrics. You can use the alias/name from '
'the setuptools entry points for nova.compute.monitors.* '
'namespaces. If no namespace is supplied, the "cpu." '
'namespace is assumed for backwards-compatibility. '
'An example value that would enable both the CPU and '
'NUMA memory bandwidth monitors that used the virt '
'driver variant: '
'["cpu.virt_driver", "numa_mem_bw.virt_driver"]'),
]
CONF = cfg.CONF
CONF.register_opts(compute_monitors_opts)
LOG = logging.getLogger(__name__)
class MonitorHandler(object):
NAMESPACES = [
'nova.compute.monitors.cpu',
]
def __init__(self, resource_tracker):
# Dictionary keyed by the monitor type namespace. Value is the
# first loaded monitor of that namespace or False.
self.type_monitor_loaded = {ns: False for ns in self.NAMESPACES}
self.monitors = []
for ns in self.NAMESPACES:
plugin_mgr = enabled.EnabledExtensionManager(
namespace=ns,
invoke_on_load=True,
check_func=self.check_enabled_monitor,
invoke_args=(resource_tracker,)
)
self.monitors += [ext.obj for ext in plugin_mgr]
def check_enabled_monitor(self, ext):
"""Ensures that only one monitor is specified of any type."""
# The extension does not have a namespace attribute, unfortunately,
# but we can get the namespace by examining the first part of the
# entry_point_target attribute, which looks like this:
# 'nova.compute.monitors.cpu.virt_driver:Monitor'
ept = ext.entry_point_target
ept_parts = ept.split(':')
namespace_parts = ept_parts[0].split('.')
namespace = '.'.join(namespace_parts[0:-1])
if self.type_monitor_loaded[namespace] is not False:
msg = _LW("Excluding %(namespace)s monitor %(monitor_name)s. "
"Already loaded %(loaded_monitor)s.")
msg = msg % {
'namespace': namespace,
'monitor_name': ext.name,
'loaded_monitor': self.type_monitor_loaded[namespace]
}
LOG.warn(msg)
return False
# NOTE(jaypipes): We used to only have CPU monitors, so
# CONF.compute_monitors could contain "virt_driver" without any monitor
# type namespace. So, to maintain backwards-compatibility with that
# older way of specifying monitors, we first loop through any values in
# CONF.compute_monitors and put any non-namespace'd values into the
# 'cpu' namespace.
cfg_monitors = ['cpu.' + cfg if '.' not in cfg else cfg
for cfg in CONF.compute_monitors]
# NOTE(jaypipes): Append 'nova.compute.monitors.' to any monitor value
# that doesn't have it to allow CONF.compute_monitors to use shortened
# namespaces (like 'cpu.' instead of 'nova.compute.monitors.cpu.')
cfg_monitors = ['nova.compute.monitors.' + cfg
if 'nova.compute.monitors.' not in cfg else cfg
for cfg in cfg_monitors]
if namespace + '.' + ext.name in cfg_monitors:
self.type_monitor_loaded[namespace] = ext.name
return True
msg = _LW("Excluding %(namespace)s monitor %(monitor_name)s. "
"Not in the list of enabled monitors "
"(CONF.compute_monitors).")
msg = msg % {
'namespace': namespace,
'monitor_name': ext.name,
}
LOG.warn(msg)
return False
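def _demo_load_monitors(tracker):
    # Added illustrative sketch (not part of the original file): enable the
    # virt-driver CPU monitor via config and load it through the handler.
    # 'tracker' stands in for a nova ResourceTracker instance.
    CONF.set_override('compute_monitors', ['cpu.virt_driver'])
    return MonitorHandler(tracker).monitors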
|
hastexo/edx-platform | refs/heads/master | lms/djangoapps/discussion/signals/handlers.py | 4 | """
Signal handlers related to discussions.
"""
import logging
from django.dispatch import receiver
from opaque_keys.edx.keys import CourseKey
from django_comment_common import signals
from lms.djangoapps.discussion import tasks
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
from openedx.core.djangoapps.theming.helpers import get_current_site
log = logging.getLogger(__name__)
ENABLE_FORUM_NOTIFICATIONS_FOR_SITE_KEY = 'enable_forum_notifications'
@receiver(signals.comment_created)
def send_discussion_email_notification(sender, user, post, **kwargs):
current_site = get_current_site()
if current_site is None:
log.info('Discussion: No current site, not sending notification about post: %s.', post.id)
return
try:
if not current_site.configuration.get_value(ENABLE_FORUM_NOTIFICATIONS_FOR_SITE_KEY, False):
log_message = 'Discussion: notifications not enabled for site: %s. Not sending message about post: %s.'
log.info(log_message, current_site, post.id)
return
except SiteConfiguration.DoesNotExist:
log_message = 'Discussion: No SiteConfiguration for site %s. Not sending message about post: %s.'
log.info(log_message, current_site, post.id)
return
send_message(post, current_site)
def send_message(comment, site):
thread = comment.thread
context = {
'course_id': unicode(thread.course_id),
'comment_id': comment.id,
'comment_body': comment.body,
'comment_author_id': comment.user_id,
'comment_created_at': comment.created_at, # comment_client models dates are already serialized
'thread_id': thread.id,
'thread_title': thread.title,
'thread_author_id': thread.user_id,
'thread_created_at': thread.created_at, # comment_client models dates are already serialized
'thread_commentable_id': thread.commentable_id,
'site_id': site.id
}
tasks.send_ace_message.apply_async(args=[context])
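def _demo_fire_signal(user, post):
    # Added illustrative sketch (not part of the original file): the
    # send_discussion_email_notification handler above runs when the comment
    # service emits comment_created. 'user' and 'post' are stand-in objects.
    signals.comment_created.send(sender=None, user=user, post=post)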
|
Nonserial/3lingual_voctrainer | refs/heads/master | settingsjson.py | 1 | import json
languages_var = ["Arabic", "English", "French", "German", "Italian", "Kurdish", "Latin", "Romanian", "Russian", "Spanish", "Swedish", "Turkish"]
settings_json = json.dumps([
{"type": "title",
"title": "Languages"},
{"type": "scrolloptions",
"title": "Search-Language: ",
"section": "languages",
"key": "learnlanguage",
"desc": "e.g. the language you want to learn: ",
"options": languages_var},
{"type": "scrolloptions",
"title": "Return-Language (1): ",
"section": "languages",
"key": "motherlanguage",
"desc": "e.g. your motherlanguage: ",
"options": languages_var},
{"type": "scrolloptions",
"title": "Return-Language (2): ",
"section": "languages",
"key": "returnlanguage",
"desc": "default 'English': ",
"options": languages_var},
{"type": "title",
"title": "Backups"},
{"type": "path",
"title": "Path of your backup-file",
"section": "languages",
"key": "backuppath",
"desc": "Path of your backup-file (default ../backups/voctrainer/)"},
{"type": "bool",
"title": "Backup your dictionary",
"section": "languages",
"key": "makebackup",
"values": ["no", "yes"]}
])
|
grap/OCB | refs/heads/7.0 | openerp/report/render/rml2pdf/trml2pdf.py | 15 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import copy
import reportlab
import re
from reportlab.pdfgen import canvas
from reportlab import platypus
import utils
import color
import os
import logging
from lxml import etree
import base64
from distutils.version import LooseVersion
from reportlab.platypus.doctemplate import ActionFlowable
from openerp.tools.safe_eval import safe_eval as eval
from reportlab.lib.units import inch,cm,mm
from openerp.tools.misc import file_open
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import A4, letter
try:
from cStringIO import StringIO
_hush_pyflakes = [ StringIO ]
except ImportError:
from StringIO import StringIO
_logger = logging.getLogger(__name__)
encoding = 'utf-8'
def select_fontname(fontname, default_fontname):
if fontname not in pdfmetrics.getRegisteredFontNames()\
or fontname not in pdfmetrics.standardFonts:
# let reportlab attempt to find it
try:
pdfmetrics.getFont(fontname)
except Exception:
_logger.warning('Could not locate font %s, substituting default: %s',
fontname, default_fontname)
fontname = default_fontname
return fontname
def _open_image(filename, path=None):
"""Attempt to open a binary file and return the descriptor
"""
if os.path.isfile(filename):
return open(filename, 'rb')
for p in (path or []):
if p and os.path.isabs(p):
fullpath = os.path.join(p, filename)
if os.path.isfile(fullpath):
return open(fullpath, 'rb')
try:
if p:
fullpath = os.path.join(p, filename)
else:
fullpath = filename
return file_open(fullpath)
except IOError:
pass
raise IOError("File %s cannot be found in image path" % filename)
class NumberedCanvas(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
for state in self._saved_page_states:
self.__dict__.update(state)
self.draw_page_number()
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self):
page_count = len(self._saved_page_states)
self.setFont("Helvetica", 8)
self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
" %(this)i / %(total)i" % {
'this': self._pageNumber,
'total': page_count,
}
)
class PageCount(platypus.Flowable):
def __init__(self, story_count=0):
platypus.Flowable.__init__(self)
self.story_count = story_count
def draw(self):
self.canv.beginForm("pageCount%d" % self.story_count)
self.canv.setFont("Helvetica", utils.unit_get(str(8)))
self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
self.canv.endForm()
class PageReset(platypus.Flowable):
def draw(self):
"""Flag to close current story page numbering and prepare for the next
should be executed after the rendering of the full story"""
self.canv._doPageReset = True
class _rml_styles(object,):
def __init__(self, nodes, localcontext):
self.localcontext = localcontext
self.styles = {}
self.styles_obj = {}
self.names = {}
self.table_styles = {}
self.default_style = reportlab.lib.styles.getSampleStyleSheet()
for node in nodes:
for style in node.findall('blockTableStyle'):
self.table_styles[style.get('id')] = self._table_style_get(style)
for style in node.findall('paraStyle'):
sname = style.get('name')
self.styles[sname] = self._para_style_update(style)
if self.default_style.has_key(sname):
for key, value in self.styles[sname].items():
setattr(self.default_style[sname], key, value)
else:
self.styles_obj[sname] = reportlab.lib.styles.ParagraphStyle(sname, self.default_style["Normal"], **self.styles[sname])
for variable in node.findall('initialize'):
for name in variable.findall('name'):
self.names[ name.get('id')] = name.get('value')
def _para_style_update(self, node):
data = {}
for attr in ['textColor', 'backColor', 'bulletColor', 'borderColor']:
if node.get(attr):
data[attr] = color.get(node.get(attr))
for attr in ['bulletFontName', 'fontName']:
if node.get(attr):
fontname= select_fontname(node.get(attr), None)
if fontname is not None:
data['fontName'] = fontname
for attr in ['bulletText']:
if node.get(attr):
data[attr] = node.get(attr)
for attr in ['fontSize', 'leftIndent', 'rightIndent', 'spaceBefore', 'spaceAfter',
'firstLineIndent', 'bulletIndent', 'bulletFontSize', 'leading',
'borderWidth','borderPadding','borderRadius']:
if node.get(attr):
data[attr] = utils.unit_get(node.get(attr))
if node.get('alignment'):
align = {
'right':reportlab.lib.enums.TA_RIGHT,
'center':reportlab.lib.enums.TA_CENTER,
'justify':reportlab.lib.enums.TA_JUSTIFY
}
data['alignment'] = align.get(node.get('alignment').lower(), reportlab.lib.enums.TA_LEFT)
data['splitLongWords'] = 0
return data
def _table_style_get(self, style_node):
styles = []
for node in style_node:
start = utils.tuple_int_get(node, 'start', (0,0) )
stop = utils.tuple_int_get(node, 'stop', (-1,-1) )
if node.tag=='blockValign':
styles.append(('VALIGN', start, stop, str(node.get('value'))))
elif node.tag=='blockFont':
styles.append(('FONT', start, stop, str(node.get('name'))))
elif node.tag=='blockTextColor':
styles.append(('TEXTCOLOR', start, stop, color.get(str(node.get('colorName')))))
elif node.tag=='blockLeading':
styles.append(('LEADING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockAlignment':
styles.append(('ALIGNMENT', start, stop, str(node.get('value'))))
elif node.tag=='blockSpan':
styles.append(('SPAN', start, stop))
elif node.tag=='blockLeftPadding':
styles.append(('LEFTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockRightPadding':
styles.append(('RIGHTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockTopPadding':
styles.append(('TOPPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBottomPadding':
styles.append(('BOTTOMPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBackground':
styles.append(('BACKGROUND', start, stop, color.get(node.get('colorName'))))
if node.get('size'):
styles.append(('FONTSIZE', start, stop, utils.unit_get(node.get('size'))))
elif node.tag=='lineStyle':
kind = node.get('kind')
kind_list = [ 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'LINEBELOW', 'LINEABOVE','LINEBEFORE', 'LINEAFTER' ]
assert kind in kind_list
thick = 1
if node.get('thickness'):
thick = float(node.get('thickness'))
styles.append((kind, start, stop, thick, color.get(node.get('colorName'))))
return platypus.tables.TableStyle(styles)
def para_style_get(self, node):
style = False
sname = node.get('style')
if sname:
if sname in self.styles_obj:
style = self.styles_obj[sname]
else:
_logger.debug('Warning: style not found, %s - setting default!', node.get('style'))
if not style:
style = self.default_style['Normal']
para_update = self._para_style_update(node)
if para_update:
            # update style only if necessary
style = copy.deepcopy(style)
style.__dict__.update(para_update)
return style
class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None):
if images is None:
images = {}
if localcontext is None:
localcontext = {}
self.localcontext = localcontext
self.etree = node
self.filename = self.etree.get('filename')
self.images = images
self.path = path
self.title = title
def docinit(self, els):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
for node in els:
for font in node.findall('registerFont'):
name = font.get('fontName').encode('ascii')
fname = font.get('fontFile').encode('ascii')
if name not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(name, fname))
                #by default, we map the fontName to each style (bold, italic, bold and italic), so that
                #if there isn't any font defined for one of these styles (via a font family), the system
                #will fall back on the normal font.
addMapping(name, 0, 0, name) #normal
addMapping(name, 0, 1, name) #italic
addMapping(name, 1, 0, name) #bold
addMapping(name, 1, 1, name) #italic and bold
#if registerFontFamily is defined, we register the mapping of the fontName to use for each style.
for font_family in node.findall('registerFontFamily'):
family_name = font_family.get('normal').encode('ascii')
if font_family.get('italic'):
addMapping(family_name, 0, 1, font_family.get('italic').encode('ascii'))
if font_family.get('bold'):
addMapping(family_name, 1, 0, font_family.get('bold').encode('ascii'))
if font_family.get('boldItalic'):
addMapping(family_name, 1, 1, font_family.get('boldItalic').encode('ascii'))
def setTTFontMapping(self,face, fontname, filename, mode='all'):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
if fontname not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(fontname, filename))
if mode == 'all':
addMapping(face, 0, 0, fontname) #normal
addMapping(face, 0, 1, fontname) #italic
addMapping(face, 1, 0, fontname) #bold
addMapping(face, 1, 1, fontname) #italic and bold
elif (mode== 'normal') or (mode == 'regular'):
addMapping(face, 0, 0, fontname) #normal
elif mode == 'italic':
addMapping(face, 0, 1, fontname) #italic
elif mode == 'bold':
addMapping(face, 1, 0, fontname) #bold
elif mode == 'bolditalic':
addMapping(face, 1, 1, fontname) #italic and bold
    def _textual_image(self, node):
        # Accumulate the node's textual content and decode it; the original
        # called the non-existent node.tostring() and ignored the accumulated
        # text, and crashed when n.tail was None.
        rc = node.text or ''
        for n in node:
            rc += (etree.tostring(n) or '') + (n.tail or '')
        return base64.decodestring(rc)
def _images(self, el):
result = {}
for node in el.findall('.//image'):
rc =( node.text or '')
result[node.get('name')] = base64.decodestring(rc)
return result
def render(self, out):
el = self.etree.findall('.//docinit')
if el:
self.docinit(el)
el = self.etree.findall('.//stylesheet')
self.styles = _rml_styles(el,self.localcontext)
el = self.etree.findall('.//images')
if el:
self.images.update( self._images(el[0]) )
el = self.etree.findall('.//template')
if len(el):
pt_obj = _rml_template(self.localcontext, out, el[0], self, images=self.images, path=self.path, title=self.title)
el = utils._child_get(self.etree, self, 'story')
pt_obj.render(el)
else:
self.canvas = canvas.Canvas(out)
pd = self.etree.find('pageDrawing')[0]
pd_obj = _rml_canvas(self.canvas, self.localcontext, None, self, self.images, path=self.path, title=self.title)
pd_obj.render(pd)
self.canvas.showPage()
self.canvas.save()
class _rml_canvas(object):
def __init__(self, canvas, localcontext, doc_tmpl=None, doc=None, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.canvas = canvas
self.styles = doc.styles
self.doc_tmpl = doc_tmpl
self.doc = doc
self.images = images
self.path = path
self.title = title
if self.title:
self.canvas.setTitle(self.title)
def _textual(self, node, x=0, y=0):
text = node.text and node.text.encode('utf-8') or ''
rc = utils._process_text(self, text)
for n in node:
if n.tag == 'seq':
from reportlab.lib.sequencer import getSequencer
seq = getSequencer()
rc += str(seq.next(n.get('id')))
if n.tag == 'pageCount':
if x or y:
self.canvas.translate(x,y)
self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,))
if x or y:
self.canvas.translate(-x,-y)
if n.tag == 'pageNumber':
rc += str(self.canvas.getPageNumber())
rc += utils._process_text(self, n.tail)
return rc.replace('\n','')
def _drawString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
try:
self.canvas.drawString(text=text, **v)
except TypeError as e:
_logger.error("Bad RML: <drawString> tag requires attributes 'x' and 'y'!")
raise e
def _drawCenteredString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawCentredString(text=text, **v)
def _drawRightString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawRightString(text=text, **v)
def _rect(self, node):
if node.get('round'):
self.canvas.roundRect(radius=utils.unit_get(node.get('round')), **utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
else:
self.canvas.rect(**utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
def _ellipse(self, node):
x1 = utils.unit_get(node.get('x'))
x2 = utils.unit_get(node.get('width'))
y1 = utils.unit_get(node.get('y'))
y2 = utils.unit_get(node.get('height'))
self.canvas.ellipse(x1,y1,x2,y2, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
    def _curves(self, node):
        line_str = node.text.split()
        while len(line_str) > 7:
            self.canvas.bezier(*[utils.unit_get(l) for l in line_str[0:8]])
            line_str = line_str[8:]
def _lines(self, node):
line_str = node.text.split()
lines = []
while len(line_str)>3:
lines.append([utils.unit_get(l) for l in line_str[0:4]])
line_str = line_str[4:]
self.canvas.lines(lines)
def _grid(self, node):
xlist = [utils.unit_get(s) for s in node.get('xs').split(',')]
ylist = [utils.unit_get(s) for s in node.get('ys').split(',')]
self.canvas.grid(xlist, ylist)
def _translate(self, node):
dx = utils.unit_get(node.get('dx')) or 0
dy = utils.unit_get(node.get('dy')) or 0
self.canvas.translate(dx,dy)
def _circle(self, node):
self.canvas.circle(x_cen=utils.unit_get(node.get('x')), y_cen=utils.unit_get(node.get('y')), r=utils.unit_get(node.get('radius')), **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _place(self, node):
flows = _rml_flowable(self.doc, self.localcontext, images=self.images, path=self.path, title=self.title, canvas=self.canvas).render(node)
infos = utils.attr_get(node, ['x','y','width','height'])
infos['y']+=infos['height']
for flow in flows:
w,h = flow.wrap(infos['width'], infos['height'])
if w<=infos['width'] and h<=infos['height']:
infos['y']-=h
flow.drawOn(self.canvas,infos['x'],infos['y'])
infos['height']-=h
else:
raise ValueError("Not enough space")
def _line_mode(self, node):
ljoin = {'round':1, 'mitered':0, 'bevelled':2}
lcap = {'default':0, 'round':1, 'square':2}
if node.get('width'):
self.canvas.setLineWidth(utils.unit_get(node.get('width')))
if node.get('join'):
self.canvas.setLineJoin(ljoin[node.get('join')])
if node.get('cap'):
self.canvas.setLineCap(lcap[node.get('cap')])
        if node.get('miterLimit'):
            # route miterLimit to setMiterLimit (it was wrongly sent to setDash)
            self.canvas.setMiterLimit(utils.unit_get(node.get('miterLimit')))
        if node.get('dash'):
            # pass the unit-converted dash values (they were computed then discarded)
            dashes = [utils.unit_get(d) for d in node.get('dash').split(',')]
            self.canvas.setDash(dashes)
def _image(self, node):
import urllib
import urlparse
from reportlab.lib.utils import ImageReader
nfile = node.get('file')
if not nfile:
if node.get('name'):
image_data = self.images[node.get('name')]
_logger.debug("Image %s used", node.get('name'))
s = StringIO(image_data)
else:
newtext = node.text
if self.localcontext:
res = utils._regex.findall(newtext)
for key in res:
newtext = eval(key, {}, self.localcontext) or ''
image_data = None
if newtext:
image_data = base64.decodestring(newtext)
if image_data:
s = StringIO(image_data)
else:
_logger.debug("No image data!")
return False
else:
if nfile in self.images:
s = StringIO(self.images[nfile])
else:
try:
up = urlparse.urlparse(str(nfile))
except ValueError:
up = False
if up and up.scheme:
# RFC: do we really want to open external URLs?
# Are we safe from cross-site scripting or attacks?
_logger.debug("Retrieve image from %s", nfile)
u = urllib.urlopen(str(nfile))
s = StringIO(u.read())
else:
_logger.debug("Open image file %s ", nfile)
s = _open_image(nfile, path=self.path)
try:
img = ImageReader(s)
(sx,sy) = img.getSize()
_logger.debug("Image is %dx%d", sx, sy)
args = { 'x': 0.0, 'y': 0.0, 'mask': 'auto'}
for tag in ('width','height','x','y'):
if node.get(tag):
args[tag] = utils.unit_get(node.get(tag))
if ('width' in args) and (not 'height' in args):
args['height'] = sy * args['width'] / sx
elif ('height' in args) and (not 'width' in args):
args['width'] = sx * args['height'] / sy
elif ('width' in args) and ('height' in args):
                if (float(args['width']) / args['height']) > (float(sx) / sy):
args['width'] = sx * args['height'] / sy
else:
args['height'] = sy * args['width'] / sx
self.canvas.drawImage(img, **args)
finally:
s.close()
# self.canvas._doc.SaveToFile(self.canvas._filename, self.canvas)
def _path(self, node):
self.path = self.canvas.beginPath()
self.path.moveTo(**utils.attr_get(node, ['x','y']))
for n in utils._child_get(node, self):
if not n.text :
if n.tag=='moveto':
vals = utils.text_get(n).split()
self.path.moveTo(utils.unit_get(vals[0]), utils.unit_get(vals[1]))
elif n.tag=='curvesto':
vals = utils.text_get(n).split()
while len(vals)>5:
pos=[]
while len(pos)<6:
pos.append(utils.unit_get(vals.pop(0)))
self.path.curveTo(*pos)
elif n.text:
data = n.text.split() # Not sure if I must merge all TEXT_NODE ?
while len(data)>1:
x = utils.unit_get(data.pop(0))
y = utils.unit_get(data.pop(0))
self.path.lineTo(x,y)
if (not node.get('close')) or utils.bool_get(node.get('close')):
self.path.close()
self.canvas.drawPath(self.path, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def setFont(self, node):
fontname = select_fontname(node.get('name'), self.canvas._fontname)
return self.canvas.setFont(fontname, utils.unit_get(node.get('size')))
def render(self, node):
tags = {
'drawCentredString': self._drawCenteredString,
'drawRightString': self._drawRightString,
'drawString': self._drawString,
'rect': self._rect,
'ellipse': self._ellipse,
'lines': self._lines,
'grid': self._grid,
'curves': self._curves,
'fill': lambda node: self.canvas.setFillColor(color.get(node.get('color'))),
'stroke': lambda node: self.canvas.setStrokeColor(color.get(node.get('color'))),
'setFont': self.setFont ,
'place': self._place,
'circle': self._circle,
'lineMode': self._line_mode,
'path': self._path,
'rotate': lambda node: self.canvas.rotate(float(node.get('degrees'))),
'translate': self._translate,
'image': self._image
}
for n in utils._child_get(node, self):
if n.tag in tags:
tags[n.tag](n)
class _rml_draw(object):
def __init__(self, localcontext, node, styles, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.node = node
self.styles = styles
self.canvas = None
self.images = images
self.path = path
self.canvas_title = title
def render(self, canvas, doc):
canvas.saveState()
cnv = _rml_canvas(canvas, self.localcontext, doc, self.styles, images=self.images, path=self.path, title=self.canvas_title)
cnv.render(self.node)
canvas.restoreState()
class _rml_Illustration(platypus.flowables.Flowable):
def __init__(self, node, localcontext, styles, self2):
self.localcontext = (localcontext or {}).copy()
self.node = node
self.styles = styles
self.width = utils.unit_get(node.get('width'))
self.height = utils.unit_get(node.get('height'))
self.self2 = self2
def wrap(self, *args):
return self.width, self.height
def draw(self):
drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title)
drw.render(self.canv, None)
# Workaround for issue #15: https://bitbucket.org/rptlab/reportlab/issue/15/infinite-pages-produced-when-splitting
original_pto_split = platypus.flowables.PTOContainer.split
def split(self, availWidth, availHeight):
res = original_pto_split(self, availWidth, availHeight)
if len(res) > 2 and len(self._content) > 0:
header = self._content[0]._ptoinfo.header
trailer = self._content[0]._ptoinfo.trailer
if isinstance(res[-2], platypus.flowables.UseUpSpace) and len(header + trailer) == len(res[:-2]):
return []
return res
platypus.flowables.PTOContainer.split = split
class _rml_flowable(object):
def __init__(self, doc, localcontext, images=None, path='.', title=None, canvas=None):
if images is None:
images = {}
self.localcontext = localcontext
self.doc = doc
self.styles = doc.styles
self.images = images
self.path = path
self.title = title
self.canvas = canvas
def _textual(self, node):
rc1 = utils._process_text(self, node.text or '')
for n in utils._child_get(node,self):
txt_n = copy.deepcopy(n)
for key in txt_n.attrib.keys():
if key in ('rml_except', 'rml_loop', 'rml_tag'):
del txt_n.attrib[key]
if not n.tag == 'bullet':
if n.tag == 'pageNumber':
txt_n.text = self.canvas and str(self.canvas.getPageNumber()) or ''
else:
txt_n.text = utils.xml2str(self._textual(n))
txt_n.tail = n.tail and utils.xml2str(utils._process_text(self, n.tail.replace('\n',''))) or ''
rc1 += etree.tostring(txt_n)
return rc1
def _table(self, node):
children = utils._child_get(node,self,'tr')
if not children:
return None
length = 0
colwidths = None
rowheights = None
data = []
styles = []
posy = 0
for tr in children:
paraStyle = None
if tr.get('style'):
st = copy.deepcopy(self.styles.table_styles[tr.get('style')])
for si in range(len(st._cmds)):
s = list(st._cmds[si])
s[1] = (s[1][0],posy)
s[2] = (s[2][0],posy)
st._cmds[si] = tuple(s)
styles.append(st)
if tr.get('paraStyle'):
paraStyle = self.styles.styles[tr.get('paraStyle')]
data2 = []
posx = 0
for td in utils._child_get(tr, self,'td'):
                if td.get('style'):
                    st = copy.deepcopy(self.styles.table_styles[td.get('style')])
                    # _cmds entries are tuples; rebuild each command instead of
                    # assigning into immutable tuples (which raises TypeError)
                    for si in range(len(st._cmds)):
                        s = list(st._cmds[si])
                        s[1] = (posx, posy)
                        s[2] = (posx, posy)
                        st._cmds[si] = tuple(s)
                    styles.append(st)
if td.get('paraStyle'):
# TODO: merge styles
paraStyle = self.styles.styles[td.get('paraStyle')]
posx += 1
flow = []
for n in utils._child_get(td, self):
if n.tag == etree.Comment:
n.text = ''
continue
fl = self._flowable(n, extra_style=paraStyle)
if isinstance(fl,list):
flow += fl
else:
flow.append( fl )
if not len(flow):
flow = self._textual(td)
data2.append( flow )
if len(data2)>length:
length=len(data2)
for ab in data:
while len(ab)<length:
ab.append('')
while len(data2)<length:
data2.append('')
data.append( data2 )
posy += 1
if node.get('colWidths'):
assert length == len(node.get('colWidths').split(','))
colwidths = [utils.unit_get(f.strip()) for f in node.get('colWidths').split(',')]
if node.get('rowHeights'):
rowheights = [utils.unit_get(f.strip()) for f in node.get('rowHeights').split(',')]
if len(rowheights) == 1:
rowheights = rowheights[0]
table = platypus.LongTable(data = data, colWidths=colwidths, rowHeights=rowheights, **(utils.attr_get(node, ['splitByRow'] ,{'repeatRows':'int','repeatCols':'int'})))
if node.get('style'):
table.setStyle(self.styles.table_styles[node.get('style')])
for s in styles:
table.setStyle(s)
return table
def _illustration(self, node):
return _rml_Illustration(node, self.localcontext, self.styles, self)
def _textual_image(self, node):
return base64.decodestring(node.text)
def _pto(self, node):
sub_story = []
pto_header = None
pto_trailer = None
for node in utils._child_get(node, self):
if node.tag == etree.Comment:
node.text = ''
continue
elif node.tag=='pto_header':
pto_header = self.render(node)
elif node.tag=='pto_trailer':
pto_trailer = self.render(node)
else:
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return platypus.flowables.PTOContainer(sub_story, trailer=pto_trailer, header=pto_header)
def _flowable(self, node, extra_style=None):
if node.tag=='pto':
return self._pto(node)
if node.tag=='para':
style = self.styles.para_style_get(node)
if extra_style:
style.__dict__.update(extra_style)
text_node = self._textual(node).strip().replace('\n\n', '\n').replace('\n', '<br/>')
instance = platypus.Paragraph(text_node, style, **(utils.attr_get(node, [], {'bulletText':'str'})))
result = [instance]
if LooseVersion(reportlab.Version) > LooseVersion('3.0') and not instance.getPlainText().strip() and instance.text.strip():
result.append(platypus.Paragraph(' <br/>', style, **(utils.attr_get(node, [], {'bulletText': 'str'}))))
return result
elif node.tag=='barCode':
try:
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import code39
from reportlab.graphics.barcode import code93
from reportlab.graphics.barcode import common
from reportlab.graphics.barcode import fourstate
from reportlab.graphics.barcode import usps
from reportlab.graphics.barcode import createBarcodeDrawing
except ImportError:
_logger.warning("Cannot use barcode renderers:", exc_info=True)
return None
args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'})
codes = {
'codabar': lambda x: common.Codabar(x, **args),
'code11': lambda x: common.Code11(x, **args),
'code128': lambda x: code128.Code128(str(x), **args),
'standard39': lambda x: code39.Standard39(str(x), **args),
'standard93': lambda x: code93.Standard93(str(x), **args),
'i2of5': lambda x: common.I2of5(x, **args),
'extended39': lambda x: code39.Extended39(str(x), **args),
'extended93': lambda x: code93.Extended93(str(x), **args),
'msi': lambda x: common.MSI(x, **args),
'fim': lambda x: usps.FIM(x, **args),
'postnet': lambda x: usps.POSTNET(x, **args),
'ean13': lambda x: createBarcodeDrawing('EAN13', value=str(x), **args),
'qrcode': lambda x: createBarcodeDrawing('QR', value=x, **args),
}
code = 'code128'
if node.get('code'):
code = node.get('code').lower()
return codes[code](self._textual(node))
elif node.tag=='name':
self.styles.names[ node.get('id')] = node.get('value')
return None
elif node.tag=='xpre':
style = self.styles.para_style_get(node)
return platypus.XPreformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int','frags':'int'})))
elif node.tag=='pre':
style = self.styles.para_style_get(node)
return platypus.Preformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int'})))
elif node.tag=='illustration':
return self._illustration(node)
elif node.tag=='blockTable':
return self._table(node)
elif node.tag=='title':
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Title']
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif re.match('^h([1-9]+[0-9]*)$', (node.tag or '')):
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Heading'+str(node.tag[1:])]
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif node.tag=='image':
image_data = False
if not node.get('file'):
if node.get('name'):
if node.get('name') in self.doc.images:
_logger.debug("Image %s read ", node.get('name'))
image_data = self.doc.images[node.get('name')].read()
else:
_logger.warning("Image %s not defined", node.get('name'))
return False
else:
import base64
newtext = node.text
if self.localcontext:
newtext = utils._process_text(self, node.text or '')
image_data = base64.decodestring(newtext)
if not image_data:
_logger.debug("No inline image data")
return False
image = StringIO(image_data)
else:
_logger.debug("Image get from file %s", node.get('file'))
image = _open_image(node.get('file'), path=self.doc.path)
return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height'])))
elif node.tag=='spacer':
if node.get('width'):
width = utils.unit_get(node.get('width'))
else:
width = utils.unit_get('1cm')
length = utils.unit_get(node.get('length'))
return platypus.Spacer(width=width, height=length)
elif node.tag=='section':
return self.render(node)
elif node.tag == 'pageNumberReset':
return PageReset()
elif node.tag in ('pageBreak', 'nextPage'):
return platypus.PageBreak()
elif node.tag=='condPageBreak':
return platypus.CondPageBreak(**(utils.attr_get(node, ['height'])))
elif node.tag=='setNextTemplate':
return platypus.NextPageTemplate(str(node.get('name')))
elif node.tag=='nextFrame':
return platypus.CondPageBreak(1000) # TODO: change the 1000 !
elif node.tag == 'setNextFrame':
from reportlab.platypus.doctemplate import NextFrameFlowable
return NextFrameFlowable(str(node.get('name')))
elif node.tag == 'currentFrame':
from reportlab.platypus.doctemplate import CurrentFrameFlowable
return CurrentFrameFlowable(str(node.get('name')))
elif node.tag == 'frameEnd':
return EndFrameFlowable()
elif node.tag == 'hr':
width_hr=node.get('width') or '100%'
color_hr=node.get('color') or 'black'
thickness_hr=node.get('thickness') or 1
lineCap_hr=node.get('lineCap') or 'round'
return platypus.flowables.HRFlowable(width=width_hr,color=color.get(color_hr),thickness=float(thickness_hr),lineCap=str(lineCap_hr))
else:
sys.stderr.write('Warning: flowable not yet implemented: %s !\n' % (node.tag,))
return None
def render(self, node_story):
def process_story(node_story):
sub_story = []
for node in utils._child_get(node_story, self):
if node.tag == etree.Comment:
node.text = ''
continue
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return sub_story
return process_story(node_story)
class EndFrameFlowable(ActionFlowable):
def __init__(self,resume=0):
ActionFlowable.__init__(self,('frameEnd',resume))
class TinyDocTemplate(platypus.BaseDocTemplate):
def beforeDocument(self):
        # Store some useful values directly on the canvas, so they're available
        # during flowable drawing (needed for proper PageCount handling)
self.canv._doPageReset = False
self.canv._storyCount = 0
def ___handle_pageBegin(self):
self.page += 1
self.pageTemplate.beforeDrawPage(self.canv,self)
self.pageTemplate.checkPageSize(self.canv,self)
self.pageTemplate.onPage(self.canv,self)
for f in self.pageTemplate.frames: f._reset()
self.beforePage()
self._curPageFlowableCount = 0
if hasattr(self,'_nextFrameIndex'):
del self._nextFrameIndex
for f in self.pageTemplate.frames:
if f.id == 'first':
self.frame = f
break
self.handle_frameBegin()
def afterPage(self):
if isinstance(self.canv, NumberedCanvas):
# save current page states before eventual reset
self.canv._saved_page_states.append(dict(self.canv.__dict__))
if self.canv._doPageReset:
            # Following a <pageReset/> tag:
            # - we reset the page number to 0
            # - we add a new PageCount flowable (relative to the current
            #   story number), except for NumberedCanvas, which handles the
            #   page count itself
            # NOTE: the _rml_template render() method adds a PageReset flowable at
            # the end of each story, so we're sure to pass here at least once per story.
if not isinstance(self.canv, NumberedCanvas):
self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ])
self.canv._pageCount = self.page
self.page = 0
self.canv._flag = True
self.canv._pageNumber = 0
self.canv._doPageReset = False
self.canv._storyCount += 1
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
if images is None:
images = {}
if not localcontext:
localcontext={'internal_header':True}
self.localcontext = localcontext
self.images= images
self.path = path
self.title = title
pagesize_map = {'a4': A4,
'us_letter': letter
}
pageSize = A4
if self.localcontext.get('company'):
pageSize = pagesize_map.get(self.localcontext.get('company').paper_format, A4)
if node.get('pageSize'):
ps = map(lambda x:x.strip(), node.get('pageSize').replace(')', '').replace('(', '').split(','))
pageSize = ( utils.unit_get(ps[0]),utils.unit_get(ps[1]) )
self.doc_tmpl = TinyDocTemplate(out, pagesize=pageSize, **utils.attr_get(node, ['leftMargin','rightMargin','topMargin','bottomMargin'], {'allowSplitting':'int','showBoundary':'bool','rotation':'int','title':'str','author':'str'}))
self.page_templates = []
self.styles = doc.styles
self.doc = doc
self.image=[]
pts = node.findall('pageTemplate')
for pt in pts:
frames = []
for frame_el in pt.findall('frame'):
frame = platypus.Frame( **(utils.attr_get(frame_el, ['x1','y1', 'width','height', 'leftPadding', 'rightPadding', 'bottomPadding', 'topPadding'], {'id':'str', 'showBoundary':'bool'})) )
if utils.attr_get(frame_el, ['last']):
frame.lastFrame = True
frames.append( frame )
try :
gr = pt.findall('pageGraphics')\
or pt[1].findall('pageGraphics')
except Exception: # FIXME: be even more specific, perhaps?
gr=''
if len(gr):
# self.image=[ n for n in utils._child_get(gr[0], self) if n.tag=='image' or not self.localcontext]
drw = _rml_draw(self.localcontext,gr[0], self.doc, images=images, path=self.path, title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames, onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
else:
drw = _rml_draw(self.localcontext,node,self.doc,title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames,onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
self.doc_tmpl.addPageTemplates(self.page_templates)
def render(self, node_stories):
        if self.localcontext and not self.localcontext.get('internal_header', False):
            # use pop() so a missing key doesn't raise KeyError
            self.localcontext.pop('internal_header', None)
fis = []
r = _rml_flowable(self.doc,self.localcontext, images=self.images, path=self.path, title=self.title, canvas=None)
story_cnt = 0
for node_story in node_stories:
if story_cnt > 0:
fis.append(platypus.PageBreak())
fis += r.render(node_story)
# end of story numbering computation
fis.append(PageReset())
story_cnt += 1
try:
if self.localcontext and self.localcontext.get('internal_header',False):
self.doc_tmpl.afterFlowable(fis)
self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas)
else:
self.doc_tmpl.build(fis)
except platypus.doctemplate.LayoutError, e:
e.name = 'Print Error'
e.value = 'The document you are trying to print contains a table row that does not fit on one page. Please try to split it in smaller rows or contact your administrator.'
raise
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
from customfonts import SetCustomFonts
SetCustomFonts(r)
except ImportError:
# means there is no custom fonts mapping in this system.
pass
except Exception:
_logger.warning('Cannot set font mapping', exc_info=True)
pass
fp = StringIO()
r.render(fp)
return fp.getvalue()
def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
from customfonts import SetCustomFonts
SetCustomFonts(r)
except Exception:
pass
if fout:
fp = file(fout,'wb')
r.render(fp)
fp.close()
return fout
else:
fp = StringIO()
r.render(fp)
return fp.getvalue()
def trml2pdf_help():
    print 'Usage: trml2pdf input.rml >output.pdf'
    print 'Render the given RML file and write the resulting PDF to standard output'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
trml2pdf_help()
print parseString(file(sys.argv[1], 'r').read()),
else:
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Try \'trml2pdf --help\' for more information.'
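def _demo_render(rml_path='input.rml', pdf_path='output.pdf'):
    # Added illustrative sketch (not part of the original file): render an RML
    # document to PDF bytes through parseNode(). File names are placeholders.
    with open(rml_path, 'rb') as fin:
        rml = fin.read()
    pdf_bytes = parseNode(rml, localcontext={}, images={})
    with open(pdf_path, 'wb') as fout:
        fout.write(pdf_bytes)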
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
retomerz/intellij-community | refs/heads/master | python/helpers/pydev/tests_pydevd_python/performance_check.py | 12 | import debugger_unittest
import sys
import re
import os
CHECK_BASELINE, CHECK_REGULAR, CHECK_CYTHON = 'baseline', 'regular', 'cython'
class PerformanceWriterThread(debugger_unittest.AbstractWriterThread):
CHECK = None
debugger_unittest.AbstractWriterThread.get_environ # overrides
def get_environ(self):
env = os.environ.copy()
if self.CHECK == CHECK_BASELINE:
env['PYTHONPATH'] = r'X:\PyDev.Debugger.baseline'
elif self.CHECK == CHECK_CYTHON:
env['PYDEVD_USE_CYTHON'] = 'YES'
elif self.CHECK == CHECK_REGULAR:
env['PYDEVD_USE_CYTHON'] = 'NO'
else:
raise AssertionError("Don't know what to check.")
return env
debugger_unittest.AbstractWriterThread.get_pydevd_file # overrides
def get_pydevd_file(self):
if self.CHECK == CHECK_BASELINE:
return os.path.abspath(os.path.join(r'X:\PyDev.Debugger.baseline', 'pydevd.py'))
dirname = os.path.dirname(__file__)
dirname = os.path.dirname(dirname)
return os.path.abspath(os.path.join(dirname, 'pydevd.py'))
class WriterThreadPerformance1(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_breakpoint'
def run(self):
self.start_socket()
self.write_add_breakpoint(17, 'method')
self.write_make_initial_run()
self.finished_ok = True
class WriterThreadPerformance2(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_without_breakpoint'
def run(self):
self.start_socket()
self.write_make_initial_run()
self.finished_ok = True
class WriterThreadPerformance3(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_step_over'
def run(self):
self.start_socket()
self.write_add_breakpoint(26, None)
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
self.write_run_thread(thread_id)
self.finished_ok = True
class WriterThreadPerformance4(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_exception_breakpoint'
def run(self):
self.start_socket()
self.write_add_exception_breakpoint('ValueError')
self.write_make_initial_run()
self.finished_ok = True
class CheckDebuggerPerformance(debugger_unittest.DebuggerRunner):
def get_command_line(self):
return [sys.executable]
def _get_time_from_result(self, result):
stdout = ''.join(result['stdout'])
        match = re.search(r'TotalTime>>((\d|\.)+)<<', stdout)
time_taken = match.group(1)
return float(time_taken)
def obtain_results(self, writer_thread_class):
time_when_debugged = self._get_time_from_result(self.check_case(writer_thread_class))
args = self.get_command_line()
args.append(writer_thread_class.TEST_FILE)
regular_time = self._get_time_from_result(self.run_process(args, writer_thread=None))
simple_trace_time = self._get_time_from_result(self.run_process(args+['--regular-trace'], writer_thread=None))
print(writer_thread_class.BENCHMARK_NAME, time_when_debugged, regular_time, simple_trace_time)
if 'SPEEDTIN_AUTHORIZATION_KEY' in os.environ:
SPEEDTIN_AUTHORIZATION_KEY = os.environ['SPEEDTIN_AUTHORIZATION_KEY']
# sys.path.append(r'X:\speedtin\pyspeedtin')
import pyspeedtin # If the authorization key is there, pyspeedtin must be available
import pydevd
pydevd_cython_project_id, pydevd_pure_python_project_id = 6, 7
if writer_thread_class.CHECK == CHECK_BASELINE:
project_ids = (pydevd_cython_project_id, pydevd_pure_python_project_id)
elif writer_thread_class.CHECK == CHECK_REGULAR:
project_ids = (pydevd_pure_python_project_id,)
elif writer_thread_class.CHECK == CHECK_CYTHON:
project_ids = (pydevd_cython_project_id,)
else:
raise AssertionError('Wrong check: %s' % (writer_thread_class.CHECK))
for project_id in project_ids:
api = pyspeedtin.PySpeedTinApi(authorization_key=SPEEDTIN_AUTHORIZATION_KEY, project_id=project_id)
benchmark_name = writer_thread_class.BENCHMARK_NAME
if writer_thread_class.CHECK == CHECK_BASELINE:
version = '0.0.1_baseline'
return # No longer commit the baseline (it's immutable right now).
else:
                    version = pydevd.__version__
commit_id, branch, commit_date = api.git_commit_id_branch_and_date_from_path(pydevd.__file__)
api.add_benchmark(benchmark_name)
api.add_measurement(
benchmark_name,
value=time_when_debugged,
version=version,
released=False,
branch=branch,
commit_id=commit_id,
commit_date=commit_date,
)
api.commit()
def check_performance1(self):
self.obtain_results(WriterThreadPerformance1)
def check_performance2(self):
self.obtain_results(WriterThreadPerformance2)
def check_performance3(self):
self.obtain_results(WriterThreadPerformance3)
def check_performance4(self):
self.obtain_results(WriterThreadPerformance4)
if __name__ == '__main__':
debugger_unittest.SHOW_WRITES_AND_READS = False
debugger_unittest.SHOW_OTHER_DEBUG_INFO = False
debugger_unittest.SHOW_STDOUT = False
for check in (
# CHECK_BASELINE, -- Checks against the version checked out at X:\PyDev.Debugger.baseline.
CHECK_REGULAR,
CHECK_CYTHON
):
PerformanceWriterThread.CHECK = check
print('Checking: %s' % (check,))
check_debugger_performance = CheckDebuggerPerformance()
check_debugger_performance.check_performance1()
check_debugger_performance.check_performance2()
check_debugger_performance.check_performance3()
check_debugger_performance.check_performance4()
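    # Added note (illustrative): _get_time_from_result() above expects each
    # benchmarked script to print a marker of the form
    #     TotalTime>>1.234<<
    # e.g. via: sys.stdout.write('TotalTime>>%.3f<<\n' % elapsed_seconds)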
|
jve2kor/machine-learning-nanodegree | refs/heads/master | projects/capstone/open_projects/robot_motion_planning/tester.py | 5 | from maze import Maze
from robot import Robot
import sys
# global dictionaries for robot movement and sensing
dir_sensors = {'u': ['l', 'u', 'r'], 'r': ['u', 'r', 'd'],
'd': ['r', 'd', 'l'], 'l': ['d', 'l', 'u'],
'up': ['l', 'u', 'r'], 'right': ['u', 'r', 'd'],
'down': ['r', 'd', 'l'], 'left': ['d', 'l', 'u']}
dir_move = {'u': [0, 1], 'r': [1, 0], 'd': [0, -1], 'l': [-1, 0],
'up': [0, 1], 'right': [1, 0], 'down': [0, -1], 'left': [-1, 0]}
dir_reverse = {'u': 'd', 'r': 'l', 'd': 'u', 'l': 'r',
'up': 'd', 'right': 'l', 'down': 'u', 'left': 'r'}
# test and score parameters
max_time = 1000
train_score_mult = 1/30.
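# Added worked example (illustrative): the score printed at the end is
# runtimes[1] + train_score_mult * runtimes[0]; with runtimes == [900, 60]
# that is 60 + 900/30. == 90.0.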
if __name__ == '__main__':
'''
This script tests a robot based on the code in robot.py on a maze given
as an argument when running the script.
'''
# Create a maze based on input argument on command line.
testmaze = Maze( str(sys.argv[1]) )
# Intitialize a robot; robot receives info about maze dimensions.
testrobot = Robot(testmaze.dim)
# Record robot performance over two runs.
runtimes = []
total_time = 0
for run in range(2):
print "Starting run {}.".format(run)
# Set the robot in the start position. Note that robot position
# parameters are independent of the robot itself.
robot_pos = {'location': [0, 0], 'heading': 'up'}
run_active = True
hit_goal = False
while run_active:
# check for end of time
total_time += 1
if total_time > max_time:
run_active = False
print "Allotted time exceeded."
break
# provide robot with sensor information, get actions
sensing = [testmaze.dist_to_wall(robot_pos['location'], heading)
for heading in dir_sensors[robot_pos['heading']]]
rotation, movement = testrobot.next_move(sensing)
# check for a reset
if (rotation, movement) == ('Reset', 'Reset'):
if run == 0 and hit_goal:
run_active = False
runtimes.append(total_time)
print "Ending first run. Starting next run."
break
elif run == 0 and not hit_goal:
print "Cannot reset - robot has not hit goal yet."
continue
else:
print "Cannot reset on runs after the first."
continue
# perform rotation
if rotation == -90:
robot_pos['heading'] = dir_sensors[robot_pos['heading']][0]
elif rotation == 90:
robot_pos['heading'] = dir_sensors[robot_pos['heading']][2]
elif rotation == 0:
pass
else:
print "Invalid rotation value, no rotation performed."
# perform movement
if abs(movement) > 3:
print "Movement limited to three squares in a turn."
movement = max(min(int(movement), 3), -3) # fix to range [-3, 3]
while movement:
if movement > 0:
if testmaze.is_permissible(robot_pos['location'], robot_pos['heading']):
robot_pos['location'][0] += dir_move[robot_pos['heading']][0]
robot_pos['location'][1] += dir_move[robot_pos['heading']][1]
movement -= 1
else:
print "Movement stopped by wall."
movement = 0
else:
rev_heading = dir_reverse[robot_pos['heading']]
if testmaze.is_permissible(robot_pos['location'], rev_heading):
robot_pos['location'][0] += dir_move[rev_heading][0]
robot_pos['location'][1] += dir_move[rev_heading][1]
movement += 1
else:
print "Movement stopped by wall."
movement = 0
# check for goal entered
goal_bounds = [testmaze.dim/2 - 1, testmaze.dim/2]
if robot_pos['location'][0] in goal_bounds and robot_pos['location'][1] in goal_bounds:
hit_goal = True
if run != 0:
runtimes.append(total_time - sum(runtimes))
run_active = False
print "Goal found; run {} completed!".format(run)
# Report score if robot is successful.
if len(runtimes) == 2:
print "Task complete! Score: {:4.3f}".format(runtimes[1] + train_score_mult*runtimes[0]) |
shravan-achar/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/extra/get_issues.py | 195 | import json
import py
import textwrap
issues_url = "http://bitbucket.org/api/1.0/repositories/pytest-dev/pytest/issues"
import requests
def get_issues():
chunksize = 50
start = 0
issues = []
    while True:
post_data = {"accountname": "pytest-dev",
"repo_slug": "pytest",
"start": start,
"limit": chunksize}
print ("getting from", start)
r = requests.get(issues_url, params=post_data)
data = r.json()
issues.extend(data["issues"])
if start + chunksize >= data["count"]:
return issues
start += chunksize
kind2num = "bug enhancement task proposal".split()
status2num = "new open resolved duplicate invalid wontfix".split()
def main(args):
cachefile = py.path.local(args.cache)
if not cachefile.exists() or args.refresh:
issues = get_issues()
cachefile.write(json.dumps(issues))
else:
issues = json.loads(cachefile.read())
open_issues = [x for x in issues
if x["status"] in ("new", "open")]
def kind_and_id(x):
kind = x["metadata"]["kind"]
return kind2num.index(kind), len(issues)-int(x["local_id"])
open_issues.sort(key=kind_and_id)
report(open_issues)
def report(issues):
for issue in issues:
metadata = issue["metadata"]
priority = issue["priority"]
title = issue["title"]
content = issue["content"]
kind = metadata["kind"]
status = issue["status"]
id = issue["local_id"]
link = "https://bitbucket.org/pytest-dev/pytest/issue/%s/" % id
print("----")
print(status, kind, link)
print(title)
#print()
#lines = content.split("\n")
#print ("\n".join(lines[:3]))
#if len(lines) > 3 or len(content) > 240:
# print ("...")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("process bitbucket issues")
parser.add_argument("--refresh", action="store_true",
help="invalidate cache, refresh issues")
parser.add_argument("--cache", action="store", default="issues.json",
help="cache file")
args = parser.parse_args()
main(args)
|
amaas-fintech/amaas-core-sdk-python | refs/heads/master | amaascore/parties/asset_manager.py | 1 | from __future__ import absolute_import, division, print_function, unicode_literals
from amaascore.parties.company import Company
class AssetManager(Company):
"""
This represents a Company engaged in Asset Management activity.
"""
def __init__(self, asset_manager_id, party_id, base_currency=None, display_name='',
legal_name='', url='', description='', party_status='Active',
year_of_incorporation=None, contact_number=None, license_type=None,
license_number=None, assets_under_management=None, registration_number=None,
addresses=None, emails=None, links=None, references=None,
*args, **kwargs):
self.license_type = license_type
self.license_number = license_number
self.assets_under_management = assets_under_management
self.registration_number = registration_number
super(AssetManager, self).__init__(asset_manager_id=asset_manager_id, party_id=party_id,
base_currency=base_currency, display_name=display_name,
legal_name=legal_name, url=url,
description=description, party_status=party_status,
year_of_incorporation=year_of_incorporation,
contact_number=contact_number,
addresses=addresses, emails=emails,
links=links, references=references, *args, **kwargs)
|
Mozta/pagina-diagnostijuego | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/humanize/templatetags/humanize.py | 526 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from datetime import date, datetime
from decimal import Decimal
from django import template
from django.conf import settings
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.formats import number_format
from django.utils.safestring import mark_safe
from django.utils.timezone import is_aware, utc
from django.utils.translation import pgettext, ugettext as _, ungettext
register = template.Library()
@register.filter(is_safe=True)
def ordinal(value):
"""
Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
suffixes = (_('th'), _('st'), _('nd'), _('rd'), _('th'), _('th'), _('th'), _('th'), _('th'), _('th'))
if value % 100 in (11, 12, 13): # special case
return mark_safe("%d%s" % (value, suffixes[0]))
# Mark value safe so i18n does not break with <sup> or <sub> see #19988
return mark_safe("%d%s" % (value, suffixes[value % 10]))
@register.filter(is_safe=True)
def intcomma(value, use_l10n=True):
"""
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
if settings.USE_L10N and use_l10n:
try:
if not isinstance(value, (float, Decimal)):
value = int(value)
except (TypeError, ValueError):
return intcomma(value, False)
else:
return number_format(value, force_grouping=True)
orig = force_text(value)
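    # Insert a comma before the last three digits of the leading digit run,
    # then recurse until the string stops changing, at which point every
    # three-digit group is separated.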
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new, use_l10n)
# A tuple of standard large number to their converters
intword_converters = (
(6, lambda number: (
ungettext('%(value).1f million', '%(value).1f million', number),
ungettext('%(value)s million', '%(value)s million', number),
)),
(9, lambda number: (
ungettext('%(value).1f billion', '%(value).1f billion', number),
ungettext('%(value)s billion', '%(value)s billion', number),
)),
(12, lambda number: (
ungettext('%(value).1f trillion', '%(value).1f trillion', number),
ungettext('%(value)s trillion', '%(value)s trillion', number),
)),
(15, lambda number: (
ungettext('%(value).1f quadrillion', '%(value).1f quadrillion', number),
ungettext('%(value)s quadrillion', '%(value)s quadrillion', number),
)),
(18, lambda number: (
ungettext('%(value).1f quintillion', '%(value).1f quintillion', number),
ungettext('%(value)s quintillion', '%(value)s quintillion', number),
)),
(21, lambda number: (
ungettext('%(value).1f sextillion', '%(value).1f sextillion', number),
ungettext('%(value)s sextillion', '%(value)s sextillion', number),
)),
(24, lambda number: (
ungettext('%(value).1f septillion', '%(value).1f septillion', number),
ungettext('%(value)s septillion', '%(value)s septillion', number),
)),
(27, lambda number: (
ungettext('%(value).1f octillion', '%(value).1f octillion', number),
ungettext('%(value)s octillion', '%(value)s octillion', number),
)),
(30, lambda number: (
ungettext('%(value).1f nonillion', '%(value).1f nonillion', number),
ungettext('%(value)s nonillion', '%(value)s nonillion', number),
)),
(33, lambda number: (
ungettext('%(value).1f decillion', '%(value).1f decillion', number),
ungettext('%(value)s decillion', '%(value)s decillion', number),
)),
(100, lambda number: (
ungettext('%(value).1f googol', '%(value).1f googol', number),
ungettext('%(value)s googol', '%(value)s googol', number),
)),
)
@register.filter(is_safe=False)
def intword(value):
"""
Converts a large integer to a friendly text representation. Works best
for numbers over 1 million. For example, 1000000 becomes '1.0 million',
1200000 becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1000000:
return value
def _check_for_i18n(value, float_formatted, string_formatted):
"""
Use the i18n enabled defaultfilters.floatformat if possible
"""
if settings.USE_L10N:
value = defaultfilters.floatformat(value, 1)
template = string_formatted
else:
template = float_formatted
return template % {'value': value}
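    # Scan the converters from smallest to largest exponent; the first scale
    # whose next step (x1000) would exceed the value is the most readable unit.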
for exponent, converters in intword_converters:
large_number = 10 ** exponent
if value < large_number * 1000:
new_value = value / float(large_number)
return _check_for_i18n(new_value, *converters(new_value))
return value
@register.filter(is_safe=True)
def apnumber(value):
"""
For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 < value < 10:
return value
return (_('one'), _('two'), _('three'), _('four'), _('five'),
_('six'), _('seven'), _('eight'), _('nine'))[value - 1]
# Perform the comparison in the default time zone when USE_TZ = True
# (unless a specific time zone has been applied with the |timezone filter).
@register.filter(expects_localtime=True)
def naturalday(value, arg=None):
"""
For date values that are tomorrow, today or yesterday compared to
present day returns representing string. Otherwise, returns a string
formatted according to settings.DATE_FORMAT.
"""
try:
tzinfo = getattr(value, 'tzinfo', None)
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't a date object
return value
except ValueError:
# Date arguments out of range
return value
today = datetime.now(tzinfo).date()
delta = value - today
if delta.days == 0:
return _('today')
elif delta.days == 1:
return _('tomorrow')
elif delta.days == -1:
return _('yesterday')
return defaultfilters.date(value, arg)
# This filter doesn't require expects_localtime=True because it deals properly
# with both naive and aware datetimes. Therefore avoid the cost of conversion.
@register.filter
def naturaltime(value):
"""
For date and time values shows how many seconds, minutes or hours ago
compared to current timestamp returns representing string.
"""
if not isinstance(value, date): # datetime is a subclass of date
return value
now = datetime.now(utc if is_aware(value) else None)
if value < now:
delta = now - value
if delta.days != 0:
return pgettext(
'naturaltime', '%(delta)s ago'
) % {'delta': defaultfilters.timesince(value, now)}
elif delta.seconds == 0:
return _('now')
elif delta.seconds < 60:
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a second ago', '%(count)s seconds ago', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a minute ago', '%(count)s minutes ago', count
) % {'count': count}
else:
count = delta.seconds // 60 // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'an hour ago', '%(count)s hours ago', count
) % {'count': count}
else:
delta = value - now
if delta.days != 0:
return pgettext(
'naturaltime', '%(delta)s from now'
) % {'delta': defaultfilters.timeuntil(value, now)}
elif delta.seconds == 0:
return _('now')
elif delta.seconds < 60:
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a second from now', '%(count)s seconds from now', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a minute from now', '%(count)s minutes from now', count
) % {'count': count}
else:
count = delta.seconds // 60 // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'an hour from now', '%(count)s hours from now', count
) % {'count': count}
|
benschmaus/catapult | refs/heads/master | trace_processor/trace_uploader/appengine_config.py | 3 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""App Engine config.
This module is loaded before others and can be used to set up the
App Engine environment. See:
https://cloud.google.com/appengine/docs/python/tools/appengineconfig
"""
import os
from google.appengine.ext import vendor
appstats_SHELL_OK = True
# Directories in catapult/third_party required by uploader/corpus cleanup.
THIRD_PARTY_LIBRARIES = [
'apiclient',
'uritemplate',
]
# Directories in trace_processor/third_party required by uploader/corpus
# cleanup.
THIRD_PARTY_LIBRARIES_IN_TRACE_PROCESSOR = [
'cloudstorage',
]
# Libraries bundled with the App Engine SDK.
THIRD_PARTY_LIBRARIES_IN_SDK = [
'httplib2',
'oauth2client',
'six',
]
def _AddThirdPartyLibraries():
"""Registers the third party libraries with App Engine.
In order for third-party libraries to be available in the App Engine
runtime environment, they must be added with vendor.add. The directories
added this way must be inside the App Engine project directory.
"""
# The deploy script is expected to add links to third party libraries
# before deploying. If the directories aren't there (e.g. when running tests)
# then just ignore it.
for library_dir in (THIRD_PARTY_LIBRARIES +
THIRD_PARTY_LIBRARIES_IN_TRACE_PROCESSOR +
THIRD_PARTY_LIBRARIES_IN_SDK):
    # Resolve the path relative to this file so the existence check and the
    # path handed to vendor.add() always agree, regardless of the current
    # working directory.
    library_path = os.path.join(os.path.dirname(__file__), library_dir)
    if os.path.exists(library_path):
      vendor.add(library_path)
_AddThirdPartyLibraries()
|
jeremiahmarks/sl4a | refs/heads/master | python/src/Lib/distutils/bcppcompiler.py | 53 | """distutils.bcppcompiler
Contains BorlandCCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bcppcompiler.py 61000 2008-02-23 17:40:11Z christian.heimes $"
import os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError, UnknownFileError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
            # XXX why do we normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError, msg:
raise CompileError, msg
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
print msg
raise CompileError, msg
# preprocess()
|
cpyou/odoo | refs/heads/master | addons/hr_timesheet_invoice/report/account_analytic_profit.py | 281 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp.osv import osv
class account_analytic_profit(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_analytic_profit, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'lines': self._lines,
'user_ids': self._user_ids,
'journal_ids': self._journal_ids,
'line': self._line,
})
def _user_ids(self, lines):
user_obj = self.pool['res.users']
ids=list(set([b.user_id.id for b in lines]))
return user_obj.browse(self.cr, self.uid, ids)
def _journal_ids(self, form, user_id):
if isinstance(user_id, (int, long)):
user_id = [user_id]
line_obj = self.pool['account.analytic.line']
journal_obj = self.pool['account.analytic.journal']
line_ids=line_obj.search(self.cr, self.uid, [
('date', '>=', form['date_from']),
('date', '<=', form['date_to']),
('journal_id', 'in', form['journal_ids'][0][2]),
('user_id', 'in', user_id),
])
ids=list(set([b.journal_id.id for b in line_obj.browse(self.cr, self.uid, line_ids)]))
return journal_obj.browse(self.cr, self.uid, ids)
def _line(self, form, journal_ids, user_ids):
line_obj = self.pool['account.analytic.line']
product_obj = self.pool['product.product']
price_obj = self.pool['product.pricelist']
ids=line_obj.search(self.cr, self.uid, [
('date', '>=', form['date_from']),
('date', '<=', form['date_to']),
('journal_id', 'in', journal_ids),
('user_id', 'in', user_ids),
])
res={}
for line in line_obj.browse(self.cr, self.uid, ids):
if line.account_id.pricelist_id:
if line.account_id.to_invoice:
if line.to_invoice:
id=line.to_invoice.id
name=line.to_invoice.name
discount=line.to_invoice.factor
else:
name="/"
discount=1.0
id = -1
else:
name="Fixed"
discount=0.0
id=0
pl=line.account_id.pricelist_id.id
price=price_obj.price_get(self.cr, self.uid, [pl], line.product_id.id, line.unit_amount or 1.0, line.account_id.partner_id.id)[pl]
else:
name="/"
discount=1.0
id = -1
price=0.0
if id not in res:
res[id]={'name': name, 'amount': 0, 'cost':0, 'unit_amount':0,'amount_th':0}
xxx = round(price * line.unit_amount * (1-(discount or 0.0)), 2)
res[id]['amount_th']+=xxx
if line.invoice_id:
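                # Pro-rate the theoretical amount by the ratio between the
                # invoice's untaxed total and the pricelist value of every
                # analytic line attached to the same invoice.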
self.cr.execute('select id from account_analytic_line where invoice_id=%s', (line.invoice_id.id,))
tot = 0
for lid in self.cr.fetchall():
lid2 = line_obj.browse(self.cr, self.uid, lid[0])
pl=lid2.account_id.pricelist_id.id
price=price_obj.price_get(self.cr, self.uid, [pl], lid2.product_id.id, lid2.unit_amount or 1.0, lid2.account_id.partner_id.id)[pl]
tot += price * lid2.unit_amount * (1-(discount or 0.0))
if tot:
procent = line.invoice_id.amount_untaxed / tot
res[id]['amount'] += xxx * procent
else:
res[id]['amount'] += xxx
else:
res[id]['amount'] += xxx
res[id]['cost']+=line.amount
res[id]['unit_amount']+=line.unit_amount
for id in res:
res[id]['profit']=res[id]['amount']+res[id]['cost']
res[id]['eff']=res[id]['cost'] and '%d' % (-res[id]['amount'] / res[id]['cost'] * 100,) or 0.0
return res.values()
def _lines(self, form):
line_obj = self.pool['account.analytic.line']
ids=line_obj.search(self.cr, self.uid, [
('date', '>=', form['date_from']),
('date', '<=', form['date_to']),
('journal_id', 'in', form['journal_ids'][0][2]),
('user_id', 'in', form['employee_ids'][0][2]),
])
return line_obj.browse(self.cr, self.uid, ids)
class report_account_analytic_profit(osv.AbstractModel):
_name = 'report.hr_timesheet_invoice.report_analyticprofit'
_inherit = 'report.abstract_report'
_template = 'hr_timesheet_invoice.report_analyticprofit'
_wrapped_report_class = account_analytic_profit
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Essk/FreeCodeCamp-Projects | refs/heads/master | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/setup.py | 2462 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from setuptools import setup
setup(
name='gyp',
version='0.1',
description='Generate Your Projects',
author='Chromium Authors',
author_email='chromium-dev@googlegroups.com',
url='http://code.google.com/p/gyp',
package_dir = {'': 'pylib'},
packages=['gyp', 'gyp.generator'],
entry_points = {'console_scripts': ['gyp=gyp:script_main'] }
)
|
ya7lelkom/googleads-adsensehost-examples | refs/heads/master | python/v4.x/update_ad_unit_on_publisher.py | 2 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates an ad unit on a publisher ad client.
To get ad clients, run get_all_ad_clients_for_publisher.py.
To get ad units, run get_all_ad_units_for_publisher.py.
Tags: accounts.adunits.patch
"""
__author__ = 'jalc@google.com (Jose Alcerreca)'
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'account_id',
help='The ID of the publisher account on which the ad unit exists')
argparser.add_argument(
'ad_client_id',
help='The ID of the ad client on which the ad unit exists')
argparser.add_argument(
'ad_unit_id',
help='The ID of the ad unit to be updated')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'adsensehost', 'v4.1', __doc__, __file__, parents=[argparser])
account_id = flags.account_id
ad_client_id = flags.ad_client_id
ad_unit_id = flags.ad_unit_id
try:
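    # patch() performs a partial update, so a sparse body like this should
    # change only the supplied field (the custom text colour).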
ad_unit = {'customStyle': {'colors': {'text': 'ff0000'}}}
# Update ad unit text color.
request = service.accounts().adunits().patch(accountId=account_id,
adClientId=ad_client_id,
adUnitId=ad_unit_id,
body=ad_unit)
result = request.execute()
print ('Ad unit with ID "%s" was updated with text color "%s".'
% (result['id'], result['customStyle']['colors']['text']))
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
chudaol/edx-platform | refs/heads/master | cms/tests/test_startup.py | 154 | """
Test cms startup
"""
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from cms.startup import run, enable_theme
class StartupTestCase(TestCase):
"""
Test cms startup
"""
def setUp(self):
super(StartupTestCase, self).setUp()
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": True})
@override_settings(THEME_NAME="bar")
def test_run_with_theme(self):
self.assertEqual(settings.FEATURES["USE_CUSTOM_THEME"], True)
with patch('cms.startup.enable_theme') as mock_enable_theme:
run()
self.assertTrue(mock_enable_theme.called)
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": False})
def test_run_without_theme(self):
self.assertEqual(settings.FEATURES["USE_CUSTOM_THEME"], False)
with patch('cms.startup.enable_theme') as mock_enable_theme:
run()
self.assertFalse(mock_enable_theme.called)
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": True})
@override_settings(THEME_NAME="bar")
@override_settings(FAVICON_PATH="images/favicon.ico")
def test_enable_theme(self):
enable_theme()
self.assertEqual(
settings.FAVICON_PATH,
'themes/bar/images/favicon.ico'
)
exp_path = (u'themes/bar', settings.ENV_ROOT / "themes/bar/static")
self.assertIn(exp_path, settings.STATICFILES_DIRS)
|
mach0/QGIS | refs/heads/master | tests/src/python/test_qgsprocessingalgrunner.py | 30 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for Processing algorithm runner(s).
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '2019-02'
__copyright__ = 'Copyright 2019, The QGIS Project'
import re
from qgis.PyQt.QtCore import QCoreApplication
from qgis.testing import start_app, unittest
from processing.core.Processing import Processing
from processing.core.ProcessingConfig import ProcessingConfig
from qgis.analysis import QgsNativeAlgorithms
from qgis.core import (
QgsApplication,
QgsSettings,
QgsProcessingContext,
QgsProcessingAlgRunnerTask,
QgsProcessingAlgorithm,
QgsProject,
QgsProcessingFeedback,
)
start_app()
class ConsoleFeedBack(QgsProcessingFeedback):
_error = ''
def reportError(self, error, fatalError=False):
self._error = error
print(error)
class CrashingProcessingAlgorithm(QgsProcessingAlgorithm):
"""
Wrong class in factory createInstance()
"""
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
def tr(self, string):
return QCoreApplication.translate('Processing', string)
def createInstance(self):
"""Wrong!"""
return ExampleProcessingAlgorithm() # noqa
def name(self):
return 'mycrashingscript'
def displayName(self):
return self.tr('My Crashing Script')
def group(self):
return self.tr('Example scripts')
def groupId(self):
return 'examplescripts'
def shortHelpString(self):
return self.tr("Example algorithm short description")
def initAlgorithm(self, config=None):
pass
def processAlgorithm(self, parameters, context, feedback):
return {self.OUTPUT: 'an_id'}
class TestQgsProcessingAlgRunner(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain(
"QGIS_TestPyQgsProcessingInPlace.com")
QCoreApplication.setApplicationName("QGIS_TestPyQgsProcessingInPlace")
QgsSettings().clear()
Processing.initialize()
QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())
cls.registry = QgsApplication.instance().processingRegistry()
def test_bad_script_dont_crash(self): # spellok
"""Test regression #21270 (segfault)"""
context = QgsProcessingContext()
context.setProject(QgsProject.instance())
feedback = ConsoleFeedBack()
task = QgsProcessingAlgRunnerTask(CrashingProcessingAlgorithm(), {}, context=context, feedback=feedback)
self.assertTrue(task.isCanceled())
self.assertIn('name \'ExampleProcessingAlgorithm\' is not defined', feedback._error)
if __name__ == '__main__':
unittest.main()
|
gangadhar-kadam/prjlib | refs/heads/master | core/__init__.py | 107 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals |
mattnenterprise/servo | refs/heads/master | tests/wpt/web-platform-tests/service-workers/service-worker/resources/update-nocookie-worker.py | 158 | import time
def main(request, response):
# no-cache itself to ensure the user agent finds a new version for each update.
headers = [('Cache-Control', 'no-cache, must-revalidate'),
('Pragma', 'no-cache')]
# Set a normal mimetype.
content_type = 'application/javascript'
headers.append(('Content-Type', content_type))
# Return a different script for each access. Use .time() and .clock() for
# best time resolution across different platforms.
return headers, '// %s %s' % (time.time(), time.clock())
|
LohithBlaze/scikit-learn | refs/heads/master | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
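# Columns 0-1 of X and Y share the latent l1, columns 2-3 share l2, plus unit
# Gaussian noise -- so PLS should recover two correlated component pairs.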
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
|
leafclick/intellij-community | refs/heads/master | python/testData/inspections/PyTypeCheckerInspection/IterateOverDictValueWhenItsTypeIsUnion.py | 25 | KWARGS = {
"do_stuff": True,
"little_list": ['WORLD_RET_BP_IMPALA_AB.Control', 'WORLD_RET_BP_IMPALA_AB.Impala_WS'],
}
for element in KWARGS["little_list"]:
print(element) |
waytai/django | refs/heads/master | tests/shortcuts/views.py | 252 | import os.path
from django.shortcuts import render, render_to_response
from django.template import Context, RequestContext
from django.utils._os import upath
dirs = (os.path.join(os.path.dirname(upath(__file__)), 'other_templates'),)
def render_to_response_view(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_multiple_templates(request):
return render_to_response([
'shortcuts/no_such_template.html',
'shortcuts/render_test.html',
], {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_request_context(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=RequestContext(request))
def render_to_response_view_with_content_type(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_to_response_view_with_dirs(request):
return render_to_response('render_dirs_test.html', dirs=dirs)
def render_to_response_view_with_status(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_to_response_view_with_using(request):
using = request.GET.get('using')
return render_to_response('shortcuts/using.html', using=using)
def context_processor(request):
return {'bar': 'context processor output'}
def render_to_response_with_context_instance_misuse(request):
context_instance = RequestContext(request, {}, processors=[context_processor])
# Incorrect -- context_instance should be passed as a keyword argument.
return render_to_response('shortcuts/render_test.html', context_instance)
def render_view(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_multiple_templates(request):
return render(request, [
'shortcuts/no_such_template.html',
'shortcuts/render_test.html',
], {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_base_context(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=Context())
def render_view_with_content_type(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_with_dirs(request):
return render(request, 'render_dirs_test.html', dirs=dirs)
def render_view_with_status(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_view_with_using(request):
using = request.GET.get('using')
return render(request, 'shortcuts/using.html', using=using)
def render_view_with_current_app(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app")
def render_view_with_current_app_conflict(request):
    # This should fail because we're passing both a current_app and a
    # context_instance:
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app", context_instance=RequestContext(request))
|
narantech/linux-rpi2 | refs/heads/master | tools/perf/util/setup.py | 989 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
ericholscher/pip-1 | refs/heads/develop | tests/packages/LineEndings/setup.py | 72 | from distutils.core import setup
setup()
|
Fruit-Snacks/aima-python | refs/heads/master | submissions/Miles/puzzles.py | 18 | import search
from math import (cos, pi)
alabama_map = search.UndirectedGraph(dict(
Birmingham=dict(Tuscaloosa=45, Auburn=120, Montgomery=86, Huntsville=90, Mobile=219, Dothan=197),
Tuscaloosa=dict(Birmingham=45, Auburn=160, Montgomery=110, Huntsville=140, Mobile=211, Dothan=227),
Auburn=dict(Birmingham=120, Tuscaloosa=160, Montgomery=57, Huntsville=212, Mobile=195, Dothan=130),
Huntsville=dict(Birmingham=90, Tuscaloosa=140, Montgomery=166, Auburn=212, Mobile=302, Dothan=279),
Montgomery=dict(Birmingham=86, Tuscaloosa=110, Auburn=57, Huntsville=166, Mobile=144, Dothan=120),
Mobile=dict(Birmingham=219, Tuscaloosa=211, Auburn=195, Montgomery=144, Huntsville=302, Dothan=184),
Dothan=dict(Birmingham=197, Tuscaloosa=227, Auburn=130, Montgomery=120, Huntsville=279, Mobile=184),
Gardendale=dict(Birmingham=21),
Fairhope=dict(Mobile=26, Birmingham=237)
))
alabama_map.locations = dict(
Birmingham=(50, 300), Tuscaloosa=(20, 270), Auburn=(50, 180),
Montgomery=(45, 214), Huntsville=(50, 390), Mobile=(10, 85),
Dothan=(100, 170), Gardendale=(50, 321), Fairhope=(10, 59))
alabama_puzzle = search.GraphProblem('Fairhope', 'Tuscaloosa', alabama_map)
alabama_puzzle.description = '''
An abbreviated map of Middle Alabama.
This map is unique, to the best of my knowledge.
'''
# A trivial Problem definition of connect four
# The goal is to get either 4 x's in a row or 4 o's in a row
# The x's and o's represent the colors red and yellow
class ConnectFour(search.Problem):
def actions(self, state):
# return connect_four
Red = 'X' # the player
Yellow = 'O' # the computer
player1 = 'Winner'
        # Store the canned boards on the instance so that result() can reach
        # them; the search framework calls actions() before result().
        self.state1 = ConnectFour([['O', 'O', 'O', 'O'],
                                   ['O', 'O', 'O', 'O'],
                                   ['O', 'O', 'O', 'O'],
                                   ['O', 'O', 'O', 'O'],
                                   ])
        self.state2 = ConnectFour([['X', 'O', 'O', 'O'],
                                   ['O', 'X', 'O', 'O'],
                                   ['O', 'O', 'X', 'O'],
                                   ['O', 'O', 'O', 'X'],
                                   ])
        self.state3 = ConnectFour([['X', 'O', 'O', 'O'],
                                   ['X', 'O', 'O', 'O'],
                                   ['X', 'O', 'O', 'O'],
                                   ['X', 'O', 'O', 'O'],
                                   ])
        self.state4 = ConnectFour([['O', 'X', 'O', 'O'],
                                   ['O', 'X', 'O', 'O'],
                                   ['O', 'X', 'O', 'O'],
                                   ['O', 'X', 'O', 'O'],
                                   ])
        self.state5 = ConnectFour([['O', 'O', 'X', 'O'],
                                   ['O', 'O', 'X', 'O'],
                                   ['O', 'O', 'X', 'O'],
                                   ['O', 'O', 'X', 'O'],
                                   ])
        return self.state1
    def result(self, state, action):
        if action == 'X':
            return self.state2
        else:
            return self.state1
def goal_test(self, state):
return state
def h(self, node):
state = node.state
if self.goal_test(state):
return 1
else:
return -1
miles_puzzle = ConnectFour('X')
miles_puzzle.label = 'Connect Four'
myPuzzles = [
alabama_puzzle,
miles_puzzle
]
|
IFGHou/honeyd | refs/heads/master | regress/regress.py | 2 | #!/usr/bin/env python
#
# Copyright (c) 2004 Niels Provos <provos@citi.umich.edu>
# All rights reserved.
#
# Honeyd Regression Framework Class
#
import os
import sys
import getopt
import popen2
import time
import fcntl
import string
import re
import dpkt
import pcap
import dnet
import tempfile # Note: we use mkstemp, which is only available in Python 2.3+
import filecmp
class Test:
def __init__(self):
# Precheck
self.name = "<Unknown>"
self.expect = None
self.packets = []
self.Setup()
def Expect(self):
return self.expect
def Name(self):
return self.name
def Send(self):
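        # Replay the prepared packets through a raw dnet IP socket: fix up the
        # IP checksum on each one and pace the sends so honeyd keeps up.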
ip = dnet.ip()
for pkt in self.packets:
data = str(pkt)
data = dnet.ip_checksum(data)
ip.send(data)
time.sleep(0.10)
class regress:
def __init__(self, name, cmd, config, debug=0):
self.testname = name
self.debug = debug
self.config = config
self.cmd = cmd
self.oktests = 0
self.runtests = 0
self.generate = 0
self.pidfile = '/var/run/honeyd.pid'
self.interface = self.find_loopback()
try:
os.stat(self.cmd)
except OSError:
print >>sys.stderr, 'Cannot find honeyd program "%s" ' % self.cmd
sys.exit(1)
# XXX - we might need to add other configuration file options in here,
# in order to avoid it using installed configuration files
# (in Debian under /etc/honeypot/)
self.command = ('%s --disable-webserver --disable-update -R 1 -d '
'-i %s -f %s 192.18.0.0/15') % (
self.cmd, self.interface, '%s')
try:
os.stat(self.config)
except OSError:
print >>sys.stderr, (
'Configuration file "%s" does not exist, aborting.' %
self.config )
sys.exit(1)
# Test the configuration file
if self.testconfig(self.cmd, self.config) != 0:
print >>sys.stderr, 'Cannot use file "%s", aborting,' % self.config
sys.exit(1)
self.regexps = [ [re.compile(r'\['), r'\['],
[re.compile(r'\('), r'\('],
[re.compile(r'\.'), r'\.'],
[re.compile(r'\]'), r'\]'],
[re.compile(r'\)'), r'\)'],
[re.compile(r'\*'), r'.*'],
[re.compile(r'\?'), r'.'],
[re.compile(r'\s+'), r'\s+']
]
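        # The table above escapes regex metacharacters and maps glob-style
        # wildcards ('*', '?') and runs of whitespace onto real regular
        # expressions; it appears intended for matching expected-output files
        # (see the commented-out note in match()).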
if self.VerifyRoute() != 0:
print >>sys.stderr, 'There was an error adding the route'
print >>sys.stderr, 'Testing "%s" behavior:' % self.testname
def find_cmd(self,cmd):
""" Find the cmd binary of the running system """
dirs = [ '/', '/usr/', '/usr/local/', sys.prefix ]
for d in dirs:
for sd in ('bin', 'sbin'):
for name in ('dnet', 'dumbnet'):
location = os.path.join(d, sd, cmd)
if os.path.exists(location):
return location
return 0
def find_loopback(self):
""" Find which is the loopback interface in this system, use
dnet for that
"""
ifs = dnet.intf()
interfaces = []
ifs.loop(lambda x,y: interfaces.append(x), None)
for intf in interfaces:
if intf['flags'] & dnet.INTF_FLAG_LOOPBACK:
if self.debug:
print >>sys.stderr, 'Loopback interface: ', intf['name']
return intf['name']
if self.debug:
print >>sys.stderr, 'Failed to find loopback interface'
return None
def find_running_proc(self, name):
# XXX - is this portable enough?
file = os.popen("ps -o pid=,command= 2>/dev/null", 'r')
# XXX - we only read a line, but there might be more than
# one instances there
for line in file:
res = re.search('\s*(\d+) %s' % name, line)
if res:
return int(res.group(1))
return None
def AddRoute(self, network, gw):
"""Verifies that the route points to localhost."""
network = dnet.addr(network)
gw = dnet.addr(gw)
router = dnet.route()
error = 0
try:
res = router.delete(network)
except OSError:
if self.debug:
print >>sys.stderr, "Cannot remove route: ", network
try:
res = router.add(network, gw)
except OSError:
if self.debug:
print >>sys.stderr, "Cannot add route: ", network
error = 1
if error:
return 1
else:
return 0
def VerifyRoute(self):
""" Adds the test routes, currently reserved by the RFC:
network equiment test network - 192.18.0.0/15 and
'test net' network - 192.0.2.0/24
"""
if self.AddRoute('192.0.2.0/24', '127.0.0.1'):
return 1
if self.AddRoute('192.18.0.0/15', '127.0.0.1'):
return 1
return 0
# XXX - what's the method for destructing objects? we should
# call this there to cleanup
def RemoveRoute(self, network):
"""Removes the route pointing to localhost."""
network = dnet.addr(network)
router = dnet.route()
error = 0
try:
res = router.delete(network)
except OSError:
if self.debug:
print >>sys.stderr, "Cannot remove route: ", network
error = 1
if error:
return 1
else:
return 0
def RemoveAllRoutes(self):
"""Removes all the routes."""
self.RemoveRoute('192.0.2.0/24')
self.RemoveRoute('192.18.0.0/15')
def match(self, got, should):
if filecmp.cmp(got,should):
return 1
else:
# If we are debugging go finegrain, read the files and compare them
if self.debug:
gotr = open(got)
shouldr = open(should)
count = 0
lineg = "start"
lines = "start"
while len(lineg) and len(lines):
lineg = gotr.readline()
lines = shouldr.readline()
count +=1
if lineg != lines:
print "Differ on line %d" % count
print "-%s", lines.splitlines()
print "+%s", lineg.splitlines()
return 0
    # XXX we already use filecmp and compare line by line but this might be
# useful if we want to have regular expressions in the output files
# tcpdump = self.tcpfr.read()
# self.tcpdump = []
# for line in string.split(tcpdump, '\n'):
# if not len(line):
# continue
# self.tcpdump.append(line)
# tcpdumperr = self.tcpfe.read()
# self.tcpfr.close()
# self.tcpfe.close()
def fail(self):
print >>sys.stderr, 'FAILED'
# XXX - We might not want to fail here, we will count the tests when ok()
sys.exit(1)
def ok(self):
""" Print the final result of tests """
if self.runtests == self.oktests:
print >>sys.stderr, ' OK (%d)' % self.oktests
sys.exit(0)
else:
failed = self.runtests-self.oktests
print >>sys.stderr, ' FAILED (%u/%u)' % (failed, self.runtests)
sys.exit(1)
def finish(self):
""" Finishes the tests, prints the results and removes the routes """
self.RemoveAllRoutes()
if not self.generate:
self.ok()
def testconfig(self, cmd, config):
command = ('%s --disable-webserver --disable-update --verify-config '
'-i %s -f %s 192.18.0.0/15 >/dev/null 2>&1') % (cmd, self.interface, config)
if self.debug:
print >>sys.stderr, 'Running "%s"' % command
errorcode = os.system(command)
if self.debug and errorcode != 0:
print 'Error testing honeyd configuration file returned: ', errorcode
return errorcode
def run(self, test):
self.stop_honeyd()
self.start_pcap()
self.start_honeyd(self.config)
self.outputfile = test.Expect()
print >>sys.stderr, '\tRunning %s: ' % test.Name(),
self.runtests +=1
sys.stderr.flush()
# Send all the packets
test.Send()
time.sleep(1)
self.stop_honeyd()
self.stop_pcap()
if not self.generate:
if self.compare() == 0:
print >>sys.stderr, 'OK'
self.oktests += 1
# Clean up the temporary file unless debugging
if not self.debug:
try:
os.remove(self.dpktfile)
except IOError:
print >>sys.stderr, "Expected temporary file %s does not exist" % self.dpktfile
else:
# We want to use the results we generated instead of comparing them
if self.Rename(self.dpktfile, self.outputfile):
print >>sys.stderr, "Cannot move over the auto-generated file"
sys.exit(1)
print >>sys.stderr, "Generated output file '%s'" % self.outputfile
def Rename(self, src, dst):
try:
error = 0
os.rename(src, dst)
except OSError:
if self.debug:
print >>sys.stderr, "Rename %s -> %s failed" % (src, dst)
error = 1
if not error:
return 0
# Open and copy
error = 0
try:
data = open(src, 'r').read()
open(dst, 'w').write(data)
except OSError:
error = 1
if error:
return 1
else:
return 0
def compare(self):
try:
os.stat(self.outputfile)
except OSError:
print >>sys.stderr, 'Expected results file "%s" not found' % self.outputfile
self.fail()
return 1
try:
os.stat(self.dpktfile)
except OSError:
print >>sys.stderr, 'We lost the file with the output "%s"!' % self.dpktfile
self.fail()
return 1
if os.stat(self.dpktfile).st_size != os.stat(self.outputfile).st_size:
# Not a direct failure, but worth mentioning
if self.debug:
print >>sys.stderr, 'Results are of different length'
if not self.match(self.dpktfile,self.outputfile):
if self.debug:
print >>sys.stderr, 'Results differ'
self.fail()
return 1
# If we get here comparison is OK
return 0
def set_nonblock(self, fd):
# set it to non-blocking mode
flags = fcntl.fcntl (fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl (fd, fcntl.F_SETFL, flags)
def start_honeyd(self, filename):
(fw, fr, self.fe) = popen2.popen3(self.command % filename, 0)
fw.close()
fr.close()
self.set_nonblock(self.fe.fileno())
time.sleep(2)
def start_pcap(self):
(self.dpktfh, self.dpktfile) = tempfile.mkstemp()
if self.debug:
print "Starting pcap capture, saving in file", self.dpktfile
self.pid_pcap = os.fork()
if self.pid_pcap == 0:
# Child, reads pcap, outputs to a file in dpkt format
pc = 0
try:
pc = pcap.pcap(self.interface)
except:
print >>sys.stderr, "Cannot run packet filter, aborting"
sys.exit(1)
            # filter on our dedicated subnets
pc.setfilter('net 192.18.0.0/15 and net 192.0.2.0/24')
for ts, pkt in pc:
lp = dpkt.loopback.Loopback(pkt)
ip = dpkt.ip.IP(str(lp.data))
os.write(self.dpktfh, "SRC=" + dnet.ip_ntoa(ip.src) + "\n")
os.write(self.dpktfh, "DST=" + dnet.ip_ntoa(ip.dst) + "\n")
os.write(self.dpktfh, "ID=%d\n" % ip.id)
os.write(self.dpktfh, "TTL=%d\n" % ip.ttl)
os.write(self.dpktfh, "DATA=" + `ip.data` + "\n")
os.write(self.dpktfh, "\n")
            os._exit(0)  # make sure the forked child never returns to the caller
# Parent returns
return 0
def kill_pid(self, pid):
if self.debug:
print >>sys.stderr, "Killing honeyd pid:", pid
try:
os.kill(int(pid), 2)
return 0
except OSError:
return 1
def exists_pid(self, pid):
# XXX - is this portable enough?
        proc = os.popen("ps -o pid= -p %s" % pid, 'r')
        pid = proc.readline()
        proc.close()
if len(pid):
return True
else:
return False
def stop_honeyd(self):
pid = 0
try:
pid = open(self.pidfile, 'r').read()
except IOError:
if self.debug:
print >>sys.stderr, "No honeyd pidfile"
if pid != 0:
if self.exists_pid(pid):
show_error = self.kill_pid(pid)
# XXX that might not be us!
try:
self.honeyd = self.fe.read()
if show_error:
print "Failed to kill honeyd: ", self.honeyd
# Close all file descriptors
self.fe.close()
except:
if self.debug:
print >>sys.stderr, "Killed an instance of honeyd we did not run"
# Clean up the file
try:
os.remove(self.pidfile)
except:
print >>sys.stderr, "Cannot remove pidfile"
sys.exit(1)
else:
            # Hmm, we don't have a pid - is there another honeyd running?
pid = self.find_running_proc('honeyd')
if pid:
self.kill_pid(pid)
def stop_pcap(self):
time.sleep(1)
if self.debug:
print "Killing pcap capture, pid:", self.pid_pcap
os.kill(self.pid_pcap, 9)
time.sleep(1)
def usage():
print "Usage: %s [-d]" % sys.argv[0]
|
vipulkanade/EventbriteDjango | refs/heads/master | lib/python2.7/site-packages/django/core/files/locks.py | 725 | """
Portable file locking utilities.
Based partially on an example by Jonathan Feignberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, 'fileno') else f
if os.name == 'nt':
import msvcrt
from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
Structure, Union, POINTER, windll, byref)
from ctypes.wintypes import BOOL, DWORD, HANDLE
LOCK_SH = 0 # the default
LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY
LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK
# --- Adapted from the pyserial project ---
# detect size of ULONG_PTR
if sizeof(c_ulong) != sizeof(c_void_p):
ULONG_PTR = c_int64
else:
ULONG_PTR = c_ulong
PVOID = c_void_p
# --- Union inside Structure by stackoverflow:3480240 ---
class _OFFSET(Structure):
_fields_ = [
('Offset', DWORD),
('OffsetHigh', DWORD)]
class _OFFSET_UNION(Union):
_anonymous_ = ['_offset']
_fields_ = [
('_offset', _OFFSET),
('Pointer', PVOID)]
class OVERLAPPED(Structure):
_anonymous_ = ['_offset_union']
_fields_ = [
('Internal', ULONG_PTR),
('InternalHigh', ULONG_PTR),
('_offset_union', _OFFSET_UNION),
('hEvent', HANDLE)]
LPOVERLAPPED = POINTER(OVERLAPPED)
# --- Define function prototypes for extra safety ---
LockFileEx = windll.kernel32.LockFileEx
LockFileEx.restype = BOOL
LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
UnlockFileEx = windll.kernel32.UnlockFileEx
UnlockFileEx.restype = BOOL
UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]
def lock(f, flags):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
def unlock(f):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
else:
try:
import fcntl
LOCK_SH = fcntl.LOCK_SH # shared lock
LOCK_NB = fcntl.LOCK_NB # non-blocking
LOCK_EX = fcntl.LOCK_EX
except (ImportError, AttributeError):
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = 0
# Dummy functions that don't do anything.
def lock(f, flags):
# File is not locked
return False
def unlock(f):
# File is unlocked
return True
else:
def lock(f, flags):
ret = fcntl.flock(_fd(f), flags)
return (ret == 0)
def unlock(f):
ret = fcntl.flock(_fd(f), fcntl.LOCK_UN)
return (ret == 0)
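# A minimal non-blocking usage sketch (assuming POSIX, where a contended
# LOCK_EX | LOCK_NB flock() raises IOError instead of blocking; the POSIX
# lock() above also returns flock()'s None compared against 0, i.e. False,
# so test for the exception rather than the return value):
#     from django.core.files import locks
#     with open('/tmp/job.lock', 'wb') as f:
#         try:
#             locks.lock(f, locks.LOCK_EX | locks.LOCK_NB)
#             f.write('working')  # critical section
#         finally:
#             locks.unlock(f)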
|
jcorrius/go-oo-mingw32-soc | refs/heads/master | scratch/mso-dumper/ppt-dump.py | 1 | #!/usr/bin/env python
########################################################################
#
# OpenOffice.org - a multi-platform office productivity suite
#
# Author:
# Kohei Yoshida <kyoshida@novell.com>
# Thorsten Behrens <tbehrens@novell.com>
#
# The Contents of this file are made available subject to the terms
# of GNU Lesser General Public License Version 2.1 and any later
# version.
#
########################################################################
import sys, os.path, getopt
sys.path.append(sys.path[0]+"/src")
import ole, pptstream, globals
from globals import error
def usage (exname):
exname = os.path.basename(exname)
msg = """Usage: %s [options] [ppt file]
Options:
--help displays this help message.
"""%exname
print msg
class PPTDumper(object):
def __init__ (self, filepath, params):
self.filepath = filepath
self.params = params
def __printDirHeader (self, dirname, byteLen):
dirname = globals.decodeName(dirname)
print("")
print("="*68)
print("%s (size: %d bytes)"%(dirname, byteLen))
print("-"*68)
def dump (self):
file = open(self.filepath, 'rb')
strm = pptstream.PPTFile(file.read(), self.params)
file.close()
strm.printStreamInfo()
strm.printHeader()
strm.printDirectory()
dirnames = strm.getDirectoryNames()
result = True
for dirname in dirnames:
if len(dirname) == 0 or dirname == 'Root Entry':
continue
dirstrm = strm.getDirectoryStreamByName(dirname)
self.__printDirHeader(dirname, len(dirstrm.bytes))
if dirname == "PowerPoint Document":
if not self.__readSubStream(dirstrm):
result = False
elif dirname == "Current User":
if not self.__readSubStream(dirstrm):
result = False
else:
globals.dumpBytes(dirstrm.bytes, 512)
return result
def __readSubStream (self, strm):
# read all records in substream
return strm.readRecords()
def main (args):
exname, args = args[0], args[1:]
if len(args) < 1:
print("takes at least one argument")
usage(exname)
return
params = globals.Params()
try:
opts, args = getopt.getopt(args, "h", ["help", "debug", "show-sector-chain"])
for opt, arg in opts:
if opt in ['-h', '--help']:
usage(exname)
return
elif opt in ['--debug']:
params.debug = True
elif opt in ['--show-sector-chain']:
params.showSectorChain = True
else:
error("unknown option %s\n"%opt)
usage()
except getopt.GetoptError:
error("error parsing input options\n")
usage(exname)
return
dumper = PPTDumper(args[0], params)
if not dumper.dump():
error("FAILURE\n")
if __name__ == '__main__':
main(sys.argv)
|
Supersuuu/python | refs/heads/master | NKUCodingCat/0001/0001.py | 40 | #coding=utf-8
import os,time,random,hashlib
def md5(s):
    m = hashlib.md5()
    m.update(s)
    return m.hexdigest()
def salt():
return "%s"*5%tuple([random.randint(10000000,99999999) for i in range(5)])
res = [md5(salt()+str(time.time())) for i in range(200)]
path = os.path.split(os.path.realpath(__file__))[0]+"/"
f = open(path+"code.txt","w")
for i in res:
f.write(i+"\n")
f.close() |
QGB/shadowsocks | refs/heads/master | shadowsocks/crypto/util.py | 1032 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
# modified from ctypes.util
# ctypes.util.find_library just returns first result he found
# but we want to try them all
# because on Windows, users may have both 32bit and 64bit version installed
results = []
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
results.append(fname)
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
results.append(fname)
return results
def find_library(possible_lib_names, search_symbol, library_name):
import ctypes.util
from ctypes import CDLL
paths = []
if type(possible_lib_names) not in (list, tuple):
possible_lib_names = [possible_lib_names]
lib_names = []
for lib_name in possible_lib_names:
lib_names.append(lib_name)
lib_names.append('lib' + lib_name)
for name in lib_names:
if os.name == "nt":
paths.extend(find_library_nt(name))
else:
path = ctypes.util.find_library(name)
if path:
paths.append(path)
if not paths:
# We may get here when find_library fails because, for example,
# the user does not have sufficient privileges to access those
# tools underlying find_library on linux.
import glob
for name in lib_names:
patterns = [
'/usr/local/lib*/lib%s.*' % name,
'/usr/lib*/lib%s.*' % name,
'lib%s.*' % name,
'%s.dll' % name]
for pat in patterns:
files = glob.glob(pat)
if files:
paths.extend(files)
for path in paths:
try:
lib = CDLL(path)
if hasattr(lib, search_symbol):
logging.info('loading %s from %s', library_name, path)
return lib
else:
logging.warn('can\'t find symbol %s in %s', search_symbol,
path)
except Exception:
pass
return None
def run_cipher(cipher, decipher):
from os import urandom
import random
import time
BLOCK_SIZE = 16384
rounds = 1 * 1024
plain = urandom(BLOCK_SIZE * rounds)
results = []
pos = 0
print('test start')
start = time.time()
while pos < len(plain):
l = random.randint(100, 32768)
c = cipher.update(plain[pos:pos + l])
results.append(c)
pos += l
pos = 0
c = b''.join(results)
results = []
while pos < len(plain):
l = random.randint(100, 32768)
results.append(decipher.update(c[pos:pos + l]))
pos += l
end = time.time()
print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start)))
assert b''.join(results) == plain
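# run_cipher is meant to be driven from a concrete backend's self-test; a
# sketch assuming this package's OpenSSL backend (key/iv below are placeholder
# byte strings of the right length for aes-256-cfb):
#     from shadowsocks.crypto.openssl import OpenSSLCrypto
#     cipher = OpenSSLCrypto(b'aes-256-cfb', b'k' * 32, b'i' * 16, 1)
#     decipher = OpenSSLCrypto(b'aes-256-cfb', b'k' * 32, b'i' * 16, 0)
#     run_cipher(cipher, decipher)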
def test_find_library():
assert find_library('c', 'strcpy', 'libc') is not None
assert find_library(['c'], 'strcpy', 'libc') is not None
assert find_library(('c',), 'strcpy', 'libc') is not None
assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
'libcrypto') is not None
assert find_library('notexist', 'strcpy', 'libnotexist') is None
assert find_library('c', 'symbol_not_exist', 'c') is None
assert find_library(('notexist', 'c', 'crypto', 'eay32'),
'EVP_CipherUpdate', 'libc') is not None
if __name__ == '__main__':
test_find_library()
|
DONIKAN/django | refs/heads/master | tests/template_tests/filter_tests/test_yesno.py | 430 | from django.template.defaultfilters import yesno
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_true(self):
self.assertEqual(yesno(True), 'yes')
def test_false(self):
self.assertEqual(yesno(False), 'no')
def test_none(self):
self.assertEqual(yesno(None), 'maybe')
def test_true_arguments(self):
self.assertEqual(yesno(True, 'certainly,get out of town,perhaps'), 'certainly')
def test_false_arguments(self):
self.assertEqual(yesno(False, 'certainly,get out of town,perhaps'), 'get out of town')
def test_none_two_arguments(self):
self.assertEqual(yesno(None, 'certainly,get out of town'), 'get out of town')
def test_none_three_arguments(self):
self.assertEqual(yesno(None, 'certainly,get out of town,perhaps'), 'perhaps')
|
bhupennewalkar1337/erpnext | refs/heads/develop | erpnext/hub_node/doctype/hub_settings/hub_settings.py | 29 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, requests, json
from frappe.model.document import Document
from frappe.utils import cint, expand_relative_urls
from frappe import _
class HubSettings(Document):
hub_url = "http://localhost:8001"
def validate(self):
if cint(self.publish):
if not self.name_token:
self.register()
else:
self.update_seller_details()
self.publish_selling_items()
else:
if self.name_token:
self.unpublish()
def publish_selling_items(self):
"""Set `publish_in_hub`=1 for all Sales Items"""
for item in frappe.get_all("Item", fields=["name"],
filters={ "publish_in_hub": "0"}):
frappe.db.set_value("Item", item.name, "publish_in_hub", 1)
def register(self):
"""Register at hub.erpnext.com, save `name_token` and `access_token`"""
response = requests.post(self.hub_url + "/api/method/hub.hub.api.register", data=self.get_args())
response.raise_for_status()
response = response.json()
self.name_token = response.get("message").get("name")
self.access_token = response.get("message").get("access_token")
def unpublish(self):
"""Unpublish from hub.erpnext.com"""
response = requests.post(self.hub_url + "/api/method/hub.hub.api.unpublish", data={
"access_token": self.access_token
})
response.raise_for_status()
def update_seller_details(self):
"""Update details at hub.erpnext.com"""
args = self.get_args()
args["published"] = 1
response = requests.post(self.hub_url + "/api/method/hub.hub.api.update_seller", data={
"access_token": self.access_token,
"args": json.dumps(args)
})
response.raise_for_status()
def get_args(self):
return {
"seller_name": self.seller_name,
"seller_country": self.seller_country,
"seller_city": self.seller_city,
"seller_email": self.seller_email,
"seller_website": self.seller_website,
"seller_description": self.seller_description
}
def sync(self, verbose=True):
"""Sync items with hub.erpnext.com"""
if not self.publish:
if verbose:
frappe.msgprint(_("Publish to sync items"))
return
items = frappe.db.get_all("Item",
fields=["name", "item_name", "description", "image", "item_group"],
filters={"publish_in_hub": 1, "synced_with_hub": 0})
for item in items:
item.item_code = item.name
if item.image:
item.image = expand_relative_urls(item.image)
item_list = frappe.db.sql_list("select name from tabItem where publish_in_hub=1")
if items:
response = requests.post(self.hub_url + "/api/method/hub.hub.api.sync", data={
"access_token": self.access_token,
"items": json.dumps(items),
"item_list": json.dumps(item_list)
})
response.raise_for_status()
for item in items:
frappe.db.set_value("Item", item.name, "synced_with_hub", 1)
if verbose:
frappe.msgprint(_("{0} Items synced".format(len(items))))
else:
if verbose:
frappe.msgprint(_("Items already synced"))
|
kostaspl/SpiderMonkey38 | refs/heads/tmpbr | testing/mozbase/mozprofile/mozprofile/diff.py | 7 | #!/usr/bin/env python
"""
diff two profile summaries
"""
import difflib
import profile
import optparse
import os
import sys
__all__ = ['diff', 'diff_profiles']
def diff(profile1, profile2, diff_function=difflib.unified_diff):
profiles = (profile1, profile2)
parts = {}
parts_dict = {}
for index in (0, 1):
prof = profiles[index]
# first part, the path, isn't useful for diffing
parts[index] = prof.summary(return_parts=True)[1:]
parts_dict[index] = dict(parts[index])
# keys the first profile is missing
first_missing = [i for i in parts_dict[1]
if i not in parts_dict[0]]
parts[0].extend([(i, '') for i in first_missing])
# diffs
retval = []
for key, value in parts[0]:
other = parts_dict[1].get(key, '')
        value = value.strip()
        other = other.strip()
if key == 'Files':
# first line of files is the path; we don't care to diff that
value = '\n'.join(value.splitlines()[1:])
if other:
other = '\n'.join(other.splitlines()[1:])
value = value.splitlines()
other = other.splitlines()
section_diff = list(diff_function(value, other, profile1.profile, profile2.profile))
if section_diff:
retval.append((key, '\n'.join(section_diff)))
return retval
def diff_profiles(args=sys.argv[1:]):
# parse command line
usage = '%prog [options] profile1 profile2'
parser = optparse.OptionParser(usage=usage, description=__doc__)
options, args = parser.parse_args(args)
if len(args) != 2:
parser.error("Must give two profile paths")
missing = [arg for arg in args if not os.path.exists(arg)]
if missing:
parser.error("Profile not found: %s" % (', '.join(missing)))
# get the profile differences
diffs = diff(*([profile.Profile(arg)
for arg in args]))
# display them
while diffs:
key, value = diffs.pop(0)
print '[%s]:\n' % key
print value
if diffs:
print '-' * 4
if __name__ == '__main__':
diff_profiles()
|
alexdglover/shill-isms | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py | 1776 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
|
jeremiah-c-leary/vhdl-style-guide | refs/heads/master | vsg/tests/block/test_rule_202.py | 1 |
import os
import unittest
from vsg.rules import block
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_202_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_202_test_input.fixed.vhd'), lExpected, False)
class test_block_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_202(self):
oRule = block.rule_202()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'block')
self.assertEqual(oRule.identifier, '202')
lExpected = [20]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_202(self):
oRule = block.rule_202()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
j3ffhubb/neoteric | refs/heads/master | neoteric/resource/tracker.py | 2 | """
"""
from neoteric.util.debug import get_stack
class DuplicateResourceError(Exception):
pass
class ResourceTracker:
""" Each resource type should implement this class as a global
variable to prevent duplicate resource definitions.
To override properties of an existing object, retrieve it with
ResourceTracker[key_name] and modify, or remove it with
ResourceTracker.pop(key_name) and then instantiate a new resource
"""
def __init__(self):
self.resources = {}
self.tb = {}
def _clear(self):
""" This is only meant to be used for testing,
it is very unlikely that you would want to use
this in real life
"""
self.resources.clear()
self.tb.clear()
def pop(self, key):
self.resources.pop(key)
self.tb.pop(key)
def __getitem__(self, key):
return self.resources[key]
def __setitem__(self, key, value):
if key in self.resources:
msg = "Duplicate resource definition: "\
"{0} already defined as {1} "\
"from (most recent call last):".format(key, self[key])
msg += self.tb[key]
value._has_actioned = True # suppress GC error messages
raise DuplicateResourceError(msg)
self.resources[key] = value
self.tb[key] = get_stack()
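# Hypothetical usage, illustrating the override pattern from the docstring
# (TRACKER and the resource objects are placeholder names):
#     TRACKER = ResourceTracker()
#     TRACKER['pkg:nginx'] = first_resource        # first definition is recorded
#     existing = TRACKER['pkg:nginx']              # fetch and modify in place, or
#     TRACKER.pop('pkg:nginx')                     # remove it first...
#     TRACKER['pkg:nginx'] = replacement_resource  # ...so no DuplicateResourceError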
|
nikolas/lettuce | refs/heads/master | tests/functional/output_features/writes_to_console/writes_to_console_steps.py | 18 | import sys
from lettuce import step
@step('When I write to stdout')
def write_stdout(step):
print >> sys.stdout, "Badger"
@step('When I write to stderr')
def write_stderr(step):
print >> sys.stderr, "Mushroom"
@step('Then I am happy')
def happy(step):
pass
|
meghana1995/sympy | refs/heads/master | sympy/core/tests/test_complex.py | 69 | from sympy import (S, Symbol, sqrt, I, Integer, Rational, cos, sin, im, re, Abs,
exp, sinh, cosh, tan, tanh, conjugate, sign, cot, coth, pi, symbols,
expand_complex)
def test_complex():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
e = (a + I*b)*(a - I*b)
assert e.expand() == a**2 + b**2
assert sqrt(I) == (-1)**Rational(1, 4)
def test_conjugate():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
c = Symbol("c", imaginary=True)
d = Symbol("d", imaginary=True)
x = Symbol('x')
z = a + I*b + c + I*d
zc = a - I*b - c + I*d
assert conjugate(z) == zc
assert conjugate(exp(z)) == exp(zc)
assert conjugate(exp(I*x)) == exp(-I*conjugate(x))
assert conjugate(z**5) == zc**5
assert conjugate(abs(x)) == abs(x)
assert conjugate(sign(z)) == sign(zc)
assert conjugate(sin(z)) == sin(zc)
assert conjugate(cos(z)) == cos(zc)
assert conjugate(tan(z)) == tan(zc)
assert conjugate(cot(z)) == cot(zc)
assert conjugate(sinh(z)) == sinh(zc)
assert conjugate(cosh(z)) == cosh(zc)
assert conjugate(tanh(z)) == tanh(zc)
assert conjugate(coth(z)) == coth(zc)
def test_abs1():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
assert abs(a) == abs(a)
assert abs(-a) == abs(a)
assert abs(a + I*b) == sqrt(a**2 + b**2)
def test_abs2():
a = Symbol("a", real=False)
b = Symbol("b", real=False)
assert abs(a) != a
assert abs(-a) != a
assert abs(a + I*b) != sqrt(a**2 + b**2)
def test_evalc():
x = Symbol("x", real=True)
y = Symbol("y", real=True)
z = Symbol("z")
assert ((x + I*y)**2).expand(complex=True) == x**2 + 2*I*x*y - y**2
assert expand_complex(z**(2*I)) == (re((re(z) + I*im(z))**(2*I)) +
I*im((re(z) + I*im(z))**(2*I)))
assert expand_complex(
z**(2*I), deep=False) == I*im(z**(2*I)) + re(z**(2*I))
assert exp(I*x) != cos(x) + I*sin(x)
assert exp(I*x).expand(complex=True) == cos(x) + I*sin(x)
assert exp(I*x + y).expand(complex=True) == exp(y)*cos(x) + I*sin(x)*exp(y)
assert sin(I*x).expand(complex=True) == I * sinh(x)
assert sin(x + I*y).expand(complex=True) == sin(x)*cosh(y) + \
I * sinh(y) * cos(x)
assert cos(I*x).expand(complex=True) == cosh(x)
assert cos(x + I*y).expand(complex=True) == cos(x)*cosh(y) - \
I * sinh(y) * sin(x)
assert tan(I*x).expand(complex=True) == tanh(x) * I
assert tan(x + I*y).expand(complex=True) == (
sin(2*x)/(cos(2*x) + cosh(2*y)) +
I*sinh(2*y)/(cos(2*x) + cosh(2*y)))
assert sinh(I*x).expand(complex=True) == I * sin(x)
assert sinh(x + I*y).expand(complex=True) == sinh(x)*cos(y) + \
I * sin(y) * cosh(x)
assert cosh(I*x).expand(complex=True) == cos(x)
assert cosh(x + I*y).expand(complex=True) == cosh(x)*cos(y) + \
I * sin(y) * sinh(x)
assert tanh(I*x).expand(complex=True) == tan(x) * I
assert tanh(x + I*y).expand(complex=True) == (
(sinh(x)*cosh(x) + I*cos(y)*sin(y)) /
(sinh(x)**2 + cos(y)**2)).expand()
def test_pythoncomplex():
x = Symbol("x")
assert 4j*x == 4*x*I
assert 4j*x == 4.0*x*I
assert 4.1j*x != 4*x*I
def test_rootcomplex():
R = Rational
assert ((+1 + I)**R(1, 2)).expand(
complex=True) == 2**R(1, 4)*cos( pi/8) + 2**R(1, 4)*sin( pi/8)*I
assert ((-1 - I)**R(1, 2)).expand(
complex=True) == 2**R(1, 4)*cos(3*pi/8) - 2**R(1, 4)*sin(3*pi/8)*I
assert (sqrt(-10)*I).as_real_imag() == (-sqrt(10), 0)
def test_expand_inverse():
assert (1/(1 + I)).expand(complex=True) == (1 - I)/2
assert ((1 + 2*I)**(-2)).expand(complex=True) == (-3 - 4*I)/25
assert ((1 + I)**(-8)).expand(complex=True) == Rational(1, 16)
def test_expand_complex():
assert ((2 + 3*I)**10).expand(complex=True) == -341525 - 145668*I
# the following two tests are to ensure the SymPy uses an efficient
# algorithm for calculating powers of complex numbers. They should execute
# in something like 0.01s.
assert ((2 + 3*I)**1000).expand(complex=True) == \
-81079464736246615951519029367296227340216902563389546989376269312984127074385455204551402940331021387412262494620336565547972162814110386834027871072723273110439771695255662375718498785908345629702081336606863762777939617745464755635193139022811989314881997210583159045854968310911252660312523907616129080027594310008539817935736331124833163907518549408018652090650537035647520296539436440394920287688149200763245475036722326561143851304795139005599209239350981457301460233967137708519975586996623552182807311159141501424576682074392689622074945519232029999 + \
46938745946789557590804551905243206242164799136976022474337918748798900569942573265747576032611189047943842446167719177749107138603040963603119861476016947257034472364028585381714774667326478071264878108114128915685688115488744955550920239128462489496563930809677159214598114273887061533057125164518549173898349061972857446844052995037423459472376202251620778517659247970283904820245958198842631651569984310559418135975795868314764489884749573052997832686979294085577689571149679540256349988338406458116270429842222666345146926395233040564229555893248370000*I
assert ((2 + 3*I/4)**1000).expand(complex=True) == \
Integer(1)*37079892761199059751745775382463070250205990218394308874593455293485167797989691280095867197640410033222367257278387021789651672598831503296531725827158233077451476545928116965316544607115843772405184272449644892857783761260737279675075819921259597776770965829089907990486964515784097181964312256560561065607846661496055417619388874421218472707497847700629822858068783288579581649321248495739224020822198695759609598745114438265083593711851665996586461937988748911532242908776883696631067311443171682974330675406616373422505939887984366289623091300746049101284856530270685577940283077888955692921951247230006346681086274961362500646889925803654263491848309446197554307105991537357310209426736453173441104334496173618419659521888945605315751089087820455852582920963561495787655250624781448951403353654348109893478206364632640344111022531861683064175862889459084900614967785405977231549003280842218501570429860550379522498497412180001/114813069527425452423283320117768198402231770208869520047764273682576626139237031385665948631650626991844596463898746277344711896086305533142593135616665318539129989145312280000688779148240044871428926990063486244781615463646388363947317026040466353970904996558162398808944629605623311649536164221970332681344168908984458505602379484807914058900934776500429002716706625830522008132236281291761267883317206598995396418127021779858404042159853183251540889433902091920554957783589672039160081957216630582755380425583726015528348786419432054508915275783882625175435528800822842770817965453762184851149029376 + \
I*421638390580169706973991429333213477486930178424989246669892530737775352519112934278994501272111385966211392610029433824534634841747911783746811994443436271013377059560245191441549885048056920190833693041257216263519792201852046825443439142932464031501882145407459174948712992271510309541474392303461939389368955986650538525895866713074543004916049550090364398070215427272240155060576252568700906004691224321432509053286859100920489253598392100207663785243368195857086816912514025693453058403158416856847185079684216151337200057494966741268925263085619240941610301610538225414050394612058339070756009433535451561664522479191267503989904464718368605684297071150902631208673621618217106272361061676184840810762902463998065947687814692402219182668782278472952758690939877465065070481351343206840649517150634973307937551168752642148704904383991876969408056379195860410677814566225456558230131911142229028179902418223009651437985670625/1793954211366022694113801876840128100034871409513586250746316776290259783425578615401030447369541046747571819748417910583511123376348523955353017744010395602173906080395504375010762174191250701116076984219741972574712741619474818186676828531882286780795390571221287481389759837587864244524002565968286448146002639202882164150037179450123657170327105882819203167448541028601906377066191895183769810676831353109303069033234715310287563158747705988305326397404720186258671215368588625611876280581509852855552819149745718992630449787803625851701801184123166018366180137512856918294030710215034138299203584
assert ((2 + 3*I)**-1000).expand(complex=True) == \
Integer(1)*-81079464736246615951519029367296227340216902563389546989376269312984127074385455204551402940331021387412262494620336565547972162814110386834027871072723273110439771695255662375718498785908345629702081336606863762777939617745464755635193139022811989314881997210583159045854968310911252660312523907616129080027594310008539817935736331124833163907518549408018652090650537035647520296539436440394920287688149200763245475036722326561143851304795139005599209239350981457301460233967137708519975586996623552182807311159141501424576682074392689622074945519232029999/8777125472973511649630750050295188683351430110097915876250894978429797369155961290321829625004920141758416719066805645579710744290541337680113772670040386863849283653078324415471816788604945889094925784900885812724984087843737442111926413818245854362613018058774368703971604921858023116665586358870612944209398056562604561248859926344335598822815885851096698226775053153403320782439987679978321289537645645163767251396759519805603090332694449553371530571613352311006350058217982509738362083094920649452123351717366337410243853659113315547584871655479914439219520157174729130746351059075207407866012574386726064196992865627149566238044625779078186624347183905913357718850537058578084932880569701242598663149911276357125355850792073635533676541250531086757377369962506979378337216411188347761901006460813413505861461267545723590468627854202034450569581626648934062198718362303420281555886394558137408159453103395918783625713213314350531051312551733021627153081075080140680608080529736975658786227362251632725009435866547613598753584705455955419696609282059191031962604169242974038517575645939316377801594539335940001 - Integer(1)*46938745946789557590804551905243206242164799136976022474337918748798900569942573265747576032611189047943842446167719177749107138603040963603119861476016947257034472364028585381714774667326478071264878108114128915685688115488744955550920239128462489496563930809677159214598114273887061533057125164518549173898349061972857446844052995037423459472376202251620778517659247970283904820245958198842631651569984310559418135975795868314764489884749573052997832686979294085577689571149679540256349988338406458116270429842222666345146926395233040564229555893248370000*I/8777125472973511649630750050295188683351430110097915876250894978429797369155961290321829625004920141758416719066805645579710744290541337680113772670040386863849283653078324415471816788604945889094925784900885812724984087843737442111926413818245854362613018058774368703971604921858023116665586358870612944209398056562604561248859926344335598822815885851096698226775053153403320782439987679978321289537645645163767251396759519805603090332694449553371530571613352311006350058217982509738362083094920649452123351717366337410243853659113315547584871655479914439219520157174729130746351059075207407866012574386726064196992865627149566238044625779078186624347183905913357718850537058578084932880569701242598663149911276357125355850792073635533676541250531086757377369962506979378337216411188347761901006460813413505861461267545723590468627854202034450569581626648934062198718362303420281555886394558137408159453103395918783625713213314350531051312551733021627153081075080140680608080529736975658786227362251632725009435866547613598753584705455955419696609282059191031962604169242974038517575645939316377801594539335940001
assert ((2 + 3*I/4)**-1000).expand(complex=True) == \
Integer(1)*4257256305661027385394552848555894604806501409793288342610746813288539790051927148781268212212078237301273165351052934681382567968787279534591114913777456610214738290619922068269909423637926549603264174216950025398244509039145410016404821694746262142525173737175066432954496592560621330313807235750500564940782099283410261748370262433487444897446779072067625787246390824312580440138770014838135245148574339248259670887549732495841810961088930810608893772914812838358159009303794863047635845688453859317690488124382253918725010358589723156019888846606295866740117645571396817375322724096486161308083462637370825829567578309445855481578518239186117686659177284332344643124760453112513611749309168470605289172320376911472635805822082051716625171429727162039621902266619821870482519063133136820085579315127038372190224739238686708451840610064871885616258831386810233957438253532027049148030157164346719204500373766157143311767338973363806106967439378604898250533766359989107510507493549529158818602327525235240510049484816090584478644771183158342479140194633579061295740839490629457435283873180259847394582069479062820225159699506175855369539201399183443253793905149785994830358114153241481884290274629611529758663543080724574566578220908907477622643689220814376054314972190402285121776593824615083669045183404206291739005554569305329760211752815718335731118664756831942466773261465213581616104242113894521054475516019456867271362053692785300826523328020796670205463390909136593859765912483565093461468865534470710132881677639651348709376/2103100954337624833663208713697737151593634525061637972297915388685604042449504336765884978184588688426595940401280828953096857809292320006227881797146858511436638446932833617514351442216409828605662238790280753075176269765767010004889778647709740770757817960711900340755635772183674511158570690702969774966791073165467918123298694584729211212414462628433370481195120564586361368504153395406845170075275051749019600057116719726628746724489572189061061036426955163696859127711110719502594479795200686212257570291758725259007379710596548777812659422174199194837355646482046783616494013289495563083118517507178847555801163089723056310287760875135196081975602765511153122381201303871673391366630940702817360340900568748719988954847590748960761446218262344767250783946365392689256634180417145926390656439421745644011831124277463643383712803287985472471755648426749842410972650924240795946699346613614779460399530274263580007672855851663196114585312432954432654691485867618908420370875753749297487803461900447407917655296784879220450937110470920633595689721819488638484547259978337741496090602390463594556401615298457456112485536498177883358587125449801777718900375736758266215245325999241624148841915093787519330809347240990363802360596034171167818310322276373120180985148650099673289383722502488957717848531612020897298448601714154586319660314294591620415272119454982220034319689607295960162971300417552364254983071768070124456169427638371140064235083443242844616326538396503937972586505546495649094344512270582463639152160238137952390380581401171977159154009407415523525096743009110916334144716516647041176989758534635251844947906038080852185583742296318878233394998111078843229681030277039104786225656992262073797524057992347971177720807155842376332851559276430280477639539393920006008737472164850104411971830120295750221200029811143140323763349636629725073624360001 - 
Integer(1)*3098214262599218784594285246258841485430681674561917573155883806818465520660668045042109232930382494608383663464454841313154390741655282039877410154577448327874989496074260116195788919037407420625081798124301494353693248757853222257918294662198297114746822817460991242508743651430439120439020484502408313310689912381846149597061657483084652685283853595100434135149479564507015504022249330340259111426799121454516345905101620532787348293877485702600390665276070250119465888154331218827342488849948540687659846652377277250614246402784754153678374932540789808703029043827352976139228402417432199779415751301480406673762521987999573209628597459357964214510139892316208670927074795773830798600837815329291912002136924506221066071242281626618211060464126372574400100990746934953437169840312584285942093951405864225230033279614235191326102697164613004299868695519642598882914862568516635347204441042798206770888274175592401790040170576311989738272102077819127459014286741435419468254146418098278519775722104890854275995510700298782146199325790002255362719776098816136732897323406228294203133323296591166026338391813696715894870956511298793595675308998014158717167429941371979636895553724830981754579086664608880698350866487717403917070872269853194118364230971216854931998642990452908852258008095741042117326241406479532880476938937997238098399302185675832474590293188864060116934035867037219176916416481757918864533515526389079998129329045569609325290897577497835388451456680707076072624629697883854217331728051953671643278797380171857920000*I/2103100954337624833663208713697737151593634525061637972297915388685604042449504336765884978184588688426595940401280828953096857809292320006227881797146858511436638446932833617514351442216409828605662238790280753075176269765767010004889778647709740770757817960711900340755635772183674511158570690702969774966791073165467918123298694584729211212414462628433370481195120564586361368504153395406845170075275051749019600057116719726628746724489572189061061036426955163696859127711110719502594479795200686212257570291758725259007379710596548777812659422174199194837355646482046783616494013289495563083118517507178847555801163089723056310287760875135196081975602765511153122381201303871673391366630940702817360340900568748719988954847590748960761446218262344767250783946365392689256634180417145926390656439421745644011831124277463643383712803287985472471755648426749842410972650924240795946699346613614779460399530274263580007672855851663196114585312432954432654691485867618908420370875753749297487803461900447407917655296784879220450937110470920633595689721819488638484547259978337741496090602390463594556401615298457456112485536498177883358587125449801777718900375736758266215245325999241624148841915093787519330809347240990363802360596034171167818310322276373120180985148650099673289383722502488957717848531612020897298448601714154586319660314294591620415272119454982220034319689607295960162971300417552364254983071768070124456169427638371140064235083443242844616326538396503937972586505546495649094344512270582463639152160238137952390380581401171977159154009407415523525096743009110916334144716516647041176989758534635251844947906038080852185583742296318878233394998111078843229681030277039104786225656992262073797524057992347971177720807155842376332851559276430280477639539393920006008737472164850104411971830120295750221200029811143140323763349636629725073624360001
a = Symbol('a', real=True)
b = Symbol('b', real=True)
assert exp(a*(2 + I*b)).expand(complex=True) == \
I*exp(2*a)*sin(a*b) + exp(2*a)*cos(a*b)
def test_expand():
f = (16 - 2*sqrt(29))**2
assert f.expand() == 372 - 64*sqrt(29)
f = (Integer(1)/2 + I/2)**10
assert f.expand() == I/32
f = (Integer(1)/2 + I)**10
assert f.expand() == Integer(237)/1024 - 779*I/256
def test_re_im1652():
x = Symbol('x')
assert re(x) == re(conjugate(x))
assert im(x) == - im(conjugate(x))
assert im(x)*re(conjugate(x)) + im(conjugate(x)) * re(x) == 0
def test_issue_5084():
x = Symbol('x')
assert ((x + x*I)/(1 + I)).as_real_imag() == (re((x + I*x)/(1 + I)
), im((x + I*x)/(1 + I)))
def test_issue_5236():
assert (cos(1 + I)**3).as_real_imag() == (-3*sin(1)**2*sinh(1)**2*cos(1)*cosh(1) +
cos(1)**3*cosh(1)**3, -3*cos(1)**2*cosh(1)**2*sin(1)*sinh(1) + sin(1)**3*sinh(1)**3)
def test_real_imag():
x, y, z = symbols('x, y, z')
X, Y, Z = symbols('X, Y, Z', commutative=False)
a = Symbol('a', real=True)
assert (2*a*x).as_real_imag() == (2*a*re(x), 2*a*im(x))
# issue 5395:
assert (x*x.conjugate()).as_real_imag() == (Abs(x)**2, 0)
assert im(x*x.conjugate()) == 0
assert im(x*y.conjugate()*z*y) == im(x*z)*Abs(y)**2
assert im(x*y.conjugate()*x*y) == im(x**2)*Abs(y)**2
assert im(Z*y.conjugate()*X*y) == im(Z*X)*Abs(y)**2
assert im(X*X.conjugate()) == im(X*X.conjugate(), evaluate=False)
assert (sin(x)*sin(x).conjugate()).as_real_imag() == \
(Abs(sin(x))**2, 0)
# issue 6573:
assert (x**2).as_real_imag() == (re(x)**2 - im(x)**2, 2*re(x)*im(x))
# issue 6428:
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
assert (i*r*x).as_real_imag() == (I*i*r*im(x), -I*i*r*re(x))
assert (i*r*x*(y + 2)).as_real_imag() == (
I*i*r*(re(y) + 2)*im(x) + I*i*r*re(x)*im(y),
-I*i*r*(re(y) + 2)*re(x) + I*i*r*im(x)*im(y))
# issue 7106:
assert ((1 + I)/(1 - I)).as_real_imag() == (0, 1)
assert ((1 + 2*I)*(1 + 3*I)).as_real_imag() == (-5, 5)
def test_pow_issue_1724():
e = ((-1)**(S(1)/3))
assert e.conjugate().n() == e.n().conjugate()
e = S('-2/3 - (-29/54 + sqrt(93)/18)**(1/3) - 1/(9*(-29/54 + sqrt(93)/18)**(1/3))')
assert e.conjugate().n() == e.n().conjugate()
e = 2**I
assert e.conjugate().n() == e.n().conjugate()
def test_issue_5429():
assert sqrt(I).conjugate() != sqrt(I)
def test_issue_4124():
from sympy import oo
assert expand_complex(I*oo) == oo*I
|
kjoconnor/mongo-aws-backup | refs/heads/master | delete_snapshots.py | 1 | import sys
from boto.ec2 import connect_to_region
from datetime import datetime, timedelta
try:
days = int(sys.argv[1])
except IndexError:
days = 30
delete_time = datetime.utcnow() - timedelta(days=days)
filters = {
'tag-key': 'uselesssnapshots'
}
print 'Deleting any snapshots older than {days} days'.format(days=days)
ec2 = connect_to_region('us-east-1')
snapshots = ec2.get_all_snapshots(filters=filters)
deletion_counter = 0
size_counter = 0
for snapshot in snapshots:
start_time = datetime.strptime(
snapshot.start_time,
'%Y-%m-%dT%H:%M:%S.000Z'
)
if start_time < delete_time:
print 'Deleting {id}'.format(id=snapshot.id)
deletion_counter = deletion_counter + 1
size_counter = size_counter + snapshot.volume_size
# Just to make sure you're reading!
# snapshot.delete()
print 'Deleted {number} snapshots totalling {size} GB'.format(
number=deletion_counter,
size=size_counter
)
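# Usage sketch: `python delete_snapshots.py 14` reports snapshots tagged
# 'uselesssnapshots' that are older than 14 days (default: 30). The actual
# snapshot.delete() call above is commented out, so this stays a dry run
# until you uncomment it.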
|
wimac/home | refs/heads/master | Dropbox/skel/bin/sick-beard/lib/hachoir_parser/game/spider_man_video.py | 90 | """
Parser for an obscure FMV file format: bin files from the game
"The Amazing Spider-Man vs. The Kingpin" (Sega CD)
Author: Mike Melanson
Creation date: 2006-09-30
File samples: http://samples.mplayerhq.hu/game-formats/spiderman-segacd-bin/
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import FieldSet, UInt32, String, RawBytes
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
class Chunk(FieldSet):
tag_info = {
"CONF" : ("conf[]", None, "Configuration header"),
"AUDI" : ("audio[]", None, "Audio chunk"),
"SYNC" : ("sync[]", None, "Start of video frame data"),
"IVRA" : ("ivra[]", None, "Vector codebook (?)"),
"VRAM" : ("video[]", None, "Video RAM tile pattern"),
"CRAM" : ("color[]", None, "Color RAM (palette)"),
"CEND" : ("video_end[]", None, "End of video data"),
"MEND" : ("end_file", None, "End of file"),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["length"].value * 8
fourcc = self["fourcc"].value
if fourcc in self.tag_info:
self._name, self._parser, self._description = self.tag_info[fourcc]
else:
self._parser = None
self._description = "Unknown chunk: fourcc %s" % self["fourcc"].display
def createFields(self):
yield String(self, "fourcc", 4, "FourCC", charset="ASCII")
yield textHandler(UInt32(self, "length", "length"), hexadecimal)
size = self["length"].value - 8
if 0 < size:
if self._parser:
for field in self._parser(self, size):
yield field
else:
yield RawBytes(self, "data", size)
class SpiderManVideoFile(Parser):
PARSER_TAGS = {
"id": "spiderman_video",
"category": "game",
"file_ext": ("bin",),
"min_size": 8*8,
"description": "The Amazing Spider-Man vs. The Kingpin (Sega CD) FMV video"
}
endian = BIG_ENDIAN
def validate(self):
return (self.stream.readBytes(0, 4) == 'CONF')
def createFields(self):
while not self.eof:
yield Chunk(self, "chunk[]")
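# A usage sketch (my assumption: hachoir's generic createParser entry point
# selects this parser via validate(), which checks the leading 'CONF' chunk):
#     from lib.hachoir_parser import createParser
#     parser = createParser(u"movie.bin")
#     for chunk in parser:
#         print chunk.name, chunk.description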
|
RJHsiao/paiza_poh5 | refs/heads/master | Mission_2.py | 1 | #! /bin/env python3
if __name__ == '__main__':
n = int(input())
sums = [0,0,0,0,0,0,0]
for i in range(n):
sums[i % 7] += int(input())
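    # Example: with n = 9 inputs a1..a9, the i % 7 bucketing gives
    #   sums[0] = a1 + a8, sums[1] = a2 + a9, sums[2] = a3, ..., sums[6] = a7
    # (presumably one running total per day of the week).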
for s in sums:
print(s) |
nklulupeterpan/Comedy | refs/heads/master | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/setup.py | 2462 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from setuptools import setup
setup(
name='gyp',
version='0.1',
description='Generate Your Projects',
author='Chromium Authors',
author_email='chromium-dev@googlegroups.com',
url='http://code.google.com/p/gyp',
package_dir = {'': 'pylib'},
packages=['gyp', 'gyp.generator'],
entry_points = {'console_scripts': ['gyp=gyp:script_main'] }
)
|
wakashige/kubernetes | refs/heads/master | cluster/juju/charms/trusty/kubernetes-master/unit_tests/test_install.py | 145 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock, MagicMock
from path import Path
import pytest
import sys
# Munge the python path so we can find our hook code
d = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, d.abspath())
# Import the modules from the hook
import install
class TestInstallHook():
@patch('install.path')
def test_update_rc_files(self, pmock):
"""
Test happy path on updating env files. Assuming everything
exists and is in place.
"""
pmock.return_value.lines.return_value = ['line1', 'line2']
install.update_rc_files(['test1', 'test2'])
pmock.return_value.write_lines.assert_called_with(['line1', 'line2',
'test1', 'test2'])
def test_update_rc_files_with_nonexistent_path(self):
"""
Test an unhappy path if the bashrc/users do not exist.
"""
with pytest.raises(OSError) as exinfo:
            install.update_rc_files(['test1', 'test2'])
@patch('install.fetch')
@patch('install.hookenv')
def test_package_installation(self, hemock, ftmock):
"""
Verify we are calling the known essentials to build and syndicate
kubes.
"""
pkgs = ['build-essential', 'git',
'make', 'nginx', 'python-pip']
install.install_packages()
hemock.log.assert_called_with('Installing Debian packages')
ftmock.filter_installed_packages.assert_called_with(pkgs)
@patch('install.archiveurl.ArchiveUrlFetchHandler')
def test_go_download(self, aumock):
"""
Test that we are actually handing off to charm-helpers to
download a specific archive of Go. This is non-configurable so
its reasonably safe to assume we're going to always do this,
and when it changes we shall curse the brittleness of this test.
"""
ins_mock = aumock.return_value.install
install.download_go()
url = 'https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz'
sha1='5020af94b52b65cc9b6f11d50a67e4bae07b0aff'
ins_mock.assert_called_with(url, '/usr/local', sha1, 'sha1')
@patch('install.subprocess')
def test_clone_repository(self, spmock):
"""
We're not using a unit-tested git library - so ensure our subprocess
call is consistent. If we change this, we want to know we've broken it.
"""
install.clone_repository()
repo = 'https://github.com/kubernetes/kubernetes.git'
direct = '/opt/kubernetes'
spmock.check_output.assert_called_with(['git', 'clone', repo, direct])
@patch('install.install_packages')
@patch('install.download_go')
@patch('install.clone_repository')
@patch('install.update_rc_files')
@patch('install.hookenv')
def test_install_main(self, hemock, urmock, crmock, dgmock, ipmock):
"""
Ensure the driver/main method is calling all the supporting methods.
"""
strings = [
'export GOROOT=/usr/local/go\n',
'export PATH=$PATH:$GOROOT/bin\n',
'export KUBE_MASTER_IP=0.0.0.0\n',
'export KUBERNETES_MASTER=http://$KUBE_MASTER_IP\n',
]
install.install()
crmock.assert_called_once()
dgmock.assert_called_once()
        ipmock.assert_called_once()  # the original repeated crmock here; ipmock is presumably what was meant
urmock.assert_called_with(strings)
hemock.open_port.assert_called_with(8080)
|
neutronimaging/imagingsuite | refs/heads/master | external/src/pybind11/2.2.3/tests/test_constants_and_functions.py | 31 | from pybind11_tests import constants_and_functions as m
def test_constants():
assert m.some_constant == 14
def test_function_overloading():
assert m.test_function() == "test_function()"
assert m.test_function(7) == "test_function(7)"
assert m.test_function(m.MyEnum.EFirstEntry) == "test_function(enum=1)"
assert m.test_function(m.MyEnum.ESecondEntry) == "test_function(enum=2)"
assert m.test_function() == "test_function()"
assert m.test_function("abcd") == "test_function(char *)"
assert m.test_function(1, 1.0) == "test_function(int, float)"
assert m.test_function(1, 1.0) == "test_function(int, float)"
assert m.test_function(2.0, 2) == "test_function(float, int)"
def test_bytes():
assert m.print_bytes(m.return_bytes()) == "bytes[1 0 2 0]"
def test_exception_specifiers():
c = m.C()
assert c.m1(2) == 1
assert c.m2(3) == 1
assert c.m3(5) == 2
assert c.m4(7) == 3
assert c.m5(10) == 5
assert c.m6(14) == 8
assert c.m7(20) == 13
assert c.m8(29) == 21
assert m.f1(33) == 34
assert m.f2(53) == 55
assert m.f3(86) == 89
assert m.f4(140) == 144
|
ohmini/thaifoodapi | refs/heads/master | lib/django/db/backends/sqlite3/features.py | 194 | from __future__ import unicode_literals
from django.db import utils
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils import six
from django.utils.functional import cached_property
from .base import Database
try:
import pytz
except ImportError:
pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
supports_foreign_keys = False
supports_column_check_constraints = False
autocommits_when_autocommit_is_off = True
can_introspect_decimal_field = False
can_introspect_positive_integer_field = True
can_introspect_small_integer_field = True
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
supports_paramstyle_pyformat = False
supports_sequence_reset = False
can_clone_databases = True
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def can_release_savepoints(self):
return self.uses_savepoints
@cached_property
def can_share_in_memory_db(self):
return (
six.PY3 and
Database.__name__ == 'sqlite3.dbapi2' and
Database.sqlite_version_info >= (3, 7, 13)
)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_expression_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
|
ganeshgore/myremolab | refs/heads/master | server/launch/sample_balanced2_concurrent_experiments/main_machine/lab_and_experiment1/experiment70/server_config.py | 242 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
weblab_xilinx_experiment_xilinx_device = 'FPGA'
weblab_xilinx_experiment_port_number = 1
# This should be something like this:
# import os as _os
# xilinx_home = _os.getenv('XILINX_HOME')
# if xilinx_home == None:
# if _os.name == 'nt':
# xilinx_home = r'C:\Program Files\Xilinx'
# elif _os.name == 'posix':
# xilinx_home = r"/home/nctrun/Xilinx"
#
# if _os.name == 'nt':
# xilinx_impact_full_path = [xilinx_home + r'\bin\nt\impact']
# elif _os.name == 'posix':
# xilinx_impact_full_path = [xilinx_home + r'/bin/lin/impact']
# But for testing we are going to fake it:
xilinx_home = "."
xilinx_impact_full_path = ["python","./tests/unit/weblab/experiment/devices/xilinx_impact/fake_impact.py" ]
xilinx_device_to_program = 'XilinxImpact' # 'JTagBlazer', 'DigilentAdept'
xilinx_device_to_send_commands = 'SerialPort' # 'HttpDevice'
digilent_adept_full_path = ["python","./test/unit/weblab/experiment/devices/digilent_adept/fake_digilent_adept.py" ]
digilent_adept_batch_content = """something with the variable $FILE"""
xilinx_http_device_ip_FPGA = "192.168.50.138"
xilinx_http_device_port_FPGA = 80
xilinx_http_device_app_FPGA = ""
xilinx_batch_content_FPGA = """setMode -bs
setCable -port auto
addDevice -position 1 -file $FILE
Program -p 1
exit
"""
# Though it is not really a FPGA, the webcam url var name depends on the device,
# specified above.
fpga_webcam_url = '''https://www.weblab.deusto.es/webcam/fpga0/image.jpg''' |
tejesh95/Zubio.in | refs/heads/master | zubio/gym/migrations/0002_auto_20150313_1638.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gym', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='document',
name='address',
field=models.CharField(default=b'please fill', max_length=500),
preserve_default=True,
),
migrations.AddField(
model_name='document',
name='title',
field=models.CharField(default=b'please fill', max_length=50),
preserve_default=True,
),
]
|
maljac/odoo-addons | refs/heads/8.0 | evaluation/__openerp__.py | 1 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'active': False,
'author': u'ADHOC SA',
'category': u'base.module_category_knowledge_management',
'demo_xml': [],
'depends': ['survey',],
'description': u"""
Extends the functionality of the survey module in order to make assessments that are corrected automatically
""",
'installable': True,
'license': 'AGPL-3',
'name': u'Academic Evaluations',
'test': [
],
'data': [
'view/survey_view.xml',
'security/ir.model.access.csv',
'security/survey_security.xml',
],
'version': u'1.0',
'website': 'www.adhoc.com.ar'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Kefkius/electrum-frc | refs/heads/master | gui/qt/receiving_widget.py | 1 | from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_frc.i18n import _
class ReceivingWidget(QTreeWidget):
def toggle_used(self):
if self.hide_used:
self.hide_used = False
self.setColumnHidden(2, False)
else:
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
def edit_label(self, item, column):
if column == 1 and item.isSelected():
self.editing = True
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editItem(item, column)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editing = False
def update_label(self, item, column):
if self.editing:
return
else:
address = str(item.text(0))
label = unicode( item.text(1) )
self.owner.actuator.g.wallet.set_label(address, label)
def copy_address(self):
address = self.currentItem().text(0)
qApp.clipboard().setText(address)
def update_list(self):
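        # NOTE: the early return below disables the legacy list-refresh logic;
        # the remainder of this method is kept for reference but never runs.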
return
self.clear()
addresses = self.owner.actuator.g.wallet.addresses(False)
for address in addresses:
history = self.owner.actuator.g.wallet.history.get(address,[])
used = "No"
            # At the moment, a history entry can be either a (tx_hash, height)
            # tuple or just a tx hash; that's why the code below is duplicated. To be fixed.
if len(history) == 1:
                # This means pruned data. If that's the case, the address must have been used at one point
if history[0] == "*":
used = "Yes"
else:
for tx_hash in history:
tx = self.owner.actuator.g.wallet.transactions.get(tx_hash)
if tx:
used = "Yes"
else:
for tx_hash, height in history:
tx = self.owner.actuator.g.wallet.transactions.get(tx_hash)
if tx:
used = "Yes"
if(self.hide_used == True and used == "No") or self.hide_used == False:
label = self.owner.actuator.g.wallet.labels.get(address,'')
item = QTreeWidgetItem([address, label, used])
self.insertTopLevelItem(0, item)
def __init__(self, owner=None):
self.owner = owner
self.editing = False
QTreeWidget.__init__(self, owner)
self.setColumnCount(3)
self.setHeaderLabels([_("Address"), _("Label"), _("Used")])
self.setIndentation(0)
self.hide_used = True
self.setColumnHidden(2, True)
|
swjtuacmer/Ranker | refs/heads/master | Ranker/venv/lib/python2.7/site-packages/pip/exceptions.py | 344 | """Exceptions used throughout package"""
from __future__ import absolute_import
from itertools import chain, groupby, repeat
from pip._vendor.six import iteritems
class PipError(Exception):
"""Base pip exception"""
class InstallationError(PipError):
"""General exception during installation"""
class UninstallationError(PipError):
"""General exception during uninstallation"""
class DistributionNotFound(InstallationError):
"""Raised when a distribution cannot be found to satisfy a requirement"""
class RequirementsFileParseError(InstallationError):
"""Raised when a general error occurs parsing a requirements file line."""
class BestVersionAlreadyInstalled(PipError):
"""Raised when the most up-to-date version of a package is already
installed."""
class BadCommand(PipError):
"""Raised when virtualenv or a command is not found"""
class CommandError(PipError):
"""Raised when there is an error in command-line arguments"""
class PreviousBuildDirError(PipError):
"""Raised when there's a previous conflicting build directory"""
class InvalidWheelFilename(InstallationError):
"""Invalid wheel filename."""
class UnsupportedWheel(InstallationError):
"""Unsupported wheel."""
class HashErrors(InstallationError):
"""Multiple HashError instances rolled into one for reporting"""
def __init__(self):
self.errors = []
def append(self, error):
self.errors.append(error)
def __str__(self):
lines = []
self.errors.sort(key=lambda e: e.order)
for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
lines.append(cls.head)
lines.extend(e.body() for e in errors_of_cls)
if lines:
return '\n'.join(lines)
def __nonzero__(self):
return bool(self.errors)
def __bool__(self):
return self.__nonzero__()
class HashError(InstallationError):
"""
A failure to verify a package against known-good hashes
:cvar order: An int sorting hash exception classes by difficulty of
recovery (lower being harder), so the user doesn't bother fretting
about unpinned packages when he has deeper issues, like VCS
dependencies, to deal with. Also keeps error reports in a
deterministic order.
:cvar head: A section heading for display above potentially many
exceptions of this kind
:ivar req: The InstallRequirement that triggered this error. This is
pasted on after the exception is instantiated, because it's not
typically available earlier.
"""
req = None
head = ''
def body(self):
"""Return a summary of me for display under the heading.
This default implementation simply prints a description of the
triggering requirement.
:param req: The InstallRequirement that provoked this error, with
populate_link() having already been called
"""
return ' %s' % self._requirement_name()
def __str__(self):
return '%s\n%s' % (self.head, self.body())
def _requirement_name(self):
"""Return a description of the requirement that triggered me.
This default implementation returns long description of the req, with
line numbers
"""
return str(self.req) if self.req else 'unknown package'
class VcsHashUnsupported(HashError):
"""A hash was provided for a version-control-system-based requirement, but
we don't have a method for hashing those."""
order = 0
head = ("Can't verify hashes for these requirements because we don't "
"have a way to hash version control repositories:")
class DirectoryUrlHashUnsupported(HashError):
"""A hash was provided for a version-control-system-based requirement, but
we don't have a method for hashing those."""
order = 1
head = ("Can't verify hashes for these file:// requirements because they "
"point to directories:")
class HashMissing(HashError):
"""A hash was needed for a requirement but is absent."""
order = 2
head = ('Hashes are required in --require-hashes mode, but they are '
'missing from some requirements. Here is a list of those '
'requirements along with the hashes their downloaded archives '
'actually had. Add lines like these to your requirements files to '
'prevent tampering. (If you did not enable --require-hashes '
'manually, note that it turns on automatically when any package '
'has a hash.)')
def __init__(self, gotten_hash):
"""
:param gotten_hash: The hash of the (possibly malicious) archive we
just downloaded
"""
self.gotten_hash = gotten_hash
def body(self):
from pip.utils.hashes import FAVORITE_HASH # Dodge circular import.
package = None
if self.req:
# In the case of URL-based requirements, display the original URL
# seen in the requirements file rather than the package name,
# so the output can be directly copied into the requirements file.
package = (self.req.original_link if self.req.original_link
# In case someone feeds something downright stupid
# to InstallRequirement's constructor.
else getattr(self.req, 'req', None))
return ' %s --hash=%s:%s' % (package or 'unknown package',
FAVORITE_HASH,
self.gotten_hash)
class HashUnpinned(HashError):
"""A requirement had a hash specified but was not pinned to a specific
version."""
order = 3
head = ('In --require-hashes mode, all requirements must have their '
'versions pinned with ==. These do not:')
class HashMismatch(HashError):
"""
Distribution file hash values don't match.
:ivar package_name: The name of the package that triggered the hash
        mismatch. Feel free to write to this after the exception is raised to
improve its error message.
"""
order = 4
head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS '
'FILE. If you have updated the package versions, please update '
'the hashes. Otherwise, examine the package contents carefully; '
'someone may have tampered with them.')
def __init__(self, allowed, gots):
"""
:param allowed: A dict of algorithm names pointing to lists of allowed
hex digests
:param gots: A dict of algorithm names pointing to hashes we
actually got from the files under suspicion
"""
self.allowed = allowed
self.gots = gots
def body(self):
return ' %s:\n%s' % (self._requirement_name(),
self._hash_comparison())
def _hash_comparison(self):
"""
Return a comparison of actual and expected hash values.
Example::
Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
or 123451234512345123451234512345123451234512345
Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
"""
def hash_then_or(hash_name):
# For now, all the decent hashes have 6-char names, so we can get
# away with hard-coding space literals.
return chain([hash_name], repeat(' or'))
lines = []
for hash_name, expecteds in iteritems(self.allowed):
prefix = hash_then_or(hash_name)
lines.extend((' Expected %s %s' % (next(prefix), e))
for e in expecteds)
lines.append(' Got %s\n' %
self.gots[hash_name].hexdigest())
prefix = ' or'
return '\n'.join(lines)
class UnsupportedPythonVersion(InstallationError):
"""Unsupported python version according to Requires-Python package
metadata."""
|
ProjectSWGCore/NGECore2 | refs/heads/master | scripts/buffs/of_stimulator_1.py | 2 | import sys
def setup(core, actor, buff):
return
def add(core, actor, buff):
return
def remove(core, actor, buff):
return |
jkburges/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/config.py | 126 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrapper objects for WebKit-specific utility routines."""
# FIXME: This file needs to be unified with common/config/ports.py .
import logging
from webkitpy.common import webkit_finder
_log = logging.getLogger(__name__)
#
# FIXME: This is used to record if we've already hit the filesystem to look
# for a default configuration. We cache this to speed up the unit tests,
# but this can be reset with clear_cached_configuration(). This should be
# replaced with us consistently using MockConfigs() for tests that don't
# hit the filesystem at all and provide a reliable value.
#
_have_determined_configuration = False
_configuration = "Release"
def clear_cached_configuration():
global _have_determined_configuration, _configuration
_have_determined_configuration = False
_configuration = "Release"
class Config(object):
_FLAGS_FROM_CONFIGURATIONS = {
"Debug": "--debug",
"Release": "--release",
}
def __init__(self, executive, filesystem, port_implementation=None):
self._executive = executive
self._filesystem = filesystem
self._webkit_finder = webkit_finder.WebKitFinder(self._filesystem)
self._default_configuration = None
self._build_directories = {}
self._port_implementation = port_implementation
def build_directory(self, configuration):
"""Returns the path to the build directory for the configuration."""
if configuration:
flags = ["--configuration", self.flag_for_configuration(configuration)]
else:
configuration = ""
flags = []
if self._port_implementation:
flags.append('--' + self._port_implementation)
if not self._build_directories.get(configuration):
args = ["perl", self._webkit_finder.path_to_script("webkit-build-directory")] + flags
output = self._executive.run_command(args, cwd=self._webkit_finder.webkit_base(), return_stderr=False).rstrip()
parts = output.split("\n")
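            # webkit-build-directory prints the top-level build directory and,
            # optionally, the configuration-specific one, e.g. (illustrative):
            #   /src/WebKitBuild
            #   /src/WebKitBuild/Release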
self._build_directories[configuration] = parts[0]
if len(parts) == 2:
default_configuration = parts[1][len(parts[0]):]
if default_configuration.startswith("/"):
default_configuration = default_configuration[1:]
self._build_directories[default_configuration] = parts[1]
return self._build_directories[configuration]
def flag_for_configuration(self, configuration):
return self._FLAGS_FROM_CONFIGURATIONS[configuration]
def default_configuration(self):
"""Returns the default configuration for the user.
Returns the value set by 'set-webkit-configuration', or "Release"
if that has not been set. This mirrors the logic in webkitdirs.pm."""
if not self._default_configuration:
self._default_configuration = self._determine_configuration()
if not self._default_configuration:
self._default_configuration = 'Release'
if self._default_configuration not in self._FLAGS_FROM_CONFIGURATIONS:
_log.warn("Configuration \"%s\" is not a recognized value.\n" % self._default_configuration)
_log.warn("Scripts may fail. See 'set-webkit-configuration --help'.")
return self._default_configuration
def _determine_configuration(self):
# This mirrors the logic in webkitdirs.pm:determineConfiguration().
#
# FIXME: See the comment at the top of the file regarding unit tests
# and our use of global mutable static variables.
# FIXME: We should just @memoize this method and then this will only
# be read once per object lifetime (which should be sufficiently fast).
global _have_determined_configuration, _configuration
if not _have_determined_configuration:
contents = self._read_configuration()
if not contents:
contents = "Release"
if contents == "Deployment":
contents = "Release"
if contents == "Development":
contents = "Debug"
_configuration = contents
_have_determined_configuration = True
return _configuration
def _read_configuration(self):
try:
configuration_path = self._filesystem.join(self.build_directory(None), "Configuration")
if not self._filesystem.exists(configuration_path):
return None
except:
return None
return self._filesystem.read_text_file(configuration_path).rstrip()
|
RoboJackets/robocup-software | refs/heads/staging | soccer/gameplay/plays/restarts/their_kickoff.py | 1 | import standard_play
import behavior
import tactics.positions.defender
import tactics.stopped.circle_on_center
import skills.mark
import main
import robocup
import constants
import planning_priority
class TheirKickoff(standard_play.StandardPlay):
# Distance from the center line we should mark from (the mark target distance from the line)
LineBuffer = constants.Robot.Radius * 3
# Distance from center to mark if no robot is found
DefaultDist = constants.Field.CenterDiameter
# Ratio of the field to consider robots as a threat
FieldRatio = 3.0 / 4
def __init__(self):
super().__init__(continuous=True)
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running, lambda: True,
'immediately')
# Add a center blocker
self.add_subbehavior(
tactics.stopped.circle_on_center.CircleOnCenter(
# TODO find a way to do this without hard coding 3
# defense/goalie robots (or make those constants)
min_robots=1 if (main.our_robots() is not None) and len(
main.our_robots()) > 3 else 0),
'circle_up',
priority=15,
required=True)
# Add two marker robots (with lower than defense priority)
mark_one = skills.mark.Mark()
self.add_subbehavior(mark_one,
'mark_one',
priority=planning_priority.PIVOT_KICK + 1,
required=False)
mark_two = skills.mark.Mark()
self.add_subbehavior(mark_two,
'mark_two',
priority=planning_priority.PIVOT_KICK,
required=False)
def absmin(self, value, floor_val):
if value <= 0:
return min(value, -floor_val)
else:
return max(value, floor_val)
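    # Example (hypothetical values): absmin(0.5, 2.0) == 2.0 and
    # absmin(-0.5, 2.0) == -2.0, while absmin(3.0, 2.0) == 3.0; the sign is
    # preserved and the magnitude is clamped to at least floor_val.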
def execute_running(self):
their_robots = main.their_robots()
mark_one = self.subbehavior_with_name('mark_one')
mark_two = self.subbehavior_with_name('mark_two')
centerCircle = robocup.Circle(constants.Field.CenterPoint,
constants.Field.CenterRadius)
# Don't select robots that are
# 1. Not on our side of the field
        # 2. Behind or inside the center circle
mark_robot_right = list(filter(
lambda robot: (robot.pos.x >= 0 and robot.pos.y < constants.Field.Length * TheirKickoff.FieldRatio and constants.Field.FieldRect.contains_point(robot.pos) and not centerCircle.contains_point(robot.pos)),
their_robots))
# Don't select robots that are
# 1. Not on our side of the field
        # 2. Behind or inside the center circle
# 3. Not the robot selected before
mark_robot_left = list(filter(
lambda robot: (robot.pos.x <= 0 and robot.pos.y < constants.Field.Length * TheirKickoff.FieldRatio and constants.Field.FieldRect.contains_point(robot.pos) and not centerCircle.contains_point(robot.pos) and robot != mark_one.mark_robot),
their_robots))
# Special cases
if len(mark_robot_left) + len(mark_robot_right) == 0:
# Can't do anything
mark_robot_left = None
mark_robot_right = None
elif len(mark_robot_left) + len(mark_robot_right) == 1:
if len(mark_robot_left) == 1:
mark_robot_right = mark_robot_left[0]
mark_robot_left = None
else:
mark_robot_right = mark_robot_right[0]
mark_robot_left = None
elif len(mark_robot_left) == 0:
mark_robot_right = mark_robot_right
mark_robot_left = mark_robot_right
elif len(mark_robot_right) == 0:
mark_robot_right = mark_robot_left
mark_robot_left = mark_robot_left
# Else, everything can proceed as normal (pick best one from each side)
# Make every element a list to normalize for the next step
if type(mark_robot_right) is not list and mark_robot_right is not None:
mark_robot_right = [mark_robot_right]
if type(mark_robot_left) is not list and mark_robot_left is not None:
mark_robot_left = [mark_robot_left]
# Select best robot from candidate lists
selected = None
if mark_robot_right is not None:
mark_robot_right = min(mark_robot_right,
key=lambda robot: robot.pos.y).pos
selected = robocup.Point(mark_robot_right)
else:
mark_robot_right = robocup.Point(TheirKickoff.DefaultDist,
constants.Field.Length / 2)
# Set x and y separately as we want a constant y value (just behind the kick off line)
mark_robot_right.y = min(
constants.Field.Length / 2 - TheirKickoff.LineBuffer,
mark_robot_right.y)
mark_robot_right.x = self.absmin(mark_robot_right.x,
TheirKickoff.DefaultDist)
mark_one.mark_point = mark_robot_right
# Do the same thing as above on the left robot.
if mark_robot_left is not None:
# Don't mark the same robot twice
mark_robot_left = filter(
lambda x: True if selected is None else not x.pos.nearly_equals(selected),
mark_robot_left)
mark_robot_left = min(mark_robot_left,
key=lambda robot: robot.pos.y).pos
else:
mark_robot_left = robocup.Point(-TheirKickoff.DefaultDist,
constants.Field.Length / 2)
mark_robot_left.y = min(
constants.Field.Length / 2 - TheirKickoff.LineBuffer,
mark_robot_left.y)
mark_robot_left.x = self.absmin(mark_robot_left.x,
TheirKickoff.DefaultDist)
mark_two.mark_point = mark_robot_left
@classmethod
def score(cls):
gs = main.game_state()
if ((gs.is_setup_state() or not main.game_state().is_playing()) and
gs.is_their_kickoff()):
return 0
return float("inf")
@classmethod
def is_restart(cls):
return True
|
sdague/home-assistant | refs/heads/dev | tests/components/numato/common.py | 21 | """Definitions shared by all numato tests."""
from numato_gpio import NumatoGpioError
NUMATO_CFG = {
"numato": {
"discover": ["/ttyACM0", "/ttyACM1"],
"devices": [
{
"id": 0,
"binary_sensors": {
"invert_logic": False,
"ports": {
"2": "numato_binary_sensor_mock_port2",
"3": "numato_binary_sensor_mock_port3",
"4": "numato_binary_sensor_mock_port4",
},
},
"sensors": {
"ports": {
"1": {
"name": "numato_adc_mock_port1",
"source_range": [100, 1023],
"destination_range": [0, 10],
"unit": "mocks",
}
},
},
"switches": {
"invert_logic": False,
"ports": {
"5": "numato_switch_mock_port5",
"6": "numato_switch_mock_port6",
},
},
}
],
}
}
def mockup_raise(*args, **kwargs):
"""Mockup to replace regular functions for error injection."""
raise NumatoGpioError("Error mockup")
def mockup_return(*args, **kwargs):
"""Mockup to replace regular functions for error injection."""
return False
|
PaulFranklin/python_koans | refs/heads/master | python3/libs/colorama/win32.py | 451 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
from ctypes import wintypes
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER
)
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", wintypes._COORD),
("dwCursorPosition", wintypes._COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", wintypes._COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
wintypes._COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position):
position = wintypes._COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = wintypes._COORD(position.Y - 1, position.X - 1)
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
|
amitaekbote/dcos | refs/heads/master | packages/bootstrap/extra/dcos_internal_utils/exhibitor.py | 4 | import logging
import os
import sys
import requests
from dcos_internal_utils import utils
from pkgpanda.util import load_string, write_string
log = logging.getLogger(__name__)
EXHIBITOR_STATUS_URL = 'http://127.0.0.1:8181/exhibitor/v1/cluster/status'
zk_pid_path = "/var/lib/dcos/exhibitor/zk.pid"
stash_zk_pid_stat_mtime_path = "/var/lib/dcos/bootstrap/exhibitor_pid_stat"
def get_zk_pid_mtime():
try:
return os.stat(zk_pid_path).st_mtime_ns
except FileNotFoundError:
log.error("ZK pid file `%s` does not exist.", zk_pid_path)
return None
def get_zk_pid():
return load_string(zk_pid_path)
def try_shortcut():
try:
# pid stat file exists, read the value out of it
stashed_pid_stat = int(load_string(stash_zk_pid_stat_mtime_path))
except FileNotFoundError:
log.info('No zk.pid last mtime found at %s', stash_zk_pid_stat_mtime_path)
return False
# Make sure that the pid hasn't been written anew
cur_pid_stat = get_zk_pid_mtime()
if cur_pid_stat is None:
return False
if stashed_pid_stat != cur_pid_stat:
return False
# Check that the PID has a zk running at it currently.
zk_pid = get_zk_pid()
cmdline_path = '/proc/{}/cmdline'.format(zk_pid)
try:
# Custom because the command line is ascii with `\0` as separator.
with open(cmdline_path, 'rb') as f:
cmd_line = f.read().split(b'\0')[:-1]
except FileNotFoundError:
        log.info('Process no longer running (couldn\'t read the cmdline at: %s)', cmdline_path)
return False
log.info('PID %s has command line %s', zk_pid, cmd_line)
if len(cmd_line) < 3:
log.info("Command line too short to be zookeeper started by exhibitor")
return False
if cmd_line[-1] != b'/var/lib/dcos/exhibitor/conf/zoo.cfg' \
or cmd_line[0] != b'/opt/mesosphere/active/java/usr/java/bin/java':
        log.info("command line doesn't start with java and end with zoo.cfg")
return False
log.info("PID file hasn't been modified. ZK still seems to be at that PID.")
return True
def wait(master_count_filename):
if try_shortcut():
        log.info("Shortcut succeeded, assuming local zk is in good config state, not waiting for quorum.")
return
log.info('Shortcut failed, waiting for exhibitor to bring up zookeeper and stabilize')
if not os.path.exists(master_count_filename):
log.info("master_count file doesn't exist when it should. Hard failing.")
sys.exit(1)
cluster_size = int(utils.read_file_line(master_count_filename))
log.info('Expected cluster size: {}'.format(cluster_size))
log.info('Waiting for ZooKeeper cluster to stabilize')
try:
response = requests.get(EXHIBITOR_STATUS_URL)
except requests.exceptions.ConnectionError as ex:
log.error('Could not connect to exhibitor: {}'.format(ex))
sys.exit(1)
if response.status_code != 200:
log.error('Could not get exhibitor status: {}, Status code: {}'.format(
EXHIBITOR_STATUS_URL, response.status_code))
sys.exit(1)
data = response.json()
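    # The status payload is a list of node dicts, roughly of the form
    # (illustrative): [{'hostname': '10.0.0.1', 'isLeader': True,
    #                   'description': 'serving'}, ...]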
serving = []
leaders = []
for node in data:
if node['isLeader']:
leaders.append(node['hostname'])
if node['description'] == 'serving':
serving.append(node['hostname'])
log.info(
"Serving hosts: `%s`, leader: `%s`", ','.join(serving), ','.join(leaders))
if len(serving) != cluster_size or len(leaders) != 1:
msg_fmt = 'Expected {} servers and 1 leader, got {} servers and {} leaders'
raise Exception(msg_fmt.format(cluster_size, len(serving), len(leaders)))
# Local Zookeeper is up. Config should be stable, local zookeeper happy. Stash the PID so if
# there is a restart we can come up quickly without requiring a new zookeeper quorum.
zk_pid_mtime = get_zk_pid_mtime()
if zk_pid_mtime is not None:
log.info('Stashing zk.pid mtime %s to %s', zk_pid_mtime, stash_zk_pid_stat_mtime_path)
write_string(stash_zk_pid_stat_mtime_path, str(zk_pid_mtime))
|
tal-nino/ansible | refs/heads/devel | lib/ansible/plugins/action/include_vars.py | 82 | # (c) 2013-2014, Benno Joy <benno@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.parsing import DataLoader
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
source = self._task.args.get('_raw_params')
if self._task._role:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'vars', source)
else:
source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'vars', source)
if os.path.exists(source):
(data, show_content) = self._loader._get_file_contents(source)
data = self._loader.load(data, show_content)
if data is None:
data = {}
if not isinstance(data, dict):
raise AnsibleError("%s must be stored as a dictionary/hash" % source)
return dict(ansible_facts=data, _ansible_no_log=not show_content)
else:
return dict(failed=True, msg="Source file not found.", file=source)
|
genome21/kubernetes | refs/heads/master | cluster/juju/charms/trusty/kubernetes/hooks/kubernetes_installer.py | 213 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from path import path
class KubernetesInstaller():
"""
    This class contains the logic needed to install kubernetes binary files.
"""
def __init__(self, arch, version, master, output_dir):
""" Gather the required variables for the install. """
# The kubernetes charm needs certain commands to be aliased.
self.aliases = {'kube-proxy': 'proxy',
'kubelet': 'kubelet'}
self.arch = arch
self.version = version
self.master = master
self.output_dir = output_dir
def download(self):
""" Download the kuberentes binaries from the kubernetes master. """
url = 'http://{0}/kubernetes/{1}/local/bin/linux/{2}'.format(
self.master, self.version, self.arch)
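        # Resulting base URL (illustrative values):
        #   http://10.0.0.1/kubernetes/v1.0.1/local/bin/linux/amd64
        # with each binary name (e.g. kubelet) appended per alias below.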
if not self.output_dir.isdir():
self.output_dir.makedirs_p()
for key in self.aliases:
uri = '{0}/{1}'.format(url, key)
destination = self.output_dir / key
wget = 'wget -nv {0} -O {1}'.format(uri, destination)
print(wget)
output = subprocess.check_output(wget.split())
print(output)
destination.chmod(0o755)
def install(self, install_dir=path('/usr/local/bin')):
""" Create links to the binary files to the install directory. """
if not install_dir.isdir():
install_dir.makedirs_p()
# Create the symbolic links to the real kubernetes binaries.
for key, value in self.aliases.iteritems():
target = self.output_dir / key
if target.exists():
link = install_dir / value
if link.exists():
link.remove()
target.symlink(link)
else:
print('Error target file {0} does not exist.'.format(target))
exit(1)
|
Letractively/spiff | refs/heads/master | src/installer/CheckRequirements.py | 1 | # Copyright (C) 2006 Samuel Abels, http://debain.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import config, util, os.path
from Step import Step
class CheckRequirements(Step):
def __init__(self, id, request, state):
Step.__init__(self, id, request, state)
self.results = [self._is_not_installed(),
util.check_python_version((2, 3, 0, '', 0)),
util.check_dir_exists(config.data_dir),
util.check_is_writable(config.data_dir),
util.check_python_module_exists('pywsgi'),
util.check_python_module_exists('SpiffGuard'),
util.check_python_module_exists('SpiffIntegrator'),
util.check_python_module_exists('SpiffSignal'),
util.check_python_module_exists('SpiffWarehouse'),
util.check_python_module_exists('SpiffWikiMarkup')]
self.failed = False in [r for n, r, e in self.results]
def _is_not_installed(self):
name = 'Checking whether the installation is already complete.'
if not os.path.exists(config.cfg_file):
return name, True, None
try:
from ConfigParser import RawConfigParser
parser = RawConfigParser()
parser.read(config.cfg_file)
installed_version = parser.get('installer', 'version')
except Exception, e:
return name, False, str(e)
if installed_version == config.__version__:
error = 'Version %s is already installed.' % installed_version
return name, False, error
return name, True, None
def show(self):
self.render('CheckRequirements.tmpl',
results = self.results,
success = not self.failed)
def check(self):
if self.failed:
self.show()
return False
return True
|
mozilla/firefox-flicks | refs/heads/master | vendor-local/lib/python/factory/containers.py | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2010 Mark Sandstrom
# Copyright (c) 2011-2013 Raphaël Barrois
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
logger = logging.getLogger(__name__)
from . import declarations
from . import utils
class CyclicDefinitionError(Exception):
"""Raised when cyclic definition were found."""
class LazyStub(object):
"""A generic container that only allows getting attributes.
Attributes are set at instantiation time, values are computed lazily.
Attributes:
        __initialized (bool): whether this object's __init__ has run. If set,
setting any attribute will be prevented.
__attrs (dict): maps attribute name to their declaration
__values (dict): maps attribute name to computed value
__pending (str list): names of the attributes whose value is being
            computed. This allows detecting cyclic lazy attribute definitions.
__containers (LazyStub list): "parents" of the LazyStub being built.
            This allows the field of a field to depend on the value of
            another field.
__target_class (type): the target class to build.
"""
__initialized = False
def __init__(self, attrs, containers=(), target_class=object, log_ctx=None):
self.__attrs = attrs
self.__values = {}
self.__pending = []
self.__containers = containers
self.__target_class = target_class
self.__log_ctx = log_ctx or '%s.%s' % (target_class.__module__, target_class.__name__)
self.factory_parent = containers[0] if containers else None
self.__initialized = True
def __repr__(self):
return '<LazyStub for %s.%s>' % (self.__target_class.__module__, self.__target_class.__name__)
def __str__(self):
return '<LazyStub for %s with %s>' % (
self.__target_class.__name__, list(self.__attrs.keys()))
def __fill__(self):
"""Fill this LazyStub, computing values of all defined attributes.
        Returns:
dict: map of attribute name => computed value
"""
res = {}
logger.debug("LazyStub: Computing values for %s(%s)",
self.__log_ctx, utils.log_pprint(kwargs=self.__attrs),
)
for attr in self.__attrs:
res[attr] = getattr(self, attr)
logger.debug("LazyStub: Computed values, got %s(%s)",
self.__log_ctx, utils.log_pprint(kwargs=res),
)
return res
def __getattr__(self, name):
"""Retrieve an attribute's value.
This will compute it if needed, unless it is already on the list of
attributes being computed.
"""
if name in self.__pending:
raise CyclicDefinitionError(
"Cyclic lazy attribute definition for %s; cycle found in %r." %
(name, self.__pending))
elif name in self.__values:
return self.__values[name]
elif name in self.__attrs:
val = self.__attrs[name]
if isinstance(val, LazyValue):
self.__pending.append(name)
val = val.evaluate(self, self.__containers)
assert name == self.__pending.pop()
self.__values[name] = val
return val
else:
raise AttributeError(
"The parameter %s is unknown. Evaluated attributes are %r, "
"definitions are %r." % (name, self.__values, self.__attrs))
def __setattr__(self, name, value):
"""Prevent setting attributes once __init__ is done."""
if not self.__initialized:
return super(LazyStub, self).__setattr__(name, value)
else:
raise AttributeError('Setting of object attributes is not allowed')
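# Illustrative sketch (hypothetical attrs): given {'one': 1, 'two': lazy},
# where lazy is a LazyValue whose evaluate() returns obj.one + 1,
# LazyStub(...).__fill__() yields {'one': 1, 'two': 2}; a LazyValue that
# reads its own attribute during evaluation raises CyclicDefinitionError.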
class DeclarationDict(dict):
"""Slightly extended dict to work with OrderedDeclaration."""
def is_declaration(self, name, value):
"""Determines if a class attribute is a field value declaration.
Based on the name and value of the class attribute, return ``True`` if
it looks like a declaration of a default field value, ``False`` if it
is private (name starts with '_') or a classmethod or staticmethod.
"""
if isinstance(value, (classmethod, staticmethod)):
return False
elif isinstance(value, declarations.OrderedDeclaration):
return True
return (not name.startswith("_") and not name.startswith("FACTORY_"))
def update_with_public(self, d):
"""Updates the DeclarationDict from a class definition dict.
Takes into account all public attributes and OrderedDeclaration
instances; ignores all class/staticmethods and private attributes
(starting with '_').
Returns a dict containing all remaining elements.
"""
remaining = {}
for k, v in d.items():
if self.is_declaration(k, v):
self[k] = v
else:
remaining[k] = v
return remaining
def copy(self, extra=None):
"""Copy this DeclarationDict into another one, including extra values.
Args:
extra (dict): additional attributes to include in the copy.
"""
new = self.__class__()
new.update(self)
if extra:
new.update(extra)
return new
class PostGenerationDeclarationDict(DeclarationDict):
"""Alternate DeclarationDict for PostGenerationDeclaration."""
def is_declaration(self, name, value):
"""Captures instances of PostGenerationDeclaration."""
return isinstance(value, declarations.PostGenerationDeclaration)
class LazyValue(object):
"""Some kind of "lazy evaluating" object."""
def evaluate(self, obj, containers=()): # pragma: no cover
"""Compute the value, using the given object."""
raise NotImplementedError("This is an abstract method.")
class OrderedDeclarationWrapper(LazyValue):
"""Lazy wrapper around an OrderedDeclaration.
Attributes:
declaration (declarations.OrderedDeclaration): the OrderedDeclaration
being wrapped
        sequence (int): the sequence counter to use when evaluating the
declaration
"""
def __init__(self, declaration, sequence, create, extra=None, **kwargs):
super(OrderedDeclarationWrapper, self).__init__(**kwargs)
self.declaration = declaration
self.sequence = sequence
self.create = create
self.extra = extra
def evaluate(self, obj, containers=()):
"""Lazily evaluate the attached OrderedDeclaration.
Args:
obj (LazyStub): the object being built
containers (object list): the chain of containers of the object
being built, its immediate holder being first.
"""
return self.declaration.evaluate(self.sequence, obj,
create=self.create,
extra=self.extra,
containers=containers,
)
def __repr__(self):
return '<%s for %r>' % (self.__class__.__name__, self.declaration)
class AttributeBuilder(object):
"""Builds attributes from a factory and extra data.
Attributes:
factory (base.Factory): the Factory for which attributes are being
built
_attrs (DeclarationDict): the attribute declarations for the factory
_subfields (dict): dict mapping an attribute name to a dict of
overridden default values for the related SubFactory.
"""
def __init__(self, factory, extra=None, log_ctx=None, **kwargs):
super(AttributeBuilder, self).__init__(**kwargs)
if not extra:
extra = {}
self.factory = factory
self._containers = extra.pop('__containers', ())
self._attrs = factory.declarations(extra)
self._log_ctx = log_ctx
attrs_with_subfields = [
k for k, v in self._attrs.items()
if self.has_subfields(v)]
self._subfields = utils.multi_extract_dict(
attrs_with_subfields, self._attrs)
def has_subfields(self, value):
return isinstance(value, declarations.ParameteredAttribute)
def build(self, create, force_sequence=None):
"""Build a dictionary of attributes.
Args:
create (bool): whether to 'build' or 'create' the subfactories.
force_sequence (int or None): if set to an int, use this value for
the sequence counter; don't advance the related counter.
"""
# Setup factory sequence.
if force_sequence is None:
sequence = self.factory._generate_next_sequence()
else:
sequence = force_sequence
# Parse attribute declarations, wrapping SubFactory and
# OrderedDeclaration.
wrapped_attrs = {}
for k, v in self._attrs.items():
if isinstance(v, declarations.OrderedDeclaration):
v = OrderedDeclarationWrapper(v,
sequence=sequence,
create=create,
extra=self._subfields.get(k, {}),
)
wrapped_attrs[k] = v
stub = LazyStub(wrapped_attrs, containers=self._containers,
target_class=self.factory, log_ctx=self._log_ctx)
return stub.__fill__()
class StubObject(object):
"""A generic container."""
pass
|
qiankunshe/sky_engine | refs/heads/master | sky/tools/webkitpy/layout_tests/controllers/test_result_writer_unittest.py | 11 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import unittest
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.controllers.test_result_writer import write_test_result
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.layout_tests.models import test_failures
class TestResultWriterTests(unittest.TestCase):
def run_test(self, failures=None, files=None):
failures = failures or []
host = MockSystemHost()
host.filesystem.files = files or {}
port = TestPort(host=host, port_name='test-mac-snowleopard', options=optparse.Values())
actual_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
expected_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
write_test_result(host.filesystem, port, '/tmp', 'foo.html', actual_output, expected_output, failures)
return host.filesystem.written_files
def test_success(self):
# Nothing is written when the test passes.
written_files = self.run_test(failures=[])
self.assertEqual(written_files, {})
def test_reference_exists(self):
failure = test_failures.FailureReftestMismatch()
failure.reference_filename = '/src/exists-expected.sky'
files = {'/src/exists-expected.sky': 'yup'}
written_files = self.run_test(failures=[failure], files=files)
self.assertEqual(written_files, {'/tmp/exists-expected.sky': 'yup'})
failure = test_failures.FailureReftestMismatchDidNotOccur()
failure.reference_filename = '/src/exists-expected-mismatch.sky'
files = {'/src/exists-expected-mismatch.sky': 'yup'}
written_files = self.run_test(failures=[failure], files=files)
self.assertEqual(written_files, {'/tmp/exists-expected-mismatch.sky': 'yup'})
def test_reference_is_missing(self):
failure = test_failures.FailureReftestMismatch()
failure.reference_filename = 'notfound.html'
written_files = self.run_test(failures=[failure], files={})
self.assertEqual(written_files, {})
failure = test_failures.FailureReftestMismatchDidNotOccur()
failure.reference_filename = 'notfound.html'
written_files = self.run_test(failures=[failure], files={})
self.assertEqual(written_files, {})
|
davidvon/pipa-pay-server | refs/heads/master | site-packages/flask_admin/contrib/appengine/view.py | 5 | import logging
from flask.ext.admin.model import BaseModelView
from wtforms_appengine import db as wt_db
from wtforms_appengine import ndb as wt_ndb
from google.appengine.ext import db
from google.appengine.ext import ndb
class NdbModelView(BaseModelView):
"""
AppEngine NDB model scaffolding.
"""
def get_pk_value(self, model):
return model.key.urlsafe()
def scaffold_list_columns(self):
return sorted([k for (k, v) in self.model.__dict__.iteritems() if isinstance(v, ndb.Property)])
def scaffold_sortable_columns(self):
return [k for (k, v) in self.model.__dict__.iteritems() if isinstance(v, ndb.Property) and v._indexed]
def init_search(self):
return None
def is_valid_filter(self):
pass
def scaffold_filters(self):
#TODO: implement
pass
def scaffold_form(self):
return wt_ndb.model_form(self.model())
def get_list(self, page, sort_field, sort_desc, search, filters):
#TODO: implement filters (don't think search can work here)
q = self.model.query()
if sort_field:
order_field = getattr(self.model, sort_field)
if sort_desc:
order_field = -order_field
q = q.order(order_field)
results = q.fetch(self.page_size, offset=page*self.page_size)
return q.count(), results
def get_one(self, urlsafe_key):
return ndb.Key(urlsafe=urlsafe_key).get()
def create_model(self, form):
try:
model = self.model()
form.populate_obj(model)
model.put()
return True
except Exception as ex:
if not self.handle_view_exception(ex):
#flash(gettext('Failed to create record. %(error)s',
# error=ex), 'error')
logging.exception('Failed to create record.')
return False
def update_model(self, form, model):
try:
form.populate_obj(model)
model.put()
return True
except Exception as ex:
if not self.handle_view_exception(ex):
#flash(gettext('Failed to update record. %(error)s',
# error=ex), 'error')
logging.exception('Failed to update record.')
return False
def delete_model(self, model):
try:
model.key.delete()
return True
except Exception as ex:
if not self.handle_view_exception(ex):
#flash(gettext('Failed to delete record. %(error)s',
# error=ex),
# 'error')
logging.exception('Failed to delete record.')
return False
class DbModelView(BaseModelView):
"""
AppEngine DB model scaffolding.
"""
def get_pk_value(self, model):
return str(model.key())
def scaffold_list_columns(self):
return sorted([k for (k, v) in self.model.__dict__.iteritems() if isinstance(v, db.Property)])
def scaffold_sortable_columns(self):
return [k for (k, v) in self.model.__dict__.iteritems() if isinstance(v, db.Property) and v._indexed]
def init_search(self):
return None
def is_valid_filter(self):
pass
def scaffold_filters(self):
#TODO: implement
pass
def scaffold_form(self):
return wt_db.model_form(self.model())
def get_list(self, page, sort_field, sort_desc, search, filters):
#TODO: implement filters (don't think search can work here)
q = self.model.all()
if sort_field:
if sort_desc:
sort_field = "-" + sort_field
q.order(sort_field)
results = q.fetch(self.page_size, offset=page*self.page_size)
return q.count(), results
def get_one(self, encoded_key):
return db.get(db.Key(encoded=encoded_key))
def create_model(self, form):
try:
model = self.model()
form.populate_obj(model)
model.put()
return True
except Exception as ex:
if not self.handle_view_exception(ex):
#flash(gettext('Failed to create record. %(error)s',
# error=ex), 'error')
logging.exception('Failed to create record.')
return False
def update_model(self, form, model):
try:
form.populate_obj(model)
model.put()
return True
except Exception as ex:
if not self.handle_view_exception(ex):
#flash(gettext('Failed to update record. %(error)s',
# error=ex), 'error')
logging.exception('Failed to update record.')
return False
def delete_model(self, model):
try:
model.delete()
return True
except Exception as ex:
if not self.handle_view_exception(ex):
#flash(gettext('Failed to delete record. %(error)s',
# error=ex),
# 'error')
logging.exception('Failed to delete record.')
return False
def ModelView(model):
if issubclass(model, ndb.Model):
return NdbModelView(model)
elif issubclass(model, db.Model):
return DbModelView(model)
else:
raise ValueError("Unsupported model: %s" % model)
|
jandrest2018/TWJ-2017-A | refs/heads/master | 04 Angular/C-Web/node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
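  # Matches project definition lines such as (illustrative):
  #   Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "app", "app\app.vcproj", "{12345678-...}"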
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # Check that we received at least one parameter: the .sln path.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
jkbits1/backend | refs/heads/master | db/test/test_02_submit_rider.py | 3 | import pytest
import pgdb
@pytest.fixture
def pgdbConn(dbhost, db, frontenduser):
return pgdb.connect(dbhost + ':' + db + ':' + frontenduser)
def generic_rider_insert(conn, args):
cursor=conn.cursor()
cursor.execute("""
SELECT * from carpoolvote.submit_new_rider (
%(IPAddress)s,
%(RiderFirstName)s,
%(RiderLastName)s,
%(RiderEmail)s,
%(RiderPhone)s,
%(RiderCollectionZIP)s,
%(RiderDropOffZIP)s,
%(AvailableRideTimesLocal)s,
%(TotalPartySize)s,
%(TwoWayTripNeeded)s,
%(RiderIsVulnerable)s,
%(RiderWillNotTalkPolitics)s,
%(PleaseStayInTouch)s,
%(NeedWheelchair)s,
%(RiderPreferredContact)s,
%(RiderAccommodationNotes)s,
%(RiderLegalConsent)s,
%(RiderWillBeSafe)s,
%(RiderCollectionAddress)s,
%(RiderDestinationAddress)s
)
""", args)
results=cursor.fetchone()
conn.commit()
return {'uuid' : results[0], 'error_code' : results[1], 'error_text' : results[2]}
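# For reference, the helper above returns a plain dict. A successful
# submission looks like (uuid value illustrative):
#   {'uuid': '123e4567-e89b-...', 'error_code': 0, 'error_text': ''}
# while the validation-failure cases below return an empty uuid, a non-empty
# error_text, and error_code 2.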
def test_insert_rider_000_all_valid(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
cursor = pgdbConn.cursor()
cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid})
results = cursor.fetchone()
assert results[0] == 'Pending'
def test_insert_rider_001_IPAddress_invalid(pgdbConn):
args = {
'IPAddress' : 'abcd',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_002_RiderCollectionZIP_invalid_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_003_RiderCollectionZIP_invalid_not_exists(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '00000',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_004_RiderCollectionZIP_invalid_not_number(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : 'abcd',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_005_RiderDropOffZIP_invalid_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_006_RiderDropOffZIP_invalid_not_found(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '00000',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_007_RiderDropOffZIP_invalid_not_number(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : 'abcd',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_008_AvailableRideTimesLocal_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_009_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_010_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_011_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_012_AvailableRideTimesLocal_invalid_chronology(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T03:00/2018-10-01T02:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_013_AvailableRideTimesLocal_invalid_past(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2000-10-01T02:00/2000-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_014_TotalPartySize_invalid_zero(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '0',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_015_TotalPartySize_invalid_negative(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '-10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_016_RiderPreferredContact_valid_SMS(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'SMS',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
def test_insert_rider_017_RiderPreferredContact_valid_Email(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
def test_insert_rider_018_RiderPreferredContact_valid_Phone(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Phone',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
|
kikokubo/Sick-Beard-TPB | refs/heads/ThePirateBay | lib/hachoir_parser/__init__.py | 90 | from lib.hachoir_parser.version import __version__
from lib.hachoir_parser.parser import ValidateError, HachoirParser, Parser
from lib.hachoir_parser.parser_list import ParserList, HachoirParserList
from lib.hachoir_parser.guess import (QueryParser, guessParser, createParser)
from lib.hachoir_parser import (archive, audio, container,
file_system, image, game, misc, network, program, video)
|
talib570/catalog | refs/heads/master | project.py | 1 | from flask import Flask, render_template, jsonify, url_for, flash
from flask import session as login_session
from flask import make_response, request, redirect
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Game, Player, Team, User
import helper
from werkzeug import secure_filename
import random, string, json, httplib2, requests, os, datetime
from time import localtime, strftime
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
UPLOAD_FOLDER = 'static/images/userimages/'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Team player Application"
# Connect to Database and create database session
engine = create_engine('sqlite:///teamcatalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route('/login')
def showLogin():
state = ''.join(
random.choice(string.ascii_uppercase + string.digits) for x in range(32))
login_session['state'] = state
# return "The current session state is %s" % login_session['state']
return render_template('login.html', STATE=state)
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = request.data
print "access token received %s " % access_token
app_id = json.loads(open('fb_client_secrets.json', 'r').read())[
'web']['app_id']
app_secret = json.loads(
open('fb_client_secrets.json', 'r').read())['web']['app_secret']
url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (
app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, 'GET')[1]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.4/me"
# strip expire tag from access token
token = result.split("&")[0]
url = 'https://graph.facebook.com/v2.4/me?%s&fields=name,id,email' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
# print "url sent for API access:%s"% url
# print "API JSON result: %s" % result
data = json.loads(result)
login_session['provider'] = 'facebook'
login_session['username'] = data["name"]
login_session['email'] = data["email"]
login_session['facebook_id'] = data["id"]
# The token must be stored in the login_session in order to properly logout, let's strip out the information before the equals sign in our token
stored_token = token.split("=")[1]
login_session['access_token'] = stored_token
# Get user picture
url = 'https://graph.facebook.com/v2.4/me/picture?%s&redirect=0&height=200&width=200' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['picture'] = data["data"]["url"]
# see if user exists
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("Now logged in as %s" % login_session['username'])
return output
@app.route('/fbdisconnect')
def fbdisconnect():
facebook_id = login_session['facebook_id']
# The access token must me included to successfully logout
access_token = login_session['access_token']
url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (facebook_id,access_token)
h = httplib2.Http()
result = h.request(url, 'DELETE')[1]
return "you have been logged out"
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code, now compatible with Python3
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
# Submit request, parse response - Python3 compatible
h = httplib2.Http()
response = h.request(url, 'GET')[1]
str_response = response.decode('utf-8')
result = json.loads(str_response)
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is already connected.'),
200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['provider'] = 'google'
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# see if user exists, if it doesn't make a new one
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
return output
# User Helper Functions
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session[
'email'], picture=login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
    # Only disconnect a connected user. Note: gconnect stores the token under
    # 'access_token' in the session, so that is what we look up here.
    access_token = login_session.get('access_token')
    if access_token is None:
        response = make_response(
            json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] != '200':
        # For whatever reason, the given token was invalid.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
    response = make_response(json.dumps('Successfully disconnected.'), 200)
    response.headers['Content-Type'] = 'application/json'
    return response
# Disconnect based on provider
@app.route('/disconnect')
def disconnect():
print login_session
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
if login_session['provider'] == 'facebook':
fbdisconnect()
del login_session['facebook_id']
flash("You have successfully been logged out.")
login_session.clear()
return redirect(url_for('showTeams'))
else:
flash("You were not logged in")
return redirect(url_for('showTeams'))
# JSON APIs to view Teams Information
@app.route('/team/<int:team_id>/players/JSON')
def teamPlayerJSON(team_id):
team = session.query(Team).filter_by(id=team_id,is_delete='0').one()
players = session.query(Player).filter_by(is_delete='0').all()
return jsonify(Players=[i.serialize for i in players])
@app.route('/team/<int:team_id>/player/<int:player_id>/JSON')
def playerJSON(team_id, player_id):
player = session.query(Player).filter_by(id=player_id,is_delete='0').one()
return jsonify(Player=player.serialize)
@app.route('/teams/JSON')
def teamsJSON():
teams = session.query(Team).filter_by(is_delete='0').all()
return jsonify(teams=[team.serialize for team in teams])
# Show all teams
@app.route('/')
@app.route('/teams/')
def showTeams():
teams = session.query(Team).filter_by(is_delete='0').order_by(asc(Team.name))
    # both logged-in and anonymous users see the same listing; the template
    # adapts its controls based on the session
    return render_template('teams/teams.html', teams=teams)
# Create a new team
@app.route('/team/new/', methods=['GET', 'POST'])
def newTeam():
if 'username' not in login_session:
return redirect('/login')
games = session.query(Game)
if request.method == "POST":
file = request.files['logo']
if file and helper.allowed_file(file.filename):
extension = file.filename.rsplit('.' ,1)
filename = secure_filename(file.filename)
filename = helper.hash_filename(filename)+"."+extension[1]
# saves file in file system
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
else:
filename = 'no_logo.jpg'
strdate = request.form['start_year'].rsplit('/', 1)
dateObj = datetime.datetime.strptime(strdate[1], "%Y").date()
newTeam = Team(
name=request.form['name'],
locallity=request.form['locallity'],
logo=filename,
start_year=dateObj,
game_id=request.form['game_id'],
created_on=datetime.datetime.strptime(strftime("%Y-%m-%d %H:%M:%S", localtime()), "%Y-%m-%d %H:%M:%S"),
created_by=login_session['user_id'],
is_active='1' if request.form['status'] == 'Active' else '0',
is_delete='0',
)
session.add(newTeam)
session.commit()
flash('New Team %s Successfully Created' % newTeam.name)
return redirect(url_for('showTeams'))
else:
return render_template('teams/newteam.html', games=games)
@app.route('/team/<int:team_id>/removeTeamLogo', methods=["GET", "POST"])
def removeTeamLogo(team_id):
if 'username' not in login_session:
return redirect('/login')
if request.method == "POST":
team = session.query(Team).filter_by(id=team_id).one()
if team.logo != 'no_logo.jpg':
            # the logo column stores only a filename; build the full path
            logo_path = os.path.join(app.config['UPLOAD_FOLDER'], team.logo)
            os.remove(logo_path) if os.path.exists(logo_path) else None
team.logo = 'no_logo.jpg'
session.add(team)
session.commit()
return jsonify(status=True)
else:
return "Remove implementation"
@app.route('/player/<int:player_id>/removePlayerPicture', methods=["GET", "POST"])
def removePlayerPicture(player_id):
if 'username' not in login_session:
return redirect('/login')
if request.method == "POST":
player = session.query(Player).filter_by(id=player_id).one()
if player.picture != 'no_picture.jpg':
            # the picture column stores only a filename; build the full path
            picture_path = os.path.join(app.config['UPLOAD_FOLDER'], player.picture)
            os.remove(picture_path) if os.path.exists(picture_path) else None
player.picture = 'no_picture.jpg'
session.add(player)
session.commit()
return jsonify(status=True)
else:
return "Remove implementation"
# Edit a team
@app.route('/team/<int:team_id>/edit/', methods=["GET", "POST"])
def editTeam(team_id):
if 'username' not in login_session:
return redirect('/login')
team = session.query(Team).filter_by(id=team_id).one()
if team.created_by != login_session['user_id']:
return "<script>function myFunction() {alert('You are not authorized to edit this team. Please create your own team in order to edit.');}</script><body onload='myFunction()''>"
if request.method=="POST":
team.name=request.form['name']
team.locallity=request.form['locallity']
team.game_id=request.form['game_id']
team.is_active='1' if request.form['status'] == 'Active' else '0'
team.name=request.form['name']
        # only replace the logo when a new file was actually uploaded
        if 'logo' in request.files:
file = request.files['logo']
if file and helper.allowed_file(file.filename):
extension = file.filename.rsplit('.' ,1)
filename = secure_filename(file.filename)
filename = helper.hash_filename(filename)+"."+extension[1]
team.logo = filename
# saves file in file system
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
session.add(team)
session.commit()
flash('Team Successfully Edited %s' % team.name)
return redirect(url_for('showTeams'))
else:
games= session.query(Game)
return render_template('teams/editteam.html', team=team, games=games)
# Delete a team
@app.route('/team/<int:team_id>/delete/', methods=["GET", "POST"])
def deleteTeam(team_id):
if 'username' not in login_session:
return redirect('/login')
team = session.query(Team).filter_by(id=team_id).one()
if team.created_by != login_session['user_id']:
return "<script>function myFunction() {alert('You are not authorized to delete this team. Please create your own team in order to delete.');}</script><body onload='myFunction()''>"
if request.method == 'POST':
team = session.query(Team).filter_by(id=team_id).one()
team.is_delete = '1'
session.add(team)
session.commit()
flash('%s Successfully Deleted' % team.name)
return redirect(url_for('showTeams'))
else:
return render_template('teams/deleteteam.html', team=team)
# Show team players
@app.route('/team/<int:team_id>/')
@app.route('/team/<int:team_id>/players/')
def showPlayers(team_id):
players = session.query(Player).filter_by(team_id=team_id, is_delete='0')
team = session.query(Team).filter_by(id=team_id).one()
return render_template('players/players.html', players=players, team=team)
# Create a new player
@app.route('/team/<int:team_id>/player/new/', methods=["GET", "POST"])
def newPlayer(team_id):
if 'username' not in login_session:
return redirect('/login')
if request.method == "POST":
file = request.files['picture']
if file and helper.allowed_file(file.filename):
extension = file.filename.rsplit('.' ,1)
filename = secure_filename(file.filename)
filename = helper.hash_filename(filename)+"."+extension[1]
# saves file in file system
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
else:
filename = 'no_picture.jpg'
newPlayer = Player(
name=request.form['name'],
email=request.form['email'],
phone=request.form['phone'],
skill_level=request.form['skill_level'],
picture=filename,
summary=request.form['summary'],
team_id=team_id,
share_contact=request.form['share_contact'],
created_on=datetime.datetime.strptime(strftime("%Y-%m-%d %H:%M:%S", localtime()), "%Y-%m-%d %H:%M:%S"),
created_by=login_session['user_id'],
is_active='1' if request.form['status'] == 'Active' else '0',
is_delete='0',
)
session.add(newPlayer)
session.commit()
flash('New Player %s Successfully Created' % newPlayer.name)
return redirect(url_for('showPlayers', team_id=team_id))
else:
skill_levels = ['Beginner', 'Intermediate', 'Advanced']
return render_template('players/newplayer.html', skill_levels=skill_levels)
# Edit player details
@app.route('/team/<int:team_id>/player/<int:player_id>/edit', methods=["GET", "POST"])
def editPlayer(team_id, player_id):
if 'username' not in login_session:
return redirect('/login')
player = session.query(Player).filter_by(team_id=team_id, is_delete='0', id=player_id).one()
if player.created_by != login_session['user_id']:
return "<script>function myFunction() {alert('You are not authorized to edit this player. Please create your own player in order to edit.');}</script><body onload='myFunction()''>"
team = session.query(Team).filter_by(id=team_id).one()
skill_levels = ['Beginner', 'Intermediate', 'Advanced']
if request.method == "POST":
player.name=request.form['name']
player.email=request.form['email']
player.phone=request.form['phone']
player.skill_level=request.form['skill_level']
player.summary=request.form['summary']
player.is_active='1' if request.form['status'] == 'Active' else '0'
player.share_contact=request.form['share_contact']
        # only replace the picture when a new file was actually uploaded
        if 'picture' in request.files:
file = request.files['picture']
if file and helper.allowed_file(file.filename):
extension = file.filename.rsplit('.' ,1)
filename = secure_filename(file.filename)
filename = helper.hash_filename(filename)+"."+extension[1]
player.picture = filename
# saves file in file system
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
session.add(player)
session.commit()
flash('Player %s Edited Successfully' % player.name)
return redirect(url_for('showPlayers', team_id=team_id))
return render_template('players/editplayer.html', skill_levels=skill_levels, player=player,team=team)
# Delete a player
@app.route('/team/<int:team_id>/player/<int:player_id>/delete', methods=["GET", "POST"])
def deletePlayer(team_id, player_id):
if 'username' not in login_session:
return redirect('/login')
player = session.query(Player).filter_by(team_id=team_id, is_delete='0', id=player_id).one()
if player.created_by != login_session['user_id']:
return "<script>function myFunction() {alert('You are not authorized to delete this player. Please create your own player in order to delete.');}</script><body onload='myFunction()''>"
team = session.query(Team).filter_by(id=team_id).one()
if request.method == "POST":
player.is_delete = '1'
session.add(player)
session.commit()
flash('%s Deleted Successfully' % player.name)
return redirect(url_for('showPlayers', team_id=team_id))
else:
return render_template('players/deleteplayer.html', player=player, team=team, team_id=team_id)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
afandria/mojo | refs/heads/master | build/android/pylib/base/base_setup.py | 33 | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base script for doing test setup."""
import logging
import os
from pylib import constants
from pylib import valgrind_tools
from pylib.utils import isolator
def GenerateDepsDirUsingIsolate(suite_name, isolate_file_path,
isolate_file_paths, deps_exclusion_list):
"""Generate the dependency dir for the test suite using isolate.
Args:
suite_name: Name of the test suite (e.g. base_unittests).
isolate_file_path: .isolate file path to use. If there is a default .isolate
file path for the suite_name, this will override it.
isolate_file_paths: Dictionary with the default .isolate file paths for
the test suites.
deps_exclusion_list: A list of files that are listed as dependencies in the
.isolate files but should not be pushed to the device.
Returns:
The Isolator instance used to remap the dependencies, or None.
"""
if isolate_file_path:
if os.path.isabs(isolate_file_path):
isolate_abs_path = isolate_file_path
else:
isolate_abs_path = os.path.join(constants.DIR_SOURCE_ROOT,
isolate_file_path)
else:
isolate_rel_path = isolate_file_paths.get(suite_name)
if not isolate_rel_path:
logging.info('Did not find an isolate file for the test suite.')
return
isolate_abs_path = os.path.join(constants.DIR_SOURCE_ROOT, isolate_rel_path)
isolated_abs_path = os.path.join(
constants.GetOutDirectory(), '%s.isolated' % suite_name)
assert os.path.exists(isolate_abs_path), 'Cannot find %s' % isolate_abs_path
i = isolator.Isolator(constants.ISOLATE_DEPS_DIR)
i.Clear()
i.Remap(isolate_abs_path, isolated_abs_path)
# We're relying on the fact that timestamps are preserved
# by the remap command (hardlinked). Otherwise, all the data
# will be pushed to the device once we move to using time diff
# instead of md5sum. Perform a sanity check here.
i.VerifyHardlinks()
i.PurgeExcluded(deps_exclusion_list)
i.MoveOutputDeps()
return i
def PushDataDeps(device, device_dir, test_options):
valgrind_tools.PushFilesForTool(test_options.tool, device)
if os.path.exists(constants.ISOLATE_DEPS_DIR):
device.PushChangedFiles([(constants.ISOLATE_DEPS_DIR, device_dir)],
delete_device_stale=test_options.delete_stale_data)
|
brunogamacatao/portalsaladeaula | refs/heads/master | django/contrib/gis/gdal/__init__.py | 397 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.datasource import DataSource
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, gdal_release_date, GEOJSON, GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
from django.contrib.gis.gdal.geometries import OGRGeometry
HAS_GDAL = True
except:
HAS_GDAL, GEOJSON = False, False
try:
from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
# No ctypes, but don't raise an exception.
pass
from django.contrib.gis.gdal.error import check_err, OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
|
hbrunn/OpenUpgrade | refs/heads/master | addons/google_drive/google_drive.py | 41 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
import werkzeug.urls
import urllib2
import json
import re
_logger = logging.getLogger(__name__)
class config(osv.Model):
_name = 'google.drive.config'
_description = "Google Drive templates config"
def get_google_drive_url(self, cr, uid, config_id, res_id, template_id, context=None):
config = self.browse(cr, SUPERUSER_ID, config_id, context=context)
model = config.model_id
filter_name = config.filter_id and config.filter_id.name or False
record = self.pool.get(model.model).read(cr, uid, res_id, [], context=context)
record.update({'model': model.name, 'filter': filter_name})
name_gdocs = config.name_template
try:
name_gdocs = name_gdocs % record
except:
raise osv.except_osv(_('Key Error!'), _("At least one key cannot be found in your Google Drive name pattern"))
attach_pool = self.pool.get("ir.attachment")
attach_ids = attach_pool.search(cr, uid, [('res_model', '=', model.model), ('name', '=', name_gdocs), ('res_id', '=', res_id)])
url = False
if attach_ids:
attachment = attach_pool.browse(cr, uid, attach_ids[0], context)
url = attachment.url
else:
url = self.copy_doc(cr, uid, res_id, template_id, name_gdocs, model.model, context).get('url')
return url
def get_access_token(self, cr, uid, scope=None, context=None):
ir_config = self.pool['ir.config_parameter']
google_drive_refresh_token = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_refresh_token')
user_is_admin = self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')
if not google_drive_refresh_token:
if user_is_admin:
raise self.pool.get('res.config.settings').get_config_warning(cr, _("You haven't configured 'Authorization Code' generated from google, Please generate and configure it in %(menu:base_setup.menu_general_configuration)s."), context=context)
else:
raise osv.except_osv(_('Error!'), _("Google Drive is not yet configured. Please contact your administrator."))
google_drive_client_id = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_client_id')
google_drive_client_secret = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_client_secret')
#For Getting New Access Token With help of old Refresh Token
        data = werkzeug.urls.url_encode(dict(
            client_id=google_drive_client_id,
            refresh_token=google_drive_refresh_token,
            client_secret=google_drive_client_secret,
            grant_type="refresh_token",
            scope=scope or 'https://www.googleapis.com/auth/drive'))
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept-Encoding": "gzip, deflate"}
try:
req = urllib2.Request('https://accounts.google.com/o/oauth2/token', data, headers)
content = urllib2.urlopen(req).read()
except urllib2.HTTPError:
if user_is_admin:
raise self.pool.get('res.config.settings').get_config_warning(cr, _("Something went wrong during the token generation. Please request again an authorization code in %(menu:base_setup.menu_general_configuration)s."), context=context)
else:
raise osv.except_osv(_('Error!'), _("Google Drive is not yet configured. Please contact your administrator."))
content = json.loads(content)
return content.get('access_token')
def copy_doc(self, cr, uid, res_id, template_id, name_gdocs, res_model, context=None):
ir_config = self.pool['ir.config_parameter']
google_web_base_url = ir_config.get_param(cr, SUPERUSER_ID, 'web.base.url')
access_token = self.get_access_token(cr, uid, context=context)
# Copy template in to drive with help of new access token
request_url = "https://www.googleapis.com/drive/v2/files/%s?fields=parents/id&access_token=%s" % (template_id, access_token)
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept-Encoding": "gzip, deflate"}
try:
req = urllib2.Request(request_url, None, headers)
parents = urllib2.urlopen(req).read()
except urllib2.HTTPError:
raise self.pool.get('res.config.settings').get_config_warning(cr, _("The Google Template cannot be found. Maybe it has been deleted."), context=context)
parents_dict = json.loads(parents)
record_url = "Click on link to open Record in OpenERP\n %s/?db=%s#id=%s&model=%s" % (google_web_base_url, cr.dbname, res_id, res_model)
data = {"title": name_gdocs, "description": record_url, "parents": parents_dict['parents']}
request_url = "https://www.googleapis.com/drive/v2/files/%s/copy?access_token=%s" % (template_id, access_token)
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data_json = json.dumps(data)
# resp, content = Http().request(request_url, "POST", data_json, headers)
req = urllib2.Request(request_url, data_json, headers)
content = urllib2.urlopen(req).read()
content = json.loads(content)
res = {}
if content.get('alternateLink'):
attach_pool = self.pool.get("ir.attachment")
attach_vals = {'res_model': res_model, 'name': name_gdocs, 'res_id': res_id, 'type': 'url', 'url': content['alternateLink']}
res['id'] = attach_pool.create(cr, uid, attach_vals)
# Commit in order to attach the document to the current object instance, even if the permissions has not been written.
cr.commit()
res['url'] = content['alternateLink']
key = self._get_key_from_url(res['url'])
request_url = "https://www.googleapis.com/drive/v2/files/%s/permissions?emailMessage=This+is+a+drive+file+created+by+OpenERP&sendNotificationEmails=false&access_token=%s" % (key, access_token)
data = {'role': 'writer', 'type': 'anyone', 'value': '', 'withLink': True}
try:
req = urllib2.Request(request_url, json.dumps(data), headers)
urllib2.urlopen(req)
except urllib2.HTTPError:
raise self.pool.get('res.config.settings').get_config_warning(cr, _("The permission 'reader' for 'anyone with the link' has not been written on the document"), context=context)
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if user.email:
data = {'role': 'writer', 'type': 'user', 'value': user.email}
try:
req = urllib2.Request(request_url, json.dumps(data), headers)
urllib2.urlopen(req)
except urllib2.HTTPError:
pass
return res
def get_google_drive_config(self, cr, uid, res_model, res_id, context=None):
'''
Function called by the js, when no google doc are yet associated with a record, with the aim to create one. It
will first seek for a google.docs.config associated with the model `res_model` to find out what's the template
of google doc to copy (this is usefull if you want to start with a non-empty document, a type or a name
different than the default values). If no config is associated with the `res_model`, then a blank text document
with a default name is created.
:param res_model: the object for which the google doc is created
:param ids: the list of ids of the objects for which the google doc is created. This list is supposed to have
a length of 1 element only (batch processing is not supported in the code, though nothing really prevent it)
:return: the config id and config name
'''
if not res_id:
raise osv.except_osv(_('Google Drive Error!'), _("Creating google drive may only be done by one at a time."))
# check if a model is configured with a template
config_ids = self.search(cr, uid, [('model_id', '=', res_model)], context=context)
configs = []
for config in self.browse(cr, uid, config_ids, context=context):
if config.filter_id:
if (config.filter_id.user_id and config.filter_id.user_id.id != uid):
#Private
continue
domain = [('id', 'in', [res_id])] + eval(config.filter_id.domain)
local_context = context and context.copy() or {}
local_context.update(eval(config.filter_id.context))
google_doc_configs = self.pool.get(config.filter_id.model_id).search(cr, uid, domain, context=local_context)
if google_doc_configs:
configs.append({'id': config.id, 'name': config.name})
else:
configs.append({'id': config.id, 'name': config.name})
return configs
def _get_key_from_url(self, url):
mo = re.search("(key=|/d/)([A-Za-z0-9-_]+)", url)
if mo:
return mo.group(2)
return None
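    # Illustrative examples for the key extraction above (URLs made up):
    #   .../document/d/1AbC-dEf/edit      -> '1AbC-dEf'
    #   .../spreadsheet/ccc?key=0AbCdEfG  -> '0AbCdEfG'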
def _resource_get(self, cr, uid, ids, name, arg, context=None):
result = {}
for data in self.browse(cr, uid, ids, context):
mo = self._get_key_from_url(data.google_drive_template_url)
if mo:
result[data.id] = mo
else:
raise osv.except_osv(_('Incorrect URL!'), _("Please enter a valid Google Document URL."))
return result
def _client_id_get(self, cr, uid, ids, name, arg, context=None):
result = {}
client_id = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'google_drive_client_id')
for config_id in ids:
result[config_id] = client_id
return result
_columns = {
'name': fields.char('Template Name', required=True, size=1024),
'model_id': fields.many2one('ir.model', 'Model', ondelete='set null', required=True),
'model': fields.related('model_id', 'model', type='char', string='Model', readonly=True),
'filter_id': fields.many2one('ir.filters', 'Filter', domain="[('model_id', '=', model)]"),
'google_drive_template_url': fields.char('Template URL', required=True, size=1024),
'google_drive_resource_id': fields.function(_resource_get, type="char", string='Resource Id'),
'google_drive_client_id': fields.function(_client_id_get, type="char", string='Google Client '),
'name_template': fields.char('Google Drive Name Pattern', size=64, help='Choose how the new google drive will be named, on google side. Eg. gdoc_%(field_name)s', required=True),
'active': fields.boolean('Active'),
}
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
res = {}
if model_id:
model = self.pool['ir.model'].browse(cr, uid, model_id, context=context)
res['value'] = {'model': model.model}
else:
res['value'] = {'filter_id': False, 'model': False}
return res
_defaults = {
'name_template': 'Document %(name)s',
'active': True,
}
def _check_model_id(self, cr, uid, ids, context=None):
config_id = self.browse(cr, uid, ids[0], context=context)
if config_id.filter_id and config_id.model_id.model != config_id.filter_id.model_id:
return False
return True
_constraints = [
(_check_model_id, 'Model of selected filter is not matching with model of current template.', ['model_id', 'filter_id']),
]
def get_google_scope(self):
return 'https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.file'
class base_config_settings(osv.TransientModel):
_inherit = "base.config.settings"
_columns = {
'google_drive_authorization_code': fields.char('Authorization Code', size=124),
'google_drive_uri': fields.char('URI', readonly=True, help="The URL to generate the authorization code from Google"),
}
_defaults = {
'google_drive_uri': lambda s, cr, uid, c: s.pool['google.service']._get_google_token_uri(cr, uid, 'drive', scope=s.pool['google.drive.config'].get_google_scope(), context=c),
'google_drive_authorization_code': lambda s, cr, uid, c: s.pool['ir.config_parameter'].get_param(cr, uid, 'google_drive_authorization_code', context=c),
}
def set_google_authorization_code(self, cr, uid, ids, context=None):
ir_config_param = self.pool['ir.config_parameter']
config = self.browse(cr, uid, ids[0], context)
auth_code = config.google_drive_authorization_code
if auth_code and auth_code != ir_config_param.get_param(cr, uid, 'google_drive_authorization_code', context=context):
refresh_token = self.pool['google.service'].generate_refresh_token(cr, uid, 'drive', config.google_drive_authorization_code, context=context)
ir_config_param.set_param(cr, uid, 'google_drive_authorization_code', auth_code)
ir_config_param.set_param(cr, uid, 'google_drive_refresh_token', refresh_token)
|
S-Bahrasemani/hhana | refs/heads/master | mva/__init__.py | 4 | import os
import sys
# https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/PubComPlotStyle#ATLAS_labels
# https://twiki.cern.ch/twiki/pub/AtlasProtected/AtlasPolicyDocuments/Physics_Policy.pdf
ATLAS_LABEL = os.getenv('ATLAS_LABEL', 'Internal').strip()
BASE_DIR = os.getenv('HIGGSTAUTAU_MVA_DIR')
if not BASE_DIR:
sys.exit('You did not source setup.sh!')
ETC_DIR = os.path.join(BASE_DIR, 'etc')
DAT_DIR = os.path.join(BASE_DIR, 'dat')
BDT_DIR = os.path.join(BASE_DIR, 'bdts')
NTUPLE_PATH = os.path.join(os.getenv('HIGGSTAUTAU_NTUPLE_DIR'), 'prod_v29')
DEFAULT_STUDENT = 'hhskim'
# import rootpy before ROOT
import rootpy
import ROOT
# trigger PyROOT's finalSetup() early...
ROOT.kTRUE
import logging
log = logging.getLogger('mva')
if not os.environ.get("DEBUG", False):
    log.setLevel(logging.INFO)
    rootpy.log.setLevel(logging.INFO)
if hasattr(logging, 'captureWarnings'):
    logging.captureWarnings(True)
log['/ROOT.TH1D.Chi2TestX'].setLevel(log.WARNING)
# create the cache directory after the logger is configured
CACHE_DIR = os.path.join(BASE_DIR, 'cache')
if not os.path.exists(CACHE_DIR):
    log.info("creating directory %s" % CACHE_DIR)
    os.mkdir(CACHE_DIR)
# Speed things up a bit
ROOT.SetSignalPolicy(ROOT.kSignalFast)
if not os.getenv('MVA_NO_BATCH', False):
ROOT.gROOT.SetBatch(True)
log.info("ROOT is in batch mode")
from rootpy.utils.path import mkdir_p
def plots_dir(script):
script = os.path.basename(script)
script = os.path.splitext(script)[0]
dir = os.path.join(PLOTS_DIR, script)
mkdir_p(dir)
return dir
import numpy as np
# for reproducibilty
# especially for test/train set selection
np.random.seed(1987)
MMC_VERSION = 1
MMC_MASS = 'mmc%d_mass' % MMC_VERSION
MMC_PT = 'mmc%d_resonance_pt' % MMC_VERSION
from rootpy.utils.silence import silence_sout_serr
with silence_sout_serr():
from rootpy.stats import mute_roostats; mute_roostats()
# default minimizer options
ROOT.Math.MinimizerOptions.SetDefaultStrategy(1)
ROOT.Math.MinimizerOptions.SetDefaultMinimizer('Minuit2')
import yellowhiggs
log.info("using yellowhiggs {0}".format(yellowhiggs.__version__))
CONST_PARAMS = [
'Lumi',
'mu_XS8_ggH',
'mu_XS7_ggH',
'mu_XS8_VBF',
'mu_XS7_VBF',
'mu_XS8_WH',
'mu_XS7_WH',
'mu_XS8_ZH',
'mu_XS7_ZH',
'mu_BR_tautau',
]
POI = 'SigXsecOverSM'
# pip install --user GitPython
from git import Repo
REPO = Repo(BASE_DIR)
try:
REPO_BRANCH = REPO.active_branch
except:
REPO_BRANCH = 'master'
PLOTS_DIR = os.path.join(BASE_DIR, 'plots', 'variables')
def plot_dir(name):
return os.path.join(BASE_DIR, 'plots', name)
def save_canvas(canvas, directory, name, formats=None):
# save images in directories corresponding to current git branch
# filepath = os.path.join(directory, REPO_BRANCH, name)
filepath = os.path.join(directory, name)
path = os.path.dirname(filepath)
if not os.path.exists(path):
mkdir_p(path)
if formats is not None:
if isinstance(formats, basestring):
formats = formats.split()
for fmt in formats:
if fmt[0] != '.':
fmt = '.' + fmt
canvas.SaveAs(filepath + fmt)
else:
canvas.SaveAs(filepath)
from rootpy.plotting.style import get_style, set_style
def set_hsg4_style(shape='square'):
style = get_style('ATLAS', shape=shape)
#style.SetFrameLineWidth(2)
#style.SetLineWidth(2)
#style.SetTitleYOffset(1.8)
#style.SetTickLength(0.04, 'X')
#style.SetTickLength(0.02, 'Y')
# custom HSG4 modifications
# style.SetPadTopMargin(0.06)
style.SetPadLeftMargin(0.16)
style.SetTitleYOffset(1.6)
style.SetHistTopMargin(0.)
style.SetHatchesLineWidth(1)
style.SetHatchesSpacing(1)
ROOT.TGaxis.SetMaxDigits(4)
set_style(style)
set_hsg4_style()
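# --- illustrative usage sketch (assumption: not part of the original module) ---
# Shows how plot_dir() and save_canvas() above fit together; the Canvas
# import and its contents are assumptions made for the example.
def _example_save_plot():
    from rootpy.plotting import Canvas
    canvas = Canvas()
    # ... draw histograms on the canvas here ...
    # writes plots/example/demo.png and plots/example/demo.eps
    save_canvas(canvas, plot_dir('example'), 'demo', formats='png eps')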
|
henriquegemignani/randovania | refs/heads/master | randovania/gui/online_game_list_window.py | 1 | import datetime
from typing import List
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QPushButton, QDialogButtonBox, QDialog, QTableWidgetItem, QInputDialog, QLineEdit
from qasync import asyncSlot
from randovania.gui.generated.game_session_browser_dialog_ui import Ui_GameSessionBrowserDialog
from randovania.gui.lib import common_qt_lib, async_dialog
from randovania.gui.lib.qt_network_client import handle_network_errors, QtNetworkClient
from randovania.network_client.game_session import GameSessionListEntry
from randovania.network_client.network_client import ConnectionState
from randovania.network_common.error import WrongPassword
from randovania.network_common.session_state import GameSessionState
class GameSessionBrowserDialog(QDialog, Ui_GameSessionBrowserDialog):
sessions: List[GameSessionListEntry]
visible_sessions: List[GameSessionListEntry]
def __init__(self, network_client: QtNetworkClient):
super().__init__()
self.setupUi(self)
common_qt_lib.set_default_window_icon(self)
        self.network_client = network_client
        # start empty so update_list() is safe before the first refresh()
        self.sessions = []
        self.visible_sessions = []
self.refresh_button = QPushButton("Refresh")
self.button_box.addButton(self.refresh_button, QDialogButtonBox.ResetRole)
self.button_box.button(QDialogButtonBox.Ok).setEnabled(False)
self.button_box.button(QDialogButtonBox.Ok).setText("Join")
self.button_box.accepted.connect(self.attempt_join)
self.button_box.rejected.connect(self.reject)
self.refresh_button.clicked.connect(self.refresh)
checks = (
self.has_password_yes_check,
self.has_password_no_check,
self.state_setup_check,
self.state_inprogress_check,
self.state_finished_check,
self.filter_age_check,
)
for check in checks:
check.stateChanged.connect(self.update_list)
self.filter_name_edit.textEdited.connect(self.update_list)
self.filter_age_spin.valueChanged.connect(self.update_list)
self.table_widget.itemSelectionChanged.connect(self.on_selection_changed)
self.table_widget.itemDoubleClicked.connect(self.on_double_click)
self.network_client.ConnectionStateUpdated.connect(self.on_server_connection_state_updated)
self.on_server_connection_state_updated(self.network_client.connection_state)
@asyncSlot()
@handle_network_errors
async def refresh(self):
self.refresh_button.setEnabled(False)
try:
self.sessions = await self.network_client.get_game_session_list()
self.update_list()
finally:
self.refresh_button.setEnabled(True)
def on_selection_changed(self):
self.button_box.button(QDialogButtonBox.Ok).setEnabled(len(self.table_widget.selectedItems()) > 0)
@property
def selected_session(self) -> GameSessionListEntry:
return self.table_widget.selectedItems()[0].data(Qt.UserRole)
@asyncSlot(QTableWidgetItem)
async def on_double_click(self, item: QTableWidgetItem):
await self.attempt_join()
@asyncSlot()
@handle_network_errors
async def attempt_join(self):
if not self.visible_sessions:
return
session = self.selected_session
if session.has_password:
dialog = QInputDialog(self)
dialog.setWindowTitle("Enter password")
dialog.setLabelText("This session requires a password:")
dialog.setWindowModality(Qt.WindowModal)
dialog.setTextEchoMode(QLineEdit.Password)
if await async_dialog.execute_dialog(dialog) != dialog.Accepted:
return
password = dialog.textValue()
else:
password = None
try:
await self.network_client.join_game_session(session, password)
return self.accept()
except WrongPassword:
await async_dialog.warning(self, "Incorrect Password", "The password entered was incorrect.")
def update_list(self):
self.table_widget.clear()
self.table_widget.setHorizontalHeaderLabels(["Name", "State", "Players", "Password?", "Creator",
"Creation Date"])
name_filter = self.filter_name_edit.text().strip()
displayed_has_password = set()
if self.has_password_yes_check.isChecked():
displayed_has_password.add(True)
if self.has_password_no_check.isChecked():
displayed_has_password.add(False)
displayed_states = set()
for (check, state) in ((self.state_setup_check, GameSessionState.SETUP),
(self.state_inprogress_check, GameSessionState.IN_PROGRESS),
(self.state_finished_check, GameSessionState.FINISHED)):
if check.isChecked():
displayed_states.add(state)
dont_filter_age = not self.filter_age_check.isChecked()
now = datetime.datetime.now(tz=datetime.timezone.utc)
max_session_age = datetime.timedelta(days=self.filter_age_spin.value())
visible_sessions = [
session
for session in reversed(self.sessions)
if (session.has_password in displayed_has_password
and session.state in displayed_states
and name_filter in session.name
and (dont_filter_age or (now - session.creation_date) < max_session_age))
]
self.visible_sessions = visible_sessions
self.table_widget.setRowCount(len(visible_sessions))
for i, session in enumerate(visible_sessions):
name = QTableWidgetItem(session.name)
state = QTableWidgetItem(session.state.user_friendly_name)
players_item = QTableWidgetItem(str(session.num_players))
has_password = QTableWidgetItem("Yes" if session.has_password else "No")
creator = QTableWidgetItem(session.creator)
creation_date = QTableWidgetItem(session.creation_date.astimezone().strftime("%Y-%m-%d %H:%M"))
name.setData(Qt.UserRole, session)
self.table_widget.setItem(i, 0, name)
self.table_widget.setItem(i, 1, state)
self.table_widget.setItem(i, 2, players_item)
self.table_widget.setItem(i, 3, has_password)
self.table_widget.setItem(i, 4, creator)
self.table_widget.setItem(i, 5, creation_date)
for i in range(6):
self.table_widget.resizeColumnToContents(i)
self.status_label.setText(f"{len(self.sessions)} sessions total, {len(visible_sessions)} displayed.")
def on_server_connection_state_updated(self, state: ConnectionState):
self.server_connection_label.setText(f"Server: {state.value}")
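# --- illustrative usage sketch (assumption: not part of the original module) ---
# How this dialog is typically driven from an async Qt context; the
# network_client construction is assumed to happen elsewhere.
async def _example_open_browser(network_client: QtNetworkClient):
    dialog = GameSessionBrowserDialog(network_client)
    dialog.refresh()  # asyncSlot: schedules the session list download
    result = await async_dialog.execute_dialog(dialog)
    if result == QDialog.Accepted:
        print("joined:", dialog.selected_session.name)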
|
juniper-project/modelling-mongodb | refs/heads/master | src/main/conf/res/scripts/createCollection.py | 1 | #
# Copyright 2014 Modeliosoft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
datamodel = selectedElements.get(0)
collection = modelingSession.getModel().createPackage('Collection', datamodel, 'MongoDBModeler', 'Collection')
collection.addStereotype('PersistentProfile','DataModel')
collection.removeStereotypes('JavaDesigner','JavaPackage')
createdElement = collection |
xogeny/recon | refs/heads/master | recon/util.py | 1 | import struct
import bz2
def write_len(fp, l):
"""
A frequently used utility function to write an integer
to a given stream.
"""
fp.write(struct.pack('!L', l))
def conv_len(bytes):
"""
This takes some bytes and converts them to an integer following
the same conventions used by the other routines in this file.
"""
up = struct.unpack('!L', bytes)
return up[0]
def read_len(fp, ignoreEOF=False, verbose=False):
"""
This reads a length from the stream. If the ignoreEOF flag
    is set, a failure to read the length simply results in
    None being returned (vs. an exception being thrown).
"""
lbytes = fp.read(4)
#if verbose:
# print "Raw length bytes: "+str(repr(lbytes))
if len(lbytes)!=4:
if ignoreEOF:
return None
else: # pragma no cover
raise IOError("Failed to read length data")
up = struct.unpack('!L', lbytes)
return up[0]
# Transforms
T_INV = "inv"
T_AFF = "aff"
class InvTransform:
def __init__(self):
pass
def apply(self, data):
def afunc(x):
if type(x)==bool:
return not x
if type(x)==float:
return -x
if type(x)==int: # pragma: no cover
return -x
if type(x)==long: # pragma: no cover
return -x
else: # pragma: no cover
return x
return map(lambda x: afunc(x), data)
class AffineTransform:
def __init__(self, scale, offset):
self.scale = scale
self.offset = offset
def apply(self, data):
def sfunc(x):
# TODO: Are these sufficient?
if type(x)==float or type(x)==int or type(x)==long:
return x*self.scale+self.offset
else: # pragma: no cover
return x
return map(lambda x: sfunc(x), data)
def parse_transform(t):
if t==None:
return None
if type(t)!=str:
return None
trans = t.replace(" ","")
if trans==T_INV:
return InvTransform()
if trans.startswith(T_AFF+"(") and trans.endswith(")"):
try:
(s, o) = map(lambda x: float(x), trans[4:-1].split(","))
return AffineTransform(s, o)
except:
return None
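# --- illustrative self-check (assumption: not part of the original module) ---
# Demonstrates the transform parser defined above.
if __name__ == '__main__':
    inv = parse_transform("inv")
    print inv.apply([1.0, -2.0, True]) # -> [-1.0, 2.0, False]
    aff = parse_transform("aff(2.0, 1.0)")
    print aff.apply([0.0, 1.0, 2.0]) # -> [1.0, 3.0, 5.0]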
|
seem-sky/kbengine | refs/heads/master | kbe/src/lib/python/Lib/encodings/cp863.py | 272 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp863',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00b6, # PILCROW SIGN
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x2017, # DOUBLE LOW LINE
0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x008f: 0x00a7, # SECTION SIGN
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00a4, # CURRENCY SIGN
0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00a6, # BROKEN BAR
0x00a1: 0x00b4, # ACUTE ACCENT
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00a8, # DIAERESIS
0x00a5: 0x00b8, # CEDILLA
0x00a6: 0x00b3, # SUPERSCRIPT THREE
0x00a7: 0x00af, # MACRON
0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xb6' # 0x0086 -> PILCROW SIGN
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u2017' # 0x008d -> DOUBLE LOW LINE
'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
'\xa7' # 0x008f -> SECTION SIGN
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xa4' # 0x0098 -> CURRENCY SIGN
'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xa6' # 0x00a0 -> BROKEN BAR
'\xb4' # 0x00a1 -> ACUTE ACCENT
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xa8' # 0x00a4 -> DIAERESIS
'\xb8' # 0x00a5 -> CEDILLA
'\xb3' # 0x00a6 -> SUPERSCRIPT THREE
'\xaf' # 0x00a7 -> MACRON
'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x0098, # CURRENCY SIGN
0x00a6: 0x00a0, # BROKEN BAR
0x00a7: 0x008f, # SECTION SIGN
0x00a8: 0x00a4, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00af: 0x00a7, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00a6, # SUPERSCRIPT THREE
0x00b4: 0x00a1, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x0086, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00a5, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS
0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x2017: 0x008d, # DOUBLE LOW LINE
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
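### Illustrative round trip (assumption: not part of the generated codec)
if __name__ == '__main__':
    # encode a few CP863 (Canadian French DOS) characters, then decode back
    raw, _ = Codec().encode('Été') # -> b'\x90t\x82'
    assert Codec().decode(raw)[0] == 'Été'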
|
Br3nda/docvert | refs/heads/master | core/pipeline_type/loop.py | 1 | # -*- coding: utf-8 -*-
import os
import lxml.etree
import StringIO
import pipeline_item
import core.docvert_exception
class Loop(pipeline_item.pipeline_stage):
    def stage(self, pipeline_value):
        # No-op stage: returns the pipeline value unchanged.
        return pipeline_value
|
ngoix/OCRF | refs/heads/master | sklearn/decomposition/truncated_svd.py | 3 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK.
The default is larger than the default in `randomized_svd` to handle
sparse matrices that may have large slowly decaying spectrum.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0782... 0.0552... 0.0544... 0.0499... 0.0413...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.279...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
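# --- illustrative usage sketch (assumption: not part of the original module) ---
# Round trip through transform/inverse_transform on dense random data.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    svd = TruncatedSVD(n_components=3, random_state=0)
    X_reduced = svd.fit_transform(X)             # shape (20, 3)
    X_approx = svd.inverse_transform(X_reduced)  # low-rank approximation of X
    print(X.shape, X_reduced.shape, X_approx.shape)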
|
zhjunlang/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_asyncio/echo2.py | 121 | import os
if __name__ == '__main__':
buf = os.read(0, 1024)
os.write(1, b'OUT:'+buf)
os.write(2, b'ERR:'+buf)
|
vup1120/oq-risklib | refs/heads/master | docs/my_calculators/hello.py | 2 | import os
from openquake.commonlib.calculators import base
@base.calculators.add('hello')
class HelloCalculator(base.BaseCalculator):
def pre_execute(self):
pass
def execute(self):
return 'hello world'
def post_execute(self, result):
fname = os.path.join(self.oqparam.export_dir, 'hello.txt')
open(fname, 'w').write(result)
return dict(hello=fname)
|
boneknuckleskin/libforensics | refs/heads/master | code/lf/apps/msoffice/word/metadata.py | 13 | # Copyright 2009 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""
Metadata from Microsoft Word documents.
.. moduleauthor:: Michael Murr (mmurr@codeforensics.net)
"""
__docformat__ = "restructuredtext en"
__all__ = [
"WordMetadata"
]
from datetime import date
from lf.apps.msoffice.word.objects import Fib, SttbShortUnicode
class WordMetadata():
"""
Represents metadata from a Microsoft Word document.
.. attribute:: magic
The magic number from the FIB.
.. attribute:: version
The file format version from the FIB.
.. attribute:: lang_id
The language identifier from the FIB.
.. attribute:: encryption_key
The encryption key from the FIB.
.. attribute:: is_template
True if the document is a template.
.. attribute:: is_glossary
True if the document is a glossary.
.. attribute:: is_complex
True if the document is in complex fast-saved format.
.. attribute:: has_pictures
True if the document has pictures.
.. attribute:: is_encrypted
True if the document is encrypted.
.. attribute:: is_far_east_encoded
True if the document is encoded for the far east.
.. attribute:: created_environment
The environment the document was created in.
.. attribute:: saved_mac
True if the document was last saved on a Mac.
.. attribute:: magic_created_by
The magic number of the application that created the document.
.. attribute:: magic_revised_by
The magic number of the application that last revised the document.
.. attribute:: created_build_date
The build date of the application that created the document.
.. attribute:: revised_build_date
The build date of the application that last revised the document.
.. attribute:: last_saved_by
A list of the last authors to save the document.
.. attribute:: last_saved_locations
A list of the last locations the document was saved to (correspond
with last_saved_by)
.. attribute:: associated_strings
Associated strings.
.. attribute:: users_roles
A list of (user name, role) pairs for protected content.
"""
def __init__(self, cfb):
"""
Initializes a WordMetadata object.
:parameters:
cfb
A CompoundFile object for the word document.
"""
for entry in cfb.dir_entries.values():
if entry.name == "WordDocument":
stream_id = entry.sid
# end if
# end for
fib = Fib(cfb.get_stream(stream_id))
if fib.header.whichTblStm:
table_name = "1Table"
else:
table_name = "0Table"
# end if
for entry in cfb.dir_entries.values():
if entry.name == table_name:
stream_id = entry.sid
# end if
# end for
table_stream = cfb.get_stream(stream_id, ignore_size=True)
self.magic = fib.header.wIdent
self.version = fib.header.nFib
self.lang_id = fib.header.lid
self.encryption_key = fib.header.lKey
self.is_template = bool(fib.header.dot)
self.is_glossary = bool(fib.header.glsy)
self.is_complex = bool(fib.header.complex)
self.has_pictures = bool(fib.header.hasPic)
self.is_encrypted = bool(fib.header.encrypted)
self.is_far_east_encoded = bool(fib.header.farEast)
self.saved_mac = bool(fib.header.mac)
self.created_environment = fib.header.envr
self.magic_created_by = fib.shorts.wMagicCreated
self.magic_revised_by = fib.shorts.wMagicRevised
created_date = fib.longs.lProductCreated
year = (created_date % 100) + 1900
day = (created_date // 100) % 100
month = (created_date // 10000) % 100
self.created_build_date = date(year, month, day)
revised_date = fib.longs.lProductRevised
year = (revised_date % 100) + 1900
day = (revised_date // 100) % 100
month = (revised_date // 10000) % 100
self.revised_build_date = date(year, month, day)
if fib.fc_lcb.sttbSavedBy.lcb:
saved_by = SttbShortUnicode(
table_stream, fib.fc_lcb.sttbSavedBy.fc
)
last_saved_by = list(saved_by.data[::2])
last_saved_locations = list(saved_by.data[1::2])
else:
last_saved_by = list()
last_saved_locations = list()
# end if
if fib.fc_lcb.sttbfAssoc.lcb:
assoc = SttbShortUnicode(table_stream, fib.fc_lcb.sttbfAssoc.fc)
associated_strings = assoc.data
else:
associated_strings = list()
# end if
if hasattr(fib.fc_lcb, "sttbProtUser"):
if fib.fc_lcb.sttbProtUser.lcb:
prot_users = SttbShortUnicode(
table_stream, fib.fc_lcb.sttbProtUser.fc
)
users_roles = list(zip(prot_users.data, prot_users.extra_data))
else:
users_roles = list()
# end if
else:
users_roles = list()
# end if
self.last_saved_by = last_saved_by
self.last_saved_locations = last_saved_locations
self.associated_strings = associated_strings
self.users_roles = users_roles
# end def __init__
# end class WordMetadata
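# --- illustrative usage sketch (assumption: not part of the original module) ---
# Pulling a few fields out of a .doc file; constructing the CompoundFile
# object is left out because its module path depends on the LibForensics
# version, so ``cfb`` is assumed to be built elsewhere.
def _example_dump_metadata(cfb):
    meta = WordMetadata(cfb)
    print("created build date: {0}".format(meta.created_build_date))
    print("last saved by: {0}".format(meta.last_saved_by))
    print("users/roles: {0}".format(meta.users_roles))
# end def _example_dump_metadata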
|
mapbased/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py | 122 | # Copyright (C) 2010 Apple Inc. All rights reserved.
# Copyright (C) 2011 Patrick Gansterer <paroga@paroga.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for changelog.py."""
import changelog
import unittest2 as unittest
class ChangeLogCheckerTest(unittest.TestCase):
"""Tests ChangeLogChecker class."""
def assert_no_error(self, lines_to_check, changelog_data):
def handle_style_error(line_number, category, confidence, message):
self.fail('Unexpected error: %d %s %d %s for\n%s' % (line_number, category, confidence, message, changelog_data))
self.lines_to_check = set(lines_to_check)
checker = changelog.ChangeLogChecker('ChangeLog', handle_style_error, self.mock_should_line_be_checked)
checker.check(changelog_data.split('\n'))
def assert_error(self, expected_line_number, lines_to_check, expected_category, changelog_data):
self.had_error = False
def handle_style_error(line_number, category, confidence, message):
self.had_error = True
self.assertEqual(expected_line_number, line_number)
self.assertEqual(expected_category, category)
self.lines_to_check = set(lines_to_check)
checker = changelog.ChangeLogChecker('ChangeLog', handle_style_error, self.mock_should_line_be_checked)
checker.check(changelog_data.split('\n'))
self.assertTrue(self.had_error)
def mock_handle_style_error(self):
pass
def mock_should_line_be_checked(self, line_number):
return line_number in self.lines_to_check
def test_init(self):
checker = changelog.ChangeLogChecker('ChangeLog', self.mock_handle_style_error, self.mock_should_line_be_checked)
self.assertEqual(checker.file_path, 'ChangeLog')
self.assertEqual(checker.handle_style_error, self.mock_handle_style_error)
self.assertEqual(checker.should_line_be_checked, self.mock_should_line_be_checked)
def test_missing_bug_number(self):
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n')
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://bugs.webkit.org/show_bug.cgi?id=\n')
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi?id=\n')
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://webkit.org/b/\n')
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug'
'\n'
' http://trac.webkit.org/changeset/12345\n')
self.assert_error(2, range(2, 5), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi\n'
'\n'
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Another change\n')
self.assert_error(2, range(2, 6), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Example bug\n'
' More text about bug.\n'
'\n'
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' No bug in this change.\n')
def test_file_descriptions(self):
self.assert_error(5, range(1, 20), 'changelog/filechangedescriptionwhitespace',
'2011-01-01 Dmitry Lomov <dslomov@google.com>\n'
' ExampleBug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n'
'\n'
' * Source/Tools/random-script.py:Fixed')
self.assert_error(6, range(1, 20), 'changelog/filechangedescriptionwhitespace',
'2011-01-01 Dmitry Lomov <dslomov@google.com>\n'
' ExampleBug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n'
'\n'
' * Source/Tools/another-file: Done\n'
' * Source/Tools/random-script.py:Fixed\n'
' * Source/Tools/one-morefile:\n')
def test_no_new_tests(self):
self.assert_error(5, range(1, 20), 'changelog/nonewtests',
'2011-01-01 Dmitry Lomov <dslomov@google.com>\n'
' ExampleBug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n'
'\n'
' No new tests. (OOPS!)\n'
' * Source/Tools/random-script.py: Fixed')
def test_no_error(self):
self.assert_no_error([],
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example ChangeLog entry out of range\n'
' http://example.com/\n')
self.assert_no_error([],
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi?id=12345\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://webkit.org/b/12345\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
            ' Unreviewed build fix for r12345.\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Fix build after a bad change.\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Fix example port build.\n')
self.assert_no_error(range(2, 6),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi?id=12345\n'
'\n'
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' No bug here!\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi?id=12345\n'
' * Source/WebKit/foo.cpp: \n'
' * Source/WebKit/bar.cpp:\n'
' * Source/WebKit/foobar.cpp: Description\n')
|
nuxeh/keystone | refs/heads/master | keystone/tests/unit/backend/core_sql.py | 10 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from keystone.common import sql
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
class BaseBackendSqlTests(tests.SQLDriverOverrides, tests.TestCase):
def setUp(self):
super(BaseBackendSqlTests, self).setUp()
self.useFixture(database.Database())
self.load_backends()
# populate the engine with tables & fixtures
self.load_fixtures(default_fixtures)
# defaulted by the data load
self.user_foo['enabled'] = True
def config_files(self):
config_files = super(BaseBackendSqlTests, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
class BaseBackendSqlModels(BaseBackendSqlTests):
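    # Schema-level helpers: reflect a table from the SQLAlchemy metadata and
    # assert that every expected column is present with the right type (and
    # length, where one is given).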
def select_table(self, name):
table = sqlalchemy.Table(name,
sql.ModelBase.metadata,
autoload=True)
s = sqlalchemy.select([table])
return s
def assertExpectedSchema(self, table, cols):
table = self.select_table(table)
for col, type_, length in cols:
self.assertIsInstance(table.c[col].type, type_)
if length:
self.assertEqual(length, table.c[col].type.length)
|
UDST/synthpop | refs/heads/master | synthpop/recipes/starter2.py | 1 | import numpy as np
import pandas as pd
from .. import categorizer as cat
from ..census_helpers import Census
# TODO DOCSTRINGS!!
class Starter:
"""
This is a recipe for getting the marginals and joint distributions to use
to pass to the synthesizer using simple categories - population, age,
race, and sex for people, and children, income, cars, and workers for
    households. This module is responsible for downloading the ACS marginals
    and PUMS samples and categorizing them for the synthesizer.
Parameters
----------
c : object
census_helpers.Census object
state : string
FIPS code the state
county : string
FIPS code for the county
tract : string, optional
FIPS code for a specific track or None for all tracts in the county
acsyear : integer, optional
Final year in the 5-year estimates ACS dataset.
        Default: 2016, which corresponds to the 2012-2016 ACS dataset
Returns
-------
household_marginals : DataFrame
Marginals per block group for the household data (from ACS 5-year estimates)
person_marginals : DataFrame
Marginals per block group for the person data (from ACS 5-year estimates)
household_jointdist : DataFrame
        joint distributions for the households (from PUMS, 2000 and 2010 PUMAs), one joint
distribution for each PUMA (one row per PUMA)
person_jointdist : DataFrame
        joint distributions for the persons (from PUMS, 2000 and 2010 PUMAs), one joint
distribution for each PUMA (one row per PUMA)
tract_to_puma_map : dictionary
keys are tract ids and pumas are puma ids
"""
def __init__(self, key, state, county, tract=None, acsyear=2016):
self.c = c = Census(key, acsyear)
self.state = state
self.county = county
self.tract = tract
self.acsyear = acsyear
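        # The lists below name ACS summary-table variables (e.g. 'B25032_001E'
        # is estimate 001 of table B25032); they are queried per block group or
        # per tract and collapsed into categories further down.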
structure_size_columns = ['B25032_0%02dE' % i for i in range(1, 24)]
age_of_head_columns = ['B25007_0%02dE' % i for i in range(1, 22)]
race_of_head_columns = ['B25006_0%02dE' % i for i in range(1, 11)]
hispanic_head_columns = ['B25003I_0%02dE' % i for i in range(1, 4)]
hh_size_columns = ['B25009_0%02dE' % i for i in range(1, 18)]
income_columns = ['B19001_0%02dE' % i for i in range(1, 18)]
vehicle_columns = ['B08201_0%02dE' % i for i in range(1, 7)]
workers_columns = ['B08202_0%02dE' % i for i in range(1, 6)]
presence_of_children_columns = ['B11005_001E', 'B11005_002E', 'B11005_011E']
presence_of_seniors_columns = ['B11007_002E', 'B11007_007E']
tenure_mover_columns = ['B25038_0%02dE' % i for i in range(1, 16)]
block_group_columns = (
income_columns + presence_of_children_columns +
presence_of_seniors_columns + tenure_mover_columns +
hh_size_columns + age_of_head_columns + structure_size_columns +
race_of_head_columns + hispanic_head_columns)
tract_columns = vehicle_columns + workers_columns
h_acs = c.block_group_and_tract_query(
block_group_columns,
tract_columns, state, county,
merge_columns=['tract', 'county', 'state'],
block_group_size_attr="B11005_001E",
tract_size_attr="B08201_001E",
tract=tract, year=acsyear)
self.h_acs = h_acs
self.h_acs_cat = cat.categorize(h_acs, {
("sf_detached", "yes"): "B25032_003E + B25032_014E",
("sf_detached", "no"): "B25032_001E - B25032_003E - B25032_014E",
("hh_age_of_head", "lt35"):
"B25007_003E + B25007_004E + B25007_013E + B25007_014E",
("hh_age_of_head", "gt35-lt65"):
"B25007_005E + B25007_006E + B25007_007E + B25007_008E + "
"B25007_015E + B25007_016E + B25007_017E + B25007_018E",
("hh_age_of_head", "gt65"):
"B25007_009E + B25007_010E + B25007_011E + "
"B25007_019E + B25007_020E + B25007_021E",
("hh_race_of_head", "black"): "B25006_003E",
("hh_race_of_head", "white"): "B25006_002E",
("hh_race_of_head", "asian"): "B25006_005E",
("hh_race_of_head", "other"):
"B25006_004E + B25006_006E + B25006_007E + B25006_008E ",
("hispanic_head", "yes"): "B25003I_001E",
("hispanic_head", "no"): "B11005_001E - B25003I_001E",
("hh_children", "yes"): "B11005_002E",
("hh_children", "no"): "B11005_011E",
("seniors", "yes"): "B11007_002E",
("seniors", "no"): "B11007_007E",
("hh_income", "lt30"):
"B19001_002E + B19001_003E + B19001_004E + "
"B19001_005E + B19001_006E",
("hh_income", "gt30-lt60"):
"B19001_007E + B19001_008E + B19001_009E + "
"B19001_010E + B19001_011E",
("hh_income", "gt60-lt100"): "B19001_012E + B19001_013E",
("hh_income", "gt100-lt150"): "B19001_014E + B19001_015E",
("hh_income", "gt150"): "B19001_016E + B19001_017E",
("hh_cars", "none"): "B08201_002E",
("hh_cars", "one"): "B08201_003E",
("hh_cars", "two or more"):
"B08201_004E + B08201_005E + B08201_006E",
("hh_workers", "none"): "B08202_002E",
("hh_workers", "one"): "B08202_003E",
("hh_workers", "two or more"): "B08202_004E + B08202_005E",
("tenure_mover", "own recent"): "B25038_003E",
("tenure_mover", "own not recent"): "B25038_002E - B25038_003E",
("tenure_mover", "rent recent"): "B25038_010E",
("tenure_mover", "rent not recent"): "B25038_009E - B25038_010E",
("hh_size", "one"): "B25009_003E + B25009_011E",
("hh_size", "two"): "B25009_004E + B25009_012E",
("hh_size", "three"): "B25009_005E + B25009_013E",
("hh_size", "four or more"): "B25009_006E + B25009_014E + "
"B25009_007E + B25009_015E + "
"B25009_008E + B25009_016E + "
"B25009_009E + B25009_017E",
}, index_cols=['state', 'county', 'tract', 'block group'])
# gq_population = ['B26001_001E']
# HH population, for the hhpop/totalpop adjustment
hh_population = ['B11002_001E']
population = ['B01001_001E'] # This includes GQ
hispanic = ['B03003_002E', 'B03003_003E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns + hh_population + hispanic
p_acs = c.block_group_query(all_columns, state, county, tract=tract, year=acsyear)
self.p_acs = p_acs
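        # Each person category below is scaled by B11002_001E / B01001_001E
        # (household population over total population) so that group-quarters
        # residents are excluded from the person marginals.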
self.p_acs_cat = cat.categorize(p_acs, {
("person_age", "19 and under"):
"(B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E) * B11002_001E*1.0/B01001_001E",
("person_age", "20 to 35"):
"(B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E) * B11002_001E*1.0/B01001_001E",
("person_age", "35 to 60"):
"(B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E) * B11002_001E*1.0/B01001_001E",
("person_age", "above 60"):
"(B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E) * B11002_001E*1.0/B01001_001E",
("race", "white"): "(B02001_002E) * B11002_001E*1.0/B01001_001E",
("race", "black"): "(B02001_003E) * B11002_001E*1.0/B01001_001E",
("race", "asian"): "(B02001_005E) * B11002_001E*1.0/B01001_001E",
("race", "other"): "(B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E) * B11002_001E*1.0/B01001_001E",
("person_sex", "male"):
"(B01001_002E) * B11002_001E*1.0/B01001_001E",
("person_sex", "female"):
"(B01001_026E) * B11002_001E*1.0/B01001_001E",
("hispanic", "yes"):
"(B03003_003E) * B11002_001E*1.0/B01001_001E",
("hispanic", "no"):
"(B03003_002E) * B11002_001E*1.0/B01001_001E",
}, index_cols=['state', 'county', 'tract', 'block group'])
# Put the needed PUMS variables here. These are also the PUMS variables
# that will be in the outputted synthetic population
self.h_pums_cols = ('serialno', 'PUMA10', 'RT', 'NP', 'TYPE',
'R65', 'HINCP', 'VEH', 'MV', 'TEN', 'BLD', 'R18')
self.p_pums_cols = ('serialno', 'PUMA10', 'RELP', 'AGEP',
'ESR', 'RAC1P', 'HISP', 'SEX', 'SPORDER',
'PERNP', 'SCHL', 'WKHP', 'JWTR', 'SCH')
if self.acsyear < 2018:
self.h_pums_cols = list(self.h_pums_cols)
self.h_pums_cols.insert(1, 'PUMA00')
self.h_pums_cols = tuple(self.h_pums_cols)
self.p_pums_cols = list(self.p_pums_cols)
self.p_pums_cols.insert(1, 'PUMA00')
self.p_pums_cols = tuple(self.p_pums_cols)
def get_geography_name(self):
# this synthesis is at the block group level for most variables
return "block_group"
def get_num_geographies(self):
return len(self.p_acs_cat)
def get_available_geography_ids(self):
# return the ids of the geographies, in this case a state, county,
# tract, block_group id tuple
for tup in self.p_acs_cat.index:
yield pd.Series(tup, index=self.p_acs_cat.index.names)
def get_household_marginal_for_geography(self, ind):
return self.h_acs_cat.loc[tuple(ind.values)]
def get_person_marginal_for_geography(self, ind):
return self.p_acs_cat.loc[tuple(ind.values)]
def get_household_joint_dist_for_geography(self, ind):
c = self.c
puma10, puma00 = c.tract_to_puma(ind.state, ind.county, ind.tract)
# this is cached so won't download more than once
if type(puma00) == str:
h_pums = self.c.download_household_pums(ind.state, puma10, puma00,
usecols=self.h_pums_cols)
p_pums = self.c.download_population_pums(ind.state, puma10, puma00,
usecols=self.p_pums_cols)
elif np.isnan(puma00): # only puma10 available
h_pums = self.c.download_household_pums(ind.state, puma10, None,
usecols=self.h_pums_cols)
p_pums = self.c.download_population_pums(ind.state, puma10, None,
usecols=self.p_pums_cols)
h_pums = h_pums.set_index('serialno')
# join persons to households,
# calculate needed household-level variables
age_of_head = p_pums[p_pums.RELP == 0].groupby('serialno').AGEP.max()
num_workers = p_pums[p_pums.ESR.isin([1, 2, 4, 5])].groupby(
'serialno').size()
h_pums['race_of_head'] = p_pums[p_pums.RELP == 0].groupby(
'serialno').RAC1P.max()
h_pums['hispanic_head'] = p_pums[p_pums.RELP == 0].groupby(
'serialno').HISP.max()
h_pums['age_of_head'] = age_of_head
h_pums['workers'] = num_workers
h_pums.workers = h_pums.workers.fillna(0)
h_pums = h_pums.reset_index()
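        # The *_cat helpers below map raw PUMS codes (BLD, VEH, HINCP, ...)
        # onto the same category labels used for the ACS marginals above, so
        # the joint distributions line up with the marginal tables.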
def sf_detached_cat(r):
if r.BLD == 2:
return "yes"
return "no"
def age_of_head_cat(r):
if r.age_of_head < 35:
return "lt35"
elif r.age_of_head >= 65:
return "gt65"
return "gt35-lt65"
def race_of_head_cat(r):
if r.race_of_head == 1:
return "white"
elif r.race_of_head == 2:
return "black"
elif r.race_of_head == 6:
return "asian"
return "other"
def hispanic_head_cat(r):
if r.hispanic_head == 1:
return "no"
return "yes"
def hh_size_cat(r):
if r.NP == 1:
return "one"
elif r.NP == 2:
return "two"
elif r.NP == 3:
return "three"
return "four or more"
def cars_cat(r):
if r.VEH == 0:
return "none"
elif r.VEH == 1:
return "one"
return "two or more"
def children_cat(r):
if r.R18 == 1:
return "yes"
return "no"
def seniors_cat(r):
if r.R65 > 0:
return "yes"
return "no"
def income_cat(r):
if r.HINCP >= 150000:
return "gt150"
elif (r.HINCP >= 100000) & (r.HINCP < 150000):
return "gt100-lt150"
elif (r.HINCP >= 60000) & (r.HINCP < 100000):
return "gt60-lt100"
elif (r.HINCP >= 30000) & (r.HINCP < 60000):
return "gt30-lt60"
return "lt30"
def workers_cat(r):
if r.workers >= 2:
return "two or more"
elif r.workers == 1:
return "one"
return "none"
def tenure_mover_cat(r):
if (r.MV < 4) & (r.TEN < 3):
return "own recent"
elif (r.MV >= 4) & (r.TEN < 3):
return "own not recent"
elif (r.MV < 4) & (r.TEN >= 3):
return "rent recent"
return "rent not recent"
h_pums, jd_households = cat.joint_distribution(
h_pums,
cat.category_combinations(self.h_acs_cat.columns),
{"hh_cars": cars_cat,
"hh_children": children_cat,
"hh_income": income_cat,
"hh_workers": workers_cat,
"tenure_mover": tenure_mover_cat,
"seniors": seniors_cat,
"hh_size": hh_size_cat,
"hh_age_of_head": age_of_head_cat,
"sf_detached": sf_detached_cat,
"hh_race_of_head": race_of_head_cat,
"hispanic_head": hispanic_head_cat}
)
return h_pums, jd_households
def get_person_joint_dist_for_geography(self, ind):
c = self.c
puma10, puma00 = c.tract_to_puma(ind.state, ind.county, ind.tract)
# this is cached so won't download more than once
if type(puma00) == str:
p_pums = self.c.download_population_pums(ind.state, puma10, puma00,
usecols=self.p_pums_cols)
elif np.isnan(puma00): # only puma10 available
p_pums = self.c.download_population_pums(ind.state, puma10, None,
usecols=self.p_pums_cols)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
def hispanic_cat(r):
if r.HISP == 1:
return "no"
return "yes"
p_pums, jd_persons = cat.joint_distribution(
p_pums,
cat.category_combinations(self.p_acs_cat.columns),
{"person_age": age_cat, "race": race_cat, "person_sex": sex_cat,
"hispanic": hispanic_cat}
)
return p_pums, jd_persons
|
sdague/home-assistant | refs/heads/dev | homeassistant/components/amcrest/const.py | 21 | """Constants for amcrest component."""
DOMAIN = "amcrest"
DATA_AMCREST = DOMAIN
CAMERAS = "cameras"
DEVICES = "devices"
BINARY_SENSOR_SCAN_INTERVAL_SECS = 5
CAMERA_WEB_SESSION_TIMEOUT = 10
COMM_RETRIES = 1
COMM_TIMEOUT = 6.05
SENSOR_SCAN_INTERVAL_SECS = 10
SNAPSHOT_TIMEOUT = 20
SERVICE_EVENT = "event"
SERVICE_UPDATE = "update"
SENSOR_DEVICE_CLASS = "class"
SENSOR_EVENT_CODE = "code"
SENSOR_NAME = "name"
|
lento/tagger | refs/heads/master | tagger/lib/base.py | 1 | # -*- coding: utf-8 -*-
"""The base Controller API."""
from tg import TGController, tmpl_context, config, i18n, url, app_globals as G
from tg.render import render
from tg import request
from pylons.i18n import _, ungettext, N_
from tw.api import WidgetBunch, JSLink
from tagger.model import DBSession, Language, Category, Setting, Media
from tagger.lib.render import LinkWidget, MediaWidget
from tagger.lib.widgets import SideArticle, SideMedia, SideLink
__all__ = ['BaseController']
w_link = LinkWidget()
w_media = MediaWidget()
w_sideobj = dict(
article=SideArticle(),
media=SideMedia(),
link=SideLink(),
)
# JQuery and plugins
jquery_js = JSLink(link=url('/js/jquery.js'))
jquery_tools_js = JSLink(link=url('/js/jquery.tools.js'))
# tagger
tagger_js = JSLink(link=url('/js/tagger.js'))
# springs
mjs_js = JSLink(link=url('/js/extern/mjs.js'))
springs_js = JSLink(link=url('/js/springs.js'))
# FlowPlayer - don't load this at startup; it's a fallback for browsers not
# supporting HTML5 <video> tag
flowplayer_js = JSLink(link=url('/js/flowplayer.js'))
class BaseController(TGController):
"""
Base class for the controllers in the application.
Your web application should have one of these. The root of
your application is used to compute URLs used by your app.
"""
def __call__(self, environ, start_response):
"""Invoke the Controller"""
# TGController.__call__ dispatches to the Controller method
# the request is routed to. This routing information is
# available in environ['pylons.routes_dict']
identity = request.environ.get('repoze.who.identity')
request.identity = identity
tmpl_context.identity = identity
tmpl_context.user = identity and identity['user'] or False
# get settings from the db
settings = dict([(s.id, s.value) for s in DBSession.query(Setting)])
# set theme, title and copyright notice
tmpl_context.theme = (settings.get('theme') or
config.get('theme') or
'default'
)
tmpl_context.title = (settings.get('title') or
config.get('title', '').strip('\"') or
''
)
tmpl_context.copyright = (settings.get('copyright') or
config.get('copyright', '').strip('\"') or
''
)
tmpl_context.cc = (settings.get('cc') or
config.get('cc', '').strip('\"') or
''
)
# load javascripts
jquery_js.inject()
jquery_tools_js.inject()
tagger_js.inject()
springs_js.inject()
mjs_js.inject()
# add languages and categories to template context (used in the header)
tmpl_context.languages = DBSession.query(Language)
tmpl_context.categories = DBSession.query(Category)
# set language
if 'lang' in request.cookies:
tmpl_context.lang = request.cookies['lang']
i18n.set_lang(tmpl_context.lang)
else:
tmpl_context.lang = config.get('lang', None)
i18n.set_lang(None)
# add current url to template context
tmpl_context.current_url = request.url
# set logo
l_mediaid = settings.get(u'logo_media', None)
l_media = l_mediaid and DBSession.query(Media).get(l_mediaid)
l_mediaurl = l_media and url('/%s/%s' % (G.upload_prefix, l_media.uri))
tmpl_context.logo_mediaurl = l_mediaurl
# set banner link and background image
tmpl_context.w_link = w_link
tmpl_context.w_media = w_media
tmpl_context.banner_linkid = settings.get(u'banner_link', None)
b_mediaid = settings.get(u'banner_media', None)
b_media = b_mediaid and DBSession.query(Media).get(b_mediaid)
b_mediaurl = b_media and url('/%s/%s' % (G.upload_prefix, b_media.uri))
tmpl_context.banner_mediaurl = b_mediaurl
# add Sidebar widgets
tmpl_context.w_sideobj = w_sideobj
return TGController.__call__(self, environ, start_response)
|
happyspace/flask | refs/heads/master | examples/persona/persona.py | 159 | from flask import Flask, render_template, session, request, abort, g
import requests
app = Flask(__name__)
app.config.update(
DEBUG=True,
SECRET_KEY='my development key',
PERSONA_JS='https://login.persona.org/include.js',
PERSONA_VERIFIER='https://verifier.login.persona.org/verify',
)
app.config.from_envvar('PERSONA_SETTINGS', silent=True)
@app.before_request
def get_current_user():
g.user = None
email = session.get('email')
if email is not None:
g.user = email
@app.route('/')
def index():
"""Just a generic index page to show."""
return render_template('index.html')
@app.route('/_auth/login', methods=['GET', 'POST'])
def login_handler():
"""This is used by the persona.js file to kick off the
verification securely from the server side. If all is okay
the email address is remembered on the server.
"""
resp = requests.post(app.config['PERSONA_VERIFIER'], data={
'assertion': request.form['assertion'],
'audience': request.host_url,
}, verify=True)
if resp.ok:
verification_data = resp.json()
if verification_data['status'] == 'okay':
session['email'] = verification_data['email']
return 'OK'
abort(400)
@app.route('/_auth/logout', methods=['POST'])
def logout_handler():
"""This is what persona.js will call to sign the user
out again.
"""
session.clear()
return 'OK'
|
undoware/neutron-drive | refs/heads/master | neutron-drive/django/bin/unique-messages.py | 454 | #!/usr/bin/env python
import os
import sys
def unique_messages():
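    # Walk every .po file under the locale directory and rewrite it in place
    # with the output of msguniq, collapsing duplicate message entries.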
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print "this script should be run from the django svn tree or your project or app tree"
sys.exit(1)
for (dirpath, dirnames, filenames) in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
cmd = 'msguniq "%s.po"' % pf
stdout = os.popen(cmd)
msg = stdout.read()
open('%s.po' % pf, 'w').write(msg)
if __name__ == "__main__":
unique_messages()
|
nurhandipa/python | refs/heads/master | codecademy/bringing_it_all_together.py | 1 | //codecademy course answer
#written by pranantyo
monty = True
python = 1.234
monty_python = python * python
|
jackjennings/Mechanic | refs/heads/master | Mechanic.roboFontExt/lib/site-packages/requests/compat.py | 134 | # -*- coding: utf-8 -*-
"""
requests.compat
~~~~~~~~~~~~~~~
This module handles import compatibility issues between Python 2 and
Python 3.
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
integer_types = (int, long)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
integer_types = (int,)
|
ADMau/JFBenitez | refs/heads/master | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
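        # Wrap each code block in a list item headed by a human-readable
        # language name (from self.formats); the HTML visitor below renders
        # the resulting bullet list inside a div.configuration-block.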
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
|
jlcarmic/producthunt_simulator | refs/heads/master | venv/lib/python2.7/site-packages/numpy/linalg/setup.py | 129 | from __future__ import division, print_function
import os
import sys
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('linalg', parent_package, top_path)
config.add_data_dir('tests')
# Configure lapack_lite
src_dir = 'lapack_lite'
lapack_lite_src = [
os.path.join(src_dir, 'python_xerbla.c'),
os.path.join(src_dir, 'zlapack_lite.c'),
os.path.join(src_dir, 'dlapack_lite.c'),
os.path.join(src_dir, 'blas_lite.c'),
os.path.join(src_dir, 'dlamch.c'),
os.path.join(src_dir, 'f2c_lite.c'),
os.path.join(src_dir, 'f2c.h'),
]
lapack_info = get_info('lapack_opt', 0) # and {}
def get_lapack_lite_sources(ext, build_dir):
if not lapack_info:
print("### Warning: Using unoptimized lapack ###")
return ext.depends[:-1]
else:
if sys.platform=='win32':
print("### Warning: python_xerbla.c is disabled ###")
return ext.depends[:1]
return ext.depends[:2]
config.add_extension('lapack_lite',
sources = [get_lapack_lite_sources],
depends = ['lapack_litemodule.c'] + lapack_lite_src,
extra_info = lapack_info
)
# umath_linalg module
config.add_extension('_umath_linalg',
sources = [get_lapack_lite_sources],
depends = ['umath_linalg.c.src'] + lapack_lite_src,
extra_info = lapack_info,
libraries = ['npymath'],
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
sunils34/buffer-django-nonrel | refs/heads/master | django/dispatch/__init__.py | 571 | """Multi-consumer multi-producer dispatching mechanism
Originally based on pydispatch (BSD) http://pypi.python.org/pypi/PyDispatcher/2.0.1
See license.txt for original license.
Heavily modified for Django's purposes.
"""
from django.dispatch.dispatcher import Signal, receiver |
Titan-C/scikit-learn | refs/heads/master | sklearn/linear_model/tests/test_base.py | 83 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
    reg.fit(X, Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
@ignore_warnings # all deprecation warnings
def test_deprecation_center_data():
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
|
ReanGD/web-work-fitnesse | refs/heads/master | fitnesse/correct_model.py | 1 | # -*- coding: utf-8 -*-
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "root.settings")
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if BASE_DIR not in sys.path:
sys.path.append(BASE_DIR)
os.chdir(BASE_DIR)
from fitnesse.models import Job, Build, KeyBuildArtifact, BuildArtifact, Suite, Test, KeyTestArtifact, TestArtifact
def main():
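    # Keep only the last dotted component of each suite name
    # (e.g. 'a.b.SuiteName' -> 'SuiteName').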
for it in Suite.objects.all():
it.name = it.name.split(".")[-1]
it.save()
if __name__ == "__main__":
main()
|
tschinz/iPython_Workspace | refs/heads/master | 02_WP/Wavedrom/nbwavedrom/__init__.py | 2 | """ nbwavedrom - wavedrom timing diagrams for jupyter notebook """
import json
import os
import subprocess
import IPython.display
def _get_js_path(jsfile):
base = os.path.dirname(os.path.realpath(__file__))
return os.path.join(base, 'js', jsfile)
def _draw_wavedrom_javascript(data, width):
style = ""
    if width is not None:
        style = ' style="width: ' + str(int(width)) + 'px"'
htmldata = '<script>' + open(_get_js_path('wavedromskin.js')).read() + '</script>'
htmldata += '<script>' + open(_get_js_path('wavedrom.min.js')).read() + '</script>'
htmldata += '<div' + style + '><script type="WaveDrom">' + data + '</script></div>'
htmldata += '<script>WaveDrom.ProcessAll();</script>'
return IPython.display.HTML(data=htmldata)
def _draw_wavedrom_phantomjs(data, phantomjs):
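    # Pipe the WaveDrom JSON to the bundled wavedrom-cli script on stdin
    # ('-i -') and read the rendered SVG back from stdout ('-s -').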
prog = subprocess.Popen([phantomjs, _get_js_path('wavedrom-cli.js'), '-i', '-', '-s', '-'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = prog.communicate(data.encode('utf-8'))
return IPython.display.SVG(stdout)
def _is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def _find_phantomjs(program='phantomjs'):
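    # which(1)-style lookup: try the given path directly, then scan every
    # directory on PATH for an executable with this name.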
fpath, _ = os.path.split(program)
if fpath:
if _is_exe(program):
return program
else:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if _is_exe(exe_file):
return exe_file
return False
def _convert_data(data):
if not isinstance(data, str):
data = json.dumps(data)
return data
def draw(data, width=None, phantomjs=None):
"""
A function to provide digital waveform drawing in ipython notebook.
It utilises the wavedrom java script library, documentation for which can
be found here: wavedrom.com.
Example usage:
import nbwavedrom
a = {
'signal': [
{'name': 'clk', 'wave': 'p.....|...'},
{'name': 'dat', 'wave': 'x.345x|=.x', 'data': ['head', 'body', 'tail', 'data']},
{'name': 'req', 'wave': '0.1..0|1.0'},
{},
{'name': 'ack', 'wave': '1.....|01.'}
]}
nbwavedrom.draw(a)
Note, there are two wavedrom rendering engines built in. The default is to use the
wavedrom-cli (https://github.com/wavedrom/cli). This requires phantomjs. This is the
best option as it embeds the SVG into the notebook. If phantomjs is not found then a
browser based render will be used, this means that the notebook can't be converted to
PDF or displayed on github.
The following arguments are used:
* data - the wavedrom configuration (see wavedrom.com)
* width - forces max width of the output when using browser rendering
* phantomjs - set to the path to phantomjs if it is not in your PATH. Set to False to
force browser rendering
"""
data = _convert_data(data)
    if phantomjs is not False:
        if phantomjs is None or phantomjs is True:
            phantomjs = _find_phantomjs()  # search for phantomjs on PATH
        else:
            phantomjs = _find_phantomjs(phantomjs)  # search the user-supplied path
if phantomjs is False:
return _draw_wavedrom_javascript(data, width)
else:
return _draw_wavedrom_phantomjs(data, phantomjs)
|
tellesnobrega/sahara | refs/heads/master | sahara/service/edp/binary_retrievers/sahara_db.py | 19 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara import conductor as c
conductor = c.API
def get_raw_data(context, job_binary):
# url example: 'internal-db://JobBinaryInternal-UUID'
binary_internal_id = job_binary.url[len("internal-db://"):]
return conductor.job_binary_internal_get_raw_data(context,
binary_internal_id)
|
ioram7/keystone-federado-pgid2013 | refs/heads/master | build/sqlalchemy/test/aaa_profiling/test_compiler.py | 1 | from sqlalchemy import *
from test.lib import *
from sqlalchemy.engine import default
class CompileTest(fixtures.TestBase, AssertsExecutionResults):
__requires__ = 'cpython',
@classmethod
def setup_class(cls):
global t1, t2, metadata
metadata = MetaData()
t1 = Table('t1', metadata,
Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
t2 = Table('t2', metadata,
Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
# do a "compile" ahead of time to load
# deferred imports
t1.insert().compile()
# go through all the TypeEngine
# objects in use and pre-load their _type_affinity
# entries.
for t in (t1, t2):
for c in t.c:
c.type._type_affinity
from sqlalchemy import types
for t in types._type_map.values():
t._type_affinity
cls.dialect = default.DefaultDialect()
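    # Each test below asserts that compiling the statement stays within a
    # fixed Python function-call budget, as counted by the profiling helper.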
@profiling.function_call_count(62)
def test_insert(self):
t1.insert().compile(dialect=self.dialect)
@profiling.function_call_count(56)
def test_update(self):
t1.update().compile(dialect=self.dialect)
@profiling.function_call_count(110)
def test_update_whereclause(self):
t1.update().where(t1.c.c2==12).compile(dialect=self.dialect)
@profiling.function_call_count(139)
def test_select(self):
s = select([t1], t1.c.c2==t2.c.c1)
s.compile(dialect=self.dialect)
|
bvanapriya/androguard | refs/heads/master | androguard/core/binaries/idawrapper.py | 38 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from idaapi import *
from idautils import *
from idc import *
from SimpleXMLRPCServer import SimpleXMLRPCServer
import cPickle
def is_connected() :
return True
def wrapper_get_raw(oops) :
F = {}
for function_ea in Functions() :
F[ function_ea ] = []
f_start = function_ea
f_end = GetFunctionAttr(function_ea, FUNCATTR_END)
edges = set()
boundaries = set((f_start,))
F[ function_ea ].append( GetFunctionName(function_ea) )
for head in Heads(f_start, f_end) :
if isCode( GetFlags( head ) ) :
F[ function_ea ].append( (head, GetMnem(head), GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2)) )
refs = CodeRefsFrom(head, 0)
refs = set(filter(lambda x: x>=f_start and x<=f_end, refs))
if refs :
next_head = NextHead(head, f_end)
if isFlow(GetFlags(next_head)):
refs.add(next_head)
# Update the boundaries found so far.
boundaries.update(refs)
                    # For each of the references found, an edge is
# created.
for r in refs:
# If the flow could also come from the address
# previous to the destination of the branching
# an edge is created.
if isFlow(GetFlags(r)):
edges.add((PrevHead(r, f_start), r))
edges.add((head, r))
#print edges, boundaries
# Let's build the list of (startEA, startEA) couples
# for each basic block
sorted_boundaries = sorted(boundaries, reverse = True)
end_addr = PrevHead(f_end, f_start)
bb_addr = []
for begin_addr in sorted_boundaries:
bb_addr.append((begin_addr, end_addr))
# search the next end_addr which could be
# farther than just the previous head
# if data are interlaced in the code
# WARNING: it assumes it won't epicly fail ;)
end_addr = PrevHead(begin_addr, f_start)
while not isCode(GetFlags(end_addr)):
end_addr = PrevHead(end_addr, f_start)
# And finally return the result
bb_addr.reverse()
F[ function_ea ].append( (bb_addr, sorted(edges)) )
return cPickle.dumps( F )
def wrapper_Heads(oops) :
start, end = cPickle.loads(oops)
return cPickle.dumps( [ x for x in Heads( start, end ) ] )
def wrapper_Functions(oops) :
return cPickle.dumps( [ x for x in Functions() ] )
def wrapper_get_function(oops) :
name = cPickle.loads(oops)
for function_ea in Functions() :
if GetFunctionName(function_ea) == name :
return cPickle.dumps( function_ea )
return cPickle.dumps( -1 )
def wrapper_quit(oops) :
qexit(0)
class IDAWrapper :
def _dispatch(self, x, params) :
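        # Fallback dispatcher for the XML-RPC server: unpickle the arguments,
        # look up a function named `x` in idautils or idc, call it, and pickle
        # the result (expanding generators into lists so they serialize).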
#fd = open("toto.txt", "w")
#fd.write( x + "\n" )
#fd.write( str(type(params[0])) + "\n" )
#fd.close()
params = cPickle.loads( *params )
if isinstance(params, tuple) == False :
params = (params,)
import types
import idautils
import idc
#[getattr(idautils, a, None) for a in dir(idautils) if isinstance(getattr(idautils, a, None) , types.FunctionType)]
for a in dir(idautils) :
#fd.write( "\t" + a + "\n" )
if a == x :
z = getattr(idautils, a, None)
ret = z( *params )
if type(ret).__name__=='generator' :
return cPickle.dumps( [ i for i in ret ] )
return cPickle.dumps( ret )
for a in dir(idc) :
#fd.write( "\t" + a + "\n" )
if a == x :
z = getattr(idc, a, None)
ret = z( *params )
if type(ret).__name__=='generator' :
return cPickle.dumps( [ i for i in ret ] )
return cPickle.dumps( ret )
return cPickle.dumps( [] )
def main() :
autoWait()
ea = ScreenEA()
server = SimpleXMLRPCServer(("localhost", 9000))
server.register_function(is_connected, "is_connected")
server.register_function(wrapper_get_raw, "get_raw")
server.register_function(wrapper_get_function, "get_function")
server.register_function(wrapper_Heads, "Heads")
server.register_function(wrapper_Functions, "Functions")
server.register_instance(IDAWrapper())
server.register_function(wrapper_quit, "quit")
server.serve_forever()
qexit(0)
main()
|
Medigate/cutiuta-server | refs/heads/master | cutiuta-server/env/lib/python3.4/site-packages/django/contrib/gis/geos/base.py | 437 | from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid; we don't
# want to be passing NULL pointers to routines --
# that's very bad.
if self._ptr:
return self._ptr
else:
raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
|