code | repo_name | path | language | license | size
---|---|---|---|---|---|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class AttachToS2SharedTargetAction(BaseAction):
action = 'AttachToS2SharedTarget'
command = 'attach-to-s2-shared-target'
usage = '%(prog)s -s <shared_target> -v <volumes> [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument("-s", "--shared-target", dest="shared_target",
action="store", type=str, default=None,
                            help="the ID of the shared target.")
parser.add_argument("-v", "--volumes", dest="volumes",
action="store", type=str, default=None,
                            help="the comma-separated IDs of the volumes.")
@classmethod
def build_directive(cls, options):
        for key in ['shared_target', 'volumes']:
            if not getattr(options, key, None):
                print("error: [%s] should be specified." % key)
                return None
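        # explode_array turns the comma-separated "volumes" argument into a list
        # for the API call.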
directive = {
"shared_target": options.shared_target,
"volumes": explode_array(options.volumes),
}
return directive
| yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/s2/attach_to_s2_shared_target.py | Python | apache-2.0 | 2,022 |
"""Support for Honeywell Lyric climate platform."""
from __future__ import annotations
import logging
from time import localtime, strftime, time
from aiolyric.objects.device import LyricDevice
from aiolyric.objects.location import LyricLocation
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity, ClimateEntityDescription
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import LyricDeviceEntity
from .const import (
DOMAIN,
LYRIC_EXCEPTIONS,
PRESET_HOLD_UNTIL,
PRESET_NO_HOLD,
PRESET_PERMANENT_HOLD,
PRESET_TEMPORARY_HOLD,
PRESET_VACATION_HOLD,
)
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
LYRIC_HVAC_ACTION_OFF = "EquipmentOff"
LYRIC_HVAC_ACTION_HEAT = "Heat"
LYRIC_HVAC_ACTION_COOL = "Cool"
LYRIC_HVAC_MODE_OFF = "Off"
LYRIC_HVAC_MODE_HEAT = "Heat"
LYRIC_HVAC_MODE_COOL = "Cool"
LYRIC_HVAC_MODE_HEAT_COOL = "Auto"
LYRIC_HVAC_MODES = {
HVAC_MODE_OFF: LYRIC_HVAC_MODE_OFF,
HVAC_MODE_HEAT: LYRIC_HVAC_MODE_HEAT,
HVAC_MODE_COOL: LYRIC_HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL: LYRIC_HVAC_MODE_HEAT_COOL,
}
HVAC_MODES = {
LYRIC_HVAC_MODE_OFF: HVAC_MODE_OFF,
LYRIC_HVAC_MODE_HEAT: HVAC_MODE_HEAT,
LYRIC_HVAC_MODE_COOL: HVAC_MODE_COOL,
LYRIC_HVAC_MODE_HEAT_COOL: HVAC_MODE_HEAT_COOL,
}
HVAC_ACTIONS = {
LYRIC_HVAC_ACTION_OFF: CURRENT_HVAC_OFF,
LYRIC_HVAC_ACTION_HEAT: CURRENT_HVAC_HEAT,
LYRIC_HVAC_ACTION_COOL: CURRENT_HVAC_COOL,
}
SERVICE_HOLD_TIME = "set_hold_time"
ATTR_TIME_PERIOD = "time_period"
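# The set_hold_time service takes a duration, but the Lyric API expects an
# absolute local "HH:MM:SS" time for a hold, so the schema below converts
# "now + duration" into that string at validation time.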
SCHEMA_HOLD_TIME = {
vol.Required(ATTR_TIME_PERIOD, default="01:00:00"): vol.All(
cv.time_period,
cv.positive_timedelta,
lambda td: strftime("%H:%M:%S", localtime(time() + td.total_seconds())),
)
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Honeywell Lyric climate platform based on a config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
entities = []
for location in coordinator.data.locations:
for device in location.devices:
entities.append(
LyricClimate(
coordinator,
ClimateEntityDescription(
key=f"{device.macID}_thermostat",
name=device.name,
),
location,
device,
hass.config.units.temperature_unit,
)
)
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_HOLD_TIME,
SCHEMA_HOLD_TIME,
"async_set_hold_time",
)
class LyricClimate(LyricDeviceEntity, ClimateEntity):
"""Defines a Honeywell Lyric climate entity."""
coordinator: DataUpdateCoordinator
entity_description: ClimateEntityDescription
def __init__(
self,
coordinator: DataUpdateCoordinator,
description: ClimateEntityDescription,
location: LyricLocation,
device: LyricDevice,
temperature_unit: str,
) -> None:
"""Initialize Honeywell Lyric climate entity."""
self._temperature_unit = temperature_unit
# Setup supported hvac modes
self._hvac_modes = [HVAC_MODE_OFF]
# Add supported lyric thermostat features
if LYRIC_HVAC_MODE_HEAT in device.allowedModes:
self._hvac_modes.append(HVAC_MODE_HEAT)
if LYRIC_HVAC_MODE_COOL in device.allowedModes:
self._hvac_modes.append(HVAC_MODE_COOL)
if (
LYRIC_HVAC_MODE_HEAT in device.allowedModes
and LYRIC_HVAC_MODE_COOL in device.allowedModes
):
self._hvac_modes.append(HVAC_MODE_HEAT_COOL)
super().__init__(
coordinator,
location,
device,
f"{device.macID}_thermostat",
)
self.entity_description = description
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return self._temperature_unit
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self.device.indoorTemperature
@property
def hvac_action(self) -> str:
"""Return the current hvac action."""
action = HVAC_ACTIONS.get(self.device.operationStatus.mode, None)
if action == CURRENT_HVAC_OFF and self.hvac_mode != HVAC_MODE_OFF:
action = CURRENT_HVAC_IDLE
return action
@property
def hvac_mode(self) -> str:
"""Return the hvac mode."""
return HVAC_MODES[self.device.changeableValues.mode]
@property
def hvac_modes(self) -> list[str]:
"""List of available hvac modes."""
return self._hvac_modes
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
device = self.device
if not device.hasDualSetpointStatus:
if self.hvac_mode == HVAC_MODE_COOL:
return device.changeableValues.coolSetpoint
return device.changeableValues.heatSetpoint
return None
    @property
    def target_temperature_low(self) -> float | None:
        """Return the lower bound temperature we try to reach."""
        device = self.device
        if device.hasDualSetpointStatus:
            return device.changeableValues.heatSetpoint
        return None
    @property
    def target_temperature_high(self) -> float | None:
        """Return the upper bound temperature we try to reach."""
        device = self.device
        if device.hasDualSetpointStatus:
            return device.changeableValues.coolSetpoint
        return None
@property
def preset_mode(self) -> str | None:
"""Return current preset mode."""
return self.device.changeableValues.thermostatSetpointStatus
@property
def preset_modes(self) -> list[str] | None:
"""Return preset modes."""
return [
PRESET_NO_HOLD,
PRESET_HOLD_UNTIL,
PRESET_PERMANENT_HOLD,
PRESET_TEMPORARY_HOLD,
PRESET_VACATION_HOLD,
]
@property
def min_temp(self) -> float:
"""Identify min_temp in Lyric API or defaults if not available."""
device = self.device
if LYRIC_HVAC_MODE_COOL in device.allowedModes:
return device.minCoolSetpoint
return device.minHeatSetpoint
@property
def max_temp(self) -> float:
"""Identify max_temp in Lyric API or defaults if not available."""
device = self.device
if LYRIC_HVAC_MODE_HEAT in device.allowedModes:
return device.maxHeatSetpoint
return device.maxCoolSetpoint
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
device = self.device
if device.hasDualSetpointStatus:
if target_temp_low is None or target_temp_high is None:
raise HomeAssistantError(
"Could not find target_temp_low and/or target_temp_high in arguments"
)
_LOGGER.debug("Set temperature: %s - %s", target_temp_low, target_temp_high)
try:
await self._update_thermostat(
self.location,
device,
                    coolSetpoint=target_temp_high,
                    heatSetpoint=target_temp_low,
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
else:
temp = kwargs.get(ATTR_TEMPERATURE)
_LOGGER.debug("Set temperature: %s", temp)
try:
if self.hvac_mode == HVAC_MODE_COOL:
await self._update_thermostat(
self.location, device, coolSetpoint=temp
)
else:
await self._update_thermostat(
self.location, device, heatSetpoint=temp
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
await self.coordinator.async_refresh()
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set hvac mode."""
_LOGGER.debug("Set hvac mode: %s", hvac_mode)
try:
await self._update_thermostat(
self.location, self.device, mode=LYRIC_HVAC_MODES[hvac_mode]
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
await self.coordinator.async_refresh()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set preset (PermanentHold, HoldUntil, NoHold, VacationHold) mode."""
_LOGGER.debug("Set preset mode: %s", preset_mode)
try:
await self._update_thermostat(
self.location, self.device, thermostatSetpointStatus=preset_mode
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
await self.coordinator.async_refresh()
async def async_set_hold_time(self, time_period: str) -> None:
"""Set the time to hold until."""
_LOGGER.debug("set_hold_time: %s", time_period)
try:
await self._update_thermostat(
self.location,
self.device,
thermostatSetpointStatus=PRESET_HOLD_UNTIL,
nextPeriodTime=time_period,
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
await self.coordinator.async_refresh()
| rohitranjan1991/home-assistant | homeassistant/components/lyric/climate.py | Python | mit | 10,847 |
from matplotlib import rc
from matplotlib import rcParams
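# Matplotlib defaults for the generated figures: PDF backend, Palatino serif
# fonts, LaTeX text rendering and fixed subplot margins.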
font_size=14
rcParams["backend"] = "PDF"
rcParams["figure.figsize"] = (4, 3)
rcParams["font.family"] = "Serif"
rcParams["font.serif"] = ["Palatino"]
rcParams["font.size"] = font_size
rcParams["axes.labelsize"] = font_size
rcParams["xtick.labelsize"] = font_size - 2
rcParams["ytick.labelsize"] = font_size - 2
rcParams["legend.numpoints"] = 1
rcParams["legend.fontsize"] = "small"
rcParams["lines.markersize"] = 4
rcParams["figure.subplot.right"] = 0.95
rcParams["figure.subplot.top"] = 0.95
rcParams["figure.subplot.right"] = 0.95
rcParams["figure.subplot.top"] = 0.95
rcParams["figure.subplot.left"] = 0.2
rcParams["figure.subplot.bottom"] = 0.2
rcParams["image.cmap"] = "hot"
rcParams["text.usetex"] = True
rcParams["ps.usedistiller"] = "xpdf"
rcParams["pdf.compression"] = 9
rcParams["ps.useafm"] = True
rcParams["path.simplify"] = True
rcParams["text.latex.preamble"] = [#"\usepackage{times}",
#"\usepackage{euler}",
r"\usepackage{amssymb}",
r"\usepackage{amsmath}"]
from numpy import *
import scipy
import scipy.stats
from math import *
import numpy as np
import graph_tool.all as gt
| johankaito/fufuka | graph-tool/doc/pyenv.py | Python | apache-2.0 | 1,258 |
# -*- coding: utf-8 -*-
import inspect
import itertools
import logging
import math
import re
import urlparse
import werkzeug
import werkzeug.exceptions
import werkzeug.utils
import werkzeug.wrappers
# optional python-slugify import (https://github.com/un33k/python-slugify)
try:
import slugify as slugify_lib
except ImportError:
slugify_lib = None
import openerp
from openerp.osv import orm, osv, fields
from openerp.tools.safe_eval import safe_eval
from openerp.addons.web.http import request, LazyResponse
logger = logging.getLogger(__name__)
def url_for(path_or_uri, lang=None):
if isinstance(path_or_uri, unicode):
path_or_uri = path_or_uri.encode('utf-8')
current_path = request.httprequest.path
if isinstance(current_path, unicode):
current_path = current_path.encode('utf-8')
location = path_or_uri.strip()
force_lang = lang is not None
url = urlparse.urlparse(location)
if request and not url.netloc and not url.scheme and (url.path or force_lang):
location = urlparse.urljoin(current_path, location)
lang = lang or request.context.get('lang')
langs = [lg[0] for lg in request.website.get_languages()]
if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
ps = location.split('/')
if ps[1] in langs:
# Replace the language only if we explicitly provide a language to url_for
if force_lang:
ps[1] = lang
# Remove the default language unless it's explicitly provided
elif ps[1] == request.website.default_lang_code:
ps.pop(1)
# Insert the context language or the provided language
elif lang != request.website.default_lang_code or force_lang:
ps.insert(1, lang)
location = '/'.join(ps)
return location.decode('utf-8')
def is_multilang_url(path, langs=None):
if not langs:
langs = [lg[0] for lg in request.website.get_languages()]
spath = path.split('/')
# if a language is already in the path, remove it
if spath[1] in langs:
spath.pop(1)
path = '/'.join(spath)
try:
router = request.httprequest.app.get_db_router(request.db).bind('')
func = router.match(path)[0]
return func.routing.get('multilang', False)
except Exception:
return False
def slugify(s, max_length=None):
if slugify_lib:
        # There are 2 different slugify libraries; only python-slugify is supported
try:
return slugify_lib.slugify(s, max_length=max_length)
except TypeError:
pass
spaceless = re.sub(r'\s+', '-', s)
specialless = re.sub(r'[^-_A-Za-z0-9]', '', spaceless)
return specialless[:max_length]
def slug(value):
if isinstance(value, orm.browse_record):
# [(id, name)] = value.name_get()
id, name = value.id, value[value._rec_name]
else:
# assume name_search result tuple
id, name = value
return "%s-%d" % (slugify(name), id)
def urlplus(url, params):
return werkzeug.Href(url)(params or None)
class website(osv.osv):
def _get_menu_website(self, cr, uid, ids, context=None):
# IF a menu is changed, update all websites
return self.search(cr, uid, [], context=context)
def _get_menu(self, cr, uid, ids, name, arg, context=None):
root_domain = [('parent_id', '=', False)]
menus = self.pool.get('website.menu').search(cr, uid, root_domain, order='id', context=context)
menu = menus and menus[0] or False
return dict( map(lambda x: (x, menu), ids) )
def _get_public_user(self, cr, uid, ids, name='public_user', arg=(), context=None):
ref = self.get_public_user(cr, uid, context=context)
return dict( map(lambda x: (x, ref), ids) )
_name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco
_description = "Website"
_columns = {
'name': fields.char('Domain'),
'company_id': fields.many2one('res.company', string="Company"),
'language_ids': fields.many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages'),
'default_lang_id': fields.many2one('res.lang', string="Default language"),
'default_lang_code': fields.related('default_lang_id', 'code', type="char", string="Default language code", store=True),
'social_twitter': fields.char('Twitter Account'),
'social_facebook': fields.char('Facebook Account'),
'social_github': fields.char('GitHub Account'),
'social_linkedin': fields.char('LinkedIn Account'),
'social_youtube': fields.char('Youtube Account'),
'social_googleplus': fields.char('Google+ Account'),
'google_analytics_key': fields.char('Google Analytics Key'),
'user_id': fields.many2one('res.users', string='Public User'),
'public_user': fields.function(_get_public_user, relation='res.users', type='many2one', string='Public User'),
'menu_id': fields.function(_get_menu, relation='website.menu', type='many2one', string='Main Menu',
store= {
'website.menu': (_get_menu_website, ['sequence','parent_id','website_id'], 10)
})
}
# cf. Wizard hack in website_views.xml
def noop(self, *args, **kwargs):
pass
def write(self, cr, uid, ids, vals, context=None):
self._get_languages.clear_cache(self)
return super(website, self).write(cr, uid, ids, vals, context)
def new_page(self, cr, uid, name, template='website.default_page', ispage=True, context=None):
context = context or {}
imd = self.pool.get('ir.model.data')
view = self.pool.get('ir.ui.view')
template_module, template_name = template.split('.')
# completely arbitrary max_length
page_name = slugify(name, max_length=50)
page_xmlid = "%s.%s" % (template_module, page_name)
try:
# existing page
imd.get_object_reference(cr, uid, template_module, page_name)
except ValueError:
# new page
_, template_id = imd.get_object_reference(cr, uid, template_module, template_name)
page_id = view.copy(cr, uid, template_id, context=context)
page = view.browse(cr, uid, page_id, context=context)
page.write({
'arch': page.arch.replace(template, page_xmlid),
'name': page_name,
'page': ispage,
})
imd.create(cr, uid, {
'name': page_name,
'module': template_module,
'model': 'ir.ui.view',
'res_id': page_id,
'noupdate': True
}, context=context)
return page_xmlid
def page_for_name(self, cr, uid, ids, name, module='website', context=None):
# whatever
return '%s.%s' % (module, slugify(name, max_length=50))
def page_exists(self, cr, uid, ids, name, module='website', context=None):
try:
return self.pool["ir.model.data"].get_object_reference(cr, uid, module, name)
except:
return False
def get_public_user(self, cr, uid, context=None):
uid = openerp.SUPERUSER_ID
res = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'public_user')
return res and res[1] or False
@openerp.tools.ormcache(skiparg=3)
def _get_languages(self, cr, uid, id, context=None):
website = self.browse(cr, uid, id)
return [(lg.code, lg.name) for lg in website.language_ids]
def get_languages(self, cr, uid, ids, context=None):
return self._get_languages(cr, uid, ids[0])
def get_current_website(self, cr, uid, context=None):
# TODO: Select website, currently hard coded
return self.pool['website'].browse(cr, uid, 1, context=context)
def preprocess_request(self, cr, uid, ids, request, context=None):
# TODO FP: is_website_publisher and editable in context should be removed
# for performance reasons (1 query per image to load) but also to be cleaner
# I propose to replace this by a group 'base.group_website_publisher' on the
# view that requires it.
Access = request.registry['ir.model.access']
is_website_publisher = Access.check(cr, uid, 'ir.ui.view', 'write', False, context)
lang = request.context['lang']
is_master_lang = lang == request.website.default_lang_code
request.redirect = lambda url: werkzeug.utils.redirect(url_for(url))
request.context.update(
editable=is_website_publisher,
translatable=not is_master_lang,
)
def get_template(self, cr, uid, ids, template, context=None):
if '.' not in template:
template = 'website.%s' % template
module, xmlid = template.split('.', 1)
model, view_id = request.registry["ir.model.data"].get_object_reference(cr, uid, module, xmlid)
return self.pool["ir.ui.view"].browse(cr, uid, view_id, context=context)
def _render(self, cr, uid, ids, template, values=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return self.pool['ir.ui.view'].render(cr, uid, template, values=values, context=context)
def render(self, cr, uid, ids, template, values=None, status_code=None, context=None):
def callback(template, values, context):
return self._render(cr, uid, ids, template, values, context)
if values is None:
values = {}
return LazyResponse(callback, status_code=status_code, template=template, values=values, context=context)
def pager(self, cr, uid, ids, url, total, page=1, step=30, scope=5, url_args=None, context=None):
# Compute Pager
page_count = int(math.ceil(float(total) / step))
page = max(1, min(int(page), page_count))
scope -= 1
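        # Show a window of `scope` page links centered on the current page,
        # clamped to the [1, page_count] range.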
pmin = max(page - int(math.floor(scope/2)), 1)
pmax = min(pmin + scope, page_count)
if pmax - pmin < scope:
pmin = pmax - scope if pmax - scope > 0 else 1
def get_url(page):
_url = "%spage/%s/" % (url, page) if page > 1 else url
if url_args:
_url = "%s?%s" % (_url, werkzeug.url_encode(url_args))
return _url
return {
"page_count": page_count,
"offset": (page - 1) * step,
"page": {
'url': get_url(page),
'num': page
},
"page_start": {
'url': get_url(pmin),
'num': pmin
},
"page_previous": {
'url': get_url(max(pmin, page - 1)),
'num': max(pmin, page - 1)
},
"page_next": {
'url': get_url(min(pmax, page + 1)),
'num': min(pmax, page + 1)
},
"page_end": {
'url': get_url(pmax),
'num': pmax
},
"pages": [
{'url': get_url(page), 'num': page}
for page in xrange(pmin, pmax+1)
]
}
def rule_is_enumerable(self, rule):
""" Checks that it is possible to generate sensible GET queries for
a given rule (if the endpoint matches its own requirements)
:type rule: werkzeug.routing.Rule
:rtype: bool
"""
endpoint = rule.endpoint
methods = rule.methods or ['GET']
converters = rule._converters.values()
return (
'GET' in methods
and endpoint.routing['type'] == 'http'
and endpoint.routing['auth'] in ('none', 'public')
and endpoint.routing.get('website', False)
# preclude combinatorial explosion by only allowing a single converter
and len(converters) <= 1
# ensure all converters on the rule are able to generate values for
# themselves
and all(hasattr(converter, 'generate') for converter in converters)
) and self.endpoint_is_enumerable(rule)
def endpoint_is_enumerable(self, rule):
""" Verifies that it's possible to generate a valid url for the rule's
endpoint
:type rule: werkzeug.routing.Rule
:rtype: bool
"""
spec = inspect.getargspec(rule.endpoint.method)
# if *args bail the fuck out, only dragons can live there
if spec.varargs:
return False
# remove all arguments with a default value from the list
defaults_count = len(spec.defaults or []) # spec.defaults can be None
# a[:-0] ~ a[:0] ~ [] -> replace defaults_count == 0 by None to get
# a[:None] ~ a
args = spec.args[:(-defaults_count or None)]
# params with defaults were removed, leftover allowed are:
# * self (technically should be first-parameter-of-instance-method but whatever)
# * any parameter mapping to a converter
return all(
(arg == 'self' or arg in rule._converters)
for arg in args)
def enumerate_pages(self, cr, uid, ids, query_string=None, context=None):
""" Available pages in the website/CMS. This is mostly used for links
generation and can be overridden by modules setting up new HTML
controllers for dynamic pages (e.g. blog).
By default, returns template views marked as pages.
:param str query_string: a (user-provided) string, fetches pages
matching the string
:returns: a list of mappings with two keys: ``name`` is the displayable
name of the resource (page), ``url`` is the absolute URL
of the same.
:rtype: list({name: str, url: str})
"""
router = request.httprequest.app.get_db_router(request.db)
# Force enumeration to be performed as public user
uid = self.get_public_user(cr, uid, context=context)
url_list = []
for rule in router.iter_rules():
if not self.rule_is_enumerable(rule):
continue
converters = rule._converters
filtered = bool(converters)
if converters:
# allow single converter as decided by fp, checked by
# rule_is_enumerable
[(name, converter)] = converters.items()
converter_values = converter.generate(
request.cr, uid, query=query_string, context=context)
generated = ({k: v} for k, v in itertools.izip(
itertools.repeat(name), converter_values))
else:
# force single iteration for literal urls
generated = [{}]
for values in generated:
domain_part, url = rule.build(values, append_unknown=False)
page = {'name': url, 'url': url}
if url in url_list:
continue
url_list.append(url)
if not filtered and query_string and not self.page_matches(cr, uid, page, query_string, context=context):
continue
yield page
def search_pages(self, cr, uid, ids, needle=None, limit=None, context=None):
return list(itertools.islice(
self.enumerate_pages(cr, uid, ids, query_string=needle, context=context),
limit))
def page_matches(self, cr, uid, page, needle, context=None):
""" Checks that a "page" matches a user-provide search string.
The default implementation attempts to perform a non-contiguous
substring match of the page's name.
:param page: {'name': str, 'url': str}
:param needle: str
:rtype: bool
"""
haystack = page['name'].lower()
needle = iter(needle.lower())
n = next(needle)
end = object()
for char in haystack:
if char != n: continue
n = next(needle, end)
# found all characters of needle in haystack in order
if n is end:
return True
return False
def kanban(self, cr, uid, ids, model, domain, column, template, step=None, scope=None, orderby=None, context=None):
step = step and int(step) or 10
scope = scope and int(scope) or 5
orderby = orderby or "name"
get_args = dict(request.httprequest.args or {})
model_obj = self.pool[model]
relation = model_obj._columns.get(column)._obj
relation_obj = self.pool[relation]
get_args.setdefault('kanban', "")
kanban = get_args.pop('kanban')
kanban_url = "?%s&kanban=" % werkzeug.url_encode(get_args)
pages = {}
for col in kanban.split(","):
if col:
col = col.split("-")
pages[int(col[0])] = int(col[1])
objects = []
for group in model_obj.read_group(cr, uid, domain, ["id", column], groupby=column):
obj = {}
# browse column
relation_id = group[column][0]
obj['column_id'] = relation_obj.browse(cr, uid, relation_id)
obj['kanban_url'] = kanban_url
for k, v in pages.items():
if k != relation_id:
obj['kanban_url'] += "%s-%s" % (k, v)
# pager
number = model_obj.search(cr, uid, group['__domain'], count=True)
obj['page_count'] = int(math.ceil(float(number) / step))
obj['page'] = pages.get(relation_id) or 1
if obj['page'] > obj['page_count']:
obj['page'] = obj['page_count']
offset = (obj['page']-1) * step
obj['page_start'] = max(obj['page'] - int(math.floor((scope-1)/2)), 1)
obj['page_end'] = min(obj['page_start'] + (scope-1), obj['page_count'])
# view data
obj['domain'] = group['__domain']
obj['model'] = model
obj['step'] = step
obj['orderby'] = orderby
# browse objects
object_ids = model_obj.search(cr, uid, group['__domain'], limit=step, offset=offset, order=orderby)
obj['object_ids'] = model_obj.browse(cr, uid, object_ids)
objects.append(obj)
values = {
'objects': objects,
'range': range,
'template': template,
}
return request.website._render("website.kanban_contain", values)
def kanban_col(self, cr, uid, ids, model, domain, page, template, step, orderby, context=None):
html = ""
model_obj = self.pool[model]
domain = safe_eval(domain)
step = int(step)
offset = (int(page)-1) * step
object_ids = model_obj.search(cr, uid, domain, limit=step, offset=offset, order=orderby)
object_ids = model_obj.browse(cr, uid, object_ids)
for object_id in object_ids:
html += request.website._render(template, {'object_id': object_id})
return html
class website_menu(osv.osv):
_name = "website.menu"
_description = "Website Menu"
_columns = {
'name': fields.char('Menu', size=64, required=True, translate=True),
'url': fields.char('Url', required=True, translate=True),
'new_window': fields.boolean('New Window'),
'sequence': fields.integer('Sequence'),
# TODO: support multiwebsite once done for ir.ui.views
'website_id': fields.many2one('website', 'Website'),
'parent_id': fields.many2one('website.menu', 'Parent Menu', select=True, ondelete="cascade"),
'child_id': fields.one2many('website.menu', 'parent_id', string='Child Menus'),
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
}
def __defaults_sequence(self, cr, uid, context):
menu = self.search_read(cr, uid, [(1,"=",1)], ["sequence"], limit=1, order="sequence DESC", context=context)
return menu and menu[0]["sequence"] or 0
_defaults = {
'url': '',
'sequence': __defaults_sequence,
'new_window': False,
}
_parent_store = True
_parent_order = 'sequence'
_order = "sequence"
# would be better to take a menu_id as argument
def get_tree(self, cr, uid, website_id, context=None):
def make_tree(node):
menu_node = dict(
id=node.id,
name=node.name,
url=node.url,
new_window=node.new_window,
sequence=node.sequence,
parent_id=node.parent_id.id,
children=[],
)
for child in node.child_id:
menu_node['children'].append(make_tree(child))
return menu_node
menu = self.pool.get('website').browse(cr, uid, website_id, context=context).menu_id
return make_tree(menu)
def save(self, cr, uid, website_id, data, context=None):
def replace_id(old_id, new_id):
for menu in data['data']:
if menu['id'] == old_id:
menu['id'] = new_id
if menu['parent_id'] == old_id:
menu['parent_id'] = new_id
to_delete = data['to_delete']
if to_delete:
self.unlink(cr, uid, to_delete, context=context)
for menu in data['data']:
mid = menu['id']
if isinstance(mid, str):
new_id = self.create(cr, uid, {'name': menu['name']}, context=context)
replace_id(mid, new_id)
for menu in data['data']:
self.write(cr, uid, [menu['id']], menu, context=context)
return True
class ir_attachment(osv.osv):
_inherit = "ir.attachment"
def _website_url_get(self, cr, uid, ids, name, arg, context=None):
result = {}
for attach in self.browse(cr, uid, ids, context=context):
if attach.type == 'url':
result[attach.id] = attach.url
else:
result[attach.id] = urlplus('/website/image', {
'model': 'ir.attachment',
'field': 'datas',
'id': attach.id,
'max_width': 1024,
'max_height': 768,
})
return result
_columns = {
'website_url': fields.function(_website_url_get, string="Attachment URL", type='char')
}
def try_remove(self, cr, uid, ids, context=None):
""" Removes a web-based image attachment if it is used by no view
(template)
Returns a dict mapping attachments which would not be removed (if any)
mapped to the views preventing their removal
"""
Views = self.pool['ir.ui.view']
attachments_to_remove = []
# views blocking removal of the attachment
removal_blocked_by = {}
for attachment in self.browse(cr, uid, ids, context=context):
# in-document URLs are html-escaped, a straight search will not
# find them
url = werkzeug.utils.escape(attachment.website_url)
ids = Views.search(cr, uid, [('arch', 'like', url)], context=context)
if ids:
removal_blocked_by[attachment.id] = Views.read(
cr, uid, ids, ['name'], context=context)
else:
attachments_to_remove.append(attachment.id)
if attachments_to_remove:
self.unlink(cr, uid, attachments_to_remove, context=context)
return removal_blocked_by
class res_partner(osv.osv):
_inherit = "res.partner"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
'center': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
'size': "%sx%s" % (height, width),
'zoom': zoom,
'sensor': 'false',
}
return urlplus('http://maps.googleapis.com/maps/api/staticmap' , params)
def google_map_link(self, cr, uid, ids, zoom=8, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
'q': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
            'z': zoom
}
return urlplus('https://maps.google.com/maps' , params)
class res_company(osv.osv):
_inherit = "res.company"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
return partner and partner.google_map_img(zoom, width, height, context=context) or None
def google_map_link(self, cr, uid, ids, zoom=8, context=None):
partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
return partner and partner.google_map_link(zoom, context=context) or None
class base_language_install(osv.osv_memory):
_inherit = "base.language.install"
_columns = {
'website_ids': fields.many2many('website', string='Websites to translate'),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
defaults = super(base_language_install, self).default_get(cr, uid, fields, context)
website_id = context.get('params', {}).get('website_id')
if website_id:
if 'website_ids' not in defaults:
defaults['website_ids'] = []
defaults['website_ids'].append(website_id)
return defaults
def lang_install(self, cr, uid, ids, context=None):
if context is None:
context = {}
action = super(base_language_install, self).lang_install(cr, uid, ids, context)
language_obj = self.browse(cr, uid, ids)[0]
website_ids = [website.id for website in language_obj['website_ids']]
lang_id = self.pool['res.lang'].search(cr, uid, [('code', '=', language_obj['lang'])])
if website_ids and lang_id:
data = {'language_ids': [(4, lang_id[0])]}
self.pool['website'].write(cr, uid, website_ids, data)
params = context.get('params', {})
if 'url_return' in params:
return {
'url': params['url_return'].replace('[lang]', language_obj['lang']),
'type': 'ir.actions.act_url',
'target': 'self'
}
return action
class website_seo_metadata(osv.Model):
_name = 'website.seo.metadata'
_description = 'SEO metadata'
_columns = {
'website_meta_title': fields.char("Website meta title", translate=True),
'website_meta_description': fields.text("Website meta description", translate=True),
'website_meta_keywords': fields.char("Website meta keywords", translate=True),
}
# vim:et:
| browseinfo/odoo_saas3_nicolas | addons/website/models/website.py | Python | agpl-3.0 | 27,340 |
from __future__ import print_function
from builtins import range
from util import hook, http
import random
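# Search the public magicthegathering.io API for a card by name: prefer an exact
# (case-insensitive) match, otherwise fall back to a random card from the results.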
def card_search(name):
matching_cards = http.get_json(
"https://api.magicthegathering.io/v1/cards", name=name
)
for card in matching_cards["cards"]:
if card["name"].lower() == name.lower():
return card
return random.choice(matching_cards["cards"])
@hook.command
def mtg(inp, say=None):
""".mtg <name> - Searches for Magic the Gathering card given <name>"""
try:
card = card_search(inp)
except IndexError:
return "Card not found."
symbols = {
"{0}": "0",
"{1}": "1",
"{2}": "2",
"{3}": "3",
"{4}": "4",
"{5}": "5",
"{6}": "6",
"{7}": "7",
"{8}": "8",
"{9}": "9",
"{10}": "10",
"{11}": "11",
"{12}": "12",
"{13}": "13",
"{14}": "14",
"{15}": "15",
"{16}": "16",
"{17}": "17",
"{18}": "18",
"{19}": "19",
"{20}": "20",
"{T}": "\u27F3",
"{S}": "\u2744",
"{Q}": "\u21BA",
"{C}": "\u27E1",
"{W}": "W",
"{U}": "U",
"{B}": "B",
"{R}": "R",
"{G}": "G",
"{W/P}": "\u03D5",
"{U/P}": "\u03D5",
"{B/P}": "\u03D5",
"{R/P}": "\u03D5",
"{G/P}": "\u03D5",
"{X}": "X",
"\n": " ",
}
results = {
"name": card["name"],
"type": card["type"],
"cost": card.get("manaCost", ""),
"text": card.get("text", ""),
"power": card.get("power"),
"toughness": card.get("toughness"),
"loyalty": card.get("loyalty"),
"multiverseid": card.get("multiverseid"),
}
for fragment, rep in symbols.items():
results["text"] = results["text"].replace(fragment, rep)
results["cost"] = results["cost"].replace(fragment, rep)
template = ["{name} -"]
template.append("{type}")
template.append("- {cost} |")
if results["loyalty"]:
template.append("{loyalty} Loyalty |")
if results["power"]:
template.append("{power}/{toughness} |")
template.append(
"{text} | http://gatherer.wizards.com/Pages/Card/Details.aspx?multiverseid={multiverseid}"
)
return " ".join(template).format(**results)
if __name__ == "__main__":
print(card_search("Black Lotus"))
print(mtg("Black Lotus"))
| jmgao/skybot | plugins/mtg.py | Python | unlicense | 2,470 |
import subprocess
import sys
import setup_util
import os
def start(args, logfile, errfile):
try:
subprocess.check_call("mvn clean compile assembly:single", shell=True, cwd="grizzly-bm", stderr=errfile, stdout=logfile)
subprocess.Popen("java -Dorg.glassfish.grizzly.nio.transport.TCPNIOTransport.max-receive-buffer-size=16384 -Dorg.glassfish.grizzly.http.io.OutputBuffer.default-buffer-size=1024 -Dorg.glassfish.grizzly.memory.BuffersBuffer.bb-cache-size=32 -jar grizzly-bm-0.1-jar-with-dependencies.jar".rsplit(" "), cwd="grizzly-bm/target", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
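  # Find the running grizzly-bm java process in the `ps aux` output and send it
  # SIGTERM (signal 15).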
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'grizzly-bm' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 15)
return 0 | seem-sky/FrameworkBenchmarks | grizzly-bm/setup.py | Python | bsd-3-clause | 904 |
from django.test import TestCase
from submission.models import URL, Archive, Code, StudentSubmission, select_all_components, ALL_TYPE_CLASSES
from submission.models.code import SubmittedCode
from submission.forms import filetype
from grades.models import NumericActivity, Activity
from groups.models import Group, GroupMember
from coredata.tests import create_offering, validate_content
from coredata.models import Member, Person, CourseOffering
from django.urls import reverse
from courselib.testing import Client, test_views, basic_page_tests, TEST_COURSE_SLUG
import datetime, tempfile, os
import base64, io
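# Small base64-encoded sample files (tar.gz, gzip, zip, rar and pdf) used below
# to exercise file-type detection in test_magic().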
TGZ_FILE = base64.b64decode('H4sIAI7Wr0sAA+3OuxHCMBAE0CtFJUjoVw8BODfQP3bgGSKIcPResjO3G9w9/i9vRmt7ltnzZx6ilNrr7PVS9vscbUTKJ/wWr8fzuqYUy3pbvu1+9QAAAAAAAAAAAHCiNyHUDpAAKAAA')
GZ_FILE = base64.b64decode('H4sICIjWr0sAA2YAAwAAAAAAAAAAAA==')
ZIP_FILE = base64.b64decode('UEsDBAoAAAAAAMB6fDwAAAAAAAAAAAAAAAABABwAZlVUCQADiNavSzTYr0t1eAsAAQToAwAABOgDAABQSwECHgMKAAAAAADAenw8AAAAAAAAAAAAAAAAAQAYAAAAAAAAAAAApIEAAAAAZlVUBQADiNavS3V4CwABBOgDAAAE6AMAAFBLBQYAAAAAAQABAEcAAAA7AAAAAAA=')
RAR_FILE = base64.b64decode('UmFyIRoHAM+QcwAADQAAAAAAAABMpHQggCEACAAAAAAAAAADAAAAAMB6fDwdMwEApIEAAGYAv4hn9qn/1MQ9ewBABwA=')
PDF_FILE = base64.b64decode("""JVBERi0xLjQKJcfsj6IKNSAwIG9iago8PC9MZW5ndGggNiAwIFIvRmlsdGVyIC9GbGF0ZURlY29k
ZT4+CnN0cmVhbQp4nCtUMNAzVDAAQSidnMulH2SukF7MZaDgDsTpXIVchmAFClAqOVfBKQSoyELB
yEAhJI0Los9QwdxIwdQAKJLLpeGRmpOTr1CeX5SToqgZksXlGsIVCIQA1l0XrmVuZHN0cmVhbQpl
bmRvYmoKNiAwIG9iago5MgplbmRvYmoKNCAwIG9iago8PC9UeXBlL1BhZ2UvTWVkaWFCb3ggWzAg
MCA2MTIgNzkyXQovUm90YXRlIDAvUGFyZW50IDMgMCBSCi9SZXNvdXJjZXM8PC9Qcm9jU2V0Wy9Q
REYgL1RleHRdCi9FeHRHU3RhdGUgOSAwIFIKL0ZvbnQgMTAgMCBSCj4+Ci9Db250ZW50cyA1IDAg
Ugo+PgplbmRvYmoKMyAwIG9iago8PCAvVHlwZSAvUGFnZXMgL0tpZHMgWwo0IDAgUgpdIC9Db3Vu
dCAxCj4+CmVuZG9iagoxIDAgb2JqCjw8L1R5cGUgL0NhdGFsb2cgL1BhZ2VzIDMgMCBSCi9NZXRh
ZGF0YSAxMSAwIFIKPj4KZW5kb2JqCjcgMCBvYmoKPDwvVHlwZS9FeHRHU3RhdGUKL09QTSAxPj5l
bmRvYmoKOSAwIG9iago8PC9SNwo3IDAgUj4+CmVuZG9iagoxMCAwIG9iago8PC9SOAo4IDAgUj4+
CmVuZG9iago4IDAgb2JqCjw8L0Jhc2VGb250L0NvdXJpZXIvVHlwZS9Gb250Ci9TdWJ0eXBlL1R5
cGUxPj4KZW5kb2JqCjExIDAgb2JqCjw8L1R5cGUvTWV0YWRhdGEKL1N1YnR5cGUvWE1ML0xlbmd0
aCAxMzE5Pj5zdHJlYW0KPD94cGFja2V0IGJlZ2luPSfvu78nIGlkPSdXNU0wTXBDZWhpSHpyZVN6
TlRjemtjOWQnPz4KPD9hZG9iZS14YXAtZmlsdGVycyBlc2M9IkNSTEYiPz4KPHg6eG1wbWV0YSB4
bWxuczp4PSdhZG9iZTpuczptZXRhLycgeDp4bXB0az0nWE1QIHRvb2xraXQgMi45LjEtMTMsIGZy
YW1ld29yayAxLjYnPgo8cmRmOlJERiB4bWxuczpyZGY9J2h0dHA6Ly93d3cudzMub3JnLzE5OTkv
MDIvMjItcmRmLXN5bnRheC1ucyMnIHhtbG5zOmlYPSdodHRwOi8vbnMuYWRvYmUuY29tL2lYLzEu
MC8nPgo8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0nM2YzY2FmMmYtNzJkNy0xMWVhLTAwMDAt
NmVhZWMyYzJlNmZkJyB4bWxuczpwZGY9J2h0dHA6Ly9ucy5hZG9iZS5jb20vcGRmLzEuMy8nIHBk
ZjpQcm9kdWNlcj0nR1BMIEdob3N0c2NyaXB0IDguNzAnLz4KPHJkZjpEZXNjcmlwdGlvbiByZGY6
YWJvdXQ9JzNmM2NhZjJmLTcyZDctMTFlYS0wMDAwLTZlYWVjMmMyZTZmZCcgeG1sbnM6eG1wPSdo
dHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvJz48eG1wOk1vZGlmeURhdGU+MjAxMC0wMy0yOFQx
NTozODo1OC0wNzowMDwveG1wOk1vZGlmeURhdGU+Cjx4bXA6Q3JlYXRlRGF0ZT4yMDEwLTAzLTI4
VDE1OjM4OjU4LTA3OjAwPC94bXA6Q3JlYXRlRGF0ZT4KPHhtcDpDcmVhdG9yVG9vbD5Vbmtub3du
QXBwbGljYXRpb248L3htcDpDcmVhdG9yVG9vbD48L3JkZjpEZXNjcmlwdGlvbj4KPHJkZjpEZXNj
cmlwdGlvbiByZGY6YWJvdXQ9JzNmM2NhZjJmLTcyZDctMTFlYS0wMDAwLTZlYWVjMmMyZTZmZCcg
eG1sbnM6eGFwTU09J2h0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8nIHhhcE1NOkRvY3Vt
ZW50SUQ9JzNmM2NhZjJmLTcyZDctMTFlYS0wMDAwLTZlYWVjMmMyZTZmZCcvPgo8cmRmOkRlc2Ny
aXB0aW9uIHJkZjphYm91dD0nM2YzY2FmMmYtNzJkNy0xMWVhLTAwMDAtNmVhZWMyYzJlNmZkJyB4
bWxuczpkYz0naHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8nIGRjOmZvcm1hdD0nYXBw
bGljYXRpb24vcGRmJz48ZGM6dGl0bGU+PHJkZjpBbHQ+PHJkZjpsaSB4bWw6bGFuZz0neC1kZWZh
dWx0Jz5VbnRpdGxlZDwvcmRmOmxpPjwvcmRmOkFsdD48L2RjOnRpdGxlPjwvcmRmOkRlc2NyaXB0
aW9uPgo8L3JkZjpSREY+CjwveDp4bXBtZXRhPgogICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgCjw/eHBhY2tldCBlbmQ9J3cnPz4KZW5kc3RyZWFtCmVuZG9iagoyIDAgb2JqCjw8L1Byb2R1
Y2VyKEdQTCBHaG9zdHNjcmlwdCA4LjcwKQovQ3JlYXRpb25EYXRlKEQ6MjAxMDAzMjgxNTM4NTgt
MDcnMDAnKQovTW9kRGF0ZShEOjIwMTAwMzI4MTUzODU4LTA3JzAwJyk+PmVuZG9iagp4cmVmCjAg
MTIKMDAwMDAwMDAwMCA2NTUzNSBmIAowMDAwMDAwNDEzIDAwMDAwIG4gCjAwMDAwMDIwMzYgMDAw
MDAgbiAKMDAwMDAwMDM1NCAwMDAwMCBuIAowMDAwMDAwMTk1IDAwMDAwIG4gCjAwMDAwMDAwMTUg
MDAwMDAgbiAKMDAwMDAwMDE3NyAwMDAwMCBuIAowMDAwMDAwNDc4IDAwMDAwIG4gCjAwMDAwMDA1
NzggMDAwMDAgbiAKMDAwMDAwMDUxOSAwMDAwMCBuIAowMDAwMDAwNTQ4IDAwMDAwIG4gCjAwMDAw
MDA2NDAgMDAwMDAgbiAKdHJhaWxlcgo8PCAvU2l6ZSAxMiAvUm9vdCAxIDAgUiAvSW5mbyAyIDAg
UgovSUQgWzxFODIxMEZDNzI4OUJDM0Y5QzdCNEQxMjJDRjNCM0YwMD48RTgyMTBGQzcyODlCQzNG
OUM3QjREMTIyQ0YzQjNGMDA+XQo+PgpzdGFydHhyZWYKMjE1OQolJUVPRgo=""")
class SubmissionTest(TestCase):
fixtures = ['basedata', 'coredata', 'grades']
def setUp(self):
pass
def test_select_components(self):
"""
Test submission component classes: subclasses, selection, sorting.
"""
_, course = create_offering()
a1 = NumericActivity(name="Assignment 1", short_name="A1", status="RLS", offering=course, position=2, max_grade=15, due_date="2010-04-01")
a1.save()
a2 = NumericActivity(name="Assignment 2", short_name="A2", status="RLS", offering=course, position=1, max_grade=15, due_date="2010-03-01")
a2.save()
p = Person.objects.get(userid="ggbaker")
member = Member(person=p, offering=course, role="INST", career="NONS", added_reason="UNK")
member.save()
c1 = URL.Component(activity=a1, title="URL Link", position=8)
c1.save()
c2 = Archive.Component(activity=a1, title="Archive File", position=1, max_size=100000)
c2.save()
c3 = Code.Component(activity=a1, title="Code File", position=3, max_size=2000, allowed=".py")
c3.save()
comps = select_all_components(a1)
self.assertEqual(len(comps), 3)
self.assertEqual(comps[0].title, 'Archive File') # make sure position=1 is first
self.assertEqual(str(comps[1].Type.name), "Code")
self.assertEqual(str(comps[2].Type.name), "URL")
def test_component_view_page(self):
_, course = create_offering()
a1 = NumericActivity(name="Assignment 1", short_name="A1", status="RLS", offering=course, position=2, max_grade=15, due_date="2010-04-01")
a1.save()
a2 = NumericActivity(name="Assignment 2", short_name="A2", status="RLS", offering=course, position=1, max_grade=15, due_date="2010-03-01")
a2.save()
p = Person.objects.get(userid="ggbaker")
member = Member(person=p, offering=course, role="INST", career="NONS", added_reason="UNK")
member.save()
c1 = URL.Component(activity=a1, title="URL Link", position=8)
c1.save()
c2 = Archive.Component(activity=a1, title="Archive File", position=1, max_size=100000)
c2.save()
c3 = Code.Component(activity=a1, title="Code File", position=3, max_size=2000, allowed=".py")
c3.save()
client = Client()
client.login_user("ggbaker")
# When no component, should display error message
url = reverse('offering:submission:show_components', kwargs={'course_slug':course.slug, 'activity_slug':a2.slug})
response = basic_page_tests(self, client, url)
self.assertContains(response, 'No components configured.')
# add component and test
component = URL.Component(activity=a2, title="URL2", position=1)
component.save()
component = Archive.Component(activity=a2, title="Archive2", position=1, max_size=100)
component.save()
# should all appear
response = basic_page_tests(self, client, url)
self.assertContains(response, "URL2")
self.assertContains(response, "Archive2")
# make sure type displays
#self.assertContains(response, '<li class="view"><label>Type:</label>Archive</li>')
# delete component
self.assertRaises(NotImplementedError, component.delete)
def test_magic(self):
"""
Test file type inference function
"""
fh = io.BytesIO(TGZ_FILE)
fh.name = "something.tar.gz"
ftype = filetype(fh)
self.assertEqual(ftype, "TGZ")
fh = io.BytesIO(GZ_FILE)
fh.name = "something.gz"
ftype = filetype(fh)
self.assertEqual(ftype, "GZIP")
fh = io.BytesIO(ZIP_FILE)
fh.name = "something.zip"
ftype = filetype(fh)
self.assertEqual(ftype, "ZIP")
fh = io.BytesIO(RAR_FILE)
fh.name = "something.rar"
ftype = filetype(fh)
self.assertEqual(ftype, "RAR")
fh = io.BytesIO(PDF_FILE)
fh.name = "something.pdf"
ftype = filetype(fh)
self.assertEqual(ftype, "PDF")
testfiles = [
('text.odt', 'OD-TEXT'),
('pres.odp', 'OD-PRES'),
('ss.ods', 'OD-SS'),
('drawing.odg', 'OD-GRA'),
('excel.xls', 'MS-EXCEL'),
('excel.xlsx', 'MS-EXCEL'),
('project.mpp', 'MS-PROJ'),
('visio.vsd', 'MS-VISIO'),
('word.doc', 'MS-WORD'),
('word.docx', 'MS-WORD'),
('pres.ppt', 'MS-PPT'),
('pres.pptx', 'MS-PPT'),
]
for fn, ftype in testfiles:
with open(os.path.join("submission", "testfiles", fn), 'rb') as fh:
ftypem = filetype(fh)
self.assertEqual(ftype, ftypem)
def test_group_submission_view(self):
"""
test if group submission can be viewed by group member and non group member
"""
now = datetime.datetime.now()
_, course = create_offering()
a1 = NumericActivity(name="Assignment 1", short_name="A1", status="RLS", offering=course, position=2, max_grade=15, due_date=now, group=True)
a1.save()
a2 = NumericActivity(name="Assignment 2", short_name="A2", status="RLS", offering=course, position=1, max_grade=15, due_date=now, group=True)
a2.save()
p = Person.objects.get(userid="ggbaker")
member = Member(person=p, offering=course, role="INST", career="NONS", added_reason="UNK")
member.save()
c1 = URL.Component(activity=a1, title="URL Link", position=8)
c1.save()
c2 = Archive.Component(activity=a1, title="Archive File", position=1, max_size=100000)
c2.save()
c3 = Code.Component(activity=a1, title="Code File", position=3, max_size=2000, allowed=".py")
c3.save()
userid1 = "0aaa0"
userid2 = "0aaa1"
userid3 = "0aaa2"
for u in [userid1, userid2,userid3]:
p = Person.objects.get(userid=u)
m = Member(person=p, offering=course, role="STUD", credits=3, career="UGRD", added_reason="UNK")
m.save()
m = Member.objects.get(person__userid=userid1, offering=course)
g = Group(name="Test Group", manager=m, courseoffering=course)
g.save()
gm = GroupMember(group=g, student=m, confirmed=True, activity=a1)
gm.save()
gm = GroupMember(group=g, student=m, confirmed=True, activity=a2)
gm.save()
m = Member.objects.get(person__userid=userid2, offering=course)
gm = GroupMember(group=g, student=m, confirmed=True, activity=a1)
gm.save()
gm = GroupMember(group=g, student=m, confirmed=True, activity=a2)
gm.save()
m = Member.objects.get(person__userid=userid3, offering=course)
gm = GroupMember(group=g, student=m, confirmed=True, activity=a2)
gm.save()
client = Client()
# login as "0aaa0", member of group : test_group for assignment1 and assgnment2
client.login_user("0aaa0")
#submission page for assignment 1
url = reverse('offering:submission:show_components', kwargs={'course_slug': course.slug,'activity_slug':a1.slug})
response = basic_page_tests(self, client, url)
self.assertContains(response, "This is a group activity. You will submit on behalf of the group “Test Group”.")
self.assertContains(response, "You haven't made a submission for this component.")
def test_upload(self):
_, course = create_offering()
a1 = NumericActivity(name="Assignment 1", short_name="A1", status="RLS", offering=course, position=2, max_grade=15, due_date=datetime.datetime.now() + datetime.timedelta(hours=1), group=False)
a1.save()
p = Person.objects.get(userid="ggbaker")
member = Member(person=p, offering=course, role="INST", career="NONS", added_reason="UNK")
member.save()
c = Code.Component(activity=a1, title="Code File", position=3, max_size=2000, allowed=".py")
c.save()
userid1 = "0aaa0"
userid2 = "0aaa1"
userid3 = "0aaa2"
for u in [userid1, userid2,userid3]:
p = Person.objects.get(userid=u)
m = Member(person=p, offering=course, role="STUD", credits=3, career="UGRD", added_reason="UNK")
m.save()
# submit as student
client = Client()
client.login_user("0aaa0")
url = reverse('offering:submission:show_components', kwargs={'course_slug': course.slug,'activity_slug':a1.slug})
response = basic_page_tests(self, client, url)
# submit a file
tmpf = tempfile.NamedTemporaryFile(suffix=".py", delete=False)
codecontents = b'print "Hello World!"\n'
tmpf.write(codecontents)
tmpf.close()
try:
fh = open(tmpf.name, "r")
data = {"%i-code" % (c.id): fh}
response = client.post(url, data)
self.assertEqual(response.status_code, 302)
fh.close()
finally:
os.unlink(tmpf.name)
# make sure it's there and correct
subs = StudentSubmission.objects.all()
self.assertEqual(len(subs), 1)
sub = subs[0]
self.assertEqual(sub.member.person.userid, '0aaa0')
codes = SubmittedCode.objects.all()
self.assertEqual(len(codes), 1)
code = codes[0]
code.code.open()
self.assertEqual(code.code.read(), codecontents)
code.code.close()
def test_pages(self):
"Test a bunch of page views"
offering = CourseOffering.objects.get(slug=TEST_COURSE_SLUG)
activity = Activity.objects.get(offering=offering, slug='rep')
activity.due_date = datetime.datetime.now() + datetime.timedelta(days=1) # make sure it's submittable
activity.save()
client = Client()
# instructor views
client.login_user("ggbaker")
component1 = URL.Component(activity=activity, title='Sample URL 1', description='Please submit some URL.',
check=False, prefix='')
component1.save()
component2 = URL.Component(activity=activity, title='Sample URL 2', description='Please submit some URL.',
check=False, prefix='')
component2.save()
test_views(self, client, 'offering:submission:', ['show_components', 'add_component'],
{'course_slug': offering.slug, 'activity_slug': activity.slug})
url = reverse('offering:submission:edit_single', kwargs={'course_slug': offering.slug, 'activity_slug': activity.slug}) \
+ '?id=' + str(component1.id)
basic_page_tests(self, client, url)
url = reverse('offering:submission:add_component', kwargs={'course_slug': offering.slug, 'activity_slug': activity.slug}) \
+ '?type=url'
basic_page_tests(self, client, url)
# student views: with none, some, and all submitted
client.login_user("0aaa0")
# test various permutations of success to make sure everything returns okay
name1 = '%i-url' % (component1.id)
name2 = '%i-url' % (component2.id)
submissions = [
({}, False),
({name1: '', name2: ''}, False),
({name1: '', name2: 'do i look like a url to you?'}, False),
({name1: 'http://www.sfu.ca/', name2: ''}, False),
({name1: 'http://www.cs.sfu.ca/', name2: 'http://example.com/'}, True),
({name1: 'http://www.sfu.ca/', name2: 'http://example.com/'}, True),
]
for submitdata, redir in submissions:
test_views(self, client, 'offering:submission:', ['show_components', 'show_components_submission_history'],
{'course_slug': offering.slug, 'activity_slug': activity.slug})
url = reverse('offering:submission:show_components', kwargs={'course_slug': offering.slug, 'activity_slug': activity.slug})
response = client.post(url, submitdata)
if redir:
# success: we expect a redirect
self.assertEqual(response.status_code, 302)
else:
# some problems: expect a page reporting that
self.assertEqual(response.status_code, 200)
validate_content(self, response.content, url)
def test_submission_types(self):
"Minimally test each submission type"
offering = CourseOffering.objects.get(slug=TEST_COURSE_SLUG)
activity = Activity.objects.get(offering=offering, slug='rep')
activity.due_date = datetime.datetime.now() + datetime.timedelta(days=1) # make sure it's submittable
activity.save()
client = Client()
# instructor views
client.login_user("ggbaker")
for Type in ALL_TYPE_CLASSES:
label = Type.label
test_views(self, client, 'offering:submission:', ['add_component'],
{'course_slug': offering.slug, 'activity_slug': activity.slug}, qs='type='+label)
| sfu-fas/coursys | submission/tests.py | Python | gpl-3.0 | 18,007 |
from flask import Response, render_template
from model import blog
from . import handlers
@handlers.route('/')
def index():
return render_template('index.html')
@handlers.route('/showcase')
def showcase():
return render_template('showcase.html')
@handlers.route('/picscan')
def picscan():
return render_template('picscan/index.html')
@handlers.route('/picscan/privacy-policy')
def picscan_privacy():
return render_template('picscan/privacy-policy.html')
@handlers.route('/sitemap.xml')
def sitemap():
pages = []
pages.append({
"url": "//www.codeka.com/showcase",
"lastmod": "2022-02-16",
"priority": 1,
"changefreq": "yearly"
})
for f in blog.listPosts():
post = blog.loadPost(f)
pages.append({
"url": ("//www.codeka.com/blog/%04d/%02d/%s" %
(post.posted.year, post.posted.month, post.slug)),
"lastmod": "%04d-%02d-%02d" % (post.updated.year, post.updated.month, post.updated.day),
"priority": 10,
"changefreq": "monthly"
})
return Response(render_template("sitemap.xml", pages=pages), content_type="text/xml")
| codeka/website | handlers/main.py | Python | mit | 1,134 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.container_v1.types import cluster_service
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-container",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ClusterManagerTransport(abc.ABC):
"""Abstract transport class for ClusterManager."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "container.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_clusters: gapic_v1.method.wrap_method(
self.list_clusters,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.get_cluster: gapic_v1.method.wrap_method(
self.get_cluster,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.create_cluster: gapic_v1.method.wrap_method(
self.create_cluster, default_timeout=45.0, client_info=client_info,
),
self.update_cluster: gapic_v1.method.wrap_method(
self.update_cluster, default_timeout=45.0, client_info=client_info,
),
self.update_node_pool: gapic_v1.method.wrap_method(
self.update_node_pool, default_timeout=45.0, client_info=client_info,
),
self.set_node_pool_autoscaling: gapic_v1.method.wrap_method(
self.set_node_pool_autoscaling,
default_timeout=45.0,
client_info=client_info,
),
self.set_logging_service: gapic_v1.method.wrap_method(
self.set_logging_service, default_timeout=45.0, client_info=client_info,
),
self.set_monitoring_service: gapic_v1.method.wrap_method(
self.set_monitoring_service,
default_timeout=45.0,
client_info=client_info,
),
self.set_addons_config: gapic_v1.method.wrap_method(
self.set_addons_config, default_timeout=45.0, client_info=client_info,
),
self.set_locations: gapic_v1.method.wrap_method(
self.set_locations, default_timeout=45.0, client_info=client_info,
),
self.update_master: gapic_v1.method.wrap_method(
self.update_master, default_timeout=45.0, client_info=client_info,
),
self.set_master_auth: gapic_v1.method.wrap_method(
self.set_master_auth, default_timeout=45.0, client_info=client_info,
),
self.delete_cluster: gapic_v1.method.wrap_method(
self.delete_cluster,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.list_operations: gapic_v1.method.wrap_method(
self.list_operations,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.get_operation: gapic_v1.method.wrap_method(
self.get_operation,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.cancel_operation: gapic_v1.method.wrap_method(
self.cancel_operation, default_timeout=45.0, client_info=client_info,
),
self.get_server_config: gapic_v1.method.wrap_method(
self.get_server_config,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.get_json_web_keys: gapic_v1.method.wrap_method(
self.get_json_web_keys, default_timeout=None, client_info=client_info,
),
self.list_node_pools: gapic_v1.method.wrap_method(
self.list_node_pools,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.get_node_pool: gapic_v1.method.wrap_method(
self.get_node_pool,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.create_node_pool: gapic_v1.method.wrap_method(
self.create_node_pool, default_timeout=45.0, client_info=client_info,
),
self.delete_node_pool: gapic_v1.method.wrap_method(
self.delete_node_pool,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.rollback_node_pool_upgrade: gapic_v1.method.wrap_method(
self.rollback_node_pool_upgrade,
default_timeout=45.0,
client_info=client_info,
),
self.set_node_pool_management: gapic_v1.method.wrap_method(
self.set_node_pool_management,
default_timeout=45.0,
client_info=client_info,
),
self.set_labels: gapic_v1.method.wrap_method(
self.set_labels, default_timeout=45.0, client_info=client_info,
),
self.set_legacy_abac: gapic_v1.method.wrap_method(
self.set_legacy_abac, default_timeout=45.0, client_info=client_info,
),
self.start_ip_rotation: gapic_v1.method.wrap_method(
self.start_ip_rotation, default_timeout=45.0, client_info=client_info,
),
self.complete_ip_rotation: gapic_v1.method.wrap_method(
self.complete_ip_rotation,
default_timeout=45.0,
client_info=client_info,
),
self.set_node_pool_size: gapic_v1.method.wrap_method(
self.set_node_pool_size, default_timeout=45.0, client_info=client_info,
),
self.set_network_policy: gapic_v1.method.wrap_method(
self.set_network_policy, default_timeout=45.0, client_info=client_info,
),
self.set_maintenance_policy: gapic_v1.method.wrap_method(
self.set_maintenance_policy,
default_timeout=45.0,
client_info=client_info,
),
self.list_usable_subnetworks: gapic_v1.method.wrap_method(
self.list_usable_subnetworks,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def list_clusters(
self,
) -> Callable[
[cluster_service.ListClustersRequest],
Union[
cluster_service.ListClustersResponse,
Awaitable[cluster_service.ListClustersResponse],
],
]:
raise NotImplementedError()
@property
def get_cluster(
self,
) -> Callable[
[cluster_service.GetClusterRequest],
Union[cluster_service.Cluster, Awaitable[cluster_service.Cluster]],
]:
raise NotImplementedError()
@property
def create_cluster(
self,
) -> Callable[
[cluster_service.CreateClusterRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def update_cluster(
self,
) -> Callable[
[cluster_service.UpdateClusterRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def update_node_pool(
self,
) -> Callable[
[cluster_service.UpdateNodePoolRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_node_pool_autoscaling(
self,
) -> Callable[
[cluster_service.SetNodePoolAutoscalingRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_logging_service(
self,
) -> Callable[
[cluster_service.SetLoggingServiceRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_monitoring_service(
self,
) -> Callable[
[cluster_service.SetMonitoringServiceRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_addons_config(
self,
) -> Callable[
[cluster_service.SetAddonsConfigRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_locations(
self,
) -> Callable[
[cluster_service.SetLocationsRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def update_master(
self,
) -> Callable[
[cluster_service.UpdateMasterRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_master_auth(
self,
) -> Callable[
[cluster_service.SetMasterAuthRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def delete_cluster(
self,
) -> Callable[
[cluster_service.DeleteClusterRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def list_operations(
self,
) -> Callable[
[cluster_service.ListOperationsRequest],
Union[
cluster_service.ListOperationsResponse,
Awaitable[cluster_service.ListOperationsResponse],
],
]:
raise NotImplementedError()
@property
def get_operation(
self,
) -> Callable[
[cluster_service.GetOperationRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def cancel_operation(
self,
) -> Callable[
[cluster_service.CancelOperationRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def get_server_config(
self,
) -> Callable[
[cluster_service.GetServerConfigRequest],
Union[cluster_service.ServerConfig, Awaitable[cluster_service.ServerConfig]],
]:
raise NotImplementedError()
@property
def get_json_web_keys(
self,
) -> Callable[
[cluster_service.GetJSONWebKeysRequest],
Union[
cluster_service.GetJSONWebKeysResponse,
Awaitable[cluster_service.GetJSONWebKeysResponse],
],
]:
raise NotImplementedError()
@property
def list_node_pools(
self,
) -> Callable[
[cluster_service.ListNodePoolsRequest],
Union[
cluster_service.ListNodePoolsResponse,
Awaitable[cluster_service.ListNodePoolsResponse],
],
]:
raise NotImplementedError()
@property
def get_node_pool(
self,
) -> Callable[
[cluster_service.GetNodePoolRequest],
Union[cluster_service.NodePool, Awaitable[cluster_service.NodePool]],
]:
raise NotImplementedError()
@property
def create_node_pool(
self,
) -> Callable[
[cluster_service.CreateNodePoolRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def delete_node_pool(
self,
) -> Callable[
[cluster_service.DeleteNodePoolRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def rollback_node_pool_upgrade(
self,
) -> Callable[
[cluster_service.RollbackNodePoolUpgradeRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_node_pool_management(
self,
) -> Callable[
[cluster_service.SetNodePoolManagementRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_labels(
self,
) -> Callable[
[cluster_service.SetLabelsRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_legacy_abac(
self,
) -> Callable[
[cluster_service.SetLegacyAbacRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def start_ip_rotation(
self,
) -> Callable[
[cluster_service.StartIPRotationRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def complete_ip_rotation(
self,
) -> Callable[
[cluster_service.CompleteIPRotationRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_node_pool_size(
self,
) -> Callable[
[cluster_service.SetNodePoolSizeRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_network_policy(
self,
) -> Callable[
[cluster_service.SetNetworkPolicyRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def set_maintenance_policy(
self,
) -> Callable[
[cluster_service.SetMaintenancePolicyRequest],
Union[cluster_service.Operation, Awaitable[cluster_service.Operation]],
]:
raise NotImplementedError()
@property
def list_usable_subnetworks(
self,
) -> Callable[
[cluster_service.ListUsableSubnetworksRequest],
Union[
cluster_service.ListUsableSubnetworksResponse,
Awaitable[cluster_service.ListUsableSubnetworksResponse],
],
]:
raise NotImplementedError()
__all__ = ("ClusterManagerTransport",)
| googleapis/python-container | google/cloud/container_v1/services/cluster_manager/transports/base.py | Python | apache-2.0 | 22,997 |
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from metakernel import Magic
class Prompt4VarMagic(Magic):
def __init__(self, *args, **kwargs):
super(Prompt4VarMagic, self).__init__(*args, **kwargs)
def line_prompt4var(self, *args):
"""
        %prompt4var - Prompt for macro variables that will
be assigned to the SAS session. The variables will be
prompted each time the line magic is executed.
Example:
%prompt4var libpath file1
filename myfile "~&file1.";
libname data "&libpath";
"""
prmpt = OrderedDict()
for arg in args:
assert isinstance(arg, str)
prmpt[arg] = False
if not len(self.code):
if self.kernel.mva is None:
self.kernel._allow_stdin = True
self.kernel._start_sas()
self.kernel.mva.submit(code=self.code, results="html", prompt=prmpt)
else:
self.kernel.promptDict = prmpt
def cell_prompt4var(self, *args):
"""
%%prompt4var - The cell magic prompts users for variables that are
intended to be private -- passwords and such. The macro variables
        will be deleted from the SAS session when the cell finishes processing.
Libnames assigned will still be active but the password will not
be stored anywhere.
Examples:
%%prompt4var alter read
data work.cars(alter="&alter" read="&read");
set sashelp.cars;
id = _n_;
run;
proc print data=cars(read="badpw" obs=10);
run;
proc print data=cars(read="&read" obs=10);
run;
%%prompt4var pw1 pw2
libname foo teradata user=scott password=&pw1;
libname bar oracle user=tiger password=&pw2;
"""
prmpt = OrderedDict()
for arg in args:
assert isinstance(arg, str)
prmpt[arg] = True
if not len(self.code):
if self.kernel.mva is None:
                self.kernel._allow_stdin = True
self.kernel._start_sas()
self.kernel.mva.submit(code=self.code, results="html", prompt=prmpt)
else:
self.kernel.promptDict = prmpt
def register_magics(kernel):
kernel.register_magics(Prompt4VarMagic)
def register_ipython_magics():
from metakernel import IPythonKernel
from IPython.core.magic import register_line_magic
kernel = IPythonKernel()
magic = Prompt4VarMagic(kernel)
# Make magics callable:
kernel.line_magics["prompt4var"] = magic
@register_line_magic
def prompt4var(line):
kernel.call_magic("%prompt4var " + line)
| sassoftware/sas_kernel | sas_kernel/magics/prompt4var_magic.py | Python | apache-2.0 | 3,243 |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
# pylint: disable=global-statement
import logging
import os
from .base import EngineBase, EngineError, MimicFailure
# ---------------------------------------------------------------------------
_default_engine = None
_engines_by_name = {}
def get_engine(name=None, **kwargs):
"""
Get the engine implementation.
This function will initialize an engine instance using the
``get_engine`` and ``is_engine_available`` functions in the engine
packages and return an instance of the first available engine. If
one has already been initialized, it will be returned instead.
If no specific engine is requested and no engine has already been
initialized, this function will initialize and return an instance of
the first available engine in the following order:
======================= =========================================
SR engine back-end Engine name string(s)
======================= =========================================
1. Dragon/Natlink ``"natlink"``
2. Kaldi ``"kaldi"``
3. WSR/SAPI 5 ``"sapi5", "sapi5inproc", "sapi5shared"``
4. CMU Pocket Sphinx ``"sphinx"``
======================= =========================================
The :ref:`Text-input engine <RefTextEngine>` can be initialized by
specifying ``"text"`` as the engine name. This back-end will
**not** be initialized if no specific engine is requested because
the back-end is not a real SR engine and is used mostly for testing.
**Arguments**:
:param name: optional human-readable name of the engine to return.
:type name: str
:param \\**kwargs: optional keyword arguments passed through to the
engine for engine-specific configuration.
:rtype: EngineBase
:returns: engine instance
:raises: EngineError
"""
# pylint: disable=too-many-statements,too-many-branches
global _default_engine, _engines_by_name
log = logging.getLogger("engine")
if name and name in _engines_by_name:
# If the requested engine has already been initialized, return it.
engine = _engines_by_name[name]
elif not name and _default_engine:
# If no specific engine is requested and an engine has already
# been initialized, return it.
engine = _default_engine
else:
# No engine has been initialized yet.
engine = None
# Check if there is an already initialized engine *and* custom engine
# initialization arguments. This is not allowed.
if engine and kwargs is not None and len(kwargs) > 0:
message = ("Error: Passing get_engine arguments to an engine "
"that has already been initialized, hence these "
"arguments are ignored.")
log.error(message)
raise EngineError(message)
# If there is a relevant initialized engine already, then return it.
if engine:
return engine
# Check if we're on Windows. If we're not on Windows, then we don't
# evaluate Windows-only engines like natlink.
windows = os.name == 'nt'
if not engine and windows and name in (None, "natlink"):
# Attempt to retrieve the natlink back-end.
try:
from .backend_natlink import is_engine_available
from .backend_natlink import get_engine as get_specific_engine
if is_engine_available(**kwargs):
engine = get_specific_engine(**kwargs)
except Exception as e:
message = ("Exception while initializing natlink engine:"
" %s" % (e,))
log.warning(message)
if name:
raise EngineError(message)
if not engine and name in (None, "kaldi"):
# Attempt to retrieve the Kaldi back-end.
try:
from .backend_kaldi import is_engine_available
from .backend_kaldi import get_engine as get_specific_engine
if is_engine_available(**kwargs):
engine = get_specific_engine(**kwargs)
except Exception as e:
message = ("Exception while initializing kaldi engine:"
" %s" % (e,))
log.warning(message)
if name:
raise EngineError(message)
sapi5_names = (None, "sapi5shared", "sapi5inproc", "sapi5")
if not engine and windows and name in sapi5_names:
# Attempt to retrieve the sapi5 back-end.
try:
from .backend_sapi5 import is_engine_available
from .backend_sapi5 import get_engine as get_specific_engine
if is_engine_available(name, **kwargs):
engine = get_specific_engine(name, **kwargs)
except Exception as e:
message = ("Exception while initializing sapi5 engine:"
" %s" % (e,))
log.warning(message)
if name:
raise EngineError(message)
if not engine and name in (None, "sphinx"):
# Attempt to retrieve the CMU Sphinx back-end.
try:
from .backend_sphinx import is_engine_available
from .backend_sphinx import get_engine as get_specific_engine
if is_engine_available(**kwargs):
engine = get_specific_engine(**kwargs)
except Exception as e:
message = ("Exception while initializing sphinx engine:"
" %s" % (e,))
log.warning(message)
if name:
raise EngineError(message)
# Only retrieve the text input engine if explicitly specified; it is not
# an actual SR engine implementation and is mostly intended to be used
# for testing.
if not engine and name == "text":
# Attempt to retrieve the TextInput engine instance.
try:
from .backend_text import is_engine_available
from .backend_text import get_engine as get_specific_engine
if is_engine_available(**kwargs):
engine = get_specific_engine(**kwargs)
except Exception as e:
message = ("Exception while initializing text-input engine:"
" %s" % (e,))
log.warning(message)
if name:
raise EngineError(message)
# Return the engine instance, if one has been initialized. Log a
# message about which SR engine back-end was used.
if engine:
message = "Initialized %r SR engine: %r." % (engine.name, engine)
log.info(message)
return engine
elif not name:
raise EngineError("No usable engines found.")
else:
valid_names = ["natlink", "kaldi", "sphinx", "sapi5shared",
"sapi5inproc", "sapi5", "text"]
if name not in valid_names:
raise EngineError("Requested engine %r is not a valid engine "
"name." % (name,))
else:
raise EngineError("Requested engine %r not available."
% (name,))
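# A minimal usage sketch (not part of the library; assumes at least one back-end
# from the table in the docstring above is installed):
#     from dragonfly import get_engine
#     engine = get_engine("text")    # or get_engine() for the first available SR engine
#     print("Engine name: %r" % engine.name)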
def get_current_engine():
"""
Get the currently initialized SR engine object.
If an SR engine has not been initialized yet, ``None`` will be
returned instead.
:rtype: EngineBase | None
:returns: engine object or None
Usage example:
.. code-block:: python
# Print the name of the current engine if one has been
# initialized.
from dragonfly import get_current_engine
engine = get_current_engine()
if engine:
print("Engine name: %r" % engine.name)
else:
print("No engine has been initialized.")
"""
global _default_engine
return _default_engine
# ---------------------------------------------------------------------------
def register_engine_init(engine):
"""
Register initialization of an engine.
This function sets the default engine to the first engine
initialized.
"""
global _default_engine, _engines_by_name
if not _default_engine:
_default_engine = engine
if engine and engine.name not in _engines_by_name:
_engines_by_name[engine.name] = engine
| wolfmanstout/dragonfly | dragonfly/engines/__init__.py | Python | lgpl-3.0 | 9,160 |
# Copyright (c) 2007 Enough Project.
# See LICENSE for details.
import pygame
import contextlib
import gui.draw
@contextlib.contextmanager
def pygame_display(*args, **kw):
pygame.init()
try:
yield gui.draw.set_mode(*args, **kw)
except:
import sys
sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
import traceback
traceback.print_exc()
import pdb
pdb.pm()
finally:
pygame.quit()
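# Usage sketch (hypothetical arguments; whatever is passed is forwarded to
# gui.draw.set_mode, e.g. a pygame-style resolution tuple):
#     with pygame_display((800, 600)) as display:
#         pass  # run the event loop against `display` here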
| waldyrious/GraphUI | gui/main.py | Python | gpl-3.0 | 478 |
# -*- coding: utf-8 -*-
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enumerates all Chrome OS packages that are marked as `hot`.
Dumps results as a list of package names to a JSON file. Hotness is
determined by statically analyzing an ebuild.
Primarily intended for use by the Chrome OS toolchain team.
"""
from __future__ import print_function
import json
import os
import sys
from chromite.lib import commandline
from chromite.lib import cros_logging as logging
from chromite.lib import portage_util
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def is_ebuild_marked_hot(ebuild_path):
with open(ebuild_path) as f:
# The detection of this is intentionally super simple.
#
    # Note that while cros_optimize_package_for_speed is a function, the string
    # also appears in comments of packages that are forcibly optimized for speed
    # in some other way (like chromeos-chrome); matching those is intentional.
return any('cros_optimize_package_for_speed' in line for line in f)
def enumerate_package_ebuilds():
"""Determines package -> ebuild mappings for all packages.
Yields a series of (package_path, package_name, [path_to_ebuilds]). This may
yield the same package name multiple times if it's available in multiple
overlays.
"""
for overlay in portage_util.FindOverlays(overlay_type='both'):
logging.debug('Found overlay %s', overlay)
# Note that portage_util.GetOverlayEBuilds can't be used here, since that
# specifically only searches for cros_workon candidates. We care about
# everything we can possibly build.
for dir_path, dir_names, file_names in os.walk(overlay):
ebuilds = [x for x in file_names if x.endswith('.ebuild')]
if not ebuilds:
continue
# os.walk directly uses `dir_names` to figure out what to walk next. If
# there are ebuilds here, walking any lower is a waste, so don't do it.
del dir_names[:]
ebuild_dir = os.path.basename(dir_path)
ebuild_parent_dir = os.path.basename(os.path.dirname(dir_path))
package_name = '%s/%s' % (ebuild_parent_dir, ebuild_dir)
yield dir_path, package_name, ebuilds
def main(argv):
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--output', required=True)
opts = parser.parse_args(argv)
ebuilds_found = 0
packages_found = 0
merged_packages = 0
mappings = {}
for package_dir, package, ebuilds in enumerate_package_ebuilds():
packages_found += 1
ebuilds_found += len(ebuilds)
logging.debug('Found package %r in %r with ebuilds %r', package,
package_dir, ebuilds)
is_marked_hot = any(
is_ebuild_marked_hot(os.path.join(package_dir, x)) for x in ebuilds)
if is_marked_hot:
logging.debug('Package is marked as hot')
else:
logging.debug('Package is not marked as hot')
if package in mappings:
logging.warning('Multiple entries found for package %r; merging', package)
merged_packages += 1
mappings[package] = is_marked_hot or mappings[package]
else:
mappings[package] = is_marked_hot
hot_packages = sorted(
package for package, is_hot in mappings.items() if is_hot)
logging.info('%d ebuilds found', ebuilds_found)
logging.info('%d packages found', packages_found)
logging.info('%d packages merged', merged_packages)
logging.info('%d hot packages found, total', len(hot_packages))
with open(opts.output, 'w') as f:
json.dump(hot_packages, f)
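# Example invocation (a sketch; the entry point name depends on the chromite checkout):
#   enumerate_hot_packages --output /tmp/hot_packages.json
# The output file holds a sorted JSON list of "<category>/<package>" names whose
# ebuilds contain the cros_optimize_package_for_speed marker.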
| endlessm/chromium-browser | third_party/chromite/scripts/enumerate_hot_packages.py | Python | bsd-3-clause | 3,585 |
#!/usr/bin/env python
#import os, sys and glob libraries, as well as option parser to handle command line args
import os,sys
import glob
from optparse import OptionParser
#first, define system call with default retval for debugging
def mysystem(s,defaultretval=0):
#allows us to set effective debug flag
global dryrun
#want to be able to see statement executed
print(s)
#set for debugging
retval = defaultretval
if not dryrun:
retval = os.system(s)
return retval
#allows us to set up file names with all requisite info, including a default refinelevel of 0 and blank pre and post ext
#+03 gives signed three digit, auto filled integer
def format_name(case,order,mach,alpha,refinelevel=0,postext="",preext=""):
return "%s_%d_%0.2f_%+03d_%02d%s%s"%(case,order,mach,alpha,refinelevel,preext,postext)
#writes file to rotate grids with multiline string
def write_smooth_file(fname,case,alpha):
f = open(fname,"w")
s = """%s_%+03d
0
1
0.25
100
1
1
%d
2 3
1.0
1.0e-14
"""%(case,alpha,alpha)
f.write(s)
#writes opt smooth file
def write_smooth1_file(fname,case,order,mach,alpha,refinelevel):
f = open(fname,"w")
#allows us to concatenate carriage return
f.write(format_name(case,order,mach,alpha,refinelevel)+"\n")
s = """0
2
500
1
1
0
2 3
1.0e-6
"""
f.write(s)
#writes subdiv file, always sets output one level higher
def write_subdiv_file(fname,case,order,mach,alpha,refinelevel):
f = open(fname,"w")
f.write(format_name(case,order,mach,alpha,refinelevel+1)+"\n")
s = """1
3
5
0
1.0
1.0
1.0
5.0
5.0
5.0
2.0
2.0
2.0
1
"""
f.write(s)
#writes euler file
def write_euler_file(fname,case,alpha,mach,order,cfl,ptiter,refinelevel,extension="",path=""):
f = open(fname,"w")
s = """%d
%0.2f
1.4
"""%(alpha,mach)
f.write(s)
f.write("%s%s"%(path,format_name(case,order,mach,alpha,refinelevel,".mesh",extension)) + "\n")
s = """2 3
1
1.0
%d
100
%d
%d
10000
1.0
1.0e-15
"""%(cfl,order,ptiter)
f.write(s)
def main():
global dryrun
parser = OptionParser()
parser.add_option("--grids",action="store_true",dest="grids",default=False,help="Generates only initial grids at all alphas. Parallel must be set to 0.")
parser.add_option("--dryrun",action="store_true",dest="dryrun",default=False,help="Just print the commands; do not execute them.")
parser.add_option("--case",dest="case",default="naca0012",help="Original meshfile name, without extension.")
parser.add_option("--parallel",dest="parallel",default="0",help="Splits job into 21 separate jobs. Each must be given proc number from 1 to 21. Zero may only be used for generating grids.")
(options,args) = parser.parse_args()
#sets global variable to allow retvals to reflect debug and not execute
dryrun = options.dryrun
#if we set parallel to 0, runs all on one
    #else, we need to split up parallel artificially (which could be done more automatically, but it is simple to do it this way too)
if options.parallel == "0":
alphas = range(-10,11)
if options.parallel == "3":
alpha = -8
if options.parallel == "4":
alpha = -7
if options.parallel == "5":
alpha = -6
if options.parallel == "6":
alpha = -5
if options.parallel == "7":
alpha = -4
if options.parallel == "8":
alpha = -3
if options.parallel == "9":
alpha = -2
if options.parallel == "10":
alpha = -1
if options.parallel == "11":
alpha = 0
if options.parallel == "12":
alpha = 1
if options.parallel == "13":
alpha = 2
if options.parallel == "14":
alpha = 3
if options.parallel == "15":
alpha = 4
if options.parallel == "16":
alpha = 5
if options.parallel == "17":
alpha = 6
if options.parallel == "18":
alpha = 7
if options.parallel == "19":
alpha = 8
orders = [2]
machs = [0.55,0.65,0.75,0.85,0.95,1.05,1.15,1.25]
#allows us to get whole range, excluding last number, and inc by third value
cfls = range(50,550,50)
ptiters = range(20,220,20)
    #always do the grid run separately
if options.grids:
for alpha in alphas:
write_smooth_file("MYSMOOTH",options.case,alpha)
mysystem("./SMOOTH %s.mesh %s.mesh < MYSMOOTH > stdo.out"%(options.case,options.case))
for order in orders:
for mach in machs:
f1 = "%s_%+03d_01.dat"%(options.case,alpha)
f2 = "/ibrix-scr/vbetro/meshes/%s"%format_name(options.case,order,mach,alpha,0,".dat")
mysystem("cp %s %s"%(f1,f2))
f1 = "%s_%+03d_01.mesh"%(options.case,alpha)
f2 = "/ibrix-scr/vbetro/meshes/%s"%format_name(options.case,order,mach,alpha,0,".mesh")
mysystem("cp %s %s"%(f1,f2))
#now, remove all .dat and deprecated mesh files
mysystem("rm -f *.dat *_01.mesh")
sys.exit(1)
    #need to artificially set refinelevel
refinelevel = 1
#now, loop over all parameters and do all three adaptation runs for each
for order in orders:
for mach in machs:
for cfl in cfls:
for ptiter in ptiters:
write_euler_file("MYSOLVER%s"%options.parallel,options.case,alpha,mach,order,cfl,ptiter,refinelevel,"","/ibrix-scr/vbetro/meshes/")
result = mysystem("./EULER < MYSOLVER%s > stdo.out"%options.parallel)
                    #need to signify the run went fine without the 1st-then-2nd order switch
files = glob.glob("*_%d_%+03d_%0.2f_%03d_%03d_%02d.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
for f in files:
newf = f.replace(".dat","_00.dat")
mysystem("mv %s %s"%(f,newf))
                    #if we did not get results at 2nd order, run 1st order then 2nd order and rename the output files accordingly
if result==0 and order==2:
mysystem("rm -f *_%d_%+03d_%0.2f_%03d_%03d_%02d_00.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
write_euler_file("MYSOLVER%s"%options.parallel,options.case,alpha,mach,1,cfl,ptiter,refinelevel,"","/ibrix-scr/vbetro/meshes/")
mysystem("./EULER < MYSOLVER%s > stdo.out"%options.parallel)
mysystem("rm -f *_%d_%+03d_%0.2f_%03d_%03d_%02d.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
write_euler_file("MYSOLVER%s"%options.parallel,options.case,alpha,mach,order,cfl,ptiter,refinelevel,"_out")
result = mysystem("./EULER < MYSOLVER%s > stdo.out"%options.parallel)
files = glob.glob("*_%d_%+03d_%0.2f_%03d_%03d_%02d.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
for f in files:
newf = f.replace(".dat","_12.dat")
mysystem("mv %s %s"%(f,newf))
if result==0:
files = glob.glob("*_%d_%+03d_%0.2f_%03d_%03d_%02d*.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
for f in files:
newf = f.replace(".dat","_nan.dat")
mysystem("mv %s %s"%(f,newf))
if result==-1:
files = glob.glob("*_%d_%+03d_%0.2f_%03d_%03d_%02d*.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
for f in files:
newf = f.replace(".dat","_uncvg.dat")
mysystem("mv %s %s"%(f,newf))
#d = "/tmp/vbetro/order%d/mach%0.2f/alpha%+03d"%(order,mach,alpha)
#mysystem("mkdir -p " + d)
#mysystem("mv *_%d_%+03d_%0.2f_%03d_%03d_%02d*.dat"%(order,alpha,mach,ptiter,cfl,refinelevel) + d)
if result==1 and refinelevel < 2:
write_subdiv_file("MYSUBDIV%s"%options.parallel,options.case,order,mach,alpha,refinelevel)
fname = format_name(options.case,order,mach,alpha,refinelevel,".mesh","_out")
mysystem("./SMOOTH /ibrix-scr/vbetro/meshes/%s /ibrix-scr/vbetro/meshes/%s < MYSUBDIV%s > stdo.out"%(fname,fname,options.parallel))
write_smooth1_file("MYSMOOTH1%s"%options.parallel,options.case,order,mach,alpha,refinelevel+1)
fname = format_name(options.case,order,mach,alpha,refinelevel+1,".mesh")
mysystem("./SMOOTH /ibrix-scr/vbetro/meshes/%s /ibrix-scr/vbetro/meshes/%s < MYSMOOTH1%s > stdo.out"%(fname,fname,options.parallel))
base = format_name(options.case,order,mach,alpha,refinelevel+1)
mysystem("mv %s_01.dat /ibrix-scr/vbetro/meshes/%s.dat"%(base,base))
mysystem("mv %s_01.mesh /ibrix-scr/vbetro/meshes/%s.mesh"%(base,base))
if __name__ == "__main__":
main()
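# Example invocations (a sketch based on the options defined above):
#   ./adaptationruns1.py --grids --case naca0012 --parallel 0   # generate initial grids only
#   ./adaptationruns1.py --case naca0012 --parallel 11          # run the alpha = 0 sweep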
| vincentbetro/NACA-SIM | scripts/adaptationruns1.py | Python | gpl-2.0 | 9,065 |
import json
from sqlalchemy import Column, Integer, String
from geoalchemy2 import Geometry
from geoindex.extensions import db
import geoalchemy2.functions as geofunc
class Boundary(db.Model):
__tablename__ = 'boundary'
id = Column(Integer, primary_key=True)
name = Column(String)
code = Column(String)
polygon = Column(Geometry('MULTIPOLYGON', srid=4326))
def to_dict(self):
boundary = {"type": "Feature", "properties": {}, "geometry": {}}
boundary["properties"] = {"name": self.name, "code": self.code}
polygon = json.loads(db.session.scalar(geofunc.ST_AsGeoJSON(self.polygon)))
coordinates = polygon["coordinates"]
if len(coordinates) == 1:
boundary["geometry"]["type"] = "Polygon"
boundary["geometry"]["coordinates"] = coordinates[0]
return boundary
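# For a single-polygon boundary, to_dict() returns a GeoJSON-style Feature shaped like
# (illustrative values only):
#   {"type": "Feature",
#    "properties": {"name": "...", "code": "..."},
#    "geometry": {"type": "Polygon", "coordinates": [[[lng, lat], ...]]}}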
| openregister/geoindex | geoindex/frontend/models.py | Python | mit | 862 |
from flask import current_app
from flask_login import AnonymousUserMixin
from datetime import datetime, date
from decimal import Decimal
class SerializerMixin(object):
__public__ = None
"""Must be implemented by implementors"""
def _get_fields(self):
for f in self.__mapper__.iterate_properties:
yield f.key
def serialize(self, exclude=(), extra=()):
"""Returns model's public data for jsonify
:param set exclude: Exclude these items from serialization
:param set extra: Include these items for serialization
:return: dictionary to be passed to jsonify
:rtype: dict
"""
data = {}
keys = self._sa_instance_state.attrs.items()
public = self.__public__ + extra if self.__public__ else extra
for k, field in keys:
if public and k not in public:
continue
if exclude and k in exclude:
continue
value = self._serialize(field.value)
if value:
data[k] = value
extras = list(set(public).difference(
self._sa_instance_state.attrs.keys()))
for e in extras:
try:
data[e] = self._serialize(getattr(self, e))
except AttributeError as ae: # noqa
current_app.log.error(ae)
return data
@classmethod
def _serialize(cls, value):
"""Serialize value based its type
:param value:
:type value:
:return:
:rtype:
"""
if type(value) in (datetime, date):
ret = value.isoformat()
elif type(value) is Decimal:
ret = str(value)
elif hasattr(value, '__iter__') and not isinstance(value, str):
ret = []
for v in value:
ret.append(cls._serialize(v))
elif SerializerMixin in value.__class__.__bases__:
ret = value.serialize()
else:
ret = value
return ret
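# Usage sketch (hypothetical model; assumes the usual Flask-SQLAlchemy `db` object
# and that the caller wraps the result with flask.jsonify):
#     class Sample(db.Model, SerializerMixin):
#         __public__ = ('id', 'name')
#         id = db.Column(db.Integer, primary_key=True)
#         name = db.Column(db.String(64))
#     # later, in a view:
#     return jsonify(Sample.query.first().serialize(exclude=('name',)))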
class Anonymous(AnonymousUserMixin):
def __init__(self):
self.name = 'Anonymous'
self.email = 'anonymous@domain.tld'
self.organization_id = 0
def can(self, permissions):
return False
def is_admin(self):
return False
| certeu/do-portal | app/utils/mixins.py | Python | bsd-3-clause | 2,296 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'johnx'
__date__ = '11/18/13 1:11 PM'
from stalk.session import SessionManager
from stalk.util import make_client, run_command, CONFIG
from stalk.head_quarters import HeadQuarters, CommandNotImplemented
admin_email = CONFIG['admin_email']
command_lead = CONFIG['command_lead']
def process_message(_, message_node):
from_id = message_node.getFrom().getStripped()
content = message_node.getBody()
if not content:
return
print '%s: %s' % (from_id, content)
if from_id == admin_email:
if content.startswith(command_lead):
try:
client.send_text(from_id, HeadQuarters.handle(from_id, content))
except CommandNotImplemented as err:
client.send_text(from_id, 'command not found: %s%s.' % (command_lead, err.name))
else:
admin_channel = SessionManager.get_session(from_id)['channel']
client.send_text(from_id, run_command(content, admin_channel))
if __name__ == '__main__':
HeadQuarters.load_all_commands()
client = make_client()
client.RegisterHandler('message', process_message)
client.loop()
| boyxuper/server_talk | script/server.py | Python | apache-2.0 | 1,195 |
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import options
from sentry.api.bases.project import ProjectEndpoint
from sentry.models import ProjectKey
class ProjectDocsEndpoint(ProjectEndpoint):
def get(self, request, project):
data = options.get('sentry:docs')
project_key = ProjectKey.get_default(project)
context = {
'platforms': data['platforms'],
}
if project_key:
context['dsn'] = project_key.dsn_private
context['dsnPublic'] = project_key.dsn_public
return Response(context)
| imankulov/sentry | src/sentry/api/endpoints/project_docs.py | Python | bsd-3-clause | 626 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CultivosAnuales'
db.create_table(u'encuesta_cultivosanuales', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)),
('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'encuesta', ['CultivosAnuales'])
# Adding model 'ProductoAnimal'
db.create_table(u'encuesta_productoanimal', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)),
('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'encuesta', ['ProductoAnimal'])
# Adding model 'ProductoProcesado'
db.create_table(u'encuesta_productoprocesado', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)),
('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'encuesta', ['ProductoProcesado'])
# Deleting field 'SeguridadCAnuales.unidad_medida'
db.delete_column(u'encuesta_seguridadcanuales', 'unidad_medida')
# Renaming column for 'SeguridadCAnuales.cultivos' to match new field type.
db.rename_column(u'encuesta_seguridadcanuales', 'cultivos', 'cultivos_id')
# Changing field 'SeguridadCAnuales.cultivos'
db.alter_column(u'encuesta_seguridadcanuales', 'cultivos_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.CultivosAnuales']))
# Adding index on 'SeguridadCAnuales', fields ['cultivos']
db.create_index(u'encuesta_seguridadcanuales', ['cultivos_id'])
# Deleting field 'SeguridadPProcesados.unidad_medida'
db.delete_column(u'encuesta_seguridadpprocesados', 'unidad_medida')
# Renaming column for 'SeguridadPProcesados.producto' to match new field type.
db.rename_column(u'encuesta_seguridadpprocesados', 'producto', 'producto_id')
# Changing field 'SeguridadPProcesados.producto'
db.alter_column(u'encuesta_seguridadpprocesados', 'producto_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.ProductoProcesado']))
# Adding index on 'SeguridadPProcesados', fields ['producto']
db.create_index(u'encuesta_seguridadpprocesados', ['producto_id'])
# Deleting field 'SeguridadPAnimal.unidad_medida'
db.delete_column(u'encuesta_seguridadpanimal', 'unidad_medida')
# Renaming column for 'SeguridadPAnimal.producto' to match new field type.
db.rename_column(u'encuesta_seguridadpanimal', 'producto', 'producto_id')
# Changing field 'SeguridadPAnimal.producto'
db.alter_column(u'encuesta_seguridadpanimal', 'producto_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.ProductoAnimal']))
# Adding index on 'SeguridadPAnimal', fields ['producto']
db.create_index(u'encuesta_seguridadpanimal', ['producto_id'])
def backwards(self, orm):
# Removing index on 'SeguridadPAnimal', fields ['producto']
db.delete_index(u'encuesta_seguridadpanimal', ['producto_id'])
# Removing index on 'SeguridadPProcesados', fields ['producto']
db.delete_index(u'encuesta_seguridadpprocesados', ['producto_id'])
# Removing index on 'SeguridadCAnuales', fields ['cultivos']
db.delete_index(u'encuesta_seguridadcanuales', ['cultivos_id'])
# Deleting model 'CultivosAnuales'
db.delete_table(u'encuesta_cultivosanuales')
# Deleting model 'ProductoAnimal'
db.delete_table(u'encuesta_productoanimal')
# Deleting model 'ProductoProcesado'
db.delete_table(u'encuesta_productoprocesado')
# Adding field 'SeguridadCAnuales.unidad_medida'
db.add_column(u'encuesta_seguridadcanuales', 'unidad_medida',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Renaming column for 'SeguridadCAnuales.cultivos' to match new field type.
db.rename_column(u'encuesta_seguridadcanuales', 'cultivos_id', 'cultivos')
# Changing field 'SeguridadCAnuales.cultivos'
db.alter_column(u'encuesta_seguridadcanuales', 'cultivos', self.gf('django.db.models.fields.IntegerField')())
# Adding field 'SeguridadPProcesados.unidad_medida'
db.add_column(u'encuesta_seguridadpprocesados', 'unidad_medida',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Renaming column for 'SeguridadPProcesados.producto' to match new field type.
db.rename_column(u'encuesta_seguridadpprocesados', 'producto_id', 'producto')
# Changing field 'SeguridadPProcesados.producto'
db.alter_column(u'encuesta_seguridadpprocesados', 'producto', self.gf('django.db.models.fields.IntegerField')())
# Adding field 'SeguridadPAnimal.unidad_medida'
db.add_column(u'encuesta_seguridadpanimal', 'unidad_medida',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Renaming column for 'SeguridadPAnimal.producto' to match new field type.
db.rename_column(u'encuesta_seguridadpanimal', 'producto_id', 'producto')
# Changing field 'SeguridadPAnimal.producto'
db.alter_column(u'encuesta_seguridadpanimal', 'producto', self.gf('django.db.models.fields.IntegerField')())
models = {
u'encuesta.credito': {
'Meta': {'object_name': 'Credito'},
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organizacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.OrganizacionesDanCredito']"}),
'personas': ('django.db.models.fields.IntegerField', [], {}),
'uso': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['encuesta.UsoCredito']", 'symmetrical': 'False'})
},
u'encuesta.cultivosanuales': {
'Meta': {'object_name': 'CultivosAnuales'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.cultivossaf': {
'Meta': {'object_name': 'CultivosSaf'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.educacion': {
'Meta': {'ordering': "(u'sexo_edad',)", 'object_name': 'Educacion'},
'circ_estudio_adulto': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
'estudiando': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nosabe_leer': ('django.db.models.fields.IntegerField', [], {}),
'num_persona': ('django.db.models.fields.IntegerField', [], {}),
'pri_completa': ('django.db.models.fields.IntegerField', [], {}),
'pri_incompleta': ('django.db.models.fields.IntegerField', [], {}),
'secu_completa': ('django.db.models.fields.IntegerField', [], {}),
'secu_incompleta': ('django.db.models.fields.IntegerField', [], {}),
'sexo_edad': ('django.db.models.fields.IntegerField', [], {}),
'uni_o_tecnico': ('django.db.models.fields.IntegerField', [], {})
},
u'encuesta.encuesta': {
'Meta': {'object_name': 'Encuesta'},
'ano': ('django.db.models.fields.IntegerField', [], {}),
'fecha': ('django.db.models.fields.DateField', [], {}),
'fecha2': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oficina': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'personas': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Recolector']"})
},
u'encuesta.finca': {
'Meta': {'ordering': "(u'finca',)", 'object_name': 'Finca'},
'animal_aves': ('django.db.models.fields.IntegerField', [], {}),
'animal_bovino': ('django.db.models.fields.IntegerField', [], {}),
'animal_caprino': ('django.db.models.fields.IntegerField', [], {}),
'animal_equino': ('django.db.models.fields.IntegerField', [], {}),
'animal_porcino': ('django.db.models.fields.IntegerField', [], {}),
'area_casa': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'area_finca': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'comunidad': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
'coordenadas_gps': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '6', 'blank': 'True'}),
'coordenadas_lg': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '6', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
'finca': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'fuente_agua': ('django.db.models.fields.IntegerField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legalidad': ('django.db.models.fields.IntegerField', [], {'max_length': '60'}),
'microcuenca': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Microcuenca']"}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'municipio'", 'to': u"orm['lugar.Municipio']"}),
'nombre_productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'productores'", 'to': u"orm['encuesta.Productores']"}),
'propietario': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tipo_casa': ('django.db.models.fields.IntegerField', [], {'max_length': '60'})
},
u'encuesta.fotos': {
'Meta': {'object_name': 'Fotos'},
'adjunto': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.ingresoservicionegocio': {
'Meta': {'object_name': 'IngresoServicioNegocio'},
'cantidad': ('django.db.models.fields.IntegerField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingresos': ('django.db.models.fields.FloatField', [], {}),
'maneja': ('django.db.models.fields.IntegerField', [], {}),
'plan_negocio': ('django.db.models.fields.IntegerField', [], {}),
'precio': ('django.db.models.fields.FloatField', [], {}),
'servicios': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ServiciosActividades']"})
},
u'encuesta.innovacion': {
'Meta': {'object_name': 'Innovacion'},
'aplica': ('django.db.models.fields.IntegerField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.TipoInnovacion']"})
},
u'encuesta.organizacionesdancredito': {
'Meta': {'object_name': 'OrganizacionesDanCredito'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'encuesta.productoanimal': {
'Meta': {'object_name': 'ProductoAnimal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.productoprocesado': {
'Meta': {'object_name': 'ProductoProcesado'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.productores': {
'Meta': {'object_name': 'Productores'},
'cedula_productor': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'celular': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'contador': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'sexo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'encuesta.recolector': {
'Meta': {'unique_together': "((u'nombre',),)", 'object_name': 'Recolector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.seguridadalimentaria': {
'Meta': {'object_name': 'SeguridadAlimentaria'},
'alimentos': ('django.db.models.fields.IntegerField', [], {}),
'comprar': ('django.db.models.fields.BooleanField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nivel_consumo_suficiente': ('django.db.models.fields.IntegerField', [], {}),
'porcentaje_compran': ('django.db.models.fields.IntegerField', [], {}),
'porcentaje_nivel': ('django.db.models.fields.IntegerField', [], {})
},
u'encuesta.seguridadcanuales': {
'Meta': {'object_name': 'SeguridadCAnuales'},
'area_produccion': ('django.db.models.fields.FloatField', [], {}),
'auto_consumo': ('django.db.models.fields.FloatField', [], {}),
'cultivos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.CultivosAnuales']"}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'perdidas': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}),
'produccion': ('django.db.models.fields.FloatField', [], {}),
'venta_no': ('django.db.models.fields.FloatField', [], {}),
'venta_organizada': ('django.db.models.fields.FloatField', [], {})
},
u'encuesta.seguridadpanimal': {
'Meta': {'object_name': 'SeguridadPAnimal'},
'auto_consumo': ('django.db.models.fields.FloatField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maneja': ('django.db.models.fields.IntegerField', [], {}),
'perdidas': ('django.db.models.fields.FloatField', [], {}),
'plan_negocio': ('django.db.models.fields.IntegerField', [], {}),
'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}),
'produccion': ('django.db.models.fields.FloatField', [], {}),
'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ProductoAnimal']"}),
'venta_no': ('django.db.models.fields.FloatField', [], {}),
'venta_organizada': ('django.db.models.fields.FloatField', [], {})
},
u'encuesta.seguridadpprocesados': {
'Meta': {'object_name': 'SeguridadPProcesados'},
'auto_consumo': ('django.db.models.fields.FloatField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maneja': ('django.db.models.fields.IntegerField', [], {}),
'perdidas': ('django.db.models.fields.FloatField', [], {}),
'plan_negocio': ('django.db.models.fields.IntegerField', [], {}),
'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}),
'produccion': ('django.db.models.fields.FloatField', [], {}),
'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ProductoProcesado']"}),
'venta_no': ('django.db.models.fields.FloatField', [], {}),
'venta_organizada': ('django.db.models.fields.FloatField', [], {})
},
u'encuesta.seguridadsaf': {
'Meta': {'object_name': 'SeguridadSaf'},
'area_desarrollo': ('django.db.models.fields.FloatField', [], {}),
'area_produccion': ('django.db.models.fields.FloatField', [], {}),
'auto_consumo': ('django.db.models.fields.FloatField', [], {}),
'cultivos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.CultivosSaf']"}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'perdidas': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}),
'produccion_total': ('django.db.models.fields.FloatField', [], {}),
'rendimiento': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'venta_no': ('django.db.models.fields.FloatField', [], {}),
'venta_organizada': ('django.db.models.fields.FloatField', [], {})
},
u'encuesta.serviciosactividades': {
'Meta': {'object_name': 'ServiciosActividades'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.tipoinnovacion': {
'Meta': {'object_name': 'TipoInnovacion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'encuesta.usocredito': {
'Meta': {'object_name': 'UsoCredito'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'encuesta.usotierra': {
'Meta': {'object_name': 'UsoTierra'},
'anuales_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'arboles_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bosque_primario': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'bosque_secundario': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'cultivos_anuales': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'cultivos_perennes': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'cultivos_semiperennes': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
'forestales_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'perennes_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'plantaciones_forestales': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'potrero_arboles': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'potrero_sin_arboles': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'primario_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'secundario_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'semiperennes_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'sin_arboles_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'tacotal': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'tacotal_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_uso': ('django.db.models.fields.FloatField', [], {})
},
u'lugar.comunidad': {
'Meta': {'object_name': 'Comunidad'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.departamento': {
'Meta': {'object_name': 'Departamento'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.microcuenca': {
'Meta': {'object_name': 'Microcuenca'},
'comunidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Comunidad']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'lugar.municipio': {
'Meta': {'object_name': 'Municipio'},
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
}
}
    complete_apps = ['encuesta']
| CARocha/addac_fadcanic | encuesta/migrations/0005_auto__add_cultivosanuales__add_productoanimal__add_productoprocesado__.py | Python | gpl-3.0 | 26,202 |
"""
Tests for contentstore.views.preview.py
"""
import re
import ddt
import mock
from django.test.client import Client, RequestFactory
from xblock.core import XBlock, XBlockAside
from contentstore.utils import reverse_usage_url
from contentstore.views.preview import _preview_module_system, get_preview_fragment
from student.tests.factories import UserFactory
from xblock_config.models import StudioConfig
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.test_asides import AsideTestType
@ddt.ddt
class GetPreviewHtmlTestCase(ModuleStoreTestCase):
"""
Tests for get_preview_fragment.
Note that there are other existing test cases in test_contentstore that indirectly execute
get_preview_fragment via the xblock RESTful API.
"""
@XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
def test_preview_fragment(self):
"""
        Test for calling get_preview_fragment. Ensures data-usage-id is correctly set and
asides are correctly included.
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
html = ItemFactory.create(
parent_location=course.location,
category="html",
data={'data': "<html>foobar</html>"}
)
config = StudioConfig.current()
config.enabled = True
config.save()
request = RequestFactory().get('/dummy-url')
request.user = UserFactory()
request.session = {}
# Call get_preview_fragment directly.
context = {
'reorderable_items': set(),
'read_only': True
}
html = get_preview_fragment(request, html, context).content
# Verify student view html is returned, and the usage ID is as expected.
html_pattern = re.escape(unicode(course.id.make_usage_key('html', 'replaceme'))).replace('replaceme', r'html_[0-9]*')
self.assertRegexpMatches(
html,
'data-usage-id="{}"'.format(html_pattern)
)
self.assertRegexpMatches(html, '<html>foobar</html>')
self.assertRegexpMatches(html, r"data-block-type=[\"\']test_aside[\"\']")
self.assertRegexpMatches(html, "Aside rendered")
# Now ensure the acid_aside is not in the result
self.assertNotRegexpMatches(html, r"data-block-type=[\"\']acid_aside[\"\']")
# Ensure about pages don't have asides
about = modulestore().get_item(course.id.make_usage_key('about', 'overview'))
html = get_preview_fragment(request, about, context).content
self.assertNotRegexpMatches(html, r"data-block-type=[\"\']test_aside[\"\']")
self.assertNotRegexpMatches(html, "Aside rendered")
@XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
def test_preview_no_asides(self):
"""
        Test for calling get_preview_fragment. Ensures data-usage-id is correctly set and
asides are correctly excluded because they are not enabled.
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
html = ItemFactory.create(
parent_location=course.location,
category="html",
data={'data': "<html>foobar</html>"}
)
config = StudioConfig.current()
config.enabled = False
config.save()
request = RequestFactory().get('/dummy-url')
request.user = UserFactory()
request.session = {}
# Call get_preview_fragment directly.
context = {
'reorderable_items': set(),
'read_only': True
}
html = get_preview_fragment(request, html, context).content
self.assertNotRegexpMatches(html, r"data-block-type=[\"\']test_aside[\"\']")
self.assertNotRegexpMatches(html, "Aside rendered")
@mock.patch('xmodule.conditional_module.ConditionalModule.is_condition_satisfied')
def test_preview_conditional_module_children_context(self, mock_is_condition_satisfied):
"""
        Tests that when an empty context is passed to children of ConditionalModule it will not raise a KeyError.
"""
mock_is_condition_satisfied.return_value = True
client = Client()
client.login(username=self.user.username, password=self.user_password)
with self.store.default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
conditional_block = ItemFactory.create(
parent_location=course.location,
category="conditional"
)
# child conditional_block
ItemFactory.create(
parent_location=conditional_block.location,
category="conditional"
)
url = reverse_usage_url(
'preview_handler',
conditional_block.location,
kwargs={'handler': 'xmodule_handler/conditional_get'}
)
response = client.post(url)
self.assertEqual(response.status_code, 200)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_block_branch_not_changed_by_preview_handler(self, default_store):
"""
        Tests that preview_handler does not update blocks being previewed.
"""
client = Client()
client.login(username=self.user.username, password=self.user_password)
with self.store.default_store(default_store):
course = CourseFactory.create()
block = ItemFactory.create(
parent_location=course.location,
category="problem"
)
url = reverse_usage_url(
'preview_handler',
block.location,
kwargs={'handler': 'xmodule_handler/problem_check'}
)
response = client.post(url)
self.assertEqual(response.status_code, 200)
self.assertFalse(modulestore().has_changes(modulestore().get_item(block.location)))
@XBlock.needs("field-data")
@XBlock.needs("i18n")
@XBlock.needs("user")
class PureXBlock(XBlock):
"""
Pure XBlock to use in tests.
"""
pass
@ddt.ddt
class StudioXBlockServiceBindingTest(ModuleStoreTestCase):
"""
Tests that the Studio Module System (XBlock Runtime) provides an expected set of services.
"""
def setUp(self):
"""
Set up the user and request that will be used.
"""
super(StudioXBlockServiceBindingTest, self).setUp()
self.user = UserFactory()
self.course = CourseFactory.create()
self.request = mock.Mock()
self.field_data = mock.Mock()
@XBlock.register_temp_plugin(PureXBlock, identifier='pure')
@ddt.data("user", "i18n", "field-data")
def test_expected_services_exist(self, expected_service):
"""
        Tests that the 'user', 'i18n', and 'field-data' services are provided by the Studio runtime.
"""
descriptor = ItemFactory(category="pure", parent=self.course)
runtime = _preview_module_system(
self.request,
descriptor,
self.field_data,
)
service = runtime.service(descriptor, expected_service)
self.assertIsNotNone(service)
| angelapper/edx-platform | cms/djangoapps/contentstore/views/tests/test_preview.py | Python | agpl-3.0 | 7,455 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-12 10:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db', '0050_auto_20161112_2133'),
]
operations = [
migrations.AlterField(
model_name='yearlevel',
name='name',
field=models.CharField(blank=True, choices=[(1, '1'), (2, '2'), (3, 'A'), (4, '3B'), (5, '4C'), (6, '5D')], max_length=30, null=True),
),
]
| caw/curriculum | db/migrations/0051_auto_20161112_2140.py | Python | gpl-3.0 | 543 |
# -*- coding: utf-8 -*-
from unittest import TestCase
from app.crypto import CryptoKey
from app.database import Database
from app.hosts import Hosts
class BaseTestCase(TestCase):
def setUp(self):
self.db = Database('sqlite://', echo=False) # in memory database
self.db.create()
self.ck = CryptoKey()
self.hosts = Hosts(self.db, self.ck)
def tearDown(self):
self.db.drop()
| szatanszmatan/myrdp | tests/__init__.py | Python | gpl-2.0 | 424 |
#User
#Add User
AddUser="KKWebVideoDL_EventID_User_Add"
#Verify User
VefUser="KKWebVideoDL_EventID_User_VefUser"
#Disable User
DisableUser="KKWebVideoDL_EventID_User_Disable"
#Task
#Add
AddTask="KKWebVideoDL_EventID_Task_Add"
#File
#Create
CreateFile="KKWebVideoDL_EventID_File_Create"
#Achive
AchivedFile="KKWebVideoDL_EventID_File_Achived"
#Status
#SetStatus
SetStatus="KKWebVideoDL_EventID_Status_Set"
| xiaokangwang/KKWebVideoDL-X | eventID.py | Python | gpl-3.0 | 418 |
from splice.environment import Environment
# flask_restful doesn't allow multiple initializations
register_flask_restful = False
def setup_routes(app):
env = Environment.instance()
global register_flask_restful
if "signing" in env.config.ALLOWED_APPS:
import splice.web.api.content
splice.web.api.content.register_routes(app)
if "tiles" in env.config.ALLOWED_APPS:
import splice.web.views
splice.web.views.register_routes(app)
import splice.web.api.heartbeat
splice.web.api.heartbeat.register_routes(app)
if not register_flask_restful:
import splice.web.api.init
splice.web.api.init.register_routes(app)
import splice.web.api.account
splice.web.api.account.register_routes(app)
import splice.web.api.campaign
splice.web.api.campaign.register_routes(app)
import splice.web.api.adgroup
splice.web.api.adgroup.register_routes(app)
import splice.web.api.tile
splice.web.api.tile.register_routes(app)
import splice.web.api.reporting
splice.web.api.reporting.register_routes(app)
import splice.web.api.distribution
splice.web.api.distribution.register_routes(app)
register_flask_restful = True
def create_webapp(*args, **kwargs):
env = Environment.instance(*args, **kwargs)
setup_routes(env.application)
return env.application
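# A minimal usage sketch; arguments are forwarded to Environment.instance(),
# whose signature is not shown here, so the bare call below is an assumption:
#
#     app = create_webapp()
#     app.run()
#
# Which route modules get registered depends on env.config.ALLOWED_APPS, and the
# module-level flag above ensures the flask_restful resources are registered at
# most once per process.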
| ncloudioj/splice | splice/webapp.py | Python | mpl-2.0 | 1,502 |
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# pygtail - a python "port" of logtail2
# Copyright (C) 2011 Brad Greenlee <brad@footle.org>
#
# Derived from logcheck <http://logcheck.org>
# Copyright (C) 2003 Jonathan Middleton <jjm@ixtab.org.uk>
# Copyright (C) 2001 Paul Slootman <paul@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from os import stat
from os.path import exists
import sys
import glob
import string
from optparse import OptionParser
__version__ = '0.2.1'
class Pygtail(object):
"""
Creates an iterable object that returns only unread lines.
"""
def __init__(self, filename, offset_file=None, paranoid=False):
self.filename = filename
self.paranoid = paranoid
self._offset_file = offset_file or "%s.offset" % self.filename
self._offset_file_inode = 0
self._offset = 0
self._fh = None
self._rotated_logfile = None
# if offset file exists, open and parse it
if exists(self._offset_file):
offset_fh = open(self._offset_file, "r")
(self._offset_file_inode, self._offset) = \
[string.atoi(line.strip()) for line in offset_fh]
offset_fh.close()
if self._offset_file_inode != stat(self.filename).st_ino:
# The inode has changed, so the file might have been rotated.
# Look for the rotated file and process that if we find it.
self._rotated_logfile = self._determine_rotated_logfile()
def __iter__(self):
return self
def next(self):
"""
Return the next line in the file, updating the offset.
"""
try:
line = self._filehandle().next()
except StopIteration:
# we've reached the end of the file; if we're processing the
# rotated log file, we can continue with the actual file; otherwise
# update the offset file
if self._rotated_logfile:
self._rotated_logfile = None
self._fh.close()
self._offset = 0
# open up current logfile and continue
try:
line = self._filehandle().next()
except StopIteration: # oops, empty file
self._update_offset_file()
raise
else:
self._update_offset_file()
raise
if self.paranoid:
self._update_offset_file()
return line
def readlines(self):
"""
Read in all unread lines and return them as a list.
"""
return [line for line in self]
def read(self):
"""
Read in all unread lines and return them as a single string.
"""
lines = self.readlines()
if lines:
return ''.join(lines)
else:
return None
def _filehandle(self):
"""
Return a filehandle to the file being tailed, with the position set
to the current offset.
"""
if not self._fh or self._fh.closed:
filename = self._rotated_logfile or self.filename
self._fh = open(filename, "r")
self._fh.seek(self._offset)
return self._fh
def _update_offset_file(self):
"""
Update the offset file with the current inode and offset.
"""
offset = self._filehandle().tell()
inode = stat(self.filename).st_ino
fh = open(self._offset_file, "w")
fh.write("%s\n%s\n" % (inode, offset))
fh.close()
def _determine_rotated_logfile(self):
"""
We suspect the logfile has been rotated, so try to guess what the
rotated filename is, and return it.
"""
rotated_filename = self._check_rotated_filename_candidates()
if (rotated_filename and exists(rotated_filename) and
stat(rotated_filename).st_ino == self._offset_file_inode):
return rotated_filename
else:
return None
def _check_rotated_filename_candidates(self):
"""
Check for various rotated logfile filename patterns and return the first
match we find.
"""
# savelog(8)
candidate = "%s.0" % self.filename
if (exists(candidate) and exists("%s.1.gz" % self.filename) and
(stat(candidate).st_mtime > stat("%s.1.gz" % self.filename).st_mtime)):
return candidate
# logrotate(8)
candidate = "%s.1" % self.filename
if exists(candidate):
return candidate
# dateext rotation scheme
candidates = glob.glob("%s-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]" % self.filename)
if candidates:
candidates.sort()
return candidates[-1] # return most recent
# no match
return None
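    # For illustration: given filename "app.log", the candidates checked above are,
    # in order, "app.log.0" (savelog, only if "app.log.1.gz" exists and is older),
    # "app.log.1" (logrotate), and the newest "app.log-YYYYMMDD" match
    # (logrotate's dateext scheme). Filenames here are hypothetical examples.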
def main():
# command-line parsing
cmdline = OptionParser(usage="usage: %prog [options] logfile",
description="Print log file lines that have not been read.")
cmdline.add_option("--offset-file", "-o", action="store",
help="File to which offset data is written (default: <logfile>.offset).")
cmdline.add_option("--paranoid", "-p", action="store_true",
help="Update the offset file every time we read a line (as opposed to"
" only when we reach the end of the file).")
options, args = cmdline.parse_args()
if (len(args) != 1):
cmdline.error("Please provide a logfile to read.")
pygtail = Pygtail(args[0],
offset_file=options.offset_file,
paranoid=options.paranoid)
for line in pygtail:
sys.stdout.write(line)
if __name__ == "__main__":
main()
| mariodebian/server-stats-system-agent | sssa/pygtail.py | Python | gpl-2.0 | 6,429 |
from db import db
class ItemModel(db.Model):
__tablename__ = 'items'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
price = db.Column(db.Float(precision=2))
store_id = db.Column(db.Integer, db.ForeignKey('stores.id'))
store = db.relationship('StoreModel')
def __init__(self, name, price, store_id):
self.name = name
self.price = price
self.store_id = store_id
def json(self):
return { 'name' : self.name, 'price' : self.price }
@classmethod
def find_by_name(cls,name):
return cls.query.filter_by(name=name).first()
        # SELECT * FROM items WHERE name = ? LIMIT 1
def save_to_db(self):
        # A session in this context is a collection of objects
        # that we are going to write to the database.
        # We can add multiple objects to the session and then write them all at once;
        # in this case we add just one.
db.session.add(self)
db.session.commit()
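        # For example, several (hypothetical) items can be queued on the session
        # and written in a single commit:
        #   db.session.add(ItemModel('chair', 9.99, 1))
        #   db.session.add(ItemModel('table', 49.99, 1))
        #   db.session.commit()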
def delete_from_db(self):
db.session.delete(self)
        db.session.commit()
| ysabel31/Python | flask-06-SQL_Alchemy/code/models/item.py | Python | mit | 1,119 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-is-reachability/neighbors/neighbors/subTLVs/subTLVs/lan-adjacency-sid/sid/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of LAN Adjacency-SID.
"""
__slots__ = (
"_path_helper", "_extmethods", "__value", "__flags", "__weight", "__neighbor_id"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__neighbor_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="neighbor-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-is-reachability",
"neighbors",
"neighbors",
"subTLVs",
"subTLVs",
"lan-adjacency-sid",
"sid",
"state",
]
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/value (uint32)
YANG Description: LAN Adjacency-SID value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: LAN Adjacency-SID value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_flags(self):
"""
Getter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/flags (enumeration)
YANG Description: Flags associated with LAN-Adj-Segment-ID.
"""
return self.__flags
def _set_flags(self, v, load=False):
"""
Setter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/flags (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_flags is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flags() directly.
YANG Description: Flags associated with LAN-Adj-Segment-ID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """flags must be of a type compatible with enumeration""",
"defined-type": "openconfig-network-instance:enumeration",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ADDRESS_FAMILY': {}, 'BACKUP': {}, 'VALUE': {}, 'LOCAL': {}, 'SET': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=False)""",
}
)
self.__flags = t
if hasattr(self, "_set"):
self._set()
def _unset_flags(self):
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
def _get_weight(self):
"""
Getter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/weight (uint8)
YANG Description: Value that represents the weight of the Adj-SID for the purpose
of load balancing.
"""
return self.__weight
def _set_weight(self, v, load=False):
"""
Setter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/weight (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_weight is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_weight() directly.
YANG Description: Value that represents the weight of the Adj-SID for the purpose
of load balancing.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """weight must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__weight = t
if hasattr(self, "_set"):
self._set()
def _unset_weight(self):
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_neighbor_id(self):
"""
Getter method for neighbor_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/neighbor_id (oc-isis-types:system-id)
YANG Description: System ID of the neighbor associated with the LAN-Adj-Segment-ID
value.
"""
return self.__neighbor_id
def _set_neighbor_id(self, v, load=False):
"""
Setter method for neighbor_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/neighbor_id (oc-isis-types:system-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor_id() directly.
YANG Description: System ID of the neighbor associated with the LAN-Adj-Segment-ID
value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="neighbor-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """neighbor_id must be of a type compatible with oc-isis-types:system-id""",
"defined-type": "oc-isis-types:system-id",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}'}), is_leaf=True, yang_name="neighbor-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:system-id', is_config=False)""",
}
)
self.__neighbor_id = t
if hasattr(self, "_set"):
self._set()
def _unset_neighbor_id(self):
self.__neighbor_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="neighbor-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
value = __builtin__.property(_get_value)
flags = __builtin__.property(_get_flags)
weight = __builtin__.property(_get_weight)
neighbor_id = __builtin__.property(_get_neighbor_id)
_pyangbind_elements = OrderedDict(
[
("value", value),
("flags", flags),
("weight", weight),
("neighbor_id", neighbor_id),
]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-is-reachability/neighbors/neighbors/subTLVs/subTLVs/lan-adjacency-sid/sid/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of LAN Adjacency-SID.
"""
__slots__ = (
"_path_helper", "_extmethods", "__value", "__flags", "__weight", "__neighbor_id"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__neighbor_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="neighbor-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-is-reachability",
"neighbors",
"neighbors",
"subTLVs",
"subTLVs",
"lan-adjacency-sid",
"sid",
"state",
]
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/value (uint32)
YANG Description: LAN Adjacency-SID value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: LAN Adjacency-SID value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_flags(self):
"""
Getter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/flags (enumeration)
YANG Description: Flags associated with LAN-Adj-Segment-ID.
"""
return self.__flags
def _set_flags(self, v, load=False):
"""
Setter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/flags (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_flags is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flags() directly.
YANG Description: Flags associated with LAN-Adj-Segment-ID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """flags must be of a type compatible with enumeration""",
"defined-type": "openconfig-network-instance:enumeration",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ADDRESS_FAMILY': {}, 'BACKUP': {}, 'VALUE': {}, 'LOCAL': {}, 'SET': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=False)""",
}
)
self.__flags = t
if hasattr(self, "_set"):
self._set()
def _unset_flags(self):
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"ADDRESS_FAMILY": {},
"BACKUP": {},
"VALUE": {},
"LOCAL": {},
"SET": {},
},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
def _get_weight(self):
"""
Getter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/weight (uint8)
YANG Description: Value that represents the weight of the Adj-SID for the purpose
of load balancing.
"""
return self.__weight
def _set_weight(self, v, load=False):
"""
Setter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/weight (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_weight is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_weight() directly.
YANG Description: Value that represents the weight of the Adj-SID for the purpose
of load balancing.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """weight must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__weight = t
if hasattr(self, "_set"):
self._set()
def _unset_weight(self):
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_neighbor_id(self):
"""
Getter method for neighbor_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/neighbor_id (oc-isis-types:system-id)
YANG Description: System ID of the neighbor associated with the LAN-Adj-Segment-ID
value.
"""
return self.__neighbor_id
def _set_neighbor_id(self, v, load=False):
"""
Setter method for neighbor_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors/subTLVs/subTLVs/lan_adjacency_sid/sid/state/neighbor_id (oc-isis-types:system-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor_id() directly.
YANG Description: System ID of the neighbor associated with the LAN-Adj-Segment-ID
value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="neighbor-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """neighbor_id must be of a type compatible with oc-isis-types:system-id""",
"defined-type": "oc-isis-types:system-id",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}'}), is_leaf=True, yang_name="neighbor-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:system-id', is_config=False)""",
}
)
self.__neighbor_id = t
if hasattr(self, "_set"):
self._set()
def _unset_neighbor_id(self):
self.__neighbor_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="neighbor-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
value = __builtin__.property(_get_value)
flags = __builtin__.property(_get_flags)
weight = __builtin__.property(_get_weight)
neighbor_id = __builtin__.property(_get_neighbor_id)
_pyangbind_elements = OrderedDict(
[
("value", value),
("flags", flags),
("weight", weight),
("neighbor_id", neighbor_id),
]
)
    )
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_is_reachability/neighbors/neighbors_/subTLVs/subTLVs_/lan_adjacency_sid/sid/state/__init__.py | Python | apache-2.0 | 39,959 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Lightning Callbacks used when training."""
import os
from pytorch_lightning import callbacks
class TransformersModelCheckpoint(callbacks.ModelCheckpoint):
"""Saves model and tokenizer in Transformers format when ModelCheckpoint does save.
This way it is possible to simply load the model (without training hparameters)
using transformers.from_pretrained. Also adds an attribute .last_checkpoint_path.
"""
def on_train_start(self, trainer, pl_module):
super(TransformersModelCheckpoint, self).on_train_start(trainer, pl_module)
self._model = pl_module.model
self._tokenizer = pl_module.schema_tokenizer
def _save_model(self, filepath):
super(TransformersModelCheckpoint, self)._save_model(filepath)
self.last_checkpoint_path = filepath
save_path = os.path.dirname(filepath)
self._model.save_pretrained(save_path)
self._tokenizer.save(save_path)
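# A hedged usage sketch; the trainer wiring and monitored metric below are
# assumptions, not taken from this repository:
#
#     checkpoint_cb = TransformersModelCheckpoint(monitor="eval_loss")
#     trainer = pytorch_lightning.Trainer(callbacks=[checkpoint_cb])
#     trainer.fit(lightning_module)
#
# Afterwards, the directory containing checkpoint_cb.last_checkpoint_path also
# holds the Transformers-format model and tokenizer saved above, so it can be
# reloaded with from_pretrained() without the training hyperparameters.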
| googleinterns/new-semantic-parsing | new_semantic_parsing/callbacks.py | Python | apache-2.0 | 1,597 |
"""
Example subclass of the Graph class.
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
__docformat__ = "restructuredtext en"
from networkx import Graph
from networkx.exception import NetworkXException, NetworkXError
import networkx.convert as convert
from copy import deepcopy
class PrintGraph(Graph):
"""
Example subclass of the Graph class.
Prints activity log to file or standard output.
"""
def __init__(self, data=None, name='', file=None, **attr):
Graph.__init__(self, data=data,name=name,**attr)
if file is None:
import sys
self.fh=sys.stdout
else:
self.fh=open(file,'w')
def add_node(self, n, attr_dict=None, **attr):
Graph.add_node(self,n,attr_dict=attr_dict,**attr)
self.fh.write("Add node: %s\n"%n)
def add_nodes_from(self, nodes, **attr):
for n in nodes:
self.add_node(n, **attr)
def remove_node(self,n):
Graph.remove_node(self,n)
self.fh.write("Remove node: %s\n"%n)
def remove_nodes_from(self, nodes):
adj = self.adj
for n in nodes:
self.remove_node(n)
def add_edge(self, u, v, attr_dict=None, **attr):
Graph.add_edge(self,u,v,attr_dict=attr_dict,**attr)
self.fh.write("Add edge: %s-%s\n"%(u,v))
def add_edges_from(self, ebunch, attr_dict=None, **attr):
for e in ebunch:
u,v=e[0:2]
self.add_edge(u,v,attr_dict=attr_dict,**attr)
def remove_edge(self, u, v):
Graph.remove_edge(self,u,v)
self.fh.write("Remove edge: %s-%s\n"%(u,v))
def remove_edges_from(self, ebunch):
for e in ebunch:
u,v=e[0:2]
self.remove_edge(u,v)
def clear(self):
self.name = ''
self.adj.clear()
self.node.clear()
self.graph.clear()
self.fh.write("Clear graph\n")
def subgraph(self, nbunch, copy=True):
        # subgraph is overridden here since it can destroy edges in the
        # graph (copy=False) and we want to keep track of all changes.
        #
        # Also, for copy=True, Graph() uses dictionary assignment for speed;
        # here we use H.add_edge() so every change goes through the logging methods.
        bunch = set(self.nbunch_iter(nbunch))
if not copy:
# remove all nodes (and attached edges) not in nbunch
self.remove_nodes_from([n for n in self if n not in bunch])
self.name = "Subgraph of (%s)"%(self.name)
return self
else:
# create new graph and copy subgraph into it
H = self.__class__()
H.name = "Subgraph of (%s)"%(self.name)
# add nodes
H.add_nodes_from(bunch)
# add edges
seen=set()
for u,nbrs in self.adjacency_iter():
if u in bunch:
for v,datadict in nbrs.items():
if v in bunch and v not in seen:
dd=deepcopy(datadict)
H.add_edge(u,v,dd)
seen.add(u)
# copy node and graph attr dicts
H.node=dict( (n,deepcopy(d))
for (n,d) in self.node.items() if n in H)
H.graph=deepcopy(self.graph)
return H
if __name__=='__main__':
G=PrintGraph()
G.add_node('foo')
G.add_nodes_from('bar',weight=8)
G.remove_node('b')
G.remove_nodes_from('ar')
print(G.nodes(data=True))
G.add_edge(0,1,weight=10)
print(G.edges(data=True))
G.remove_edge(0,1)
G.add_edges_from(list(zip(list(range(0o3)),list(range(1,4)))),weight=10)
print(G.edges(data=True))
G.remove_edges_from(list(zip(list(range(0o3)),list(range(1,4)))))
print(G.edges(data=True))
G=PrintGraph()
G.add_path(list(range(10)))
print("subgraph")
H1=G.subgraph(list(range(4)),copy=False)
H2=G.subgraph(list(range(4)),copy=False)
print(H1.edges())
print(H2.edges())
| JFriel/honours_project | networkx/examples/subclass/printgraph.py | Python | gpl-3.0 | 4,152 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# git-agile documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 6 13:21:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "table"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
from datetime import date
##################
from recommonmark.parser import CommonMarkParser
source_suffix = [".rst", ".md"]
source_parsers = {
".md": CommonMarkParser,
}
docs_path = os.path.dirname(__file__)
base_path = os.path.dirname(docs_path)
ext_path = os.path.join(docs_path, "_ext")
sys.path.insert(0, base_path)
sys.path.insert(0, ext_path)
year = date.today().year
from ccy import __version__ as version
release = version
# General information about the project.
from setup import meta
project = meta["name"]
copyright = "%s, %s" % (year, meta["author"])
author = meta["author"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logo.jpg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "git-agiledoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "git-agile.tex", "git-agile Documentation", "Quantmind", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "git-agile", "git-agile Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"git-agile",
"git-agile Documentation",
author,
"git-agile",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| lsbardel/ccy | docs/conf.py | Python | bsd-3-clause | 9,576 |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils import six
class ShowFieldBase(object):
""" base class for the ShowField... model mixins, does the work """
# cause nicer multiline PolymorphicQuery output:
polymorphic_query_multiline_output = True
polymorphic_showfield_type = False
polymorphic_showfield_content = False
# these may be overridden by the user
polymorphic_showfield_max_line_width = None
polymorphic_showfield_max_field_width = 20
polymorphic_showfield_old_format = False
def __repr__(self):
return self.__unicode__()
def _showfields_get_content(self, field_name, field_type=type(None)):
"helper for __unicode__"
content = getattr(self, field_name)
if self.polymorphic_showfield_old_format:
out = ': '
else:
out = ' '
if issubclass(field_type, models.ForeignKey):
if content is None:
out += 'None'
else:
out += content.__class__.__name__
elif issubclass(field_type, models.ManyToManyField):
out += '%d' % content.count()
elif isinstance(content, six.integer_types):
out += str(content)
elif content is None:
out += 'None'
else:
txt = str(content)
if len(txt) > self.polymorphic_showfield_max_field_width:
txt = txt[:self.polymorphic_showfield_max_field_width - 2] + \
'..'
out += '"' + txt + '"'
return out
def _showfields_add_regular_fields(self, parts):
"helper for __unicode__"
done_fields = set()
for field in self._meta.fields + self._meta.many_to_many:
if field.name in self.polymorphic_internal_model_fields or \
'_ptr' in field.name:
continue
if field.name in done_fields:
continue # work around django diamond inheritance problem
done_fields.add(field.name)
out = field.name
# if this is the standard primary key named "id", print it as we
# did with older versions of django_polymorphic:
if field.primary_key and \
field.name == 'id' and \
type(field) == models.AutoField:
out += ' ' + str(getattr(self, field.name))
# otherwise, display it just like all other fields (with correct
# type, shortened content etc.)
else:
if self.polymorphic_showfield_type:
out += ' (' + type(field).__name__
if field.primary_key:
out += '/pk'
out += ')'
if self.polymorphic_showfield_content:
out += self._showfields_get_content(
field.name, type(field)
)
parts.append((False, out, ','))
def _showfields_add_dynamic_fields(self, field_list, title, parts):
"helper for __unicode__"
parts.append((True, '- ' + title, ':'))
for field_name in field_list:
out = field_name
content = getattr(self, field_name)
if self.polymorphic_showfield_type:
out += ' (' + type(content).__name__ + ')'
if self.polymorphic_showfield_content:
out += self._showfields_get_content(field_name)
parts.append((False, out, ','))
def __unicode__(self):
# create list ("parts") containing one tuple for each title/field:
# ( bool: new section , item-text , separator to use after item )
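        # e.g. (illustrative) for a model "ModelB" parts could end up as
        #   [(True, 'ModelB', ':'), (False, 'id 1', ','), (False, 'field1 (CharField)', ',')]
        # depending on which polymorphic_showfield_* flags are set.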
# start with model name
parts = [(True, self.__class__.__name__, ':')]
# add all regular fields
self._showfields_add_regular_fields(parts)
# add annotate fields
if hasattr(self, 'polymorphic_annotate_names'):
self._showfields_add_dynamic_fields(
self.polymorphic_annotate_names, 'Ann', parts
)
# add extra() select fields
if hasattr(self, 'polymorphic_extra_select_names'):
self._showfields_add_dynamic_fields(
self.polymorphic_extra_select_names, 'Extra', parts
)
# format result
indent = len(self.__class__.__name__) + 5
indentstr = ''.rjust(indent)
out = ''
xpos = 0
possible_line_break_pos = None
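        # possible_line_break_pos tracks the most recent point in `out` where the text may
        # be wrapped onto a new line once polymorphic_showfield_max_line_width is exceeded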
for i in range(len(parts)):
new_section, p, separator = parts[i]
final = (i == len(parts) - 1)
if not final:
next_new_section, _, _ = parts[i + 1]
if self.polymorphic_showfield_max_line_width and \
xpos + len(p) > self.polymorphic_showfield_max_line_width and \
possible_line_break_pos is not None:
rest = out[possible_line_break_pos:]
out = out[:possible_line_break_pos]
out += '\n' + indentstr + rest
xpos = indent + len(rest)
out += p
xpos += len(p)
if not final:
if not next_new_section:
out += separator
xpos += len(separator)
out += ' '
xpos += 1
if not new_section:
possible_line_break_pos = len(out)
return '<' + out + '>'
class ShowFieldType(ShowFieldBase):
""" model mixin that shows the object's class and it's field types """
polymorphic_showfield_type = True
class ShowFieldContent(ShowFieldBase):
""" model mixin that shows the object's class, it's fields and field
contents """
polymorphic_showfield_content = True
class ShowFieldTypeAndContent(ShowFieldBase):
""" model mixin, like ShowFieldContent, but also show field types """
polymorphic_showfield_type = True
polymorphic_showfield_content = True
# compatibility with old class names
ShowFieldTypes = ShowFieldType
ShowFields = ShowFieldContent
ShowFieldsAndTypes = ShowFieldTypeAndContent
| hobarrera/django-polymorphic-ng | polymorphic/showfields.py | Python | bsd-3-clause | 6,130 |
"""
Automator code
Functions to convert from a supercell dictionary (output from a Diffuser) into a tarball
that contains all of the input files in an organized directory structure to run the
atomic-scale transition state calculations. This includes:
1. All positions in POSCAR format (POSCAR files for states to relax, POS as reference for transition endpoints that need to be relaxed)
2. Transformation information from relaxed states to initial states.
3. INCAR files for relaxation and NEB runs; KPOINTS for each.
4. perl script to transform CONTCAR output from a state relaxation to NEB endpoints.
5. perl script to linearly interpolate between NEB endpoints.*
6. Makefile to run NEB construction.
*Note:* the NEB interpolator script (nebmake.pl) is part of the `VTST scripts <http://theory.cm.utexas.edu/vtsttools/scripts.html>`_.
"""
__author__ = 'Dallas R. Trinkle'
import numpy as np
import collections, copy, itertools, warnings
from onsager import crystal, supercell
import tarfile, time, io, json
import pkg_resources
def map2string(tag, groupop, mapping):
"""
Takes in a map: tag, groupop, mapping and constructs a string representation
to be dumped to a file. If we want to call using the tuple, ``map2string(*(map))`` will suffice.
:param tag: string of initial state to rotate
:param groupop: see crystal.GroupOp; we use the rot and trans. This is in the supercell coord.
:param mapping: in "chemorder" format; list by chemistry of lists of indices of position
in initial cell to use.
:return string_rep: string representation (to be used by an external script)
"""
string_rep = tag + """
{rot[0][0]:3d} {rot[0][1]:3d} {rot[0][2]:3d}
{rot[1][0]:3d} {rot[1][1]:3d} {rot[1][2]:3d}
{rot[2][0]:3d} {rot[2][1]:3d} {rot[2][2]:3d}
{trans[0]:.16f} {trans[1]:.16f} {trans[2]:.16f}
""".format(rot=groupop.rot, trans=groupop.trans)
# the index shift needs to be added for each subsequent chemistry
indexshift = [0] + list(itertools.accumulate(len(remap) for remap in mapping))
string_rep += ' '.join(['{}'.format(m + shift)
for remap, shift in zip(mapping, indexshift)
for m in remap])
# needs a trailing newline
return string_rep + '\n'
### Some default input files to use for our runs, and a sed formatted script to recreate INCARs
SEDstring = "s/{{system}}/{system}/\n"
INCARrelax = """SYSTEM = {system}
PREC = High
ISIF = 2
EDIFF = 1E-8
EDIFFG = -10E-3
IBRION = 2
NSW = 50
ISMEAR = 1
SIGMA = 0.1
# ENCUT =
# NGX =
# NGY =
# NGZ =
# NGXF =
# NGYF =
# NGZF =
# NPAR =
LWAVE = .FALSE.
LCHARG = .FALSE.
LREAL = .FALSE.
VOSKOWN = 1
"""
INCARNEB = INCARrelax + \
"""IMAGES = 1
SPRING = -5
LCLIMB = .TRUE.
NELMIN = 4
NFREE = 10
"""
KPOINTSgammaonly = """Gamma
1
Reciprocal
0. 0. 0. 1.
"""
KPOINTS_MP = """Monkhorst-Pack mesh {N1}x{N2}x{N3}
0
Monkhorst
{N1} {N2} {N3}
0. 0. 0.
"""
KPOINTS_Gamma = """Gamma-centered mesh {N1}x{N2}x{N3}
0
Gamma
{N1} {N2} {N3}
0. 0. 0.
"""
MAKEFILE = r"""# Makefile to construct NEB input from relaxation output
# we set this so that the makefile doesn't use builtin implicit rules
MAKEFLAGS = -rk
makeneb := "./nebmake.pl"
transform := "./trans.pl"
Nimages ?= 1
.PHONY: help
target := $(foreach neb, $(wildcard neb.*), $(neb)/01/POSCAR)
target: $(target)
help:
@echo "# Creates input POSCAR for NEB runs, once relaxation runs are complete"
@echo "# Uses CONTCAR in relaxation directories to create initial run geometry"
@echo "# environment variable: Nimages (default: $(Nimages))"
@echo "# target files:"
@echo $(target) | sed 's/ /\n/g'
@echo "# default target: all"
neb.%: neb.%/01/POSCAR neb.%/POSCAR.init neb.%/POSCAR.final
neb.%/01/POSCAR: neb.%/POSCAR.init neb.%/POSCAR.final
@$(makeneb) $^ $(Nimages)
neb.%/POSCAR.init:
@$(transform) $^ > $@
neb.%/POSCAR.final:
@$(transform) $^ > $@
###############################################################
# structure of NEB runs:
"""
def supercelltar(tar, superdict, filemode=0o664, directmode=0o775, timestamp=None,
INCARrelax=INCARrelax, INCARNEB=INCARNEB, KPOINTS=KPOINTSgammaonly, basedir="",
statename='relax.', transitionname='neb.', IDformat='{:02d}',
JSONdict='tags.json', YAMLdef='supercell.yaml'):
"""
Takes in a tarfile (needs to be open for writing) and a supercelldict (from a
diffuser) and creates the full directory structure inside the tarfile. Best used in
a form like
::
with tarfile.open('supercells.tar.gz', mode='w:gz') as tar:
automator.supercelltar(tar, supercelldict)
:param tar: tarfile open for writing; may contain other files in advance.
:param superdict: dictionary of ``states``, ``transitions``, ``transmapping``, ``indices`` that
correspond to dictionaries with tags; the final tag ``reference`` is the basesupercell
for calculations without defects.
* superdict['states'][i] = supercell of state;
* superdict['transitions'][n] = (supercell initial, supercell final);
* superdict['transmapping'][n] = ((site tag, groupop, mapping), (site tag, groupop, mapping))
* superdict['indices'][tag] = (type, index) of tag, where tag is either a state or transition tag; or...
* superdict['indices'][tag] = index of tag, where tag is either a state or transition tag.
* superdict['reference'] = (optional) supercell reference, no defects
:param filemode: mode to use for files (default: 664)
:param directmode: mode to use for directories (default: 775)
:param timestamp: UNIX time for files; if None, use current time (default)
:param INCARrelax: contents of INCAR file to use for relaxation; must contain {system} to be replaced
by tag value (default: automator.INCARrelax)
:param INCARNEB: contents of INCAR file to use for NEB; must contain {system} to be replaced
by tag value (default: automator.INCARNEB)
:param KPOINTS: contents of KPOINTS file (default: gamma-point only calculation);
if None or empty, no KPOINTS file at all
:param basedir: prepended to all files/directories (default: '')
:param statename: prepended to all state names, before 2 digit number (default: relax.)
:param transitionname: prepended to all transition names, before 2 digit number (default: neb.)
:param IDformat: format for integer tags (default: {:02d})
:param JSONdict: name of JSON file storing the tags corresponding to each directory (default: tags.json)
:param YAMLdef: YAML file containing full definition of supercells, relationship, etc. (default: supercell.yaml);
set to None to not output. **may want to change this to None for the future**
"""
if timestamp is None: timestamp = time.time()
if len(basedir) > 0 and basedir[-1] != '/': basedir += '/'
kpoints = not ((KPOINTS is None) or (KPOINTS == ""))
def addfile(filename, strdata, executable=False):
info = tarfile.TarInfo(basedir + filename)
info.mode, info.mtime = filemode, timestamp
if executable: info.mode = directmode
info.size = len(strdata.encode('ascii'))
tar.addfile(info, io.BytesIO(strdata.encode('ascii')))
def adddirectory(dirname):
info = tarfile.TarInfo(basedir + dirname)
info.type = tarfile.DIRTYPE
info.mode, info.mtime = directmode, timestamp
tar.addfile(info)
def addsymlink(linkname, target):
info = tarfile.TarInfo(basedir + linkname)
info.type = tarfile.SYMTYPE
info.mode, info.mtime = filemode, timestamp
info.linkname = target
tar.addfile(info)
# our tags make for troublesome directory names; construct a mapping:
states, transitions, transmapping = superdict['states'], superdict['transitions'], superdict['transmapping']
# we do a reverse sorting on state keys, so that vacancies and complexes are first; we use
# normal order for the transitions.
dirmapping = {k: statename + IDformat.format(n)
for n, k in enumerate(sorted(states.keys(), reverse=True))}
for n, k in enumerate(sorted(transitions.keys())):
dirmapping[k] = transitionname + IDformat.format(n)
tagmapping = {v: k for k, v in dirmapping.items()}
# add the common VASP input files: (weird construction to check if kpoints is True)
for filename, strdata in (('INCAR.relax', INCARrelax), ('INCAR.NEB', INCARNEB)) + \
((('KPOINTS', KPOINTS),) if kpoints else tuple()):
addfile(filename, strdata)
addfile('trans.pl', str(pkg_resources.resource_string(__name__, 'trans.pl'), 'ascii'), executable=True)
addfile('nebmake.pl', str(pkg_resources.resource_string(__name__, 'nebmake.pl'), 'ascii'), executable=True)
addfile('Vasp.pm', str(pkg_resources.resource_string(__name__, 'Vasp.pm'), 'ascii'))
# now, go through the states:
if 'reference' in superdict:
addfile('POSCAR', superdict['reference'].POSCAR('Defect-free reference'))
for tag, super in states.items():
# directory first
dirname = dirmapping[tag]
adddirectory(dirname)
# POSCAR file next
addfile(dirname + '/POSCAR', super.POSCAR(tag))
addfile(dirname + '/INCAR', INCARrelax.format(system=tag))
addfile(dirname + '/incar.sed', SEDstring.format(system=tag))
if kpoints: addsymlink(dirname + '/KPOINTS', '../KPOINTS')
addsymlink(dirname + '/POTCAR', '../POTCAR')
# and the transitions:
for tag, (super0, super1) in transitions.items():
# directory first
dirname = dirmapping[tag]
adddirectory(dirname)
# POS/POSCAR files next
filename = dirname + '/POSCAR.init' \
if superdict['transmapping'][tag][0] is None \
else dirname + '/POS.init'
addfile(filename, super0.POSCAR('initial ' + tag))
filename = dirname + '/POSCAR.final' \
if superdict['transmapping'][tag][1] is None \
else dirname + '/POS.final'
addfile(filename, super1.POSCAR('final ' + tag))
addfile(dirname + '/INCAR', INCARNEB.format(system=tag))
addfile(dirname + '/incar.sed', SEDstring.format(system=tag))
if kpoints: addsymlink(dirname + '/KPOINTS', '../KPOINTS')
addsymlink(dirname + '/POTCAR', '../POTCAR')
# and the transition mappings:
Makefile = MAKEFILE
relaxNEB = {}
for tag in sorted(transmapping.keys()):
dirname = dirmapping[tag]
for m, t in ((transmapping[tag][0], 'init'), (transmapping[tag][1], 'final')):
if m is not None:
relax = dirmapping[m[0]]
addfile(dirname + '/trans.' + t, map2string(relax, m[1], m[2]))
Makefile += \
"{neb}/POSCAR.{type}: {neb}/trans.{type} {relax}/CONTCAR\n".format(neb=dirname,
type=t, relax=relax)
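                # e.g. with the default naming this appends a dependency line such as
                # "neb.00/POSCAR.init: neb.00/trans.init relax.03/CONTCAR" (tags illustrative)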
if relax not in relaxNEB: relaxNEB[relax] = {dirname}
else: relaxNEB[relax].add(dirname)
addfile('Makefile', Makefile)
for relax, NEBset in relaxNEB.items():
addfile(relax + '/NEBlist', '\n'.join(k for k in sorted(NEBset)) + '\n')
# JSON dictionary connecting directories and tags: (needs a trailing newline?)
addfile(JSONdict, json.dumps(tagmapping, indent=4, sort_keys=True) + '\n')
# YAML representation of supercell:
if YAMLdef is not None: addfile(YAMLdef, crystal.yaml.dump(superdict))
| DallasTrinkle/Onsager | onsager/automator.py | Python | mit | 11,559 |
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import CharField, DateField
from django.test import TestCase as DjangoTestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils import translation
from django.utils.unittest import TestCase
from . import models
from .widgetadmin import site as widget_admin_site
admin_static_prefix = lambda: {
'ADMIN_STATIC_PREFIX': "%sadmin/" % settings.STATIC_URL,
}
class AdminFormfieldForDBFieldTests(TestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
# Check that we got a field of the right type
self.assertTrue(
isinstance(widget, widgetclass),
"Wrong widget for %s.%s: expected %s, got %s" % \
(model.__class__.__name__, fieldname, widgetclass, type(widget))
)
# Return the formfield so that other tests can continue
return ff
def testDateField(self):
self.assertFormfield(models.Event, 'start_date', widgets.AdminDateWidget)
def testDateTimeField(self):
self.assertFormfield(models.Member, 'birthdate', widgets.AdminSplitDateTime)
def testTimeField(self):
self.assertFormfield(models.Event, 'start_time', widgets.AdminTimeWidget)
def testTextField(self):
self.assertFormfield(models.Event, 'description', widgets.AdminTextareaWidget)
def testURLField(self):
self.assertFormfield(models.Event, 'link', widgets.AdminURLFieldWidget)
def testIntegerField(self):
self.assertFormfield(models.Event, 'min_age', widgets.AdminIntegerFieldWidget)
def testCharField(self):
self.assertFormfield(models.Member, 'name', widgets.AdminTextInputWidget)
def testEmailField(self):
self.assertFormfield(models.Member, 'email', widgets.AdminEmailInputWidget)
def testFileField(self):
self.assertFormfield(models.Album, 'cover_art', widgets.AdminFileWidget)
def testForeignKey(self):
self.assertFormfield(models.Event, 'band', forms.Select)
def testRawIDForeignKey(self):
self.assertFormfield(models.Event, 'band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['band'])
def testRadioFieldsForeignKey(self):
ff = self.assertFormfield(models.Event, 'band', widgets.AdminRadioSelect,
radio_fields={'band':admin.VERTICAL})
self.assertEqual(ff.empty_label, None)
def testManyToMany(self):
self.assertFormfield(models.Band, 'members', forms.SelectMultiple)
def testRawIDManyTOMany(self):
self.assertFormfield(models.Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def testFilteredManyToMany(self):
self.assertFormfield(models.Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def testFormfieldOverrides(self):
self.assertFormfield(models.Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def testFormfieldOverridesWidgetInstances(self):
"""
Test that widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size':'10'})}
}
ma = BandAdmin(models.Band, admin.site)
f1 = ma.formfield_for_dbfield(models.Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(models.Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def testFieldWithChoices(self):
self.assertFormfield(models.Member, 'gender', forms.Select)
def testChoicesWithRadioFields(self):
self.assertFormfield(models.Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender':admin.VERTICAL})
def testInheritance(self):
self.assertFormfield(models.Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical=['companies']
self.assertFormfield(models.Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(models.Advisor, admin.site)
f = ma.formfield_for_dbfield(models.Advisor._meta.get_field('companies'), request=None)
self.assertEqual(six.text_type(f.help_text), ' Hold down "Control", or "Command" on a Mac, to select more than one.')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminFormfieldForDBFieldWithRequestTests(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
def testFilterChoicesByRequestUser(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.login(username="super", password="secret")
response = self.client.get("/widget_admin/admin_widgets/cartire/add/")
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagon Passat")
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminForeignKeyWidgetChangeList(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
admin_root = '/widget_admin'
def setUp(self):
self.client.login(username="super", password="secret")
def tearDown(self):
self.client.logout()
def test_changelist_foreignkey(self):
response = self.client.get('%s/admin_widgets/car/' % self.admin_root)
self.assertContains(response, '%s/auth/user/add/' % self.admin_root)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminForeignKeyRawIdWidget(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
admin_root = '/widget_admin'
def setUp(self):
self.client.login(username="super", password="secret")
def tearDown(self):
self.client.logout()
def test_nonexistent_target_id(self):
band = models.Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"band": '%s' % pk,
}
# Try posting with a non-existent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post('%s/admin_widgets/event/add/' % self.admin_root,
post_data)
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post('%s/admin_widgets/event/add/' % self.admin_root,
{"band": test_str})
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
class FilteredSelectMultipleWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.FilteredSelectMultiple('test', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilter">\n</select><script type="text/javascript">addEvent(window, "load", function(e) {SelectFilter.init("id_test", "test", 0, "%(ADMIN_STATIC_PREFIX)s"); });</script>\n' % admin_static_prefix()
)
def test_stacked_render(self):
w = widgets.FilteredSelectMultiple('test', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilterstacked">\n</select><script type="text/javascript">addEvent(window, "load", function(e) {SelectFilter.init("id_test", "test", 1, "%(ADMIN_STATIC_PREFIX)s"); });</script>\n' % admin_static_prefix()
)
class AdminDateWidgetTest(DjangoTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10" />',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20" />',
)
class AdminTimeWidgetTest(DjangoTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8" />',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20" />',
)
class AdminSplitDateTimeWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">Date: <input value="2007-12-01" type="text" class="vDateField" name="test_0" size="10" /><br />Time: <input value="09:30:00" type="text" class="vTimeField" name="test_1" size="8" /></p>',
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with self.settings(USE_L10N=True):
with translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">Datum: <input value="01.12.2007" type="text" class="vDateField" name="test_0" size="10" /><br />Zeit: <input value="09:30:00" type="text" class="vTimeField" name="test_1" size="8" /></p>',
)
class AdminURLWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url" />'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">http://example.com</a><br />Change:<input class="vURLField" name="test" type="url" value="http://example.com" /></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">http://example-äüö.com</a><br />Change:<input class="vURLField" name="test" type="url" value="http://example-äüö.com" /></p>'
)
def test_render_quoting(self):
# WARNING: Don't use assertHTMLEqual in that testcase!
# assertHTMLEqual will get rid of some escapes which are tested here!
w = widgets.AdminURLFieldWidget()
self.assertEqual(
w.render('test', 'http://example.com/<sometag>some text</sometag>'),
'<p class="url">Currently: <a href="http://example.com/%3Csometag%3Esome%20text%3C/sometag%3E">http://example.com/<sometag>some text</sometag></a><br />Change: <input class="vURLField" name="test" type="url" value="http://example.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://example-äüö.com/<sometag>some text</sometag>'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com/%3Csometag%3Esome%20text%3C/sometag%3E">http://example-äüö.com/<sometag>some text</sometag></a><br />Change: <input class="vURLField" name="test" type="url" value="http://example-äüö.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"'),
'<p class="url">Currently: <a href="http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22">http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"</a><br />Change: <input class="vURLField" name="test" type="url" value="http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"" /></p>'
)
class AdminFileWidgetTest(DjangoTestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">albums\hybrid_theory.jpg</a> <span class="clearable-file-input"><input type="checkbox" name="test-clear" id="test-clear_id" /> <label for="test-clear_id">Clear</label></span><br />Change: <input type="file" name="test" /></p>' % { 'STORAGE_URL': default_storage.url('') },
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test" />',
)
class ForeignKeyRawIdWidgetTest(DjangoTestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = models.Album._meta.get_field('band').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.pk, attrs={}),
'<input type="text" name="test" value="%(bandpk)s" class="vForeignKeyRawIdAdminField" /><a href="/widget_admin/admin_widgets/band/?t=id" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_STATIC_PREFIX)simg/selector-search.gif" width="16" height="16" alt="Lookup" /></a> <strong>Linkin Park</strong>' % dict(admin_static_prefix(), bandpk=band.pk)
)
def test_relations_to_non_primary_key(self):
# Check that ForeignKeyRawIdWidget works with fields which aren't
# related to the model's primary key.
apple = models.Inventory.objects.create(barcode=86, name='Apple')
models.Inventory.objects.create(barcode=22, name='Pear')
core = models.Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = models.Inventory._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}),
'<input type="text" name="test" value="86" class="vForeignKeyRawIdAdminField" /><a href="/widget_admin/admin_widgets/inventory/?t=barcode" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_STATIC_PREFIX)simg/selector-search.gif" width="16" height="16" alt="Lookup" /></a> <strong>Apple</strong>' % admin_static_prefix()
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = models.Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = models.Bee._meta.get_field('honeycomb').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s" /> <strong>Honeycomb object</strong>' % {'hcombpk': big_honeycomb.pk}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = models.Individual.objects.create(name='Subject #1')
models.Individual.objects.create(name='Child', parent=subject1)
rel = models.Individual._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s" /> <strong>Individual object</strong>' % {'subj1pk': subject1.pk}
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = models.Inventory._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = models.Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = models.Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}),
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField" /><a href="/widget_admin/admin_widgets/inventory/?t=barcode" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_STATIC_PREFIX)simg/selector-search.gif" width="16" height="16" alt="Lookup" /></a> <strong>Hidden</strong>' % admin_static_prefix()
)
class ManyToManyRawIdWidgetTest(DjangoTestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
m1 = models.Member.objects.create(name='Chester')
m2 = models.Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = models.Band._meta.get_field('members').rel
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}),
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField" /><a href="/widget_admin/admin_widgets/member/" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="/static/admin/img/selector-search.gif" width="16" height="16" alt="Lookup" /></a>' % dict(admin_static_prefix(), m1pk=m1.pk, m2pk=m2.pk)
)
self.assertHTMLEqual(
w.render('test', [m1.pk]),
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField" /><a href="/widget_admin/admin_widgets/member/" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_STATIC_PREFIX)simg/selector-search.gif" width="16" height="16" alt="Lookup" /></a>' % dict(admin_static_prefix(), m1pk=m1.pk)
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = models.Advisor.objects.create(name='Rockstar Techie')
c1 = models.Company.objects.create(name='Doodle')
c2 = models.Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = models.Advisor._meta.get_field('companies').rel
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s" />' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s" />' % {'c1pk': c1.pk}
)
class RelatedFieldWidgetWrapperTests(DjangoTestCase):
def test_no_can_add_related(self):
rel = models.Individual._meta.get_field('parent').rel
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class DateTimePickerSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
urls = "admin_widgets.urls"
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_show_hide_date_time_picker_widgets(self):
"""
Ensure that pressing the ESC key closes the date and time picker
widgets.
Refs #17064.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
'/admin_widgets/member/add/'))
# First, with the date picker widget ---------------------------------
# Check that the date picker is hidden
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Check that the date picker is visible
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the date picker is hidden again
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Then, with the time picker widget ----------------------------------
# Check that the time picker is hidden
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon
self.selenium.find_element_by_id('clocklink0').click()
# Check that the time picker is visible
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the time picker is hidden again
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
class DateTimePickerSeleniumChromeTests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerSeleniumIETests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class HorizontalVerticalFilterSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
urls = "admin_widgets.urls"
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
self.lisa = models.Student.objects.create(name='Lisa')
self.john = models.Student.objects.create(name='John')
self.bob = models.Student.objects.create(name='Bob')
self.peter = models.Student.objects.create(name='Peter')
self.jenny = models.Student.objects.create(name='Jenny')
self.jason = models.Student.objects.create(name='Jason')
self.cliff = models.Student.objects.create(name='Cliff')
self.arthur = models.Student.objects.create(name='Arthur')
self.school = models.School.objects.create(name='School of Awesome')
super(HorizontalVerticalFilterSeleniumFirefoxTests, self).setUp()
def assertActiveButtons(self, mode, field_name, choose, remove,
choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
            # There's no 'Choose all' button in vertical mode, so individually
# select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
            # There's no 'Remove all' button in vertical mode, so individually
# select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
self.get_select_option(from_box, str(self.lisa.id)).click()
self.get_select_option(from_box, str(self.jason.id)).click()
self.get_select_option(from_box, str(self.bob.id)).click()
self.get_select_option(from_box, str(self.john.id)).click()
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id)])
# Remove some options -------------------------------------------------
self.get_select_option(to_box, str(self.lisa.id)).click()
self.get_select_option(to_box, str(self.bob.id)).click()
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.arthur.id)).click()
self.get_select_option(from_box, str(self.cliff.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id)])
def test_basic(self):
self.school.students = [self.lisa, self.peter]
self.school.alumni = [self.lisa, self.peter]
self.school.save()
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, '/admin_widgets/school/%s/' % self.school.id))
self.wait_page_loaded()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()),
[self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Ensure that typing in the search box filters out options displayed in
the 'from' box.
"""
from selenium.webdriver.common.keys import Keys
self.school.students = [self.lisa, self.peter]
self.school.alumni = [self.lisa, self.peter]
self.school.save()
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, '/admin_widgets/school/%s/' % self.school.id))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = '#id_%s_add_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
input = self.selenium.find_element_by_css_selector('#id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# -----------------------------------------------------------------
            # Check that choosing a filtered option sends it properly to the
# 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.get_select_option(from_box, str(self.jason.id)).click()
self.selenium.find_element_by_css_selector(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.jason.id)])
self.get_select_option(to_box, str(self.lisa.id)).click()
self.selenium.find_element_by_css_selector(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()),
[self.jason, self.peter])
class HorizontalVerticalFilterSeleniumChromeTests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class HorizontalVerticalFilterSeleniumIETests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| dex4er/django | tests/admin_widgets/tests.py | Python | bsd-3-clause | 38,074 |
""" Class to handle VOTable
Created: 2005-05-31 by Shui Hung Kwok, shkwok at computer.org
See http://www.ivoa.net/Documents/latest/VOT.html .
"""
import sys
from types import *
import xml.sax
import xml.sax.handler
class VONode (object):
""" Class representing an XML node of a VOTable
"""
def __init__ (self, tagname=('','')):
self._tagname = tagname
self._nodeList = []
self._attributes = {}
def addNode (self, node):
self._nodeList.append (node)
if not isinstance (node, (StringType, UnicodeType)):
name = node.getNamePart ()
try:
val = self.__dict__[name]
if isinstance (val, ListType):
val.append (node)
else:
self.__dict__[name] = [val, node]
except:
self.__dict__[name] = node
else:
self.content = node
def addAttribute (self, attr):
name,value = attr
self._attributes[name] = value
self.__dict__[name] = value
def addAttributes (self, attrs):
self._attributes.update (attrs)
for k,v in attrs.items ():
self.__dict__[k[1]] = v
def __getitem__ (self, idx):
return self._nodeList[idx].getContent ()
def getAttribute (self, name):
""" Returns attribute by name or '' if not found """
return self._attributes.get (name)
def getAttributes (self):
""" Returns all attributes.
"""
res = {}
for (ns,n),at in self._attributes.items():
res[n] = at
return res
def getNodeList (self):
""" Returns a list of nodes that are of type VONode
"""
res = []
for node in self._nodeList:
try:
l = node._nodeList
res.append (node)
except Exception, e:
pass
return res
def getContent (self):
""" Returns all strings of the node.
"""
res = []
for node in self._nodeList:
try:
l = node.lower ()
res.append (node)
except Exception, e:
pass
return ''.join (res)
def getNamePart (self):
try:
ns, n = self._tagname
return n
        except:
            # _tagname is not an (ns, name) pair; return it unchanged
            return self._tagname
def getNodesByName (self, look4):
""" Returns a list of nodes whose tagname = look4
"""
res = []
for node in self._nodeList:
try:
if look4 != node.getNamePart ():
continue
l = node._nodeList
res.append (node)
except Exception, e:
pass
return res
def __str__ (self):
try:
return self.content
except:
return self.buildName (self._tagname)
def getNode (self, path):
""" Returns a node for a given path.
Path is of the form /tag1/tag2/tag3.
Path can include array index, like /tag1/tag2[3]/tag4.
"""
node = self
children = []
pathArray = path.split ("/")
rootName = self.getNamePart ()
if rootName != pathArray[1]:
return None
pathArray = pathArray[2:]
for elem in pathArray:
tmp = elem.replace ('[', ']')
list = tmp.split (']')
name = list[0]
if len (list) > 1:
idx = int (list[1])
else:
idx = 0
children = node.getNodesByName (name)
nr = len (children)
if idx >= nr:
return None
node = children [idx]
return node
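    # Illustrative path lookup (tag names depend on the actual document):
    #   node.getNode ('/VOTABLE/RESOURCE/TABLE/FIELD[2]') returns the third
    #   FIELD of the first TABLE, or None if the path does not exist.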
def getNodesByPath (self, path):
""" Returns an array of VONodes for a given path.
Path is of the form /tag1/tag2/tag3.
Path can include array index, like /tag1/tag2[3]/tag4.
"""
node = self
children = []
pathArray = path.split ("/")
rootName = self.getNamePart ()
if rootName != pathArray[1]:
return None
pathArray = pathArray[2:]
for elem in pathArray:
tmp = elem.replace ('[', ']')
list = tmp.split (']')
name = list[0]
if len (list) > 1:
idx = int (list[1])
else:
idx = 0
children = node.getNodesByName (name)
nr = len (children)
if idx >= nr:
return None
node = children [idx]
return children
def buildName (self, tname):
""" Returns a name with namespace as prefix
or just name if no namespace
Note that the prefix is the real namespace
and not the abbreviation used in the original XML
"""
ns,n = tname
"""
if ns:
return "%s:%s" % (self.qname, n)
else:
return n
"""
return n
def printAllNodes (self, func=sys.stdout.write, prefix=''):
""" Recursive method to visit all nodes of the tree
and calls the provided function to output the content.
"""
func ("%s<%s" % (prefix, self.buildName (self._tagname)))
for ns,v in self._attributes.items ():
func (" %s='%s'" % (self.buildName ((ns)), v))
func (">")
last = 0
for n in self._nodeList:
if isinstance (n, (StringType, UnicodeType)):
if last == 2:
func ("\n%s" % prefix)
func ("%s" % n)
last = 1
else:
if last <= 1:
func ("\n")
n.printAllNodes (func, prefix + ' ')
last = 2
if last <= 1:
func ("</%s>\n" % self.buildName (self._tagname))
else:
func ("%s</%s>\n" % (prefix, self.buildName (self._tagname)))
class VOTableHandler (xml.sax.handler.ContentHandler):
""" Class implementing callbacks for the SAX parser.
"""
def __init__ (self, vonode=VONode):
# Create a parser
xml.sax.handler.ContentHandler.__init__ (self)
self.parser = xml.sax.make_parser()
self.parser.setFeature (xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler (self)
self.vonode = vonode
self.sentinel = vonode ()
self.currNode = self.sentinel
self.stack = []
def startElementNS (self, (urn, name), qname, attrs):
#print "start ", name
self.stack.append (self.currNode)
self.currNode = self.vonode ((urn, name))
self.currNode.addAttributes (attrs)
def characters (self, chunk):
buf = chunk.strip ()
if len (buf) == 0: return
self.currNode.addNode (buf)
def endElementNS (self, (urn, name), qname):
#print "end ", name
newNode = self.currNode
self.currNode = self.stack.pop ()
self.currNode.addNode (newNode)
def parse (self, source):
""" Main entry point.
Source can be URL or file name.
"""
self.parser.parse (source)
return self.sentinel #._nodeList[0]
class VOTable (object):
""" Implementation of VOTable
"""
def __init__ (self, source=None, vonode=VONode):
""" Instantiate a VOTable.
source can be URL, file name or a string representing the VOTable.
vonode is a class representing VONode, must be derived from or
compatible with VONode.
"""
self.vonode = vonode
self.root = None
if source != None:
self.parse (source)
def parse (self, source):
""" Invokes XML parser and stores VOTable
in self.root as VONode.
"""
parser = VOTableHandler (self.vonode)
self.root = parser.parse (source)
def printAllNodes (self, func=sys.stdout.write):
""" Output entire content as XML.
func is the output method, defined as:
func (outString)
"""
# _nodeList[0] is VOTABLE
# We use _nodeList[0] instead, just in case
# the xml content does not start with VOTABLE,
# we still can print all nodes.
node = self.root._nodeList[0]
node.printAllNodes (func)
def getNode (self, path):
""" Returns a VONode of the given path.
"""
return self.root._nodeList[0].getNode (path)
def getContent (self, path):
""" Returns the content of a node.
Only strings are returned.
"""
node = self.getNode (path)
return node.getContent ()
def getColumnIdx (self, val):
""" Returns the column index for the given name
Will return any attribute value matching val.
"""
fields = self.getFields ()
for coln, f in enumerate (fields):
if val in f._attributes.values ():
return coln
return -1
def getFields (self):
""" Returns a list of VONode representing all the fields
"""
#table = self.root.VOTABLE.RESOURCE.TABLE
#return table.getNodesByName ('FIELD')
return self.root.VOTABLE.RESOURCE.TABLE.FIELD
def getParams (self):
""" Returns a list of VONode representing all PARAMS
"""
return self.root.VOTABLE.RESOURCE.RESOURCE.PARAM
def getFieldsAttrs (self):
""" Returns a list of maps that contains attributes.
Returned list looks like this: [{},{},...]
"""
res = []
fields = self.getFields ()
for elem in fields:
try:
res.append (elem.getAttributes())
except:
pass
return res
def getDataRows (self):
""" Returns a list of VONodes representing rows of the table.
Use getData () to extract data from each row.
for x in getDataRows ():
data = getData (x)
#data = [values ...]
"""
tableData = self.root.VOTABLE.RESOURCE.TABLE.DATA.BINARY.STREAM
return tableData._nodeList
def getData (self, row):
""" row is a VONode <TR> parent of a list of <TD>.
Returns a list of values.
"""
res = []
list = row._nodeList
for elm in list:
try:
res.append (elm.getContent ())
except:
res.append ('')
return res
def append (self, vot):
""" Appends votable vot to the end of this VOTable.
No tests to see if fields are the same.
vot must have the same fields.
"""
try:
node1 = self.root.VOTABLE.RESOURCE.TABLE.DATA.TABLEDATA
except:
node1 = None
try:
node2 = vot.root.VOTABLE.RESOURCE.TABLE.DATA.TABLEDATA
except:
node2 = None
if node1:
if node2:
node1._nodeList.extend (node2._nodeList)
else:
if node2:
self.root.VOTABLE.RESOURCE.TABLE.DATA.TABLEDATA = node2
if __name__ == '__main__':
votable = VOTable()
votable.parse (sys.argv[1])
#votable.printAllNodes ()
#print [x.getAttribute ('ID') for x in votable.getFields () ]
#print votable.root.VOTABLE.RESOURCE.TABLE.DATA.TABLEDATA.TR[1].TD[1]
print votable.getFields ()
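    # Further illustrative calls (commented out; names and paths depend on the input file):
    #print votable.getColumnIdx ('RA')
    #print votable.getContent ('/VOTABLE/RESOURCE/DESCRIPTION')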
| wschoenell/chimera_imported_googlecode | src/chimera/util/votable.py | Python | gpl-2.0 | 9,275 |
from __future__ import unicode_literals
from six import ensure_text
from .node import NodeVisitor, ValueNode, ListNode, BinaryExpressionNode
from .parser import atoms, precedence
atom_names = {v: "@%s" % k for (k,v) in atoms.items()}
named_escapes = {"\a", "\b", "\f", "\n", "\r", "\t", "\v"}
def escape(string, extras=""):
# Assumes input bytes are either UTF8 bytes or unicode.
rv = ""
for c in string:
if c in named_escapes:
rv += c.encode("unicode_escape").decode()
elif c == "\\":
rv += "\\\\"
elif c < '\x20':
rv += "\\x%02x" % ord(c)
elif c in extras:
rv += "\\" + c
else:
rv += c
return ensure_text(rv)
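# For example, escape("key:value\n", extras=":") yields the text key\:value\n,
# i.e. the colon and the newline both come back backslash-escaped.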
class ManifestSerializer(NodeVisitor):
def __init__(self, skip_empty_data=False):
self.skip_empty_data = skip_empty_data
def serialize(self, root):
self.indent = 2
rv = "\n".join(self.visit(root))
if not rv:
return rv
if rv[-1] != "\n":
rv = rv + "\n"
return rv
def visit_DataNode(self, node):
rv = []
if not self.skip_empty_data or node.children:
if node.data:
rv.append("[%s]" % escape(node.data, extras="]"))
indent = self.indent * " "
else:
indent = ""
for child in node.children:
rv.extend("%s%s" % (indent if item else "", item) for item in self.visit(child))
if node.parent:
rv.append("")
return rv
def visit_KeyValueNode(self, node):
rv = [escape(node.data, ":") + ":"]
indent = " " * self.indent
if len(node.children) == 1 and isinstance(node.children[0], (ValueNode, ListNode)):
rv[0] += " %s" % self.visit(node.children[0])[0]
else:
for child in node.children:
rv.append(indent + self.visit(child)[0])
return rv
def visit_ListNode(self, node):
rv = ["["]
rv.extend(", ".join(self.visit(child)[0] for child in node.children))
rv.append("]")
return ["".join(rv)]
def visit_ValueNode(self, node):
data = ensure_text(node.data)
if ("#" in data or
data.startswith("if ") or
(isinstance(node.parent, ListNode) and
("," in data or "]" in data))):
if "\"" in data:
quote = "'"
else:
quote = "\""
else:
quote = ""
return [quote + escape(data, extras=quote) + quote]
def visit_AtomNode(self, node):
return [atom_names[node.data]]
def visit_ConditionalNode(self, node):
return ["if %s: %s" % tuple(self.visit(item)[0] for item in node.children)]
def visit_StringNode(self, node):
rv = ["\"%s\"" % escape(node.data, extras="\"")]
for child in node.children:
rv[0] += self.visit(child)[0]
return rv
def visit_NumberNode(self, node):
return [ensure_text(node.data)]
def visit_VariableNode(self, node):
rv = escape(node.data)
for child in node.children:
            rv += self.visit(child)[0]
return [rv]
def visit_IndexNode(self, node):
assert len(node.children) == 1
return ["[%s]" % self.visit(node.children[0])[0]]
def visit_UnaryExpressionNode(self, node):
children = []
for child in node.children:
child_str = self.visit(child)[0]
if isinstance(child, BinaryExpressionNode):
child_str = "(%s)" % child_str
children.append(child_str)
return [" ".join(children)]
def visit_BinaryExpressionNode(self, node):
assert len(node.children) == 3
children = []
for child_index in [1, 0, 2]:
child = node.children[child_index]
child_str = self.visit(child)[0]
if (isinstance(child, BinaryExpressionNode) and
precedence(node.children[0]) < precedence(child.children[0])):
child_str = "(%s)" % child_str
children.append(child_str)
return [" ".join(children)]
def visit_UnaryOperatorNode(self, node):
return [ensure_text(node.data)]
def visit_BinaryOperatorNode(self, node):
return [ensure_text(node.data)]
def serialize(tree, *args, **kwargs):
s = ManifestSerializer(*args, **kwargs)
return s.serialize(tree)
| asajeffrey/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py | Python | mpl-2.0 | 4,498 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
map_permission = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_maps("Players") \
.sync_map_permissions("bob") \
.fetch()
print(map_permission.identity)
print(map_permission.url)
| teoreteetik/api-snippets | sync/rest/map-permissions/retrieve-permission/retrieve-permission.6.x.py | Python | mit | 511 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Exile
@date: 05-07-2016
@place: Cartagena - Colombia
@licence: Creative Common
"""
from django.contrib import admin
from exileui.admin import exileui
from import_export.formats import base_formats
from import_export.admin import ExportMixin, ImportExportModelAdmin
from import_export import resources, fields
from plugins.pdf.format import PDF
class PdfExportMixin(ExportMixin):
def get_export_formats(self,):
formats = super(PdfExportMixin, self).get_export_formats()
if self.template:
PDF.template = self.template
# end if
return [PDF, base_formats.CSV, base_formats.XLSX]
# end def
# end class
registry = {}
def register_export(model, resource_class, template=None):
registry[model] = resource_class, template
# end def
old_register = exileui.register
def register(model, *args):
if model in registry:
if len(args):
modeladmin = args[0]
else:
modeladmin = admin.ModelAdmin
# end if
resource_class, template = registry[model]
class newadmin(PdfExportMixin, modeladmin):
pass
# end class
newadmin.template = template
newadmin.resource_class = resource_class
return old_register(model, newadmin)
# end if
return old_register(model, *args)
# end def
exileui.register = register
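# Illustrative usage from an app's admin module (model/resource names are hypothetical):
#   register_export(Pedido, PedidoResource, template='informes/pedido_pdf.html')
#   exileui.register(Pedido, PedidoAdmin)  # now exports PDF, CSV and XLSX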
| exildev/AutoLavadox | operacion/informes/reports.py | Python | mit | 1,414 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from airflow.exceptions import AirflowException
from airflow.providers.salesforce.hooks.tableau import TableauHook, TableauJobFinishCode
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class TableauJobFailedException(AirflowException):
"""
An exception that indicates that a Job failed to complete.
"""
class TableauJobStatusSensor(BaseSensorOperator):
"""
Watches the status of a Tableau Server Job.
.. seealso:: https://tableau.github.io/server-client-python/docs/api-ref#jobs
:param job_id: The job to watch.
:type job_id: str
:param site_id: The id of the site where the workbook belongs to.
:type site_id: Optional[str]
:param tableau_conn_id: The Tableau Connection id containing the credentials
to authenticate to the Tableau Server.
:type tableau_conn_id: str
"""
template_fields = ('job_id',)
@apply_defaults
def __init__(self,
job_id: str,
site_id: Optional[str] = None,
tableau_conn_id: str = 'tableau_default',
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.tableau_conn_id = tableau_conn_id
self.job_id = job_id
self.site_id = site_id
def poke(self, context: dict) -> bool:
"""
Pokes until the job has successfully finished.
:param context: The task context during execution.
:type context: dict
:return: True if it succeeded and False if not.
:rtype: bool
"""
with TableauHook(self.site_id, self.tableau_conn_id) as tableau_hook:
finish_code = TableauJobFinishCode(
int(tableau_hook.server.jobs.get_by_id(self.job_id).finish_code)
)
self.log.info('Current finishCode is %s (%s)', finish_code.name, finish_code.value)
if finish_code in [TableauJobFinishCode.ERROR, TableauJobFinishCode.CANCELED]:
raise TableauJobFailedException('The Tableau Refresh Workbook Job failed!')
return finish_code == TableauJobFinishCode.SUCCESS
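# Illustrative DAG usage (task ids, site and connection names are placeholders):
#
#     wait_for_job = TableauJobStatusSensor(
#         task_id='wait_for_tableau_job',
#         job_id="{{ ti.xcom_pull(task_ids='refresh_workbook') }}",
#         site_id='my_site',
#         tableau_conn_id='tableau_default',
#         dag=dag,
#     )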
| wooga/airflow | airflow/providers/salesforce/sensors/tableau_job_status.py | Python | apache-2.0 | 2,992 |
# Copyright 2015-2019 Facundo Batista, Nicolás Demarchi
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://github.com/PyAr/fades
"""Helpers for the Cache tests collection."""
from pkg_resources import Distribution
def get_distrib(*dep_ver_pairs):
"""Build some Distributions with indicated info."""
return [Distribution(project_name=dep, version=ver) for dep, ver in dep_ver_pairs]
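# e.g. get_distrib(('foo', '1.0'), ('bar', '2.3')) builds two Distribution
# objects with those (hypothetical) project names and versions.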
| PyAr/fades | tests/test_cache/__init__.py | Python | gpl-3.0 | 979 |
"""
# urljoin tests
>>> UrlRewriter.urljoin('http://example.com/test/', '../file.html')
'http://example.com/file.html'
>>> UrlRewriter.urljoin('http://example.com/test/', '../path/../../../file.html')
'http://example.com/file.html'
>>> UrlRewriter.urljoin('http://example.com/test/', '/../file.html')
'http://example.com/file.html'
>>> UrlRewriter.urljoin('http://example.com/', '/abc/../../file.html')
'http://example.com/file.html'
>>> UrlRewriter.urljoin('http://example.com/path/more/', 'abc/../../file.html')
'http://example.com/path/file.html'
>>> UrlRewriter.urljoin('http://example.com/test/', 'file.html')
'http://example.com/test/file.html'
# UrlRewriter tests
>>> do_rewrite('other.html', '20131010/http://example.com/path/page.html', 'https://web.archive.org/web/')
'/web/20131010/http://example.com/path/other.html'
>>> do_rewrite('file.js', '20131010/http://example.com/path/page.html', 'https://web.archive.org/web/', 'js_')
'/web/20131010js_/http://example.com/path/file.js'
>>> do_rewrite('file.js', '20131010/http://example.com/', '/coll/')
'/coll/20131010/http://example.com/file.js'
>>> do_rewrite('file.js', '20131010/http://example.com', '/coll/', 'js_')
'/coll/20131010js_/http://example.com/file.js'
>>> do_rewrite('file.js', '20131010/http://example.com', '/coll/', '')
'/coll/20131010/http://example.com/file.js'
>>> do_rewrite('/other.html', '20130907*/http://example.com/path/page.html', 'http://localhost:8080/coll/')
'/coll/20130907*/http://example.com/other.html'
>>> do_rewrite('/other.html', '20130907*/http://example.com/path/page.html', '/coll/')
'/coll/20130907*/http://example.com/other.html'
>>> do_rewrite('./other.html', '20130907*/http://example.com/path/page.html', '/coll/')
'/coll/20130907*/http://example.com/path/other.html'
>>> do_rewrite('../other.html', '20131112im_/http://example.com/path/page.html', '/coll/')
'/coll/20131112im_/http://example.com/other.html'
>>> do_rewrite('../../other.html', '*/http://example.com/index.html', 'localhost:8080/')
'localhost:8080/*/http://example.com/other.html'
>>> do_rewrite('path/../../other.html', '*/http://example.com/index.html', 'localhost:8080/')
'localhost:8080/*/http://example.com/other.html'
>>> do_rewrite('http://some-other-site.com', '20101226101112/http://example.com/index.html', 'localhost:8080/')
'localhost:8080/20101226101112/http://some-other-site.com'
>>> do_rewrite('http://localhost:8080/web/2014im_/http://some-other-site.com', 'http://example.com/index.html', '/web/', full_prefix='http://localhost:8080/web/')
'http://localhost:8080/web/2014im_/http://some-other-site.com'
>>> do_rewrite('/web/http://some-other-site.com', 'http://example.com/index.html', '/web/', full_prefix='http://localhost:8080/web/')
'/web/http://some-other-site.com'
>>> do_rewrite(r'http:\/\/some-other-site.com', '20101226101112/http://example.com/index.html', 'https://localhost:8080/')
'https://localhost:8080/20101226101112/http:\\\\/\\\\/some-other-site.com'
>>> do_rewrite(r'//some-other-site.com', '20101226101112/http://example.com/index.html', 'http://localhost:8080/')
'//localhost:8080/20101226101112///some-other-site.com'
>>> do_rewrite(r'\/\/some-other-site.com', '20101226101112/http://example.com/index.html', 'http://localhost:8080/')
'//localhost:8080/20101226101112/\\\\/\\\\/some-other-site.com'
>>> do_rewrite(r'\\/\\/some-other-site.com', '20101226101112/http://example.com/index.html', 'https://localhost:8080/')
'//localhost:8080/20101226101112/\\\\/\\\\/some-other-site.com'
>>> do_rewrite(r'http:\/\/some-other-site.com', '20101226101112/http://example.com/index.html', 'https://localhost:8080/')
'https://localhost:8080/20101226101112/http:\\\\/\\\\/some-other-site.com'
>>> do_rewrite(r'http:\/\/some-other-site.com', '20101226101112/http://example.com/index.html', 'http://localhost:8080/')
'http://localhost:8080/20101226101112/http:\\\\/\\\\/some-other-site.com'
>>> do_rewrite('../../other.html', '2020/http://example.com/index.html', '/')
'/2020/http://example.com/other.html'
>>> do_rewrite('../../other.html', '2020/http://example.com/index.html', '')
'2020/http://example.com/other.html'
>>> do_rewrite('', '20131010010203/http://example.com/file.html', '/web/')
'/web/20131010010203/http://example.com/file.html'
>>> do_rewrite('#anchor', '20131010/http://example.com/path/page.html', 'https://web.archive.org/web/')
'#anchor'
>>> do_rewrite('mailto:example@example.com', '20131010/http://example.com/path/page.html', 'https://web.archive.org/web/')
'mailto:example@example.com'
>>> do_rewrite('file:///some/path/', '20131010/http://example.com/path/page.html', 'https://web.archive.org/web/')
'file:///some/path/'
>>> UrlRewriter('19960708im_/http://domain.example.com/path.txt', '/abc/').get_new_url(url='')
'/abc/19960708im_/'
>>> UrlRewriter('2013id_/example.com/file/path/blah.html', '/123/').get_new_url(timestamp='20131024')
'/123/20131024id_/http://example.com/file/path/blah.html'
# deprefix tests
>>> do_deprefix('2013id_/http://example.com/file/path/blah.html?param=http://localhost:8080/pywb/20141226/http://example.com/', '/pywb/', 'http://localhost:8080/pywb/')
'http://example.com/file/path/blah.html?param=http://example.com/'
>>> do_deprefix('2013id_/http://example.com/file/path/blah.html?param=http://localhost:8080/pywb/if_/https://example.com/filename.html', '/pywb/', 'http://localhost:8080/pywb/')
'http://example.com/file/path/blah.html?param=https://example.com/filename.html'
>>> do_deprefix('2013id_/http://example.com/file/path/blah.html?param=http://localhost:8080/pywb/https://example.com/filename.html', '/pywb/', 'http://localhost:8080/pywb/')
'http://example.com/file/path/blah.html?param=https://example.com/filename.html'
>>> do_deprefix('http://example.com/file.html?param=http://localhost:8080/pywb/https%3A//example.com/filename.html&other=value&a=b¶m2=http://localhost:8080/pywb/http://test.example.com', '/pywb/', 'http://localhost:8080/pywb/')
'http://example.com/file.html?param=https://example.com/filename.html&other=value&a=b¶m2=http://test.example.com'
# urlencoded
>>> do_deprefix('http://example.com/file.html?foo=bar&url=' + quote_plus('http://localhost:8080/pywb/http://example.com/filename.html') + '&foo2=bar2', '/pywb/', 'http://localhost:8080/pywb/')
'http://example.com/file.html?foo=bar&url=http://example.com/filename.html&foo2=bar2'
# with extra path
>>> do_deprefix('http://example.com/file.html?foo=bar&url=' + quote_plus('http://localhost:8080/pywb/extra/path/http://example.com/filename.html') + '&foo2=bar2', '/pywb/', 'http://localhost:8080/pywb/')
'http://example.com/file.html?foo=bar&url=http://example.com/filename.html&foo2=bar2'
# SchemeOnlyUrlRewriter tests
>>> SchemeOnlyUrlRewriter('http://example.com/').rewrite('https://example.com/abc')
'http://example.com/abc'
>>> SchemeOnlyUrlRewriter('http://example.com/abc').rewrite('http://example.com/abc')
'http://example.com/abc'
>>> SchemeOnlyUrlRewriter('https://example.com/abc').rewrite('http://example.com/abc')
'https://example.com/abc'
>>> SchemeOnlyUrlRewriter('https://example.com/abc').rewrite('https://example.com/abc')
'https://example.com/abc'
>>> SchemeOnlyUrlRewriter('http://example.com/abc').rewrite('//example.com/abc')
'//example.com/abc'
>>> SchemeOnlyUrlRewriter('https://example.com/abc').rewrite('//example.com/abc')
'//example.com/abc'
# rebase is identity
>>> x = SchemeOnlyUrlRewriter('http://example.com'); x.rebase_rewriter('https://example.com/') == x
True
"""
from pywb.rewrite.url_rewriter import UrlRewriter, SchemeOnlyUrlRewriter
from six.moves.urllib.parse import quote_plus, unquote_plus
def do_rewrite(rel_url, base_url, prefix, mod=None, full_prefix=None):
rewriter = UrlRewriter(base_url, prefix, full_prefix=full_prefix)
return rewriter.rewrite(rel_url, mod)
def do_deprefix(url, rel_prefix, full_prefix):
rewriter = UrlRewriter(url, rel_prefix, full_prefix)
url = rewriter.deprefix_url()
return unquote_plus(url)
if __name__ == "__main__":
import doctest
doctest.testmod()
| pombredanne/pywb | pywb/rewrite/test/test_url_rewriter.py | Python | gpl-3.0 | 8,053 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from flask import Flask
from flask.ext.script import Manager
from app import create_app
app = Flask(__name__)
manage = Manager(create_app())
if __name__ == '__main__':
manage.run()
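    # Flask-Script then exposes its default commands, e.g.:
    #   python manage.py runserver
    #   python manage.py shell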
| hanks-zyh/fir-local | manage.py | Python | apache-2.0 | 234 |
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
''' I use this module to check for newer versions of ETE '''
import urllib2 as url
try:
from ete2 import __ETEID__
except ImportError:
__ETEID__ = "Unknown"
try:
from ete2 import __VERSION__
except ImportError:
__VERSION__ = "Unknown"
def call():
print " == Calling home...",
try:
f = url.urlopen('http://etetoolkit.org/et_phone_home.php?VERSION=%s&ID=%s'
%(__VERSION__, __ETEID__))
except:
print "No answer :("
else:
print "Got answer!"
print f.read()
module_name = __name__.split(".")[0]
try:
f = url.urlopen('http://etetoolkit.org/releases/ete2/%s.latest'
%module_name)
except:
latest = None
else:
latest = int(f.read())
try:
current = int(__VERSION__.split("rev")[1])
except (IndexError, ValueError):
current = None
if not latest:
print "I could not find data about your version [%s]" %module_name
print "Are you ok?"
elif not current:
print "I could not determine your version [%s]" %module_name
print "Are you ok?"
print "Latest stable ETE version is", latest
elif latest > current:
print "You look a bit old."
print "A newer version is available: rev%s" %latest
print "Use 'easy_install -U %s' to upgrade" %module_name
else:
print "I see you are in shape."
print "No updates are available."
try:
msg = raw_input("\n == Do you want to leave any message?\n(Press enter to finish)\n\n").strip()
except KeyboardInterrupt:
msg = None
if msg:
msg = url.quote(msg)
try:
f = url.urlopen('http://etetoolkit.org/et_phone_home.php?VERSION=%s&ID=%s&MSG=%s'
%(__VERSION__, __ETEID__, msg))
except:
print "Message could be delivered :("
else:
print f.read()
def new_version(module_name=None, current=None):
if not module_name:
module_name = __name__.split(".")[0]
try:
f = url.urlopen('http://etetoolkit.org/releases/ete2/%s.latest'
%module_name)
except:
latest = None
else:
latest = int(f.read())
news_url = 'http://etetoolkit.org/releases/ete2/%s.latest_news' %module_name
msg = read_content(news_url)
if not current:
try:
current = int(__VERSION__.split("rev")[1])
except (IndexError, ValueError):
current = None
return current, latest, msg
def read_content(address):
try:
f = url.urlopen(address)
except:
return None
else:
return f.read()
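# Illustrative use of the helpers above (requires network access):
#   current, latest, msg = new_version()
#   if current and latest and latest > current:
#       print "a newer revision (rev%s) is available" % latest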
| sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/ete2/_ph.py | Python | mit | 4,356 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.security import csrf
from gratipay.testing import Harness
class Tests(Harness):
# st - _sanitize_token
def test_st_passes_through_good_token(self):
token = 'ddddeeeeaaaaddddbbbbeeeeeeeeffff'
assert csrf._sanitize_token(token) == token
def test_st_rejects_overlong_token(self):
token = 'ddddeeeeaaaaddddbbbbeeeeeeeefffff'
assert csrf._sanitize_token(token) is None
def test_st_rejects_underlong_token(self):
token = 'ddddeeeeaaaaddddbbbbeeeeeeeefff'
assert csrf._sanitize_token(token) is None
def test_st_rejects_goofy_token(self):
token = 'ddddeeeeaaaadddd bbbbeeeeeeeefff'
assert csrf._sanitize_token(token) is None
# integration tests
def test_no_csrf_cookie_gives_403(self):
r = self.client.POST('/', csrf_token=False, raise_immediately=False)
assert r.code == 403
assert "Bad CSRF cookie" in r.body
assert b'csrf_token' in r.headers.cookie
def test_bad_csrf_cookie_gives_403(self):
r = self.client.POST('/', csrf_token=b'bad_token', raise_immediately=False)
assert r.code == 403
assert "Bad CSRF cookie" in r.body
assert r.headers.cookie[b'csrf_token'].value != 'bad_token'
def test_csrf_cookie_set_for_most_requests(self):
r = self.client.GET('/about/')
assert b'csrf_token' in r.headers.cookie
def test_no_csrf_cookie_set_for_assets(self):
r = self.client.GET('/assets/gratipay.css')
assert b'csrf_token' not in r.headers.cookie
| gratipay/gratipay.com | tests/py/test_security_csrf.py | Python | mit | 1,670 |
'''
Created on Sep 15, 2012
@author: altay
'''
import unittest
from main.sigma import *
from numpy.lib.tests.test_format import assert_equal
import sys
sys.path.append("../")
class TestSigma(unittest.TestCase):
def setUp(self):
'''
Initializes the necessary resources for the tests.
'''
self.sigma = Sigma()
def tearDown(self):
'''
Releases the used sources for the tests.
'''
self.sigma = None
def test_get_sigma(self):
'''
        Tests the get_sigma method of the Sigma class against the predefined array.
'''
# Check if the sigma instance returns the first value pair
# from the predefined sigma array.
value = self.sigma.get_sigma()
assert_equal(0.66241954208476006, value[0])
assert_equal(0.66885000367117153, value[1])
# Check if the sigma instance returns the second value pair
# from the predefined sigma array.
value = self.sigma.get_sigma()
assert_equal(0.60021257050283561, value[0])
assert_equal(3.0323656162636006, value[1])
# Check if the sigma instance returns the last value pair
# after being called 251 times.
for i in range(1, 251):
value = self.sigma.get_sigma()
assert_equal(2.1725127255689838, value[0])
assert_equal(1.6099687932924471, value[1])
value = self.sigma.get_sigma()
assert_equal(0.66241954208476006, value[0])
assert_equal(0.66885000367117153, value[1])
def test_reset(self):
'''
        Tests the reset method of the Sigma class against the predefined array.
'''
# Check if the sigma instance returns the first value pair
# from the predefined sigma array.
value = self.sigma.get_sigma()
assert_equal(0.66241954208476006, value[0])
assert_equal(0.66885000367117153, value[1])
# Check if the sigma instance returns the second value pair
# from the predefined sigma array.
value = self.sigma.get_sigma()
assert_equal(0.60021257050283561, value[0])
assert_equal(3.0323656162636006, value[1])
# Resets the sigma instance.
self.sigma.reset()
# Check if the sigma instance returns the first value pair
# from the predefined sigma array.
value = self.sigma.get_sigma()
assert_equal(0.66241954208476006, value[0])
assert_equal(0.66885000367117153, value[1])
if __name__ == "__main__":
unittest.main()
| altay-oz/tech_market_simulations | src/test/test_sigma.py | Python | gpl-3.0 | 2,574 |
from webplot import p
p.use_doc('webplot example')
import numpy as np
import datetime
import time
x = np.arange(100) / 6.0
y = np.sin(x)
z = np.cos(x)
data_source = p.make_source(idx=range(100), x=x, y=y, z=z)
p.plot(x, y, 'orange')
p.figure()
p.plot('x', 'y', color='blue', data_source=data_source, title='sincos')
p.plot('x', 'z', color='green')
p.figure()
p.plot('x', 'y', data_source=data_source)
p.figure()
p.plot('x', 'z', data_source=data_source)
p.figure()
p.table(data_source, ['x', 'y', 'z'])
p.scatter('x', 'y', data_source=data_source)
p.figure()
p.scatter('x', 'z', data_source=data_source)
p.figure()
p.hold(False)
p.scatter('x', 'y', 'orange', data_source=data_source)
p.scatter('x', 'z', 'red', data_source=data_source)
p.plot('x', 'z', 'yellow', data_source=data_source)
p.plot('x', 'y', 'black', data_source=data_source)
print "click on the plots tab to see results"
| zrhans/python | exemplos/wakari/scripts-examples-webplot_example.py | Python | gpl-2.0 | 885 |
# -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A guard implementation which supports HTTP header-based authentication
schemes.
If no Authorization header is present in the request, or the credentials it
supplies are invalid, a status code of 401 will be sent in the response along
with WWW-Authenticate challenges for all accepted authentication schemes.
"""
from zope.interface import implements
from twisted.python import log
from twisted.python.components import proxyForInterface
from twisted.web.resource import IResource
from twisted.web import util
from twisted.web.error import ErrorPage
from twisted.cred import error as credError
class UnauthorizedResource(object):
"""
Simple IResource to escape Resource dispatch
"""
implements(IResource)
isLeaf = True
def __init__(self, factories):
self._credentialFactories = factories
def render(self, request):
"""
Send www-authenticate headers to the client
"""
def generateWWWAuthenticate(scheme, challenge):
l = []
for k,v in challenge.iteritems():
l.append("%s=%s" % (k, quoteString(v)))
return "%s %s" % (scheme, ", ".join(l))
def quoteString(s):
return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
request.setResponseCode(401)
for fact in self._credentialFactories:
challenge = fact.getChallenge(request)
request.responseHeaders.addRawHeader(
'www-authenticate',
generateWWWAuthenticate(fact.scheme, challenge))
return 'Unauthorized'
def getChildWithDefault(self, path, request):
"""
Disable resource dispatch
"""
return self
class HTTPAuthSessionWrapper(object):
"""
Wrap a portal, enforcing supported header-based authentication schemes.
@ivar _portal: The L{Portal} which will be used to retrieve L{IResource}
avatars.
@ivar _credentialFactories: A list of L{ICredentialFactory} providers which
will be used to decode I{Authorization} headers into L{ICredentials}
providers.
"""
implements(IResource)
isLeaf = False
def __init__(self, portal, credentialFactories):
"""
Initialize a session wrapper
@type portal: C{Portal}
@param portal: The portal that will authenticate the remote client
@type credentialFactories: C{Iterable}
@param credentialFactories: The portal that will authenticate the
remote client based on one submitted C{ICredentialFactory}
"""
self._portal = portal
self._credentialFactories = credentialFactories
def render(self, request):
raise NotImplementedError
def getChildWithDefault(self, path, request):
"""
Inspect the Authorization HTTP header, and return a deferred which,
when fired after successful authentication, will return an authorized
C{Avatar}. On authentication failure, an C{UnauthorizedResource} will
be returned, essentially halting further dispatch on the wrapped
resource and all children
"""
authheader = request.getHeader('authorization')
if not authheader:
return UnauthorizedResource(self._credentialFactories)
factory, respString = self._selectParseHeader(authheader)
if factory is None:
return UnauthorizedResource(self._credentialFactories)
try:
credentials = factory.decode(respString, request)
except credError.LoginFailed:
return UnauthorizedResource(self._credentialFactories)
except:
log.err(None, "Unexpected failure from credentials factory")
return ErrorPage(500, None, None)
else:
return util.DeferredResource(self._login(credentials))
def _login(self, credentials):
"""
Get the L{IResource} avatar for the given credentials.
@return: A L{Deferred} which will be called back with an L{IResource}
avatar or which will errback if authentication fails.
"""
d = self._portal.login(credentials, None, IResource)
d.addCallbacks(self._loginSucceeded, self._loginFailed)
return d
def _loginSucceeded(self, (interface, avatar, logout)):
"""
Handle login success by wrapping the resulting L{IResource} avatar
so that the C{logout} callback will be invoked when rendering is
complete.
"""
class ResourceWrapper(proxyForInterface(IResource, 'resource')):
"""
Wrap an L{IResource} so that whenever it or a child of it
completes rendering, the cred logout hook will be invoked.
An assumption is made here that exactly one L{IResource} from
among C{avatar} and all of its children will be rendered. If
more than one is rendered, C{logout} will be invoked multiple
times and probably earlier than desired.
"""
def getChildWithDefault(self, name, request):
"""
Pass through the lookup to the wrapped resource, wrapping
the result in L{ResourceWrapper} to ensure C{logout} is
called when rendering of the child is complete.
"""
return ResourceWrapper(self.resource.getChildWithDefault(name, request))
def render(self, request):
"""
Hook into response generation so that when rendering has
finished completely, C{logout} is called.
"""
request.notifyFinish().addCallback(lambda ign: logout())
return super(ResourceWrapper, self).render(request)
return ResourceWrapper(avatar)
def _loginFailed(self, result):
"""
Handle login failure by presenting either another challenge (for
expected authentication/authorization-related failures) or a server
error page (for anything else).
"""
if result.check(credError.Unauthorized, credError.LoginFailed):
return UnauthorizedResource(self._credentialFactories)
else:
log.err(
result,
"HTTPAuthSessionWrapper.getChildWithDefault encountered "
"unexpected error")
return ErrorPage(500, None, None)
def _selectParseHeader(self, header):
"""
Choose an C{ICredentialFactory} from C{_credentialFactories}
suitable to use to decode the given I{Authenticate} header.
@return: A two-tuple of a factory and the remaining portion of the
header value to be decoded or a two-tuple of C{None} if no
factory can decode the header value.
"""
elements = header.split(' ')
scheme = elements[0].lower()
for fact in self._credentialFactories:
if fact.scheme == scheme:
return (fact, ' '.join(elements[1:]))
return (None, None)
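# Illustrative wiring (myRealm is assumed to be an IRealm returning IResource avatars):
#
#     from twisted.cred.portal import Portal
#     from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
#     from twisted.web._auth.basic import BasicCredentialFactory
#     portal = Portal(myRealm, [InMemoryUsernamePasswordDatabaseDontUse(bob='secret')])
#     protected = HTTPAuthSessionWrapper(portal, [BasicCredentialFactory('example.org')])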
| hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/twisted/web/_auth/wrapper.py | Python | apache-2.0 | 7,186 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Mike Voets, 2014, mike@samiverb.com
class SamiSfixAndSyntaxLibrary(object):
""" A tiny library for Sami suffixes and methods """
def __init__(self):
self.stageList = {"kŋ": "ŋ", "đđ": "đ", "ff": "f", "ll": "l", "hll": "hl",
"ljj": "lj", "mm": "m", "nn": "n", "nnj": "nj", "rr": "r", "hrr": "hr", "ss":
"s", "šš": "š", "ŧŧ": "ŧ", "vv": "v", "bb": "pp", "dd": "tt", "ddj": "dj", "dj"
: "j", "gg": "kk", "zz": "cc", "žž": "čč", "hcc": "hc", "hc": "z", "hčč": "hč",
"hč": "ž", "hkk": "hk", "hk": "g", "hpp": "hp", "hp": "b", "htt": "ht", "ht":
"đ", "bm": "pm", "pm": "m", "dn": "tn", "tn": "n", "dnj": "tnj", "tnj": "nj",
"gn": "kn", "kn": "n", "rbm": "rpm", "rdn": "rtn", "rdjn": "rtjn", "rgn": "rkn"
, "đb": "đbb", "đg": "đgg", "đj": "đjj", "đv": "đvv", "ib": "ibb", "ic": "icc",
"id": "idd", "if": "iff", "ig": "igg", "ik": "ikk", "il": "ill", "ihl": "ihll"
, "ihm": "ihmm", "ihn": "ihnn", "ip": "ipp", "ir": "irr", "is": "iss", "it":
"itt", "iv": "ivv", "iz": "izz", "lb": "lbb", "lc": "lcc", "ld": "ldd", "lf":
"lff", "lg": "lgg", "lk": "lkk", "lj": "ljj", "lp": "lpp", "ls": "lss", "lš":
"lšš", "lt": "ltt", "lv": "lvv", "lž": "lžž", "mb": "mbb", "mp": "mpp", "ms":
"mss", "mš": "mšš", "nc": "ncc", "nč": "nčč", "nd": "ndd", "ns": "nss", "nt":
"ntt", "nz": "nzz", "nž": "nžž", "ŋg": "ŋgg", "ŋk": "ŋkk", "rb": "rbb", "rc":
"rcc", "rč": "rčč", "rd": "rdd", "rf": "rff", "rg": "rgg", "rj": "rjj", "rk":
"rkk", "rp": "rpp", "rs": "rss", "rš": "ršš", "rt": "rtt", "rv": "rvv", "rz":
"rzz", "rž": "ržž", "sk": "skk", "sm": "smm", "sp": "spp", "st": "stt", "šk":
"škk", "šm": "šmm", "št": "štt", "šv": "švv", "tk": "tkk", "tm": "tmm", "vd":
"vdd", "vg": "vgg", "vgŋ": "vŋŋ", "vj": "vjj", "vk": "vkk", "vl": "vll", "vhl":
"vhll", "vp": "vpp", "vr": "vrr", "vt": "vtt", "vž": "vžž", "đbm": "đmm",
"đgŋ": "đŋŋ", "ibm": "imm", "idn": "inn", "igŋ": "iŋŋ", "lbm": "lmm", "ldn":
"lnn", "lgŋ": "lŋŋ", "vdn": "vnn", "vdnj": "vnnj", "isk": "iskk", "ist": "istt"
, "mšk": "mškk", "nsk": "nskk", "nst": "nstt", "rsk": "rskk", "rst": "rstt",
"vsk": "vskk", "kč": "včč", "ks": "vss", "kst": "vstt", "kš": "kšš", "kt":
"vtt"}
        self.vowels = ['a', 'á', 'e', 'i', 'u', 'o']
self.consonants = ['b', 'c', 'č', 'd', 'đ', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'ŋ', 'p', 'r', 's', 'š', 't', 'ŧ', 'v', 'z', 'ž']
self.diftons = {"ea": "e", "uo": "u", "ie": "i", "oa": "o"}
self.pronoms = ["Mun", "Don", "Son", "Moai", "Doai", "Soai", "Mii", "Dii", "Sii"]
self.expectionsList = ["vuoššat", "cissat"]
self.sfixAVerbPres = ["an", "at", "á", "e", "abeahtti", "aba", "at", "abehtet", "et"]
self.sfixIVerbPres = ["án", "át", "á", "e", "ibeahtti", "iba", "it", "ibehtet", "et"]
self.sfixUVerbPres = ["un", "ut", "u", "o", "ubeahtti", "uba", "ut", "ubehtet", "ot"]
self.sfixLeatPres = ["an", "at", "a", "tne", "ahppi", "aba", "at", "hpet", "at"]
self.sfixNegVerbPres = ["an", "at", "a", "etne", "eahppi", "eaba", "it", "ehpet", "it"]
self.sfixContrVerbPres = ["n", "t", "", "jetne", "beahtti", "ba", "t", "behtet", "jit"]
self.sfixAVerbPast = ["en", "et", "ai", "aime", "aide", "aiga", "aimet", "aidet", "e"]
self.sfixIVerbPast = ["en", "et", "ii", "iime", "iide", "iiga", "iimet", "iidet", "e"]
self.sfixUVerbPast = ["on", "ot", "ui", "uime", "uide", "uiga", "uimet", "uidet", "o"]
self.sfixLeatPast = ["djen", "djet", "i", "imme", "idde", "igga", "immet", "iddet", "dje"]
self.sfixNegVerbPast = ["in", "it", "ii", "eimme", "eidde", "eigga", "eimmet", "eiddet", "edje"]
self.sfixContrVerbPast = ["jin", "jit", "i", "ime", "ide", "iga", "imet", "idet", "jedje"]
self.sfixRegVerbImp = ["on", "", "os", "u", ["i", "u"], "oska", "ot", ["et", "it", "ot"], "oset"]
self.sfixOtherVerbImp = ["ehkon", "eage", ["ehkos", "us"], ["eahkku", "eadnu"], "eahkki", "ehkoska", ["ehkot", "eahkkut", "eatnot", "etnot", "eadnot"], ["ehket", "eahkkit"], "ehkoset"]
self.partImpNeg = ["allon", "ale", "allos", "allu", "alli", "alloska", "allot, allut", "allet, allit", "alloset"]
self.partNeg = ["in", "it", "ii", "ean", "eahppi", "eaba", "eat", "ehpet", "eai"]
def _isVowel(self, character):
""" Returns True if character is a vowel, otherwise False """
for x in self.vowels:
if character == x:
return True
return False
def _isConsonant(self, character):
""" Returns True if character is a consonant, otherwise False """
for x in self.consonants:
if character == x:
return True
return False
def lastCharGet(self, word):
""" Returns last character from word """
return list(word.decode('utf-8'))[-1].encode('utf-8')
def lastLastCharGet(self, word):
""" Returns last character from word """
return list(word.decode('utf-8'))[-2].encode('utf-8')
def syllableCount(self, word):
""" Counts the amount of syllables in a word """
# Little hack to list all characters including the special utf-8 chars
tmp = list(word.decode('utf-8'))
# Start analyzing the verb
if len(word) > 2:
syllables = 1
# Take in account the first two letters and find out if both are consonants (if true => no syllables confirmed yet)
if self._isConsonant(tmp[0].encode('utf-8')) and self._isConsonant(tmp[1].encode('utf-8')):
syllables = 0
for i in range(1, len(tmp)-1):
# For each vowel after a consontant, there's a syllable
if self._isConsonant(tmp[i].encode('utf-8')) and self._isVowel(tmp[i+1].encode('utf-8')):
syllables += 1
return syllables
return 0
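    # Illustrative: under this heuristic syllableCount("vuoššat") == 2
    # (1 for the initial consonant+vowel pair, +1 per later consonant->vowel transition).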
def _fromStrongToWeakSuffixPast(self, x):
""" Returns weak form of suffix (past tense) """
if x == 'e':
self.popLetter()
return 'i'
elif x == 'o':
self.popLetter()
return 'u'
elif x == 'á'.decode('utf-8').encode('utf-8'):
self.popLetter()
return 'á'
def _fromStrongToWeakSuffixKond(self, x):
""" Returns weak form of suffix (conditionalis) """
if x == 'i':
self.popLetter()
return 'á'
elif x == 'u':
self.popLetter()
return 'o'
elif x == 'a':
self.popLetter()
return x
def _fromStrongToWeakSuffixPots(self, x):
""" Returns weak form of suffix (potentialis) """
if x == 'a':
self.popLetter()
return x
elif x == 'i':
self.popLetter()
return 'e'
elif x == 'u':
self.popLetter()
return 'o'
def _fromStrongToWeakSuffixNeg(self, x):
""" Returns weak form of suffix (negative) """
if x == 'i':
self.popLetter()
return 'e'
elif x == 'u':
self.popLetter()
return 'o'
elif x == 'a':
self.popLetter()
return x
elif x == 'h' or x == 'g':
self.popLetter()
return 't'
elif x == 'd':
self.popLetter()
if self.lastCharGet(self.verb) == 'r':
self.popLetter()
return 'r'
elif self.lastCharGet(self.verb) == 'l':
self.popLetter()
return 'l'
return 't'
elif x == 't':
self.popLetter()
if self.lastCharGet(self.verb) == 'š':
self.popLetter()
return 'š'
elif self.lastCharGet(self.verb) == 's':
self.popLetter()
return 's'
return 't'
elif x == 'k':
self.popLetter()
if self.lastCharGet(self.verb) == 's':
self.popLetter()
return 's'
return 'k'
elif x == 'm':
self.popLetter()
if self.lastCharGet(self.verb) == 's':
self.popLetter()
return 's'
return 'm'
elif x == 'š' or x == 'n' or x == 'l' or x == 's':
self.popLetter()
            return x
 | mikevoets/samiverb | src/SamiverbBundle/Resources/public/py/samisyntaxlib.py | Python | mit | 7,445 |
# coding=utf-8
"""
This module contains config objects needed by paypal.interface.PayPalInterface.
Most of this is transparent to the end developer, as the PayPalConfig object
is instantiated by the PayPalInterface object.
"""
import logging
import os
from pprint import pformat
from paypal.compat import basestring
from paypal.exceptions import PayPalConfigError
logger = logging.getLogger('paypal.settings')
class PayPalConfig(object):
"""
The PayPalConfig object is used to allow the developer to perform API
queries with any number of different accounts or configurations. This
is done by instantiating paypal.interface.PayPalInterface, passing config
directives as keyword args.
"""
# Used to validate correct values for certain config directives.
_valid_ = {
'API_ENVIRONMENT': ['SANDBOX', 'PRODUCTION'],
'API_AUTHENTICATION_MODE': ['3TOKEN', 'CERTIFICATE'],
}
# Various API servers.
_API_ENDPOINTS = {
# In most cases, you want 3-Token. There's also Certificate-based
# authentication, which uses different servers, but that's not
# implemented.
'3TOKEN': {
'SANDBOX': 'https://api-3t.sandbox.paypal.com/nvp',
'PRODUCTION': 'https://api-3t.paypal.com/nvp',
}
}
_PAYPAL_URL_BASE = {
'SANDBOX': 'https://www.sandbox.paypal.com/webscr',
'PRODUCTION': 'https://www.paypal.com/webscr',
}
API_VERSION = '98.0'
# Defaults. Used in the absence of user-specified values.
API_ENVIRONMENT = 'SANDBOX'
API_AUTHENTICATION_MODE = '3TOKEN'
# 3TOKEN credentials
API_USERNAME = None
API_PASSWORD = None
API_SIGNATURE = None
# API Endpoints are just API server addresses.
API_ENDPOINT = None
PAYPAL_URL_BASE = None
# API Endpoint CA certificate chain. If this is True, do a simple SSL
# certificate check on the endpoint. If it's a full path, verify against
# a private cert.
# e.g. '/etc/ssl/certs/Verisign_Class_3_Public_Primary_Certification_Authority.pem'
API_CA_CERTS = True
# UNIPAY credentials
UNIPAY_SUBJECT = None
ACK_SUCCESS = "SUCCESS"
ACK_SUCCESS_WITH_WARNING = "SUCCESSWITHWARNING"
# In seconds. Depending on your setup, this may need to be higher.
HTTP_TIMEOUT = 15.0
def __init__(self, **kwargs):
"""
PayPalConfig constructor. **kwargs catches all of the user-specified
config directives at time of instantiation. It is fine to set these
values post-instantiation, too.
Some basic validation for a few values is performed below, and defaults
are applied for certain directives in the absence of
user-provided values.
"""
if kwargs.get('API_ENVIRONMENT'):
api_environment = kwargs['API_ENVIRONMENT'].upper()
# Make sure the environment is one of the acceptable values.
if api_environment not in self._valid_['API_ENVIRONMENT']:
raise PayPalConfigError('Invalid API_ENVIRONMENT')
else:
self.API_ENVIRONMENT = api_environment
if kwargs.get('API_AUTHENTICATION_MODE'):
auth_mode = kwargs['API_AUTHENTICATION_MODE'].upper()
# Make sure the auth mode is one of the known/implemented methods.
if auth_mode not in self._valid_['API_AUTHENTICATION_MODE']:
choices = ", ".join(self._valid_['API_AUTHENTICATION_MODE'])
raise PayPalConfigError(
"Not a supported auth mode. Use one of: %s" % choices
)
else:
self.API_AUTHENTICATION_MODE = auth_mode
# Set the API endpoints, which is a cheesy way of saying API servers.
self.API_ENDPOINT = self._API_ENDPOINTS[self.API_AUTHENTICATION_MODE][self.API_ENVIRONMENT]
self.PAYPAL_URL_BASE = self._PAYPAL_URL_BASE[self.API_ENVIRONMENT]
# Set the CA_CERTS location. This can either be a None, a bool, or a
# string path.
if kwargs.get('API_CA_CERTS'):
self.API_CA_CERTS = kwargs['API_CA_CERTS']
if isinstance(self.API_CA_CERTS, basestring) and not os.path.exists(self.API_CA_CERTS):
# A CA Cert path was specified, but it's invalid.
raise PayPalConfigError('Invalid API_CA_CERTS')
# set the 3TOKEN required fields
if self.API_AUTHENTICATION_MODE == '3TOKEN':
for arg in ('API_USERNAME', 'API_PASSWORD', 'API_SIGNATURE'):
if arg not in kwargs:
raise PayPalConfigError('Missing in PayPalConfig: %s ' % arg)
setattr(self, arg, kwargs[arg])
for arg in ['HTTP_TIMEOUT']:
if arg in kwargs:
setattr(self, arg, kwargs[arg])
logger.debug(
'PayPalConfig object instantiated with kwargs: %s' % pformat(kwargs)
)
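# Illustrative 3-Token setup (all credentials below are placeholders):
#
#     config = PayPalConfig(API_USERNAME='user_api1.example.com',
#                           API_PASSWORD='xxxxxxxxxx',
#                           API_SIGNATURE='xxxxxxxxxx',
#                           API_ENVIRONMENT='SANDBOX')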
| eahneahn/free | lib/python2.7/site-packages/paypal/settings.py | Python | agpl-3.0 | 4,941 |
#!/usr/bin/env python
import os
import numpy as np
from ase.data import chemical_symbols
import matplotlib.pyplot as plt
from abipy.abilab import abiopen
from pyDFTutils.perovskite.perovskite_mode import label_zone_boundary, label_Gamma
from ase.units import Ha
from spglib import spglib
def displacement_cart_to_evec(displ_cart,
masses,
scaled_positions,
qpoint=None,
add_phase=True):
"""
displ_cart: cartisien displacement. (atom1_x, atom1_y, atom1_z, atom2_x, ...)
masses: masses of atoms.
scaled_postions: scaled postions of atoms.
qpoint: if phase needs to be added, qpoint must be given.
add_phase: whether to add phase to the eigenvectors.
"""
if add_phase and qpoint is None:
raise ValueError('qpoint must be given if adding phase is needed')
m = np.sqrt(np.kron(masses, [1, 1, 1]))
evec = displ_cart * m
if add_phase:
phase = [
np.exp(-2j * np.pi * np.dot(pos, qpoint))
for pos in scaled_positions
]
phase = np.kron(phase, [1, 1, 1])
evec *= phase
evec /= np.linalg.norm(evec)
return evec
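# Illustrative call (the ASE Atoms object and displacement array are assumed inputs):
#   evec = displacement_cart_to_evec(displ_cart, masses=atoms.get_masses(),
#                                    scaled_positions=atoms.get_scaled_positions(),
#                                    qpoint=[0.5, 0.5, 0.5], add_phase=True)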
def ixc_to_xc(ixc):
"""
translate ixc (positive: abinit. negative: libxc) to XC.
"""
xcdict = {
0: 'NO-XC',
1: 'LDA',
2: 'LDA-PZCA',
3: 'LDA-CA',
4: 'LDA-Winger',
5: 'LDA-Hedin-Lundqvist',
6: 'LDA-X-alpha',
7: 'LDA-PW92',
8: 'LDA-PW92-xonly',
9: 'LDA-PW92-xRPA',
11: 'GGA-PBE',
12: 'GGA-PBE-xonly',
14: 'GGA-revPBE',
15: 'GGA-RPBE',
16: 'GGA-HTCH93',
17: 'GGA-HTCH120',
23: 'GGA-WC',
40: 'Hartree-Fock',
41: 'GGA-PBE0',
42: 'GGA-PBE0-1/3',
-1009: 'LDA-PZCA',
-101130: 'GGA-PBE',
-106131: 'GGA-BLYP',
-106132: 'GGA-BP86',
-116133: 'GGA-PBEsol',
-118130: 'GGA-WC',
}
if ixc in xcdict:
return xcdict[ixc]
else:
return 'libxc_%s' % ixc
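# e.g. ixc_to_xc(11) -> 'GGA-PBE', ixc_to_xc(-116133) -> 'GGA-PBEsol';
# any code missing from the table falls back to 'libxc_<ixc>'.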
class mat_data():
def __init__(self,
name,
mag='PM',
description="None",
author='High Throughput Bot',
email='x.he@ulg.ac.be',
is_verified=False,
verification_info="",
                 tags=None
):
self._already_in_db = False
self.name = name
self.db_directory = None
self.all_data_directory = None
self.mag = mag
self.insert_time = None
self.update_time = None
self.log = ""
self.description = description
self.author = author
self.email = email
        self.tags = tags if tags is not None else []  # avoid a shared mutable default list
self.is_verified = is_verified
self.verification_info = verification_info
# properties in database. should be band | phonon
self.has_ebands = False
self.has_phonon = False
self.is_cubic_perovskite = True
self.cellpar = [0] * 6
self.cell = [0]*9
self.natoms = 0
self.chemical_symbols = []
self.masses = []
self.scaled_positions = []
self.ispin = 0
self.spinat = []
self.spgroup = 1
self.spgroup_name = 'P1'
self.ixc = 1
self.XC = 'PBEsol'
self.pp_type = 'ONCV'
self.pp_info = 'Not implemented yet.'
self.U_type=0
self.species=[]
self.zion=[]
self.U_l=[]
self.U_u=[]
self.U_j=[]
self.GSR_parameters = {}
self.energy = 0
self.efermi = 0
self.bandgap = 0
self.ebands = {}
self.kptrlatt=[]
self.usepaw=0
self.pawecutdg=0.0
self.nsppol=1
self.nspden=1
self.emacro = [0.0] * 9
self.becs = {}
self.elastic = []
self.nqpts = [1, 1, 1]
self.special_qpts = {}
self.phonon_mode_freqs = {}
self.phonon_mode_names = {}
self.phonon_mode_evecs = {}
self.phonon_mode_phdispl = {}
self.phonon_mode_freqs_LOTO = {}
self.phonon_mode_names_LOTO = {}
self.phonon_mode_evecs_LOTO = {}
self.phonon_mode_phdispl_LOTO = {}
def read_BAND_nc(self, fname, outputfile='Ebands.png', plot_ebands=True):
try:
band_file = abiopen(fname)
self.has_ebands = True
except Exception:
raise IOError("can't read %s" % fname)
self.efermi = band_file.energy_terms.e_fermie
        gap = band_file.ebands.fundamental_gaps
        # default to 0.0 / None so metals (no fundamental gap) do not raise AttributeError below
        self.gap = 0.0
        self.is_direct_gap = None
        if len(gap) != 0:
            for g in gap:
                self.gap = g.energy
                self.is_direct_gap = g.is_direct
        self.bandgap = self.gap
if plot_ebands:
fig, ax = plt.subplots()
fig = band_file.ebands.plot(ax=ax, show=False, ylims=[-7, 5])
fig.savefig(outputfile)
def read_OUT_nc(self, fname):
f = abiopen(fname)
self.invars = f.get_allvars()
for key in self.invars:
if isinstance(self.invars[key], np.ndarray):
self.invars[key] = tuple(self.invars[key])
self.spgroup = f.spgroup[0]
self.ixc = f.ixc[0]
self.XC = ixc_to_xc(self.ixc)
self.ecut = f.ecut[0]
self.species = [chemical_symbols[int(i)] for i in f.znucl]
if 'usepawu' in self.invars:
self.U_type= f.usepawu[0]
else:
self.U_type= 0
if self.U_type:
self.U_l = f.lpawu
self.U_u= [ x * Ha for x in f.upawu]
self.U_j= [ x* Ha for x in f.jpawu ]
#self.nband = f.nband[0]
self.kptrlatt = tuple(f.kptrlatt)
def print_scf_info(self):
for key, val in self.invars:
print("%s : %s\n" % (key, val))
def read_GSR_nc(self, fname):
f = abiopen(fname)
self.energy = f.energy
self.stress_tensor = f.cart_stress_tensor # unit ?
self.forces = np.array(f.cart_forces) # unit eV/ang
def read_DDB(self,
fname=None,
do_label=True,
workdir=None,
phonon_output_dipdip='phonon_band_dipdip.png',
phonon_output_nodipdip='phonon_band_nodipdip.png'):
"""
read phonon related properties from DDB file.
"""
self.has_phonon = True
ddb = abiopen(fname)
self.ddb_header = ddb.header
self.atoms = ddb.structure.to_ase_atoms()
self.natoms = len(self.atoms)
self.cellpar = self.atoms.get_cell_lengths_and_angles()
self.cell=self.atoms.get_cell().flatten()
self.masses = self.atoms.get_masses()
self.scaled_positions = self.atoms.get_scaled_positions()
self.chemical_symbols = self.atoms.get_chemical_symbols()
self.spgroup_name = spglib.get_spacegroup(self.atoms,symprec=1e-4)
self.ixc = self.ddb_header['ixc']
self.XC = ixc_to_xc( self.ixc)
self.ispin = self.ddb_header['nsppol']
self.spinat = self.ddb_header['spinat']
self.nband = self.ddb_header['nband']
self.ecut = self.ddb_header['ecut']
self.tsmear =self.ddb_header['tsmear']
self.usepaw =self.ddb_header['usepaw']
        self.pawecutdg = self.ddb_header.get('pawecutdg', 0.0)  # was mistakenly read from 'tsmear'
self.nsppol = self.ddb_header['nsppol']
self.nspden= self.ddb_header['nspden']
self.species = [chemical_symbols[int(i)] for i in self.ddb_header['znucl']]
self.zion = [int(x) for x in self.ddb_header['zion']]
self.znucl = [int(x) for x in self.ddb_header['znucl']]
emacror, becsr = ddb.anaget_emacro_and_becs()
emacro = emacror[0].cartesian_tensor
becs_array = becsr.values
becs = {}
for i, bec in enumerate(becs_array):
becs[str(i)] = bec
nqpts = ddb._guess_ngqpt()
qpts = tuple(ddb.qpoints.frac_coords)
self.emacro = emacro
self.becs = becs
self.nqpts = nqpts
self.qpts = qpts
for qpt in qpts:
qpt = tuple(qpt)
m = ddb.anaget_phmodes_at_qpoint(qpt)
#self.results['phonon'][qpt]['frequencies'] = m.phfreqs
#self.results['phonon'][qpt][
# 'eigen_displacements'] = m.phdispl_cart
qpoints, evals, evecs, edisps = self.phonon_band(
ddb,
lo_to_splitting=False,
phonon_output_dipdip=phonon_output_dipdip,
phonon_output_nodipdip=phonon_output_nodipdip)
#for i in range(15):
# print(evecs[0, :, i])
self.special_qpts = {
'X': (0, 0.5, 0.0),
'M': (0.5, 0.5, 0),
'R': (0.5, 0.5, 0.5)
}
zb_modes = self.label_zone_boundary_all(
qpoints, evals, evecs, label=do_label)
for qname in self.special_qpts:
self.phonon_mode_freqs[qname] = zb_modes[qname][0]
self.phonon_mode_names[qname] = zb_modes[qname][1]
self.phonon_mode_evecs[qname] = zb_modes[qname][2]
Gmodes = self.label_Gamma_all(qpoints, evals, evecs, label=do_label)
self.phonon_mode_freqs['Gamma'] = Gmodes[0]
self.phonon_mode_names['Gamma'] = Gmodes[1]
self.phonon_mode_evecs['Gamma'] = Gmodes[2]
def get_zb_mode(self, qname, mode_name):
"""
return the frequencies of mode name.
"""
ibranches = []
freqs = []
for imode, mode in enumerate(
self.results['phonon']['boundary_modes'][qname]):
freq, mname = mode
if mname == mode_name:
ibranches.append(imode)
freqs.append(freq)
return ibranches, freqs
def get_gamma_modes(self):
"""
return (Freqs, names, evecs)
"""
        return self.phonon_mode_freqs['Gamma'], self.phonon_mode_names['Gamma'], self.phonon_mode_evecs['Gamma']
def get_gamma_mode(self, mode_name):
"""
return the frequencies of mode name.
"""
ibranches = []
freqs = []
for imode, mode in enumerate(zip(self.phonon_mode_freqs['Gamma'], self.phonon_mode_names['Gamma'])):
freq, mname = mode
if mname == mode_name:
ibranches.append(imode)
freqs.append(freq)
return ibranches, freqs
def label_Gamma_all(self, qpoints, evals, evecs, label=True):
Gamma_mode_freqs = []
Gamma_mode_names = []
Gamma_mode_evecs = []
for i, qpt in enumerate(qpoints):
if np.isclose(qpt, [0, 0, 0], rtol=1e-5, atol=1e-3).all():
evecq = evecs[i]
for j, evec in enumerate(evecq.T):
freq = evals[i][j]
if label:
mode = label_Gamma(
evec=evec, masses=self.atoms.get_masses())
Gamma_mode_names.append(mode)
else:
Gamma_mode_names.append('')
Gamma_mode_freqs.append(freq)
Gamma_mode_evecs.append(np.real(evec))
return Gamma_mode_freqs, Gamma_mode_names, Gamma_mode_evecs
if Gamma_mode_names == []:
print("Warning: No Gamma point found in qpoints.\n")
return Gamma_mode_freqs, Gamma_mode_names, Gamma_mode_evecs
def label_zone_boundary_all(self, qpoints, evals, evecs, label=True):
mode_dict = {}
qdict = {'X': (0, 0.5, 0.0), 'M': (0.5, 0.5, 0), 'R': (0.5, 0.5, 0.5)}
for i, qpt in enumerate(qpoints):
for qname in qdict:
if np.isclose(qpt, qdict[qname], rtol=1e-5, atol=1e-3).all():
mode_freqs = []
mode_names = []
mode_evecs = []
#print "===================================="
#print qname
evecq = evecs[i]
for j, evec in enumerate(evecq.T):
freq = evals[i][j]
mode_freqs.append(freq)
if label:
mode = label_zone_boundary(qname, evec=evec)
mode_names.append(mode)
else:
mode_names.append('')
mode_evecs.append(np.real(evec))
mode_dict[qname] = (mode_freqs, mode_names, mode_evecs)
return mode_dict
def phonon_band(self,
ddb,
lo_to_splitting=False,
workdir=None,
phonon_output_dipdip='phonon_band_dipdip.png',
phonon_output_nodipdip='phonon_band_nodipdip.png',
show=False):
atoms = ddb.structure.to_ase_atoms()
if workdir is not None:
            workdir_dip = os.path.join(workdir, 'phbst_dipdip')
#if os.path.exists(workdir_dip):
# os.system('rm -r %s' % workdir_dip)
else:
workdir_dip = None
phbst, phdos = ddb.anaget_phbst_and_phdos_files(
nqsmall=10,
asr=1,
chneut=1,
dipdip=1,
verbose=1,
lo_to_splitting=True,
anaddb_kwargs={'alphon': 1},
workdir=workdir_dip,
#qptbounds=kpath_bounds,
)
fig, ax = plt.subplots(nrows=1, ncols=1)
#plt.tight_layout(pad=2.19)
#plt.axis('tight')
plt.gcf().subplots_adjust(left=0.17)
ax.axhline(0, linestyle='--', color='black')
ax.set_title(self.name)
ticks, labels = phbst.phbands._make_ticks_and_labels(qlabels=None)
fig.axes[0].set_xlim([ticks[0],ticks[-1]])
fig = phbst.phbands.plot(
ax=ax,
units='cm-1',
match_bands=False,
linewidth=1.7,
color='blue',
show=False)
fig.axes[0].grid(False)
if show:
plt.show()
if phonon_output_dipdip:
fig.savefig(phonon_output_dipdip)
plt.close()
if workdir is not None:
workdir_nodip = os.path.join(workdir, 'phbst_nodipdip')
#if os.path.exists(workdir_dip):
# os.system('rm -r %s' % workdir_nodip)
else:
workdir_nodip = None
phbst, phdos = ddb.anaget_phbst_and_phdos_files(
nqsmall=5,
asr=1,
chneut=1,
dipdip=0,
verbose=1,
lo_to_splitting=False,
anaddb_kwargs={'alphon': 1},
workdir=workdir_nodip
#qptbounds=kpath_bounds,
)
fig, ax = plt.subplots(nrows=1, ncols=1)
#plt.tight_layout(pad=2.19)
#plt.axis('tight')
plt.gcf().subplots_adjust(left=0.17)
ax.axhline(0, linestyle='--', color='black')
        ax.set_title(self.name)
ticks, labels = phbst.phbands._make_ticks_and_labels(qlabels=None)
fig.axes[0].set_xlim([ticks[0],ticks[-1]])
fig = phbst.phbands.plot(
ax=ax,
units='cm-1',
match_bands=False,
linewidth=1.4,
color='blue',
show=False)
fig.axes[0].grid(False)
if show:
plt.show()
        if phonon_output_nodipdip:
fig.savefig(phonon_output_nodipdip)
plt.close()
qpoints = phbst.qpoints.frac_coords
nqpts = len(qpoints)
nbranch = 3 * len(atoms)
evals = np.zeros([nqpts, nbranch])
evecs = np.zeros([nqpts, nbranch, nbranch], dtype='complex128')
edisps = np.zeros([nqpts, nbranch, nbranch], dtype='complex128')
masses = atoms.get_masses()
scaled_positions = atoms.get_scaled_positions()
for iqpt, qpt in enumerate(qpoints):
for ibranch in range(nbranch):
phmode = phbst.get_phmode(qpt, ibranch)
                evals[iqpt, ibranch] = phmode.freq * 8065.6  # convert eV to cm^-1
evec = displacement_cart_to_evec(
phmode.displ_cart,
masses,
scaled_positions,
qpoint=qpt,
add_phase=False)
evecs[iqpt, :, ibranch] = evec / np.linalg.norm(evec)
edisps[iqpt, :, ibranch] = phmode.displ_cart
return qpoints, evals, evecs, edisps
def test():
m = mat_data()
m.read_BAND_nc('./BAND_GSR.nc')
m.read_OUT_nc('./OUT.nc')
m.read_DDB('out_DDB')
#test()
| mailhexu/pyDFTutils | pyDFTutils/phonon/parser.py | Python | lgpl-3.0 | 16,721 |
class AbstractPlugin(object):
@staticmethod
def read_from_file(stream):
        raise NotImplementedError('read_from_file')
@staticmethod
def write_to_file(stream, data):
raise NotImplementedError('write_to_file')
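# Illustrative sketch only (not part of the original module): a hypothetical
# concrete plugin would override both static methods, for example:
#
#     class JsonPlugin(AbstractPlugin):
#         @staticmethod
#         def read_from_file(stream):
#             import json
#             return json.load(stream)
#
#         @staticmethod
#         def write_to_file(stream, data):
#             import json
#             json.dump(data, stream)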
| firemark/homework-parser | homework_parser/plugin.py | Python | mit | 239 |
# -*- coding: utf-8 -*-
"""
flask_jwt
~~~~~~~~~
Flask-JWT module
"""
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import wraps
import jwt
from flask import current_app, request, jsonify, _request_ctx_stack
from flask.views import MethodView
from werkzeug.local import LocalProxy
__version__ = '0.1.0'
current_user = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'current_user', None))
_jwt = LocalProxy(lambda: current_app.extensions['jwt'])
def _default_payload_handler(user):
return {
'user_id': user.id,
'exp': datetime.utcnow() + current_app.config['JWT_EXPIRATION_DELTA']
}
def _default_encode_handler(payload):
return jwt.encode(
payload,
current_app.config['JWT_SECRET_KEY'],
current_app.config['JWT_ALGORITHM']
).decode('utf-8')
def _default_decode_handler(token):
return jwt.decode(
token,
current_app.config['JWT_SECRET_KEY'],
current_app.config['JWT_VERIFY'],
current_app.config['JWT_VERIFY_EXPIRATION'],
current_app.config['JWT_LEEWAY']
)
CONFIG_DEFAULTS = {
'JWT_DEFAULT_REALM': 'Login Required',
'JWT_AUTH_URL_RULE': '/auth',
'JWT_AUTH_ENDPOINT': 'jwt',
'JWT_ENCODE_HANDLER': _default_encode_handler,
'JWT_DECODE_HANDLER': _default_decode_handler,
'JWT_PAYLOAD_HANDLER': _default_payload_handler,
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': timedelta(seconds=300)
}
def jwt_required(realm=None):
"""View decorator that requires a valid JWT token to be present in the request
:param realm: an optional realm
"""
def wrapper(fn):
@wraps(fn)
def decorator(*args, **kwargs):
verify_jwt(realm)
return fn(*args, **kwargs)
return decorator
return wrapper
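# Illustrative usage sketch (assumes an existing Flask ``app``; the route and
# view names are hypothetical):
#
#     @app.route('/protected')
#     @jwt_required()
#     def protected():
#         return jsonify(user_id=current_user.id)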
class JWTError(Exception):
def __init__(self, error, description, status_code=400, headers=None):
self.error = error
self.description = description
self.status_code = status_code
self.headers = headers
def verify_jwt(realm=None):
"""Does the actual work of verifying the JWT data in the current request.
This is done automatically for you by `jwt_required()` but you could call it manually.
Doing so would be useful in the context of optional JWT access in your APIs.
:param realm: an optional realm
"""
realm = realm or current_app.config['JWT_DEFAULT_REALM']
auth = request.headers.get('Authorization', None)
if auth is None:
raise JWTError('Authorization Required', 'Authorization header was missing', 401, {
'WWW-Authenticate': 'JWT realm="%s"' % realm
})
parts = auth.split()
if parts[0].lower() != 'bearer':
raise JWTError('Invalid JWT header', 'Unsupported authorization type')
elif len(parts) == 1:
raise JWTError('Invalid JWT header', 'Token missing')
elif len(parts) > 2:
raise JWTError('Invalid JWT header', 'Token contains spaces')
try:
handler = current_app.config['JWT_DECODE_HANDLER']
payload = handler(parts[1])
except jwt.ExpiredSignature:
raise JWTError('Invalid JWT', 'Token is expired')
except jwt.DecodeError:
raise JWTError('Invalid JWT', 'Token is undecipherable')
_request_ctx_stack.top.current_user = user = _jwt.user_callback(payload)
if user is None:
raise JWTError('Invalid JWT', 'User does not exist')
class JWTAuthView(MethodView):
def post(self):
data = request.get_json(force=True)
username = data.get('username', None)
password = data.get('password', None)
criterion = [username, password, len(data) == 2]
if not all(criterion):
raise JWTError('Bad Request', 'Missing required credentials', status_code=400)
user = _jwt.authentication_callback(username=username, password=password)
if user:
payload_handler = current_app.config['JWT_PAYLOAD_HANDLER']
payload = payload_handler(user)
encode_handler = current_app.config['JWT_ENCODE_HANDLER']
return jsonify({'token': encode_handler(payload)})
else:
raise JWTError('Bad Request', 'Invalid credentials')
class JWT(object):
def __init__(self, app=None):
if app is not None:
self.app = app
self.init_app(app)
else:
self.app = None
def init_app(self, app):
for k, v in CONFIG_DEFAULTS.items():
app.config.setdefault(k, v)
app.config.setdefault('JWT_SECRET_KEY', app.config['SECRET_KEY'])
url_rule = app.config.get('JWT_AUTH_URL_RULE', None)
endpoint = app.config.get('JWT_AUTH_ENDPOINT', None)
if url_rule and endpoint:
auth_view = JWTAuthView.as_view(app.config['JWT_AUTH_ENDPOINT'])
app.add_url_rule(url_rule, methods=['POST'], view_func=auth_view)
app.errorhandler(JWTError)(self._on_jwt_error)
if not hasattr(app, 'extensions'): # pragma: no cover
app.extensions = {}
app.extensions['jwt'] = self
def _on_jwt_error(self, e):
return getattr(self, 'error_callback', self._error_callback)(e)
def _error_callback(self, e):
return jsonify(OrderedDict([
('status_code', e.status_code),
('error', e.error),
('description', e.description),
])), e.status_code, e.headers
def authentication_handler(self, callback):
"""Specifies the authentication handler function. This function receives two
        positional arguments: the first is the username and the second is the password.
It should return an object representing the authenticated user. Example::
@jwt.authentication_handler
def authenticate(username, password):
if username == 'joe' and password == 'pass':
return User(id=1, username='joe')
:param callback: the authentication handler function
"""
self.authentication_callback = callback
return callback
def user_handler(self, callback):
"""Specifies the user handler function. This function receives the token payload as
its only positional argument. It should return an object representing the current
user. Example::
@jwt.user_handler
def load_user(payload):
if payload['user_id'] == 1:
return User(id=1, username='joe')
:param callback: the user handler function
"""
self.user_callback = callback
return callback
def error_handler(self, callback):
"""Specifies the error handler function. This function receives a JWTError instance as
its only positional argument. It can optionally return a response. Example::
@jwt.error_handler
def error_handler(e):
return "Something bad happened", 400
:param callback: the error handler function
"""
self.error_callback = callback
return callback
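# Illustrative wiring sketch (assumes a Flask ``app`` and a hypothetical
# ``User`` model; none of these names are defined in this module):
#
#     app = Flask(__name__)
#     app.config['SECRET_KEY'] = 'super-secret'
#     jwt = JWT(app)
#
#     @jwt.authentication_handler
#     def authenticate(username, password):
#         if username == 'joe' and password == 'pass':
#             return User(id=1, username='joe')
#
#     @jwt.user_handler
#     def load_user(payload):
#         if payload['user_id'] == 1:
#             return User(id=1, username='joe')
#
# With the default config above, a POST to /auth containing the JSON body
# {"username": "joe", "password": "pass"} responds with {"token": "..."}.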
| svenstaro/flask-jwt | flask_jwt/__init__.py | Python | mit | 7,241 |
from django.contrib.auth.models import User
from django.db import models
from .utils import create_slug
class BaseModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
class Meta():
abstract = True
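# Illustrative sketch (hypothetical model, not part of this app): concrete
# models pick up the created/last_updated fields by subclassing BaseModel:
#
#     class Post(BaseModel):
#         title = models.CharField(max_length=200)
#         body = models.TextField()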
| makaimc/txt2react | core/models.py | Python | mit | 292 |
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import web
from karesansui.lib.rest import Rest, auth
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, CHECK_CHAR
from karesansui.lib.utils import is_param, json_dumps
from karesansui.db.access.tag import findbyhost1guestall
class GuestTag(Rest):
@auth
def _GET(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
tags = findbyhost1guestall(self.orm, host_id)
if not tags:
self.logger.debug("No tags is found.")
return web.notfound()
if self.is_part() is True:
self.view.tags = tags
machine_ids = {}
for tag in tags:
tag_id = str(tag.id)
machine_ids[tag_id] = []
for machine in tag.machine:
if not machine.is_deleted:
machine_ids[tag_id].append("tag_machine%s"% machine.id)
machine_ids[tag_id] = " ".join(machine_ids[tag_id])
self.view.machine_ids = machine_ids
return True
elif self.is_json() is True:
tags_json = []
for tag in tags:
tags_json.append(tag.get_json(self.me.languages))
self.view.tags = json_dumps(tags_json)
return True
else:
return web.nomethod()
urls = (
'/host/(\d+)/guest/tag/?(\.part|\.json)$', GuestTag,
)
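# Illustrative requests matched by the pattern above (host id is hypothetical):
# GET /host/1/guest/tag.part fills self.view.tags for the partial template,
# while GET /host/1/guest/tag.json returns the tags serialized via json_dumps.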
| karesansui/karesansui | karesansui/gadget/guesttag.py | Python | mit | 2,641 |
# Copyright 2009 Carl Sverre
#
# This file is part of FlickrFortune.
#
# FlickrFortune is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FlickrFortune is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FlickrFortune. If not, see <http://www.gnu.org/licenses/>.
# Config
# Your flickr api key
apiKey = "YOUR FLICKR API KEY HERE"
# Change wallpaper command with %s for filename
setWallpaperCommand = 'xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/image-path -s "%s"'
# Where to store the wallpapers
wallpaperDir = "FULL PATH"
# The location of this file
localDir = "FULL PATH"
# the wallpaper prefix
wallpaperPrefix = "wallpaper"
# your screen size (w,h)
wallpaperSize = (1440,900)
# the fontsize for the text
fontsize = 25
# Max errors before quitting
maxErrors = 10
# how to sort the pics (choose [0,1,2])
sortType = ["interestingness-desc",
"interestingness-asc",
"relevance"][2]
noiseWords = ["shakespeare",
"william",
"twain",
"wodehouse",
"ocasey",
"george",
"gobel",
"carlsverre"]
| carlsverre/FlickrFortune | flickrconfig_sample.py | Python | gpl-3.0 | 1,580 |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import copy
import math
import sys
import time
from collections import namedtuple
from PIL import Image
def is_nude(path_or_io):
nude = Nude(path_or_io)
return nude.parse().result
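# Illustrative usage sketch (the file name is hypothetical):
#
#     if is_nude("photo.jpg"):
#         print("Skin regions suggest nudity")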
class Nude(object):
Skin = namedtuple("Skin", "id skin region x y checked")
def __init__(self, path_or_io):
if isinstance(path_or_io, Image.Image):
self.image = path_or_io
elif isinstance(path_or_io, (str, file)):
self.image = Image.open(path_or_io)
else:
self.image = path_or_io
bands = self.image.getbands()
# convert greyscale to rgb
if len(bands) == 1:
new_img = Image.new("RGB", self.image.size)
new_img.paste(self.image)
f = self.image.filename
self.image = new_img
self.image.filename = f
self.skin_map = []
self.skin_regions = []
self.detected_regions = []
self.merge_regions = []
self.last_from, self.last_to = -1, -1
self.result = None
self.message = None
self.width, self.height = self.image.size
self.total_pixels = self.width * self.height
def resize(self, maxwidth=1000, maxheight=1000):
"""
Will resize the image proportionately based on maxwidth and maxheight.
        NOTE: This may affect the result of the detection algorithm.
Return value is 0 if no change was made, 1 if the image was changed
based on width, 2 if the image was changed based on height, 3 if it
was changed on both
maxwidth - The max size for the width of the picture
maxheight - The max size for the height of the picture
Both can be set to False to ignore
"""
ret = 0
if maxwidth:
if self.width > maxwidth:
wpercent = (maxwidth / float(self.width))
hsize = int((float(self.height) * float(wpercent)))
fname = self.image.filename
self.image = self.image.resize((maxwidth, hsize), Image.ANTIALIAS)
self.image.filename = fname
self.width, self.height = self.image.size
self.total_pixels = self.width * self.height
ret += 1
if maxheight:
if self.height > maxheight:
hpercent = (maxheight / float(self.height))
wsize = int((float(self.width) * float(hpercent)))
fname = self.image.filename
self.image = self.image.resize((wsize, maxheight), Image.ANTIALIAS)
self.image.filename = fname
self.width, self.height = self.image.size
self.total_pixels = self.width * self.height
ret += 2
return ret
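    # Illustrative example (hypothetical sizes): for a 2000x1000 image,
    # resize(maxwidth=1000, maxheight=1000) scales it to 1000x500 and returns 1,
    # because only the width limit triggered a change.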
def parse(self):
if self.result:
return self
pixels = self.image.load()
for y in range(self.height):
for x in range(self.width):
r = pixels[x, y][0] # red
g = pixels[x, y][1] # green
b = pixels[x, y][2] # blue
_id = x + y * self.width + 1
if not self._classify_skin(r, g, b):
self.skin_map.append(self.Skin(_id, False, 0, x, y, False))
else:
self.skin_map.append(self.Skin(_id, True, 0, x, y, False))
region = -1
check_indexes = [_id - 2,
_id - self.width - 2,
_id - self.width - 1,
_id - self.width]
checker = False
for index in check_indexes:
try:
self.skin_map[index]
except IndexError:
break
if self.skin_map[index].skin:
if (self.skin_map[index].region != region and
region != -1 and
self.last_from != region and
self.last_to != self.skin_map[index].region):
self._add_merge(region, self.skin_map[index].region)
region = self.skin_map[index].region
checker = True
if not checker:
_skin = self.skin_map[_id - 1]._replace(region=len(self.detected_regions))
self.skin_map[_id - 1] = _skin
self.detected_regions.append([self.skin_map[_id - 1]])
continue
else:
if region > -1:
try:
self.detected_regions[region]
except IndexError:
self.detected_regions.append([])
_skin = self.skin_map[_id - 1]._replace(region=region)
self.skin_map[_id - 1] = _skin
self.detected_regions[region].append(self.skin_map[_id - 1])
self._merge(self.detected_regions, self.merge_regions)
self._analyse_regions()
return self
def inspect(self):
_nude_class = "{_module}.{_class}:{_addr}".format(_module=self.__class__.__module__,
_class=self.__class__.__name__,
_addr=hex(id(self)))
_image = "'%s' '%s' '%dx%d'" % (
self.image.filename, self.image.format, self.width, self.height)
return "#<{_nude_class}({_image}): result={_result} message='{_message}'>".format(
_nude_class=_nude_class, _image=_image, _result=self.result, _message=self.message)
def _add_merge(self, _from, _to):
self.last_from = _from
self.last_to = _to
from_index = -1
to_index = -1
for index, region in enumerate(self.merge_regions):
for r_index in region:
if r_index == _from:
from_index = index
if r_index == _to:
to_index = index
if from_index != -1 and to_index != -1:
if from_index != to_index:
_tmp = copy.copy(self.merge_regions[from_index])
_tmp.extend(self.merge_regions[to_index])
self.merge_regions[from_index] = _tmp
del(self.merge_regions[to_index])
return
if from_index == -1 and to_index == -1:
self.merge_regions.append([_from, _to])
return
if from_index != -1 and to_index == -1:
self.merge_regions[from_index].append(_to)
return
if from_index == -1 and to_index != -1:
self.merge_regions[to_index].append(_from)
return
# function for merging detected regions
def _merge(self, detected_regions, merge_regions):
new_detected_regions = []
# merging detected regions
for index, region in enumerate(merge_regions):
try:
new_detected_regions[index]
except IndexError:
new_detected_regions.append([])
for r_index in region:
_tmp = copy.copy(new_detected_regions[index])
_tmp.extend(detected_regions[r_index])
new_detected_regions[index] = _tmp
detected_regions[r_index] = []
# push the rest of the regions to the detRegions array
# (regions without merging)
for region in detected_regions:
if len(region) > 0:
new_detected_regions.append(region)
# clean up
self._clear_regions(new_detected_regions)
# clean up function
# only pushes regions which are bigger than a specific amount to the final result
def _clear_regions(self, detected_regions):
for region in detected_regions:
if len(region) > 30:
self.skin_regions.append(region)
def _analyse_regions(self):
# if there are less than 3 regions
if len(self.skin_regions) < 3:
self.message = "Less than 3 skin regions ({_skin_regions_size})".format(
_skin_regions_size=len(self.skin_regions))
self.result = False
return self.result
# sort the skin regions
self.skin_regions = sorted(self.skin_regions, key=lambda s: len(s),
reverse=True)
# count total skin pixels
total_skin = float(sum([len(skin_region) for skin_region in self.skin_regions]))
# check if there are more than 15% skin pixel in the image
if total_skin / self.total_pixels * 100 < 15:
# if the percentage lower than 15, it's not nude!
self.message = "Total skin percentage lower than 15 (%.3f%%)" % (
total_skin / self.total_pixels * 100)
self.result = False
return self.result
# check if the largest skin region is less than 35% of the total skin count
# AND if the second largest region is less than 30% of the total skin count
# AND if the third largest region is less than 30% of the total skin count
if len(self.skin_regions[0]) / total_skin * 100 < 35 and \
len(self.skin_regions[1]) / total_skin * 100 < 30 and \
len(self.skin_regions[2]) / total_skin * 100 < 30:
self.message = 'Less than 35%, 30%, 30% skin in the biggest regions'
self.result = False
return self.result
# check if the number of skin pixels in the largest region is
# less than 45% of the total skin count
if len(self.skin_regions[0]) / total_skin * 100 < 45:
self.message = "The biggest region contains less than 45 (%.3f%%)" % (
len(self.skin_regions[0]) / total_skin * 100)
self.result = False
return self.result
# TODO:
# build the bounding polygon by the regions edge values:
# Identify the leftmost, the uppermost, the rightmost, and the lowermost
# skin pixels of the three largest skin regions.
# Use these points as the corner points of a bounding polygon.
# TODO:
# check if the total skin count is less than 30% of the total number of pixels
# AND the number of skin pixels within the bounding polygon is
# less than 55% of the size of the polygon if this condition is True, it's not nude.
# TODO: include bounding polygon functionality
# if there are more than 60 skin regions and the average intensity
# within the polygon is less than 0.25 the image is not nude
if len(self.skin_regions) > 60:
self.message = "More than 60 skin regions ({_skin_regions_size})".format(
_skin_regions_size=len(self.skin_regions))
self.result = False
return self.result
# otherwise it is nude
self.message = "Nude!!"
self.result = True
return self.result
# A Survey on Pixel-Based Skin Color Detection Techniques
def _classify_skin(self, r, g, b):
rgb_classifier = r > 95 and \
g > 40 and g < 100 and \
b > 20 and \
max([r, g, b]) - min([r, g, b]) > 15 and \
abs(r - g) > 15 and \
r > g and \
r > b
nr, ng, nb = self._to_normalized(r, g, b)
norm_rgb_classifier = nr / ng > 1.185 and \
float(r * b) / ((r + g + b) ** 2) > 0.107 and \
float(r * g) / ((r + g + b) ** 2) > 0.112
# TODO: Add normalized HSI, HSV, and a few non-parametric skin models too
h, s, v = self._to_hsv(r, g, b)
hsv_classifier = h > 0 and \
h < 35 and \
s > 0.23 and \
s < 0.68
y, cb, cr = self._to_ycbcr(r, g, b)
# Based on this paper http://research.ijcaonline.org/volume94/number6/pxc3895695.pdf
ycbcr_classifier = 97.5 <= cb <= 142.5 and 134 <= cr <= 176
nh, ns, nv = self._to_normalized(h, s, v)
# norm_hsv_classifier =
# ycc doesn't work
return rgb_classifier or norm_rgb_classifier or hsv_classifier or ycbcr_classifier
def _to_normalized_hsv(self, h, s, v):
if h == 0:
h = 0.0001
if s == 0:
s = 0.0001
if v == 0:
v = 0.0001
_sum = float(h + s + v)
return [h / 360.0, s / 100.0, v / 100.0]
def _to_normalized(self, r, g, b):
if r == 0:
r = 0.0001
if g == 0:
g = 0.0001
if b == 0:
b = 0.0001
_sum = float(r + g + b)
return [r / _sum, g / _sum, b / _sum]
def _to_ycbcr(self, r, g, b):
# Copied from here.
# http://stackoverflow.com/questions/19459831/rgb-to-ycbcr-conversion-problems
y = .299*r + .587*g + .114*b
cb = 128 - 0.168736*r - 0.331364*g + 0.5*b
cr = 128 + 0.5*r - 0.418688*g - 0.081312*b
return y, cb, cr
def _to_hsv(self, r, g, b):
h = 0
_sum = float(r + g + b)
_max = float(max([r, g, b]))
_min = float(min([r, g, b]))
diff = float(_max - _min)
if _sum == 0:
_sum = 0.0001
if _max == r:
if diff == 0:
h = sys.maxsize
else:
h = (g - b) / diff
elif _max == g:
h = 2 + ((g - r) / diff)
else:
h = 4 + ((r - g) / diff)
h *= 60
if h < 0:
h += 360
return [h, 1.0 - (3.0 * (_min / _sum)), (1.0 / 3.0) * _max]
def _testfile(fname, resize=False):
start = time.time()
n = Nude(fname)
if resize:
n.resize(maxheight=800, maxwidth=600)
n.parse()
totaltime = int(math.ceil(time.time() - start))
size = str(n.height) + 'x' + str(n.width)
return (fname, n.result, totaltime, size, n.message)
def _poolcallback(results):
fname, result, totaltime, size, message = results
print(fname, result, sep="\t")
def _poolcallbackverbose(results):
fname, result, totaltime, size, message = results
print(fname, result, totaltime, size, message, sep=', ')
def main():
"""
Command line interface
"""
import argparse
import os
import multiprocessing
parser = argparse.ArgumentParser(description='Detect nudity in images.')
parser.add_argument('files', metavar='image', nargs='+',
help='Images you wish to test')
parser.add_argument('-r', '--resize', action='store_true',
help='Reduce image size to increase speed of scanning')
parser.add_argument('-t', '--threads', metavar='int', type=int, required=False, default=0,
help='The number of threads to start.')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if args.threads <= 1:
args.threads = 0
if len(args.files) < args.threads:
args.threads = len(args.files)
callback = _poolcallback
if args.verbose:
print("#File Name, Result, Scan Time(sec), Image size, Message")
callback = _poolcallbackverbose
# If the user tuned on multi processing
if(args.threads):
threadlist = []
pool = multiprocessing.Pool(args.threads)
for fname in args.files:
if os.path.isfile(fname):
threadlist.append(pool.apply_async(_testfile, (fname, ),
{'resize': args.resize}, callback))
else:
print(fname, "is not a file")
pool.close()
try:
for t in threadlist:
t.wait()
except KeyboardInterrupt:
pool.terminate()
pool.join()
# Run without multiprocessing
else:
for fname in args.files:
if os.path.isfile(fname):
callback(_testfile(fname, resize=args.resize))
else:
print(fname, "is not a file")
if __name__ == "__main__":
main()
| fffy2366/image-processing | tests/python/nude.py | Python | mit | 16,501 |
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Module to support the loading and conversion of a GRIB2 message into
cube metadata.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
from collections import namedtuple, Iterable, OrderedDict
from datetime import datetime, timedelta
import math
import threading
import warnings
import cartopy.crs as ccrs
from cf_units import CALENDAR_GREGORIAN, date2num, Unit
import numpy as np
import numpy.ma as ma
from iris.aux_factory import HybridPressureFactory
import iris.coord_systems as icoord_systems
from iris.coords import AuxCoord, DimCoord, CellMethod
from iris.exceptions import TranslationError
from iris.fileformats.grib import grib_phenom_translation as itranslation
from iris.fileformats.rules import ConversionMetadata, Factory, Reference
from iris.util import _is_circular
# Restrict the names imported from this namespace.
__all__ = ['convert']
options = threading.local()
options.warn_on_unsupported = False
options.support_hindcast_values = True
ScanningMode = namedtuple('ScanningMode', ['i_negative',
'j_positive',
'j_consecutive',
'i_alternative'])
ProjectionCentre = namedtuple('ProjectionCentre',
['south_pole_on_projection_plane',
'bipolar_and_symmetric'])
ResolutionFlags = namedtuple('ResolutionFlags',
['i_increments_given',
'j_increments_given',
'uv_resolved'])
FixedSurface = namedtuple('FixedSurface', ['standard_name',
'long_name',
'units'])
# Regulations 92.1.6.
_GRID_ACCURACY_IN_DEGREES = 1e-6 # 1/1,000,000 of a degree
# Reference Common Code Table C-1.
_CENTRES = {
'ecmf': 'European Centre for Medium Range Weather Forecasts'
}
# Reference Code Table 1.0
_CODE_TABLES_MISSING = 255
# UDUNITS-2 units time string. Reference GRIB2 Code Table 4.4.
_TIME_RANGE_UNITS = {
0: 'minutes',
1: 'hours',
2: 'days',
# 3: 'months', Unsupported
# 4: 'years', Unsupported
# 5: '10 years', Unsupported
# 6: '30 years', Unsupported
# 7: '100 years', Unsupported
# 8-9 Reserved
10: '3 hours',
11: '6 hours',
12: '12 hours',
13: 'seconds'
}
# Reference Code Table 4.5.
_FIXED_SURFACE = {
100: FixedSurface(None, 'pressure', 'Pa'), # Isobaric surface
103: FixedSurface(None, 'height', 'm'), # Height level above ground
    106: FixedSurface('depth', 'depth_below_land_surface', 'm')  # required for NCMRWF
}
_TYPE_OF_FIXED_SURFACE_MISSING = 255
# Reference Code Table 6.0
_BITMAP_CODE_PRESENT = 0
_BITMAP_CODE_NONE = 255
# Reference Code Table 4.10.
_STATISTIC_TYPE_NAMES = {
0: 'mean',
1: 'sum',
2: 'maximum',
3: 'minimum',
6: 'standard_deviation'
}
# Reference Code Table 4.11.
_STATISTIC_TYPE_OF_TIME_INTERVAL = {
2: 'same start time of forecast, forecast time is incremented'
}
# NOTE: Our test data contains the value 2, which is all we currently support.
# The exact interpretation of this is still unclear.
# Class containing details of a probability analysis.
Probability = namedtuple('Probability',
('probability_type_name', 'threshold'))
# Regulation 92.1.12
def unscale(value, factor):
"""
Implements Regulation 92.1.12.
Args:
* value:
Scaled value or sequence of scaled values.
* factor:
Scale factor or sequence of scale factors.
Returns:
For scalar value and factor, the unscaled floating point
result is returned. If either value and/or factor are
MDI, then :data:`numpy.ma.masked` is returned.
For sequence value and factor, the unscaled floating point
:class:`numpy.ndarray` is returned. If either value and/or
factor contain MDI, then :class:`numpy.ma.core.MaskedArray`
is returned.
"""
_unscale = lambda v, f: v / 10.0 ** f
if isinstance(value, Iterable) or isinstance(factor, Iterable):
def _masker(item):
result = ma.masked_equal(item, _MDI)
if ma.count_masked(result):
# Circumvent downstream NumPy "RuntimeWarning"
# of "overflow encountered in power" in _unscale
# for data containing _MDI.
result.data[result.mask] = 0
return result
value = _masker(value)
factor = _masker(factor)
result = _unscale(value, factor)
if ma.count_masked(result) == 0:
result = result.data
else:
result = ma.masked
if value != _MDI and factor != _MDI:
result = _unscale(value, factor)
return result
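# Illustrative examples of the scaling rule (values chosen for this sketch):
#
#     unscale(1234, 2) # -> 12.34, i.e. 1234 / 10**2
#     unscale(np.array([1234, _MDI]), np.array([2, 2])) # -> masked where MDI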
# Regulations 92.1.4 and 92.1.5.
_MDI = 2 ** 32 - 1
# Note:
# 1. Integer "on-disk" values (aka. coded keys) in GRIB messages:
# - Are 8-, 16-, or 32-bit.
# - Are either signed or unsigned, with signed values stored as
# sign-and-magnitude (*not* twos-complement).
# - Use all bits set to indicate a missing value (MDI).
# 2. Irrespective of the on-disk form, the ECMWF GRIB API *always*:
# - Returns values as 64-bit signed integers, either as native
# Python 'int' or numpy 'int64'.
# - Returns missing values as 2**32 - 1, but not all keys are
# defined as supporting missing values.
# NB. For keys which support missing values, the MDI value is reliably
# distinct from the valid range of either signed or unsigned 8-, 16-,
# or 32-bit values. For example:
# unsigned 32-bit:
# min = 0b000...000 = 0
# max = 0b111...110 = 2**32 - 2
# MDI = 0b111...111 = 2**32 - 1
# signed 32-bit:
# MDI = 0b111...111 = 2**32 - 1
# min = 0b111...110 = -(2**31 - 2)
# max = 0b011...111 = 2**31 - 1
# Non-standardised usage for negative forecast times.
def _hindcast_fix(forecast_time):
"""Return a forecast time interpreted as a possibly negative value."""
uft = np.uint32(forecast_time)
HIGHBIT = 2**30
# Workaround grib api's assumption that forecast time is positive.
    # Handles correctly encoded -ve forecast times down to about -1 billion.
if 2 * HIGHBIT < uft < 3 * HIGHBIT:
original_forecast_time = forecast_time
forecast_time = -(uft - 2 * HIGHBIT)
if options.warn_on_unsupported:
msg = ('Re-interpreting large grib forecastTime '
'from {} to {}.'.format(original_forecast_time,
forecast_time))
warnings.warn(msg)
return forecast_time
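# Illustrative example (hypothetical encoding): a coded forecastTime of
# 2**31 + 6 falls between 2*HIGHBIT and 3*HIGHBIT, so _hindcast_fix
# re-interprets it as the negative forecast time -6.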
def fixup_float32_from_int32(value):
"""
Workaround for use when reading an IEEE 32-bit floating-point value
which the ECMWF GRIB API has erroneously treated as a 4-byte signed
integer.
"""
# Convert from two's complement to sign-and-magnitude.
# NB. The bit patterns 0x00000000 and 0x80000000 will both be
# returned by the ECMWF GRIB API as an integer 0. Because they
# correspond to positive and negative zero respectively it is safe
# to treat an integer 0 as a positive zero.
if value < 0:
value = 0x80000000 - value
value_as_uint32 = np.array(value, dtype='u4')
value_as_float32 = value_as_uint32.view(dtype='f4')
return float(value_as_float32)
def fixup_int32_from_uint32(value):
"""
Workaround for use when reading a signed, 4-byte integer which the
ECMWF GRIB API has erroneously treated as an unsigned, 4-byte
integer.
NB. This workaround is safe to use with values which are already
treated as signed, 4-byte integers.
"""
if value >= 0x80000000:
value = 0x80000000 - value
return value
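# Illustrative examples (sketch only):
#
#     fixup_int32_from_uint32(7) # -> 7 (already a valid signed value)
#     fixup_int32_from_uint32(0x80000005) # -> -5 (sign-and-magnitude re-read)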
###############################################################################
#
# Identification Section 1
#
###############################################################################
def reference_time_coord(section):
"""
Translate section 1 reference time according to its significance.
Reference section 1, year octets 13-14, month octet 15, day octet 16,
hour octet 17, minute octet 18, second octet 19.
Returns:
The scalar reference time :class:`iris.coords.DimCoord`.
"""
# Look-up standard name by significanceOfReferenceTime.
_lookup = {0: 'time',
1: 'forecast_reference_time',
2: 'time',
3: 'time'}
# Calculate the reference time and units.
dt = datetime(section['year'], section['month'], section['day'],
section['hour'], section['minute'], section['second'])
# XXX Defaulting to a Gregorian calendar.
# Current GRIBAPI does not cover GRIB Section 1 - Octets 22-nn (optional)
# which are part of GRIB spec v12.
unit = Unit('hours since epoch', calendar=CALENDAR_GREGORIAN)
point = unit.date2num(dt)
# Reference Code Table 1.2.
significanceOfReferenceTime = section['significanceOfReferenceTime']
standard_name = _lookup.get(significanceOfReferenceTime)
if standard_name is None:
        msg = 'Identification section 1 contains an unsupported significance ' \
'of reference time [{}]'.format(significanceOfReferenceTime)
raise TranslationError(msg)
# Create the associated reference time of data coordinate.
coord = DimCoord(point, standard_name=standard_name, units=unit)
return coord
###############################################################################
#
# Grid Definition Section 3
#
###############################################################################
def projection_centre(projectionCentreFlag):
"""
Translate the projection centre flag bitmask.
Reference GRIB2 Flag Table 3.5.
Args:
* projectionCentreFlag
Message section 3, coded key value.
Returns:
A :class:`collections.namedtuple` representation.
"""
south_pole_on_projection_plane = bool(projectionCentreFlag & 0x80)
bipolar_and_symmetric = bool(projectionCentreFlag & 0x40)
return ProjectionCentre(south_pole_on_projection_plane,
bipolar_and_symmetric)
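# Illustrative example (sketch only): projection_centre(0xC0) has both flag
# bits set and yields ProjectionCentre(south_pole_on_projection_plane=True,
# bipolar_and_symmetric=True), whereas projection_centre(0) yields
# ProjectionCentre(False, False).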
def scanning_mode(scanningMode):
"""
Translate the scanning mode bitmask.
Reference GRIB2 Flag Table 3.4.
Args:
* scanningMode:
Message section 3, coded key value.
Returns:
A :class:`collections.namedtuple` representation.
"""
i_negative = bool(scanningMode & 0x80)
j_positive = bool(scanningMode & 0x40)
j_consecutive = bool(scanningMode & 0x20)
i_alternative = bool(scanningMode & 0x10)
if i_alternative:
msg = 'Grid definition section 3 contains unsupported ' \
'alternative row scanning mode'
raise TranslationError(msg)
return ScanningMode(i_negative, j_positive,
j_consecutive, i_alternative)
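# Illustrative example (sketch only): scanning_mode(0x40) decodes to
# ScanningMode(i_negative=False, j_positive=True, j_consecutive=False,
# i_alternative=False), i.e. points run in +i and +j with adjacent points
# consecutive along i.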
def resolution_flags(resolutionAndComponentFlags):
"""
Translate the resolution and component bitmask.
Reference GRIB2 Flag Table 3.3.
Args:
* resolutionAndComponentFlags:
Message section 3, coded key value.
Returns:
A :class:`collections.namedtuple` representation.
"""
i_inc_given = bool(resolutionAndComponentFlags & 0x20)
j_inc_given = bool(resolutionAndComponentFlags & 0x10)
uv_resolved = bool(resolutionAndComponentFlags & 0x08)
return ResolutionFlags(i_inc_given, j_inc_given, uv_resolved)
def ellipsoid(shapeOfTheEarth, major, minor, radius):
"""
Translate the shape of the earth to an appropriate coordinate
reference system.
For MDI set either major and minor or radius to :data:`numpy.ma.masked`
Reference GRIB2 Code Table 3.2.
Args:
* shapeOfTheEarth:
Message section 3, octet 15.
* major:
Semi-major axis of the oblate spheroid in units determined by
the shapeOfTheEarth.
* minor:
Semi-minor axis of the oblate spheroid in units determined by
the shapeOfTheEarth.
* radius:
Radius of sphere (in m).
Returns:
:class:`iris.coord_systems.CoordSystem`
"""
# Supported shapeOfTheEarth values.
if shapeOfTheEarth not in (0, 1, 3, 6, 7):
msg = 'Grid definition section 3 contains an unsupported ' \
'shape of the earth [{}]'.format(shapeOfTheEarth)
raise TranslationError(msg)
if shapeOfTheEarth == 0:
# Earth assumed spherical with radius of 6 367 470.0m
result = icoord_systems.GeogCS(6367470)
elif shapeOfTheEarth == 1:
# Earth assumed spherical with radius specified (in m) by
# data producer.
if radius is ma.masked:
            msg = 'Ellipsoid for shape of the earth {} requires a ' \
'radius to be specified.'.format(shapeOfTheEarth)
raise ValueError(msg)
result = icoord_systems.GeogCS(radius)
elif shapeOfTheEarth in [3, 7]:
# Earth assumed oblate spheroid with major and minor axes
# specified (in km)/(in m) by data producer.
        emsg_oblate = 'Ellipsoid for shape of the earth [{}] requires a ' \
'semi-{} axis to be specified.'
if major is ma.masked:
raise ValueError(emsg_oblate.format(shapeOfTheEarth, 'major'))
if minor is ma.masked:
raise ValueError(emsg_oblate.format(shapeOfTheEarth, 'minor'))
# Check whether to convert from km to m.
if shapeOfTheEarth == 3:
major *= 1000
minor *= 1000
result = icoord_systems.GeogCS(major, minor)
elif shapeOfTheEarth == 6:
# Earth assumed spherical with radius of 6 371 229.0m
result = icoord_systems.GeogCS(6371229)
return result
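# Illustrative examples (sketch only, with masked placeholders for unused
# arguments):
#
#     ellipsoid(0, ma.masked, ma.masked, ma.masked) # -> GeogCS(6367470)
#     ellipsoid(1, ma.masked, ma.masked, 6371229.0) # -> GeogCS(6371229.0)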
def ellipsoid_geometry(section):
"""
Calculated the unscaled ellipsoid major-axis, minor-axis and radius.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
Returns:
Tuple containing the major-axis, minor-axis and radius.
"""
major = unscale(section['scaledValueOfEarthMajorAxis'],
section['scaleFactorOfEarthMajorAxis'])
minor = unscale(section['scaledValueOfEarthMinorAxis'],
section['scaleFactorOfEarthMinorAxis'])
radius = unscale(section['scaledValueOfRadiusOfSphericalEarth'],
section['scaleFactorOfRadiusOfSphericalEarth'])
return major, minor, radius
def grid_definition_template_0_and_1(section, metadata, y_name, x_name, cs):
"""
Translate templates representing regularly spaced latitude/longitude
on either a standard or rotated grid.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message
* metadata:
:class:`collections.OrderedDict` of metadata.
* y_name:
Name of the Y coordinate, e.g. latitude or grid_latitude.
* x_name:
Name of the X coordinate, e.g. longitude or grid_longitude.
* cs:
The :class:`iris.coord_systems.CoordSystem` to use when creating
the X and Y coordinates.
"""
# Abort if this is a reduced grid, that case isn't handled yet.
if section['numberOfOctectsForNumberOfPoints'] != 0 or \
section['interpretationOfNumberOfPoints'] != 0:
msg = 'Grid definition section 3 contains unsupported ' \
'quasi-regular grid'
raise TranslationError(msg)
scan = scanning_mode(section['scanningMode'])
# Calculate longitude points.
x_inc = section['iDirectionIncrement'] * _GRID_ACCURACY_IN_DEGREES
x_offset = section['longitudeOfFirstGridPoint'] * _GRID_ACCURACY_IN_DEGREES
x_direction = -1 if scan.i_negative else 1
Ni = section['Ni']
x_points = np.arange(Ni, dtype=np.float64) * x_inc * x_direction + x_offset
# Determine whether the x-points (in degrees) are circular.
circular = _is_circular(x_points, 360.0)
# Calculate latitude points.
y_inc = section['jDirectionIncrement'] * _GRID_ACCURACY_IN_DEGREES
y_offset = section['latitudeOfFirstGridPoint'] * _GRID_ACCURACY_IN_DEGREES
y_direction = 1 if scan.j_positive else -1
Nj = section['Nj']
y_points = np.arange(Nj, dtype=np.float64) * y_inc * y_direction + y_offset
# Create the lat/lon coordinates.
y_coord = DimCoord(y_points, standard_name=y_name, units='degrees',
coord_system=cs)
x_coord = DimCoord(x_points, standard_name=x_name, units='degrees',
coord_system=cs, circular=circular)
# Determine the lat/lon dimensions.
y_dim, x_dim = 0, 1
if scan.j_consecutive:
y_dim, x_dim = 1, 0
# Add the lat/lon coordinates to the metadata dim coords.
metadata['dim_coords_and_dims'].append((y_coord, y_dim))
metadata['dim_coords_and_dims'].append((x_coord, x_dim))
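# Illustrative worked example for the point calculation above (hypothetical
# values): with iDirectionIncrement=1000000 (i.e. 1 degree once multiplied by
# _GRID_ACCURACY_IN_DEGREES), longitudeOfFirstGridPoint=0, Ni=4 and +i
# scanning, x_points evaluates to [0.0, 1.0, 2.0, 3.0].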
def grid_definition_template_0(section, metadata):
"""
Translate template representing regular latitude/longitude
grid (regular_ll).
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
# Determine the coordinate system.
major, minor, radius = ellipsoid_geometry(section)
cs = ellipsoid(section['shapeOfTheEarth'], major, minor, radius)
grid_definition_template_0_and_1(section, metadata,
'latitude', 'longitude', cs)
def grid_definition_template_1(section, metadata):
"""
Translate template representing rotated latitude/longitude grid.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
# Determine the coordinate system.
major, minor, radius = ellipsoid_geometry(section)
south_pole_lat = (section['latitudeOfSouthernPole'] *
_GRID_ACCURACY_IN_DEGREES)
south_pole_lon = (section['longitudeOfSouthernPole'] *
_GRID_ACCURACY_IN_DEGREES)
cs = icoord_systems.RotatedGeogCS(-south_pole_lat,
math.fmod(south_pole_lon + 180, 360),
section['angleOfRotation'],
ellipsoid(section['shapeOfTheEarth'],
major, minor, radius))
grid_definition_template_0_and_1(section, metadata,
'grid_latitude', 'grid_longitude', cs)
def grid_definition_template_4_and_5(section, metadata, y_name, x_name, cs):
"""
Translate template representing variable resolution latitude/longitude
and common variable resolution rotated latitude/longitude.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
* y_name:
Name of the Y coordinate, e.g. 'latitude' or 'grid_latitude'.
* x_name:
Name of the X coordinate, e.g. 'longitude' or 'grid_longitude'.
* cs:
        The :class:`iris.coord_systems.CoordSystem` to use when creating
the X and Y coordinates.
"""
# Determine the (variable) units of resolution.
key = 'basicAngleOfTheInitialProductionDomain'
basicAngleOfTheInitialProductDomain = section[key]
subdivisionsOfBasicAngle = section['subdivisionsOfBasicAngle']
if basicAngleOfTheInitialProductDomain in [0, _MDI]:
basicAngleOfTheInitialProductDomain = 1.
if subdivisionsOfBasicAngle in [0, _MDI]:
subdivisionsOfBasicAngle = 1. / _GRID_ACCURACY_IN_DEGREES
resolution = np.float64(basicAngleOfTheInitialProductDomain)
resolution /= subdivisionsOfBasicAngle
flags = resolution_flags(section['resolutionAndComponentFlags'])
# Grid Definition Template 3.4. Notes (2).
# Flag bits 3-4 are not applicable for this template.
if flags.uv_resolved and options.warn_on_unsupported:
msg = 'Unable to translate resolution and component flags.'
warnings.warn(msg)
# Calculate the latitude and longitude points.
x_points = np.array(section['longitudes'], dtype=np.float64) * resolution
y_points = np.array(section['latitudes'], dtype=np.float64) * resolution
# Determine whether the x-points (in degrees) are circular.
circular = _is_circular(x_points, 360.0)
# Create the lat/lon coordinates.
y_coord = DimCoord(y_points, standard_name=y_name, units='degrees',
coord_system=cs)
x_coord = DimCoord(x_points, standard_name=x_name, units='degrees',
coord_system=cs, circular=circular)
scan = scanning_mode(section['scanningMode'])
# Determine the lat/lon dimensions.
y_dim, x_dim = 0, 1
if scan.j_consecutive:
y_dim, x_dim = 1, 0
# Add the lat/lon coordinates to the metadata dim coords.
metadata['dim_coords_and_dims'].append((y_coord, y_dim))
metadata['dim_coords_and_dims'].append((x_coord, x_dim))
def grid_definition_template_4(section, metadata):
"""
Translate template representing variable resolution latitude/longitude.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
# Determine the coordinate system.
major, minor, radius = ellipsoid_geometry(section)
cs = ellipsoid(section['shapeOfTheEarth'], major, minor, radius)
grid_definition_template_4_and_5(section, metadata,
'latitude', 'longitude', cs)
def grid_definition_template_5(section, metadata):
"""
Translate template representing variable resolution rotated
latitude/longitude.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
# Determine the coordinate system.
major, minor, radius = ellipsoid_geometry(section)
south_pole_lat = (section['latitudeOfSouthernPole'] *
_GRID_ACCURACY_IN_DEGREES)
south_pole_lon = (section['longitudeOfSouthernPole'] *
_GRID_ACCURACY_IN_DEGREES)
cs = icoord_systems.RotatedGeogCS(-south_pole_lat,
math.fmod(south_pole_lon + 180, 360),
section['angleOfRotation'],
ellipsoid(section['shapeOfTheEarth'],
major, minor, radius))
grid_definition_template_4_and_5(section, metadata,
'grid_latitude', 'grid_longitude', cs)
def grid_definition_template_12(section, metadata):
"""
Translate template representing transverse Mercator.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
major, minor, radius = ellipsoid_geometry(section)
geog_cs = ellipsoid(section['shapeOfTheEarth'], major, minor, radius)
lat = section['latitudeOfReferencePoint'] * _GRID_ACCURACY_IN_DEGREES
lon = section['longitudeOfReferencePoint'] * _GRID_ACCURACY_IN_DEGREES
scale = section['scaleFactorAtReferencePoint']
# Catch bug in ECMWF GRIB API (present at 1.12.1) where the scale
# is treated as a signed, 4-byte integer.
if isinstance(scale, int):
scale = fixup_float32_from_int32(scale)
CM_TO_M = 0.01
easting = section['XR'] * CM_TO_M
northing = section['YR'] * CM_TO_M
cs = icoord_systems.TransverseMercator(lat, lon, easting, northing,
scale, geog_cs)
# Deal with bug in ECMWF GRIB API (present at 1.12.1) where these
# values are treated as unsigned, 4-byte integers.
x1 = fixup_int32_from_uint32(section['X1'])
y1 = fixup_int32_from_uint32(section['Y1'])
x2 = fixup_int32_from_uint32(section['X2'])
y2 = fixup_int32_from_uint32(section['Y2'])
# Rather unhelpfully this grid definition template seems to be
# overspecified, and thus open to inconsistency. But for determining
# the extents the X1, Y1, X2, and Y2 points have the highest
# precision, as opposed to using Di and Dj.
# Check whether Di and Dj are as consistent as possible with that
# interpretation - i.e. they are within 1cm.
def check_range(v1, v2, n, d):
min_last = v1 + (n - 1) * (d - 1)
max_last = v1 + (n - 1) * (d + 1)
if not (min_last < v2 < max_last):
raise TranslationError('Inconsistent grid definition')
check_range(x1, x2, section['Ni'], section['Di'])
check_range(y1, y2, section['Nj'], section['Dj'])
x_points = np.linspace(x1 * CM_TO_M, x2 * CM_TO_M, section['Ni'])
y_points = np.linspace(y1 * CM_TO_M, y2 * CM_TO_M, section['Nj'])
# This has only been tested with +x/+y scanning, so raise an error
# for other permutations.
scan = scanning_mode(section['scanningMode'])
if scan.i_negative:
raise TranslationError('Unsupported -x scanning')
if not scan.j_positive:
raise TranslationError('Unsupported -y scanning')
# Create the X and Y coordinates.
y_coord = DimCoord(y_points, 'projection_y_coordinate', units='m',
coord_system=cs)
x_coord = DimCoord(x_points, 'projection_x_coordinate', units='m',
coord_system=cs)
# Determine the lat/lon dimensions.
y_dim, x_dim = 0, 1
scan = scanning_mode(section['scanningMode'])
if scan.j_consecutive:
y_dim, x_dim = 1, 0
# Add the X and Y coordinates to the metadata dim coords.
metadata['dim_coords_and_dims'].append((y_coord, y_dim))
metadata['dim_coords_and_dims'].append((x_coord, x_dim))
def grid_definition_template_20(section, metadata):
"""
Translate template representing a Polar Stereographic grid.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
major, minor, radius = ellipsoid_geometry(section)
geog_cs = ellipsoid(section['shapeOfTheEarth'], major, minor, radius)
proj_centre = projection_centre(section['projectionCentreFlag'])
if proj_centre.bipolar_and_symmetric:
raise TranslationError('Bipolar and symmetric polar stereo projections'
' are not supported by the '
'grid_definition_template_20 translation.')
if proj_centre.south_pole_on_projection_plane:
central_lat = -90.
else:
central_lat = 90.
central_lon = section['orientationOfTheGrid'] * _GRID_ACCURACY_IN_DEGREES
true_scale_lat = section['LaD'] * _GRID_ACCURACY_IN_DEGREES
cs = icoord_systems.Stereographic(central_lat=central_lat,
central_lon=central_lon,
true_scale_lat=true_scale_lat,
ellipsoid=geog_cs)
x_coord, y_coord, scan = _calculate_proj_coords_from_lon_lat(section, cs)
# Determine the order of the dimensions.
y_dim, x_dim = 0, 1
if scan.j_consecutive:
y_dim, x_dim = 1, 0
# Add the projection coordinates to the metadata dim coords.
metadata['dim_coords_and_dims'].append((y_coord, y_dim))
metadata['dim_coords_and_dims'].append((x_coord, x_dim))
def _calculate_proj_coords_from_lon_lat(section, cs):
    # Construct the coordinate points. The start point is given in units of
    # 10-6 degree but the distance measurement is in 10-3 m, so a conversion is necessary
# to find the origin in m.
scan = scanning_mode(section['scanningMode'])
lon_0 = section['longitudeOfFirstGridPoint'] * _GRID_ACCURACY_IN_DEGREES
lat_0 = section['latitudeOfFirstGridPoint'] * _GRID_ACCURACY_IN_DEGREES
x0_m, y0_m = cs.as_cartopy_crs().transform_point(
lon_0, lat_0, ccrs.Geodetic())
dx_m = section['Dx'] * 1e-3
dy_m = section['Dy'] * 1e-3
x_dir = -1 if scan.i_negative else 1
y_dir = 1 if scan.j_positive else -1
x_points = x0_m + dx_m * x_dir * np.arange(section['Nx'], dtype=np.float64)
y_points = y0_m + dy_m * y_dir * np.arange(section['Ny'], dtype=np.float64)
# Create the dimension coordinates.
x_coord = DimCoord(x_points, standard_name='projection_x_coordinate',
units='m', coord_system=cs)
y_coord = DimCoord(y_points, standard_name='projection_y_coordinate',
units='m', coord_system=cs)
return x_coord, y_coord, scan
def grid_definition_template_30(section, metadata):
"""
Translate template representing a Lambert Conformal grid.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
major, minor, radius = ellipsoid_geometry(section)
geog_cs = ellipsoid(section['shapeOfTheEarth'], major, minor, radius)
central_latitude = section['LaD'] * _GRID_ACCURACY_IN_DEGREES
central_longitude = section['LoV'] * _GRID_ACCURACY_IN_DEGREES
false_easting = 0
false_northing = 0
secant_latitudes = (section['Latin1'] * _GRID_ACCURACY_IN_DEGREES,
section['Latin2'] * _GRID_ACCURACY_IN_DEGREES)
cs = icoord_systems.LambertConformal(central_latitude,
central_longitude,
false_easting,
false_northing,
secant_latitudes=secant_latitudes,
ellipsoid=geog_cs)
# A projection centre flag is defined for GDT30. However, we don't need to
# know which pole is in the projection plane as Cartopy handles that. The
# Other component of the projection centre flag determines if there are
# multiple projection centres. There is no support for this in Proj4 or
# Cartopy so a translation error is raised if this flag is set.
proj_centre = projection_centre(section['projectionCentreFlag'])
if proj_centre.bipolar_and_symmetric:
msg = 'Unsupported projection centre: Bipolar and symmetric.'
raise TranslationError(msg)
res_flags = resolution_flags(section['resolutionAndComponentFlags'])
if not res_flags.uv_resolved and options.warn_on_unsupported:
        # Vector components are given as relative to east and north, rather than
# relative to the projection coordinates, issue a warning in this case.
# (ideally we need a way to add this information to a cube)
msg = 'Unable to translate resolution and component flags.'
warnings.warn(msg)
x_coord, y_coord, scan = _calculate_proj_coords_from_lon_lat(section, cs)
# Determine the order of the dimensions.
y_dim, x_dim = 0, 1
if scan.j_consecutive:
y_dim, x_dim = 1, 0
# Add the projection coordinates to the metadata dim coords.
metadata['dim_coords_and_dims'].append((y_coord, y_dim))
metadata['dim_coords_and_dims'].append((x_coord, x_dim))
def grid_definition_template_40(section, metadata):
"""
Translate template representing a Gaussian grid.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
major, minor, radius = ellipsoid_geometry(section)
cs = ellipsoid(section['shapeOfTheEarth'], major, minor, radius)
if section['numberOfOctectsForNumberOfPoints'] != 0 or \
section['interpretationOfNumberOfPoints'] != 0:
grid_definition_template_40_reduced(section, metadata, cs)
else:
grid_definition_template_40_regular(section, metadata, cs)
def grid_definition_template_40_regular(section, metadata, cs):
"""
Translate template representing a regular Gaussian grid.
"""
scan = scanning_mode(section['scanningMode'])
# Calculate longitude points.
x_inc = section['iDirectionIncrement'] * _GRID_ACCURACY_IN_DEGREES
x_offset = section['longitudeOfFirstGridPoint'] * _GRID_ACCURACY_IN_DEGREES
x_direction = -1 if scan.i_negative else 1
Ni = section['Ni']
x_points = np.arange(Ni, dtype=np.float64) * x_inc * x_direction + x_offset
# Determine whether the x-points (in degrees) are circular.
circular = _is_circular(x_points, 360.0)
# Get the latitude points.
#
# Gaussian latitudes are defined by Gauss-Legendre quadrature and the Gauss
# quadrature rule (http://en.wikipedia.org/wiki/Gaussian_quadrature). The
# latitudes of a particular Gaussian grid are uniquely defined by the
# number of latitudes between the equator and the pole, N. The latitudes
# are calculated from the roots of a Legendre series which must be
# calculated numerically. This process involves forming a (possibly large)
# companion matrix, computing its eigenvalues, and usually at least one
# application of Newton's method to achieve best results
# (http://en.wikipedia.org/wiki/Newton%27s_method). The latitudes are given
# by the arcsine of the roots converted to degrees. This computation can be
# time-consuming, especially for large grid sizes.
#
# A direct computation would require:
# 1. Reading the coded key 'N' representing the number of latitudes
# between the equator and pole.
# 2. Computing the set of global Gaussian latitudes associated with the
# value of N.
# 3. Determining the direction of the latitude points from the scanning
# mode.
# 4. Producing a subset of the latitudes based on the given first and
# last latitude points, given by the coded keys La1 and La2.
#
# Given the complexity and potential for poor performance of calculating
# the Gaussian latitudes directly, the GRIB-API computed key
# 'distinctLatitudes' is utilised to obtain the latitude points from the
# GRIB2 message. This computed key provides a rapid calculation of the
# monotonic latitude points that form the Gaussian grid, accounting for
# the coverage of the grid.
y_points = section.get_computed_key('distinctLatitudes')
y_points.sort()
if not scan.j_positive:
y_points = y_points[::-1]
# Create lat/lon coordinates.
x_coord = DimCoord(x_points, standard_name='longitude',
units='degrees_east', coord_system=cs,
circular=circular)
y_coord = DimCoord(y_points, standard_name='latitude',
units='degrees_north', coord_system=cs)
# Determine the lat/lon dimensions.
y_dim, x_dim = 0, 1
if scan.j_consecutive:
y_dim, x_dim = 1, 0
# Add the lat/lon coordinates to the metadata dim coords.
metadata['dim_coords_and_dims'].append((y_coord, y_dim))
metadata['dim_coords_and_dims'].append((x_coord, x_dim))
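# Illustrative sketch only (not called by the loader): how the global Gaussian
# latitudes described above could be computed directly with NumPy, assuming
# 'np' is this module's NumPy import. In practice the GRIB computed key
# 'distinctLatitudes' is preferred because it is faster and already reflects
# the coverage of the grid.
def _gaussian_latitudes_sketch(n):
    # Roots of the Legendre polynomial of degree 2N; NumPy forms the companion
    # matrix and solves the eigenvalue problem internally.
    nodes, _weights = np.polynomial.legendre.leggauss(2 * n)
    # The latitudes are the arcsine of the roots, converted to degrees and
    # ordered from north to south.
    return np.degrees(np.arcsin(nodes))[::-1]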
def grid_definition_template_40_reduced(section, metadata, cs):
"""
Translate template representing a reduced Gaussian grid.
"""
# Get the latitude and longitude points.
#
    # The same comments made in grid_definition_template_40_regular regarding
    # the computation of Gaussian latitudes apply here too. Further to this,
    # the reduced Gaussian grid is not rectangular: the number of points along
    # each latitude circle varies with latitude. Whilst it is possible to
# compute the latitudes and longitudes individually for each grid point
# from coded keys, it would be complex and time-consuming compared to
# loading the latitude and longitude arrays directly using the computed
# keys 'latitudes' and 'longitudes'.
x_points = section.get_computed_key('longitudes')
y_points = section.get_computed_key('latitudes')
# Create lat/lon coordinates.
x_coord = AuxCoord(x_points, standard_name='longitude',
units='degrees_east', coord_system=cs)
y_coord = AuxCoord(y_points, standard_name='latitude',
units='degrees_north', coord_system=cs)
# Add the lat/lon coordinates to the metadata dim coords.
metadata['aux_coords_and_dims'].append((y_coord, 0))
metadata['aux_coords_and_dims'].append((x_coord, 0))
def grid_definition_template_90(section, metadata):
"""
Translate template representing space view.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
if section['Nr'] == _MDI:
raise TranslationError('Unsupported orthographic grid.')
elif section['Nr'] == 0:
raise TranslationError('Unsupported zero height for space-view.')
if section['orientationOfTheGrid'] != 0:
raise TranslationError('Unsupported space-view orientation.')
# Determine the coordinate system.
sub_satellite_lat = (section['latitudeOfSubSatellitePoint']
* _GRID_ACCURACY_IN_DEGREES)
# The subsequent calculations to determine the apparent Earth
# diameters rely on the satellite being over the equator.
if sub_satellite_lat != 0:
raise TranslationError('Unsupported non-zero latitude for '
'space-view perspective.')
sub_satellite_lon = (section['longitudeOfSubSatellitePoint']
* _GRID_ACCURACY_IN_DEGREES)
major, minor, radius = ellipsoid_geometry(section)
geog_cs = ellipsoid(section['shapeOfTheEarth'], major, minor, radius)
height_above_centre = geog_cs.semi_major_axis * section['Nr'] / 1e6
height_above_ellipsoid = height_above_centre - geog_cs.semi_major_axis
cs = icoord_systems.VerticalPerspective(sub_satellite_lat,
sub_satellite_lon,
height_above_ellipsoid,
ellipsoid=geog_cs)
    # Figure out how large the Earth would appear in projection coordinates.
# For both the apparent equatorial and polar diameters this is a
# two-step process:
# 1) Determine the angle subtended by the visible surface.
# 2) Convert that angle into projection coordinates.
# NB. The solutions given below assume the satellite is over the
# equator.
# The apparent equatorial angle uses simple, circular geometry.
# But to derive the apparent polar angle we use the auxiliary circle
# parametric form of the ellipse. In this form, the equation for the
# tangent line is given by:
# x cos(psi) y sin(psi)
# ---------- + ---------- = 1
# a b
# By considering the cases when x=0 and y=0, the apparent polar
# angle (theta) is given by:
# tan(theta) = b / sin(psi)
# ------------
# a / cos(psi)
# This can be simplified using: cos(psi) = a / height_above_centre
half_apparent_equatorial_angle = math.asin(geog_cs.semi_major_axis /
height_above_centre)
x_apparent_diameter = (2 * half_apparent_equatorial_angle *
height_above_ellipsoid)
parametric_angle = math.acos(geog_cs.semi_major_axis / height_above_centre)
half_apparent_polar_angle = math.atan(geog_cs.semi_minor_axis /
(height_above_centre *
math.sin(parametric_angle)))
y_apparent_diameter = (2 * half_apparent_polar_angle *
height_above_ellipsoid)
y_step = y_apparent_diameter / section['dy']
x_step = x_apparent_diameter / section['dx']
y_start = y_step * (section['Yo'] - section['Yp'] / 1000)
x_start = x_step * (section['Xo'] - section['Xp'] / 1000)
y_points = y_start + np.arange(section['Ny']) * y_step
x_points = x_start + np.arange(section['Nx']) * x_step
# This has only been tested with -x/+y scanning, so raise an error
# for other permutations.
scan = scanning_mode(section['scanningMode'])
if scan.i_negative:
x_points = -x_points
else:
raise TranslationError('Unsupported +x scanning')
if not scan.j_positive:
raise TranslationError('Unsupported -y scanning')
# Create the X and Y coordinates.
y_coord = DimCoord(y_points, 'projection_y_coordinate', units='m',
coord_system=cs)
x_coord = DimCoord(x_points, 'projection_x_coordinate', units='m',
coord_system=cs)
# Determine the lat/lon dimensions.
y_dim, x_dim = 0, 1
if scan.j_consecutive:
y_dim, x_dim = 1, 0
# Add the X and Y coordinates to the metadata dim coords.
metadata['dim_coords_and_dims'].append((y_coord, y_dim))
metadata['dim_coords_and_dims'].append((x_coord, x_dim))
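# Illustrative sketch (not called by the loader) of the apparent-diameter
# geometry derived in the comment above, pulled out as a standalone helper so
# the two-step calculation can be followed in isolation. The argument names
# are assumptions for the example; 'math' is this module's standard-library
# import.
def _apparent_diameters_sketch(semi_major, semi_minor, height_above_centre):
    height_above_ellipsoid = height_above_centre - semi_major
    # Step 1: the half-angles subtended by the visible surface, assuming the
    # satellite sits over the equator.
    half_equatorial_angle = math.asin(semi_major / height_above_centre)
    parametric_angle = math.acos(semi_major / height_above_centre)
    half_polar_angle = math.atan(semi_minor /
                                 (height_above_centre *
                                  math.sin(parametric_angle)))
    # Step 2: convert those angles into projection coordinates.
    x_apparent_diameter = 2 * half_equatorial_angle * height_above_ellipsoid
    y_apparent_diameter = 2 * half_polar_angle * height_above_ellipsoid
    return x_apparent_diameter, y_apparent_diameter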
def grid_definition_section(section, metadata):
"""
Translate section 3 from the GRIB2 message.
Update the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 3 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
# Reference GRIB2 Code Table 3.0.
value = section['sourceOfGridDefinition']
if value != 0:
msg = 'Grid definition section 3 contains unsupported ' \
'source of grid definition [{}]'.format(value)
raise TranslationError(msg)
# Reference GRIB2 Code Table 3.1.
template = section['gridDefinitionTemplateNumber']
if template == 0:
# Process regular latitude/longitude grid (regular_ll)
grid_definition_template_0(section, metadata)
elif template == 1:
# Process rotated latitude/longitude grid.
grid_definition_template_1(section, metadata)
elif template == 4:
# Process variable resolution latitude/longitude.
grid_definition_template_4(section, metadata)
elif template == 5:
# Process variable resolution rotated latitude/longitude.
grid_definition_template_5(section, metadata)
elif template == 12:
# Process transverse Mercator.
grid_definition_template_12(section, metadata)
elif template == 20:
# Polar stereographic.
grid_definition_template_20(section, metadata)
elif template == 30:
# Process Lambert conformal:
grid_definition_template_30(section, metadata)
elif template == 40:
grid_definition_template_40(section, metadata)
elif template == 90:
# Process space view.
grid_definition_template_90(section, metadata)
else:
msg = 'Grid definition template [{}] is not supported'.format(template)
raise TranslationError(msg)
###############################################################################
#
# Product Definition Section 4
#
###############################################################################
def translate_phenomenon(metadata, discipline, parameterCategory,
parameterNumber, probability=None):
"""
Translate GRIB2 phenomenon to CF phenomenon.
Updates the metadata in-place with the translations.
Args:
* metadata:
:class:`collections.OrderedDict` of metadata.
* discipline:
Message section 0, octet 7.
* parameterCategory:
Message section 4, octet 10.
* parameterNumber:
Message section 4, octet 11.
Kwargs:
* probability (:class:`Probability`):
If present, the data encodes a forecast probability analysis with the
given properties.
"""
cf = itranslation.grib2_phenom_to_cf_info(param_discipline=discipline,
param_category=parameterCategory,
param_number=parameterNumber)
if cf is not None:
if probability is None:
metadata['standard_name'] = cf.standard_name
metadata['long_name'] = cf.long_name
metadata['units'] = cf.units
else:
# The basic name+unit info goes into a 'threshold coordinate' which
# encodes probability threshold values.
threshold_coord = DimCoord(
probability.threshold,
standard_name=cf.standard_name, long_name=cf.long_name,
units=cf.units)
metadata['aux_coords_and_dims'].append((threshold_coord, None))
# The main cube has an adjusted name, and units of '1'.
base_name = cf.standard_name or cf.long_name
long_name = 'probability_of_{}_{}'.format(
base_name, probability.probability_type_name)
metadata['standard_name'] = None
metadata['long_name'] = long_name
metadata['units'] = Unit(1)
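# Illustrative example only (assumed values): a probability message whose
# phenomenon translates to standard_name='air_temperature' with a probability
# type name of 'above_threshold' ends up with no standard_name, units of '1',
# and a long_name built exactly as above.
def _probability_long_name_example():
    base_name = 'air_temperature'
    probability_type_name = 'above_threshold'
    return 'probability_of_{}_{}'.format(base_name, probability_type_name)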
def time_range_unit(indicatorOfUnitOfTimeRange):
"""
Translate the time range indicator to an equivalent
:class:`cf_units.Unit`.
Args:
* indicatorOfUnitOfTimeRange:
Message section 4, octet 18.
Returns:
:class:`cf_units.Unit`.
"""
try:
unit = Unit(_TIME_RANGE_UNITS[indicatorOfUnitOfTimeRange])
except (KeyError, ValueError):
msg = 'Product definition section 4 contains unsupported ' \
'time range unit [{}]'.format(indicatorOfUnitOfTimeRange)
raise TranslationError(msg)
return unit
def hybrid_factories(section, metadata):
"""
Translate the section 4 optional hybrid vertical coordinates.
Updates the metadata in-place with the translations.
Reference GRIB2 Code Table 4.5.
Relevant notes:
[3] Hybrid pressure level (119) shall be used instead of Hybrid level (105)
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
NV = section['NV']
if NV > 0:
typeOfFirstFixedSurface = section['typeOfFirstFixedSurface']
if typeOfFirstFixedSurface == _TYPE_OF_FIXED_SURFACE_MISSING:
msg = 'Product definition section 4 contains missing ' \
'type of first fixed surface'
raise TranslationError(msg)
typeOfSecondFixedSurface = section['typeOfSecondFixedSurface']
if typeOfSecondFixedSurface != _TYPE_OF_FIXED_SURFACE_MISSING:
msg = 'Product definition section 4 contains unsupported type ' \
'of second fixed surface [{}]'.format(typeOfSecondFixedSurface)
raise TranslationError(msg)
if typeOfFirstFixedSurface in [105, 119]:
# Hybrid level (105) and Hybrid pressure level (119).
scaleFactor = section['scaleFactorOfFirstFixedSurface']
if scaleFactor != 0:
msg = 'Product definition section 4 contains invalid scale ' \
'factor of first fixed surface [{}]'.format(scaleFactor)
raise TranslationError(msg)
# Create the model level number scalar coordinate.
scaledValue = section['scaledValueOfFirstFixedSurface']
coord = DimCoord(scaledValue, standard_name='model_level_number',
attributes=dict(positive='up'))
metadata['aux_coords_and_dims'].append((coord, None))
# Create the level pressure scalar coordinate.
pv = section['pv']
offset = scaledValue
coord = DimCoord(pv[offset], long_name='level_pressure',
units='Pa')
metadata['aux_coords_and_dims'].append((coord, None))
# Create the sigma scalar coordinate.
            offset += NV // 2  # integer offset into the sigma half of 'pv'
coord = AuxCoord(pv[offset], long_name='sigma')
metadata['aux_coords_and_dims'].append((coord, None))
# Create the associated factory reference.
args = [{'long_name': 'level_pressure'}, {'long_name': 'sigma'},
Reference('surface_air_pressure')]
factory = Factory(HybridPressureFactory, args)
metadata['factories'].append(factory)
else:
msg = 'Product definition section 4 contains unsupported ' \
'first fixed surface [{}]'.format(typeOfFirstFixedSurface)
raise TranslationError(msg)
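# Illustrative sketch (not called by the loader): the HybridPressureFactory
# referenced above later combines the two scalar coefficients read from the
# 'pv' array with a surface pressure reference to give the pressure at the
# hybrid level. The argument names here are assumptions for the example.
def _hybrid_pressure_sketch(level_pressure, sigma, surface_air_pressure):
    # pressure = ap + b * ps, the standard hybrid-pressure formulation.
    return level_pressure + sigma * surface_air_pressure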
def vertical_coords(section, metadata):
"""
Translate the vertical coordinates or hybrid vertical coordinates.
Updates the metadata in-place with the translations.
Reference GRIB2 Code Table 4.5.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
if section['NV'] > 0:
# Generate hybrid vertical coordinates.
hybrid_factories(section, metadata)
else:
# Generate vertical coordinate.
typeOfFirstFixedSurface = section['typeOfFirstFixedSurface']
key = 'scaledValueOfFirstFixedSurface'
scaledValueOfFirstFixedSurface = section[key]
fixed_surface = _FIXED_SURFACE.get(typeOfFirstFixedSurface)
if fixed_surface is None:
if typeOfFirstFixedSurface != _TYPE_OF_FIXED_SURFACE_MISSING:
if scaledValueOfFirstFixedSurface == _MDI:
if options.warn_on_unsupported:
msg = 'Unable to translate type of first fixed ' \
'surface with missing scaled value.'
warnings.warn(msg)
else:
if options.warn_on_unsupported:
msg = 'Unable to translate type of first fixed ' \
'surface with scaled value.'
warnings.warn(msg)
else:
key = 'scaleFactorOfFirstFixedSurface'
scaleFactorOfFirstFixedSurface = section[key]
typeOfSecondFixedSurface = section['typeOfSecondFixedSurface']
if typeOfSecondFixedSurface != _TYPE_OF_FIXED_SURFACE_MISSING:
if typeOfFirstFixedSurface != typeOfSecondFixedSurface:
msg = 'Product definition section 4 has different ' \
'types of first and second fixed surface'
raise TranslationError(msg)
key = 'scaledValueOfSecondFixedSurface'
scaledValueOfSecondFixedSurface = section[key]
if scaledValueOfSecondFixedSurface == _MDI:
msg = 'Product definition section 4 has missing ' \
'scaled value of second fixed surface'
raise TranslationError(msg)
else:
key = 'scaleFactorOfSecondFixedSurface'
scaleFactorOfSecondFixedSurface = section[key]
first = unscale(scaledValueOfFirstFixedSurface,
scaleFactorOfFirstFixedSurface)
second = unscale(scaledValueOfSecondFixedSurface,
scaleFactorOfSecondFixedSurface)
point = 0.5 * (first + second)
bounds = [first, second]
coord = DimCoord(point,
standard_name=fixed_surface.standard_name,
long_name=fixed_surface.long_name,
units=fixed_surface.units,
bounds=bounds)
# Add the vertical coordinate to metadata aux coords.
metadata['aux_coords_and_dims'].append((coord, None))
else:
point = unscale(scaledValueOfFirstFixedSurface,
scaleFactorOfFirstFixedSurface)
coord = DimCoord(point,
standard_name=fixed_surface.standard_name,
long_name=fixed_surface.long_name,
units=fixed_surface.units)
# Add the vertical coordinate to metadata aux coords.
metadata['aux_coords_and_dims'].append((coord, None))
def forecast_period_coord(indicatorOfUnitOfTimeRange, forecastTime):
"""
Create the forecast period coordinate.
Args:
* indicatorOfUnitOfTimeRange:
        Message section 4, octet 18.
* forecastTime:
Message section 4, octets 19-22.
Returns:
The scalar forecast period :class:`iris.coords.DimCoord`.
"""
# Determine the forecast period and associated units.
unit = time_range_unit(indicatorOfUnitOfTimeRange)
point = unit.convert(forecastTime, 'hours')
# Create the forecast period scalar coordinate.
coord = DimCoord(point, standard_name='forecast_period', units='hours')
return coord
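# Illustrative usage (assumes GRIB2 Code Table 4.4, where an indicator of 1
# means 'hours'): a forecastTime of 6 encoded in hours yields a scalar
# forecast_period coordinate with a single point of 6.0 hours.
def _forecast_period_example():
    return forecast_period_coord(1, 6)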
def statistical_forecast_period_coord(section, frt_coord):
"""
Create a forecast period coordinate for a time-statistic message.
This applies only with a product definition template 4.8.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* frt_coord:
The scalar forecast reference time :class:`iris.coords.DimCoord`.
Returns:
The scalar forecast period :class:`iris.coords.DimCoord`, containing a
single, bounded point (period value).
"""
# Get the period end time as a datetime.
end_time = datetime(section['yearOfEndOfOverallTimeInterval'],
section['monthOfEndOfOverallTimeInterval'],
section['dayOfEndOfOverallTimeInterval'],
section['hourOfEndOfOverallTimeInterval'],
section['minuteOfEndOfOverallTimeInterval'],
section['secondOfEndOfOverallTimeInterval'])
# Get forecast reference time (frt) as a datetime.
frt_point = frt_coord.units.num2date(frt_coord.points[0])
# Get the period start time (as a timedelta relative to the frt).
forecast_time = section['forecastTime']
if options.support_hindcast_values:
# Apply the hindcast fix.
forecast_time = _hindcast_fix(forecast_time)
forecast_units = time_range_unit(section['indicatorOfUnitOfTimeRange'])
forecast_seconds = forecast_units.convert(forecast_time, 'seconds')
start_time_delta = timedelta(seconds=forecast_seconds)
# Get the period end time (as a timedelta relative to the frt).
end_time_delta = end_time - frt_point
# Get the middle of the period (as a timedelta relative to the frt).
# Note: timedelta division in 2.7 is odd. Even though we request integer
# division, it's to the nearest _micro_second.
mid_time_delta = (start_time_delta + end_time_delta) // 2
# Create and return the forecast period coordinate.
def timedelta_hours(timedelta):
return timedelta.total_seconds() / 3600.0
mid_point_hours = timedelta_hours(mid_time_delta)
bounds_hours = [timedelta_hours(start_time_delta),
timedelta_hours(end_time_delta)]
fp_coord = DimCoord(mid_point_hours, bounds=bounds_hours,
standard_name='forecast_period', units='hours')
return fp_coord
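# Worked example (illustrative, assumed values): for a statistic accumulated
# from T+3h to T+9h after the reference time, the coordinate built above has
# its point at the middle of the interval (6 hours) and bounds of [3, 9]
# hours. The arithmetic mirrors the timedelta handling used above.
def _statistical_period_example():
    start_time_delta = timedelta(hours=3)
    end_time_delta = timedelta(hours=9)
    mid_time_delta = (start_time_delta + end_time_delta) // 2
    point_hours = mid_time_delta.total_seconds() / 3600.0
    bounds_hours = [start_time_delta.total_seconds() / 3600.0,
                    end_time_delta.total_seconds() / 3600.0]
    return point_hours, bounds_hours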
def other_time_coord(rt_coord, fp_coord):
"""
Return the counterpart to the given scalar 'time' or
'forecast_reference_time' coordinate, by combining it with the
given forecast_period coordinate.
Bounds are not supported.
Args:
* rt_coord:
The scalar "reference time" :class:`iris.coords.DimCoord`,
as defined by section 1. This must be either a 'time' or
'forecast_reference_time' coordinate.
* fp_coord:
The scalar 'forecast_period' :class:`iris.coords.DimCoord`.
Returns:
The scalar :class:`iris.coords.DimCoord` for either 'time' or
'forecast_reference_time'.
"""
if not rt_coord.units.is_time_reference():
fmt = 'Invalid unit for reference time coord: {}'
raise ValueError(fmt.format(rt_coord.units))
if not fp_coord.units.is_time():
fmt = 'Invalid unit for forecast_period coord: {}'
raise ValueError(fmt.format(fp_coord.units))
if rt_coord.has_bounds() or fp_coord.has_bounds():
raise ValueError('Coordinate bounds are not supported')
if rt_coord.shape != (1,) or fp_coord.shape != (1,):
raise ValueError('Vector coordinates are not supported')
if rt_coord.standard_name == 'time':
rt_base_unit = str(rt_coord.units).split(' since ')[0]
fp = fp_coord.units.convert(fp_coord.points[0], rt_base_unit)
frt = rt_coord.points[0] - fp
return DimCoord(frt, 'forecast_reference_time', units=rt_coord.units)
elif rt_coord.standard_name == 'forecast_reference_time':
return validity_time_coord(rt_coord, fp_coord)
else:
fmt = 'Unexpected reference time coordinate: {}'
raise ValueError(fmt.format(rt_coord.name()))
def validity_time_coord(frt_coord, fp_coord):
"""
Create the validity or phenomenon time coordinate.
Args:
* frt_coord:
The scalar forecast reference time :class:`iris.coords.DimCoord`.
* fp_coord:
The scalar forecast period :class:`iris.coords.DimCoord`.
Returns:
The scalar time :class:`iris.coords.DimCoord`.
It has bounds if the period coord has them, otherwise not.
"""
if frt_coord.shape != (1,):
msg = 'Expected scalar forecast reference time coordinate when ' \
'calculating validity time, got shape {!r}'.format(frt_coord.shape)
raise ValueError(msg)
if fp_coord.shape != (1,):
msg = 'Expected scalar forecast period coordinate when ' \
'calculating validity time, got shape {!r}'.format(fp_coord.shape)
raise ValueError(msg)
def coord_timedelta(coord, value):
# Helper to convert a time coordinate value into a timedelta.
seconds = coord.units.convert(value, 'seconds')
return timedelta(seconds=seconds)
# Calculate validity (phenomenon) time in forecast-reference-time units.
frt_point = frt_coord.units.num2date(frt_coord.points[0])
point_delta = coord_timedelta(fp_coord, fp_coord.points[0])
point = frt_coord.units.date2num(frt_point + point_delta)
# Calculate bounds (if any) in the same way.
if fp_coord.bounds is None:
bounds = None
else:
bounds_deltas = [coord_timedelta(fp_coord, bound_point)
for bound_point in fp_coord.bounds[0]]
bounds = [frt_coord.units.date2num(frt_point + delta)
for delta in bounds_deltas]
# Create the time scalar coordinate.
coord = DimCoord(point, bounds=bounds,
standard_name='time', units=frt_coord.units)
return coord
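# Illustrative example (assumed values): a forecast reference time of
# 2015-01-01 00:00 combined with a 6 hour forecast period gives a validity
# (phenomenon) time of 2015-01-01 06:00, the single point encoded by the
# coordinate returned above.
def _validity_time_example():
    return datetime(2015, 1, 1) + timedelta(hours=6)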
def generating_process(section):
if options.warn_on_unsupported:
# Reference Code Table 4.3.
warnings.warn('Unable to translate type of generating process.')
warnings.warn('Unable to translate background generating '
'process identifier.')
warnings.warn('Unable to translate forecast generating '
'process identifier.')
def data_cutoff(hoursAfterDataCutoff, minutesAfterDataCutoff):
"""
Handle the after reference time data cutoff.
Args:
* hoursAfterDataCutoff:
Message section 4, octets 15-16.
* minutesAfterDataCutoff:
Message section 4, octet 17.
"""
if (hoursAfterDataCutoff != _MDI or
minutesAfterDataCutoff != _MDI):
if options.warn_on_unsupported:
warnings.warn('Unable to translate "hours and/or minutes '
'after data cutoff".')
def statistical_cell_method(section):
"""
Create a cell method representing a time statistic.
This applies only with a product definition template 4.8.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
Returns:
A cell method over 'time'.
"""
# Handle the number of time ranges -- we currently only support one.
n_time_ranges = section['numberOfTimeRange']
if n_time_ranges != 1:
if n_time_ranges == 0:
msg = ('Product definition section 4 specifies aggregation over '
'"0 time ranges".')
raise TranslationError(msg)
else:
msg = ('Product definition section 4 specifies aggregation over '
'multiple time ranges [{}], which is not yet '
'supported.'.format(n_time_ranges))
raise TranslationError(msg)
# Decode the type of statistic (aggregation method).
statistic_code = section['typeOfStatisticalProcessing']
statistic_name = _STATISTIC_TYPE_NAMES.get(statistic_code)
if statistic_name is None:
msg = ('grib statistical process type [{}] '
'is not supported'.format(statistic_code))
raise TranslationError(msg)
# Decode the type of time increment.
increment_typecode = section['typeOfTimeIncrement']
if increment_typecode not in (2, 255):
# NOTE: All our current test data seems to contain the value 2, which
# is all we currently support.
# The exact interpretation of this is still unclear so we also accept
# a missing value.
msg = ('grib statistic time-increment type [{}] '
'is not supported.'.format(increment_typecode))
raise TranslationError(msg)
interval_number = section['timeIncrement']
if interval_number == 0:
intervals_string = None
else:
units_string = _TIME_RANGE_UNITS[
section['indicatorOfUnitForTimeIncrement']]
intervals_string = '{} {}'.format(interval_number, units_string)
# Create a cell method to represent the time aggregation.
cell_method = CellMethod(method=statistic_name,
coords='time',
intervals=intervals_string)
return cell_method
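# Illustrative example (assumes the usual mappings of statistic code 0 to
# 'mean' in _STATISTIC_TYPE_NAMES and of time-increment unit 1 to 'hours'):
# a time mean sampled at 1 hour increments is represented by the cell method
# constructed above.
def _statistical_cell_method_example():
    return CellMethod(method='mean', coords='time', intervals='1 hours')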
def ensemble_identifier(section):
if options.warn_on_unsupported:
# Reference Code Table 4.6.
warnings.warn('Unable to translate type of ensemble forecast.')
warnings.warn('Unable to translate number of forecasts in ensemble.')
# Create the realization coordinates.
realization = DimCoord(section['perturbationNumber'],
standard_name='realization',
units='no_unit')
return realization
def product_definition_template_0(section, metadata, rt_coord):
"""
Translate template representing an analysis or forecast at a horizontal
level or in a horizontal layer at a point in time.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
* rt_coord:
The scalar "reference time" :class:`iris.coords.DimCoord`.
This will be either 'time' or 'forecast_reference_time'.
"""
# Handle generating process details.
generating_process(section)
# Handle the data cutoff.
data_cutoff(section['hoursAfterDataCutoff'],
section['minutesAfterDataCutoff'])
if 'forecastTime' in section.keys():
forecast_time = section['forecastTime']
# The gribapi encodes the forecast time as 'startStep' for pdt 4.4x;
# product_definition_template_40 makes use of this function. The
# following will be removed once the suspected bug is fixed.
elif 'startStep' in section.keys():
forecast_time = section['startStep']
# Calculate the forecast period coordinate.
fp_coord = forecast_period_coord(section['indicatorOfUnitOfTimeRange'],
forecast_time)
# Add the forecast period coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((fp_coord, None))
# Calculate the "other" time coordinate - i.e. whichever of 'time'
# or 'forecast_reference_time' we don't already have.
other_coord = other_time_coord(rt_coord, fp_coord)
# Add the time coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((other_coord, None))
# Add the reference time coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((rt_coord, None))
# Check for vertical coordinates.
vertical_coords(section, metadata)
def product_definition_template_1(section, metadata, frt_coord):
"""
Translate template representing individual ensemble forecast, control
and perturbed, at a horizontal level or in a horizontal layer at a
point in time.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
        :class:`collections.OrderedDict` of metadata.
* frt_coord:
The scalar forecast reference time :class:`iris.coords.DimCoord`.
"""
# Perform identical message processing.
product_definition_template_0(section, metadata, frt_coord)
realization = ensemble_identifier(section)
# Add the realization coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((realization, None))
def product_definition_template_8(section, metadata, frt_coord):
"""
Translate template representing average, accumulation and/or extreme values
or other statistically processed values at a horizontal level or in a
horizontal layer in a continuous or non-continuous time interval.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
* frt_coord:
The scalar forecast reference time :class:`iris.coords.DimCoord`.
"""
# Handle generating process details.
generating_process(section)
# Handle the data cutoff.
data_cutoff(section['hoursAfterDataCutoff'],
section['minutesAfterDataCutoff'])
# Create a cell method to represent the time statistic.
time_statistic_cell_method = statistical_cell_method(section)
# Add the forecast cell method to the metadata.
metadata['cell_methods'].append(time_statistic_cell_method)
# Add the forecast reference time coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((frt_coord, None))
# Add a bounded forecast period coordinate.
fp_coord = statistical_forecast_period_coord(section, frt_coord)
metadata['aux_coords_and_dims'].append((fp_coord, None))
# Calculate a bounded validity time coord matching the forecast period.
t_coord = validity_time_coord(frt_coord, fp_coord)
# Add the time coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((t_coord, None))
# Check for vertical coordinates.
vertical_coords(section, metadata)
def product_definition_template_9(section, metadata, frt_coord):
"""
Translate template representing probability forecasts at a
horizontal level or in a horizontal layer in a continuous or
non-continuous time interval.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
* frt_coord:
The scalar forecast reference time :class:`iris.coords.DimCoord`.
"""
# Start by calling PDT8 as all elements of that are common to this.
product_definition_template_8(section, metadata, frt_coord)
# Remove the cell_method encoding the underlying statistic, as CF does not
# currently support this type of representation.
cell_method, = metadata['cell_methods']
metadata['cell_methods'] = []
# NOTE: we currently don't record the nature of the underlying statistic,
# as we don't have an agreed way of representing that in CF.
# Return a probability object to control the production of a probability
# result. This is done once the underlying phenomenon type is determined,
# in 'translate_phenomenon'.
probability_typecode = section['probabilityType']
if probability_typecode == 1:
# Type is "above upper level".
threshold_value = section['scaledValueOfUpperLimit']
if threshold_value == _MDI:
msg = 'Product definition section 4 has missing ' \
'scaled value of upper limit'
raise TranslationError(msg)
threshold_scaling = section['scaleFactorOfUpperLimit']
if threshold_scaling == _MDI:
msg = 'Product definition section 4 has missing ' \
'scale factor of upper limit'
raise TranslationError(msg)
# Encode threshold information.
threshold = unscale(threshold_value, threshold_scaling)
probability_type = Probability('above_threshold', threshold)
# Note that GRIB provides separate "above lower threshold" and "above
# upper threshold" probability types. This naming style doesn't
# recognise that distinction. For now, assume this is not important.
else:
msg = ('Product definition section 4 contains an unsupported '
'probability type [{}]'.format(probability_typecode))
raise TranslationError(msg)
return probability_type
def product_definition_template_11(section, metadata, frt_coord):
"""
Translate template representing individual ensemble forecast, control
or perturbed; average, accumulation and/or extreme values
or other statistically processed values at a horizontal level or in a
horizontal layer in a continuous or non-continuous time interval.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
* frt_coord:
The scalar forecast reference time :class:`iris.coords.DimCoord`.
"""
product_definition_template_8(section, metadata, frt_coord)
realization = ensemble_identifier(section)
# Add the realization coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((realization, None))
def product_definition_template_31(section, metadata, rt_coord):
"""
Translate template representing a satellite product.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
* rt_coord:
        The scalar observation time :class:`iris.coords.DimCoord`.
"""
if options.warn_on_unsupported:
warnings.warn('Unable to translate type of generating process.')
warnings.warn('Unable to translate observation generating '
'process identifier.')
# Number of contributing spectral bands.
NB = section['NB']
if NB > 0:
# Create the satellite series coordinate.
satelliteSeries = section['satelliteSeries']
coord = AuxCoord(satelliteSeries, long_name='satellite_series')
# Add the satellite series coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((coord, None))
# Create the satellite number coordinate.
satelliteNumber = section['satelliteNumber']
coord = AuxCoord(satelliteNumber, long_name='satellite_number')
# Add the satellite number coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((coord, None))
# Create the satellite instrument type coordinate.
instrumentType = section['instrumentType']
coord = AuxCoord(instrumentType, long_name='instrument_type')
# Add the instrument type coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((coord, None))
# Create the central wave number coordinate.
scaleFactor = section['scaleFactorOfCentralWaveNumber']
scaledValue = section['scaledValueOfCentralWaveNumber']
wave_number = unscale(scaledValue, scaleFactor)
standard_name = 'sensor_band_central_radiation_wavenumber'
coord = AuxCoord(wave_number,
standard_name=standard_name,
units=Unit('m-1'))
# Add the central wave number coordinate to the metadata aux coords.
metadata['aux_coords_and_dims'].append((coord, None))
# Add the observation time coordinate.
metadata['aux_coords_and_dims'].append((rt_coord, None))
def product_definition_template_40(section, metadata, frt_coord):
"""
Translate template representing an analysis or forecast at a horizontal
level or in a horizontal layer at a point in time for atmospheric chemical
constituents.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
        :class:`collections.OrderedDict` of metadata.
* frt_coord:
The scalar forecast reference time :class:`iris.coords.DimCoord`.
"""
# Perform identical message processing.
product_definition_template_0(section, metadata, frt_coord)
constituent_type = section['constituentType']
# Add the constituent type as an attribute.
metadata['attributes']['WMO_constituent_type'] = constituent_type
def product_definition_section(section, metadata, discipline, tablesVersion,
rt_coord):
"""
Translate section 4 from the GRIB2 message.
Updates the metadata in-place with the translations.
Args:
* section:
Dictionary of coded key/value pairs from section 4 of the message.
* metadata:
:class:`collections.OrderedDict` of metadata.
* discipline:
Message section 0, octet 7.
* tablesVersion:
Message section 1, octet 10.
* rt_coord:
The scalar reference time :class:`iris.coords.DimCoord`.
"""
# Reference GRIB2 Code Table 4.0.
template = section['productDefinitionTemplateNumber']
probability = None
if template == 0:
# Process analysis or forecast at a horizontal level or
# in a horizontal layer at a point in time.
product_definition_template_0(section, metadata, rt_coord)
elif template == 1:
# Process individual ensemble forecast, control and perturbed, at
# a horizontal level or in a horizontal layer at a point in time.
product_definition_template_1(section, metadata, rt_coord)
elif template == 8:
# Process statistically processed values at a horizontal level or in a
# horizontal layer in a continuous or non-continuous time interval.
product_definition_template_8(section, metadata, rt_coord)
elif template == 9:
probability = \
product_definition_template_9(section, metadata, rt_coord)
elif template == 11:
product_definition_template_11(section, metadata, rt_coord)
elif template == 31:
# Process satellite product.
product_definition_template_31(section, metadata, rt_coord)
elif template == 40:
product_definition_template_40(section, metadata, rt_coord)
else:
msg = 'Product definition template [{}] is not ' \
'supported'.format(template)
raise TranslationError(msg)
# Translate GRIB2 phenomenon to CF phenomenon.
if tablesVersion != _CODE_TABLES_MISSING:
translate_phenomenon(metadata, discipline,
section['parameterCategory'],
section['parameterNumber'],
probability=probability)
###############################################################################
#
# Data Representation Section 5
#
###############################################################################
def data_representation_section(section):
"""
Translate section 5 from the GRIB2 message.
Grid point template decoding is fully provided by the ECMWF GRIB API,
all grid point and spectral templates are supported, the data payload
is returned from the GRIB API already unpacked.
"""
# Reference GRIB2 Code Table 5.0.
template = section['dataRepresentationTemplateNumber']
# Supported templates for both grid point and spectral data:
grid_point_templates = (0, 1, 2, 3, 4, 40, 41, 61)
spectral_templates = (50, 51)
supported_templates = grid_point_templates + spectral_templates
if template not in supported_templates:
msg = 'Data Representation Section Template [{}] is not ' \
'supported'.format(template)
raise TranslationError(msg)
###############################################################################
#
# Bitmap Section 6
#
###############################################################################
def bitmap_section(section):
"""
Translate section 6 from the GRIB2 message.
The bitmap can take the following values:
* 0: Bitmap applies to the data and is specified in this section
of this message.
* 1-253: Bitmap applies to the data, is specified by originating
centre and is not specified in section 6 of this message.
* 254: Bitmap applies to the data, is specified in an earlier
section 6 of this message and is not specified in this
section 6 of this message.
* 255: Bitmap does not apply to the data.
Only values 0 and 255 are supported.
"""
# Reference GRIB2 Code Table 6.0.
bitMapIndicator = section['bitMapIndicator']
if bitMapIndicator not in [_BITMAP_CODE_NONE, _BITMAP_CODE_PRESENT]:
msg = 'Bitmap Section 6 contains unsupported ' \
'bitmap indicator [{}]'.format(bitMapIndicator)
raise TranslationError(msg)
###############################################################################
def grib2_convert(field, metadata):
"""
Translate the GRIB2 message into the appropriate cube metadata.
Updates the metadata in-place with the translations.
Args:
* field:
GRIB2 message to be translated.
* metadata:
:class:`collections.OrderedDict` of metadata.
"""
# Section 1 - Identification Section.
centre = _CENTRES.get(field.sections[1]['centre'])
if centre is not None:
metadata['attributes']['centre'] = centre
rt_coord = reference_time_coord(field.sections[1])
# Section 3 - Grid Definition Section (Grid Definition Template)
grid_definition_section(field.sections[3], metadata)
# Section 4 - Product Definition Section (Product Definition Template)
product_definition_section(field.sections[4], metadata,
field.sections[0]['discipline'],
field.sections[1]['tablesVersion'],
rt_coord)
# Section 5 - Data Representation Section (Data Representation Template)
data_representation_section(field.sections[5])
# Section 6 - Bitmap Section.
bitmap_section(field.sections[6])
###############################################################################
def convert(field):
"""
Translate the GRIB message into the appropriate cube metadata.
Args:
* field:
GRIB message to be translated.
Returns:
A :class:`iris.fileformats.rules.ConversionMetadata` object.
"""
editionNumber = field.sections[0]['editionNumber']
if editionNumber != 2:
msg = 'GRIB edition {} is not supported'.format(editionNumber)
raise TranslationError(msg)
# Initialise the cube metadata.
metadata = OrderedDict()
metadata['factories'] = []
metadata['references'] = []
metadata['standard_name'] = None
metadata['long_name'] = None
metadata['units'] = None
metadata['attributes'] = {}
metadata['cell_methods'] = []
metadata['dim_coords_and_dims'] = []
metadata['aux_coords_and_dims'] = []
# Convert GRIB2 message to cube metadata.
grib2_convert(field, metadata)
return ConversionMetadata._make(metadata.values())
| arulalant/UMRider | others/ncmrwfIRIS/_load_convert.py | Python | gpl-2.0 | 82,618 |
import numpy as np
from cytokine_settings import build_intracell_model, DEFAULT_CYTOKINE_MODEL, APP_FIELD_STRENGTH, RUNS_SUBDIR_CYTOKINES, BETA_CYTOKINE
from cytokine_simulate import cytokine_sim
from singlecell.singlecell_class import Cell
from singlecell.singlecell_constants import NUM_STEPS, BETA
from singlecell.singlecell_data_io import run_subdir_setup, runinfo_append
from singlecell.singlecell_functions import state_to_label, label_to_state
def state_landscape(model_name=DEFAULT_CYTOKINE_MODEL, iterations=NUM_STEPS, applied_field_strength=APP_FIELD_STRENGTH,
external_field=None, flag_write=False):
    spin_labels, intxn_matrix, applied_field_const, init_state = build_intracell_model(model_name=model_name)
N = len(spin_labels)
labels_to_states = {idx:label_to_state(idx, N) for idx in xrange(2 ** N)}
states_to_labels = {tuple(v): k for k, v in labels_to_states.iteritems()}
for state_label in range(2**N):
init_cond = labels_to_states[state_label]
print "\n\nSimulating with init state label", state_label, ":", init_cond
state_array, dirs = cytokine_sim(iterations=iterations, beta=BETA_CYTOKINE, flag_write=False,
applied_field_strength=applied_field_strength, external_field=external_field,
init_state_force=init_cond)
label_timeseries = [states_to_labels[tuple(state_array[:,t])] for t in xrange(iterations)]
for elem in label_timeseries:
print elem, "|",
return
if __name__ == '__main__':
# For model A:
# - deterministic oscillations between state 0 (all-off) and state 15 (all-on)
# - if sufficient field is added, the oscillations disappear and its just stuck in the all-on state 15
# - threshold h_0 strength is cancelling the negative feedback term J_2on0 = J[0,2] of SOCS (s_2) on R (s_0)
# - TODO: issue seen in multicell may 10 that SOCS off => R on, logical wiring problem... need to resolve
external_field = np.array([1,0,0,0])
state_landscape(iterations=20, applied_field_strength=1.0, external_field=external_field)
| mattsmart/biomodels | celltypes/cytokine/cytokine_landscape.py | Python | mit | 2,172 |
from typing import List
class IO:
def mode(self) -> str: ...
def name(self) -> str: ...
def close(self) -> None: ...
def closed(self) -> bool: ...
def fileno(self) -> int: ...
def flush(self) -> None: ...
def isatty(self) -> bool: ...
def read(self, n: int = 0) -> str: ...
def readable(self) -> bool: ...
def readline(self, limit: int = 1) -> str: ...
def readlines(self, hint: int = 1) -> List[str]: ...
def seek(self, offset: int, whence: int = 1) -> int: ...
def seekable(self) -> bool: ...
def tell(self) -> int: ...
def truncate(self, size: int = 1) -> int: ...
def writable(self) -> bool: ...
def write(self, s: str) -> int: ...
def writelines(self, lines: List[str]) -> None: ...
def next(self) -> str: ...
class TextIO():
def write(self, arg: str) -> None: ...
argv = [""]
stdout = TextIO()
stderr = IO()
def exit(arg: object) -> None:
pass
| caterinaurban/Typpete | typpete/src/stubs/libraries/sys.py | Python | mpl-2.0 | 958 |
#!/usr/bin/python
#
# Copyright (c) 2012 Joshua Hughes <kivhift@gmail.com>
#
import nose
nose.main()
| kivhift/pu | src/tests/run-em.py | Python | mit | 101 |
import django # this verifies local libraries can be packed into the egg
import additiondependency
def addition(first, second):
additiondependency.dependantMethod()
return first + second
def addition2(first, second, third):
additiondependency.dependantMethod()
return first + second + third
| Stratoscale/pyracktest | example_seeds/addition.py | Python | apache-2.0 | 312 |
import logging
import asyncio
import random
import sortedcontainers
import collections
from hailtop.utils import (
AsyncWorkerPool, WaitableSharedPool, retry_long_running, run_if_changed,
time_msecs, secret_alnum_string)
from hailtop import aiotools
from ..batch import schedule_job, unschedule_job, mark_job_complete
from ..utils import WindowFractionCounter
log = logging.getLogger('driver')
class Box:
def __init__(self, value):
self.value = value
class ExceededSharesCounter:
def __init__(self):
self._global_counter = WindowFractionCounter(10)
def push(self, success: bool):
self._global_counter.push('exceeded_shares', success)
def rate(self) -> float:
return self._global_counter.fraction()
def __repr__(self):
return f'global {self._global_counter}'
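# Illustrative usage only (assumes WindowFractionCounter.fraction returns the
# fraction of True values pushed within its window): after two "exceeded"
# pushes out of three, the rate is roughly 2/3, which schedule_loop_body
# compares against a random draw to decide whether to stop scheduling past a
# user's share.
def _exceeded_shares_example():
    counter = ExceededSharesCounter()
    counter.push(True)
    counter.push(False)
    counter.push(True)
    return counter.rate()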
class Scheduler:
def __init__(self, app):
self.app = app
self.scheduler_state_changed = app['scheduler_state_changed']
self.cancel_ready_state_changed = app['cancel_ready_state_changed']
self.cancel_running_state_changed = app['cancel_running_state_changed']
self.db = app['db']
self.inst_pool = app['inst_pool']
self.async_worker_pool = AsyncWorkerPool(parallelism=100, queue_size=100)
self.exceeded_shares_counter = ExceededSharesCounter()
self.task_manager = aiotools.BackgroundTaskManager()
async def async_init(self):
self.task_manager.ensure_future(retry_long_running(
'schedule_loop',
run_if_changed, self.scheduler_state_changed, self.schedule_loop_body))
self.task_manager.ensure_future(retry_long_running(
'cancel_cancelled_ready_jobs_loop',
run_if_changed, self.cancel_ready_state_changed, self.cancel_cancelled_ready_jobs_loop_body))
self.task_manager.ensure_future(retry_long_running(
'cancel_cancelled_running_jobs_loop',
run_if_changed, self.cancel_running_state_changed, self.cancel_cancelled_running_jobs_loop_body))
self.task_manager.ensure_future(retry_long_running(
'bump_loop',
self.bump_loop))
def shutdown(self):
try:
self.task_manager.shutdown()
finally:
self.async_worker_pool.shutdown()
async def compute_fair_share(self):
free_cores_mcpu = sum([
worker.free_cores_mcpu
for worker in self.inst_pool.healthy_instances_by_free_cores
])
user_running_cores_mcpu = {}
user_total_cores_mcpu = {}
result = {}
pending_users_by_running_cores = sortedcontainers.SortedSet(
key=lambda user: user_running_cores_mcpu[user])
allocating_users_by_total_cores = sortedcontainers.SortedSet(
key=lambda user: user_total_cores_mcpu[user])
records = self.db.execute_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(n_ready_jobs), 0) AS SIGNED) AS n_ready_jobs,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu,
CAST(COALESCE(SUM(n_running_jobs), 0) AS SIGNED) AS n_running_jobs,
CAST(COALESCE(SUM(running_cores_mcpu), 0) AS SIGNED) AS running_cores_mcpu
FROM user_resources
GROUP BY user;
''',
timer_description='in compute_fair_share: aggregate user_resources')
async for record in records:
user = record['user']
user_running_cores_mcpu[user] = record['running_cores_mcpu']
user_total_cores_mcpu[user] = record['running_cores_mcpu'] + record['ready_cores_mcpu']
pending_users_by_running_cores.add(user)
record['allocated_cores_mcpu'] = 0
result[user] = record
def allocate_cores(user, mark):
result[user]['allocated_cores_mcpu'] = int(mark - user_running_cores_mcpu[user] + 0.5)
mark = 0
while free_cores_mcpu > 0 and (pending_users_by_running_cores or allocating_users_by_total_cores):
lowest_running = None
lowest_total = None
if pending_users_by_running_cores:
lowest_running_user = pending_users_by_running_cores[0]
lowest_running = user_running_cores_mcpu[lowest_running_user]
if lowest_running == mark:
pending_users_by_running_cores.remove(lowest_running_user)
allocating_users_by_total_cores.add(lowest_running_user)
continue
if allocating_users_by_total_cores:
lowest_total_user = allocating_users_by_total_cores[0]
lowest_total = user_total_cores_mcpu[lowest_total_user]
if lowest_total == mark:
allocating_users_by_total_cores.remove(lowest_total_user)
allocate_cores(lowest_total_user, mark)
continue
allocation = min([c for c in [lowest_running, lowest_total] if c is not None])
n_allocating_users = len(allocating_users_by_total_cores)
cores_to_allocate = n_allocating_users * (allocation - mark)
if cores_to_allocate > free_cores_mcpu:
mark += int(free_cores_mcpu / n_allocating_users + 0.5)
free_cores_mcpu = 0
break
mark = allocation
free_cores_mcpu -= cores_to_allocate
for user in allocating_users_by_total_cores:
allocate_cores(user, mark)
return result
async def bump_loop(self):
while True:
log.info('bump loop')
self.scheduler_state_changed.set()
self.cancel_ready_state_changed.set()
self.cancel_running_state_changed.set()
await asyncio.sleep(60)
async def cancel_cancelled_ready_jobs_loop_body(self):
records = self.db.select_and_fetchall(
'''
SELECT user, n_cancelled_ready_jobs
FROM (SELECT user,
CAST(COALESCE(SUM(n_cancelled_ready_jobs), 0) AS SIGNED) AS n_cancelled_ready_jobs
FROM user_resources
GROUP BY user) AS t
WHERE n_cancelled_ready_jobs > 0;
''',
timer_description='in cancel_cancelled_ready_jobs: aggregate n_cancelled_ready_jobs')
user_n_cancelled_ready_jobs = {
record['user']: record['n_cancelled_ready_jobs'] async for record in records
}
total = sum(user_n_cancelled_ready_jobs.values())
if not total:
should_wait = True
return should_wait
user_share = {
user: max(int(300 * user_n_jobs / total + 0.5), 20)
for user, user_n_jobs in user_n_cancelled_ready_jobs.items()
}
async def user_cancelled_ready_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT id, cancelled
FROM batches
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in cancel_cancelled_ready_jobs: get {user} running batches'):
if batch['cancelled']:
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in cancel_cancelled_ready_jobs: get {user} batch {batch["id"]} ready cancelled jobs (1)'):
record['batch_id'] = batch['id']
yield record
else:
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0 AND cancelled = 1
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in cancel_cancelled_ready_jobs: get {user} batch {batch["id"]} ready cancelled jobs (2)'):
record['batch_id'] = batch['id']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, share in user_share.items():
remaining = Box(share)
async for record in user_cancelled_ready_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
log.info(f'cancelling job {id}')
async def cancel_with_error_handling(app, batch_id, job_id, id):
try:
resources = []
await mark_job_complete(
app, batch_id, job_id, None, None,
'Cancelled', None, None, None, 'cancelled', resources)
except Exception:
log.info(f'error while cancelling job {id}', exc_info=True)
await waitable_pool.call(
cancel_with_error_handling,
self.app, batch_id, job_id, id)
remaining.value -= 1
if remaining.value <= 0:
should_wait = False
break
await waitable_pool.wait()
return should_wait
async def cancel_cancelled_running_jobs_loop_body(self):
records = self.db.select_and_fetchall(
'''
SELECT user, n_cancelled_running_jobs
FROM (SELECT user,
CAST(COALESCE(SUM(n_cancelled_running_jobs), 0) AS SIGNED) AS n_cancelled_running_jobs
FROM user_resources
GROUP BY user) AS t
WHERE n_cancelled_running_jobs > 0;
''',
timer_description='in cancel_cancelled_running_jobs: aggregate n_cancelled_running_jobs')
user_n_cancelled_running_jobs = {
record['user']: record['n_cancelled_running_jobs'] async for record in records
}
total = sum(user_n_cancelled_running_jobs.values())
if not total:
should_wait = True
return should_wait
user_share = {
user: max(int(300 * user_n_jobs / total + 0.5), 20)
for user, user_n_jobs in user_n_cancelled_running_jobs.items()
}
async def user_cancelled_running_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT id
FROM batches
WHERE user = %s AND `state` = 'running' AND cancelled = 1;
''',
(user,),
timer_description=f'in cancel_cancelled_running_jobs: get {user} cancelled batches'):
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id, attempts.attempt_id, attempts.instance_name
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
STRAIGHT_JOIN attempts
ON attempts.batch_id = jobs.batch_id AND attempts.job_id = jobs.job_id
WHERE jobs.batch_id = %s AND state = 'Running' AND always_run = 0 AND cancelled = 0
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in cancel_cancelled_running_jobs: get {user} batch {batch["id"]} running cancelled jobs'):
record['batch_id'] = batch['id']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, share in user_share.items():
remaining = Box(share)
async for record in user_cancelled_running_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
async def unschedule_with_error_handling(app, record, instance_name, id):
try:
await unschedule_job(app, record)
except Exception:
                        log.info(f'error while unscheduling job {id} on instance {instance_name}', exc_info=True)
await waitable_pool.call(
unschedule_with_error_handling, self.app, record, record['instance_name'], id)
remaining.value -= 1
if remaining.value <= 0:
should_wait = False
break
await waitable_pool.wait()
return should_wait
async def schedule_loop_body(self):
log.info('schedule: starting')
start = time_msecs()
n_scheduled = 0
user_resources = await self.compute_fair_share()
total = sum(resources['allocated_cores_mcpu']
for resources in user_resources.values())
if not total:
log.info('schedule: no allocated cores')
should_wait = True
return should_wait
user_share = {
user: max(int(300 * resources['allocated_cores_mcpu'] / total + 0.5), 20)
for user, resources in user_resources.items()
}
async def user_runnable_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT id, cancelled, userdata, user, format_version
FROM batches
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in schedule: get {user} running batches'):
async for record in self.db.select_and_fetchall(
'''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 1
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in schedule: get {user} batch {batch["id"]} runnable jobs (1)'):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
if not batch['cancelled']:
async for record in self.db.select_and_fetchall(
'''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0 AND cancelled = 0
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in schedule: get {user} batch {batch["id"]} runnable jobs (2)'):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
def get_instance(user, cores_mcpu):
i = self.inst_pool.healthy_instances_by_free_cores.bisect_key_left(cores_mcpu)
while i < len(self.inst_pool.healthy_instances_by_free_cores):
instance = self.inst_pool.healthy_instances_by_free_cores[i]
assert cores_mcpu <= instance.free_cores_mcpu
if user != 'ci' or (user == 'ci' and instance.zone.startswith('us-central1')):
return instance
i += 1
histogram = collections.defaultdict(int)
for instance in self.inst_pool.healthy_instances_by_free_cores:
histogram[instance.free_cores_mcpu] += 1
log.info(f'schedule: no viable instances for {cores_mcpu}: {histogram}')
return None
should_wait = True
for user, resources in user_resources.items():
allocated_cores_mcpu = resources['allocated_cores_mcpu']
if allocated_cores_mcpu == 0:
continue
scheduled_cores_mcpu = 0
share = user_share[user]
log.info(f'schedule: user-share: {user}: {allocated_cores_mcpu} {share}')
remaining = Box(share)
async for record in user_runnable_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
attempt_id = secret_alnum_string(6)
record['attempt_id'] = attempt_id
if scheduled_cores_mcpu + record['cores_mcpu'] > allocated_cores_mcpu:
if random.random() > self.exceeded_shares_counter.rate():
self.exceeded_shares_counter.push(True)
self.scheduler_state_changed.set()
break
self.exceeded_shares_counter.push(False)
instance = get_instance(user, record['cores_mcpu'])
if instance:
instance.adjust_free_cores_in_memory(-record['cores_mcpu'])
scheduled_cores_mcpu += record['cores_mcpu']
n_scheduled += 1
should_wait = False
async def schedule_with_error_handling(app, record, id, instance):
try:
await schedule_job(app, record, instance)
except Exception:
log.info(f'scheduling job {id} on {instance}', exc_info=True)
await waitable_pool.call(
schedule_with_error_handling, self.app, record, id, instance)
remaining.value -= 1
if remaining.value <= 0:
break
await waitable_pool.wait()
end = time_msecs()
log.info(f'schedule: scheduled {n_scheduled} jobs in {end - start}ms')
return should_wait
| danking/hail | batch/batch/driver/scheduler.py | Python | mit | 17,953 |
from taskw import TaskWarriorShellout
class TwCurrent(object):
def __init__(self, file=None):
self.tw = TaskWarriorShellout()
self.tw.config_filename = file
def get_current(self):
tw = TaskWarriorShellout()
tw.config_filename = self.tw.config_filename
tasks = tw.filter_tasks({'tags.contains': 'current'})
current = tasks[0]
return current
def set_current(self, id):
tasks = self.tw.filter_tasks({'tags.contains': 'current'})
for task in tasks:
task['tags'].remove('current')
self.tw.task_update(task)
        id, task = self.tw.get_task(id=id)
try:
            task['tags'].append('current')
except KeyError:
task['tags'] = ['current']
self.tw.task_update(task)
def get_pending(self):
tasks = self.tw.filter_tasks({'status': 'pending'})
return tasks
if __name__ == '__main__':
tw = TwCurrent()
tw.get_current()
| DavidParkin/pomodoro-indicator | app/twcurrent.py | Python | gpl-3.0 | 986 |
import numpy as np
import gnumpy as gnp
import utils
from itertools import izip
class Hm():
def __init__(self, p_layers, q_layers, prior):
'''
        p_net has all the generative layers, starting from the one closest to the prior and ending with the one closest to the data
        q_net has all the approximate inference layers, starting from the one closest to the data
'''
self.p_layers = p_layers
self.q_layers = q_layers
self.prior = prior
def q_samplesIx(self, x, repeat=1):
sample = utils.gnp_repeat(x, repeat)
yield sample
for layer in self.q_layers:
sample = layer.sampleIx(sample)
yield sample
def p_samplesIprior_sample(self, x, to_sample=True):
sample = x
yield sample
for layer in self.p_layers[:-1]:
sample = layer.sampleIx(sample)
yield sample
if to_sample:
sample = self.p_layers[-1].sampleIx(sample)
yield sample
else:
p = self.p_layers[-1].pIx(sample)
yield p
def log_pIq_samples(self, q_samples):
logp = 0
for layer, inpt, sample in izip(reversed(self.p_layers), q_samples[1:], q_samples):
logp += layer.log_likelihood_sampleIx(sample, inpt)
return logp
def log_qIq_samples(self, q_samples):
logq = 0
for layer, inpt, sample in izip(self.q_layers, q_samples, q_samples[1:]):
logq += layer.log_likelihood_sampleIx(sample, inpt)
return logq
def posterior_importance_weightsIq_samples(self, q_samples, repeat):
log_q = self.log_qIq_samples(q_samples)
log_p = self.log_pIq_samples(q_samples)
log_prior_unnormalized = self.prior.log_likelihood_unnormalized(q_samples[-1])
log_q = log_q.reshape(-1, repeat)
log_p = log_p.reshape(-1, repeat)
log_prior_unnormalized = log_prior_unnormalized.reshape(-1, repeat)
log_weights = log_p + log_prior_unnormalized - log_q
log_weights -= gnp.max(log_weights, axis=1)[:, None]
weights = gnp.exp(log_weights)
weights /= gnp.sum(weights, axis=1)[:, None]
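        # These are self-normalised importance weights: subtracting the per-row
        # maximum before exponentiating is the usual log-sum-exp stabilisation,
        # and each row then sums to one across the `repeat` posterior samples.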
return weights
def wake_sleep_reweight(self, minibatch, repeat, learning_rate, cd_steps):
q_samples = list(self.q_samplesIx(minibatch, repeat))
prior_sample = self.prior.run_mcmc_chain(5,q_samples[-1],to_sample=True)
p_samples = list(self.p_samplesIprior_sample(prior_sample, to_sample=True))
# weights = self.posterior_importance_weightsIq_samples(q_samples, repeat=repeat)
# # weights /= weights.shape[0]
# weights = weights.reshape((-1, 1))
# # weights = np.empty((minibatch.shape[0]*repeat,1))
# # weights.fill(1./minibatch.shape[0]*repeat)
# # weights = gnp.garray(weights)
weights = 1./(minibatch.shape[0]*repeat)
self.update_q_wake(learning_rate=learning_rate, q_samples=p_samples, weights=weights)
# self.update_q_wake(learning_rate=learning_rate, q_samples=q_samples, weights=weights)
self.prior.increase_weighted_log_likelihood(learning_rate=learning_rate, samples=q_samples[-1], weights=weights, cd_steps=cd_steps)
self.update_p_wake(learning_rate=learning_rate, q_samples=q_samples, weights=weights)
def update_q_wake(self, learning_rate, q_samples, weights):
for layer, inpt, sample in izip(self.q_layers, reversed(q_samples), reversed(q_samples[:-1])):
layer.increase_weighted_log_likelihood(samples=sample, inputs=inpt, weights=weights, learning_rate=learning_rate)
def update_p_wake(self, learning_rate, q_samples, weights):
for layer, inpt, sample in izip(reversed(self.p_layers), q_samples[1:], q_samples):
layer.increase_weighted_log_likelihood(samples=sample, inputs=inpt, weights=weights, learning_rate=learning_rate)
if __name__ == '__main__':
import argparse
import os
import config
import prob_distributions
import nn
import rbm
import evaluate
import cPickle as pkl
import datasets
dataset = datasets.mnist()
parser = argparse.ArgumentParser(description='Train a DBN with jont wake sleep training procedure on the MNIST dataset')
parser.add_argument('--learning_rate1', '-l1', type=float, default=3e-3)
parser.add_argument('--learning_rate2', '-l2', type=float, default=3e-6)
parser.add_argument('--learning_ratep', '-lp', type=float, default=1e-3)
parser.add_argument('--cd_steps', '-c', type=int, default=10)
parser.add_argument('--num_epochs', '-e', type=int, default=50)
parser.add_argument('--num_pre_epochs', '-p', type=int, default=20)
parser.add_argument('--repeat', '-k', type=int, default=10)
parser.add_argument('--units', '-u', nargs='+', type=int, default=[500,500])
args = parser.parse_args()
num_epochs = args.num_epochs
num_pre_epochs = args.num_pre_epochs
learning_rate1 = args.learning_rate1
learning_rate2 = args.learning_rate2
learning_ratep = args.learning_ratep
cd_steps = args.cd_steps
repeat = args.repeat
u = args.units
minibatch_size = 20
data_dim = dataset.get_data_dim()
directory_name = os.path.join(config.RESULTS_DIR, 'hm_contrastq{}'.format(u))
directory_name = os.path.join(directory_name, 'e{}c{}p{}k{}l1{}l2{}lp{}'.format(num_epochs, cd_steps, num_pre_epochs, repeat, learning_rate1, learning_rate2, learning_ratep))
if not os.path.exists(directory_name):
os.makedirs(directory_name)
u.insert(0,data_dim)
minibatches_per_epoch = dataset.get_n_examples('train') // minibatch_size
model = Hm([], [], prior=0)
'''pretraining layers'''
for u_prev, u_next in izip(u[:-2],u[1:-1]):
layer_pretrain = rbm.Rbm.random(u_prev,u_next)
for i in xrange(num_pre_epochs):
for j in xrange(minibatches_per_epoch):
minibatch = dataset.get_minibatch_at_index(j, minibatch_size=minibatch_size)
samples = list(model.q_samplesIx(minibatch))
layer_pretrain.increase_weighted_log_likelihood(samples=samples[-1], weights=1./minibatch_size, learning_rate=learning_ratep, cd_steps=cd_steps)
model.p_layers.insert(0,prob_distributions.Bernoulli(nn.Linear(layer_pretrain.w.T,layer_pretrain.bv)))
model.q_layers.append(prob_distributions.Bernoulli(nn.Linear(layer_pretrain.w,layer_pretrain.bh)))
'''pretraining prior'''
prior_pretrain = rbm.Rbm.random(u[-2], u[-1])
for i in xrange(num_pre_epochs):
for j in xrange(minibatches_per_epoch):
minibatch = dataset.get_minibatch_at_index(j, minibatch_size=minibatch_size)
samples = list(model.q_samplesIx(minibatch))
prior_pretrain.increase_weighted_log_likelihood(samples=samples[-1], weights=1./minibatch_size, learning_rate=learning_ratep, cd_steps=cd_steps)
model.prior = prior_pretrain
'''wake-sleep'''
    if num_epochs != 0:
        delta = (learning_rate1 - learning_rate2) / (minibatches_per_epoch * num_epochs)
for i in xrange(num_epochs):
for j in xrange(minibatches_per_epoch):
learning_rate = learning_rate1 - (i*minibatches_per_epoch+j)*delta
minibatch = dataset.get_minibatch_at_index(j, minibatch_size=minibatch_size)
model.wake_sleep_reweight(minibatch=minibatch, repeat=repeat, learning_rate=learning_rate, cd_steps=cd_steps)
f = open(os.path.join(directory_name,'hm_contrastq.pkl'), 'wb')
    pkl.dump(model, f, protocol=pkl.HIGHEST_PROTOCOL)
f.close()
evaluate.visualize_samples(directory_name, model)
    evaluate.ais(directory_name, dataset, model)
| jackklys/reweightedwakesleep | hm_contrastq.py | Python | mit | 7680
# -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from reportlab.lib.units import inch, cm
from reportlab.lib.styles import *
from reportlab.lib.enums import *
from reportlab.lib.colors import *
from reportlab.lib.pagesizes import *
from reportlab.pdfbase import pdfmetrics
# from reportlab.platypus import *
# from reportlab.platypus.flowables import Flowable
# from reportlab.platypus.tableofcontents import TableOfContents
# from reportlab.platypus.para import Para, PageNumberObject, UNDERLINE, HotLink
import reportlab
import copy
import types
import os
import os.path
import pprint
import sys
import string
import re
import base64
import urlparse
import mimetypes
import urllib2
import urllib
import httplib
import tempfile
import shutil
rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$")
if not(reportlab.Version[0] == "2" and reportlab.Version[2] >= "1"):
raise ImportError("Reportlab Version 2.1+ is needed!")
REPORTLAB22 = (reportlab.Version[0] == "2" and reportlab.Version[2] >= "2")
# print "***", reportlab.Version, REPORTLAB22, reportlab.__file__
import logging
log = logging.getLogger("ho.pisa")
try:
import cStringIO as StringIO
except:
import StringIO
try:
import pyPdf
except:
pyPdf = None
try:
from reportlab.graphics import renderPM
except:
renderPM = None
try:
from reportlab.graphics import renderSVG
except:
renderSVG = None
def ErrorMsg():
"""
Helper to get a nice traceback as string
"""
import traceback, sys, cgi
type = value = tb = limit = None
type, value, tb = sys.exc_info()
list = traceback.format_tb(tb, limit) + traceback.format_exception_only(type, value)
return "Traceback (innermost last):\n" + "%-20s %s" % (
string.join(list[: - 1], ""),
list[ - 1])
def toList(value):
if type(value) not in (types.ListType, types.TupleType):
return [value]
return list(value)
def flatten(x):
"""flatten(sequence) -> list
copied from http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
#if isinstance(el, (list, tuple)):
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def _toColor(arg, default=None):
'''try to map an arbitrary arg to a color instance'''
if isinstance(arg, Color): return arg
tArg = type(arg)
if tArg in (types.ListType, types.TupleType):
assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color'
assert 0 <= min(arg) and max(arg) <= 1
return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3])
elif tArg == types.StringType:
C = getAllNamedColors()
s = arg.lower()
if C.has_key(s): return C[s]
try:
return toColor(eval(arg))
except:
pass
try:
return HexColor(arg)
except:
if default is None:
raise ValueError('Invalid color value %r' % arg)
return default
def getColor(value, default=None):
" Convert to color value "
try:
original = value
if isinstance(value, Color):
return value
value = str(value).strip().lower()
if value == "transparent" or value == "none":
return default
if value in COLOR_BY_NAME:
return COLOR_BY_NAME[value]
if value.startswith("#") and len(value) == 4:
value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3]
elif rgb_re.search(value):
# e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
r, g, b = [int(x) for x in rgb_re.search(value).groups()]
value = "#%02x%02x%02x" % (r, g, b)
else:
# Shrug
pass
# XXX Throws illegal in 2.1 e.g. toColor('none'),
# therefore we have a workaround here
return _toColor(value)
except ValueError, e:
log.warn("Unknown color %r", original)
return default
def getBorderStyle(value, default=None):
# log.debug(value)
if value and (str(value).lower() not in ("none", "hidden")):
return value
return default
mm = cm / 10.0
dpi96 = (1.0 / 96.0 * inch)
_absoluteSizeTable = {
"1": 50.0 / 100.0,
"xx-small": 50.0 / 100.0,
"x-small": 50.0 / 100.0,
"2": 75.0 / 100.0,
"small": 75.0 / 100.0,
"3": 100.0 / 100.0,
"medium": 100.0 / 100.0,
"4": 125.0 / 100.0,
"large": 125.0 / 100.0,
"5": 150.0 / 100.0,
"x-large": 150.0 / 100.0,
"6": 175.0 / 100.0,
"xx-large": 175.0 / 100.0,
"7": 200.0 / 100.0,
"xxx-large": 200.0 / 100.0,
#"xx-small" : 3./5.,
#"x-small": 3./4.,
#"small": 8./9.,
#"medium": 1./1.,
#"large": 6./5.,
#"x-large": 3./2.,
#"xx-large": 2./1.,
#"xxx-large": 3./1.,
}
_relativeSizeTable = {
"larger": 1.25,
"smaller": 0.75,
"+4": 200.0 / 100.0,
"+3": 175.0 / 100.0,
"+2": 150.0 / 100.0,
"+1": 125.0 / 100.0,
"-1": 75.0 / 100.0,
"-2": 50.0 / 100.0,
"-3": 25.0 / 100.0,
}
MIN_FONT_SIZE = 1.0
def getSize(value, relative=0, base=None, default=0.0):
"""
Converts strings to standard sizes
"""
try:
original = value
if value is None:
return relative
elif type(value) is types.FloatType:
return value
elif type(value) is types.IntType:
return float(value)
elif type(value) in (types.TupleType, types.ListType):
value = "".join(value)
value = str(value).strip().lower().replace(",", ".")
if value[ - 2:] == 'cm':
return float(value[: - 2].strip()) * cm
elif value[ - 2:] == 'mm':
return (float(value[: - 2].strip()) * mm) # 1mm = 0.1cm
elif value[ - 2:] == 'in':
return float(value[: - 2].strip()) * inch # 1pt == 1/72inch
elif value[ - 2:] == 'inch':
return float(value[: - 4].strip()) * inch # 1pt == 1/72inch
elif value[ - 2:] == 'pt':
return float(value[: - 2].strip())
elif value[ - 2:] == 'pc':
return float(value[: - 2].strip()) * 12.0 # 1pc == 12pt
elif value[ - 2:] == 'px':
return float(value[: - 2].strip()) * dpi96 # XXX W3C says, use 96pdi http://www.w3.org/TR/CSS21/syndata.html#length-units
elif value[ - 1:] == 'i': # 1pt == 1/72inch
return float(value[: - 1].strip()) * inch
elif value in ("none", "0", "auto"):
return 0.0
elif relative:
if value[ - 2:] == 'em': # XXX
return (float(value[: - 2].strip()) * relative) # 1em = 1 * fontSize
elif value[ - 2:] == 'ex': # XXX
return (float(value[: - 2].strip()) * (relative / 2.0)) # 1ex = 1/2 fontSize
elif value[ - 1:] == '%':
# print "%", value, relative, (relative * float(value[:-1].strip())) / 100.0
return (relative * float(value[: - 1].strip())) / 100.0 # 1% = (fontSize * 1) / 100
elif value in ("normal", "inherit"):
return relative
elif _relativeSizeTable.has_key(value):
if base:
return max(MIN_FONT_SIZE, base * _relativeSizeTable[value])
return max(MIN_FONT_SIZE, relative * _relativeSizeTable[value])
elif _absoluteSizeTable.has_key(value):
if base:
return max(MIN_FONT_SIZE, base * _absoluteSizeTable[value])
return max(MIN_FONT_SIZE, relative * _absoluteSizeTable[value])
try:
value = float(value)
except:
log.warn("getSize: Not a float %r", value)
return default #value = 0
return max(0, value)
except Exception:
log.warn("getSize %r %r", original, relative, exc_info=1)
# print "ERROR getSize", repr(value), repr(value), e
return default
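# Illustrative conversions for getSize (a sketch, not exhaustive; values follow
# the 1pt == 1/72 inch and 96 dpi conventions used above):
#     getSize("2cm")                  -> 2 * cm  (about 56.7 points)
#     getSize("12pt")                 -> 12.0
#     getSize("150%", relative=10)    -> 15.0
#     getSize("1.5em", relative=10)   -> 15.0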
def getCoords(x, y, w, h, pagesize):
"""
As a stupid programmer I like to use the upper left
corner of the document as the 0,0 coords therefore
we need to do some fancy calculations
"""
#~ print pagesize
ax, ay = pagesize
if x < 0:
x = ax + x
if y < 0:
y = ay + y
if w != None and h != None:
if w <= 0:
w = (ax - x + w)
if h <= 0:
h = (ay - y + h)
return x, (ay - y - h), w, h
return x, (ay - y)
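# For instance, on a 595x842 pt page getCoords(10, 10, 100, 50, (595, 842))
# returns (10, 782, 100, 50): the top-left style origin (10, 10) becomes the
# PDF bottom-left origin y = 842 - 10 - 50 = 782.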
def getBox(box, pagesize):
"""
Parse sizes by corners in the form:
<X-Left> <Y-Upper> <Width> <Height>
    The last two values, when negative, are interpreted as offsets from
    the right and lower border.
"""
box = str(box).split()
if len(box) != 4:
raise Exception, "box not defined right way"
x, y, w, h = map(getSize, box)
return getCoords(x, y, w, h, pagesize)
def getPos(position, pagesize):
"""
Pair of coordinates
"""
position = str(position).split()
if len(position) != 2:
raise Exception, "position not defined right way"
x, y = map(getSize, position)
return getCoords(x, y, None, None, pagesize)
def getBool(s):
" Is it a boolean? "
return str(s).lower() in ("y", "yes", "1", "true")
_uid = 0
def getUID():
" Unique ID "
global _uid
_uid += 1
return str(_uid)
_alignments = {
"left": TA_LEFT,
"center": TA_CENTER,
"middle": TA_CENTER,
"right": TA_RIGHT,
"justify": TA_JUSTIFY,
}
def getAlign(value, default=TA_LEFT):
return _alignments.get(str(value).lower(), default)
#def getVAlign(value):
# # Unused
# return str(value).upper()
GAE = "google.appengine" in sys.modules
if GAE:
STRATEGIES = (
StringIO.StringIO,
StringIO.StringIO)
else:
STRATEGIES = (
StringIO.StringIO,
tempfile.NamedTemporaryFile)
class pisaTempFile(object):
"""A temporary file implementation that uses memory unless
either capacity is breached or fileno is requested, at which
point a real temporary file will be created and the relevant
details returned
If capacity is -1 the second strategy will never be used.
Inspired by:
http://code.activestate.com/recipes/496744/
"""
STRATEGIES = STRATEGIES
CAPACITY = 10 * 1024
def __init__(self, buffer="", capacity=CAPACITY):
"""Creates a TempFile object containing the specified buffer.
If capacity is specified, we use a real temporary file once the
file gets larger than that size. Otherwise, the data is stored
in memory.
"""
#if hasattr(buffer, "read"):
#shutil.copyfileobj( fsrc, fdst[, length])
self.capacity = capacity
self.strategy = int(len(buffer) > self.capacity)
try:
self._delegate = self.STRATEGIES[self.strategy]()
except:
# Fallback for Google AppEnginge etc.
self._delegate = self.STRATEGIES[0]()
self.write(buffer)
def makeTempFile(self):
" Switch to next startegy. If an error occured stay with the first strategy "
if self.strategy == 0:
try:
new_delegate = self.STRATEGIES[1]()
new_delegate.write(self.getvalue())
self._delegate = new_delegate
self.strategy = 1
log.warn("Created temporary file %s", self.name)
except:
self.capacity = - 1
def getFileName(self):
" Get a named temporary file "
self.makeTempFile()
return self.name
def fileno(self):
"""Forces this buffer to use a temporary file as the underlying.
object and returns the fileno associated with it.
"""
self.makeTempFile()
return self._delegate.fileno()
def getvalue(self):
" Get value of file. Work around for second strategy "
if self.strategy == 0:
return self._delegate.getvalue()
self._delegate.flush()
self._delegate.seek(0)
return self._delegate.read()
def write(self, value):
" If capacity != -1 and length of file > capacity it is time to switch "
if self.capacity > 0 and self.strategy == 0:
len_value = len(value)
if len_value >= self.capacity:
needs_new_strategy = True
else:
self.seek(0, 2) # find end of file
needs_new_strategy = \
(self.tell() + len_value) >= self.capacity
if needs_new_strategy:
self.makeTempFile()
self._delegate.write(value)
def __getattr__(self, name):
try:
return getattr(self._delegate, name)
except AttributeError:
# hide the delegation
e = "object '%s' has no attribute '%s'" \
% (self.__class__.__name__, name)
raise AttributeError(e)
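# A rough usage sketch for pisaTempFile (assuming a non-GAE environment, where
# the second strategy is a real NamedTemporaryFile):
#
#     buf = pisaTempFile(capacity=1024)
#     buf.write("x" * 2048)       # exceeds capacity -> spills to a temp file
#     name = buf.getFileName()    # path of the backing temporary file
#     data = buf.getvalue()       # still returns the full contents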
_rx_datauri = re.compile("^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL)
class pisaFileObject:
"""
XXX
"""
def __init__(self, uri, basepath=None):
self.basepath = basepath
self.mimetype = None
self.file = None
self.data = None
self.uri = None
self.local = None
self.tmp_file = None
uri = str(uri)
log.debug("FileObject %r, Basepath: %r", uri, basepath)
# Data URI
if uri.startswith("data:"):
m = _rx_datauri.match(uri)
self.mimetype = m.group("mime")
self.data = base64.decodestring(m.group("data"))
else:
# Check if we have an external scheme
if basepath and not (uri.startswith("http://") or uri.startswith("https://")):
urlParts = urlparse.urlparse(basepath)
else:
urlParts = urlparse.urlparse(uri)
log.debug("URLParts: %r", urlParts)
# Drive letters have len==1 but we are looking for things like http:
if len(urlParts[0]) > 1 :
# External data
if basepath:
uri = urlparse.urljoin(basepath, uri)
#path = urlparse.urlsplit(url)[2]
#mimetype = getMimeType(path)
# Using HTTPLIB
server, path = urllib.splithost(uri[uri.find("//"):])
if uri.startswith("https://"):
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.request("GET", path)
r1 = conn.getresponse()
# log.debug("HTTP %r %r %r %r", server, path, uri, r1)
if (r1.status, r1.reason) == (200, "OK"):
# data = r1.read()
self.mimetype = r1.getheader("Content-Type", None).split(";")[0]
self.uri = uri
if r1.getheader("content-encoding") == "gzip":
# zbuf = cStringIO.StringIO(data)
import gzip
self.file = gzip.GzipFile(mode="rb", fileobj=r1)
#data = zfile.read()
#zfile.close()
else:
self.file = r1
# self.file = urlResponse
else:
urlResponse = urllib2.urlopen(uri)
self.mimetype = urlResponse.info().get("Content-Type", None).split(";")[0]
self.uri = urlResponse.geturl()
self.file = urlResponse
else:
# Local data
if basepath:
uri = os.path.normpath(os.path.join(basepath, uri))
if os.path.isfile(uri):
self.uri = uri
self.local = uri
self.setMimeTypeByName(uri)
self.file = open(uri, "rb")
def getFile(self):
if self.file is not None:
return self.file
if self.data is not None:
return pisaTempFile(self.data)
return None
def getNamedFile(self):
if self.notFound():
return None
if self.local:
return str(self.local)
if not self.tmp_file:
self.tmp_file = tempfile.NamedTemporaryFile()
if self.file:
shutil.copyfileobj(self.file, self.tmp_file)
else:
self.tmp_file.write(self.getData())
self.tmp_file.flush()
return self.tmp_file.name
def getData(self):
if self.data is not None:
return self.data
if self.file is not None:
self.data = self.file.read()
return self.data
return None
def notFound(self):
return (self.file is None) and (self.data is None)
def setMimeTypeByName(self, name):
" Guess the mime type "
mimetype = mimetypes.guess_type(name)[0]
if mimetype is not None:
            self.mimetype = mimetype.split(";")[0]
def getFile(*a , **kw):
file = pisaFileObject(*a, **kw)
if file.notFound():
return None
return file
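# getFile resolves the three source kinds handled by pisaFileObject above; the
# URLs and paths below are only illustrative:
#
#     getFile("data:image/png;base64,...")        # inline data URI
#     getFile("http://example.com/logo.png")      # fetched over HTTP(S)
#     getFile("img/logo.png", basepath="/srv")    # joined onto a local base path
#
# Each call returns a pisaFileObject, or None if nothing could be loaded.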
COLOR_BY_NAME = {
'activeborder': Color(212, 208, 200),
'activecaption': Color(10, 36, 106),
'aliceblue': Color(.941176, .972549, 1),
'antiquewhite': Color(.980392, .921569, .843137),
'appworkspace': Color(128, 128, 128),
'aqua': Color(0, 1, 1),
'aquamarine': Color(.498039, 1, .831373),
'azure': Color(.941176, 1, 1),
'background': Color(58, 110, 165),
'beige': Color(.960784, .960784, .862745),
'bisque': Color(1, .894118, .768627),
'black': Color(0, 0, 0),
'blanchedalmond': Color(1, .921569, .803922),
'blue': Color(0, 0, 1),
'blueviolet': Color(.541176, .168627, .886275),
'brown': Color(.647059, .164706, .164706),
'burlywood': Color(.870588, .721569, .529412),
'buttonface': Color(212, 208, 200),
'buttonhighlight': Color(255, 255, 255),
'buttonshadow': Color(128, 128, 128),
'buttontext': Color(0, 0, 0),
'cadetblue': Color(.372549, .619608, .627451),
'captiontext': Color(255, 255, 255),
'chartreuse': Color(.498039, 1, 0),
'chocolate': Color(.823529, .411765, .117647),
'coral': Color(1, .498039, .313725),
'cornflowerblue': Color(.392157, .584314, .929412),
'cornsilk': Color(1, .972549, .862745),
'crimson': Color(.862745, .078431, .235294),
'cyan': Color(0, 1, 1),
'darkblue': Color(0, 0, .545098),
'darkcyan': Color(0, .545098, .545098),
'darkgoldenrod': Color(.721569, .52549, .043137),
'darkgray': Color(.662745, .662745, .662745),
'darkgreen': Color(0, .392157, 0),
'darkgrey': Color(.662745, .662745, .662745),
'darkkhaki': Color(.741176, .717647, .419608),
'darkmagenta': Color(.545098, 0, .545098),
'darkolivegreen': Color(.333333, .419608, .184314),
'darkorange': Color(1, .54902, 0),
'darkorchid': Color(.6, .196078, .8),
'darkred': Color(.545098, 0, 0),
'darksalmon': Color(.913725, .588235, .478431),
'darkseagreen': Color(.560784, .737255, .560784),
'darkslateblue': Color(.282353, .239216, .545098),
'darkslategray': Color(.184314, .309804, .309804),
'darkslategrey': Color(.184314, .309804, .309804),
'darkturquoise': Color(0, .807843, .819608),
'darkviolet': Color(.580392, 0, .827451),
'deeppink': Color(1, .078431, .576471),
'deepskyblue': Color(0, .74902, 1),
'dimgray': Color(.411765, .411765, .411765),
'dimgrey': Color(.411765, .411765, .411765),
'dodgerblue': Color(.117647, .564706, 1),
'firebrick': Color(.698039, .133333, .133333),
'floralwhite': Color(1, .980392, .941176),
'forestgreen': Color(.133333, .545098, .133333),
'fuchsia': Color(1, 0, 1),
'gainsboro': Color(.862745, .862745, .862745),
'ghostwhite': Color(.972549, .972549, 1),
'gold': Color(1, .843137, 0),
'goldenrod': Color(.854902, .647059, .12549),
'gray': Color(.501961, .501961, .501961),
'graytext': Color(128, 128, 128),
'green': Color(0, .501961, 0),
'greenyellow': Color(.678431, 1, .184314),
'grey': Color(.501961, .501961, .501961),
'highlight': Color(10, 36, 106),
'highlighttext': Color(255, 255, 255),
'honeydew': Color(.941176, 1, .941176),
'hotpink': Color(1, .411765, .705882),
'inactiveborder': Color(212, 208, 200),
'inactivecaption': Color(128, 128, 128),
'inactivecaptiontext': Color(212, 208, 200),
'indianred': Color(.803922, .360784, .360784),
'indigo': Color(.294118, 0, .509804),
'infobackground': Color(255, 255, 225),
'infotext': Color(0, 0, 0),
'ivory': Color(1, 1, .941176),
'khaki': Color(.941176, .901961, .54902),
'lavender': Color(.901961, .901961, .980392),
'lavenderblush': Color(1, .941176, .960784),
'lawngreen': Color(.486275, .988235, 0),
'lemonchiffon': Color(1, .980392, .803922),
'lightblue': Color(.678431, .847059, .901961),
'lightcoral': Color(.941176, .501961, .501961),
'lightcyan': Color(.878431, 1, 1),
'lightgoldenrodyellow': Color(.980392, .980392, .823529),
'lightgray': Color(.827451, .827451, .827451),
'lightgreen': Color(.564706, .933333, .564706),
'lightgrey': Color(.827451, .827451, .827451),
'lightpink': Color(1, .713725, .756863),
'lightsalmon': Color(1, .627451, .478431),
'lightseagreen': Color(.12549, .698039, .666667),
'lightskyblue': Color(.529412, .807843, .980392),
'lightslategray': Color(.466667, .533333, .6),
'lightslategrey': Color(.466667, .533333, .6),
'lightsteelblue': Color(.690196, .768627, .870588),
'lightyellow': Color(1, 1, .878431),
'lime': Color(0, 1, 0),
'limegreen': Color(.196078, .803922, .196078),
'linen': Color(.980392, .941176, .901961),
'magenta': Color(1, 0, 1),
'maroon': Color(.501961, 0, 0),
'mediumaquamarine': Color(.4, .803922, .666667),
'mediumblue': Color(0, 0, .803922),
'mediumorchid': Color(.729412, .333333, .827451),
'mediumpurple': Color(.576471, .439216, .858824),
'mediumseagreen': Color(.235294, .701961, .443137),
'mediumslateblue': Color(.482353, .407843, .933333),
'mediumspringgreen': Color(0, .980392, .603922),
'mediumturquoise': Color(.282353, .819608, .8),
'mediumvioletred': Color(.780392, .082353, .521569),
'menu': Color(212, 208, 200),
'menutext': Color(0, 0, 0),
'midnightblue': Color(.098039, .098039, .439216),
'mintcream': Color(.960784, 1, .980392),
'mistyrose': Color(1, .894118, .882353),
'moccasin': Color(1, .894118, .709804),
'navajowhite': Color(1, .870588, .678431),
'navy': Color(0, 0, .501961),
'oldlace': Color(.992157, .960784, .901961),
'olive': Color(.501961, .501961, 0),
'olivedrab': Color(.419608, .556863, .137255),
'orange': Color(1, .647059, 0),
'orangered': Color(1, .270588, 0),
'orchid': Color(.854902, .439216, .839216),
'palegoldenrod': Color(.933333, .909804, .666667),
'palegreen': Color(.596078, .984314, .596078),
'paleturquoise': Color(.686275, .933333, .933333),
'palevioletred': Color(.858824, .439216, .576471),
'papayawhip': Color(1, .937255, .835294),
'peachpuff': Color(1, .854902, .72549),
'peru': Color(.803922, .521569, .247059),
'pink': Color(1, .752941, .796078),
'plum': Color(.866667, .627451, .866667),
'powderblue': Color(.690196, .878431, .901961),
'purple': Color(.501961, 0, .501961),
'red': Color(1, 0, 0),
'rosybrown': Color(.737255, .560784, .560784),
'royalblue': Color(.254902, .411765, .882353),
'saddlebrown': Color(.545098, .270588, .07451),
'salmon': Color(.980392, .501961, .447059),
'sandybrown': Color(.956863, .643137, .376471),
'scrollbar': Color(212, 208, 200),
'seagreen': Color(.180392, .545098, .341176),
'seashell': Color(1, .960784, .933333),
'sienna': Color(.627451, .321569, .176471),
'silver': Color(.752941, .752941, .752941),
'skyblue': Color(.529412, .807843, .921569),
'slateblue': Color(.415686, .352941, .803922),
'slategray': Color(.439216, .501961, .564706),
'slategrey': Color(.439216, .501961, .564706),
'snow': Color(1, .980392, .980392),
'springgreen': Color(0, 1, .498039),
'steelblue': Color(.27451, .509804, .705882),
'tan': Color(.823529, .705882, .54902),
'teal': Color(0, .501961, .501961),
'thistle': Color(.847059, .74902, .847059),
'threeddarkshadow': Color(64, 64, 64),
'threedface': Color(212, 208, 200),
'threedhighlight': Color(255, 255, 255),
'threedlightshadow': Color(212, 208, 200),
'threedshadow': Color(128, 128, 128),
'tomato': Color(1, .388235, .278431),
'turquoise': Color(.25098, .878431, .815686),
'violet': Color(.933333, .509804, .933333),
'wheat': Color(.960784, .870588, .701961),
'white': Color(1, 1, 1),
'whitesmoke': Color(.960784, .960784, .960784),
'window': Color(255, 255, 255),
'windowframe': Color(0, 0, 0),
'windowtext': Color(0, 0, 0),
'yellow': Color(1, 1, 0),
'yellowgreen': Color(.603922, .803922, .196078)}
| holtwick/xhtml2pdf | sx/pisa3/pisa_util.py | Python | apache-2.0 | 26,127 |
import sys
import eventlet
from eventlet import event
import logging
import msgpack
from .settings import BUF_LEN
LOG = logging.getLogger('Server')
class Server(object):
exit_event = event.Event()
def __init__(self, conf):
super(Server, self).__init__()
self._node_listen_ip = conf.get('server', 'node_listen_ip')
self._node_listen_port = int(conf.get('server', 'node_listen_port'))
self._node_listen_sock = None
self._client_listen_ip = conf.get('server', 'client_listen_ip')
self._client_listen_port = int(conf.get('server', 'client_listen_port'))
self._client_listen_sock = None
self._threads = []
def _handle_node_sock(self, node_sock):
LOG.debug("Get a node socket")
unpacker = msgpack.Unpacker()
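        # msgpack.Unpacker is a streaming unpacker: raw socket chunks are fed
        # in, and any complete messages accumulated so far can then be iterated.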
while True:
try:
chunk = node_sock.recv(BUF_LEN)
if not chunk:
break
unpacker.feed(chunk)
for unpacked_msg in unpacker:
self._on_handle_node_msg(unpacked_msg)
except Exception as e:
LOG.exception("node sock error: %s" % str(e))
break
def _on_handle_node_msg(self, msg):
pass
def _handle_client_sock(self, client_sock):
LOG.info("Get a client socket")
unpacker = msgpack.Unpacker()
while True:
try:
chunk = client_sock.recv(BUF_LEN)
if not chunk:
break
unpacker.feed(chunk)
for unpacked_msg in unpacker:
LOG.info(unpacked_msg)
self._on_handle_client_msg(client_sock, unpacked_msg)
except Exception as e:
LOG.exception("client sock error: %s" % str(e))
break
    def _on_handle_client_msg(self, client_sock, msg):
pass
def _on_node_connect(self, node_sock, address):
pass
def _handle_node_accept(self):
while True:
node_sock, address = self._node_listen_sock.accept()
self._on_node_connect(node_sock, address)
self._threads.append(
eventlet.spawn(self._handle_node_sock, node_sock)
)
def _on_client_connect(self, client_sock, address):
pass
def _handle_client_accept(self):
while True:
client_sock, address = self._client_listen_sock.accept()
self._on_client_connect(client_sock, address)
self._threads.append(
eventlet.spawn(self._handle_client_sock, client_sock)
)
def _on_start(self):
pass
def start(self):
self._node_listen_sock = eventlet.listen(
(self._node_listen_ip, self._node_listen_port)
)
self._threads.append(eventlet.spawn(self._handle_node_accept))
self._client_listen_sock = eventlet.listen(
(self._client_listen_ip, self._client_listen_port)
)
self._threads.append(eventlet.spawn(self._handle_client_accept))
self._on_start()
def _shutdown(self):
LOG.debug("Exiting...")
self._on_exit()
for thread in self._threads:
if thread:
thread.kill()
else:
LOG.debug("--- none thread")
sys.exit(0)
def _on_exit(self):
pass
def wait(self):
LOG.debug("Waiting for msg to exit")
self.exit_event.wait()
LOG.debug("Received exit event")
self._shutdown()
def main():
    from .util import config_log
    from .conf import set_conf
set_conf('test.conf')
from .conf import CONF
config_log()
server = Server(CONF)
server.start()
server.wait()
if __name__ == '__main__':
main()
| jason-ni/eventlet-raft | eventlet_raft/server.py | Python | apache-2.0 | 3,811 |
algorithm = "spawning_adiabatic"
#algorithm = "hagedorn"
potential = "eckart"
T = 70
dt = 0.005
eps = 0.0234218**0.5
basis_size = 300
parameters = [ (0.1935842258501978j, 5.1657101481699996, 0.0, 0.24788547371, -7.55890450883) ]
coefficients = [[(0, 1.0)]]
leading_component = 0
f = 9.0
ngn = 4096
write_nth = 20
spawn_method = "projection"
spawn_max_order = 16
spawn_order = 0
spawn_condition = "high_k_norm_threshold"
spawn_K0 = 100
# 'Magic number' 0.32 is usually not known a priori!
spawn_threshold = 0.32
| WaveBlocks/WaveBlocks | demos/demo_tunneling_spawning/demo_tunneling_spawn_propagation_norm_threshold.py | Python | bsd-3-clause | 517 |
"""Ray-triangle intersection."""
# pylint: disable=invalid-name
from cgmath.vector import cross, dot
# TODO
EPSILON = 0.000001
# Adapted from:
# en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
def ray_triangle_intersect(ray, triangle):
"""Ray-triangle intersection.
ray: cgmath.ray.Ray instance
triangle: sequence of three points (cgmath.vector.vec3)
    Returns a scalar indicating the distance along the ray from the
    origin to the intersection. Returns None if there is no single
    intersection (e.g. the ray misses the triangle or lies in its plane).
"""
v1 = triangle[0]
v2 = triangle[1]
v3 = triangle[2]
# Find vectors for two edges sharing v1
e1 = v2 - v1
e2 = v3 - v1
# Begin calculating determinant - also used to calculate u
# parameter
P = cross(ray.direction, e2)
# If determinant is near zero, ray lies in plane of triangle or
# ray is parallel to plane of triangle
det = dot(e1, P)
# NOT CULLING
if det > -EPSILON and det < EPSILON:
return None
inv_det = 1.0 / det
# calculate distance from v1 to ray origin
T = ray.origin - v1
# Calculate u parameter and test bound
u = dot(T, P) * inv_det
# The intersection lies outside of the triangle
if u < 0 or u > 1:
return None
# Prepare to test v parameter
Q = cross(T, e1)
# Calculate v parameter and test bound
v = dot(ray.direction, Q) * inv_det
# The intersection lies outside of the triangle
if v < 0.0 or (u + v) > 1.0:
return None
t = dot(e2, Q) * inv_det
if t > EPSILON: # ray intersection
return t
# No hit, no win
return None
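# Worked example of the barycentric test above (comment only, nothing here is
# executed): for the unit triangle v1=(0,0,0), v2=(1,0,0), v3=(0,1,0) and a ray
# from (0.25, 0.25, 1) pointing along (0, 0, -1), the algorithm gives
# u = 0.25, v = 0.25 and t = 1.0, so the hit point origin + t*direction equals
# (0.25, 0.25, 0), which lies inside the triangle (u + v = 0.5 <= 1).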
| nicholasbishop/bel | cgmath/ray_triangle_intersect.py | Python | gpl-3.0 | 1,661 |
# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
"""Tests for the evil Twisted reactor-spinning we do."""
import os
import signal
from testtools import (
skipIf,
TestCase,
)
from testtools.helpers import try_import
from testtools.matchers import (
Equals,
Is,
MatchesException,
Raises,
)
_spinner = try_import('testtools._spinner')
defer = try_import('twisted.internet.defer')
Failure = try_import('twisted.python.failure.Failure')
class NeedsTwistedTestCase(TestCase):
def setUp(self):
super(NeedsTwistedTestCase, self).setUp()
if defer is None or Failure is None:
self.skipTest("Need Twisted to run")
class TestNotReentrant(NeedsTwistedTestCase):
def test_not_reentrant(self):
# A function decorated as not being re-entrant will raise a
# _spinner.ReentryError if it is called while it is running.
calls = []
@_spinner.not_reentrant
def log_something():
calls.append(None)
if len(calls) < 5:
log_something()
self.assertThat(
log_something, Raises(MatchesException(_spinner.ReentryError)))
self.assertEqual(1, len(calls))
def test_deeper_stack(self):
calls = []
@_spinner.not_reentrant
def g():
calls.append(None)
if len(calls) < 5:
f()
@_spinner.not_reentrant
def f():
calls.append(None)
if len(calls) < 5:
g()
self.assertThat(f, Raises(MatchesException(_spinner.ReentryError)))
self.assertEqual(2, len(calls))
class TestExtractResult(NeedsTwistedTestCase):
def test_not_fired(self):
# _spinner.extract_result raises _spinner.DeferredNotFired if it's
# given a Deferred that has not fired.
self.assertThat(lambda:_spinner.extract_result(defer.Deferred()),
Raises(MatchesException(_spinner.DeferredNotFired)))
def test_success(self):
# _spinner.extract_result returns the value of the Deferred if it has
# fired successfully.
marker = object()
d = defer.succeed(marker)
self.assertThat(_spinner.extract_result(d), Equals(marker))
def test_failure(self):
# _spinner.extract_result raises the failure's exception if it's given
# a Deferred that is failing.
try:
1/0
except ZeroDivisionError:
f = Failure()
d = defer.fail(f)
self.assertThat(lambda:_spinner.extract_result(d),
Raises(MatchesException(ZeroDivisionError)))
class TestTrapUnhandledErrors(NeedsTwistedTestCase):
def test_no_deferreds(self):
marker = object()
result, errors = _spinner.trap_unhandled_errors(lambda: marker)
self.assertEqual([], errors)
self.assertIs(marker, result)
def test_unhandled_error(self):
failures = []
def make_deferred_but_dont_handle():
try:
1/0
except ZeroDivisionError:
f = Failure()
failures.append(f)
defer.fail(f)
result, errors = _spinner.trap_unhandled_errors(
make_deferred_but_dont_handle)
self.assertIs(None, result)
self.assertEqual(failures, [error.failResult for error in errors])
class TestRunInReactor(NeedsTwistedTestCase):
def make_reactor(self):
from twisted.internet import reactor
return reactor
def make_spinner(self, reactor=None):
if reactor is None:
reactor = self.make_reactor()
return _spinner.Spinner(reactor)
def make_timeout(self):
return 0.01
def test_function_called(self):
# run_in_reactor actually calls the function given to it.
calls = []
marker = object()
self.make_spinner().run(self.make_timeout(), calls.append, marker)
self.assertThat(calls, Equals([marker]))
def test_return_value_returned(self):
# run_in_reactor returns the value returned by the function given to
# it.
marker = object()
result = self.make_spinner().run(self.make_timeout(), lambda: marker)
self.assertThat(result, Is(marker))
def test_exception_reraised(self):
# If the given function raises an error, run_in_reactor re-raises that
# error.
self.assertThat(
lambda:self.make_spinner().run(self.make_timeout(), lambda: 1/0),
Raises(MatchesException(ZeroDivisionError)))
def test_keyword_arguments(self):
# run_in_reactor passes keyword arguments on.
calls = []
function = lambda *a, **kw: calls.extend([a, kw])
self.make_spinner().run(self.make_timeout(), function, foo=42)
self.assertThat(calls, Equals([(), {'foo': 42}]))
def test_not_reentrant(self):
# run_in_reactor raises an error if it is called inside another call
# to run_in_reactor.
spinner = self.make_spinner()
self.assertThat(lambda: spinner.run(
self.make_timeout(), spinner.run, self.make_timeout(),
lambda: None), Raises(MatchesException(_spinner.ReentryError)))
def test_deferred_value_returned(self):
# If the given function returns a Deferred, run_in_reactor returns the
# value in the Deferred at the end of the callback chain.
marker = object()
result = self.make_spinner().run(
self.make_timeout(), lambda: defer.succeed(marker))
self.assertThat(result, Is(marker))
def test_preserve_signal_handler(self):
signals = ['SIGINT', 'SIGTERM', 'SIGCHLD']
signals = filter(
None, (getattr(signal, name, None) for name in signals))
for sig in signals:
self.addCleanup(signal.signal, sig, signal.getsignal(sig))
new_hdlrs = list(lambda *a: None for _ in signals)
for sig, hdlr in zip(signals, new_hdlrs):
signal.signal(sig, hdlr)
spinner = self.make_spinner()
spinner.run(self.make_timeout(), lambda: None)
self.assertEqual(new_hdlrs, map(signal.getsignal, signals))
def test_timeout(self):
# If the function takes too long to run, we raise a
# _spinner.TimeoutError.
timeout = self.make_timeout()
self.assertThat(
lambda:self.make_spinner().run(timeout, lambda: defer.Deferred()),
Raises(MatchesException(_spinner.TimeoutError)))
def test_no_junk_by_default(self):
# If the reactor hasn't spun yet, then there cannot be any junk.
spinner = self.make_spinner()
self.assertThat(spinner.get_junk(), Equals([]))
def test_clean_do_nothing(self):
# If there's nothing going on in the reactor, then clean does nothing
# and returns an empty list.
spinner = self.make_spinner()
result = spinner._clean()
self.assertThat(result, Equals([]))
def test_clean_delayed_call(self):
# If there's a delayed call in the reactor, then clean cancels it and
# returns an empty list.
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
call = reactor.callLater(10, lambda: None)
results = spinner._clean()
self.assertThat(results, Equals([call]))
self.assertThat(call.active(), Equals(False))
def test_clean_delayed_call_cancelled(self):
# If there's a delayed call that's just been cancelled, then it's no
# longer there.
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
call = reactor.callLater(10, lambda: None)
call.cancel()
results = spinner._clean()
self.assertThat(results, Equals([]))
def test_clean_selectables(self):
# If there's still a selectable (e.g. a listening socket), then
# clean() removes it from the reactor's registry.
#
# Note that the socket is left open. This emulates a bug in trial.
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
port = reactor.listenTCP(0, ServerFactory())
spinner.run(self.make_timeout(), lambda: None)
results = spinner.get_junk()
self.assertThat(results, Equals([port]))
def test_clean_running_threads(self):
import threading
import time
current_threads = list(threading.enumerate())
reactor = self.make_reactor()
timeout = self.make_timeout()
spinner = self.make_spinner(reactor)
spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
# Python before 2.5 has a race condition with thread handling where
# join() does not remove threads from enumerate before returning - the
# thread being joined does the removal. This was fixed in Python 2.5
# but we still support 2.4, so we have to workaround the issue.
# http://bugs.python.org/issue1703448.
self.assertThat(
[thread for thread in threading.enumerate() if thread.isAlive()],
Equals(current_threads))
def test_leftover_junk_available(self):
# If 'run' is given a function that leaves the reactor dirty in some
# way, 'run' will clean up the reactor and then store information
# about the junk. This information can be got using get_junk.
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
port = spinner.run(
self.make_timeout(), reactor.listenTCP, 0, ServerFactory())
self.assertThat(spinner.get_junk(), Equals([port]))
def test_will_not_run_with_previous_junk(self):
# If 'run' is called and there's still junk in the spinner's junk
# list, then the spinner will refuse to run.
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
self.assertThat(lambda: spinner.run(timeout, lambda: None),
Raises(MatchesException(_spinner.StaleJunkError)))
def test_clear_junk_clears_previous_junk(self):
# If 'run' is called and there's still junk in the spinner's junk
# list, then the spinner will refuse to run.
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
junk = spinner.clear_junk()
self.assertThat(junk, Equals([port]))
self.assertThat(spinner.get_junk(), Equals([]))
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_sigint_raises_no_result_error(self):
# If we get a SIGINT during a run, we raise _spinner.NoResultError.
SIGINT = getattr(signal, 'SIGINT', None)
if not SIGINT:
self.skipTest("SIGINT not available")
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
Raises(MatchesException(_spinner.NoResultError)))
self.assertEqual([], spinner._clean())
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_sigint_raises_no_result_error_second_time(self):
# If we get a SIGINT during a run, we raise _spinner.NoResultError.
# This test is exactly the same as test_sigint_raises_no_result_error,
# and exists to make sure we haven't futzed with state.
self.test_sigint_raises_no_result_error()
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_sigint_raises_no_result_error(self):
# If we get a SIGINT during a run, we raise _spinner.NoResultError.
SIGINT = getattr(signal, 'SIGINT', None)
if not SIGINT:
self.skipTest("SIGINT not available")
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
Raises(MatchesException(_spinner.NoResultError)))
self.assertEqual([], spinner._clean())
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_sigint_raises_no_result_error_second_time(self):
self.test_fast_sigint_raises_no_result_error()
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| zarboz/XBMC-PVR-mac | tools/darwin/depends/samba/samba-3.6.6/lib/testtools/testtools/tests/test_spinner.py | Python | gpl-2.0 | 13,058 |
from collections import OrderedDict
import pytest
import numpy as np
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.modeling.functional_models import (Gaussian1D,
Sersic1D, Sine1D, Linear1D,
Lorentz1D, Voigt1D, Const1D,
Box1D, Trapezoid1D, MexicanHat1D,
Moffat1D, Gaussian2D, Const2D, Ellipse2D,
Disk2D, Ring2D, Box2D, TrapezoidDisk2D,
MexicanHat2D, AiryDisk2D, Moffat2D, Sersic2D, KingProjectedAnalytic1D)
from astropy.modeling.powerlaws import (PowerLaw1D, BrokenPowerLaw1D, SmoothlyBrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D, LogParabola1D)
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
FUNC_MODELS_1D = [
{'class': Gaussian1D,
'parameters': {'amplitude': 3 * u.Jy, 'mean': 2 * u.m, 'stddev': 30 * u.cm},
'evaluation': [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
'bounding_box': [0.35, 3.65] * u.m},
{'class': Sersic1D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'r_eff': 2 * u.arcsec, 'n': 4},
'evaluation': [(3 * u.arcsec, 1.3237148119468918 * u.MJy/u.sr)],
'bounding_box': False},
{'class': Sine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False},
{'class': Linear1D,
'parameters': {'slope': 3 * u.km / u.s, 'intercept': 5000 * u.m},
'evaluation': [(6000 * u.ms, 23 * u.km)],
'bounding_box': False},
{'class': Lorentz1D,
'parameters': {'amplitude': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm': 100 * u.AA},
'evaluation': [(0.51 * u.micron, 1 * u.Jy)],
'bounding_box': [255, 755] * u.nm},
{'class': Voigt1D,
'parameters': {'amplitude_L': 2 * u.Jy, 'x_0': 505 * u.nm,
'fwhm_L': 100 * u.AA, 'fwhm_G': 50 * u.AA},
'evaluation': [(0.51 * u.micron, 1.06264568 * u.Jy)],
'bounding_box': False},
{'class': Const1D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 3 * u.Jy)],
'bounding_box': False},
{'class': Box1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.9, 4.9] * u.um},
{'class': Trapezoid1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um, 'slope': 5 * u.Jy / u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.3, 5.5] * u.um},
{'class': MexicanHat1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'sigma': 1e-3 * u.mm},
'evaluation': [(1000 * u.nm, -0.09785050 * u.Jy)],
'bounding_box': [-5.6, 14.4] * u.um},
{'class': Moffat1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 0.238853503 * u.Jy)],
'bounding_box': False},
{'class': KingProjectedAnalytic1D,
'parameters': {'amplitude': 1. * u.Msun/u.pc**2, 'r_core': 1. * u.pc, 'r_tide': 2. * u.pc},
'evaluation': [(0.5 * u.pc, 0.2 * u.Msun/u.pc**2)],
'bounding_box': [0. * u.pc, 2. * u.pc]}
]
FUNC_MODELS_2D = [
{'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m},
{'class': Const2D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
'bounding_box': False},
{'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m},
{'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m},
{'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
{'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
{'class': Box2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.s,
'x_width': 4 * u.cm, 'y_width': 3 * u.s},
'evaluation': [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
'bounding_box': [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]]},
{'class': MexicanHat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False},
{'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False},
{'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False},
{'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False},
]
POWERLAW_MODELS = [
{'class': PowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1},
'evaluation': [(1 * u.m, 500 * u.g)],
'bounding_box': False},
{'class': BrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1},
'evaluation': [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
'bounding_box': False},
{'class': SmoothlyBrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1, 'delta': 1},
'evaluation': [(1 * u.m, 15.125 * u.kg), (1 * u.cm, 15.125 * u.kg)],
'bounding_box': False},
{'class': ExponentialCutoffPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'x_cutoff': 1 * u.m},
'evaluation': [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
'bounding_box': False},
{'class': LogParabola1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'beta': 2},
'evaluation': [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
'bounding_box': False}
]
POLY_MODELS = [
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.one, 'c1': 2 / u.m, 'c2': 3 / u.m**2},
'evaluation': [(3 * u.m, 36 * u.one)],
'bounding_box': False},
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg / u.m, 'c2': 3 * u.kg / u.m**2},
'evaluation': [(3 * u.m, 36 * u.kg)],
'bounding_box': False},
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg, 'c2': 3 * u.kg},
'evaluation': [(3 * u.one, 36 * u.kg)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.one, 'c1_0': 2 / u.m, 'c2_0': 3 / u.m**2,
'c0_1': 3 / u.s, 'c0_2': -2 / u.s**2, 'c1_1': 5 / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.one)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg / u.m, 'c2_0': 3 * u.kg / u.m**2,
'c0_1': 3 * u.kg / u.s, 'c0_2': -2 * u.kg / u.s**2, 'c1_1': 5 * u.kg / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.kg)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg, 'c2_0': 3 * u.kg,
'c0_1': 3 * u.kg, 'c0_2': -2 * u.kg, 'c1_1': 5 * u.kg},
'evaluation': [(3 * u.one, 2 * u.one, 64 * u.kg)],
'bounding_box': False},
]
MODELS = FUNC_MODELS_1D + FUNC_MODELS_2D + POWERLAW_MODELS
SCIPY_MODELS = set([Sersic1D, Sersic2D, AiryDisk2D])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = OrderedDict(zip(('x', 'y'), args))
else:
kwargs = OrderedDict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model['parameters'].items():
if value is None or key == 'degree':
params[key] = value
else:
params[key] = np.repeat(value, 2)
params['n_models'] = 2
m = model['class'](**params)
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize('model', MODELS)
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
# In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
if model['bounding_box'] is False:
# Check that NotImplementedError is raised, so that if bounding_box is
# implemented we remember to set bounding_box=True in the list of models
# above
with pytest.raises(NotImplementedError):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
for i in range(len(model['bounding_box'])):
bbox = m.bounding_box
assert_quantity_allclose(bbox[i], model['bounding_box'][i])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('model', MODELS)
def test_models_fitting(model):
m = model['class'](**model['parameters'])
if len(model['evaluation'][0]) == 2:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.exp(-x.value ** 2) * model['evaluation'][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.linspace(1, 3, 100) * model['evaluation'][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model['evaluation'][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
fitter = LevMarLSQFitter()
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
            # If the parameter had no unit before the fit, it may come back
            # with a radian unit (for example), so allow that
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
| bsipocz/astropy | astropy/modeling/tests/test_models_quantities.py | Python | bsd-3-clause | 13,621 |
"""유틸리티 함수와 클래스 모음"""
from importlib import import_module
__all__ = ['import_string']
UNDEFINED = type('Undefined', (object,),
{'__repr__': lambda self: 'UNDEFINED'})()
def import_string(import_name, package=None, default=UNDEFINED):
"""지정한 경로에 있는 파이썬 모듈이나 객체를 가져온다
.. code-block:: pycon
>>> from urllib.request import urlopen
>>> import_string('urllib.request:urlopen') is urlopen
True
    :param str import_name: the module or object to import. Without a colon (:)
        it is interpreted as a module path; with one, the part after the colon
        is interpreted as the name of an object inside the module.
    :param package: path of the package the module belongs to
    :type package: str or None
    :param default: value to return instead of raising an exception when the
        module cannot be found
    :return: the imported module or object
    :rtype: module or object
    :raises ImportError: the module could not be found
    :raises AttributeError: the module was found but the object was not
"""
try:
module_name, object_name = import_name.split(':', 1)
except ValueError:
module_name = import_name
object_name = None
try:
obj = import_module(module_name, package)
if object_name is not None:
obj = getattr(obj, object_name)
return obj
except (ImportError, AttributeError):
if default is not UNDEFINED:
return default
raise
| flask-kr/githubarium | githubarium/util.py | Python | mit | 1,545 |
import numpy
import linreg
from perceptron import Perceptron
from pocketperceptron import PocketPerceptron
from logitreg import LogisticRegression
######################PERCEPTRON
'''
perc = Perceptron(3)
for line in open('classification.txt', 'r'):
line = line.split(',')
row = [float(line[0]), float(line[1]), float(line[2])]
val = int(line[3])
perc.digest(row, val)
print('perceptron weights are:')
print(perc.weights)
correct = 0
total = 0
for line in open('classification.txt', 'r'):
line = line.split(',')
row = [float(line[0]), float(line[1]), float(line[2])]
val = int(line[3])
prediction = perc.predict(row)
total += 1
if val == prediction:
correct += 1
print('perceptron error: ')
error = 1 - float(correct) / total
print(error)
'''
#########################POCKET PERCEPTRON
'''
perc = PocketPerceptron(3)
allrows = []
allvals = []
for line in open('classification.txt', 'r'):
line = line.split(',')
row = [float(line[0]), float(line[1]), float(line[2])]
val = int(line[4])
allrows.append(row)
allvals.append(val)
for i in range(7000):
perc.digest(allrows, allvals)
perc.finalize()
print('pocket perceptron weights are:')
print(perc.weights)
correct = 0
total = 0
for i in range(len(allvals)):
row = allrows[i]
val = allvals[i]
prediction = perc.predict(row)
total += 1
if val == prediction:
correct += 1
print('pocket perceptron error: ')
error = 1 - float(correct) / total
print(error)
'''
#######################LOGIT
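# Train a logistic regression classifier on the three-feature data (label in the
# fifth column), then threshold the predicted probability at 0.5 to estimate the
# training error.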
perc = LogisticRegression(3)
allrows = []
allvals = []
for line in open('classification.txt', 'r'):
line = line.split(',')
row = [float(line[0]), float(line[1]), float(line[2])]
val = int(line[4])
allrows.append(row)
allvals.append(val)
for i in range(7000):
perc.digest(allrows, allvals)
print('logistic regression weights are:')
print(perc.weights)
countcorrect = 0
for i in range(len(allvals)):
val = allvals[i]
row = allrows[i]
p = perc.predict(row)
#just a mock for now
prediction = 1
if(p < .5):
prediction = -1
if(prediction == val):
countcorrect += 1
error = 1 - float(countcorrect) / len(allvals)
print('error for logit:')
print(error)
#######################LINEAR REGRESSION
'''
x = []
y = []
for line in open('linear-regression.txt', 'r'):
line = line.split(',')
row = [1,float(line[0]), float(line[1])]
val = float(line[2])
x.append(row)
y.append([val])
x = numpy.transpose(x)
w = linreg.getW(x, y)
print('linear regression weights:')
print w
''' | hakuliu/inf552 | hw4/hw4.py | Python | apache-2.0 | 2,592 |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models.managers import TitleManager
from cms.models.pagemodel import Page
from cms.utils.helpers import reversion_register
class Title(models.Model):
language = models.CharField(_("language"), max_length=15, db_index=True)
title = models.CharField(_("title"), max_length=255)
menu_title = models.CharField(_("title"), max_length=255, blank=True, null=True, help_text=_("overwrite the title in the menu"))
slug = models.SlugField(_("slug"), max_length=255, db_index=True, unique=False)
path = models.CharField(_("Path"), max_length=255, db_index=True)
has_url_overwrite = models.BooleanField(_("has url overwrite"), default=False, db_index=True, editable=False)
application_urls = models.CharField(_('application'), max_length=200, blank=True, null=True, db_index=True)
redirect = models.CharField(_("redirect"), max_length=255, blank=True, null=True)
meta_description = models.TextField(_("description"), max_length=255, blank=True, null=True)
meta_keywords = models.CharField(_("keywords"), max_length=255, blank=True, null=True)
page_title = models.CharField(_("title"), max_length=255, blank=True, null=True, help_text=_("overwrite the title (html title tag)"))
page = models.ForeignKey(Page, verbose_name=_("page"), related_name="title_set")
creation_date = models.DateTimeField(_("creation date"), editable=False, default=datetime.now)
objects = TitleManager()
class Meta:
unique_together = (('language', 'page'),)
app_label = 'cms'
def __unicode__(self):
return "%s (%s)" % (self.title, self.slug)
def save(self, *args, **kwargs):
# Build path from parent page's path and slug
current_path = self.path
parent_page = self.page.parent
slug = u'%s' % self.slug
if not self.has_url_overwrite:
self.path = u'%s' % slug
if parent_page:
parent_title = Title.objects.get_title(parent_page, language=self.language, language_fallback=True)
if parent_title:
self.path = u'%s/%s' % (parent_title.path, slug)
super(Title, self).save(*args, **kwargs)
@property
def overwrite_url(self):
"""Return overrwriten url, or None
"""
if self.has_url_overwrite:
return self.path
return None
class EmptyTitle(object):
"""Empty title object, can be returned from Page.get_title_obj() if required
    title object doesn't exist.
"""
title = ""
slug = ""
path = ""
meta_description = ""
meta_keywords = ""
redirect = ""
    has_url_overwrite = False
application_urls = ""
menu_title = ""
page_title = ""
@property
def overwrite_url(self):
return None
reversion_register(Title)
| hzlf/openbroadcast | website/cms/models/titlemodels.py | Python | gpl-3.0 | 2,944 |
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--IP",
action="store", type="float", dest="IP", default=5.0,
help="The first material's IP, default 5.0")
parser.add_option("-e", "--EA",
action="store", type="float", dest="EA", default=4.0,
help="The first material's EA, default 4.0")
parser.add_option("-w", "--window",
action="store", type="float", dest="window", default=1.0,
help="The window around the IP/EA to allow +/- , eq w=1 gives +/- 0.5. Default 1.0")
parser.add_option("-g", "--gap",
action="store", type="float", dest="gap", default=4.0,
help="The bandgap above which a layer is considered insulating and disregarded Default 4.0")
(options, args) = parser.parse_args()
f = open('CollatedData.txt','r')
lines = f.readlines()
f.close()
HTL = []
ETL = []
conducting_ETL = []
conducting_HTL = []
window = options.window
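# Screen each material: keep it as an electron (hole) contacting layer when its
# EA (IP) lies within +/- window/2 of the target value; layers with a band gap
# below the --gap threshold are additionally recorded as conducting contacts.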
for line in lines:
inp = line.split()
if inp[0] != "Species":
Eg = float(inp[1])
EA = float(inp[2])
IP = float(inp[3])
if Eg > 2.0:
if EA >= options.EA - window * 0.5 and EA <= options.EA + window * 0.5:
ETL.append(inp[0])
if Eg < options.gap:
conducting_ETL.append(inp[0])
if IP <= options.IP + window * 0.5 and IP >= options.IP - window * 0.5:
if EA < 3.9:
HTL.append(inp[0])
if Eg < options.gap:
conducting_HTL.append(inp[0])
print "Number of potential electron contacting layers: ", len(conducting_ETL)
print "Number of potential hole contacting layers: ", len(conducting_HTL)
print "Conductive electron contacting layers: "
print len(conducting_ETL)
print conducting_ETL
print "Conductive hole contacting layers: "
print len(conducting_HTL)
print conducting_HTL
| WMD-group/SMACT | examples/Practical_tutorial/Electronic/scan_energies.py | Python | mit | 1,910 |
from django.contrib import admin
from django.contrib.admin.widgets import AdminIntegerFieldWidget
from django.core.validators import MaxValueValidator, MinValueValidator
from modeltranslation.admin import TranslationAdmin
from django.urls import reverse
from django.utils import timezone as tz
from django.utils.html import format_html
from django.utils.translation import gettext as _
from django import forms
from reversion.admin import VersionAdmin
from ..models import Issue
from ..models import Journal
from ..models import JournalInformation
from ..models import JournalType
from ..models import Language
from ..models import Discipline
JOURNAL_INFORMATION_COMPARE_EXCLUDE = [
# Exclude the translated base fields (ie. about) because the translation fields (ie. about_fr)
# are already displayed.
"about",
"contact",
"editorial_policy",
"instruction_for_authors",
"partners",
"publishing_ethics",
"subscriptions",
"team",
# Exclude the auto_now date field.
"updated",
# Exclude ID fields.
"id",
"journal_id",
]
class JournalDisciplineInline(admin.TabularInline):
model = Journal.disciplines.through
def get_field_queryset(self, db, db_field, request):
if db_field.name == "discipline":
# Filter the discipline field's queryset based on the parent journal's type.
if request._obj:
return db_field.remote_field.model._default_manager.using(db).filter(
type__code=request._obj.type.code
)
# If there is no parent journal (during journal creation), return an empty queryset.
else:
return db_field.remote_field.model._default_manager.using(db).none()
return super().get_field_queryset(db, db_field, request)
class JournalForm(forms.ModelForm):
fields = "all"
model = Journal
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Limit `year_of_addition` field values to the current year and the next two years.
now = tz.now()
min_year = now.year
max_year = min_year + 2
self.fields["year_of_addition"].validators = [
MinValueValidator(min_year),
MaxValueValidator(max_year),
]
self.fields["year_of_addition"].widget = AdminIntegerFieldWidget(
attrs={
"min": min_year,
"max": max_year,
},
)
def clean(self):
# In Django < 2.0, CharField stores empty values as empty strings, causing
        # a uniqueness constraint error when multiple objects have an empty value for
# the same field. When we upgrade to Django 2.0, it will not be necessary
# to convert empty strings to None values.
if self.cleaned_data["localidentifier"] == "":
self.cleaned_data["localidentifier"] = None
return self.cleaned_data
class JournalAdmin(admin.ModelAdmin):
form = JournalForm
search_fields = (
"code",
"name",
"issn_print",
"issn_web",
"external_url",
)
list_display = (
"__str__",
"code",
"type",
"open_access",
"external_url",
"active",
)
list_display_links = (
"__str__",
"code",
)
list_filter = (
"collection",
"type",
"paper",
"open_access",
"active",
"is_new",
"year_of_addition",
)
filter_horizontal = ("members",)
fieldsets = [
(
"Identification",
{
"fields": (
(
"collection",
"type",
),
(
"code",
"localidentifier",
),
(
"name",
"subtitle",
),
("is_new", "year_of_addition"),
(
"previous_journal",
"next_journal",
),
(
"issn_print",
"issn_web",
),
("external_url", "redirect_to_external_url"),
),
},
),
(
None,
{
"fields": (
("open_access", "charges_apc", "paper"),
("first_publication_year", "last_publication_year"),
),
},
),
("Membres", {"fields": ("members",)}),
(
"État",
{
"classes": ("collapse",),
"fields": ("active",),
},
),
]
inlines = (JournalDisciplineInline,)
def get_form(self, request, obj=None, change=False, **kwargs):
# Save the journal object on the request to have access to it in `JournalDisciplineInline`.
request._obj = obj
return super().get_form(request, obj, change, **kwargs)
class IssueAdmin(admin.ModelAdmin):
list_display = (
"journal",
"year",
"volume",
"number",
"title",
"localidentifier",
"is_published",
"view_issue_on_site",
)
search_fields = (
"id",
"localidentifier",
)
list_filter = (
"is_published",
"journal__collection",
"journal__name",
)
actions = [
"make_published",
"make_unpublished",
"force_free_access_to_true",
"force_free_access_to_false",
]
def force_free_access_to_true(self, request, queryset):
"""Mark a set of issues as open access"""
queryset.update(force_free_access=True)
force_free_access_to_true.short_description = _(
"Contraindre les numéros sélectionnés en libre d'accès"
)
def force_free_access_to_false(self, request, queryset):
"""Mark a set of issues as not open access"""
queryset.update(force_free_access=False)
force_free_access_to_false.short_description = _(
"Ne pas contraindre ces numéros au libre accès"
)
def view_issue_on_site(self, obj):
""" Display the link leading to the issue on website """
url = reverse(
"public:journal:issue_detail",
kwargs={
"journal_code": obj.journal.code,
"issue_slug": obj.volume_slug,
"localidentifier": obj.localidentifier,
},
)
if not obj.is_published and obj.journal.collection.is_main_collection:
url = "{url}?ticket={ticket}".format(url=url, ticket=obj.prepublication_ticket)
return format_html("<a href={}>{}</a>", url, _("Voir sur le site"))
view_issue_on_site.short_description = _("Voir le numéro sur le site")
def get_readonly_fields(self, request, obj=None):
return self.readonly_fields + ("is_published",)
class JournalInformationAdminForm(forms.ModelForm):
class Meta:
model = JournalInformation
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Exclude French & English from other_languages field. These languages are set in the
# main_languages field.
self.fields["other_languages"].queryset = Language.objects.exclude(id__in=[1, 2])
class JournalInformationAdmin(VersionAdmin, TranslationAdmin):
form = JournalInformationAdminForm
class JournalTypeAdmin(TranslationAdmin):
pass
class DisciplineAdmin(TranslationAdmin):
def get_types(self, obj):
return ", ".join([t.name for t in obj.type.all()])
list_display = [
"name",
"get_types",
]
list_filter = [
"type",
]
admin.site.register(Journal, JournalAdmin)
admin.site.register(Issue, IssueAdmin)
admin.site.register(JournalInformation, JournalInformationAdmin)
admin.site.unregister(JournalType)
admin.site.register(JournalType, JournalTypeAdmin)
admin.site.register(Discipline, DisciplineAdmin)
| erudit/eruditorg | eruditorg/erudit/admin/journal.py | Python | gpl-3.0 | 8,218 |
from ase import Atoms
from ase.optimize import QuasiNewton
from gpaw import GPAW
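# Relax a water molecule placed near the centre of a cubic box using GPAW's LCAO
# mode with a double-zeta polarized (dzp) basis; the optimizer stops once the
# maximum force drops below 0.05 eV/Angstrom.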
a = 6
b = a / 2
mol = Atoms('H2O',
[(b, 0.7633 + b, -0.4876 + b),
(b, -0.7633 + b, -0.4876 + b),
(b, b, 0.1219 + b)],
cell=[a, a, a])
calc = GPAW(nbands=4,
h=0.2,
mode='lcao',
basis='dzp')
mol.set_calculator(calc)
dyn = QuasiNewton(mol, trajectory='lcao_h2o.traj')
dyn.run(fmax=0.05)
| qsnake/gpaw | doc/documentation/lcao/lcao_h2o.py | Python | gpl-3.0 | 455 |
#!/usr/bin/python
from os import listdir,makedirs
from os.path import isfile, join, exists
import math,traceback,sys
def list_modules(folder):
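    """Return the names of the .py modules in *folder*, excluding __init__."""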
types = []
for f in listdir(folder):
if isfile(join(folder,f)):
name, extension = f.split(".")
if extension == "py" and name != "__init__":
types.append(name)
return types
def load_module(folder,name,listofimports):
try:
module = __import__("ea.%s.%s" % (folder,name), fromlist=listofimports)
except ImportError:
# Display error message
traceback.print_exc(file=sys.stdout)
raise ImportError("Failed to import module {0} from folder {1} using fromlist {2}".format(name,folder,listofimports))
return module
def select_class(name, name_list):
for n, c in name_list:
if n == name:
return c
def degToRad(deg):
    return math.pi/180.0*deg
def create_folder(folder):
if not exists(folder):
makedirs(folder)
return folder
| sondree/Master-thesis | Python EA/ea/utility.py | Python | gpl-3.0 | 1,009 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from types import NoneType, GeneratorType
_get = object.__getattribute__
class Struct(dict):
"""
Struct is an anonymous class with some properties good for manipulating JSON
0) a.b==a["b"]
1) the IDE does tab completion, so my spelling mistakes get found at "compile time"
2) it deals with missing keys gracefully, so I can put it into set operations (database
operations) without choking
2b) missing keys is important when dealing with JSON, which is often almost anything
3) you can access JSON paths as a variable: a["b.c"]==a.b.c
4) attribute names (keys) are corrected to unicode - it appears Python object.getattribute()
is called with str() even when using from __future__ import unicode_literals
MORE ON MISSING VALUES: http://www.numpy.org/NA-overview.html
IT ONLY CONSIDERS THE LEGITIMATE-FIELD-WITH-MISSING-VALUE (Statistical Null)
AND DOES NOT LOOK AT FIELD-DOES-NOT-EXIST-IN-THIS-CONTEXT (Database Null)
The Struct is a common pattern in many frameworks (I am still working on this list)
jinja2.environment.Environment.getattr()
argparse.Environment() - code performs setattr(e, name, value) on instances of Environment
collections.namedtuple() - gives attribute names to tuple indicies
"""
def __init__(self, **map):
"""
THIS WILL MAKE A COPY, WHICH IS UNLIKELY TO BE USEFUL
USE wrap() INSTEAD
"""
dict.__init__(self)
object.__setattr__(self, "__dict__", map) #map IS A COPY OF THE PARAMETERS
def __bool__(self):
return True
def __nonzero__(self):
d = _get(self, "__dict__")
return True if d else False
def __str__(self):
return dict.__str__(_get(self, "__dict__"))
def __getitem__(self, key):
if isinstance(key, str):
key = key.decode("utf8")
d = _get(self, "__dict__")
if key.find(".") >= 0:
key = key.replace("\.", "\a")
seq = [k.replace("\a", ".") for k in key.split(".")]
for n in seq:
d = _getdefault(d, n)
return wrap(d)
return getdefaultwrapped(d, key)
def __setitem__(self, key, value):
if key == "":
from ...env.logs import Log
Log.error("key is empty string. Probably a bad idea")
if isinstance(key, str):
key = key.decode("utf8")
try:
d = _get(self, "__dict__")
value = unwrap(value)
if key.find(".") == -1:
if value is None:
d.pop(key, None)
else:
d[key] = value
return self
key = key.replace("\.", "\a")
seq = [k.replace("\a", ".") for k in key.split(".")]
for k in seq[:-1]:
d = _getdefault(d, k)
if value == None:
d.pop(seq[-1], None)
else:
d[seq[-1]] = value
return self
except Exception, e:
raise e
def __getattribute__(self, key):
if isinstance(key, str):
key = key.decode("utf8")
try:
output = _get(self, key)
if key=="__dict__":
return output
return wrap(output)
except Exception:
d = _get(self, "__dict__")
return _Null(d, key)
def __setattr__(self, key, value):
if isinstance(key, str):
ukey = key.decode("utf8")
else:
ukey = key
value = unwrap(value)
if value is None:
d = _get(self, "__dict__")
d.pop(key, None)
else:
object.__setattr__(self, ukey, value)
return self
def items(self):
d = _get(self, "__dict__")
return ((k, wrap(v)) for k, v in d.items())
def iteritems(self):
#LOW LEVEL ITERATION, NO WRAPPING
d = _get(self, "__dict__")
return d.iteritems()
def keys(self):
d = _get(self, "__dict__")
return set(d.keys())
def values(self):
d = _get(self, "__dict__")
return (wrap(v) for v in d.values())
@property
def dict(self):
return _get(self, "__dict__")
@property
def __class__(self):
return dict
def copy(self):
d = _get(self, "__dict__")
return Struct(**d)
def __delitem__(self, key):
if isinstance(key, str):
key = key.decode("utf8")
if key.find(".") == -1:
d = _get(self, "__dict__")
d.pop(key, None)
return
d = _get(self, "__dict__")
key = key.replace("\.", "\a")
seq = [k.replace("\a", ".") for k in key.split(".")]
for k in seq[:-1]:
d = d[k]
d.pop(seq[-1], None)
def __delattr__(self, key):
if isinstance(key, str):
key = key.decode("utf8")
d = _get(self, "__dict__")
d.pop(key, None)
def keys(self):
d = _get(self, "__dict__")
return d.keys()
def setdefault(self, k, d=None):
if not self[k]:
self[k]=d
# KEEP TRACK OF WHAT ATTRIBUTES ARE REQUESTED, MAYBE SOME (BUILTIN) ARE STILL USEFUL
requested = set()
def _setdefault(obj, key, value):
"""
DO NOT USE __dict__.setdefault(obj, key, value), IT DOES NOT CHECK FOR obj[key] == None
"""
v = obj.get(key, None)
if v == None:
obj[key] = value
return value
return v
def set_default(original, default):
return wrap(_all_default(unwrap(original), unwrap(default)))
def _all_default(d, default):
"""
ANY VALUE NOT SET WILL BE SET BY THE default
THIS IS RECURSIVE
"""
for k, default_value in default.items():
existing_value = d.get(k, None)
if existing_value is None:
d[k] = default_value
elif isinstance(existing_value, dict) and isinstance(default_value, dict):
_all_default(existing_value, default_value)
def _getdefault(obj, key):
try:
return obj[key]
except Exception, e:
return _Null(obj, key)
def getdefaultwrapped(obj, key):
o = obj.get(key, None)
if o == None:
return _Null(obj, key)
return wrap(o)
def _assign(null, key, value, force=True):
"""
    WHEN force IS False, value IS ONLY ASSIGNED IF self.obj[self.path][key] DOES NOT EXIST
"""
d = _get(null, "__dict__")
o = d["obj"]
if isinstance(o, _Null):
o = _assign(o, d["path"], {}, False)
else:
o = _setdefault(o, d["path"], {})
if force:
o[key] = value
else:
value = _setdefault(o, key, value)
return value
class _Null(object):
"""
Structural Null provides closure under the dot (.) operator
Null[x] == Null
Null.x == Null
"""
def __init__(self, obj=None, path=None):
d = _get(self, "__dict__")
d["obj"] = obj
d["path"] = path
def __bool__(self):
return False
def __nonzero__(self):
return False
def __add__(self, other):
return Null
def __radd__(self, other):
return Null
def __sub__(self, other):
return Null
def __rsub__(self, other):
return Null
def __mul__(self, other):
return Null
def __rmul__(self, other):
return Null
def __div__(self, other):
return Null
def __rdiv__(self, other):
return Null
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __le__(self, other):
return False
def __lt__(self, other):
return False
def __eq__(self, other):
return other is None or isinstance(other, _Null)
def __ne__(self, other):
return other is not None and not isinstance(other, _Null)
def __getitem__(self, key):
return _Null(self, key)
def __len__(self):
return 0
def __iter__(self):
return ZeroList.__iter__()
def last(self):
"""
IN CASE self IS INTERPRETED AS A list
"""
return Null
def right(self, num=None):
return EmptyList
def __getattribute__(self, key):
try:
output = _get(self, key)
return output
except Exception, e:
return _Null(self, key)
def __setattr__(self, key, value):
_Null.__setitem__(self, key, value)
def __setitem__(self, key, value):
try:
value = unwrap(value)
if key.find(".") == -1:
_assign(self, key, value)
return self
key = key.replace("\.", "\a")
seq = [k.replace("\a", ".") for k in key.split(".")]
d = _assign(self, seq[0], {}, False)
for k in seq[1:-1]:
o = {}
d[k] = o
d = o
d[seq[-1]] = value
return self
except Exception, e:
raise e
def keys(self):
return set()
def pop(self, key, default=None):
return Null
def __str__(self):
return "None"
def __repr__(self):
return "Null"
def __class__(self):
return NoneType
Null = _Null()
EmptyList = Null
ZeroList = []
class StructList(list):
"""
    ENCAPSULATES HANDLING OF Nulls BY WRAPPING ALL MEMBERS AS NEEDED
    ENCAPSULATES FLAT SLICES ([::]) FOR USE IN WINDOW FUNCTIONS
    """
def __init__(self, vals=None):
""" USE THE vals, NOT A COPY """
# list.__init__(self)
if vals == None:
self.list = []
elif isinstance(vals, StructList):
self.list = vals.list
else:
self.list = vals
def __getitem__(self, index):
if isinstance(index, slice):
# IMPLEMENT FLAT SLICES (for i not in range(0, len(self)): assert self[i]==None)
if index.step is not None:
from ...env.logs import Log
Log.error("slice step must be None, do not know how to deal with values")
length = len(_get(self, "list"))
i = index.start
i = min(max(i, 0), length)
j = index.stop
if j is None:
j = length
else:
j = max(min(j, length), 0)
return StructList(_get(self, "list")[i:j])
if index < 0 or len(_get(self, "list")) <= index:
return Null
return wrap(_get(self, "list")[index])
def __setitem__(self, i, y):
_get(self, "list")[i] = unwrap(y)
def __iter__(self):
return (wrap(v) for v in _get(self, "list"))
def __contains__(self, item):
return list.__contains__(_get(self, "list"), item)
def append(self, val):
_get(self, "list").append(unwrap(val))
return self
def __str__(self):
return _get(self, "list").__str__()
def __len__(self):
return _get(self, "list").__len__()
@property
def __class__(self):
return list
def __getslice__(self, i, j):
        from ...env.logs import Log
Log.error("slicing is broken in Python 2.7: a[i:j] == a[i+len(a), j] sometimes. Use [start:stop:step]")
def copy(self):
return StructList(list(_get(self, "list")))
def remove(self, x):
_get(self, "list").remove(x)
return self
def extend(self, values):
for v in values:
_get(self, "list").append(unwrap(v))
return self
def pop(self):
return wrap(_get(self, "list").pop())
def __add__(self, value):
output = list(_get(self, "list"))
output.extend(value)
return StructList(vals=output)
def __or__(self, value):
output = list(_get(self, "list"))
output.append(value)
return StructList(vals=output)
def __radd__(self, other):
output = list(other)
output.extend(_get(self, "list"))
return StructList(vals=output)
def right(self, num=None):
"""
WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE RIGHT
"""
if num == None:
return StructList([_get(self, "list")[-1]])
if num <= 0:
return EmptyList
return StructList(_get(self, "list")[-num])
def leftBut(self, num):
"""
WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE LEFT [:-num:]
"""
if num == None:
return StructList([_get(self, "list")[:-1:]])
if num <= 0:
return EmptyList
return StructList(_get(self, "list")[:-num:])
def last(self):
"""
RETURN LAST ELEMENT IN StructList
"""
if _get(self, "list"):
return wrap(_get(self, "list")[-1])
return Null
def __getattribute__(self, key):
try:
output = _get(self, key)
return output
except Exception, e:
return StructList([v[key] for v in _get(self, "list")])
def wrap(v):
v_type = v.__class__
if v_type is dict:
if isinstance(v, Struct):
return v
m = Struct()
object.__setattr__(m, "__dict__", v) # INJECT m.__dict__=v SO THERE IS NO COPY
return m
if v_type is list:
if isinstance(v, StructList):
return v
for vv in v:
# IN PRACTICE WE DO NOT EXPECT TO GO THROUGH THIS LIST, IF ANY ARE WRAPPED, THE FIRST IS PROBABLY WRAPPED
if vv is not unwrap(vv):
return StructList([unwrap(vv) for vv in v])
return StructList(v)
if v_type is NoneType:
if v is None:
return Null
return v
if v_type is GeneratorType:
return (wrap(vv) for vv in v)
return v
def unwrap(v):
if isinstance(v, Struct):
return _get(v, "__dict__")
if isinstance(v, StructList):
return v.list
if v == None:
return None
return v
def inverse(d):
"""
reverse the k:v pairs
"""
output = {}
for k, v in unwrap(d).iteritems():
output[v] = output.get(v, [])
output[v].append(k)
return output
def nvl(*args):
    # pick the first non-null value
for a in args:
if a != None:
return a
return Null
def zip(keys, values):
output = Struct()
for i, k in enumerate(keys):
output[k] = values[i]
return output
def listwrap(value):
"""
OFTEN IT IS NICE TO ALLOW FUNCTION PARAMETERS TO BE ASSIGNED A VALUE,
OR A list-OF-VALUES, OR NULL. CHECKING FOR THIS IS TEDIOUS AND WE WANT TO CAST
FROM THOSE THREE CASES TO THE SINGLE CASE OF A LIST
Null -> []
value -> [value]
[...] -> [...] (unchanged list)
#BEFORE
if a is not None:
if not isinstance(a, list):
a=[a]
for x in a:
#do something
#AFTER
for x in listwrap(a):
#do something
"""
if value == None:
return []
elif isinstance(value, list):
return wrap(value)
else:
return wrap([value])
def split_field(field):
"""
RETURN field AS ARRAY OF DOT-SEPARATED FIELDS
"""
if field.find(".") >= 0:
field = field.replace("\.", "\a")
return [k.replace("\a", "\.") for k in field.split(".")]
else:
return [field]
def join_field(field):
"""
RETURN field SEQUENCE AS STRING
"""
return ".".join([f.replace(".", "\.") for f in field])
| klahnakoski/eideticker | util/struct.py | Python | mpl-2.0 | 15,880 |
# Authors: Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: BSD (3-clause)
import numpy as np
from ..evoked import Evoked
from ..epochs import BaseEpochs
from ..io import BaseRaw
from ..event import find_events
from ..io.pick import _pick_data_channels
from ..utils import _check_preload, _check_option
def _get_window(start, end):
"""Return window which has length as much as parameter start - end."""
from scipy.signal import hann
window = 1 - np.r_[hann(4)[:2],
np.ones(np.abs(end - start) - 4),
hann(4)[-2:]].T
return window
def _fix_artifact(data, window, picks, first_samp, last_samp, mode):
"""Modify original data by using parameter data."""
from scipy.interpolate import interp1d
if mode == 'linear':
x = np.array([first_samp, last_samp])
f = interp1d(x, data[:, (first_samp, last_samp)][picks])
xnew = np.arange(first_samp, last_samp)
interp_data = f(xnew)
data[picks, first_samp:last_samp] = interp_data
if mode == 'window':
data[picks, first_samp:last_samp] = \
data[picks, first_samp:last_samp] * window[np.newaxis, :]
def fix_stim_artifact(inst, events=None, event_id=None, tmin=0.,
tmax=0.01, mode='linear', stim_channel=None):
"""Eliminate stimulation's artifacts from instance.
.. note:: This function operates in-place, consider passing
``inst.copy()`` if this is not desired.
Parameters
----------
inst : instance of Raw or Epochs or Evoked
The data.
events : array, shape (n_events, 3)
The list of events. Required only when inst is Raw.
event_id : int
The id of the events generating the stimulation artifacts.
If None, read all events. Required only when inst is Raw.
tmin : float
Start time of the interpolation window in seconds.
tmax : float
End time of the interpolation window in seconds.
mode : 'linear' | 'window'
Way to fill the artifacted time interval.
'linear' does linear interpolation
'window' applies a (1 - hanning) window.
stim_channel : str | None
Stim channel to use.
Returns
-------
inst : instance of Raw or Evoked or Epochs
Instance with modified data.
"""
_check_option('mode', mode, ['linear', 'window'])
s_start = int(np.ceil(inst.info['sfreq'] * tmin))
s_end = int(np.ceil(inst.info['sfreq'] * tmax))
if (mode == "window") and (s_end - s_start) < 4:
raise ValueError('Time range is too short. Use a larger interval '
'or set mode to "linear".')
window = None
if mode == 'window':
window = _get_window(s_start, s_end)
picks = _pick_data_channels(inst.info)
_check_preload(inst, 'fix_stim_artifact')
if isinstance(inst, BaseRaw):
if events is None:
events = find_events(inst, stim_channel=stim_channel)
if len(events) == 0:
raise ValueError('No events are found')
if event_id is None:
events_sel = np.arange(len(events))
else:
events_sel = (events[:, 2] == event_id)
event_start = events[events_sel, 0]
data = inst._data
for event_idx in event_start:
first_samp = int(event_idx) - inst.first_samp + s_start
last_samp = int(event_idx) - inst.first_samp + s_end
_fix_artifact(data, window, picks, first_samp, last_samp, mode)
elif isinstance(inst, BaseEpochs):
if inst.reject is not None:
raise RuntimeError('Reject is already applied. Use reject=None '
'in the constructor.')
e_start = int(np.ceil(inst.info['sfreq'] * inst.tmin))
first_samp = s_start - e_start
last_samp = s_end - e_start
data = inst._data
for epoch in data:
_fix_artifact(epoch, window, picks, first_samp, last_samp, mode)
elif isinstance(inst, Evoked):
first_samp = s_start - inst.first
last_samp = s_end - inst.first
data = inst.data
_fix_artifact(data, window, picks, first_samp, last_samp, mode)
else:
raise TypeError('Not a Raw or Epochs or Evoked (got %s).' % type(inst))
return inst
| Teekuningas/mne-python | mne/preprocessing/stim.py | Python | bsd-3-clause | 4,333 |
import random,math
from MB import music as music
class Stub:
def __init__(self,client):
self.time=0.0
self.dt=0.01
self.engine=music.Engine(self.dt,self.callback)
self.bpm_average=60.0
self.freq=0.1
self.freq_depth=0.0
self.noise_depth=0.0
self.client=client
self.engine.start()
def quit(self):
self.engine.stop()
def set_client(self,client):
self.client=client
def noise(self):
return 2*(0.5-random.random())
def set_bpm(self,bpm):
self.bpm_average=bpm
def set_freq(self,freq):
self.freq=freq
def set_freq_depth(self,d):
self.freq_depth=d
def set_noise_depth(self,d):
self.noise_depth=d
def callback(self):
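        # Called by the music engine every dt seconds; pushes the current BPM
        # (optionally modulated by the commented-out terms below) to the client.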
if self.client==None:
return
        bpm=self.bpm_average
# +self.freq_depth*math.sin(2*math.pi*self.freq*self.time)
# bpm+=self.noise_depth*self.noise()
self.client.set_bpm(bpm) | pauljohnleonard/MusicBox | src/ai/stub.py | Python | gpl-2.0 | 1,164 |
from collections.abc import Iterable
from difflib import get_close_matches
from numbers import Real
import itertools
import os
import re
import shutil
import tempfile
from warnings import warn
import numpy as np
import h5py
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from . import HDF5_VERSION, HDF5_VERSION_MAJOR
from .data import K_BOLTZMANN, ATOMIC_SYMBOL, EV_PER_MEV, NATURAL_ABUNDANCE
from .ace import Table, get_table, Library
from .angle_energy import AngleEnergy
from .function import Tabulated1D
from .correlated import CorrelatedAngleEnergy
from .njoy import make_ace_thermal
from openmc.stats import Discrete, Tabular
_THERMAL_NAMES = {
'c_Al27': ('al', 'al27', 'al-27'),
'c_Be': ('be', 'be-metal', 'be-met'),
'c_BeO': ('beo',),
'c_Be_in_BeO': ('bebeo', 'be-beo', 'be-o', 'be/o'),
'c_C6H6': ('benz', 'c6h6'),
'c_C_in_SiC': ('csic', 'c-sic'),
'c_Ca_in_CaH2': ('cah',),
'c_D_in_D2O': ('dd2o', 'd-d2o', 'hwtr', 'hw'),
'c_Fe56': ('fe', 'fe56', 'fe-56'),
'c_Graphite': ('graph', 'grph', 'gr'),
'c_Graphite_10p': ('grph10',),
'c_Graphite_30p': ('grph30',),
'c_H_in_CaH2': ('hcah2',),
'c_H_in_CH2': ('hch2', 'poly', 'pol', 'h-poly'),
'c_H_in_CH4_liquid': ('lch4', 'lmeth'),
'c_H_in_CH4_solid': ('sch4', 'smeth'),
'c_H_in_H2O': ('hh2o', 'h-h2o', 'lwtr', 'lw'),
'c_H_in_H2O_solid': ('hice', 'h-ice'),
'c_H_in_C5O2H8': ('lucite', 'c5o2h8', 'h-luci'),
'c_H_in_YH2': ('hyh2', 'h-yh2'),
'c_H_in_ZrH': ('hzrh', 'h-zrh', 'h-zr', 'h/zr', 'hzr'),
'c_Mg24': ('mg', 'mg24'),
'c_O_in_BeO': ('obeo', 'o-beo', 'o-be', 'o/be'),
'c_O_in_D2O': ('od2o', 'o-d2o'),
'c_O_in_H2O_ice': ('oice', 'o-ice'),
'c_O_in_UO2': ('ouo2', 'o-uo2', 'o2-u', 'o2/u'),
'c_N_in_UN': ('n-un',),
'c_ortho_D': ('orthod', 'orthoD', 'dortho'),
'c_ortho_H': ('orthoh', 'orthoH', 'hortho'),
'c_Si_in_SiC': ('sisic', 'si-sic'),
'c_SiO2_alpha': ('sio2', 'sio2a'),
'c_SiO2_beta': ('sio2b',),
'c_para_D': ('parad', 'paraD', 'dpara'),
'c_para_H': ('parah', 'paraH', 'hpara'),
'c_U_in_UN': ('u-un',),
'c_U_in_UO2': ('uuo2', 'u-uo2', 'u-o2', 'u/o2'),
'c_Y_in_YH2': ('yyh2', 'y-yh2'),
'c_Zr_in_ZrH': ('zrzrh', 'zr-zrh', 'zr-h', 'zr/h')
}
def get_thermal_name(name):
"""Get proper S(a,b) table name, e.g. 'HH2O' -> 'c_H_in_H2O'
Parameters
----------
name : str
Name of an ACE thermal scattering table
Returns
-------
str
GND-format thermal scattering name
"""
if name in _THERMAL_NAMES:
return name
else:
for proper_name, names in _THERMAL_NAMES.items():
if name.lower() in names:
return proper_name
else:
# Make an educated guess?? This actually works well for
# JEFF-3.2 which stupidly uses names like lw00.32t,
# lw01.32t, etc. for different temperatures
# First, construct a list of all the values/keys in the names
# dictionary
all_names = itertools.chain(_THERMAL_NAMES.keys(),
*_THERMAL_NAMES.values())
matches = get_close_matches(name, all_names, cutoff=0.5)
if len(matches) > 0:
# Figure out the key for the corresponding match
match = matches[0]
if match not in _THERMAL_NAMES:
for key, value_list in _THERMAL_NAMES.items():
if match in value_list:
match = key
break
warn('Thermal scattering material "{}" is not recognized. '
'Assigning a name of {}.'.format(name, match))
return match
else:
# OK, we give up. Just use the ACE name.
warn('Thermal scattering material "{0}" is not recognized. '
'Assigning a name of c_{0}.'.format(name))
return 'c_' + name
class CoherentElastic(EqualityMixin):
r"""Coherent elastic scattering data from a crystalline material
Parameters
----------
bragg_edges : Iterable of float
Bragg edge energies in eV
factors : Iterable of float
Partial sum of structure factors, :math:`\sum\limits_{i=1}^{E_i<E} S_i`
Attributes
----------
bragg_edges : Iterable of float
Bragg edge energies in eV
factors : Iterable of float
Partial sum of structure factors, :math:`\sum\limits_{i=1}^{E_i<E} S_i`
"""
def __init__(self, bragg_edges, factors):
self.bragg_edges = bragg_edges
self.factors = factors
def __call__(self, E):
if isinstance(E, Iterable):
E = np.asarray(E)
idx = np.searchsorted(self.bragg_edges, E)
return self.factors[idx] / E
def __len__(self):
return len(self.bragg_edges)
@property
def bragg_edges(self):
return self._bragg_edges
@property
def factors(self):
return self._factors
@bragg_edges.setter
def bragg_edges(self, bragg_edges):
cv.check_type('Bragg edges', bragg_edges, Iterable, Real)
self._bragg_edges = np.asarray(bragg_edges)
@factors.setter
def factors(self, factors):
cv.check_type('structure factor cumulative sums', factors,
Iterable, Real)
self._factors = np.asarray(factors)
def to_hdf5(self, group, name):
"""Write coherent elastic scattering to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
name : str
Name of the dataset to create
"""
dataset = group.create_dataset(name, data=np.vstack(
[self.bragg_edges, self.factors]))
dataset.attrs['type'] = np.string_('bragg')
@classmethod
def from_hdf5(cls, dataset):
"""Read coherent elastic scattering from an HDF5 dataset
Parameters
----------
        dataset : h5py.Dataset
            HDF5 dataset to read from
Returns
-------
openmc.data.CoherentElastic
Coherent elastic scattering cross section
"""
bragg_edges = dataset.value[0, :]
factors = dataset.value[1, :]
return cls(bragg_edges, factors)
class ThermalScattering(EqualityMixin):
"""A ThermalScattering object contains thermal scattering data as represented by
an S(alpha, beta) table.
Parameters
----------
name : str
Name of the material using GND convention, e.g. c_H_in_H2O
atomic_weight_ratio : float
Atomic mass ratio of the target nuclide.
kTs : Iterable of float
List of temperatures of the target nuclide in the data set.
The temperatures have units of eV.
Attributes
----------
atomic_weight_ratio : float
Atomic mass ratio of the target nuclide.
elastic_xs : openmc.data.Tabulated1D or openmc.data.CoherentElastic
Elastic scattering cross section derived in the coherent or incoherent
approximation
inelastic_xs : openmc.data.Tabulated1D
Inelastic scattering cross section derived in the incoherent
approximation
name : str
Name of the material using GND convention, e.g. c_H_in_H2O
temperatures : Iterable of str
List of string representations the temperatures of the target nuclide
in the data set. The temperatures are strings of the temperature,
rounded to the nearest integer; e.g., '294K'
kTs : Iterable of float
List of temperatures of the target nuclide in the data set.
The temperatures have units of eV.
nuclides : Iterable of str
Nuclide names that the thermal scattering data applies to
"""
def __init__(self, name, atomic_weight_ratio, kTs):
self.name = name
self.atomic_weight_ratio = atomic_weight_ratio
self.kTs = kTs
self.elastic_xs = {}
self.elastic_mu_out = {}
self.inelastic_xs = {}
self.inelastic_e_out = {}
self.inelastic_mu_out = {}
self.inelastic_dist = {}
self.secondary_mode = None
self.nuclides = []
def __repr__(self):
if hasattr(self, 'name'):
return "<Thermal Scattering Data: {0}>".format(self.name)
else:
return "<Thermal Scattering Data>"
@property
def temperatures(self):
return ["{}K".format(int(round(kT / K_BOLTZMANN))) for kT in self.kTs]
def export_to_hdf5(self, path, mode='a', libver='earliest'):
"""Export table to an HDF5 file.
Parameters
----------
path : str
Path to write HDF5 file to
        mode : {'r', 'r+', 'w', 'x', 'a'}
Mode that is used to open the HDF5 file. This is the second argument
to the :class:`h5py.File` constructor.
libver : {'earliest', 'latest'}
Compatibility mode for the HDF5 file. 'latest' will produce files
that are less backwards compatible but have performance benefits.
"""
# Open file and write version
f = h5py.File(str(path), mode, libver=libver)
f.attrs['filetype'] = np.string_('data_thermal')
f.attrs['version'] = np.array(HDF5_VERSION)
# Write basic data
g = f.create_group(self.name)
g.attrs['atomic_weight_ratio'] = self.atomic_weight_ratio
g.attrs['nuclides'] = np.array(self.nuclides, dtype='S')
g.attrs['secondary_mode'] = np.string_(self.secondary_mode)
ktg = g.create_group('kTs')
for i, temperature in enumerate(self.temperatures):
ktg.create_dataset(temperature, data=self.kTs[i])
for T in self.temperatures:
Tg = g.create_group(T)
# Write thermal elastic scattering
if self.elastic_xs:
elastic_group = Tg.create_group('elastic')
self.elastic_xs[T].to_hdf5(elastic_group, 'xs')
if self.elastic_mu_out:
elastic_group.create_dataset('mu_out',
data=self.elastic_mu_out[T])
# Write thermal inelastic scattering
if self.inelastic_xs:
inelastic_group = Tg.create_group('inelastic')
self.inelastic_xs[T].to_hdf5(inelastic_group, 'xs')
if self.secondary_mode in ('equal', 'skewed'):
inelastic_group.create_dataset('energy_out',
data=self.inelastic_e_out[T])
inelastic_group.create_dataset('mu_out',
data=self.inelastic_mu_out[T])
elif self.secondary_mode == 'continuous':
self.inelastic_dist[T].to_hdf5(inelastic_group)
f.close()
def add_temperature_from_ace(self, ace_or_filename, name=None):
"""Add data to the ThermalScattering object from an ACE file at a
different temperature.
Parameters
----------
ace_or_filename : openmc.data.ace.Table or str
ACE table to read from. If given as a string, it is assumed to be
the filename for the ACE file.
name : str
GND-conforming name of the material, e.g. c_H_in_H2O. If none is
passed, the appropriate name is guessed based on the name of the ACE
table.
Returns
-------
openmc.data.ThermalScattering
Thermal scattering data
"""
data = ThermalScattering.from_ace(ace_or_filename, name)
        # Check if temperature already exists
strT = data.temperatures[0]
if strT in self.temperatures:
warn('S(a,b) data at T={} already exists.'.format(strT))
return
# Check that name matches
if data.name != self.name:
raise ValueError('Data provided for an incorrect material.')
# Add temperature
self.kTs += data.kTs
# Add inelastic cross section and distributions
if strT in data.inelastic_xs:
self.inelastic_xs[strT] = data.inelastic_xs[strT]
if strT in data.inelastic_e_out:
self.inelastic_e_out[strT] = data.inelastic_e_out[strT]
if strT in data.inelastic_mu_out:
self.inelastic_mu_out[strT] = data.inelastic_mu_out[strT]
if strT in data.inelastic_dist:
self.inelastic_dist[strT] = data.inelastic_dist[strT]
        # Add elastic cross section and angular distribution
if strT in data.elastic_xs:
self.elastic_xs[strT] = data.elastic_xs[strT]
if strT in data.elastic_mu_out:
self.elastic_mu_out[strT] = data.elastic_mu_out[strT]
@classmethod
def from_hdf5(cls, group_or_filename):
"""Generate thermal scattering data from HDF5 group
Parameters
----------
group_or_filename : h5py.Group or str
HDF5 group containing interaction data. If given as a string, it is
assumed to be the filename for the HDF5 file, and the first group
is used to read from.
Returns
-------
openmc.data.ThermalScattering
Neutron thermal scattering data
"""
if isinstance(group_or_filename, h5py.Group):
group = group_or_filename
else:
h5file = h5py.File(str(group_or_filename), 'r')
# Make sure version matches
if 'version' in h5file.attrs:
major, minor = h5file.attrs['version']
if major != HDF5_VERSION_MAJOR:
raise IOError(
'HDF5 data format uses version {}.{} whereas your '
'installation of the OpenMC Python API expects version '
'{}.x.'.format(major, minor, HDF5_VERSION_MAJOR))
else:
raise IOError(
'HDF5 data does not indicate a version. Your installation of '
'the OpenMC Python API expects version {}.x data.'
.format(HDF5_VERSION_MAJOR))
group = list(h5file.values())[0]
name = group.name[1:]
atomic_weight_ratio = group.attrs['atomic_weight_ratio']
kTg = group['kTs']
kTs = []
for temp in kTg:
kTs.append(kTg[temp].value)
temperatures = [str(int(round(kT / K_BOLTZMANN))) + "K" for kT in kTs]
table = cls(name, atomic_weight_ratio, kTs)
table.nuclides = [nuc.decode() for nuc in group.attrs['nuclides']]
table.secondary_mode = group.attrs['secondary_mode'].decode()
# Read thermal elastic scattering
for T in temperatures:
Tgroup = group[T]
if 'elastic' in Tgroup:
elastic_group = Tgroup['elastic']
# Cross section
elastic_xs_type = elastic_group['xs'].attrs['type'].decode()
if elastic_xs_type == 'Tabulated1D':
table.elastic_xs[T] = Tabulated1D.from_hdf5(
elastic_group['xs'])
elif elastic_xs_type == 'bragg':
table.elastic_xs[T] = CoherentElastic.from_hdf5(
elastic_group['xs'])
# Angular distribution
if 'mu_out' in elastic_group:
table.elastic_mu_out[T] = elastic_group['mu_out'].value
# Read thermal inelastic scattering
if 'inelastic' in Tgroup:
inelastic_group = Tgroup['inelastic']
table.inelastic_xs[T] = Tabulated1D.from_hdf5(
inelastic_group['xs'])
if table.secondary_mode in ('equal', 'skewed'):
table.inelastic_e_out[T] = inelastic_group['energy_out'].value
table.inelastic_mu_out[T] = inelastic_group['mu_out'].value
elif table.secondary_mode == 'continuous':
table.inelastic_dist[T] = AngleEnergy.from_hdf5(
inelastic_group)
return table
@classmethod
def from_ace(cls, ace_or_filename, name=None):
"""Generate thermal scattering data from an ACE table
Parameters
----------
ace_or_filename : openmc.data.ace.Table or str
ACE table to read from. If given as a string, it is assumed to be
the filename for the ACE file.
name : str
GND-conforming name of the material, e.g. c_H_in_H2O. If none is
passed, the appropriate name is guessed based on the name of the ACE
table.
Returns
-------
openmc.data.ThermalScattering
Thermal scattering data
"""
if isinstance(ace_or_filename, Table):
ace = ace_or_filename
else:
ace = get_table(ace_or_filename)
# Get new name that is GND-consistent
ace_name, xs = ace.name.split('.')
name = get_thermal_name(ace_name)
# Assign temperature to the running list
kTs = [ace.temperature*EV_PER_MEV]
temperatures = [str(int(round(ace.temperature*EV_PER_MEV
/ K_BOLTZMANN))) + "K"]
table = cls(name, ace.atomic_weight_ratio, kTs)
# Incoherent inelastic scattering cross section
idx = ace.jxs[1]
n_energy = int(ace.xss[idx])
energy = ace.xss[idx+1 : idx+1+n_energy]*EV_PER_MEV
xs = ace.xss[idx+1+n_energy : idx+1+2*n_energy]
table.inelastic_xs[temperatures[0]] = Tabulated1D(energy, xs)
if ace.nxs[7] == 0:
table.secondary_mode = 'equal'
elif ace.nxs[7] == 1:
table.secondary_mode = 'skewed'
elif ace.nxs[7] == 2:
table.secondary_mode = 'continuous'
n_energy_out = ace.nxs[4]
if table.secondary_mode in ('equal', 'skewed'):
n_mu = ace.nxs[3]
idx = ace.jxs[3]
table.inelastic_e_out[temperatures[0]] = \
ace.xss[idx:idx + n_energy * n_energy_out * (n_mu + 2):
n_mu + 2]*EV_PER_MEV
table.inelastic_e_out[temperatures[0]].shape = \
(n_energy, n_energy_out)
table.inelastic_mu_out[temperatures[0]] = \
ace.xss[idx:idx + n_energy * n_energy_out * (n_mu + 2)]
table.inelastic_mu_out[temperatures[0]].shape = \
(n_energy, n_energy_out, n_mu+2)
table.inelastic_mu_out[temperatures[0]] = \
table.inelastic_mu_out[temperatures[0]][:, :, 1:]
else:
n_mu = ace.nxs[3] - 1
idx = ace.jxs[3]
locc = ace.xss[idx:idx + n_energy].astype(int)
n_energy_out = \
ace.xss[idx + n_energy:idx + 2 * n_energy].astype(int)
energy_out = []
mu_out = []
for i in range(n_energy):
idx = locc[i]
# Outgoing energy distribution for incoming energy i
e = ace.xss[idx + 1:idx + 1 + n_energy_out[i]*(n_mu + 3):
n_mu + 3]*EV_PER_MEV
p = ace.xss[idx + 2:idx + 2 + n_energy_out[i]*(n_mu + 3):
n_mu + 3]/EV_PER_MEV
c = ace.xss[idx + 3:idx + 3 + n_energy_out[i]*(n_mu + 3):
n_mu + 3]
eout_i = Tabular(e, p, 'linear-linear', ignore_negative=True)
eout_i.c = c
# Outgoing angle distribution for each
# (incoming, outgoing) energy pair
mu_i = []
for j in range(n_energy_out[i]):
mu = ace.xss[idx + 4:idx + 4 + n_mu]
p_mu = 1. / n_mu * np.ones(n_mu)
mu_ij = Discrete(mu, p_mu)
mu_ij.c = np.cumsum(p_mu)
mu_i.append(mu_ij)
idx += 3 + n_mu
energy_out.append(eout_i)
mu_out.append(mu_i)
# Create correlated angle-energy distribution
breakpoints = [n_energy]
interpolation = [2]
energy = table.inelastic_xs[temperatures[0]].x
table.inelastic_dist[temperatures[0]] = CorrelatedAngleEnergy(
breakpoints, interpolation, energy, energy_out, mu_out)
# Incoherent/coherent elastic scattering cross section
idx = ace.jxs[4]
n_mu = ace.nxs[6] + 1
if idx != 0:
n_energy = int(ace.xss[idx])
energy = ace.xss[idx + 1: idx + 1 + n_energy]*EV_PER_MEV
P = ace.xss[idx + 1 + n_energy: idx + 1 + 2 * n_energy]
if ace.nxs[5] == 4:
# Coherent elastic
table.elastic_xs[temperatures[0]] = CoherentElastic(
energy, P*EV_PER_MEV)
# Coherent elastic shouldn't have angular distributions listed
assert n_mu == 0
else:
# Incoherent elastic
table.elastic_xs[temperatures[0]] = Tabulated1D(energy, P)
# Angular distribution
assert n_mu > 0
idx = ace.jxs[6]
table.elastic_mu_out[temperatures[0]] = \
ace.xss[idx:idx + n_energy * n_mu]
table.elastic_mu_out[temperatures[0]].shape = \
(n_energy, n_mu)
# Get relevant nuclides -- NJOY only allows one to specify three
# nuclides that the S(a,b) table applies to. Thus, for all elements
# other than H and Fe, we automatically add all the naturally-occurring
# isotopes.
for zaid, awr in ace.pairs:
if zaid > 0:
Z, A = divmod(zaid, 1000)
element = ATOMIC_SYMBOL[Z]
if element in ['H', 'Fe']:
table.nuclides.append(element + str(A))
else:
if element + '0' not in table.nuclides:
table.nuclides.append(element + '0')
for isotope in sorted(NATURAL_ABUNDANCE):
if re.match(r'{}\d+'.format(element), isotope):
if isotope not in table.nuclides:
table.nuclides.append(isotope)
return table
@classmethod
def from_njoy(cls, filename, filename_thermal, temperatures=None, **kwargs):
"""Generate incident neutron data by running NJOY.
Parameters
----------
filename : str
Path to ENDF neutron sublibrary file
filename_thermal : str
Path to ENDF thermal scattering sublibrary file
temperatures : iterable of float
Temperatures in Kelvin to produce data at. If omitted, data is
produced at all temperatures in the ENDF thermal scattering
sublibrary.
**kwargs
Keyword arguments passed to :func:`openmc.data.njoy.make_ace_thermal`
Returns
-------
data : openmc.data.ThermalScattering
Thermal scattering data
"""
with tempfile.TemporaryDirectory() as tmpdir:
# Run NJOY to create an ACE library
ace_file = os.path.join(tmpdir, 'ace')
xsdir_file = os.path.join(tmpdir, 'xsdir')
make_ace_thermal(filename, filename_thermal, temperatures,
ace_file, xsdir_file, **kwargs)
# Create instance from ACE tables within library
lib = Library(ace_file)
data = cls.from_ace(lib.tables[0])
for table in lib.tables[1:]:
data.add_temperature_from_ace(table)
return data
| johnnyliu27/openmc | openmc/data/thermal.py | Python | mit | 23,923 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from .base import * # noqa
from .django import * # noqa
| barseghyanartur/django-url-filter | url_filter/filtersets/__init__.py | Python | mit | 156 |
#!/usr/bin/env python
"""Build and write out the NGC-star-clusters.fits catalog.
"""
import os
import numpy as np
import numpy.ma as ma
from astropy.io import ascii
from astropy.table import Table, vstack
from astrometry.util.starutil_numpy import hmsstring2ra, dmsstring2dec
from astrometry.libkd.spherematch import match_radec
from pkg_resources import resource_filename
#import desimodel.io
#import desimodel.footprint
#tiles = desimodel.io.load_tiles(onlydesi=True)
if not os.path.isfile('/tmp/NGC.csv'):
os.system('wget -P /tmp https://raw.githubusercontent.com/mattiaverga/OpenNGC/master/NGC.csv')
names = ('name', 'type', 'ra_hms', 'dec_dms', 'const', 'majax', 'minax',
'pa', 'bmag', 'vmag', 'jmag', 'hmag', 'kmag', 'sbrightn', 'hubble',
'cstarumag', 'cstarbmag', 'cstarvmag', 'messier', 'ngc', 'ic',
'cstarnames', 'identifiers', 'commonnames', 'nednotes', 'ongcnotes')
NGC = ascii.read('/tmp/NGC.csv', delimiter=';', names=names)
NGC = NGC[(NGC['ra_hms'] != 'N/A')]
ra, dec = [], []
for _ra, _dec in zip(ma.getdata(NGC['ra_hms']), ma.getdata(NGC['dec_dms'])):
ra.append(hmsstring2ra(_ra.replace('h', ':').replace('m', ':').replace('s','')))
dec.append(dmsstring2dec(_dec.replace('d', ':').replace('m', ':').replace('s','')))
NGC['ra'] = ra
NGC['dec'] = dec
objtype = np.char.strip(ma.getdata(NGC['type']))
# Keep all globular clusters and planetary nebulae
keeptype = ('PN', 'GCl')
keep = np.zeros(len(NGC), dtype=bool)
for otype in keeptype:
ww = [otype == tt for tt in objtype]
keep = np.logical_or(keep, ww)
print(np.sum(keep))
clusters = NGC[keep]
# Fill missing major axes with a nominal 0.4 arcmin (roughly works
# for NGC7009, which is the only missing PN in the footprint).
ma.set_fill_value(clusters['majax'], 0.4)
clusters['majax'] = ma.filled(clusters['majax'].data)
# Increase the radius of IC4593
# https://github.com/legacysurvey/legacypipe/issues/347
clusters['majax'][clusters['name'] == 'IC4593'] = 0.5
#indesi = desimodel.footprint.is_point_in_desi(tiles, ma.getdata(clusters['ra']),
# ma.getdata(clusters['dec']))
#print(np.sum(indesi))
#bb = clusters[indesi]
#bb[np.argsort(bb['majax'])[::-1]]['name', 'ra', 'dec', 'majax', 'type']
# Build the output catalog: select a subset of the columns and rename
# majax-->radius (arcmin-->degree)
out = Table()
out['name'] = clusters['name']
out['alt_name'] = ['' if mm == 0 else 'M{}'.format(str(mm))
for mm in ma.getdata(clusters['messier'])]
out['type'] = clusters['type']
out['ra'] = clusters['ra']
out['dec'] = clusters['dec']
out['radius'] = (clusters['majax'] / 60).astype('f4') # [degrees]
#out['radius'] = out['radius_orig']
# Read the updated radii based on visual inspection by Arjun Dey (Feb 2020):
radiifile = resource_filename('legacypipe', 'data/NGC-star-clusters-radii.csv')
newname, newradii = np.loadtxt(radiifile, dtype=str, delimiter=',', unpack=True)
out['radius'][np.isin(out['name'], newname)] = newradii.astype('f4')
#oldradii = out['radius'].copy()
#import matplotlib.pyplot as plt
#plt.scatter(oldradii*60, oldradii/out['radius'], s=15)
#plt.xlabel('Old radii [arcmin]')
#plt.ylabel('Old radii / New radii')
#plt.show()
# Read the ancillary globular cluster catalog and update the radii in the NGC.
#https://heasarc.gsfc.nasa.gov/db-perl/W3Browse/w3table.pl?tablehead=name%3Dglobclust&Action=More+Options
if False:
gcfile = resource_filename('legacypipe', 'data/globular_clusters.fits')
gcs = Table.read(gcfile)
I, J, _ = match_radec(clusters['ra'], clusters['dec'], gcs['RA'], gcs['DEC'], 10./3600., nearest=True)
out['radius'][I] = (gcs['HALF_LIGHT_RADIUS'][J] / 60).astype('f4') # [degrees]
# Read the supplemental catalog of globular clusters and (compact) open clusters
# from Arjun Dey (Mar 2020). Note that the NGC open clusters were culled above,
# but we put them back here because the diameters have been vetted.
names = ('name', 'alt_name', 'ra', 'dec', 'type', 'radius')
suppfile = resource_filename('legacypipe', 'data/star-clusters-supplemental.csv')
supp = ascii.read(suppfile, delimiter=',', names=names, fill_values='')
#supp['alt_name'] = supp['alt_name'].astype('U4')
supp['radius'] = supp['radius'].astype('f4')
out = vstack((out, supp))
# add a position angle and ellipticity (b/a)
out['pa'] = np.zeros(len(out), dtype='f4')
out['ba'] = np.ones(len(out), dtype='f4')
# add Fornax and Scluptor -- LG dwarfs by hand
# GALAXY RA DEC D25 PA BA
# bytes24 float64 float64 float32 int16 float32
# ------- ----------------- ------------------- ------- ----- -------
# Fornax 39.99708333333332 -34.449166666666656 66.4 41 0.7
# Sculptor 15.039166666666665 -33.70916666666666 45.2 99 0.68
dwarfs = Table()
dwarfs['name'] = ['Sculptor', 'Fornax']
dwarfs['alt_name'] = ['', '']
dwarfs['type'] = ['Dwrf', 'Dwrf']
dwarfs['ra'] = np.array([15.039166666666665, 39.99708333333332]).astype('f8')
dwarfs['dec'] = np.array([-33.70916666666666, -34.449166666666656]).astype('f8')
dwarfs['radius'] = np.array([45.2/2/60, 66.4/2/60]).astype('f4')
dwarfs['pa'] = np.array([99, 41]).astype('f4')
dwarfs['ba'] = np.array([0.68, 0.7]).astype('f4')
out = vstack((out, dwarfs))
if False: # debugging
bb = out[['M' in nn for nn in out['alt_name']]]
bb[np.argsort(bb['radius'])]
bb['radius'] *= 60
bb['radius_orig'] *= 60
print(bb)
out = out[np.argsort(out['ra'])]
print(out)
clusterfile = resource_filename('legacypipe', 'data/NGC-star-clusters.fits')
print('Writing {}'.format(clusterfile))
out.write(clusterfile, overwrite=True)
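# Optional sanity check (illustrative, mirrors the if False: debugging blocks in this
# script): read the catalog back and confirm the columns written above survived.
if False:
    check = Table.read(clusterfile)
    assert set(['name', 'alt_name', 'type', 'ra', 'dec', 'radius', 'pa', 'ba']).issubset(check.colnames)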
# Code to help visually check all the globular clusters.
if False:
checktype = ('GCl', 'PN')
check = np.zeros(len(NGC), dtype=bool)
for otype in checktype:
ww = [otype == tt for tt in objtype]
check = np.logical_or(check, ww)
check_clusters = NGC[check] # 845 of them
# Write out a catalog, load it into the viewer and look at each of them.
check_clusters[['ra', 'dec', 'name']].write('/tmp/check.fits', overwrite=True) # 25 of them
| legacysurvey/pipeline | bin/build-cluster-catalog.py | Python | gpl-2.0 | 6,152 |
from btcommon import *
import socket
import struct
import threading
import os
import _widcomm
DEFAULT_MTU = 672
def dbg (*args):
return
sys.stdout.write (*args)
sys.stdout.write ("\n")
def BD_ADDR_to_str (bda):
return "%02X:%02X:%02X:%02X:%02X:%02X" % \
(ord(bda[0]), ord(bda[1]), ord(bda[2]),
ord(bda[3]), ord(bda[4]), ord(bda[5]))
def str_to_BD_ADDR (s):
digits = [ int (c, 16) for c in s.split(":") ]
return struct.pack ("6B", *digits)
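# Illustrative note (not in the original module): the two helpers above are inverses
# of each other, e.g.
#   str_to_BD_ADDR ("01:23:45:67:89:AB")        -> '\x01\x23\x45\x67\x89\xab'
#   BD_ADDR_to_str ('\x01\x23\x45\x67\x89\xab') -> "01:23:45:67:89:AB"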
class WCInquirer:
DEVST_DOWN = 0
DEVST_UP = 1
DEVST_ERROR = 2
DEVST_UNLOADED = 3
DEVST_RELOADED = 4
def __init__ (self):
self._wcinq = _widcomm._WCInquirer ()
port = self._wcinq.get_sockport ()
self.readsock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
self.readsock.connect (("127.0.0.1", port))
self._wcinq.accept_client ()
self.recently_discovered = []
self.inquiry_in_progress = False
self.sdp_query_in_progress = False
    def fileno (self):
        return self.readsock.fileno ()
def start_inquiry (self):
self.recently_discovered = []
self.inquiry_in_progress = self._wcinq.start_inquiry ()
def read_msg (self):
intsize = struct.calcsize ("=i")
msg_type = struct.unpack ("=i", self.readsock.recv (intsize))[0]
if msg_type == _widcomm.INQ_DEVICE_RESPONDED:
fmt = "=6s3s248si"
data = self.readsock.recv (struct.calcsize (fmt))
bda, devclass, bdname, connected = struct.unpack (fmt, data)
bdaddr = BD_ADDR_to_str (bda)
bdname = bdname.strip ("\0")
self.recently_discovered.append ((bdaddr, devclass, bdname,
connected))
elif msg_type == _widcomm.INQ_INQUIRY_COMPLETE:
fmt = "=ih"
data = self.readsock.recv (struct.calcsize (fmt))
success, num_responses = struct.unpack (fmt, data)
self.inquiry_in_progress = False
elif msg_type == _widcomm.INQ_DISCOVERY_COMPLETE:
self.sdp_query_in_progress = False
elif msg_type == _widcomm.INQ_STACK_STATUS_CHANGE:
fmt = "=i"
data = self.readsock.recv (struct.calcsize (fmt))
new_status = struct.unpack (fmt, data)[0]
def start_discovery (self, addr, uuid = None):
bd_addr = str_to_BD_ADDR (addr)
if uuid is not None:
self.sdp_query_in_progress = \
self._wcinq.start_discovery (bd_addr, to_full_uuid (uuid))
else:
self.sdp_query_in_progress = \
self._wcinq.start_discovery (bd_addr)
self.sdp_query_in_progress = True
def read_discovery_records (self, addr, uuid = None):
if not is_valid_address (addr):
raise ValueError ("invalid Bluetooth address")
bd_addr = str_to_BD_ADDR (addr)
if uuid is not None:
dbg ("read_discovery_records (%s, %s)" % (addr, uuid))
return self._wcinq.read_discovery_records (bd_addr,
to_full_uuid (uuid))
else:
return self._wcinq.read_discovery_records (bd_addr)
def is_device_ready (self):
return self._wcinq.is_device_ready ()
def get_local_device_address (self):
return self._wcinq.get_local_device_address ()
inquirer = WCInquirer ()
def discover_devices (duration=8, flush_cache=True, lookup_names=False):
inquirer.start_inquiry ()
while inquirer.inquiry_in_progress:
inquirer.read_msg ()
discovered = inquirer.recently_discovered[:]
if not lookup_names:
return [ tup[0] for tup in discovered ]
if lookup_names:
result = []
for bdaddr, devClass, bdName, bConnected in discovered:
if bdName:
result.append ((bdaddr, bdName))
else:
                result.append ((bdaddr, None))
return result
def lookup_name (address, timeout=10):
discover_devices ()
for bdaddr, devClass, bdName, bConnected in inquirer.recently_discovered:
if bdaddr == address:
return bdName
def advertise_service (sock, name, service_id = "", service_classes = [], \
profiles = [], provider = "", description = "", protocols = []):
sock._advertise_service (name, service_id, service_classes,
profiles, provider, description, protocols)
def stop_advertising (sock):
sock._stop_advertising ()
def find_service (name = None, uuid = None, address = None):
if address:
if address == "localhost": raise NotImplementedError
if not is_valid_address (address):
raise ValueError ("invalid Bluetooth address")
addresses = [ address ]
else:
addresses = discover_devices ()
if uuid and not is_valid_uuid (uuid):
raise ValueError ("invalid uuid ", uuid)
results = []
for addr in addresses:
inquirer.start_discovery (addr, uuid)
while inquirer.sdp_query_in_progress:
inquirer.read_msg ()
results.extend (inquirer.read_discovery_records (addr, uuid))
return results
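# Illustrative usage sketch (assumption: a Widcomm Bluetooth stack and at least one
# discoverable device are available; the address below is made up):
#
#   nearby = discover_devices (lookup_names = True)
#   # -> e.g. [("00:11:22:33:44:55", "My Phone"), ...]
#   records = find_service (uuid = "1101", address = "00:11:22:33:44:55")
#   # "1101" is the short UUID for the Serial Port Profile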
def _port_return_code_to_str (code):
k = { _widcomm.RFCOMM_SUCCESS : "Success",
_widcomm.RFCOMM_ALREADY_OPENED : "Port already opened",
_widcomm.RFCOMM_NOT_OPENED : "Connection not open",
_widcomm.RFCOMM_HANDLE_ERROR: "This error should never occur " \
"(HANDLE_ERROR) and is a stack bug",
_widcomm.RFCOMM_LINE_ERR: "Line error",
_widcomm.RFCOMM_START_FAILED: "Connection attempt failed",
_widcomm.RFCOMM_PAR_NEG_FAILED: "Parameter negotion (MTU) failed",
_widcomm.RFCOMM_PORT_NEG_FAILED: "Port negotiation failed",
_widcomm.RFCOMM_PEER_CONNECTION_FAILED: "Connection ended by remote "\
"side",
_widcomm.RFCOMM_PEER_TIMEOUT: "Timeout by remote side",
_widcomm.RFCOMM_INVALID_PARAMETER: "Invalid parameter",
_widcomm.RFCOMM_UNKNOWN_ERROR: "Unknown error" }
if code in k:
return k[code]
else:
return "Invalid RFCOMM error code %s" % str (code)
def _port_ev_code_to_str (code):
d = { _widcomm.PORT_EV_RXFLAG : "Received certain character",
_widcomm.PORT_EV_TXEMPTY : "Transmit Queue Empty",
_widcomm.PORT_EV_CTS : "CTS changed state",
_widcomm.PORT_EV_DSR : "DSR changed state",
_widcomm.PORT_EV_RLSD : "RLSD changed state",
_widcomm.PORT_EV_BREAK : "BREAK received",
_widcomm.PORT_EV_ERR : "Line status error occurred",
_widcomm.PORT_EV_RING : "Ring signal detected",
_widcomm.PORT_EV_CTSS : "CTS state",
_widcomm.PORT_EV_DSRS : "DSR state",
_widcomm.PORT_EV_RLSDS : "RLSD state",
_widcomm.PORT_EV_OVERRUN : "Receiver buffer overrun",
_widcomm.PORT_EV_TXCHAR : "Any character transmitted",
_widcomm.PORT_EV_CONNECTED : "RFCOMM connection established",
_widcomm.PORT_EV_CONNECT_ERR : "Was not able to establish " \
"connection or disconnected",
_widcomm.PORT_EV_FC : "Flow control enabled flag changed by remote",
_widcomm.PORT_EV_FCS : "Flow control status true = enabled" }
result = []
for k, v in d.items ():
if code & k:
result.append (v)
if len (result) == 0:
return "Invalid event code %d" % code
else:
return "\n".join (result)
def _sdp_checkraise (code):
if code == _widcomm.SDP_OK: return
elif code == _widcomm.SDP_COULD_NOT_ADD_RECORD:
raise BluetoothError ("Could not add SDP record")
elif code == _widcomm.SDP_INVALID_RECORD:
raise BluetoothError ("Invalid SDP record")
elif code == _widcomm.SDP_INVALID_PARAMETERS:
raise BluetoothError ("SDP: invalid parameters")
raise RuntimeError ("unknown SDP status code %s" % code)
class BluetoothSocket:
def __init__ (self, proto = RFCOMM, _sockdata = None):
if not proto in [ RFCOMM, L2CAP ]:
raise ValueError ("invalid protocol")
self.proto = proto
if proto == RFCOMM:
self.bind = self.rfcomm_bind
self.listen = self.rfcomm_listen
self.accept = self.rfcomm_accept
self.connect = self.rfcomm_connect
self.send = self.rfcomm_send
self.recv = self.rfcomm_recv
self.close = self.rfcomm_close
self.getsockname = self.rfcomm_getsockname
self.setblocking = self.rfcomm_setblocking
self.settimeout = self.rfcomm_settimeout
self.gettimeout = self.rfcomm_gettimeout
self.dup = self.rfcomm_dup
self.makefile = self.rfcomm_makefile
self.fileno = self.rfcomm_fileno
self.__make_cobjects = self.__rfcomm_make_cobjects
self._advertise_service = self.__rfcomm_advertise_service
if _sockdata:
self._wc, self._if, self.readsock = _sockdata
else:
self.__make_cobjects ()
self.connected = self._wc.is_connected ()
elif proto == L2CAP:
dbg ("creating l2cap socket")
self.bind = self.l2cap_bind
self.listen = self.l2cap_listen
self.accept = self.l2cap_accept
self.connect = self.l2cap_connect
self.send = self.l2cap_send
self.recv = self.l2cap_recv
self.close = self.l2cap_close
self.getsockname = self.l2cap_getsockname
self.setblocking = self.l2cap_setblocking
self.settimeout = self.l2cap_settimeout
self.gettimeout = self.l2cap_gettimeout
self.dup = self.l2cap_dup
self.makefile = self.l2cap_makefile
self.fileno = self.l2cap_fileno
self.__make_cobjects = self.__l2cap_make_cobjects
self._advertise_service = self.__l2cap_advertise_service
if _sockdata:
self._wc, self._if, self.readsock = _sockdata
self.connected = True
else:
self.__make_cobjects ()
self.connected = False
else:
raise NotImplementedError ()
self.nonblocking = False
self.connecting = False
self.listening = False
self.bound = False
self.received_data = []
self.last_event_code = None
self.port = 0
self._sdpservice = None
def _stop_advertising (self):
if not self._sdpservice:
raise BluetoothError ("not advertising any services")
self._sdpservice = None
def __rfcomm_make_cobjects (self):
self._wc = _widcomm._WCRfCommPort ()
self._if = _widcomm._WCRfCommIf ()
self.readsock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
self.readsock.connect (("127.0.0.1", self._wc.get_sockport ()))
self._wc.accept_client ()
def rfcomm_read_msg (self):
intsize = struct.calcsize ("=i")
msg_type_data = self.readsock.recv (intsize)
msg_type = struct.unpack ("=i", msg_type_data)[0]
if msg_type == _widcomm.RFCOMM_DATA_RECEIVED:
datalen_fmt = "=i"
datalen_data = self.readsock.recv (struct.calcsize (datalen_fmt))
datalen = struct.unpack (datalen_fmt, datalen_data)[0]
self.received_data.append (self.readsock.recv (datalen))
elif msg_type == _widcomm.RFCOMM_EVENT_RECEIVED:
fmt = "=I"
data = self.readsock.recv (struct.calcsize (fmt))
code = struct.unpack (fmt, data)[0]
dbg ("event %X received" % code)
if code & _widcomm.PORT_EV_CONNECTED:
self.connecting = False
self.listening = False
self.connected = True
if code & _widcomm.PORT_EV_CONNECT_ERR:
self.connecting = False
self.listening = False
self.connected = False
raise BluetoothError ("Connection failed")
if code & _widcomm.PORT_EV_RXFLAG:
dbg ("Rx flag")
if code & _widcomm.PORT_EV_TXEMPTY:
dbg ("Tx queue empty")
if code & _widcomm.PORT_EV_CTS:
dbg ("CTS changed state")
if code & _widcomm.PORT_EV_DSR:
dbg ("DSR changed state")
if code & _widcomm.PORT_EV_RLSD:
dbg ("RLSD changed state")
if code & _widcomm.PORT_EV_BREAK:
dbg ("BREAK received")
if code & _widcomm.PORT_EV_ERR:
dbg ("Line status error")
if code & _widcomm.PORT_EV_RING:
dbg ("Ring")
if code & _widcomm.PORT_EV_CTSS:
dbg ("CTS state")
if code & _widcomm.PORT_EV_DSRS:
dbg ("DSR state")
if code & _widcomm.PORT_EV_RLSDS:
dbg ("RLSD state")
if code & _widcomm.PORT_EV_OVERRUN:
dbg ("Receive buffer overrun")
if code & _widcomm.PORT_EV_TXCHAR:
dbg ("Data transmitted")
if code & _widcomm.PORT_EV_FC:
dbg ("Flow control changed by remote")
if code & _widcomm.PORT_EV_FCS:
dbg ("Flow control status true = enabled")
self.last_event_code = code
def rfcomm_bind (self, addrport):
addr, port = addrport
if len (addr):
raise ValueError ("Widcomm stack can't bind to " \
"user-specified adapter")
result = self._if.assign_scn_value (RFCOMM_UUID, port)
if not result:
raise BluetoothError ("unable to bind to port")
self.bound = True
self.port = self._if.get_scn ()
def rfcomm_listen (self, backlog):
if self.connected:
raise BluetoothError ("already connected")
if self.listening:
raise BluetoothError ("already listening/connecting")
if backlog != 1:
raise ValueError ("Widcomm stack requires backlog == 1")
port = self._if.get_scn ()
self._if.set_security_level ("", _widcomm.BTM_SEC_NONE, True)
if not port:
raise BluetoothError ("not bound to a port")
result = self._wc.open_server (port, DEFAULT_MTU)
if result != _widcomm.RFCOMM_SUCCESS:
raise BluetoothError (_port_return_code_to_str (result))
self.listening = True
def rfcomm_accept (self):
if self.connected:
raise BluetoothError ("already connected")
while self.listening and not self.connected:
dbg ("waiting for connection")
self.rfcomm_read_msg ()
if self.connected:
port = self._if.get_scn ()
client_bdaddr = BD_ADDR_to_str (self._wc.is_connected ())
# XXX widcomm API doesn't provide a way to determine the RFCOMM
# channel number of the client
client_port = 0
# create a new socket object and give it ownership of the
# wrapped C++ objects, since those are the ones actually connected
_sockdata = self._wc, self._if, self.readsock
clientsock = BluetoothSocket (RFCOMM, _sockdata)
# now create new C++ objects
self.__rfcomm_make_cobjects ()
# self.bind (("", port))
# self.listen (1)
return clientsock, (client_bdaddr, client_port)
def rfcomm_connect (self, addrport):
addr, port = addrport
dbg ("connecting to %s port %d" % (addr, port))
if not is_valid_address (addr):
raise ValueError ("invalid address %s" % addr)
self._if.assign_scn_value (RFCOMM_UUID, port)
self._if.set_security_level ("", _widcomm.BTM_SEC_NONE, False)
result = self._wc.open_client (port, str_to_BD_ADDR (addr), DEFAULT_MTU)
if result != _widcomm.RFCOMM_SUCCESS:
raise BluetoothError (_port_return_code_to_str (result))
self.connecting = True
while self.connecting:
self.rfcomm_read_msg ()
if not self._wc.is_connected ():
raise BluetoothError ("connection failed")
def rfcomm_send (self, data):
dbg ("sending: [%s]" % data)
status, written = self._wc.write (data)
if status == _widcomm.RFCOMM_SUCCESS:
dbg ("sent okay")
return written
else:
raise BluetoothError (_port_return_code_to_str (status))
def rfcomm_recv (self, numbytes):
if self.nonblocking and not self.received_data:
# XXX are we supposed to raise an exception, or just return None?
return None
while not self.received_data and self._wc.is_connected ():
self.rfcomm_read_msg ()
if self.received_data:
data = self.received_data.pop (0)
if len(data) > numbytes:
self.received_data.insert (0, data[numbytes:])
return data[:numbytes]
else:
return data
def rfcomm_close (self):
self._wc.close ()
self._wc = None
self.bound = False
self.connecting = False
self.listening = False
self.connected = False
# return bt.close (self._sockfd)
def rfcomm_getsockname (self):
if not self.bound:
raise BluetoothError ("Socket not bound")
addr = inquirer.get_local_device_address ()
port = self._if.get_scn ()
return addr, port
def rfcomm_setblocking (self, blocking):
self.nonblocking = not blocking
self.readsock.setblocking (blocking)
def rfcomm_settimeout (self, timeout):
raise NotImplementedError
pass
# if timeout < 0: raise ValueError ("invalid timeout")
#
# if timeout == 0:
# self.setblocking (False)
# else:
# self.setblocking (True)
# # XXX this doesn't look correct
# timeout = 0 # winsock timeout still needs to be set 0
#
# s = bt.settimeout (self._sockfd, timeout)
# self._timeout = timeout
def rfcomm_gettimeout (self):
raise NotImplementedError
# if self._blocking and not self._timeout: return None
# return bt.gettimeout (self._sockfd)
def rfcomm_fileno (self):
return self.readsock.fileno ()
def rfcomm_dup (self):
raise NotImplementedError
def rfcomm_makefile (self):
raise NotImplementedError
def __rfcomm_advertise_service (self, name, service_id,
service_classes, profiles, provider, description,
protocols):
if self._sdpservice is not None:
raise BluetoothError ("Service already advertised")
if not self.listening:
raise BluetoothError ("Socket must be listening before advertised")
if protocols:
raise NotImplementedError ("extra protocols not yet supported in Widcomm stack")
self._sdpservice = _widcomm._WCSdpService ()
if service_classes:
service_classes = [ to_full_uuid (s) for s in service_classes ]
_sdp_checkraise (self._sdpservice.add_service_class_id_list ( \
service_classes))
# self._if.set_security_level (name, _widcomm.BTM_SEC_NONE, True)
_sdp_checkraise (self._sdpservice.add_rfcomm_protocol_descriptor ( \
self.port))
if profiles:
for uuid, version in profiles:
uuid = to_full_uuid (uuid)
_sdp_checkraise (self._sdpservice.add_profile_descriptor_list (\
uuid, version))
_sdp_checkraise (self._sdpservice.add_service_name (name))
_sdp_checkraise (self._sdpservice.make_public_browseable ())
def __l2cap_make_cobjects (self):
dbg ("__l2cap_make_cobjects")
self._wc = _widcomm._WCL2CapConn ()
self._if = _widcomm._WCL2CapIf ()
self.readsock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
self.readsock.connect (("127.0.0.1", self._wc.get_sockport ()))
self._wc.accept_client ()
def l2cap_read_msg (self):
intsize = struct.calcsize ("=i")
msg_type_data = self.readsock.recv (intsize)
msg_type = struct.unpack ("=i", msg_type_data)[0]
if msg_type == _widcomm.L2CAP_DATA_RECEIVED:
datalen_fmt = "=i"
datalen_data = self.readsock.recv (struct.calcsize (datalen_fmt))
datalen = struct.unpack (datalen_fmt, datalen_data)[0]
self.received_data.append (self.readsock.recv (datalen))
elif msg_type == _widcomm.L2CAP_INCOMING_CONNECTION:
result = self._wc.accept ()
if not result: raise BluetoothError ("accept() failed")
elif msg_type == _widcomm.L2CAP_REMOTE_DISCONNECTED:
dbg ("L2CAP_REMOTE_DISCONNECTED")
self.connecting = False
self.listening = False
self.connected = False
elif msg_type == _widcomm.L2CAP_CONNECTED:
self.connecting = False
self.listening = False
self.connected = True
# elif msg_type == _widcomm.PORT_EV_CONNECT_ERR:
# self.connecting = False
# self.listening = False
# raise BluetoothError ("Connection failed")
def l2cap_bind (self, addrport):
dbg ("l2cap_bind %s" % str(addrport))
addr, port = addrport
if len (addr):
raise ValueError ("Widcomm stack can't bind to " \
"user-specified adapter")
result = self._if.assign_psm_value (L2CAP_UUID, port)
if not result:
raise BluetoothError ("unable to bind to port")
self.bound = True
self.port = self._if.get_psm ()
result = self._if.register ()
if not result:
raise BluetoothError ("register() failed")
def l2cap_listen (self, backlog):
dbg ("l2cap_listen %s" % backlog)
if self.connected:
raise BluetoothError ("already connected")
if self.listening:
raise BluetoothError ("already listening/connecting")
if backlog != 1:
raise ValueError ("Widcomm stack requires backlog == 1")
port = self._if.get_psm ()
self._if.set_security_level ("", _widcomm.BTM_SEC_NONE, True)
if not port:
raise BluetoothError ("not bound to a port")
result = self._wc.listen (self._if)
if not result:
raise BluetoothError ("listen() failed. don't know why")
self.listening = True
def l2cap_accept (self):
dbg ("l2cap_accept")
if self.connected:
raise BluetoothError ("already connected")
while self.listening and not self.connected:
dbg ("waiting for connection")
self.l2cap_read_msg ()
if self.connected:
port = self._if.get_psm ()
client_bdaddr = BD_ADDR_to_str (self._wc.remote_bd_addr ())
# XXX widcomm API doesn't provide a way to determine the L2CAP
# PSM of the client
client_port = 0
# create a new socket object and give it ownership of the
# wrapped C++ objects, since those are the ones actually connected
_sockdata = self._wc, self._if, self.readsock
clientsock = BluetoothSocket (L2CAP, _sockdata)
# now create new C++ objects
self.__l2cap_make_cobjects ()
# self.bind (("", port))
# self.listen (1)
return clientsock, (client_bdaddr, client_port)
def l2cap_connect (self, addrport):
addr, port = addrport
dbg ("connecting to %s port %d" % (addr, port))
if not is_valid_address (addr):
raise ValueError ("invalid address %s" % addr)
if not self._if.assign_psm_value (L2CAP_UUID, port):
raise BluetoothError ("Failed to assign PSM %d" % port)
if not self._if.set_security_level ("", _widcomm.BTM_SEC_NONE, False):
raise BluetoothError ("Failed to set security level")
if not self._if.register ():
raise BluetoothError ("Failed to register PSM")
self.connecting = True
if not self._wc.connect (self._if, str_to_BD_ADDR (addr)):
raise BluetoothError ("Connect failed")
while self.connecting:
self.l2cap_read_msg ()
if not self.connected:
raise BluetoothError ("connection failed")
def l2cap_send (self, data):
dbg ("sending: [%s]" % data)
status, written = self._wc.write (data)
if status:
dbg ("sent okay")
return written
else:
raise BluetoothError (_port_return_code_to_str (status))
def l2cap_recv (self, numbytes):
if self.nonblocking and not self.received_data:
# XXX are we supposed to raise an exception, or just return None?
return None
while not self.received_data and self.connected:
self.l2cap_read_msg ()
if self.received_data:
data = self.received_data.pop (0)
if len(data) > numbytes:
self.received_data.insert (0, data[numbytes:])
return data[:numbytes]
else:
return data
def l2cap_close (self):
self._wc.disconnect ()
self._if.deregister ()
self._wc = None
self.bound = False
self.connecting = False
self.listening = False
self.connected = False
# return bt.close (self._sockfd)
def l2cap_getsockname (self):
if not self.bound:
raise BluetoothError ("Socket not bound")
addr = inquirer.get_local_device_address ()
port = self._if.get_psm ()
return addr, port
def l2cap_setblocking (self, blocking):
self.nonblocking = not blocking
self.readsock.setblocking (blocking)
def l2cap_settimeout (self, timeout):
raise NotImplementedError
# if timeout < 0: raise ValueError ("invalid timeout")
#
# if timeout == 0:
# self.setblocking (False)
# else:
# self.setblocking (True)
# # XXX this doesn't look correct
# timeout = 0 # winsock timeout still needs to be set 0
#
# s = bt.settimeout (self._sockfd, timeout)
# self._timeout = timeout
def l2cap_gettimeout (self):
raise NotImplementedError
# if self._blocking and not self._timeout: return None
# return bt.gettimeout (self._sockfd)
def l2cap_fileno (self):
return self.readsock.fileno ()
def l2cap_dup (self):
raise NotImplementedError
# return BluetoothSocket (self._proto, sockfd=bt.dup (self._sockfd))
def l2cap_makefile (self):
raise NotImplementedError
def __l2cap_advertise_service (self, name, service_id,
service_classes, profiles, provider, description,
protocols):
if self._sdpservice is not None:
raise BluetoothError ("Service already advertised")
if not self.listening:
raise BluetoothError ("Socket must be listening before advertised")
if protocols:
raise NotImplementedError ("extra protocols not yet supported in Widcomm stack")
self._sdpservice = _widcomm._WCSdpService ()
if service_classes:
service_classes = [ to_full_uuid (s) for s in service_classes ]
_sdp_checkraise (self._sdpservice.add_service_class_id_list ( \
service_classes))
_sdp_checkraise (self._sdpservice.add_l2cap_protocol_descriptor ( \
self.port))
if profiles:
for uuid, version in profiles:
uuid = to_full_uuid (uuid)
_sdp_checkraise (self._sdpservice.add_profile_descriptor_list (\
uuid, version))
_sdp_checkraise (self._sdpservice.add_service_name (name))
_sdp_checkraise (self._sdpservice.make_public_browseable ())
class DeviceDiscoverer:
def __init__ (self):
raise NotImplementedError
| Lh4cKg/sl4a | python-modules/pybluez/python/bluetooth/widcomm.py | Python | apache-2.0 | 28,737 |
# -*- coding: utf-8 -*-
# © 2016 Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests import common
class TestL10nEsToponyms(common.TransactionCase):
def setUp(self):
super(TestL10nEsToponyms, self).setUp()
self.wizard = self.env['config.es.toponyms'].create({
'name': '',
'state': 'official',
'city_info': 'yes'
})
self.state_model = self.env['res.country.state']
def test_official_state_names(self):
self.wizard.with_context(max_import=10).execute()
state = self.state_model.search([('name', '=', 'Araba')])
self.assertTrue(state)
self.assertEqual(state.code, '01')
def test_spanish_state_names(self):
self.wizard.state = 'spanish'
self.wizard.with_context(max_import=10).execute()
state = self.state_model.search([('name', '=', 'Alava')])
self.assertTrue(state)
def test_both_state_names(self):
self.wizard.state = 'both'
self.wizard.with_context(max_import=10).execute()
state = self.state_model.search([('name', '=', 'Alava / Araba')])
self.assertTrue(state)
| syci/l10n-spain | l10n_es_toponyms/tests/test_l10n_es_toponyms.py | Python | agpl-3.0 | 1,234 |
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import requests
from cinderclient import exceptions
from cinderclient.v2 import client
from cinderclient.tests import utils
class AuthenticateAgainstKeystoneTests(utils.TestCase):
def test_authenticate_success(self):
cs = client.Client("username", "password", "project_id",
"http://localhost:8776/v2", service_type='volumev2')
resp = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
},
"serviceCatalog": [
{
"type": "volumev2",
"endpoints": [
{
"region": "RegionOne",
"adminURL": "http://localhost:8776/v2",
"internalURL": "http://localhost:8776/v2",
"publicURL": "http://localhost:8776/v2",
},
],
},
],
},
}
auth_response = utils.TestResponse({
"status_code": 200,
"text": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'User-Agent': cs.client.USER_AGENT,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
body = {
'auth': {
'passwordCredentials': {
'username': cs.client.user,
'password': cs.client.password,
},
'tenantName': cs.client.projectid,
},
}
token_url = cs.client.auth_url + "/tokens"
mock_request.assert_called_with(
"POST",
token_url,
headers=headers,
data=json.dumps(body),
allow_redirects=True,
**self.TEST_REQUEST_BASE)
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
public_url = endpoints[0]["publicURL"].rstrip('/')
self.assertEqual(cs.client.management_url, public_url)
token_id = resp["access"]["token"]["id"]
self.assertEqual(cs.client.auth_token, token_id)
test_auth_call()
def test_authenticate_tenant_id(self):
cs = client.Client("username", "password",
auth_url="http://localhost:8776/v2",
tenant_id='tenant_id', service_type='volumev2')
resp = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
"tenant": {
"description": None,
"enabled": True,
"id": "tenant_id",
"name": "demo"
} # tenant associated with token
},
"serviceCatalog": [
{
"type": 'volumev2',
"endpoints": [
{
"region": "RegionOne",
"adminURL": "http://localhost:8776/v2",
"internalURL": "http://localhost:8776/v2",
"publicURL": "http://localhost:8776/v2",
},
],
},
],
},
}
auth_response = utils.TestResponse({
"status_code": 200,
"text": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'User-Agent': cs.client.USER_AGENT,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
body = {
'auth': {
'passwordCredentials': {
'username': cs.client.user,
'password': cs.client.password,
},
'tenantId': cs.client.tenant_id,
},
}
token_url = cs.client.auth_url + "/tokens"
mock_request.assert_called_with(
"POST",
token_url,
headers=headers,
data=json.dumps(body),
allow_redirects=True,
**self.TEST_REQUEST_BASE)
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
public_url = endpoints[0]["publicURL"].rstrip('/')
self.assertEqual(cs.client.management_url, public_url)
token_id = resp["access"]["token"]["id"]
self.assertEqual(cs.client.auth_token, token_id)
tenant_id = resp["access"]["token"]["tenant"]["id"]
self.assertEqual(cs.client.tenant_id, tenant_id)
test_auth_call()
def test_authenticate_failure(self):
cs = client.Client("username", "password", "project_id",
"http://localhost:8776/v2")
resp = {"unauthorized": {"message": "Unauthorized", "code": "401"}}
auth_response = utils.TestResponse({
"status_code": 401,
"text": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
test_auth_call()
def test_auth_redirect(self):
cs = client.Client("username", "password", "project_id",
"http://localhost:8776/v2", service_type='volumev2')
dict_correct_response = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
},
"serviceCatalog": [
{
"type": "volumev2",
"endpoints": [
{
"adminURL": "http://localhost:8776/v2",
"region": "RegionOne",
"internalURL": "http://localhost:8776/v2",
"publicURL": "http://localhost:8776/v2/",
},
],
},
],
},
}
correct_response = json.dumps(dict_correct_response)
dict_responses = [
{"headers": {'location': 'http://127.0.0.1:5001'},
"status_code": 305,
"text": "Use proxy"},
# Configured on admin port, cinder redirects to v2.0 port.
# When trying to connect on it, keystone auth succeed by v1.0
# protocol (through headers) but tokens are being returned in
# body (looks like keystone bug). Leaved for compatibility.
{"headers": {},
"status_code": 200,
"text": correct_response},
{"headers": {},
"status_code": 200,
"text": correct_response}
]
responses = [(utils.TestResponse(resp)) for resp in dict_responses]
def side_effect(*args, **kwargs):
return responses.pop(0)
mock_request = mock.Mock(side_effect=side_effect)
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'User-Agent': cs.client.USER_AGENT,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
body = {
'auth': {
'passwordCredentials': {
'username': cs.client.user,
'password': cs.client.password,
},
'tenantName': cs.client.projectid,
},
}
token_url = cs.client.auth_url + "/tokens"
mock_request.assert_called_with(
"POST",
token_url,
headers=headers,
data=json.dumps(body),
allow_redirects=True,
**self.TEST_REQUEST_BASE)
resp = dict_correct_response
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
public_url = endpoints[0]["publicURL"].rstrip('/')
self.assertEqual(cs.client.management_url, public_url)
token_id = resp["access"]["token"]["id"]
self.assertEqual(cs.client.auth_token, token_id)
test_auth_call()
def test_ambiguous_endpoints(self):
cs = client.Client("username", "password", "project_id",
"http://localhost:8776/v2", service_type='volumev2')
resp = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
},
"serviceCatalog": [
{
"adminURL": "http://localhost:8776/v1",
"type": "volumev2",
"name": "Cinder Volume Service",
"endpoints": [
{
"region": "RegionOne",
"internalURL": "http://localhost:8776/v1",
"publicURL": "http://localhost:8776/v1",
},
],
},
{
"adminURL": "http://localhost:8776/v2",
"type": "volumev2",
"name": "Cinder Volume V2",
"endpoints": [
{
"internalURL": "http://localhost:8776/v2",
"publicURL": "http://localhost:8776/v2",
},
],
},
],
},
}
auth_response = utils.TestResponse({
"status_code": 200,
"text": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.AmbiguousEndpoints,
cs.client.authenticate)
test_auth_call()
class AuthenticationTests(utils.TestCase):
def test_authenticate_success(self):
cs = client.Client("username", "password", "project_id", "auth_url")
management_url = 'https://localhost/v2.1/443470'
auth_response = utils.TestResponse({
'status_code': 204,
'headers': {
'x-server-management-url': management_url,
'x-auth-token': '1b751d74-de0c-46ae-84f0-915744b582d1',
},
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'Accept': 'application/json',
'X-Auth-User': 'username',
'X-Auth-Key': 'password',
'X-Auth-Project-Id': 'project_id',
'User-Agent': cs.client.USER_AGENT
}
mock_request.assert_called_with(
"GET",
cs.client.auth_url,
headers=headers,
**self.TEST_REQUEST_BASE)
self.assertEqual(cs.client.management_url,
auth_response.headers['x-server-management-url'])
self.assertEqual(cs.client.auth_token,
auth_response.headers['x-auth-token'])
test_auth_call()
def test_authenticate_failure(self):
cs = client.Client("username", "password", "project_id", "auth_url")
auth_response = utils.TestResponse({"status_code": 401})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
test_auth_call()
def test_auth_automatic(self):
cs = client.Client("username", "password", "project_id", "auth_url")
http_client = cs.client
http_client.management_url = ''
mock_request = mock.Mock(return_value=(None, None))
@mock.patch.object(http_client, 'request', mock_request)
@mock.patch.object(http_client, 'authenticate')
def test_auth_call(m):
http_client.get('/')
m.assert_called()
mock_request.assert_called()
test_auth_call()
def test_auth_manual(self):
cs = client.Client("username", "password", "project_id", "auth_url")
@mock.patch.object(cs.client, 'authenticate')
def test_auth_call(m):
cs.authenticate()
m.assert_called()
test_auth_call()
| tylertian/Openstack | openstack F/python-cinderclient/cinderclient/tests/v2/test_auth.py | Python | apache-2.0 | 14,390 |
""" JobRunningMatchedRatioPolicy
Policy that calculates the efficiency following the formula::
( running ) / ( running + matched + received + checking )
if the denominator is smaller than 10, it does not take any decision.
"""
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
class JobRunningMatchedRatioPolicy(PolicyBase):
"""
The JobRunningMatchedRatioPolicy class is a policy that checks the efficiency of the
jobs according to what is on JobDB.
Evaluates the JobRunningMatchedRatioPolicy results given by the JobCommand.JobCommand
"""
@staticmethod
def _evaluate(commandResult):
"""_evaluate
        efficiency <= 0.5 :: Banned
        0.5 < efficiency <= 0.9 :: Degraded
        otherwise :: Active
"""
result = {"Status": None, "Reason": None}
if not commandResult["OK"]:
result["Status"] = "Error"
result["Reason"] = commandResult["Message"]
return S_OK(result)
commandResult = commandResult["Value"]
if not commandResult:
result["Status"] = "Unknown"
result["Reason"] = "No values to take a decision"
return S_OK(result)
commandResult = commandResult[0]
if not commandResult:
result["Status"] = "Unknown"
result["Reason"] = "No values to take a decision"
return S_OK(result)
running = commandResult["Running"]
matched = commandResult["Matched"]
received = commandResult["Received"]
checking = commandResult["Checking"]
total = running + matched + received + checking
# we want a minimum amount of jobs to take a decision ( at least 10 pilots )
if total < 10:
result["Status"] = "Unknown"
result["Reason"] = "Not enough jobs to take a decision"
return S_OK(result)
efficiency = running / total
if efficiency <= 0.5:
result["Status"] = "Banned"
elif efficiency <= 0.9:
result["Status"] = "Degraded"
else:
result["Status"] = "Active"
result["Reason"] = "Job Running / Matched ratio of %.2f" % efficiency
return S_OK(result)
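

# Illustrative sketch (not part of DIRAC): how the thresholds documented above map raw
# job counts to a status, mirroring _evaluate without the S_OK/result-dict wrapping.
def _example_running_matched_status(running, matched, received, checking):
    total = running + matched + received + checking
    if total < 10:
        return "Unknown"  # not enough jobs to take a decision
    efficiency = float(running) / total
    if efficiency <= 0.5:
        return "Banned"
    if efficiency <= 0.9:
        return "Degraded"
    return "Active"


# e.g. _example_running_matched_status(8, 1, 1, 0) -> "Degraded" (efficiency 0.8)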
| DIRACGrid/DIRAC | src/DIRAC/ResourceStatusSystem/Policy/JobRunningMatchedRatioPolicy.py | Python | gpl-3.0 | 2,249 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os, os.path
import sys
sys.path.append('/home/will/PatientPicker/')
import LoadingTools
# <codecell>
import glob
import pandas as pd
files = glob.glob('/home/will/HIVReportGen/Data/PatientFasta/*LTR.fasta')
#redcap_data = LoadingTools.load_redcap_data()
has_ltr = []
for f in files:
fname = f.rsplit('/',1)[-1]
pid, vnum, _ = fname.split('-')
has_ltr.append({
'PatientID':pid,
'VisitNum':vnum,
'HasLTR':'has_ltr'
})
has_ltr = pd.DataFrame(has_ltr).groupby(['PatientID', 'VisitNum']).first()
# <codecell>
redcap_data = LoadingTools.load_redcap_data().groupby(['Patient ID', 'VisitNum']).first()
# <codecell>
jean_comments = pd.read_csv('/home/will/HIVVariation/ProblemPCRpatientsamples.csv').groupby(['Patient ID', 'VisitNum']).first()
# <codecell>
left, right = jean_comments.align(has_ltr, join='outer')
ltr_comments = left.copy()
ltr_comments['HasLTR'] = ltr_comments['HasLTR'].combine_first(right['HasLTR'])
# <codecell>
red_data = pd.merge(ltr_comments, redcap_data,
left_index=True,
right_index=True,
how='outer')
red_data
# <codecell>
group_key = 'HasLTR'
check_cols = ['Latest CD4 count (cells/uL)', 'Latest CD8 count (cells/uL)', 'LVL']
# <codecell>
red_data['LVL'].describe()
# <codecell>
from statsmodels.graphics.boxplots import violinplot
import numpy as np
import matplotlib.pyplot as plt
red_data['LVL'] = red_data['Latest viral load'].map(np.log10)
fig, axs = plt.subplots(3,1, figsize=(10,10))
for col, ax in zip(check_cols, axs.flatten()):
boxes = []
labels = []
for key, group in red_data.groupby(group_key):
labels.append(key)
print key, len(group[col].dropna().unique())
boxes.append(group[col].dropna())
    #violinplot(boxes, ax=ax, labels=labels)
ax.boxplot(boxes)
# <codecell>
list(red_data.columns)
# <codecell>
| JudoWill/ResearchNotebooks | CheckNoLTR.py | Python | mit | 2,003 |
import LocalVault.Database as data
import hashlib
import json
import re
import pdb
class hm65Vault():
def __init__(self,dbName="hm65VaultTest.db"):
self.DATABASE_NAME = dbName
def addItem(self, vaultItem):
"""
Add vaultItem to the database. Either as a new item or an update
"""
db = self.getDB()
key = self._getItemKey(vaultItem)
self._addNewTags(key, vaultItem)
db_key = "item/" + key
itemObj = {}
itemObj['url'] = vaultItem['url']
itemObj['title'] = vaultItem['title']
itemObj['notes'] = vaultItem['notes']
itemObj['to_read'] = vaultItem['to_read']
itemObj['tags'] = vaultItem['tags']
db.set(db_key, self._serialiseObj(itemObj))
return key
def _getTagList(self, tagListStr):
l = tagListStr.split()
resSet = set(l)
return resSet
def _addNewTags(self, itemKey, vaultItem):
self._removeExistingTags(itemKey)
tagsList = self._getTagList(vaultItem['tags'])
for tag in tagsList:
self._addTag(itemKey, tag)
self._addTagRelationships(tagsList)
def _addTagRelationships(self, tagsList):
db = self.getDB()
tagsStr = self._serialiseObj(list(tagsList))
for it1 in tagsList:
dbkey1 = "tagRel/" + it1
keyPair = db.get(dbkey1)
if keyPair is None:
db.set(dbkey1, tagsStr)
else:
# If a tagRel exists already then update it
tagsList2 = set(tagsList)
oldRelList = self._deserialiseObj(keyPair[1])
tagsList2.update(oldRelList)
tagsStr2 = self._serialiseObj(list(tagsList2))
db.set(dbkey1, tagsStr2)
def _removeExistingTags(self, itemKey):
oldItem = self.getItem(itemKey)
if not(oldItem is None):
db = self.getDB()
tagsList = self._getTagList(oldItem['tags'])
for tag in tagsList:
dbkey1 = "tags/"+tag+"/"+itemKey
dbkey2 = "tagInfo/"+tag+"/count"
db.rm(dbkey1)
count = int(db.get(dbkey2)[1])
count -= 1
db.set(dbkey2, count)
def _addTag(self, itemKey, tag):
db = self.getDB()
dbkey1 = "tags/"+tag+"/"+itemKey
dbkey2 = "tagInfo/"+tag+"/count"
keyPair = db.get(dbkey2)
if not(keyPair is None):
count = int(keyPair[1])
count += 1
else:
count = 1
db.set(dbkey2, count)
db.set(dbkey1,1)
def getItem(self, key):
"""
get vaultItem from the database.
"""
db = self.getDB()
db_key = "item/" + key
keyPair = db.get(db_key)
if not(keyPair is None):
objStr = keyPair[1]
return self._deserialiseObj(objStr)
else:
return None
def _getItemKey(self, vaultItem):
urlToHash = bytes(vaultItem['url'],'utf-8')
hash_object = hashlib.sha256(urlToHash)
hex_dig = hash_object.hexdigest()
return hex_dig
def _serialiseObj(self, obj):
return json.dumps(obj)
def _deserialiseObj(self, objStr):
return json.loads(objStr)
def tagCount(self, tagStr):
db = self.getDB()
dbkey = "tagInfo/"+tagStr+"/count"
keyPair = db.get(dbkey)
if keyPair is None:
return 0
else:
return keyPair[1]
def listItems(self, matchingTagList):
resDict = {}
completeMatches = {}
db = self.getDB()
tagsList = self._getTagList(matchingTagList)
numTags = len(tagsList)
for tag in tagsList:
dbkey1 = "tags/"+tag+"/"
rows = db.list(dbkey1)
self._addToMatchResults(dbkey1, resDict, rows)
for (key,value) in resDict.items():
if value == numTags:
# Only add keys that match all tags
completeMatches[key] = 1
return completeMatches
def _addToMatchResults(self, prefixStr, resDict, rows):
for row in rows:
key = row[0]
itemKey = key[len(prefixStr):]
cc = resDict.get(itemKey, 0)
cc += 1
resDict[itemKey] = cc
def listTagsAnywhere(self, tagPattern, context=None):
tagPattern = r"[^\b]"+tagPattern
return self.listTags(tagPattern, context)
def listTags(self, tagPattern, context=None):
'''
Find tags that match the tagPattern.
context is the scope to search for tags in and is a dictionary of item keys
'''
tagsStr = self._buildSearchString(context)
matchList = self._matchTags(tagPattern)
tidyList = []
for it in matchList:
it = it.strip()
tidyList.append(it)
return tidyList
def _buildSearchString(self, context):
'''
context is list of other tags
'''
resSet = self._getTagsInContext(context)
resList = list(resSet)
resList = sorted(resList)
resStr = ""
for it in resList:
# Not efficient but will do for now
resStr += it + " "
self.TAGS_SEARCH_STR = resStr
return resStr
def _getTagsInContext(self, context):
resSet = set()
if context is None:
db = self.getDB()
dbkey1 = "tagInfo/"
results = db.list(dbkey1)
for (key, _) in results:
tagName = key[len(dbkey1):]
tagName = tagName[:tagName.find('/')]
resSet.add(tagName)
else:
db = self.getDB()
resDict = {}
for tag in context:
dbkey1 = "tagRel/" + tag
results = db.list(dbkey1)
for (key, val) in results:
relTags = self._deserialiseObj(val)
for tagName in relTags:
if tagName==tag:
continue
count = resDict.get(tagName,0)
count += 1
resDict[tagName] = count
contextCount = len(context)
# Results are the ones that matched all context items
for (key,val) in resDict.items():
if val == contextCount:
resSet.add(key)
return resSet
def _matchTags(self, tagPattern):
# search self.TAGS_SEARCH_STR for tags matching tagPattern
tagToSearch = r"\b" + tagPattern + "[^ ]*[ ]"
res = re.findall(tagToSearch, self.TAGS_SEARCH_STR, re.IGNORECASE)
return res
def getDB(self):
db = data.Database(self.DATABASE_NAME)
db.createDatabase()
return db
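

# Illustrative usage sketch (assumption: LocalVault.Database provides the key/value
# store used above; the file name and item fields below are made up for the example).
if __name__ == "__main__":
    vault = hm65Vault(dbName="hm65VaultExample.db")
    key = vault.addItem({
        'url': 'https://example.org/article',
        'title': 'Example article',
        'notes': 'Saved for later',
        'to_read': True,
        'tags': 'python tools',
    })
    print(vault.getItem(key))          # the stored item
    print(vault.listItems('python'))   # keys of items tagged 'python'
    print(vault.tagCount('tools'))     # how many items carry the 'tools' tag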
| humanist1965/hm65Vault | LocalVault/Vault.py | Python | gpl-3.0 | 5,715 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.config.ports import MacPort, MacWK2Port
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.update import Update
class UpdateTest(unittest.TestCase):
def test_update_command_non_interactive(self):
tool = MockTool()
options = MockOptions(non_interactive=True)
step = Update(tool, options)
self.assertEqual(["mock-update-webkit"], step._update_command())
tool._deprecated_port = MacPort()
self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
tool._deprecated_port = MacWK2Port()
self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
def test_update_command_interactive(self):
tool = MockTool()
options = MockOptions(non_interactive=False)
step = Update(tool, options)
self.assertEqual(["mock-update-webkit"], step._update_command())
tool._deprecated_port = MacPort()
self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
tool._deprecated_port = MacWK2Port()
self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
| you21979/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/update_unittest.py | Python | bsd-3-clause | 2,746 |
# Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and set.
#
# get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
# set(key, value) - Set or insert the value if the key is not already present. When the cache reached its capacity, it should invalidate the least recently used item before inserting a new item.
#
class LRUCache:
# @param capacity, an integer
def __init__(self, capacity):
self.capacity = capacity
self.size = 0
self.contents = {}
self.lru = []
# @return an integer
def get(self, key):
if key not in self.contents: return -1
else: self.lru.remove(key); self.lru.append(key)
return self.contents[key]
# @param key, an integer
# @param value, an integer
# @return nothing
def set(self, key, value):
if key in self.contents:
self.contents[key]=value
self.lru.remove(key);
self.lru.append(key)
else: #not exist the key
self.size = self.size + 1
self.contents[key]=value
self.lru.append(key)
if self.size > self.capacity:#remove one
dk = self.lru.pop(0)
del self.contents[dk]
self.size = self.capacity
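
# Illustrative alternative (not part of the original solution): the same behaviour can
# be sketched with collections.OrderedDict, which keeps keys in insertion order and
# makes the "least recently used" bookkeeping O(1) instead of list.remove()'s O(n).
from collections import OrderedDict

class LRUCacheOD:
    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        if key not in self.cache:
            return -1
        value = self.cache.pop(key)  # re-insert to mark as most recently used
        self.cache[key] = value
        return value

    def set(self, key, value):
        if key in self.cache:
            self.cache.pop(key)
        elif len(self.cache) >= self.capacity:
            self.cache.popitem(last=False)  # evict the least recently used entry
        self.cache[key] = value
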
if __name__ == '__main__': #2,[set(2,1),set(1,1),set(2,3),set(4,1),get(1),get(2)]
cache = LRUCache(2)
cache.set(2,1)
cache.set(1,1)
cache.set(2,3)
cache.set(4,1)
print cache.get(1)
print cache.get(2)
| hujiaweibujidao/XSolutions | python/LRUCache.py | Python | apache-2.0 | 1,620 |
class Explanation(object):
    def __init__(self, trees=None):
        '''
        Constructor
        '''
        self._trees = list(trees) if trees else [] # A list of the current trees
        self._pendingSets = [] # A list of each tree's pending set
        for tree in self._trees:
            self._pendingSets.append(tree.getFrontier(True))
self._probRoots = 1.0
self._probChoices = 1.0
self._age = 0
self._probability = -1.0
def myCopy(self):
'''
Deep-copy constructor
'''
newone = type(self)()
for tree in self._trees:
newone._trees.append(tree.myCopy())
newone._probRoots = self._probRoots
        newone._probChoices = self._probChoices
newone._age = self._age
newone._probability = self._probability
return newone
# toString
def __repr__(self):
if self._trees == []:
return "Empty Explanation"
res = "Explanation ("
res += str(self.getExpProbability())
res += "):\n"
treeNums = []
for tree in self._trees:
treeNums.append(tree.getID())
res += tree.reprWithParams(depth="\t")
res += "\tfrontier: \n"
res += "\t"+str(tree.getFrontier(withIndices=True))
res += "\n"
res+="Trees:"
res+=str(treeNums)
res+="\n"
return res
#Prints the Explanation's probability
def printExpProbability(self):
print "roots=",self._probRoots
print "probChoices=",self._probChoices
psIndex=0
for PS in self._pendingSets:
if len(PS)!=0:
print "ps at ",psIndex,"=",len(PS)
psIndex+=1
#---------------------------Getters and Setters -----------------------------------------------
#Returns all tress in the explanation (list of trees)
def getTrees(self):
return self._trees
#Returns the index-th tree in the explanation (tree)
def getTree(self, index=0):
if len(self._trees)<index:
return self._trees[-1]
else:
return self._trees[index]
#Replaces the index-th tree in the explanation with newTree (void)
def setTree(self, newTree, index=-1):
if len(self._trees)<index or -1==index:
self._trees.append(newTree)
self._pendingSets.append(newTree.getFrontier(True))
else:
self._trees[index]=newTree
#Returns a list of all the trees in the explanation (list of ints)
def getTreeIDs(self):
treeNums = []
for tree in self._trees:
treeNums.append(tree.getID())
return treeNums
#Removes the index-th tree from the list of trees
def removeTree(self, index):
if not (len(self._trees)<index or -1==index):
self._trees.pop(index)
#Returns the probability of this explanation (double)
def getExpProbability(self):
if self._probability != -1:
return self._probability
else:
res = 1.0
res *= self._probRoots
res *= self._probChoices
for PS in self._pendingSets:
if len(PS)!=0:
res *= 1.0/len(PS)
return res
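    # Illustrative note (not from the original source): with _probRoots = 0.5,
    # _probChoices = 0.25 and two non-empty pending sets of sizes 2 and 4,
    # getExpProbability() above returns 0.5 * 0.25 * (1/2) * (1/4) = 0.015625,
    # unless setExpProbability() has overridden the value (e.g. after normalization).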
#Sets the probability of this explanation to be prob (void)
#This function is mostly used for normalization of probabilities
def setExpProbability(self, prob):
self._probability = prob
#Returns the size of the frontier (int)
def getFrontierSize(self):
res = 0
for tree in self._trees:
res += len(tree.getFrontier(False))
return res
#Returns the average size of a tree in the explanation (int)
def getAvgTreeSize(self):
res = 0.0
amoutOfTrees=0
for tree in self._trees:
res += tree.getDepth()
amoutOfTrees+=1
return res / len(self._trees)
#Returns the number of trees in the explanation (int)
def getSize(self):
return len(self.getTrees())
#Increment the age counter (void)
def incrementAge(self):
self._age += 1
#Resets the age counter (void)
def resetAge(self):
self._age = 0
#Returns the age of this explanation (integer)
def getAge(self):
return self._age
#-------------------------Probability Related Functions -----------------------------------------------------
#Assisting function to help calculate probabilities correctly - because a new tree might be added in a later step
# we need to remember that it was an optional tree to fulfill in previous steps.
def backpatchPS(self, tAddition):
for PS in self._pendingSets:
PS.append(tAddition)
def updateLocalProbChoices(self, tree):
self._probChoices *= tree.getProbability()
def updateLocalProbRoots(self, newRootProbability):
self._probRoots *= newRootProbability
# End of Explanation.py | ReuthMirsky/SPR | Source/Explanation.py | Python | gpl-3.0 | 5,291 |
#
# Walldo - A wallpaper downloader
# Copyright (C) 2012 Fernando Castillo
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
from walldo.parser import Parser;
class ParserTestCase(unittest.TestCase):
lines = ['<select class="select" style="margin: 0 2px 0 0; margin-top: 4px; float: left; width: 145px; max-width: 145px;" name="resolution" onChange="javascript:imgload(\'ithilien\', this,\'2949\')">']
expected = ['/wallpaper/7yz4ma1/2949_ithilien_1024x768.jpg']
def setUp(self):
self.parser = Parser()
def testParse(self):
current = self.parser.parse(self.lines, '1024x768')
for i in range(len(current)):
self.assertEquals(self.expected[i], current[i], 'Entry incorrect')
| skibyte/walldo | walldo/parsertestcase.py | Python | gpl-2.0 | 1,409 |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from st2common.runners.base_action import Action
from st2client.models.action_alias import ActionAliasMatch
from st2client.models.aliasexecution import ActionAliasExecution
from st2client.commands.action import (
LIVEACTION_STATUS_REQUESTED,
LIVEACTION_STATUS_SCHEDULED,
LIVEACTION_STATUS_RUNNING,
LIVEACTION_STATUS_CANCELING,
)
from st2client.client import Client
class ExecuteActionAliasAction(Action):
def __init__(self, config=None):
super(ExecuteActionAliasAction, self).__init__(config=config)
api_url = os.environ.get("ST2_ACTION_API_URL", None)
token = os.environ.get("ST2_ACTION_AUTH_TOKEN", None)
self.client = Client(api_url=api_url, token=token)
def run(self, text, source_channel=None, user=None):
alias_match = ActionAliasMatch()
alias_match.command = text
alias, representation = self.client.managers["ActionAlias"].match(alias_match)
execution = ActionAliasExecution()
execution.name = alias.name
execution.format = representation
execution.command = text
execution.source_channel = source_channel # ?
execution.notification_channel = None
execution.notification_route = None
execution.user = user
action_exec_mgr = self.client.managers["ActionAliasExecution"]
execution = action_exec_mgr.create(execution)
self._wait_execution_to_finish(execution.execution["id"])
return execution.execution["id"]
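    # Illustrative call (the command text, channel and user below are made up and must
    # match an action alias registered on the StackStorm instance):
    #   run(text="run date on localhost", source_channel="chatops", user="stanley")
    # returns the id of the finished alias execution.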
def _wait_execution_to_finish(self, execution_id):
pending_statuses = [
LIVEACTION_STATUS_REQUESTED,
LIVEACTION_STATUS_SCHEDULED,
LIVEACTION_STATUS_RUNNING,
LIVEACTION_STATUS_CANCELING,
]
action_exec_mgr = self.client.managers["LiveAction"]
execution = action_exec_mgr.get_by_id(execution_id)
while execution.status in pending_statuses:
time.sleep(1)
execution = action_exec_mgr.get_by_id(execution_id)
| StackStorm/st2 | contrib/chatops/actions/match_and_execute.py | Python | apache-2.0 | 2,668 |
"""Tests for CP2K calculator interface."""
import os
import numpy as np
import pytest
from phonopy.interface.cp2k import read_cp2k
from phonopy.interface.phonopy_yaml import read_cell_yaml
data_dir = os.path.dirname(os.path.abspath(__file__))
CP2K_INPUT_TOOLS_AVAILABLE = True
try:
import cp2k_input_tools # noqa F401
except ImportError:
CP2K_INPUT_TOOLS_AVAILABLE = False
@pytest.mark.skipif(
not CP2K_INPUT_TOOLS_AVAILABLE, reason="not found cp2k-input-tools package"
)
def test_read_cp2k():
"""Test read_cp2k."""
cell, _ = read_cp2k(os.path.join(data_dir, "Si-CP2K.inp"))
cell_ref = read_cell_yaml(os.path.join(data_dir, "Si-CP2K.yaml"))
assert (np.abs(cell.cell - cell_ref.cell) < 1e-5).all()
diff_pos = cell.scaled_positions - cell_ref.scaled_positions
diff_pos -= np.rint(diff_pos)
assert (np.abs(diff_pos) < 1e-5).all()
for s, s_r in zip(cell.symbols, cell_ref.symbols):
assert s == s_r
| atztogo/phonopy | test/interface/test_CP2K.py | Python | bsd-3-clause | 954 |
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from jinja2 import Environment, FileSystemLoader, Template
from ochopod.bindings.ec2.kubernetes import Pod
from ochopod.models.piped import Actor as Piped
from ochopod.models.reactive import Actor as Reactive
from os.path import join, dirname
logger = logging.getLogger('ochopod')
if __name__ == '__main__':
class Model(Reactive):
damper = 10.0
sequential = True
class Strategy(Piped):
cwd = '/opt/zookeeper-3.4.6'
strict = True
def configure(self, cluster):
#
# - assign the server/id bindings to enable clustering
# - lookup the port mappings for each pod (TCP 2888 and 3888)
#
peers = {}
local = cluster.index + 1
for n, key in enumerate(sorted(cluster.pods.keys()), 1):
pod = cluster.pods[key]
suffix = '%d:%d' % (pod['ports']['2888'], pod['ports']['3888'])
peers[n] = '%s:%s' % (pod['ip'], suffix)
            #
            # - set "this" node as 0.0.0.0:2888:3888
# - i've observed weird behavior with docker 1.3 where zk can't bind the address if specified
#
peers[local] = '0.0.0.0:2888:3888'
logger.debug('local id #%d, peer configuration ->\n%s' %
(local, '\n'.join(['\t#%d\t-> %s' % (n, mapping) for n, mapping in peers.items()])))
#
# - set our server index
#
template = Template('{{id}}')
with open('/var/lib/zookeeper/myid', 'wb') as f:
f.write(template.render(id=local))
#
# - render the zk config template with our peer bindings
#
env = Environment(loader=FileSystemLoader(join(dirname(__file__), 'templates')))
template = env.get_template('zoo.cfg')
mapping = \
{
'peers': peers
}
with open('%s/conf/zoo.cfg' % self.cwd, 'wb') as f:
f.write(template.render(mapping))
return 'bin/zkServer.sh start-foreground', {}
    Pod().boot(Strategy, model=Model)
| autodesk-cloud/ochonetes | images/zookeeper/resources/pod/pod.py | Python | apache-2.0 | 2,784 |
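# --- Editor's sketch for the zookeeper pod above (the template text is assumed,
# since the real templates/zoo.cfg file is not included in this dump) ---
# It shows how the peer mapping produced by configure() typically renders into the
# server.N entries that ZooKeeper expects.
from jinja2 import Template

_ZOO_CFG_EXAMPLE = Template(
    "tickTime=2000\n"
    "dataDir=/var/lib/zookeeper\n"
    "clientPort=2181\n"
    "{% for n, endpoint in peers.items() %}"
    "server.{{ n }}={{ endpoint }}\n"
    "{% endfor %}"
)

if __name__ == '__main__':
    peers = {
        1: '10.0.0.11:31888:32888',   # remote pod, using its mapped ports
        2: '0.0.0.0:2888:3888',       # the local pod binds the wildcard address
        3: '10.0.0.13:31888:32888',
    }
    print(_ZOO_CFG_EXAMPLE.render(peers=peers))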
from __future__ import absolute_import
import warnings
from datetime import datetime
from pathlib import Path
import six
import netCDF4
import numpy as np
import pytest
import yaml
from click.testing import CliRunner
from affine import Affine
import datacube.scripts.cli_app
from datacube.model import GeoBox, CRS
from datacube.utils import read_documents
from .conftest import EXAMPLE_LS5_DATASET_ID
PROJECT_ROOT = Path(__file__).parents[1]
CONFIG_SAMPLES = PROJECT_ROOT / 'docs/config_samples/'
LS5_SAMPLES = CONFIG_SAMPLES / 'ga_landsat_5/'
LS5_MATCH_RULES = CONFIG_SAMPLES / 'match_rules' / 'ls5_scenes.yaml'
LS5_NBAR_STORAGE_TYPE = LS5_SAMPLES / 'ls5_geographic.yaml'
LS5_NBAR_ALBERS_STORAGE_TYPE = LS5_SAMPLES / 'ls5_albers.yaml'
TEST_STORAGE_SHRINK_FACTOR = 100
TEST_STORAGE_NUM_MEASUREMENTS = 2
GEOGRAPHIC_VARS = ('latitude', 'longitude')
PROJECTED_VARS = ('x', 'y')
EXPECTED_STORAGE_UNIT_DATA_SHAPE = (1, 40, 40)
EXPECTED_NUMBER_OF_STORAGE_UNITS = 12
JSON_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
COMPLIANCE_CHECKER_NORMAL_LIMIT = 2
@pytest.mark.usefixtures('default_metadata_type',
'indexed_ls5_scene_dataset_type')
def test_full_ingestion(global_integration_cli_args, index, example_ls5_dataset, ls5_nbar_ingest_config):
opts = list(global_integration_cli_args)
opts.extend(
[
'-v',
'dataset',
'add',
'--auto-match',
str(example_ls5_dataset)
]
)
result = CliRunner().invoke(
datacube.scripts.cli_app.cli,
opts,
catch_exceptions=False
)
print(result.output)
assert not result.exception
assert result.exit_code == 0
ensure_dataset_is_indexed(index)
config_path, config = ls5_nbar_ingest_config
opts = list(global_integration_cli_args)
opts.extend(
[
'-v',
'ingest',
'--config-file',
str(config_path)
]
)
result = CliRunner().invoke(
datacube.scripts.cli_app.cli,
opts,
catch_exceptions=False
)
print(result.output)
assert not result.exception
assert result.exit_code == 0
datasets = index.datasets.search_eager(product='ls5_nbar_albers')
assert len(datasets) > 0
assert datasets[0].managed
ds_path = str(datasets[0].local_path)
with netCDF4.Dataset(ds_path) as nco:
check_data_shape(nco)
check_grid_mapping(nco)
check_cf_compliance(nco)
check_dataset_metadata_in_storage_unit(nco, example_ls5_dataset)
check_attributes(nco, config['global_attributes'])
name = config['measurements'][0]['name']
check_attributes(nco[name], config['measurements'][0]['attrs'])
check_open_with_xarray(ds_path)
check_open_with_api(index)
def ensure_dataset_is_indexed(index):
datasets = index.datasets.search_eager(product='ls5_nbar_scene')
assert len(datasets) == 1
assert datasets[0].id == EXAMPLE_LS5_DATASET_ID
def check_grid_mapping(nco):
assert 'grid_mapping' in nco.variables['blue'].ncattrs()
grid_mapping = nco.variables['blue'].grid_mapping
assert grid_mapping in nco.variables
assert 'GeoTransform' in nco.variables[grid_mapping].ncattrs()
assert 'spatial_ref' in nco.variables[grid_mapping].ncattrs()
def check_data_shape(nco):
assert nco.variables['blue'].shape == EXPECTED_STORAGE_UNIT_DATA_SHAPE
def check_cf_compliance(dataset):
try:
from compliance_checker.runner import CheckSuite, ComplianceChecker
except ImportError:
warnings.warn('compliance_checker unavailable, skipping NetCDF-CF Compliance Checks')
return
cs = CheckSuite()
cs.load_all_available_checkers()
score_groups = cs.run(dataset, 'cf')
groups = ComplianceChecker.stdout_output(cs, score_groups, verbose=1, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
assert cs.passtree(groups, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
def check_attributes(obj, attrs):
for k, v in attrs.items():
assert k in obj.ncattrs()
assert obj.getncattr(k) == v
def check_dataset_metadata_in_storage_unit(nco, dataset_dir):
assert len(nco.variables['dataset']) == 1 # 1 time slice
stored_metadata = nco.variables['dataset'][0]
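    # Depending on how the variable was stored, the metadata document may come back
    # as a char array rather than a str; normalise it before YAML-parsing.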
if not isinstance(stored_metadata, str):
stored_metadata = netCDF4.chartostring(stored_metadata)
stored_metadata = str(np.char.decode(stored_metadata))
ds_filename = dataset_dir / 'agdc-metadata.yaml'
stored = yaml.safe_load(stored_metadata)
[(_, original)] = read_documents(ds_filename)
assert len(stored['lineage']['source_datasets']) == 1
assert next(iter(stored['lineage']['source_datasets'].values())) == original
def check_open_with_xarray(file_path):
import xarray
xarray.open_dataset(str(file_path))
def check_open_with_api(index):
from datacube.api.core import Datacube
datacube = Datacube(index=index)
input_type_name = 'ls5_nbar_albers'
input_type = datacube.index.datasets.types.get_by_name(input_type_name)
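    # 200x200-pixel tile at 25 m resolution in EPSG:3577 (GDA94 / Australian Albers);
    # the Affine maps pixel indices to projected coordinates (editor's note).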
geobox = GeoBox(200, 200, Affine(25, 0.0, 1500000, 0.0, -25, -3900000), CRS('EPSG:3577'))
observations = datacube.product_observations(product='ls5_nbar_albers', geopolygon=geobox.extent)
sources = datacube.product_sources(observations, lambda ds: ds.center_time, 'time',
'seconds since 1970-01-01 00:00:00')
data = datacube.product_data(sources, geobox, input_type.measurements.values())
assert data.blue.shape == (1, 200, 200)
| ceos-seo/Data_Cube_v2 | agdc-v2/integration_tests/test_full_ingestion.py | Python | apache-2.0 | 5,552 |
# This file is part of Checkbox.
#
# Copyright 2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.providers.v1` -- Implementation of V1 provider
==================================================================
"""
import logging
import os
import io
from plainbox.abc import IProvider1, IProviderBackend1
from plainbox.impl.applogic import WhiteList
from plainbox.impl.job import JobDefinition
from plainbox.impl.plugins import PlugInCollection
from plainbox.impl.rfc822 import load_rfc822_records
logger = logging.getLogger("plainbox.providers.v1")
class Provider1(IProvider1, IProviderBackend1):
"""
A v1 provider implementation.
This base class implements a checkbox-like provider object. Subclasses are
only required to implement a single method that designates the base
location for all other data.
"""
def __init__(self, base_dir, name, description):
"""
Initialize the provider with the associated base directory.
All of the typical v1 provider data is relative to this directory. It
can be customized by subclassing and overriding the particular methods
of the IProviderBackend1 class but that should not be necessary in
normal operation.
"""
self._base_dir = base_dir
self._name = name
self._description = description
@property
def name(self):
"""
name of this provider
"""
return self._name
@property
def description(self):
"""
description of this provider
"""
return self._description
@property
def jobs_dir(self):
"""
Return an absolute path of the jobs directory
"""
return os.path.join(self._base_dir, "jobs")
@property
def scripts_dir(self):
"""
Return an absolute path of the scripts directory
.. note::
The scripts may not work without setting PYTHONPATH and
CHECKBOX_SHARE.
"""
return os.path.join(self._base_dir, "scripts")
@property
def whitelists_dir(self):
"""
Return an absolute path of the whitelist directory
"""
return os.path.join(self._base_dir, "data", "whitelists")
@property
def CHECKBOX_SHARE(self):
"""
Return the required value of CHECKBOX_SHARE environment variable.
.. note::
This variable is only required by one script.
It would be nice to remove this later on.
"""
return self._base_dir
@property
def extra_PYTHONPATH(self):
"""
Return additional entry for PYTHONPATH, if needed.
This entry is required for CheckBox scripts to import the correct
CheckBox python libraries.
.. note::
The result may be None
"""
return None
@property
def extra_PATH(self):
"""
Return additional entry for PATH
This entry is required to lookup CheckBox scripts.
"""
# NOTE: This is always the script directory. The actual logic for
# locating it is implemented in the property accessors.
return self.scripts_dir
def get_builtin_whitelists(self):
logger.debug("Loading built-in whitelists...")
whitelist_list = []
for name in os.listdir(self.whitelists_dir):
if name.endswith(".whitelist"):
whitelist_list.append(
WhiteList.from_file(os.path.join(
self.whitelists_dir, name)))
return sorted(whitelist_list, key=lambda whitelist: whitelist.name)
def get_builtin_jobs(self):
logger.debug("Loading built-in jobs...")
job_list = []
for name in os.listdir(self.jobs_dir):
if name.endswith(".txt") or name.endswith(".txt.in"):
job_list.extend(
self.load_jobs(
os.path.join(self.jobs_dir, name)))
return sorted(job_list, key=lambda job: job.name)
def load_jobs(self, somewhere):
"""
Load job definitions from somewhere
"""
if isinstance(somewhere, str):
# Load data from a file with the given name
filename = somewhere
with open(filename, 'rt', encoding='UTF-8') as stream:
return self.load_jobs(stream)
if isinstance(somewhere, io.TextIOWrapper):
stream = somewhere
logger.debug("Loading jobs definitions from %r...", stream.name)
record_list = load_rfc822_records(stream)
job_list = []
for record in record_list:
job = JobDefinition.from_rfc822_record(record)
job._provider = self
logger.debug("Loaded %r", job)
job_list.append(job)
return job_list
else:
raise TypeError(
"Unsupported type of 'somewhere': {!r}".format(
type(somewhere)))
class DummyProvider1(IProvider1, IProviderBackend1):
"""
Dummy provider useful for creating isolated test cases
"""
def __init__(self, job_list=None, whitelist_list=None, **extras):
self._job_list = job_list or []
self._whitelist_list = whitelist_list or []
self._extras = extras
self._patch_provider_field()
def _patch_provider_field(self):
# NOTE: each v1 job needs a _provider attribute that points to the
# provider. Since many tests use make_job() which does not set it for
# obvious reasons it needs to be patched-in.
for job in self._job_list:
if job._provider is None:
job._provider = self
@property
def name(self):
return self._extras.get('name', "dummy")
@property
def description(self):
return self._extras.get(
'description', "A dummy provider useful for testing")
@property
def CHECKBOX_SHARE(self):
return self._extras.get('CHECKBOX_SHARE', "")
@property
def extra_PYTHONPATH(self):
return self._extras.get("PYTHONPATH")
@property
def extra_PATH(self):
return self._extras.get("PATH", "")
def get_builtin_whitelists(self):
return self._whitelist_list
def get_builtin_jobs(self):
return self._job_list
# Collection of all providers
all_providers = PlugInCollection('plainbox.provider.v1')
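# --- Editor's sketch (not part of the original module) ---
# DummyProvider1 is self-contained, so it can be exercised directly; this just
# shows the properties a provider exposes to the rest of plainbox.
if __name__ == "__main__":
    _dummy = DummyProvider1(name="example", description="throw-away provider")
    print(_dummy.name, "-", _dummy.description)
    print("jobs:", _dummy.get_builtin_jobs())
    print("whitelists:", _dummy.get_builtin_whitelists())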
| jds2001/ocp-checkbox | plainbox/plainbox/impl/providers/v1.py | Python | gpl-3.0 | 7,162 |
##############################################################################################
# Copyright 2014-2015 Cloud Media Sdn. Bhd.
#
# This file is part of Xuan Application Development SDK.
#
# Xuan Application Development SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xuan Application Development SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xuan Application Development SDK. If not, see <http://www.gnu.org/licenses/>.
##############################################################################################
from com.cloudMedia.theKuroBox.sdk.paramTypes.kbxNumber import KBXNumberType
from com.cloudMedia.theKuroBox.sdk.paramTypes.kbxParamWrapper import KBXParamWrapper
from com.cloudMedia.theKuroBox.sdk.util.logger import Logger
class KBXTimeType(KBXNumberType):
TYPE_NAME = "kbxTime"
def __init__(self, kbxParamIsRequired=True):
pass
def cast(self, value):
pass
class DTO(int):
@staticmethod
def build(value):
pass
def get_date_time_obj(self):
pass
def get_second(self):
pass
def get_minute(self):
pass
def get_hour(self):
pass
class KBXTime(KBXTimeType, KBXParamWrapper):
def __init__(self, kbxParamName, kbxParamIsRequired=True, **kbxParamProps):
pass
| TheStackBox/xuansdk | SDKLibrary/com/cloudMedia/theKuroBox/sdk/paramTypes/kbxTime.py | Python | gpl-3.0 | 1,860 |
from elixir.models import *
from lxml import etree
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
from rest_framework.decorators import renderer_classes
from django.views.decorators.csrf import csrf_exempt
from elixirapp import settings
from rest_framework.response import Response
from rest_framework.renderers import BaseRenderer
from rest_framework.views import APIView
class GoogleSitemapRenderer(BaseRenderer):
media_type = 'application/xml'
format = 'xml'
charset = 'iso-8859-1'
def render(self, data, media_type=None, renderer_context=None):
root = ET.Element('urlset')
root.attrib['xmlns'] = 'http://www.sitemaps.org/schemas/sitemap/0.9'
root.attrib['xmlns:xsi'] = 'http://www.w3.org/2001/XMLSchema-instance'
root.attrib['xsi:schemaLocation'] = 'http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd'
# add landing page
url = ET.SubElement(root, 'url')
loc = ET.SubElement(url, 'loc')
loc.text = escape(settings.URL_FRONT)
# add pages for each resource
for el in Resource.objects.filter(visibility=1):
url = ET.SubElement(root, 'url')
loc = ET.SubElement(url, 'loc')
loc.text = escape(settings.URL_FRONT + el.biotoolsID)
lastmod = ET.SubElement(url, 'lastmod')
lastmod.text = el.lastUpdate.isoformat()
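        # ET.tostring() returns bytes; DRF writes them out using the charset declared above.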
return ET.tostring(root)
class Sitemap(APIView):
"""
Generate the sitemap
"""
renderer_classes = (GoogleSitemapRenderer,)
def get(self, request, format=None):
        return Response()
| bio-tools/biotoolsregistry | backend/elixir/sitemap.py | Python | gpl-3.0 | 1,660 |
from egasub.submission.submit import submittable_status, submit_dataset, perform_submission
from egasub.ega.entities.ega_enums import EgaEnums
import pytest
import os
import shutil
from egasub.submission.submittable import Unaligned, Variation, Alignment
#from mock import patch, Mock
import ftplib
def test_submittable_status():
assert submittable_status("fail") == None
assert submittable_status("tests/submission/test_submit.py") == ['']
def test_submit(ctx, mock_server):
with pytest.raises(Exception):
perform_submission(ctx, '///')
with pytest.raises(AttributeError):
submit_dataset(ctx)
ctx.obj['SETTINGS']['ega_submitter_account'] = 'test_account'
ctx.obj['SETTINGS']['ega_submitter_password'] = 'test_password'
ctx.obj['SETTINGS']['ega_policy_id'] = 'test_id'
ctx.obj['CURRENT_DIR'] = os.path.join(os.getcwd(),'tests/data/workspace/submittable/')
ctx.obj['CURRENT_DIR_TYPE'] = "unaligned"
ctx.obj['EGA_ENUMS'] = EgaEnums()
ctx.obj['log_file'] = 'tests/data/workspace/submittable/test_u/.status'
perform_submission(ctx, '///')
initial_directory = os.getcwd()
os.chdir('tests/data/workspace/submittable/')
unaligned = Unaligned('test_u')
variation = Variation('test_v')
alignment = Alignment('test_a')
unaligned.record_object_status('sample', True, "test", "test")
variation.record_object_status('sample', True, "test", "test")
alignment.record_object_status('sample', True, "test", "test")
with pytest.raises(Exception):
#mock_ftp = Mock()
perform_submission(ctx, ['test_u', 'test_a', 'test_v'])
with pytest.raises(AttributeError):
submit_dataset(ctx)
ctx.obj['SETTINGS']['ega_submitter_account'] = None
ctx.obj['SETTINGS']['ega_submitter_password'] = None
ctx.obj['SETTINGS']['ega_policy_id'] = None
ctx.obj['CURRENT_DIR'] = None
ctx.obj['EGA_ENUMS'] = None
shutil.rmtree(os.path.join(os.getcwd(), 'test_u/.status'))
shutil.rmtree(os.path.join(os.getcwd(), 'test_a/.status'))
shutil.rmtree(os.path.join(os.getcwd(), 'test_v/.status'))
os.chdir(initial_directory)
def test_submit_dataset(ctx):
ctx.obj['SETTINGS']['ega_submitter_account'] = 'test_account'
ctx.obj['SETTINGS']['ega_submitter_password'] = 'test_password'
ctx.obj['SETTINGS']['ega_policy_id'] = 'test_id'
ctx.obj['CURRENT_DIR'] = os.path.join(os.getcwd(), 'tests/data/workspace/submitted/')
ctx.obj['CURRENT_DIR_TYPE'] = "unaligned"
ctx.obj['EGA_ENUMS'] = EgaEnums()
ctx.obj['log_file'] = 'tests/data/workspace/submitted/test_u/.status'
initial_directory = os.getcwd()
os.chdir('tests/data/workspace/submitted/')
#unaligned = Unaligned('test_u')
#variation = Variation('test_v')
#alignment = Alignment('test_a')
with pytest.raises(IOError):
submit_dataset(ctx)
ctx.obj['SETTINGS']['ega_submitter_account'] = None
ctx.obj['SETTINGS']['ega_submitter_password'] = None
ctx.obj['SETTINGS']['ega_policy_id'] = None
ctx.obj['CURRENT_DIR'] = None
ctx.obj['CURRENT_DIR_TYPE'] = None
ctx.obj['EGA_ENUMS'] = None
os.chdir(initial_directory)
pass
| icgc-dcc/egasub | tests/submission/test_submit.py | Python | gpl-3.0 | 3,186 |