repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
hguemar/cinder | cinder/api/v2/volumes.py | 1 | 18236 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes api."""
import ast
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v2.views import volumes as volume_views
from cinder.api import xmlutil
from cinder import consistencygroup as consistencygroupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder.image import glance
from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
SCHEDULER_HINTS_NAMESPACE =\
"http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
def make_attachment(elem):
elem.set('id')
elem.set('server_id')
elem.set('host_name')
elem.set('volume_id')
elem.set('device')
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availability_zone')
elem.set('created_at')
elem.set('name')
elem.set('bootable')
elem.set('description')
elem.set('volume_type')
elem.set('snapshot_id')
elem.set('source_volid')
elem.set('consistencygroup_id')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V2, 'atom': xmlutil.XMLNS_ATOM}
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_scheduler_hints(self, volume_node):
"""Marshal the scheduler hints attribute of a parsed request."""
node =\
self.find_first_child_named_in_namespace(volume_node,
SCHEDULER_HINTS_NAMESPACE,
"scheduler_hints")
if node:
scheduler_hints = {}
for child in self.extract_elements(node):
scheduler_hints.setdefault(child.nodeName, [])
value = self.extract_text(child).strip()
scheduler_hints[child.nodeName].append(value)
return scheduler_hints
else:
return None
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
volume = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['name', 'description', 'size',
'volume_type', 'availability_zone', 'imageRef',
'image_id', 'snapshot_id', 'source_volid',
'consistencygroup_id']
for attr in attributes:
if volume_node.getAttribute(attr):
volume[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
volume['metadata'] = self.extract_metadata(metadata_node)
scheduler_hints = self._extract_scheduler_hints(volume_node)
if scheduler_hints:
volume['scheduler_hints'] = scheduler_hints
return volume
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = utils.safe_minidom_parse_string(string)
volume = self._extract_volume(dom)
return {'body': {'volume': volume}}
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
_view_builder_class = volume_views.ViewBuilder
def __init__(self, ext_mgr):
self.volume_api = cinder_volume.API()
self.consistencygroup_api = consistencygroupAPI.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['cinder.context']
try:
vol = self.volume_api.get(context, id, viewable_admin_meta=True)
req.cache_db_volume(vol)
except exception.NotFound:
msg = _("Volume could not be found")
raise exc.HTTPNotFound(explanation=msg)
utils.add_visible_admin_metadata(vol)
return self._view_builder.detail(req, vol)
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['cinder.context']
LOG.info(_LI("Delete volume with id: %s"), id, context=context)
try:
volume = self.volume_api.get(context, id)
self.volume_api.delete(context, volume)
except exception.NotFound:
msg = _("Volume could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.VolumeAttached:
msg = _("Volume cannot be deleted while in attached state")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._get_volumes(req, is_detail=False)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._get_volumes(req, is_detail=True)
def _get_volumes(self, req, is_detail):
"""Returns a list of volumes, transformed through view builder."""
context = req.environ['cinder.context']
params = req.params.copy()
marker = params.pop('marker', None)
limit = params.pop('limit', None)
sort_key = params.pop('sort_key', 'created_at')
sort_dir = params.pop('sort_dir', 'desc')
params.pop('offset', None)
filters = params
utils.remove_invalid_filter_options(context,
filters,
self._get_volume_filter_options())
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in filters:
filters['display_name'] = filters['name']
del filters['name']
for k, v in filters.iteritems():
try:
filters[k] = ast.literal_eval(v)
except (ValueError, SyntaxError):
LOG.debug('Could not evaluate value %s, assuming string', v)
volumes = self.volume_api.get_all(context, marker, limit, sort_key,
sort_dir, filters,
viewable_admin_meta=True)
volumes = [dict(vol.iteritems()) for vol in volumes]
for volume in volumes:
utils.add_visible_admin_metadata(volume)
limited_list = common.limited(volumes, req)
req.cache_db_volumes(limited_list)
if is_detail:
volumes = self._view_builder.detail_list(req, limited_list)
else:
volumes = self._view_builder.summary_list(req, limited_list)
return volumes
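    # Illustrative filter handling for _get_volumes() above -- a hedged
    # sketch, not part of the original code.  A request such as
    #   GET /v2/{tenant_id}/volumes/detail?name=vol1&metadata={'k': 'v'}
    # ends up as filters = {'display_name': 'vol1', 'metadata': {'k': 'v'}}
    # after the name alias swap and the ast.literal_eval() pass above.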
def _image_uuid_from_ref(self, image_ref, context):
# If the image ref was generated by nova api, strip image_ref
# down to an id.
image_uuid = None
try:
image_uuid = image_ref.split('/').pop()
except AttributeError:
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
image_service = glance.get_default_image_service()
# First see if this is an actual image ID
if uuidutils.is_uuid_like(image_uuid):
try:
image = image_service.show(context, image_uuid)
if 'id' in image:
return image['id']
except Exception:
# Pass and see if there is a matching image name
pass
# Could not find by ID, check if it is an image name
try:
params = {'filters': {'name': image_ref}}
images = list(image_service.detail(context, **params))
if len(images) > 1:
msg = _("Multiple matches found for '%s', use an ID to be more"
" specific.") % image_ref
raise exc.HTTPConflict(msg)
for img in images:
return img['id']
except Exception:
# Pass and let default not found error handling take care of it
pass
msg = _("Invalid image identifier or unable to "
"access requested image.")
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
if not self.is_valid_body(body, 'volume'):
msg = _("Missing required element '%s' in request body") % 'volume'
raise exc.HTTPBadRequest(explanation=msg)
LOG.debug('Create volume request body: %s', body)
context = req.environ['cinder.context']
volume = body['volume']
kwargs = {}
# NOTE(thingee): v2 API allows name instead of display_name
if volume.get('name'):
volume['display_name'] = volume.get('name')
del volume['name']
# NOTE(thingee): v2 API allows description instead of
# display_description
if volume.get('description'):
volume['display_description'] = volume.get('description')
del volume['description']
if 'image_id' in volume:
volume['imageRef'] = volume.get('image_id')
del volume['image_id']
req_volume_type = volume.get('volume_type', None)
if req_volume_type:
try:
if not uuidutils.is_uuid_like(req_volume_type):
kwargs['volume_type'] = \
volume_types.get_volume_type_by_name(
context, req_volume_type)
else:
kwargs['volume_type'] = volume_types.get_volume_type(
context, req_volume_type)
except exception.VolumeTypeNotFound:
msg = _("Volume type not found.")
raise exc.HTTPNotFound(explanation=msg)
kwargs['metadata'] = volume.get('metadata', None)
snapshot_id = volume.get('snapshot_id')
if snapshot_id is not None:
try:
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
snapshot_id)
except exception.NotFound:
explanation = _('snapshot id:%s not found') % snapshot_id
raise exc.HTTPNotFound(explanation=explanation)
else:
kwargs['snapshot'] = None
source_volid = volume.get('source_volid')
if source_volid is not None:
try:
kwargs['source_volume'] = \
self.volume_api.get_volume(context,
source_volid)
except exception.NotFound:
explanation = _('source volume id:%s not found') % source_volid
raise exc.HTTPNotFound(explanation=explanation)
else:
kwargs['source_volume'] = None
source_replica = volume.get('source_replica')
if source_replica is not None:
try:
src_vol = self.volume_api.get_volume(context,
source_replica)
if src_vol['replication_status'] == 'disabled':
                    explanation = _('source volume id:%s is not'
                                    ' replicated') % source_replica
raise exc.HTTPNotFound(explanation=explanation)
kwargs['source_replica'] = src_vol
except exception.NotFound:
explanation = (_('replica source volume id:%s not found') %
source_replica)
raise exc.HTTPNotFound(explanation=explanation)
else:
kwargs['source_replica'] = None
consistencygroup_id = volume.get('consistencygroup_id')
if consistencygroup_id is not None:
try:
kwargs['consistencygroup'] = \
self.consistencygroup_api.get(context,
consistencygroup_id)
except exception.NotFound:
explanation = _('Consistency group id:%s not found') % \
consistencygroup_id
raise exc.HTTPNotFound(explanation=explanation)
else:
kwargs['consistencygroup'] = None
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
elif size is None and kwargs['source_replica'] is not None:
size = kwargs['source_replica']['size']
LOG.info(_LI("Create volume of %s GB"), size, context=context)
if self.ext_mgr.is_loaded('os-image-create'):
image_ref = volume.get('imageRef')
if image_ref is not None:
image_uuid = self._image_uuid_from_ref(image_ref, context)
kwargs['image_id'] = image_uuid
kwargs['availability_zone'] = volume.get('availability_zone', None)
kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
new_volume = self.volume_api.create(context,
size,
volume.get('display_name'),
volume.get('display_description'),
**kwargs)
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
new_volume = dict(new_volume.iteritems())
retval = self._view_builder.detail(req, new_volume)
return retval
def _get_volume_filter_options(self):
"""Return volume search options allowed by non-admin."""
return ('name', 'status', 'metadata')
@wsgi.serializers(xml=VolumeTemplate)
def update(self, req, id, body):
"""Update a volume."""
context = req.environ['cinder.context']
if not body:
msg = _("Missing request body")
raise exc.HTTPBadRequest(explanation=msg)
if 'volume' not in body:
msg = _("Missing required element '%s' in request body") % 'volume'
raise exc.HTTPBadRequest(explanation=msg)
volume = body['volume']
update_dict = {}
valid_update_keys = (
'name',
'description',
'display_name',
'display_description',
'metadata',
)
for key in valid_update_keys:
if key in volume:
update_dict[key] = volume[key]
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in update_dict:
update_dict['display_name'] = update_dict['name']
del update_dict['name']
# NOTE(thingee): v2 API allows name instead of display_name
if 'description' in update_dict:
update_dict['display_description'] = update_dict['description']
del update_dict['description']
try:
volume = self.volume_api.get(context, id, viewable_admin_meta=True)
volume_utils.notify_about_volume_usage(context, volume,
'update.start')
self.volume_api.update(context, volume, update_dict)
except exception.NotFound:
msg = _("Volume could not be found")
raise exc.HTTPNotFound(explanation=msg)
volume.update(update_dict)
utils.add_visible_admin_metadata(volume)
volume_utils.notify_about_volume_usage(context, volume,
'update.end')
return self._view_builder.detail(req, volume)
def create_resource(ext_mgr):
return wsgi.Resource(VolumeController(ext_mgr))
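# Illustrative request body for VolumeController.create() above -- a hedged
# sketch, not part of the original module.  The keys mirror what create()
# reads from body['volume']; all values below are made-up placeholders.
#
#   POST /v2/{tenant_id}/volumes
#   {
#       "volume": {
#           "name": "vol-example",
#           "description": "volume built from an image",
#           "size": 10,
#           "imageRef": "71543ced-a8af-45b6-a5c4-a46282108a90",
#           "volume_type": "lvm-default",
#           "metadata": {"purpose": "testing"},
#           "availability_zone": "nova"
#       }
#   }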
| apache-2.0 | 206,777,529,314,163,600 | 36.14053 | 79 | 0.578197 | false |
boxed/CMi | web_frontend/CMi/tvshows/api.py | 1 | 1578 | from django.template.loader import render_to_string
from django.conf.urls import patterns
from CMi.tvshows.models import Episode, Category
def tv_show_tile(title, episodes, category=None):
return (
10, render_to_string('tile.html', {
'url': '/tvshows/' + (('category/%s/' % category.pk) if category else ''),
'image': '/site-media/tv.svg',
'title': title,
'content': '%s new / %s total' % (episodes.filter(watched=False).count(), episodes.count()),
}))
def tiles():
return [tv_show_tile(title='TV Shows', episodes=Episode.objects.filter(show__category=None).exclude(filepath=''))] + [tv_show_tile(category=category, title=category.name, episodes=Episode.objects.filter(show__category=category).exclude(filepath='')) for category in Category.objects.order_by('name')]
def urls():
return patterns('CMi.tvshows.views',
(r'^tvshows/$', 'index'),
(r'^tvshows/category/(?P<category_id>\d+)/$', 'index'),
(r'^tvshows/(?P<show_id>\d+)/$', 'episode_list'),
(r'^tvshows/(?P<show_id>\d+)/(?P<episode_id>\d+)/$', 'play_episode'),
(r'^tvshows/(?P<show_id>\d+)/(?P<episode_id>\d+)/ended$', 'episode_ended'),
(r'^tvshows/(?P<show_id>\d+)/(?P<episode_id>\d+)/position/(?P<position>\d+)$', 'episode_position'),
(r'^tvshows/suggested/$', 'suggested_shows'),
(r'^tvshows/suggested/(?P<suggested_show_id>\d+)/add/(?P<option>.*)/$', 'add_suggested_show'),
(r'^tvshows/suggested/(?P<suggested_show_id>\d+)/ignore/$', 'ignore_suggested_show'),
) | mit | 6,253,094,463,314,585,000 | 53.448276 | 304 | 0.607731 | false |
jandom/GromacsWrapper | staging/SunGridEngine.py | 1 | 17146 | # $Id: SunGridEngine.py 2765 2009-01-20 13:02:14Z oliver $
"""
:mod:`staging.SunGridEngine` --- staging class for SunGridEngine
================================================================
Primitive framework for staging jobs in `Sun Grid Engine`_ via a
customized :class:`Job` class.
Example python submission script
--------------------------------
Write the SGE script like this::
#!/usr/bin/env python
#$ -N bulk
#$ -S /usr/bin/python
#$ -v PYTHONPATH=/home/oliver/Library/python-lib
#$ -v LD_LIBRARY_PATH=/opt/intel/cmkl/8.0/lib/32:/opt/intel/itc60/slib:/opt/intel/ipp41/ia32_itanium/sharedlib:/opt/intel/ipp41/ia32_itanium/sharedlib/linux32:/opt/intel/fc/9.0/lib:/opt/intel/cc/9.0/lib
#$ -r n
#$ -j y
# The next line is IMPORTANT when you are using the default for Job(startdir=None)
#$ -cwd
from staging.SunGridEngine import Job
job = Job(inputfiles=dict(psf = 'inp/crbp_apo.psf',
dcd = 'trj/rmsfit_1opa_salt_ewald_shake_10ang_prod.dcd'),
outputfiles=dict(dx = '*.dx', pickle = '*.pickle'),
variables=dict(normalize = True, ...))
job.stage()
F = job.filenames # use F[key] to reference filenames from inputfiles or outputfiles
V = job.variables # and V[key] for the variables
# your python script here...
print "psf: %(psf)s dcd: %(dcd)" % F
print "normalize = %(normalize)s" % V
job.unstage()
job.cleanup() # removes stage dir, careful!
.. _`Sun Grid Engine`: http://gridengine.sunsource.net/
Description of the :class:`Job` class
-------------------------------------
.. autoclass:: Job
:members:
Helper functions for building job arrays
----------------------------------------
.. autofunction:: getline_from_arraylist
.. autofunction:: get_fields_from_arraylist
.. autofunction:: get_value_from_arraylist
"""
import os
import errno
import shutil
from staging.common import joindicts,pathjoin
# TODO: SGE_job should really inherit from Job so that one could
# derive Jobs classes for different queuing systems.
class SGE_job(object):
"""Specifics for a Sun Gridengine job."""
def __init__(self,*args,**kwargs):
"""Set up a SGE job.
If the environment contains JOB_NAME, JOB_ID, and SGE_TASK_ID
then this is a job submitted through Sun Gridengine (we only
check for JOB_NAME) and staging proceeds as usual.
If there is no JOB_NAME this is a 'Local' job and we do not do
any staging, just providing the same framework but on local
files.
If the JOB_NAME='my_identifier' *keyword argument* is given
then staging proceeds as if this was a regular job; in this
case one can also supply SGE_TASK_ID.
Arguments:
JOB_NAME force use of this JOB_NAME and force staging even
            when not submitting through Gridengine (no effect when SGE controlled)
SGE_TASK_ID fake a task id in an SGE array job
"""
super(SGE_job,self).__init__()
self.__MODE__ = "init"
if 'JOB_NAME' in os.environ: # SGE submission
self.queuingsystem = 'SGE'
self.JOB_NAME = os.environ['JOB_NAME'].strip()
self.JOB_ID = os.environ['JOB_ID'].strip()
self.TASK_ID = os.environ['SGE_TASK_ID'].strip()
elif 'JOB_NAME' in kwargs:
self.queuingsystem = 'Copy'
self.JOB_NAME = kwargs['JOB_NAME'] or "noname"
self.JOB_ID = str(os.getpid()) # potentially unsafe, use hash or mktemp?
self.TASK_ID = str(kwargs.setdefault('SGE_TASK_ID','undefined'))
else: # running job locally (shouldn't this be in staging.Local?)
self.queuingsystem = 'Local'
self.JOB_NAME = 'Local'
self.JOB_ID = str(os.getpid())
self.TASK_ID = str(kwargs.setdefault('SGE_TASK_ID','undefined'))
self.jobdir_name = self.get_jobdir_name()
self.hostname = self.get_hostname()
self.__continue_this_line = False # for msg(), helper for continuing lines
def get_jobdir_name(self):
# create canonical name
if self.TASK_ID == "undefined":
jobdir_name=self.JOB_NAME+'.'+str(self.JOB_ID)
else:
# part of an array job
jobdir_name=self.JOB_NAME+'.'+str(self.JOB_ID)+'.'+str(self.TASK_ID)
return jobdir_name
def get_hostname(self):
import socket
return socket.gethostname()
def msg(self,string,newline=True):
# suppress newline here
if not self.__continue_this_line:
print "{0!s}(): ".format(self.__MODE__) + str(string),
else:
print str(string),
# add newline if requested
if newline:
print
# tell next invocation what to do
self.__continue_this_line = not newline
def statusmessage(self):
self.msg("hostname: {0!s}".format(self.hostname))
self.msg("queuing system: {0!s}".format(self.queuingsystem))
self.msg("JOB_NAME: {0!s}".format(self.JOB_NAME))
self.msg("JOB_ID: {0!s}".format(self.JOB_ID))
self.msg("SGE_TASK_ID: {0!s}".format(self.TASK_ID))
self.msg("jobdir_name: {0!s}".format(self.jobdir_name))
class Job(SGE_job):
"""The Job class encapsulates the SGE job and allows for clean staging and unstaging.
Set up the Job::
job = Job(inputfiles=dict(...),outputfiles=dict(...),variables=dict(...),**kwargs)
*inputfiles* and *outputfiles* are dictionaries with arbitrary
keys; each item is a path to a file relative to the startdir
(which by default is the directory from which the SGE job starts
--- use the ``#$ -cwd`` flag!). If the files are not relative to the
start dir then new directories are constructed under the stage
    dir; in this instance it is important that the user script *only*
uses the filenames in :attr:`Job.filenames`: These have the proper paths
of the local (staged) files for the script to operate on.
With ::
job.stage()
inputfiles are copied to the stagedir on the node's scratch
dir and sub directories are created as necessary; directories
mentioned as part of the outputfiles are created, too. ::
job.unstage()
copies back all files mentioned in output files (again, use
directories as part of the path as necessary) and create the
directories in the startdir if needed. For the outputfiles one
can also use shell-style glob patterns, e.g. ``outfiles =
{'all_dcd': '*.dcd', 'last_data':'*[5-9].dat'}``
Sensible defaults are automatically selected for startdir
(cwd) and stagedir (/scratch/USER/JOB_NAME.JOB_ID).
If the script is not run through SGE (i.e. the environment
variable :envvar:`JOB_NAME` is not set) then the script is run without
staging; this is pretty much equivalent to using ::
from staging.Local import Job
(i.e. using the :class:`staging.Local.Job` class).
:Attributes:
:attr:`input`
inputfiles dict (relative to startdir or absolute)
:attr:`output`
outputfiles dict (relative to startdir or absolute, can contain globs)
:attr:`filenames`
merged dict of input and output, pointing to *staged* files
:attr:`variables`
variables dict
:Methods:
:meth:`stage`
setup job on the nodes in stagedir
:meth:`unstage`
retrieve results to startdir
:meth:`cleanup`
remove all files on the node (rm -rf stagedir)
"""
def __init__(self,*args,**kwargs):
"""Set up SGE job.
:Arguments:
inputfiles
dict of input files (with relative path to startdir);
globs are not supported.
outputfiles
dict of result files or glob patterns (relative to
stagedir == relative to startdir)
variables
key/value pairs that can be used in the script as
Job.variables[key]
startdir
path to the directory where the input can be found
(must be nfs-mounted on node)
stagedir
local scratch directory on node; all input files are copied
there. The default should be ok.
JOB_NAME
unique identifier (only set this if this NOT submitted through
the Gridengine queuing system AND if the files should be copied
to a scratch disk (i.e. staging proceeds as it would for a
SGE-submitted job).)
SGE_TASK_ID
fake a task id (use with JOB_NAME)
"""
self.__MODE__ = "init" # current state, for self.msg
super(Job,self).__init__(*args,**kwargs)
self.input = kwargs.setdefault('inputfiles',{})
self.output = kwargs.setdefault('outputfiles',{})
self.variables = kwargs.setdefault('variables',{})
# where we find input files and copy back results
self.startdir = self.startdir_name(kwargs.setdefault('startdir',None))
# local directory on node
self.stagedir = self.stagedir_name(kwargs.setdefault('stagedir',None))
# normalized filenames (always under stagedir)
self.filenames = {k: pathjoin(self.stagedir,path,refdir=self.startdir)
for k,path in joindicts(self.input,self.output).items()}
self.statusmessage()
def statusmessage(self):
super(Job,self).statusmessage()
self.msg("startdir: {0!s}".format(self.startdir))
self.msg("stagedir: {0!s}".format(self.stagedir))
def startdir_name(self,startdir=None):
if startdir is None:
# use canonical setup (relies on -cwd SGE flag)
startdir=os.path.realpath(os.path.curdir)
return startdir
def stagedir_name(self,stagedir=None):
        if self.queuingsystem == 'Local':
return None
if stagedir is None:
# use canonical setup
stagedir = pathjoin('/scratch',os.environ['USER'],self.jobdir_name)
return stagedir
def stage(self):
"""Copy all input files to the scratch directory."""
self.__MODE__ = "stage"
        if self.queuingsystem == 'Local':
return
stagedir = self.stagedir
try:
os.makedirs(stagedir)
self.msg("Created stage dir {stagedir!s}.".format(**locals()))
except os.error,e:
if e.errno == errno.EEXIST:
self.msg("WARNING {stagedir!s} already exists.".format(**locals()))
else:
raise
self._make_all_dirs(stagedir,self.input,refdir=self.startdir) # copy input and preserve directory structure
self._make_all_dirs(stagedir,self.output,refdir=self.startdir) # also create directories for the output files
for key,p in self.input.items(): # copy input files
srcpath = pathjoin(self.startdir,p, sanitize=False) # may be absolute (and ignores startdir!)
destpath = self.filenames[key] # ALWAYS under stagedir
self.msg("item={key!s}: copying {srcpath!s}".format(**locals()), newline=False)
shutil.copyfile(srcpath,destpath)
self.msg(" --> {destpath!s}".format(**locals()))
# finally, change current directory to the stage dir: all further
# commands can assume that staging has been completed
os.chdir(stagedir)
self.msg("chdir to {stagedir!s} successful.".format(**locals()))
def unstage(self):
"""Copy results back. Shell-style glob patterns are allowed."""
self.__MODE__ = "unstage"
        if self.queuingsystem == 'Local':
return
import glob
self._make_all_dirs(self.startdir,self.output,sanitize=False) # make result directories, may be absolute!
for key,p in self.output.items():
src = self.filenames[key] # always relative to stagedir
srcdir = os.path.dirname(p)
destdir = pathjoin(self.startdir,srcdir, sanitize=False) # may be absolute
self.msg("item={key!s}: looking for {p!s} [={src!s}]...".format(**locals()))
for srcpath in glob.glob(src):
srcname = os.path.basename(srcpath)
destpath = pathjoin(destdir,srcname, sanitize=False)
self.msg("item={key!s}: copying {srcpath!s}".format(**locals()), newline=False)
shutil.copyfile(srcpath,destpath) # silently replaces files !
self.msg(" --> {destpath!s}".format(**locals()))
def cleanup(self):
"""Remove stage dir"""
self.__MODE__ = "cleanup"
os.chdir(self.startdir)
        if self.queuingsystem == 'Local':
return
try:
shutil.rmtree(self.stagedir)
self.msg("removed stage dir {0!s}".format(self.stagedir))
except os.error,e:
if e.errno == errno.ENOENT:
self.msg("{0!s} does not exist any more".format(self.stagedir))
else:
raise
def _make_all_dirs(self,topdir,filedict,**kwargs):
"""Create directories under topdir, based on paths in filedict."""
for key,p in filedict.items():
srcdir = os.path.dirname(p)
destdir = pathjoin(topdir,srcdir,**kwargs)
try:
os.makedirs(destdir) # recursive
self.msg("item={key!s}: created dir {destdir!s}".format(**locals()))
except os.error,e:
if e.errno == errno.EEXIST:
pass
else:
raise
def save(self,filename):
"""Save the Job() as a pickled file.
Restore with ::
import staging.SunGridengine
import cPickle
job = cPickle.load(open(<filename>,'r'))
"""
import cPickle
cPickle.dump(self,open(filename,'wb'),cPickle.HIGHEST_PROTOCOL)
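# Hedged convenience counterpart to Job.save() -- not part of the original
# module; it merely wraps the cPickle restore recipe quoted in the save()
# docstring.  'filename' is whatever path was handed to save().
def load_job(filename):
    """Restore a Job() instance pickled by Job.save()."""
    import cPickle
    return cPickle.load(open(filename, 'rb'))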
def getline_from_arraylist(filename=None,ENVNAME='ARRAYLIST',default="arraylist.txt"):
"""Read a list of values from filename and return the line that corresponds to the current SGE_TASK_ID.
line = get_line_from_arraylist(filename=None,ENVNAME='ARRAYLIST',default="arraylist.txt")
fields will be different depending on the value of :envvar:`SGE_TASK_ID`
(set by SunGridengine). The lines are simply numbered consecutively.
:Arguments:
*filename*
name of the arraylist file
*ENVNAME*
try to get filename from environment variable if filename is not set
*default*
if all fails, try this as a default filename
File format::
# comment lines are ignored as are whitespace lines
# only the first column is read; the internal numbering starts at 1
line1 ... <---- task id 1
line2 ... <---- task id 2
# more comments, they are NOT counted for the task id
line3 ... <---- task id 3
...
Ignores white space lines and lines starting with ``#``. Lines are
stripped of left and right white space.
"""
if filename is None:
filename = os.environ.setdefault(ENVNAME, default)
values = {}
ival = 0
# read in file list as dict, indexed by taskid, hence one-based
for line in open(filename):
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
ival +=1
values[ival] = line
# print values
# this varies from task to task (and raises an exception if this is not an array job)
TASK_ID = os.environ['SGE_TASK_ID']
if TASK_ID == "undefined":
raise RuntimeError("This must be run from a SGE task array job.")
return values[int(TASK_ID)]
def get_fields_from_arraylist(**kwargs):
"""Read a list of values from filename and return the line that corresponds to the current SGE_TASK_ID.
get_line_from_arraylist(filename=None,ENVNAME='ARRAYLIST',default="arraylist.txt") -> fields
fields will be different depending on the value of SGE_TASK_ID (set by SunGridengine).
The lines are simply numbered consecutively.
See :func:`getline_from_arraylist` for more details.
"""
return getline_from_arraylist(**kwargs).split()
def get_value_from_arraylist(index=0,**kwargs):
"""Get field[index] of the entry in the array list corresponding to SGE_TASK_ID.
See :func:`get_fields_from_arraylist` for details.
"""
return get_fields_from_arraylist(**kwargs)[index]
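# Hedged usage sketch for the array-list helpers above (not part of the
# original module).  Submitted as e.g. "qsub -t 1-3 run.py", each task of
# the array job reads its own line; "params.txt" and its two columns are
# made-up example values:
#
#   # params.txt   (line 1 -> SGE_TASK_ID 1, line 2 -> SGE_TASK_ID 2, ...)
#   300 1.0
#   310 1.0
#   320 1.5
#
#   temperature, pressure = get_fields_from_arraylist(filename='params.txt')
#   temperature_only = get_value_from_arraylist(index=0, filename='params.txt')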
| gpl-3.0 | 3,551,175,902,556,135,000 | 38.874419 | 205 | 0.589059 | false |
Yelp/paasta | paasta_tools/paastaapi/model/marathon_mesos_nonrunning_task.py | 1 | 7535 | # coding: utf-8
"""
Paasta API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from paasta_tools.paastaapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from paasta_tools.paastaapi.model.task_tail_lines import TaskTailLines
globals()['TaskTailLines'] = TaskTailLines
class MarathonMesosNonrunningTask(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'deployed_timestamp': (float,), # noqa: E501
'hostname': (str,), # noqa: E501
'id': (str,), # noqa: E501
'state': (str,), # noqa: E501
'tail_lines': (TaskTailLines,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'deployed_timestamp': 'deployed_timestamp', # noqa: E501
'hostname': 'hostname', # noqa: E501
'id': 'id', # noqa: E501
'state': 'state', # noqa: E501
'tail_lines': 'tail_lines', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""MarathonMesosNonrunningTask - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
deployed_timestamp (float): The unix timestamp at which the task was deployed. [optional] # noqa: E501
hostname (str): Name of the Mesos agent on which this task is running. [optional] # noqa: E501
id (str): The ID of the task in Mesos. [optional] # noqa: E501
state (str): The current state of the task. [optional] # noqa: E501
tail_lines (TaskTailLines): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| apache-2.0 | 6,798,839,742,654,500,000 | 39.294118 | 124 | 0.563769 | false |
rashoodkhan/DjangoBB | djangobb_forum/models.py | 1 | 19091 | # coding: utf-8
from hashlib import sha1
import os
from django.conf import settings
from django.contrib.auth.models import Group
from django.db import models
from django.db.models import aggregates
from django.db.models.signals import post_save
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import pytz
from djangobb_forum.fields import AutoOneToOneField, ExtendedImageField, JSONField
from djangobb_forum.util import smiles, convert_text_to_html
from djangobb_forum import settings as forum_settings
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ['^djangobb_forum\.fields\.AutoOneToOneField',
'^djangobb_forum\.fields\.JSONField',
'^djangobb_forum\.fields\.ExtendedImageField'])
TZ_CHOICES = [(tz_name, tz_name) for tz_name in pytz.common_timezones]
SIGN_CHOICES = (
(1, 'PLUS'),
(-1, 'MINUS'),
)
PRIVACY_CHOICES = (
(0, _(u'Display your e-mail address.')),
(1, _(u'Hide your e-mail address but allow form e-mail.')),
(2, _(u'Hide your e-mail address and disallow form e-mail.')),
)
MARKUP_CHOICES = [('bbcode', 'bbcode')]
try:
import markdown
MARKUP_CHOICES.append(("markdown", "markdown"))
except ImportError:
pass
path = os.path.join(settings.STATIC_ROOT, 'djangobb_forum', 'themes')
if os.path.exists(path):
# fix for collectstatic
THEME_CHOICES = [(theme, theme) for theme in os.listdir(path)
if os.path.isdir(os.path.join(path, theme))]
else:
THEME_CHOICES = []
class Category(models.Model):
name = models.CharField(_('Name'), max_length=80)
groups = models.ManyToManyField(Group, blank=True, null=True, verbose_name=_('Groups'), help_text=_('Only users from these groups can see this category'))
position = models.IntegerField(_('Position'), blank=True, default=0)
class Meta:
ordering = ['position']
verbose_name = _('Category')
verbose_name_plural = _('Categories')
def __unicode__(self):
return self.name
def forum_count(self):
return self.forums.all().count()
@property
def topics(self):
return Topic.objects.filter(forum__category__id=self.id).select_related()
@property
def posts(self):
return Post.objects.filter(topic__forum__category__id=self.id).select_related()
def has_access(self, user):
if user.is_superuser:
return True
if self.groups.exists():
if user.is_authenticated():
if not self.groups.filter(user__pk=user.id).exists():
return False
else:
return False
return True
class Forum(models.Model):
category = models.ForeignKey(Category, related_name='forums', verbose_name=_('Category'))
name = models.CharField(_('Name'), max_length=80)
position = models.IntegerField(_('Position'), blank=True, default=0)
description = models.TextField(_('Description'), blank=True, default='')
moderators = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, null=True, verbose_name=_('Moderators'))
updated = models.DateTimeField(_('Updated'), auto_now=True)
post_count = models.IntegerField(_('Post count'), blank=True, default=0)
topic_count = models.IntegerField(_('Topic count'), blank=True, default=0)
last_post = models.ForeignKey('Post', related_name='last_forum_post', blank=True, null=True)
forum_logo = ExtendedImageField(_('Forum Logo'), blank=True, default='',
upload_to=forum_settings.FORUM_LOGO_UPLOAD_TO,
width=forum_settings.FORUM_LOGO_WIDTH,
height=forum_settings.FORUM_LOGO_HEIGHT)
class Meta:
ordering = ['position']
verbose_name = _('Forum')
verbose_name_plural = _('Forums')
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('djangobb:forum', [self.id])
@property
def posts(self):
return Post.objects.filter(topic__forum__id=self.id).select_related()
class Topic(models.Model):
forum = models.ForeignKey(Forum, related_name='topics', verbose_name=_('Forum'))
name = models.CharField(_('Subject'), max_length=255)
created = models.DateTimeField(_('Created'), auto_now_add=True)
updated = models.DateTimeField(_('Updated'), null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('User'))
views = models.IntegerField(_('Views count'), blank=True, default=0)
sticky = models.BooleanField(_('Sticky'), blank=True, default=False)
closed = models.BooleanField(_('Closed'), blank=True, default=False)
subscribers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='subscriptions', verbose_name=_('Subscribers'), blank=True)
post_count = models.IntegerField(_('Post count'), blank=True, default=0)
last_post = models.ForeignKey('Post', related_name='last_topic_post', blank=True, null=True)
class Meta:
ordering = ['-updated']
get_latest_by = 'updated'
verbose_name = _('Topic')
verbose_name_plural = _('Topics')
def __unicode__(self):
return self.name
def delete(self, *args, **kwargs):
        try:
            last_post = self.posts.latest()
        except Post.DoesNotExist:
            pass
        else:
            last_post.last_forum_post.clear()
forum = self.forum
super(Topic, self).delete(*args, **kwargs)
try:
forum.last_post = Topic.objects.filter(forum__id=forum.id).latest().last_post
except Topic.DoesNotExist:
forum.last_post = None
forum.topic_count = Topic.objects.filter(forum__id=forum.id).count()
forum.post_count = Post.objects.filter(topic__forum__id=forum.id).count()
forum.save()
@property
def head(self):
try:
return self.posts.select_related().order_by('created')[0]
except IndexError:
return None
@property
def reply_count(self):
return self.post_count - 1
@models.permalink
def get_absolute_url(self):
return ('djangobb:topic', [self.id])
def update_read(self, user):
tracking = user.posttracking
        # if the user's last_read is newer than the topic's last post, skip tracking
if tracking.last_read and (tracking.last_read > self.last_post.created):
return
if isinstance(tracking.topics, dict):
            # reset tracking once more than 5120 topics are stored, and set last_read to now
if len(tracking.topics) > 5120:
tracking.topics = None
tracking.last_read = timezone.now()
tracking.save()
            # update the entry if there is a new post or the topic isn't tracked yet
if self.last_post_id > tracking.topics.get(str(self.id), 0):
tracking.topics[str(self.id)] = self.last_post_id
tracking.save()
else:
#initialize topic tracking dict
tracking.topics = {self.id: self.last_post_id}
tracking.save()
class Post(models.Model):
topic = models.ForeignKey(Topic, related_name='posts', verbose_name=_('Topic'))
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='posts', verbose_name=_('User'))
created = models.DateTimeField(_('Created'), auto_now_add=True)
updated = models.DateTimeField(_('Updated'), blank=True, null=True)
updated_by = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('Updated by'), blank=True, null=True)
markup = models.CharField(_('Markup'), max_length=15, default=forum_settings.DEFAULT_MARKUP, choices=MARKUP_CHOICES)
body = models.TextField(_('Message'))
body_html = models.TextField(_('HTML version'))
user_ip = models.GenericIPAddressField(_('User IP'), blank=True, null=True)
class Meta:
ordering = ['created']
get_latest_by = 'created'
verbose_name = _('Post')
verbose_name_plural = _('Posts')
def save(self, *args, **kwargs):
self.body_html = convert_text_to_html(self.body, self.markup)
if forum_settings.SMILES_SUPPORT and self.user.forum_profile.show_smilies:
self.body_html = smiles(self.body_html)
super(Post, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self_id = self.id
head_post_id = self.topic.posts.order_by('created')[0].id
forum = self.topic.forum
topic = self.topic
profile = self.user.forum_profile
self.last_topic_post.clear()
self.last_forum_post.clear()
super(Post, self).delete(*args, **kwargs)
        # if this was the topic's first (head) post, remove the whole topic
if self_id == head_post_id:
topic.delete()
else:
try:
topic.last_post = Post.objects.filter(topic__id=topic.id).latest()
except Post.DoesNotExist:
topic.last_post = None
topic.post_count = Post.objects.filter(topic__id=topic.id).count()
topic.save()
try:
forum.last_post = Post.objects.filter(topic__forum__id=forum.id).latest()
except Post.DoesNotExist:
forum.last_post = None
#TODO: for speedup - save/update only changed fields
forum.post_count = Post.objects.filter(topic__forum__id=forum.id).count()
forum.topic_count = Topic.objects.filter(forum__id=forum.id).count()
forum.save()
profile.post_count = Post.objects.filter(user__id=self.user_id).count()
profile.save()
@models.permalink
def get_absolute_url(self):
return ('djangobb:post', [self.id])
def summary(self):
LIMIT = 50
tail = len(self.body) > LIMIT and '...' or ''
return self.body[:LIMIT] + tail
__unicode__ = summary
class Reputation(models.Model):
from_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='reputations_from', verbose_name=_('From'))
to_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='reputations_to', verbose_name=_('To'))
post = models.ForeignKey(Post, related_name='post', verbose_name=_('Post'))
time = models.DateTimeField(_('Time'), auto_now_add=True)
sign = models.IntegerField(_('Sign'), choices=SIGN_CHOICES, default=0)
reason = models.TextField(_('Reason'), max_length=1000)
class Meta:
verbose_name = _('Reputation')
verbose_name_plural = _('Reputations')
unique_together = (('from_user', 'post'),)
def __unicode__(self):
time = timezone.localtime(self.time)
return u'T[%d], FU[%d], TU[%d]: %s' % (self.post.id, self.from_user.id, self.to_user.id, unicode(time))
class ProfileManager(models.Manager):
use_for_related_fields = True
def get_query_set(self):
qs = super(ProfileManager, self).get_query_set()
if forum_settings.REPUTATION_SUPPORT:
qs = qs.extra(select={
'reply_total': 'SELECT SUM(sign) FROM djangobb_forum_reputation WHERE to_user_id = djangobb_forum_profile.user_id GROUP BY to_user_id',
'reply_count_minus': "SELECT SUM(sign) FROM djangobb_forum_reputation WHERE to_user_id = djangobb_forum_profile.user_id AND sign = '-1' GROUP BY to_user_id",
'reply_count_plus': "SELECT SUM(sign) FROM djangobb_forum_reputation WHERE to_user_id = djangobb_forum_profile.user_id AND sign = '1' GROUP BY to_user_id",
})
return qs
class Profile(models.Model):
user = AutoOneToOneField(settings.AUTH_USER_MODEL, related_name='forum_profile', verbose_name=_('User'))
status = models.CharField(_('Status'), max_length=30, blank=True)
site = models.URLField(_('Site'), blank=True)
jabber = models.CharField(_('Jabber'), max_length=80, blank=True)
icq = models.CharField(_('ICQ'), max_length=12, blank=True)
msn = models.CharField(_('MSN'), max_length=80, blank=True)
aim = models.CharField(_('AIM'), max_length=80, blank=True)
yahoo = models.CharField(_('Yahoo'), max_length=80, blank=True)
location = models.CharField(_('Location'), max_length=30, blank=True)
signature = models.TextField(_('Signature'), blank=True, default='', max_length=forum_settings.SIGNATURE_MAX_LENGTH)
signature_html = models.TextField(_('Signature'), blank=True, default='', max_length=forum_settings.SIGNATURE_MAX_LENGTH)
time_zone = models.CharField(_('Time zone'),max_length=50, choices=TZ_CHOICES, default=settings.TIME_ZONE)
language = models.CharField(_('Language'), max_length=5, default='', choices=settings.LANGUAGES)
avatar = ExtendedImageField(_('Avatar'), blank=True, default='', upload_to=forum_settings.AVATARS_UPLOAD_TO, width=forum_settings.AVATAR_WIDTH, height=forum_settings.AVATAR_HEIGHT)
theme = models.CharField(_('Theme'), choices=THEME_CHOICES, max_length=80, default='default')
show_avatar = models.BooleanField(_('Show avatar'), blank=True, default=True)
show_signatures = models.BooleanField(_('Show signatures'), blank=True, default=True)
show_smilies = models.BooleanField(_('Show smilies'), blank=True, default=True)
privacy_permission = models.IntegerField(_('Privacy permission'), choices=PRIVACY_CHOICES, default=1)
auto_subscribe = models.BooleanField(_('Auto subscribe'), help_text=_("Auto subscribe all topics you have created or reply."), blank=True, default=False)
markup = models.CharField(_('Default markup'), max_length=15, default=forum_settings.DEFAULT_MARKUP, choices=MARKUP_CHOICES)
post_count = models.IntegerField(_('Post count'), blank=True, default=0)
objects = ProfileManager()
class Meta:
verbose_name = _('Profile')
verbose_name_plural = _('Profiles')
def last_post(self):
posts = Post.objects.filter(user__id=self.user_id).order_by('-created')
if posts:
return posts[0].created
else:
return None
class PostTracking(models.Model):
"""
Model for tracking read/unread posts.
In topics stored ids of topics and last_posts as dict.
"""
user = AutoOneToOneField(settings.AUTH_USER_MODEL)
topics = JSONField(null=True, blank=True)
last_read = models.DateTimeField(null=True, blank=True)
class Meta:
verbose_name = _('Post tracking')
verbose_name_plural = _('Post tracking')
def __unicode__(self):
return self.user.username
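# Illustrative shape of PostTracking.topics, as maintained by
# Topic.update_read() above (ids are made-up): keys are topic ids stored as
# strings, values are the id of the last post the user has seen, e.g.
#   {"17": 243, "18": 250}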
class Report(models.Model):
reported_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='reported_by', verbose_name=_('Reported by'))
post = models.ForeignKey(Post, verbose_name=_('Post'))
zapped = models.BooleanField(_('Zapped'), blank=True, default=False)
zapped_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='zapped_by', blank=True, null=True, verbose_name=_('Zapped by'))
created = models.DateTimeField(_('Created'), blank=True)
    reason = models.TextField(_('Reason'), blank=True, default='', max_length=1000)
class Meta:
verbose_name = _('Report')
verbose_name_plural = _('Reports')
def __unicode__(self):
return u'%s %s' % (self.reported_by , self.zapped)
class Ban(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, verbose_name=_('Banned user'), related_name='ban_users')
ban_start = models.DateTimeField(_('Ban start'), default=timezone.now)
ban_end = models.DateTimeField(_('Ban end'), blank=True, null=True)
reason = models.TextField(_('Reason'))
class Meta:
verbose_name = _('Ban')
verbose_name_plural = _('Bans')
def __unicode__(self):
return self.user.username
def save(self, *args, **kwargs):
self.user.is_active = False
self.user.save()
super(Ban, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.user.is_active = True
self.user.save()
super(Ban, self).delete(*args, **kwargs)
class Attachment(models.Model):
post = models.ForeignKey(Post, verbose_name=_('Post'), related_name='attachments')
size = models.IntegerField(_('Size'))
content_type = models.CharField(_('Content type'), max_length=255)
path = models.CharField(_('Path'), max_length=255)
name = models.TextField(_('Name'))
hash = models.CharField(_('Hash'), max_length=40, blank=True, default='', db_index=True)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
super(Attachment, self).save(*args, **kwargs)
if not self.hash:
self.hash = sha1(str(self.id) + settings.SECRET_KEY).hexdigest()
super(Attachment, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
return ('djangobb:forum_attachment', [self.hash])
def get_absolute_path(self):
return os.path.join(settings.MEDIA_ROOT, forum_settings.ATTACHMENT_UPLOAD_TO,
self.path)
#------------------------------------------------------------------------------
class Poll(models.Model):
topic = models.ForeignKey(Topic)
question = models.CharField(max_length=200)
choice_count = models.PositiveSmallIntegerField(default=1,
help_text=_("How many choices are allowed simultaneously."),
)
active = models.BooleanField(default=True,
help_text=_("Can users vote to this poll or just see the result?"),
)
deactivate_date = models.DateTimeField(null=True, blank=True,
help_text=_("Point of time after this poll would be automatic deactivated"),
)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, null=True,
help_text=_("Users who has voted this poll."),
)
def deactivate_if_expired(self):
if self.active and self.deactivate_date:
now = timezone.now()
if now > self.deactivate_date:
self.active = False
self.save()
def single_choice(self):
return self.choice_count == 1
def __unicode__(self):
return self.question
class PollChoice(models.Model):
poll = models.ForeignKey(Poll, related_name="choices")
choice = models.CharField(max_length=200)
votes = models.IntegerField(default=0, editable=False)
def percent(self):
if not self.votes:
return 0.0
result = PollChoice.objects.filter(poll=self.poll).aggregate(aggregates.Sum("votes"))
votes_sum = result["votes__sum"]
return float(self.votes) / votes_sum * 100
def __unicode__(self):
return self.choice
#------------------------------------------------------------------------------
from .signals import post_saved, topic_saved
post_save.connect(post_saved, sender=Post, dispatch_uid='djangobb_post_save')
post_save.connect(topic_saved, sender=Topic, dispatch_uid='djangobb_topic_save')
| bsd-3-clause | -5,691,613,692,580,210,000 | 39.705757 | 184 | 0.634592 | false |
lightbase/WSCacicNeo | wscacicneo/model/all_reports.py | 1 | 7688 | #!/usr/env python
# -*- coding: utf-8 -*-
__author__ = 'adley'
from requests.exceptions import HTTPError
from wscacicneo import config
import logging
from liblightbase.lbbase.struct import Base, BaseMetadata
from liblightbase.lbbase.lbstruct.group import *
from liblightbase.lbbase.lbstruct.field import *
from liblightbase.lbbase.content import Content
from liblightbase.lbrest.base import BaseREST
from liblightbase.lbrest.document import DocumentREST
from liblightbase.lbutils import conv
from liblightbase.lbsearch.search import Search, OrderBy
log = logging.getLogger()
class AllReports():
"""
    Class for the reports base
"""
def __init__(self, rest_url=None):
"""
        Constructor method
"""
if rest_url is None:
self.rest_url = config.REST_URL
else:
self.rest_url = rest_url
self.baserest = BaseREST(rest_url=self.rest_url, response_object=False)
self.documentrest = DocumentREST(rest_url=self.rest_url,
base=self.lbbase, response_object=False)
@property
def lbbase(self):
"""
        Collection configuration
"""
nome_orgao = Field(**dict(
name='nome_orgao',
description='Nome do Órgão',
alias='nome_orgao',
datatype='Text',
indices=['Textual'],
multivalued=False,
required=True
))
nome_relatorio = Field(**dict(
name='nome_relatorio',
description='Nome do Relatório',
alias='nome_relatorio',
datatype='Text',
indices=['Textual'],
multivalued=False,
required=True
))
data_coleta = Field(**dict(
name='data_coleta',
description='Data da Coleta',
alias='data_coleta',
datatype='DateTime',
indices=['Ordenado'],
multivalued=False,
required=True
))
total_computadores = Field(**dict(
name='total_computadores',
description='Total de Computadores',
alias='total_computadores',
datatype='Integer',
indices=['Ordenado'],
multivalued=False,
required=True
))
nome_item = Field(**dict(
name='nome_item',
description='Nome do Item',
alias='nome_item',
datatype='Text',
indices=['Textual'],
multivalued=False,
required=True
))
quantidade_item = Field(**dict(
name='quantidade_item',
description='Quantidades total do item',
alias='quantidade_item',
datatype='Integer',
indices=['Ordenado'],
multivalued=False,
required=True
))
descricao_item = Field(**dict(
name='descricao_item',
description='Descrição',
alias='descricao_item',
datatype='Text',
indices=['Textual'],
multivalued=False,
required=True
))
"""
GROUP Sistema Operacional
"""
ItensGroup_content = Content()
ItensGroup_content.append(nome_item)
ItensGroup_content.append(quantidade_item)
ItensGroup_content.append(descricao_item)
ItensGroup_metadata = GroupMetadata(
name='ItensGroup',
alias='ItensGroup',
description='Grupo de Itens',
multivalued=True
)
ItensGroup = Group(
metadata=ItensGroup_metadata,
content=ItensGroup_content
)
base_metadata = BaseMetadata(
name='all_reports'
)
content_list = Content()
content_list.append(nome_orgao)
content_list.append(nome_relatorio)
content_list.append(data_coleta)
content_list.append(total_computadores)
content_list.append(ItensGroup)
lbbase = Base(
metadata=base_metadata,
content=content_list
)
return lbbase
@property
def metaclass(self):
"""
        Returns the metaclass for this base
"""
return self.lbbase.metaclass()
def create_base(self):
"""
        Creates the base in LB
"""
self.baserest.response_object = True
response = self.baserest.create(self.lbbase)
if response.status_code == 200:
return self.lbbase
else:
return None
def remove_base(self):
"""
Remove base from Lightbase
:param lbbase: LBBase object instance
:return: True or Error if base was not excluded
"""
response = self.baserest.delete(self.lbbase)
if response.status_code == 200:
return True
else:
raise IOError('Error excluding base from LB')
def is_created(self):
"""
        Returns whether the base already exists
"""
try:
self.baserest.response_object = False
response = self.baserest.get(self.lbbase.metadata.name)
self.baserest.response_object = True
return True
except:
return False
allreports = AllReports()
class ReportsAll(allreports.metaclass):
"""
    Generic Reports class
"""
def __init__(self, **args):
super(ReportsAll, self).__init__(**args)
self.documentrest = allreports.documentrest
@property
def coleta(self):
"""
        Collection interval
        :return: the stored value, or the 3-hour minimum
"""
col = allreports.metaclass.coleta.__get__(self)
if col is None:
return 3
else:
return col
@coleta.setter
def coleta(self, value):
"""
Setter
"""
value = int(value)
allreports.metaclass.coleta.__set__(self, value)
def allreports_to_dict(self):
"""
Convert status object to Python dict
:return:
"""
return conv.document2dict(allreports.lbbase, self)
def allreports_to_json(self):
"""
Convert object to json
:return:
"""
return conv.document2json(allreports.lbbase, self)
def create_doc_allreports(self):
"""
Insert document on base
:return: Document creation ID
"""
document = self.allreports_to_json()
try:
result = allreports.documentrest.create(document)
except HTTPError as err:
log.error(err.strerror)
return None
return result
def search_doc_allreports(self, sigla):
"""
Busca registro completo do órgao pelo nome
:return: obj collection com os dados da base
"""
search = Search(
literal="document->>'sigla' = '"+sigla+"'"
)
results = self.documentrest.get_collection(search_obj=search)
return results
def search_list_allreports(self):
"""
Retorna todos os docs da base
"""
search = Search(
limit=None
)
results = self.documentrest.get_collection(search)
return results
def edit_allreports(self, id, doc):
"""
altera um doc ou path do doc
"""
results = self.documentrest.update(id, doc)
return results
def delete_allreports(self, id):
"""
Deleta o allreports apartir do ID
"""
results = allreports.documentrest.delete(id)
return results
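# Hedged usage sketch (not part of the original module): a minimal example of
# driving AllReports/ReportsAll. Whether the generated metaclass accepts the
# base fields as keyword arguments is an assumption, and all values below are
# hypothetical placeholders.
def _example_usage():
    if not allreports.is_created():
        allreports.create_base()
    report = ReportsAll(
        nome_orgao='Example Org',
        nome_relatorio='Inventory',
        data_coleta='2014-01-01 00:00:00',
        total_computadores=10
    )
    return report.create_doc_allreports()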
| gpl-2.0 | -3,582,415,873,444,309,000 | 24.929054 | 79 | 0.551661 | false |
guacamoleo/Tensile | Tensile/ClientWriter.py | 1 | 38398 | ################################################################################
# Copyright (C) 2016 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from Common import globalParameters, HR, pushWorkingPath, popWorkingPath, print1, CHeader, printWarning
from SolutionStructs import Solution
from SolutionWriter import SolutionWriter
import YAMLIO
import os
from subprocess import Popen
from shutil import copy as shutil_copy
from shutil import rmtree
################################################################################
# Main
################################################################################
def main( config ):
libraryLogicPath = os.path.join(globalParameters["WorkingPath"], \
globalParameters["LibraryLogicPath"])
pushWorkingPath(globalParameters["LibraryClientPath"])
##############################################################################
# Copy Source Files
##############################################################################
pushWorkingPath("source")
filesToCopy = [
"Client.cpp",
"Client.h",
"DeviceStats.h",
"ReferenceCPU.h",
"MathTemplates.cpp",
"MathTemplates.h",
"KernelHeader.h",
"Tools.h",
"CMakeLists.txt",
"TensileConfig.cmake",
"TensileConfigVersion.cmake"
]
for f in filesToCopy:
shutil_copy(
os.path.join(globalParameters["SourcePath"], f),
globalParameters["WorkingPath"] )
if globalParameters["RuntimeLanguage"] == "OCL":
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindOpenCL.cmake"),
globalParameters["WorkingPath"] )
else:
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindHIP.cmake"),
globalParameters["WorkingPath"] )
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindHCC.cmake"),
globalParameters["WorkingPath"] )
##############################################################################
# Read Logic Files
##############################################################################
logicFiles = [os.path.join(libraryLogicPath, f) for f \
in os.listdir(libraryLogicPath) \
if (os.path.isfile(os.path.join(libraryLogicPath, f)) \
and os.path.splitext(f)[1]==".yaml")]
print1("LogicFiles: %s" % logicFiles)
functions = []
functionNames = []
enableHalf = False
for logicFileName in logicFiles:
(scheduleName, deviceNames, problemType, solutionsForType, \
indexOrder, exactLogic, rangeLogic) \
= YAMLIO.readLibraryLogicForSchedule(logicFileName)
if problemType["DataType"].isHalf():
enableHalf = True
functions.append((scheduleName, problemType))
functionNames.append("tensile_%s" % (problemType))
globalParameters["EnableHalf"] = enableHalf
##############################################################################
# Write Generated Header
##############################################################################
forBenchmark = False
solutions = None
problemSizes = None
stepName = None
writeClientParameters(forBenchmark, solutions, problemSizes, stepName, \
functions)
popWorkingPath() # source
##############################################################################
# Run Build Script
##############################################################################
# if redo=true, clobber the build directory
if globalParameters["ForceRedoLibraryClient"]:
rmtree(os.path.join(globalParameters["WorkingPath"], "build"), \
ignore_errors=True)
pushWorkingPath("build")
# write runScript
path = globalParameters["WorkingPath"]
forBenchmark = False
runScriptName = writeRunScript(path, libraryLogicPath, forBenchmark)
# run runScript
process = Popen(runScriptName, cwd=globalParameters["WorkingPath"])
process.communicate()
if process.returncode:
printWarning("Benchmark Process exited with code %u" % process.returncode)
popWorkingPath() # build
popWorkingPath() # LibraryClient
################################################################################
# Write Run Script
################################################################################
def writeRunScript(path, libraryLogicPath, forBenchmark):
# create run.bat or run.sh which builds and runs
runScriptName = os.path.join(path, \
"run.%s" % ("bat" if os.name == "nt" else "sh") )
runScriptFile = open(runScriptName, "w")
echoLine = "@echo." if os.name == "nt" else "echo"
if os.name != "nt":
runScriptFile.write("#!/bin/sh\n")
q = "" if os.name == "nt" else "\""
runScriptFile.write("%s && echo %s%s%s && echo %s# Configuring CMake for Client%s && echo %s%s%s\n" \
% (echoLine, q, HR, q, q, q, q, HR, q))
runScriptFile.write("cmake")
# runtime and kernel language
runScriptFile.write(" -DTensile_RUNTIME_LANGUAGE=%s" \
% globalParameters["RuntimeLanguage"])
if globalParameters["EnableHalf"]:
runScriptFile.write(" -DTensile_ENABLE_HALF=ON")
if forBenchmark:
# for benchmark client
runScriptFile.write(" -DTensile_CLIENT_BENCHMARK=ON")
else:
# for library client
runScriptFile.write(" -DTensile_ROOT=%s" \
% os.path.join(globalParameters["ScriptPath"], "..") )
runScriptFile.write(" -DTensile_CLIENT_BENCHMARK=OFF")
runScriptFile.write(" -DTensile_LOGIC_PATH=%s" % libraryLogicPath)
runScriptFile.write(" -DTensile_LIBRARY_PRINT_DEBUG=%s" \
% ("ON" if globalParameters["LibraryPrintDebug"] else "OFF"))
runScriptFile.write(" -DTensile_SHORT_FILE_NAMES=%s" \
% ("ON" if globalParameters["ShortNames"] else "OFF"))
if globalParameters["CMakeCXXFlags"]:
runScriptFile.write(" -DCMAKE_CXX_FLAGS=%s" \
% globalParameters["CMakeCXXFlags"] )
if globalParameters["CMakeCFlags"]:
runScriptFile.write(" -DCMAKE_C_FLAGS=%s" \
% globalParameters["CMakeCFlags"] )
# for both
if os.name == "nt":
runScriptFile.write(" -DCMAKE_GENERATOR_PLATFORM=x64")
runScriptFile.write(" -DTensile_MERGE_FILES=%s" \
% ("ON" if globalParameters["MergeFiles"] else "OFF"))
runScriptFile.write(" ../source\n")
runScriptFile.write("%s && echo %s%s%s && echo %s# Building Client%s && echo %s%s%s\n" \
% (echoLine, q, HR, q, q, q, q, HR, q))
runScriptFile.write("cmake --build . --config %s%s\n" \
% (globalParameters["CMakeBuildType"], " -- -j 8" \
if os.name != "nt" else "") )
if forBenchmark:
if os.name == "nt":
runScriptFile.write(os.path.join(globalParameters["CMakeBuildType"], \
"client.exe") )
else:
if globalParameters["PinClocks"] and globalParameters["ROCmSMIPath"]:
runScriptFile.write("%s -d 0 --setfan 255 --setsclk 7\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("sleep 1\n")
runScriptFile.write("%s -d 0 -a\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("./client")
clp = ""
clp += " --platform-idx %u" % globalParameters["Platform"]
clp += " --device-idx %u" % globalParameters["Device"]
clp += " --init-alpha %u" % globalParameters["DataInitTypeAlpha"]
clp += " --init-beta %u" % globalParameters["DataInitTypeBeta"]
clp += " --init-c %u" % globalParameters["DataInitTypeC"]
clp += " --init-ab %u" % globalParameters["DataInitTypeAB"]
clp += " --print-valids %u" % globalParameters["ValidationPrintValids"]
clp += " --print-max %u" % globalParameters["ValidationMaxToPrint"]
clp += " --num-benchmarks %u" % globalParameters["NumBenchmarks"]
clp += " --num-elements-to-validate %u" % globalParameters["NumElementsToValidate"]
clp += " --num-enqueues-per-sync %u" % globalParameters["EnqueuesPerSync"]
clp += " --num-syncs-per-benchmark %u" % globalParameters["SyncsPerBenchmark"]
clp += " --use-gpu-timer %u" % globalParameters["KernelTime"]
clp += " --sleep-percent %u" % globalParameters["SleepPercent"]
runScriptFile.write(clp)
runScriptFile.write("\n")
if os.name != "nt":
if globalParameters["PinClocks"] and globalParameters["ROCmSMIPath"]:
runScriptFile.write("%s -d 0 --resetclocks\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("%s -d 0 --setfan 50\n" % globalParameters["ROCmSMIPath"])
else:
executablePath = os.path.join(globalParameters["WorkingPath"])
if os.name == "nt":
executablePath = os.path.join(executablePath, \
globalParameters["CMakeBuildType"], \
"client.exe")
else:
executablePath = os.path.join(executablePath, "client")
runScriptFile.write("%s && echo %s%s%s && echo %s# Library Client:%s && echo %s# %s%s && %s\n" \
% (echoLine, q, HR, q, q, q, q, executablePath, q, executablePath) )
runScriptFile.close()
if os.name != "nt":
os.chmod(runScriptName, 0777)
return runScriptName
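################################################################################
# Hedged usage sketch (not part of the original module): a minimal standalone
# call of writeRunScript(). It assumes globalParameters has been initialized by
# Tensile's Common module; both paths are hypothetical placeholders.
################################################################################
def exampleWriteRunScript():
  buildPath = "/tmp/tensile_build"   # hypothetical build directory
  logicPath = "/tmp/tensile_logic"   # hypothetical library logic directory
  scriptName = writeRunScript(buildPath, logicPath, forBenchmark=False)
  print1("run script written to: %s" % scriptName)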
################################################################################
# Write Generated Benchmark Parameters
################################################################################
def writeClientParameters(forBenchmark, solutions, problemSizes, stepName, \
functionList):
h = ""
##############################################################################
# Min Naming
##############################################################################
if forBenchmark:
kernels = []
for solution in solutions:
solutionKernels = solution.getKernels()
for kernel in solutionKernels:
if kernel not in kernels:
kernels.append(kernel)
solutionSerialNaming = Solution.getSerialNaming(solutions)
kernelSerialNaming = Solution.getSerialNaming(kernels)
solutionMinNaming = Solution.getMinNaming(solutions)
kernelMinNaming = Solution.getMinNaming(kernels)
solutionWriter = SolutionWriter( \
solutionMinNaming, solutionSerialNaming, \
kernelMinNaming, kernelSerialNaming)
if forBenchmark:
if globalParameters["MergeFiles"]:
h += "#include \"Solutions.h\"\n"
else:
for solution in solutions:
solutionName = solutionWriter.getSolutionName(solution)
h += "#include \"" + solutionName + ".h\"\n"
h += "\n"
else:
h += "#include \"Tensile.h\"\n"
h += "typedef enum {\n"
h += " enum_float,\n"
h += " enum_double,\n"
h += " enum_TensileComplexFloat,\n"
h += " enum_TensileComplexDouble\n"
h += "#ifdef Tensile_ENABLE_HALF\n"
h += " ,enum_TensileHalf\n"
h += "#endif\n"
h += "} DataTypeEnum;\n"
h += "\n"
h += "const char indexChars[%u] = \"%s" \
% (len(globalParameters["IndexChars"])+1, \
globalParameters["IndexChars"][0])
for i in range(1, len(globalParameters["IndexChars"])):
h += globalParameters["IndexChars"][i]
h += "\";\n"
h += "unsigned int functionIdx;\n"
h += "unsigned int dataTypeIdx;\n"
h += "unsigned int problemTypeIdx;\n"
h += "\n"
##############################################################################
# Problem Types
##############################################################################
#dataTypes = []
#problemTypes = []
#functionSerialToDataTypeAndIdx = []
dataTypes = []
problemTypes = []
problemTypesForDataType = {} # for data type
schedulesForProblemType = {} # for problem type
functionInfo = [] # dataTypeIdx, problemTypeIdx, idxWithinDataType, idxWithinProblemType
if forBenchmark:
problemType = solutions[0]["ProblemType"]
dataType = problemType["DataType"]
dataTypes.append(dataType)
problemTypes.append(problemType)
problemTypesForDataType[dataType] = [problemType]
schedulesForProblemType[problemType] = solutions
numProblemTypes = 1
for solution in solutions:
functionInfo.append([ 0, 0, 0, 0, 0, 0 ])
else:
for functionIdx in range(0, len(functionList)):
function = functionList[functionIdx]
scheduleName = function[0]
problemType = function[1]
dataType = problemType["DataType"]
if dataType not in dataTypes:
dataTypes.append(dataType)
problemTypesForDataType[dataType] = []
if problemType not in problemTypesForDataType[dataType]:
problemTypesForDataType[dataType].append(problemType)
schedulesForProblemType[problemType] = []
schedulesForProblemType[problemType].append(scheduleName)
# sort
dataTypes = sorted(dataTypes)
for dataType in dataTypes:
problemTypesForDataType[dataType] = \
sorted(problemTypesForDataType[dataType])
for problemType in problemTypesForDataType[dataType]:
schedulesForProblemType[problemType] = \
sorted(schedulesForProblemType[problemType])
# assign info
functionIdxSerial = 0
problemTypeIdxSerial = 0
for dataTypeIdxSerial in range(0, len(dataTypes)):
dataType = dataTypes[dataTypeIdxSerial]
functionIdxForDataType = 0
for problemTypeIdxForDataType in range(0, \
len(problemTypesForDataType[dataType])):
problemType = \
problemTypesForDataType[dataType][problemTypeIdxForDataType]
problemTypes.append(problemType)
functionIdxForProblemType = 0
for functionIdxForProblemType in range(0, \
len(schedulesForProblemType[problemType])):
functionInfo.append([ \
dataTypeIdxSerial, \
problemTypeIdxForDataType, \
problemTypeIdxSerial, \
functionIdxSerial,\
functionIdxForDataType,\
functionIdxForProblemType, \
])
functionIdxForProblemType += 1
functionIdxForDataType += 1
functionIdxSerial += 1
problemTypeIdxSerial += 1
numProblemTypes = problemTypeIdxSerial
numFunctions = functionIdxSerial
h += "const unsigned int numFunctions = %u;\n" % numFunctions
##############################################################################
# Data Types
##############################################################################
h += "/* data types */\n"
numDataTypes = len(dataTypes)
h += "const unsigned int numDataTypes = %u;\n" % numDataTypes
h += "const DataTypeEnum dataTypeEnums[numDataTypes] = { enum_%s" \
% dataTypes[0].toCpp()
for dataTypeIdx in range(1, numDataTypes):
h += ", enum_%s" % dataTypes[dataTypeIdx].toCpp();
h += " };\n"
# bytes per elements
h += "const unsigned int bytesPerElement[numDataTypes] = { %u" \
% (dataTypes[0].numBytes())
for dataTypeIdx in range(1, numDataTypes):
dataType = dataTypes[dataTypeIdx]
h += ", %u" % dataType.numBytes()
h += " };\n"
# flops per mac
h += "const unsigned int numFlopsPerMac[numDataTypes] = { %u" \
% (2 if dataTypes[0].isReal() else 8)
for dataTypeIdx in range(1, numDataTypes):
dataType = dataTypes[dataTypeIdx]
h += ", %u" % (2 if dataType.isReal() else 8)
h += " };\n"
for dataTypeIdx in range(0, numDataTypes):
h += "#define Tensile_DATA_TYPE_%s\n" \
% dataTypes[dataTypeIdx].toCpp().upper()
##############################################################################
# Problem Types
##############################################################################
h += "/* problem types */\n"
h += "const unsigned int numProblemTypes = %u;\n" % numProblemTypes
# Num C Indices
h += "const unsigned int numIndicesC[numProblemTypes] = { %u" \
% problemTypes[0]["NumIndicesC"]
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %u" % problemType["NumIndicesC"]
h += " };\n"
# Num AB Indices
maxNumIndicesAB = len(problemTypes[0]["IndexAssignmentsA"])
h += "const unsigned int numIndicesAB[numProblemTypes] = { %u" \
% len(problemTypes[0]["IndexAssignmentsA"])
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
numIndicesAB = len(problemType["IndexAssignmentsA"])
h += ", %u" % numIndicesAB
maxNumIndicesAB = max(numIndicesAB, maxNumIndicesAB)
h += " };\n"
h += "const unsigned int maxNumIndicesAB = %u;\n" % maxNumIndicesAB
# Index Assignments A
h += "const unsigned int indexAssignmentsA[numProblemTypes][maxNumIndicesAB] = {\n"
for problemTypeIdx in range(0, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
indices = problemType["IndexAssignmentsA"]
h += " { %u" % indices[0]
for i in range(1, maxNumIndicesAB):
if i < len(indices):
h += ", %u" % indices[i]
else:
h += ", static_cast<unsigned int>(-1)"
if problemTypeIdx < numProblemTypes-1:
h += " },\n"
else:
h += " }\n"
h += "};\n"
# Index Assignments B
h += "const unsigned int indexAssignmentsB[numProblemTypes][maxNumIndicesAB] = {\n"
for problemTypeIdx in range(0, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
indices = problemType["IndexAssignmentsB"]
h += " { %u" % indices[0]
for i in range(1, maxNumIndicesAB):
if i < len(indices):
h += ", %u" % indices[i]
else:
h += ", static_cast<unsigned int>(-1)"
if problemTypeIdx < numProblemTypes-1:
h += " },\n"
else:
h += " }\n"
h += "};\n"
# beta
h += "bool useBeta[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["UseBeta"] else "false")
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemType["UseBeta"] else "false")
h += " };\n"
# Complex Conjugates
h += "const bool complexConjugateA[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["ComplexConjugateA"] else "false" )
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemTypes[0]["ComplexConjugateA"] else "false" )
h += " };\n"
h += "const bool complexConjugateB[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["ComplexConjugateB"] else "false" )
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemTypes[0]["ComplexConjugateB"] else "false" )
h += " };\n"
h += "\n"
if not forBenchmark:
h += "// dataTypeIdxSerial, problemTypeIdxForDataType, problemTypeIdxSerial, functionIdxSerial, functionIdxForDataType, functionIdxForProblemType\n"
first = True
h += "const unsigned int functionInfo[numFunctions][6] = {\n"
for info in functionInfo:
h += "%s{ %u, %u, %u, %u, %u, %u }" % (" " if first else ",\n ", \
info[0], info[1], info[2], info[3], info[4], info[5] )
first = False
h += " };\n"
##############################################################################
# Problem Sizes
##############################################################################
maxNumIndices = problemTypes[0]["TotalIndices"]
if not forBenchmark:
for problemType in problemTypes:
maxNumIndices = max(problemType["TotalIndices"], maxNumIndices)
h += "const unsigned int maxNumIndices = %u;\n" % maxNumIndices
h += "const unsigned int totalIndices[numProblemTypes] = { %u" \
% problemTypes[0]["TotalIndices"]
for problemTypeIdx in range(1, numProblemTypes):
h += ", %u" % problemTypes[problemTypeIdx]["TotalIndices"]
h += " };\n"
if forBenchmark:
h += "const unsigned int numProblems = %u;\n" \
% problemSizes.totalProblemSizes
h += "const unsigned int problemSizes[numProblems][%u] = {\n" \
% problemTypes[0]["TotalIndices"]
for i in range(0, problemSizes.totalProblemSizes):
line = " {%5u" %problemSizes.sizes[i][0]
for j in range(1, problemTypes[0]["TotalIndices"]):
line += ",%5u" % problemSizes.sizes[i][j]
line += " }"
h += line
if i < problemSizes.totalProblemSizes-1:
h += ","
else:
h += "};"
h += "\n"
else:
h += "unsigned int userSizes[maxNumIndices];\n"
if forBenchmark:
h += "/* problem sizes */\n"
"""
h += "const bool indexIsSized[maxNumIndices] = {"
for i in range(0, problemSizes.totalIndices):
h += " %s" % ("true" if problemSizes.indexIsSized[i] else "false")
if i < problemSizes.totalIndices-1:
h += ","
h += " };\n"
h += "const unsigned int numIndicesSized = %u;\n" \
% len(problemSizes.indicesSized)
h += "const unsigned int indicesSized[numIndicesSized][4] = {\n"
h += "// { min, stride, stride_incr, max }\n"
for i in range(0, len(problemSizes.indicesSized)):
r = problemSizes.indicesSized[i]
h += " { %u, %u, %u, %u }" % (r[0], r[1], r[2], r[3])
if i < len(problemSizes.indicesSized)-1:
h += ","
h += "\n"
h += " };\n"
numIndicesMapped = len(problemSizes.indicesMapped)
h += "const unsigned int numIndicesMapped = %u;\n" % numIndicesMapped
if numIndicesMapped > 0:
h += "#define Tensile_INDICES_MAPPED 1\n"
h += "const unsigned int indicesMapped[numIndicesMapped] = {"
for i in range(0, numIndicesMapped):
h += " %u" % problemSizes.indicesMapped[i]
if i < numIndicesMapped-1:
h += ","
h += " };\n"
else:
h += "#define Tensile_INDICES_MAPPED 0\n"
"""
##############################################################################
# Max Problem Sizes
##############################################################################
if forBenchmark:
h += "size_t maxSizeC = %u;\n" % (problemSizes.maxC)
h += "size_t maxSizeA = %u;\n" % (problemSizes.maxA)
h += "size_t maxSizeB = %u;\n" % (problemSizes.maxB)
h += "\n"
else:
h += "size_t maxSizeC;\n"
h += "size_t maxSizeA;\n"
h += "size_t maxSizeB;\n"
h += "\n"
##############################################################################
# Current Problem Size
##############################################################################
h += "/* current problem size */\n"
#h += "unsigned int fullSizes[maxNumIndices];\n"
#h += "unsigned int currentSizedIndexSizes[numIndicesSized];\n"
#h += "unsigned int currentSizedIndexIncrements[numIndicesSized];\n"
h += "\n"
##############################################################################
# Solutions
##############################################################################
if forBenchmark:
h += "/* solutions */\n"
# Problem Type Indices
h += "const unsigned int maxNumSolutions = %u;\n" % len(solutions)
h += "float solutionPerf[numProblems][maxNumSolutions]; // milliseconds\n"
h += "\n"
# Solution Ptrs
h += "typedef TensileStatus (*SolutionFunctionPointer)(\n"
argList = solutionWriter.getArgList(solutions[0]["ProblemType"], True, True, True)
for i in range(0, len(argList)):
h += " %s %s%s" % (argList[i][0], argList[i][1], \
",\n" if i < len(argList)-1 else ");\n\n")
h += "const SolutionFunctionPointer solutions[maxNumSolutions] = {\n"
for i in range(0, len(solutions)):
solution = solutions[i]
solutionName = solutionWriter.getSolutionName(solution)
h += " %s" % solutionName
if i < len(solutions)-1:
h += ","
h += "\n"
h += " };\n"
h += "\n"
# Solution Names
h += "const char *solutionNames[maxNumSolutions] = {\n"
for i in range(0, len(solutions)):
solution = solutions[i]
solutionName = solutionWriter.getSolutionName(solution)
h += " \"%s\"" % solutionName
if i < len(solutions)-1:
h += ","
h += "\n"
h += " };\n"
h += "\n"
else:
# Function Names
functionNames = []
for dataType in dataTypes:
for problemType in problemTypesForDataType[dataType]:
for scheduleName in schedulesForProblemType[problemType]:
#functionNames.append("tensile_%s_%s" % (scheduleName, problemType))
functionNames.append("tensile_%s" % (problemType))
h += "const char *functionNames[numFunctions] = {\n"
for functionIdx in range(0, len(functionNames)):
functionName = functionNames[functionIdx]
h += " \"%s\"%s\n" % (functionName, \
"," if functionIdx < len(functionNames)-1 else "" )
h += " };\n"
##############################################################################
# Runtime Structures
##############################################################################
h += "/* runtime structures */\n"
h += "TensileStatus status;\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += "cl_platform_id platform;\n"
h += "cl_device_id device;\n"
h += "cl_context context;\n"
h += "cl_command_queue stream;\n"
else:
h += "hipStream_t stream;\n"
#h += "int deviceIdx = %u;\n" \
# % (globalParameters["Device"])
h += "\n"
h += "void *deviceC;\n"
h += "void *deviceA;\n"
h += "void *deviceB;\n"
##############################################################################
# Benchmarking and Validation Parameters
##############################################################################
h += "\n/* benchmarking parameters */\n"
#h += "const bool measureKernelTime = %s;\n" \
# % ("true" if globalParameters["KernelTime"] else "false")
#h += "const unsigned int numEnqueuesPerSync = %u;\n" \
# % (globalParameters["EnqueuesPerSync"])
#h += "const unsigned int numSyncsPerBenchmark = %u;\n" \
# % (globalParameters["SyncsPerBenchmark"])
#h += "unsigned int numElementsToValidate = %s;\n" \
# % (str(globalParameters["NumElementsToValidate"]) \
# if globalParameters["NumElementsToValidate"] >= 0 \
# else "0xFFFFFFFF" )
#h += "unsigned int validationMaxToPrint = %u;\n" \
# % globalParameters["ValidationMaxToPrint"]
#h += "bool validationPrintValids = %s;\n" \
# % ("true" if globalParameters["ValidationPrintValids"] else "false")
h += "size_t validationStride;\n"
#h += "unsigned int dataInitTypeC = %s;\n" % globalParameters["DataInitTypeC"]
#h += "unsigned int dataInitTypeAB = %s;\n" % globalParameters["DataInitTypeAB"]
h += "\n"
##############################################################################
# Generated Call to Reference
##############################################################################
h += "/* generated call to reference */\n"
h += "template<typename DataType>\n"
h += "TensileStatus generatedCallToReferenceCPU(\n"
h += " const unsigned int *sizes,\n"
h += " DataType *referenceC,\n"
h += " DataType *initialA,\n"
h += " DataType *initialB,\n"
h += " DataType alpha,\n"
h += " DataType beta) {\n"
h += " return tensileReferenceCPU(\n"
h += " referenceC,\n"
h += " initialA,\n"
h += " initialB,\n"
h += " alpha,\n"
h += " beta,\n"
h += " totalIndices[problemTypeIdx],\n"
h += " sizes,\n"
h += " numIndicesC[problemTypeIdx],\n"
h += " numIndicesAB[problemTypeIdx],\n"
h += " indexAssignmentsA[problemTypeIdx],\n"
h += " indexAssignmentsB[problemTypeIdx],\n"
h += " complexConjugateA[problemTypeIdx],\n"
h += " complexConjugateB[problemTypeIdx],\n"
h += " validationStride );\n"
h += "};\n"
h += "\n"
##############################################################################
# Generated Call to Solution
##############################################################################
if forBenchmark:
problemType = solutions[0]["ProblemType"]
h += "/* generated call to solution */\n"
h += "template<typename DataType>\n"
h += "TensileStatus generatedCallToSolution(\n"
h += " unsigned int solutionIdx,\n"
h += " const unsigned int *sizes,\n"
h += " DataType alpha,\n"
h += " DataType beta, \n"
h += " unsigned int numEvents = 0, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list = NULL,\n"
h += " cl_event *outputEvent = NULL ) {\n"
else:
h += " hipEvent_t *startEvent = NULL,\n"
h += " hipEvent_t *stopEvent = NULL ) {\n"
h += " // calculate parameters assuming packed data\n"
# strides
indexChars = globalParameters["IndexChars"]
firstStride = 1
if problemType["UseInitialStrides"]:
firstStride = 0
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
# calculate strides
for i in range(0,lastStrideC):
h += " unsigned int strideC%u%s = 1" % (i, indexChars[i])
for j in range(0, i):
h += "*sizes[%i]" % j
h += ";\n"
for i in range(0,lastStrideA):
h += " unsigned int strideA%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsA"][j]
h += ";\n"
for i in range(0,lastStrideB):
h += " unsigned int strideB%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsB"][j]
h += ";\n"
for i in range(0, problemType["TotalIndices"]):
h += " unsigned int size%s = sizes[%u];\n" % (indexChars[i], i)
h += "\n"
# function call
h += " // call solution function\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " return solutions[solutionIdx]( static_cast<cl_mem>(deviceC), static_cast<cl_mem>(deviceA), static_cast<cl_mem>(deviceB),\n"
else:
typeName = dataTypes[0].toCpp()
h += " return solutions[solutionIdx]( static_cast<%s *>(deviceC), static_cast<%s *>(deviceA), static_cast<%s *>(deviceB),\n" \
% (typeName, typeName, typeName)
h += " alpha,\n"
if problemType["UseBeta"]:
h += " beta,\n"
h += " 0, 0, 0, // offsets\n"
for i in range(firstStride,lastStrideC):
h += " strideC%u%s,\n" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
h += " strideA%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
h += " strideB%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0, problemType["TotalIndices"]):
h += " size%s,\n" % indexChars[i]
h += " stream,\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " numEvents, event_wait_list, outputEvent ); // events\n"
else:
h += " numEvents, startEvent, stopEvent); // events\n"
h += "};\n"
h += "\n"
else:
############################################################################
# Generated Call to Function
############################################################################
for enqueue in [True, False]:
functionName = "tensile" if enqueue else "tensileGetSolutionName"
returnName = "TensileStatus" if enqueue else "const char *"
h += "/* generated call to function */\n"
h += "template<typename DataType>\n"
h += "%s generatedCallTo_%s(\n" % (returnName, functionName)
h += " unsigned int *sizes,\n"
h += " DataType alpha,\n"
h += " DataType beta, \n"
h += " unsigned int numEvents = 0, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list = NULL,\n"
h += " cl_event *outputEvent = NULL );\n\n"
else:
h += " hipEvent_t *startEvent = NULL,\n"
h += " hipEvent_t *stopEvent = NULL );\n\n"
for dataType in dataTypes:
typeName = dataType.toCpp()
functionsForDataType = []
for problemType in problemTypesForDataType[dataType]:
for scheduleName in schedulesForProblemType[problemType]:
functionsForDataType.append([scheduleName, problemType])
h += "template<>\n"
h += "inline %s generatedCallTo_%s<%s>(\n" \
% (returnName, functionName, typeName)
h += " unsigned int *sizes,\n"
h += " %s alpha,\n" % typeName
h += " %s beta,\n" % typeName
h += " unsigned int numEvents, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list,\n"
h += " cl_event *outputEvent ) {\n\n"
else:
h += " hipEvent_t *startEvent,\n"
h += " hipEvent_t *stopEvent ) {\n\n"
h += " unsigned int functionIdxForDataType = functionInfo[functionIdx][4];\n"
for functionIdx in range(0, len(functionsForDataType)):
function = functionsForDataType[functionIdx]
scheduleName = function[0]
problemType = function[1]
if len(functionsForDataType)> 1:
if functionIdx == 0:
h += " if (functionIdxForDataType == %u) {\n" % functionIdx
elif functionIdx == len(functionsForDataType)-1:
h += " } else {\n"
else:
h += " } else if (functionIdxForDataType == %u) {\n" \
% functionIdx
# strides
indexChars = globalParameters["IndexChars"]
firstStride = 1
if problemType["UseInitialStrides"]:
firstStride = 0
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
# calculate strides
for i in range(0,lastStrideC):
h += " unsigned int strideC%u%s = 1" % (i, indexChars[i])
for j in range(0, i):
h += "*sizes[%i]" % j
h += ";\n"
for i in range(0,lastStrideA):
h += " unsigned int strideA%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsA"][j]
h += ";\n"
for i in range(0,lastStrideB):
h += " unsigned int strideB%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsB"][j]
h += ";\n"
for i in range(0, problemType["TotalIndices"]):
h += " unsigned int size%s = sizes[%u];\n" % (indexChars[i], i)
# function call
h += " // call solution function\n"
h += " return %s_%s(\n" % (functionName, problemType)
if enqueue:
if globalParameters["RuntimeLanguage"] == "OCL":
h += " static_cast<cl_mem>(deviceC),\n"
h += " static_cast<cl_mem>(deviceA),\n"
h += " static_cast<cl_mem>(deviceB),\n"
else:
h += " static_cast<%s *>(deviceC),\n" % typeName
h += " static_cast<%s *>(deviceA),\n" % typeName
h += " static_cast<%s *>(deviceB),\n" % typeName
h += " alpha,\n"
if problemType["UseBeta"]:
h += " beta,\n"
h += " 0, 0, 0, // offsets\n"
for i in range(firstStride,lastStrideC):
h += " strideC%u%s,\n" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
h += " strideA%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
h += " strideB%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0, problemType["TotalIndices"]):
h += " size%s,\n" % indexChars[i]
h += " stream"
if enqueue:
if globalParameters["RuntimeLanguage"] == "OCL":
h += ",\n numEvents, event_wait_list, outputEvent"
else:
h += ",\n numEvents, startEvent, stopEvent"
h += ");\n"
if len(functionsForDataType) > 1:
h += " }\n" # close last if
h += "};\n" # close callToFunction
##############################################################################
# Results File Name
##############################################################################
if forBenchmark:
h += "/* results file name */\n"
resultsFileName = os.path.join(globalParameters["WorkingPath"], \
"../../Data","%s.csv" % stepName)
resultsFileName = resultsFileName.replace("\\", "\\\\")
h += "const char *resultsFileName = \"%s\";\n" % resultsFileName
##############################################################################
# Write File
##############################################################################
clientParametersFile = open(os.path.join(globalParameters["WorkingPath"], \
"ClientParameters.h"), "w")
clientParametersFile.write(CHeader)
clientParametersFile.write(h)
clientParametersFile.close()
| mit | 2,385,873,656,909,588,000 | 40.421791 | 152 | 0.550992 | false |
prim/ocempgui | doc/examples/table.py | 1 | 2042 | # Table examples.
from ocempgui.widgets import Renderer, Table, Label, Button
from ocempgui.widgets.Constants import *
def create_table_view ():
    # Create and display a Table.
table = Table (9, 2)
table.spacing = 5
table.topleft = 5, 5
label = Label ("Nonaligned wide Label")
table.add_child (0, 0, label)
table.add_child (0, 1, Button ("Simple Button"))
label = Label ("Top align")
table.add_child (1, 0, label)
table.set_align (1, 0, ALIGN_TOP)
table.add_child (1, 1, Button ("Simple Button"))
label = Label ("Bottom align")
table.add_child (2, 0, label)
table.set_align (2, 0, ALIGN_BOTTOM)
table.add_child (2, 1, Button ("Simple Button"))
label = Label ("Left align")
table.add_child (3, 0, label)
table.set_align (3, 0, ALIGN_LEFT)
table.add_child (3, 1, Button ("Simple Button"))
label = Label ("Right align")
table.add_child (4, 0, label)
table.set_align (4, 0, ALIGN_RIGHT)
table.add_child (4, 1, Button ("Simple Button"))
label = Label ("Topleft align")
table.add_child (5, 0, label)
table.set_align (5, 0, ALIGN_TOP | ALIGN_LEFT)
table.add_child (5, 1, Button ("Simple Button"))
label = Label ("Topright align")
table.add_child (6, 0, label)
table.set_align (6, 0, ALIGN_TOP | ALIGN_RIGHT)
table.add_child (6, 1, Button ("Simple Button"))
label = Label ("Bottomleft align")
table.add_child (7, 0, label)
table.set_align (7, 0, ALIGN_BOTTOM |ALIGN_LEFT)
table.add_child (7, 1, Button ("Simple Button"))
label = Label ("Bottomright align")
table.add_child (8, 0, label)
table.set_align (8, 0, ALIGN_BOTTOM |ALIGN_RIGHT)
table.add_child (8, 1, Button ("Simple Button"))
return table
if __name__ == "__main__":
# Initialize the drawing window.
re = Renderer ()
re.create_screen (250, 350)
re.title = "Table examples"
re.color = (234, 228, 223)
re.add_widget (create_table_view ())
# Start the main rendering loop.
re.start ()
| bsd-2-clause | 528,070,260,686,461,950 | 30.415385 | 59 | 0.616552 | false |
cjparsons74/kupfer | kupfer/plugin/locate.py | 1 | 2380 | __kupfer_name__ = _("Locate Files")
__kupfer_actions__ = (
"Locate",
)
__description__ = _("Search filesystem using locate")
__version__ = ""
__author__ = "Ulrik Sverdrup <ulrik.sverdrup@gmail.com>"
import os
import subprocess
from kupfer.objects import Action, Source, Leaf
from kupfer.objects import TextLeaf, FileLeaf
from kupfer import icons, plugin_support
from kupfer.obj.objects import ConstructFileLeaf
__kupfer_settings__ = plugin_support.PluginSettings(
{
"key" : "ignore_case",
"label": _("Ignore case distinctions when searching files"),
"type": bool,
"value": True,
},
)
class Locate (Action):
def __init__(self):
Action.__init__(self, _("Locate Files"))
def is_factory(self):
return True
def activate(self, leaf):
return LocateQuerySource(leaf.object)
def item_types(self):
yield TextLeaf
def get_description(self):
return _("Search filesystem using locate")
def get_gicon(self):
return icons.ComposedIcon("gnome-terminal", self.get_icon_name())
def get_icon_name(self):
return "edit-find"
class LocateQuerySource (Source):
def __init__(self, query):
Source.__init__(self, name=_('Results for "%s"') % query)
self.query = query
self.max_items = 500
def repr_key(self):
return self.query
def get_items(self):
ignore_case = '--ignore-case' if __kupfer_settings__["ignore_case"] else ''
		# Start two processes: one to take the first few hits, and one to
		# take the remainder up to the maximum. We start both at the same time
		# (regrettably, locate won't stream its output to stdout), but we ask
		# the second for results only after iterating over the first few.
first_num = 12
first_command = ("locate --null --limit %d %s '%s'" %
(first_num, ignore_case, self.query))
full_command = ("locate --null --limit %d %s '%s'" %
(self.max_items, ignore_case, self.query))
p1 = subprocess.Popen(first_command, shell=True, stdout=subprocess.PIPE)
p2 = subprocess.Popen(full_command, shell=True, stdout=subprocess.PIPE)
def get_locate_output(proc, offset=0):
out, ignored_err = proc.communicate()
return (ConstructFileLeaf(f) for f in out.split("\x00")[offset:-1])
for F in get_locate_output(p1, 0):
yield F
for F in get_locate_output(p2, first_num):
yield F
def get_gicon(self):
return icons.ComposedIcon("gnome-terminal", self.get_icon_name())
def get_icon_name(self):
return "edit-find"
| gpl-3.0 | 8,679,003,665,828,420,000 | 28.75 | 77 | 0.689496 | false |
Rhoana/rh_aligner | old/match_layers_by_max_pmcc.py | 1 | 4902 | import sys
import os
import glob
import argparse
from subprocess import call
from bounding_box import BoundingBox
import json
import itertools
import utils
# common functions
def match_layers_by_max_pmcc(jar_file, tiles_file1, tiles_file2, models_file, image_width, image_height,
fixed_layers, out_fname, meshes_dir1=None, meshes_dir2=None, conf=None, threads_num=None, auto_add_model=False):
conf_args = utils.conf_args_from_file(conf, 'MatchLayersByMaxPMCC')
meshes_str = ''
if meshes_dir1 is not None:
meshes_str += ' --meshesDir1 "{0}"'.format(meshes_dir1)
if meshes_dir2 is not None:
meshes_str += ' --meshesDir2 "{0}"'.format(meshes_dir2)
fixed_str = ""
if fixed_layers != None:
fixed_str = "--fixedLayers {0}".format(" ".join(map(str, fixed_layers)))
threads_str = ""
if threads_num != None:
threads_str = "--threads {0}".format(threads_num)
auto_add_model_str = ""
if auto_add_model:
auto_add_model_str = "--autoAddModel"
java_cmd = 'java -Xmx16g -XX:ParallelGCThreads={0} -Djava.awt.headless=true -cp "{1}" org.janelia.alignment.MatchLayersByMaxPMCC --inputfile1 {2} --inputfile2 {3} \
--modelsfile1 {4} --imageWidth {5} --imageHeight {6} {7} {8} {9} --targetPath {10} {11} {12}'.format(
utils.get_gc_threads_num(threads_num),
jar_file,
utils.path2url(tiles_file1),
utils.path2url(tiles_file2),
utils.path2url(models_file),
int(image_width),
int(image_height),
threads_str,
auto_add_model_str,
meshes_str,
out_fname,
fixed_str,
conf_args)
utils.execute_shell_command(java_cmd)
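# Hedged usage sketch (not part of the original script): a minimal direct call
# of match_layers_by_max_pmcc(). All paths and the image size are hypothetical
# placeholders; main() below builds the same call from command-line arguments.
def example_match_two_layers():
    match_layers_by_max_pmcc(
        '../target/render-0.0.1-SNAPSHOT.jar',
        '/data/sec001.json',              # tilespecs of the first layer
        '/data/sec002.json',              # tilespecs of the second layer
        '/data/sec001_to_sec002_models.json',
        2048, 2048,                       # image width/height for the mesh
        None,                             # no fixed layers
        '/data/pmcc_match.json',
        threads_num=4)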
def main():
# Command line parser
parser = argparse.ArgumentParser(description='Iterates over the tilespecs in a file, computing matches for each overlapping tile.')
parser.add_argument('tiles_file1', metavar='tiles_file1', type=str,
help='the first layer json file of tilespecs')
parser.add_argument('tiles_file2', metavar='tiles_file2', type=str,
help='the second layer json file of tilespecs')
parser.add_argument('models_file', metavar='models_file', type=str,
help='a json file that contains the model to transform tiles from tiles_file1 to tiles_file2')
parser.add_argument('-o', '--output_file', type=str,
help='an output correspondent_spec file, that will include the sift features for each tile (default: ./pmcc_match.json)',
default='./pmcc_match.json')
parser.add_argument('-W', '--image_width', type=float,
help='the width of the image (used for creating the mesh)')
parser.add_argument('-H', '--image_height', type=float,
help='the height of the image (used for creating the mesh)')
parser.add_argument('-f', '--fixed_layers', type=int, nargs='+',
help='a space separated list of fixed layer IDs (default: None)',
default=None)
parser.add_argument('-j', '--jar_file', type=str,
help='the jar file that includes the render (default: ../target/render-0.0.1-SNAPSHOT.jar)',
default='../target/render-0.0.1-SNAPSHOT.jar')
parser.add_argument('-c', '--conf_file_name', type=str,
help='the configuration file with the parameters for each step of the alignment process in json format (uses default parameters, if not supplied)',
default=None)
parser.add_argument('-t', '--threads_num', type=int,
help='the number of threads to use (default: the number of cores in the system)',
default=None)
parser.add_argument('--auto_add_model', action="store_true",
help='automatically add the identity model, if a model is not found')
parser.add_argument('-md1', '--meshes_dir1', type=str,
help='the directory that contains precomputed and serialized meshes of the first layer (optional, default: None)',
default=None)
parser.add_argument('-md2', '--meshes_dir2', type=str,
help='the directory that contains precomputed and serialized meshes of the second layer (optional, default: None)',
default=None)
args = parser.parse_args()
match_layers_by_max_pmcc(args.jar_file, args.tiles_file1, args.tiles_file2,
args.models_file, args.image_width, args.image_height,
args.fixed_layers, args.output_file,
meshes_dir1=args.meshes_dir1, meshes_dir2=args.meshes_dir2,
conf=args.conf_file_name,
threads_num=args.threads_num,
auto_add_model=args.auto_add_model)
if __name__ == '__main__':
main()
| mit | 8,168,228,408,464,496,000 | 46.134615 | 171 | 0.611791 | false |
eucalyptus/silvereye | anaconda-updates/6/gui.py | 1 | 55810 | #
# gui.py - Graphical front end for anaconda
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Matt Wilson <msw@redhat.com>
# Michael Fulbright <msf@redhat.com>
#
import os
from flags import flags
os.environ["GNOME_DISABLE_CRASH_DIALOG"] = "1"
# we only want to enable the accessibility stuff if requested for now...
if flags.cmdline.has_key("dogtail"):
os.environ["GTK_MODULES"] = "gail:atk-bridge"
import string
import time
import isys
import iutil
import sys
import shutil
import gtk
import gtk.glade
import gobject
from language import expandLangs
from constants import *
from product import *
import network
from installinterfacebase import InstallInterfaceBase
import xutils
import imputil
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("anaconda")
isys.bind_textdomain_codeset("redhat-dist", "UTF-8")
iutil.setup_translations(gtk.glade)
class StayOnScreen(Exception):
pass
mainWindow = None
stepToClass = {
"language" : ("language_gui", "LanguageWindow"),
"keyboard" : ("kbd_gui", "KeyboardWindow"),
"welcome" : ("welcome_gui", "WelcomeWindow"),
"filtertype" : ("filter_type", "FilterTypeWindow"),
"filter" : ("filter_gui", "FilterWindow"),
"zfcpconfig" : ("zfcp_gui", "ZFCPWindow"),
"partition" : ("partition_gui", "PartitionWindow"),
"parttype" : ("autopart_type", "PartitionTypeWindow"),
"cleardiskssel": ("cleardisks_gui", "ClearDisksWindow"),
"findinstall" : ("examine_gui", "UpgradeExamineWindow"),
"addswap" : ("upgrade_swap_gui", "UpgradeSwapWindow"),
"upgrademigratefs" : ("upgrade_migratefs_gui", "UpgradeMigrateFSWindow"),
"bootloader": ("bootloader_main_gui", "MainBootloaderWindow"),
"upgbootloader": ("upgrade_bootloader_gui", "UpgradeBootloaderWindow"),
"network" : ("network_gui", "NetworkWindow"),
"timezone" : ("timezone_gui", "TimezoneWindow"),
"accounts" : ("account_gui", "AccountWindow"),
"tasksel": ("task_gui", "TaskWindow"),
"group-selection": ("package_gui", "GroupSelectionWindow"),
"install" : ("progress_gui", "InstallProgressWindow"),
"complete" : ("congrats_gui", "CongratulationWindow"),
"frontend" : ("frontend_gui", "FrontendWindow"),
}
if iutil.isS390():
stepToClass["bootloader"] = ("zipl_gui", "ZiplWindow")
#
# Stuff for screenshots
#
screenshotDir = "/tmp/anaconda-screenshots"
screenshotIndex = 0
def copyScreenshots():
# see if any screenshots taken
if screenshotIndex == 0:
return
destDir = "/mnt/sysimage/root/anaconda-screenshots"
if not os.access(destDir, os.R_OK):
try:
os.mkdir(destDir, 0750)
except:
window = MessageWindow("Error Saving Screenshot",
_("An error occurred saving screenshots "
"to disk."), type="warning")
return
# Now copy all the PNGs over. Since some pictures could have been taken
# under a root changed to /mnt/sysimage, we have to try to fetch files from
# there as well.
source_dirs = [screenshotDir, os.path.join("/mnt/sysimage", screenshotDir.lstrip('/'))]
for source_dir in source_dirs:
if not os.access(source_dir, os.X_OK):
continue
for f in os.listdir(source_dir):
(path, fname) = os.path.split(f)
(b, ext) = os.path.splitext(f)
if ext == ".png":
shutil.copyfile(source_dir + '/' + f, destDir + '/' + fname)
window = MessageWindow(_("Screenshots Copied"),
_("The screenshots have been saved in the "
"directory:\n\n"
"\t/root/anaconda-screenshots/\n\n"
"You can access these when you reboot and "
"login as root."))
def takeScreenShot():
global screenshotIndex
if not os.access(screenshotDir, os.R_OK):
try:
os.mkdir(screenshotDir)
except OSError as e:
log.error("os.mkdir() failed for %s: %s" % (screenshotDir, e.strerror))
return
try:
screenshot = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
gtk.gdk.screen_width(), gtk.gdk.screen_height())
screenshot.get_from_drawable(gtk.gdk.get_default_root_window(),
gtk.gdk.colormap_get_system(),
0, 0, 0, 0,
gtk.gdk.screen_width(),
gtk.gdk.screen_height())
if screenshot:
while (1):
sname = "screenshot-%04d.png" % ( screenshotIndex,)
if not os.access(screenshotDir + '/' + sname, os.R_OK):
break
screenshotIndex += 1
if screenshotIndex > 9999:
log.error("Too many screenshots!")
return
screenshot.save (screenshotDir + '/' + sname, "png")
screenshotIndex += 1
window = MessageWindow(_("Saving Screenshot"),
_("A screenshot named '%s' has been saved.") % (sname,) ,
type="ok")
except:
window = MessageWindow(_("Error Saving Screenshot"),
_("An error occurred while saving "
"the screenshot. If this occurred "
"during package installation, you may need "
"to try several times for it to succeed."),
type="warning")
def handlePrintScrnRelease (window, event):
if event.keyval == gtk.keysyms.Print:
takeScreenShot()
#
# HACK to make treeview work
#
def setupTreeViewFixupIdleHandler(view, store):
id = {}
id["id"] = gobject.idle_add(scrollToIdleHandler, (view, store, id))
def scrollToIdleHandler((view, store, iddict)):
if not view or not store or not iddict:
return
try:
id = iddict["id"]
except:
return
selection = view.get_selection()
if not selection:
return
model, iter = selection.get_selected()
if not iter:
return
path = store.get_path(iter)
col = view.get_column(0)
view.scroll_to_cell(path, col, True, 0.5, 0.5)
if id:
gobject.source_remove(id)
# setup globals
def processEvents():
gtk.gdk.flush()
while gtk.events_pending():
gtk.main_iteration(False)
def widgetExpander(widget, growTo=None):
widget.connect("size-allocate", growToParent, growTo)
def growToParent(widget, rect, growTo=None):
if not widget.parent:
return
ignore = widget.__dict__.get("ignoreEvents")
if not ignore:
if growTo:
x, y, width, height = growTo.get_allocation()
widget.set_size_request(width, -1)
else:
widget.set_size_request(rect.width, -1)
widget.ignoreEvents = 1
else:
widget.ignoreEvents = 0
_busyCursor = 0
def setCursorToBusy(process=1):
root = gtk.gdk.get_default_root_window()
cursor = gtk.gdk.Cursor(gtk.gdk.WATCH)
root.set_cursor(cursor)
if process:
processEvents()
def setCursorToNormal():
root = gtk.gdk.get_default_root_window()
cursor = gtk.gdk.Cursor(gtk.gdk.LEFT_PTR)
root.set_cursor(cursor)
def rootPushBusyCursor(process=1):
global _busyCursor
_busyCursor += 1
if _busyCursor > 0:
setCursorToBusy(process)
def rootPopBusyCursor():
global _busyCursor
_busyCursor -= 1
if _busyCursor <= 0:
setCursorToNormal()
def getBusyCursorStatus():
global _busyCursor
return _busyCursor
class MnemonicLabel(gtk.Label):
def __init__(self, text="", alignment = None):
gtk.Label.__init__(self, "")
self.set_text_with_mnemonic(text)
if alignment is not None:
apply(self.set_alignment, alignment)
class WrappingLabel(gtk.Label):
def __init__(self, label=""):
gtk.Label.__init__(self, label)
self.set_line_wrap(True)
self.ignoreEvents = 0
widgetExpander(self)
def titleBarMousePressCB(widget, event, data):
if event.type & gtk.gdk.BUTTON_PRESS:
(x, y) = data["window"].get_position()
data["state"] = 1
data["button"] = event.button
data["deltax"] = event.x_root - x
data["deltay"] = event.y_root - y
def titleBarMouseReleaseCB(widget, event, data):
if data["state"] and event.button == data["button"]:
data["state"] = 0
data["button"] = 0
data["deltax"] = 0
data["deltay"] = 0
def titleBarMotionEventCB(widget, event, data):
if data["state"]:
newx = event.x_root - data["deltax"]
newy = event.y_root - data["deltay"]
if newx < 0:
newx = 0
if newy < 0:
newy = 0
(w, h) = data["window"].get_size()
if (newx+w) > gtk.gdk.screen_width():
newx = gtk.gdk.screen_width() - w
if (newy+20) > (gtk.gdk.screen_height()):
newy = gtk.gdk.screen_height() - 20
data["window"].move(int(newx), int(newy))
def addFrame(dialog, title=None):
# make screen shots work
dialog.connect ("key-release-event", handlePrintScrnRelease)
if title:
dialog.set_title(title)
def findGladeFile(file):
path = os.environ.get("GLADEPATH", "./:ui/:/tmp/updates/:/tmp/updates/ui/")
for dir in path.split(":"):
fn = dir + file
if os.access(fn, os.R_OK):
return fn
raise RuntimeError, "Unable to find glade file %s" % file
def getGladeWidget(file, rootwidget, i18ndomain="anaconda"):
f = findGladeFile(file)
xml = gtk.glade.XML(f, root = rootwidget, domain = i18ndomain)
w = xml.get_widget(rootwidget)
if w is None:
raise RuntimeError, "Unable to find root widget %s in %s" %(rootwidget, file)
return (xml, w)
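# Hedged usage sketch (not part of the original module): loading a dialog from
# a glade file with getGladeWidget(). The file and widget names below are
# hypothetical; real callers (e.g. InstallKeyWindow) follow the same pattern.
def _exampleGladeUsage():
    (xml, dialog) = getGladeWidget("example.glade", "exampleDialog")
    label = xml.get_widget("exampleLabel")
    label.set_text(_("Example text"))
    return dialog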
def findPixmap(file):
path = os.environ.get("PIXMAPPATH", "./:pixmaps/:/tmp/updates/:/tmp/updates/pixmaps/")
for dir in path.split(":"):
fn = dir + file
if os.access(fn, os.R_OK):
return fn
return None
def getPixbuf(file):
fn = findPixmap(file)
if not fn:
log.error("unable to load %s" %(file,))
return None
try:
pixbuf = gtk.gdk.pixbuf_new_from_file(fn)
except RuntimeError, msg:
log.error("unable to read %s: %s" %(file, msg))
pixbuf = None
return pixbuf
def readImageFromFile(file, dither = False, image = None):
pixbuf = getPixbuf(file)
if pixbuf is None:
log.warning("can't find pixmap %s" %(file,))
return None
if image is None:
p = gtk.Image()
else:
p = image
if dither:
(pixmap, mask) = pixbuf.render_pixmap_and_mask()
pixmap.draw_pixbuf(gtk.gdk.GC(pixmap), pixbuf, 0, 0, 0, 0,
pixbuf.get_width(), pixbuf.get_height(),
gtk.gdk.RGB_DITHER_MAX, 0, 0)
p = gtk.Image()
p.set_from_pixmap(pixmap, mask)
else:
source = gtk.IconSource()
source.set_pixbuf(pixbuf)
source.set_size(gtk.ICON_SIZE_DIALOG)
source.set_size_wildcarded(False)
iconset = gtk.IconSet()
iconset.add_source(source)
p.set_from_icon_set(iconset, gtk.ICON_SIZE_DIALOG)
return p
class WaitWindow:
def __init__(self, title, text, parent = None):
if flags.livecdInstall:
self.window = gtk.Window()
if parent:
self.window.set_transient_for(parent)
else:
self.window = gtk.Window()
self.window.set_modal(True)
self.window.set_type_hint (gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
self.window.set_title(title)
self.window.set_position(gtk.WIN_POS_CENTER)
label = WrappingLabel(text)
box = gtk.Frame()
box.set_border_width(10)
box.add(label)
box.set_shadow_type(gtk.SHADOW_NONE)
self.window.add(box)
box.show_all()
addFrame(self.window)
# Displaying windows should not be done outside of the gtk
# mainloop. With metacity this bites us and we have to do
# window.show_now() AND refresh() to correctly display the window and
# its contents:
self.window.show_now()
rootPushBusyCursor()
self.refresh()
def refresh(self):
processEvents()
def pop(self):
self.window.destroy()
rootPopBusyCursor()
class ProgressWindow:
def __init__(self, title, text, total, updpct = 0.05, updsecs=10,
parent = None, pulse = False):
if flags.livecdInstall:
self.window = gtk.Window()
if parent:
self.window.set_transient_for(parent)
else:
self.window = gtk.Window()
self.window.set_modal(True)
self.window.set_type_hint (gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
self.window.set_title (title)
self.window.set_position (gtk.WIN_POS_CENTER)
self.lastUpdate = time.time()
self.updsecs = updsecs
box = gtk.VBox (False, 5)
box.set_border_width (10)
label = WrappingLabel (text)
label.set_alignment (0.0, 0.5)
box.pack_start (label, False)
self.total = total
self.updpct = updpct
self.progress = gtk.ProgressBar ()
box.pack_start (self.progress, True)
box.show_all()
self.window.add(box)
addFrame(self.window)
# see comment at WaitWindow.__init__():
self.window.show_now ()
rootPushBusyCursor()
self.refresh()
def refresh(self):
processEvents()
def pulse(self):
then = self.lastUpdate
now = time.time()
delta = now-then
if delta < 0.01:
return
self.progress.set_pulse_step(self.updpct)
self.lastUpdate = now
# if we've had a largish gap, some smoothing does actually help,
# but don't go crazy
if delta > 2:
delta=2
while delta > 0:
self.progress.pulse()
processEvents()
delta -= 0.05
def set (self, amount):
# only update widget if we've changed by 5% or our timeout has
# expired
curval = self.progress.get_fraction()
newval = float (amount) / self.total
then = self.lastUpdate
now = time.time()
if newval < 0.998:
if ((newval - curval) < self.updpct and (now-then) < self.updsecs):
return
self.lastUpdate = now
self.progress.set_fraction (newval)
processEvents ()
def pop(self):
self.window.destroy ()
rootPopBusyCursor()
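# Hedged usage sketch (not part of the original module): typical driving of a
# ProgressWindow around a unit of work. The title, text and totals are
# arbitrary placeholders.
def _exampleProgressWindow():
    pw = ProgressWindow(_("Example"), _("Working..."), total=100)
    try:
        for amount in range(0, 101, 10):
            # ... perform a slice of the work here ...
            pw.set(amount)
    finally:
        pw.pop()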
class InstallKeyWindow:
def __init__(self, anaconda, key):
(keyxml, self.win) = getGladeWidget("instkey.glade", "instkeyDialog")
if anaconda.id.instClass.instkeydesc is not None:
w = keyxml.get_widget("instkeyLabel")
w.set_text(_(anaconda.id.instClass.instkeydesc))
if not anaconda.id.instClass.allowinstkeyskip:
keyxml.get_widget("skipRadio").hide()
keyName = _(anaconda.id.instClass.instkeyname)
if anaconda.id.instClass.instkeyname is None:
keyName = _("Installation Key")
# set the install key name based on the installclass
for l in ("instkeyLabel", "keyEntryLabel", "skipLabel"):
w = keyxml.get_widget(l)
t = w.get_text()
w.set_text(t % {"instkey": keyName})
self.entry = keyxml.get_widget("keyEntry")
self.entry.set_text(key)
self.entry.set_sensitive(True)
self.keyradio = keyxml.get_widget("keyRadio")
self.skipradio = keyxml.get_widget("skipRadio")
self.rc = 0
if anaconda.id.instClass.skipkey:
self.skipradio.set_active(True)
else:
self.entry.grab_focus()
self.win.connect("key-release-event", self.keyRelease)
addFrame(self.win, title=keyName)
def keyRelease(self, window, event):
# XXX hack: remove this, too, when the accelerators work again
if event.keyval == gtk.keysyms.F12:
window.response(1)
def run(self):
self.win.show()
self.rc = self.win.run()
return self.rc
def get_key(self):
if self.skipradio.get_active():
return SKIP_KEY
key = self.entry.get_text()
        key = key.strip()
return key
def destroy(self):
self.win.destroy()
class luksPassphraseWindow:
def __init__(self, passphrase=None, preexist = False, parent = None):
luksxml = gtk.glade.XML(findGladeFile("lukspassphrase.glade"),
domain="anaconda",
root="luksPassphraseDialog")
self.passphraseEntry = luksxml.get_widget("passphraseEntry")
self.passphraseEntry.set_visibility(False)
self.confirmEntry = luksxml.get_widget("confirmEntry")
self.confirmEntry.set_visibility(False)
self.win = luksxml.get_widget("luksPassphraseDialog")
self.okButton = luksxml.get_widget("okbutton1")
self.globalcheckbutton = luksxml.get_widget("globalcheckbutton")
self.isglobal = preexist
if not preexist:
self.globalcheckbutton.hide()
else:
self.globalcheckbutton.set_active(True)
self.minimumLength = 8 # arbitrary; should probably be much larger
if passphrase:
self.initialPassphrase = passphrase
self.passphraseEntry.set_text(passphrase)
self.confirmEntry.set_text(passphrase)
else:
self.initialPassphrase = ""
txt = _("Choose a passphrase for the encrypted devices. "
"You will be prompted for this passphrase during system "
"boot.")
luksxml.get_widget("mainLabel").set_text(txt)
if parent:
self.win.set_transient_for(parent)
addFrame(self.win)
def run(self):
self.win.show()
while True:
self.passphraseEntry.grab_focus()
self.rc = self.win.run()
if self.rc == gtk.RESPONSE_OK:
passphrase = self.passphraseEntry.get_text()
confirm = self.confirmEntry.get_text()
if passphrase != confirm:
MessageWindow(_("Error with passphrase"),
_("The passphrases you entered were "
"different. Please try again."),
type = "ok", custom_icon = "error")
self.confirmEntry.set_text("")
continue
if len(passphrase) < self.minimumLength:
MessageWindow(_("Error with passphrase"),
_("The passphrase must be at least "
"eight characters long."),
type = "ok", custom_icon = "error")
self.passphraseEntry.set_text("")
self.confirmEntry.set_text("")
continue
if self.isglobal:
self.isglobal = self.globalcheckbutton.get_active()
else:
self.passphraseEntry.set_text(self.initialPassphrase)
self.confirmEntry.set_text(self.initialPassphrase)
return self.rc
def getPassphrase(self):
return self.passphraseEntry.get_text()
def getGlobal(self):
return self.isglobal
def getrc(self):
return self.rc
def destroy(self):
self.win.destroy()
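# Usage sketch (illustrative): run() loops until both entries match and meet
# the minimum length, so callers read the result once afterwards.
#
#   d = luksPassphraseWindow(preexist=True)
#   if d.run() == gtk.RESPONSE_OK:
#       passphrase, isglobal = d.getPassphrase(), d.getGlobal()
#   d.destroy()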
class PassphraseEntryWindow:
def __init__(self, device, parent = None):
def ok(*args):
self.win.response(gtk.RESPONSE_OK)
xml = gtk.glade.XML(findGladeFile("lukspassphrase.glade"),
domain="anaconda",
root="passphraseEntryDialog")
self.txt = _("Device %s is encrypted. In order to "
"access the device's contents during "
"installation you must enter the device's "
"passphrase below.") % (device,)
self.win = xml.get_widget("passphraseEntryDialog")
self.passphraseLabel = xml.get_widget("passphraseLabel")
self.passphraseEntry = xml.get_widget("passphraseEntry2")
self.globalcheckbutton = xml.get_widget("globalcheckbutton")
if parent:
self.win.set_transient_for(parent)
self.passphraseEntry.connect('activate', ok)
addFrame(self.win)
def run(self):
self.win.show()
self.passphraseLabel.set_text(self.txt)
self.passphraseEntry.grab_focus()
busycursor = getBusyCursorStatus()
setCursorToNormal()
rc = self.win.run()
passphrase = None
isglobal = False
if rc == gtk.RESPONSE_OK:
passphrase = self.passphraseEntry.get_text()
isglobal = self.globalcheckbutton.get_active()
if busycursor:
setCursorToBusy()
self.rc = (passphrase, isglobal)
return self.rc
def getrc(self):
return self.rc
def destroy(self):
self.win.destroy()
class MessageWindow:
def getrc (self):
return self.rc
def __init__ (self, title, text, type="ok", default=None, custom_buttons=None, custom_icon=None, run = True, parent = None, destroyAfterRun = True):
self.debugRid = None
self.title = title
if flags.autostep:
self.rc = 1
return
self.rc = None
self.framed = False
self.doCustom = False
style = 0
if type == 'ok':
buttons = gtk.BUTTONS_OK
style = gtk.MESSAGE_INFO
elif type == 'warning':
buttons = gtk.BUTTONS_OK
style = gtk.MESSAGE_WARNING
elif type == 'okcancel':
buttons = gtk.BUTTONS_OK_CANCEL
style = gtk.MESSAGE_WARNING
elif type == 'yesno':
buttons = gtk.BUTTONS_YES_NO
style = gtk.MESSAGE_QUESTION
elif type == 'custom':
self.doCustom = True
buttons = gtk.BUTTONS_NONE
style = gtk.MESSAGE_QUESTION
if custom_icon == "warning":
style = gtk.MESSAGE_WARNING
elif custom_icon == "question":
style = gtk.MESSAGE_QUESTION
elif custom_icon == "error":
style = gtk.MESSAGE_ERROR
elif custom_icon == "info":
style = gtk.MESSAGE_INFO
self.dialog = gtk.MessageDialog(mainWindow, 0, style, buttons, str(text))
if parent:
self.dialog.set_transient_for(parent)
if self.doCustom:
rid=0
for button in custom_buttons:
if button == _("Cancel"):
tbutton = "gtk-cancel"
else:
tbutton = button
widget = self.dialog.add_button(tbutton, rid)
rid = rid + 1
if default is not None:
defaultchoice = default
else:
defaultchoice = rid - 1
            if flags.debug and _("_Debug") not in custom_buttons:
widget = self.dialog.add_button(_("_Debug"), rid)
self.debugRid = rid
rid += 1
else:
if default == "no":
defaultchoice = 0
elif default == "yes" or default == "ok":
defaultchoice = 1
else:
defaultchoice = 0
self.dialog.set_position (gtk.WIN_POS_CENTER)
self.dialog.set_default_response(defaultchoice)
if run:
self.run(destroyAfterRun)
def run(self, destroy = False):
if not self.framed:
addFrame(self.dialog, title=self.title)
self.framed = True
self.dialog.show_all ()
# XXX - Messy - turn off busy cursor if necessary
busycursor = getBusyCursorStatus()
setCursorToNormal()
self.rc = self.dialog.run()
if not self.doCustom:
if self.rc in [gtk.RESPONSE_OK, gtk.RESPONSE_YES]:
self.rc = 1
elif self.rc in [gtk.RESPONSE_CANCEL, gtk.RESPONSE_NO,
gtk.RESPONSE_CLOSE, gtk.RESPONSE_DELETE_EVENT]:
self.rc = 0
else:
# generated by Esc key
if self.rc == gtk.RESPONSE_DELETE_EVENT:
self.rc = 0
        if self.debugRid is not None and self.rc == self.debugRid:
self.debugClicked(self)
return self.run(destroy)
if destroy:
self.dialog.destroy()
# restore busy cursor
if busycursor:
setCursorToBusy()
def debugClicked (self, *args):
try:
# switch to VC1 so we can debug
isys.vtActivate (1)
except SystemError:
pass
import pdb
try:
pdb.set_trace()
except:
sys.exit(-1)
try:
# switch back
isys.vtActivate (6)
except SystemError:
pass
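# Usage sketch (illustrative): for the builtin types getrc() collapses the
# gtk response to 1 (ok/yes) or 0 (cancel/no/closed); "custom" dialogs
# return the index of the clicked button instead.
#
#   rc = MessageWindow(_("Warning"), msg, type="yesno", default="no").getrc()
#   if rc == 1:
#       proceed()                    # hypothetical confirmation handler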
class ReinitializeWindow(MessageWindow):
def __init__ (self, title, path, size, description, details,
default=None, run=True, parent=None, destroyAfterRun=True):
self.debugRid = None
self.title = title
if flags.autostep:
self.rc = 1
return
self.rc = None
self.framed = False
self.doCustom = False
xml = gtk.glade.XML(findGladeFile("reinitialize-dialog.glade"),
domain="anaconda")
self.dialog = xml.get_widget("reinitializeDialog")
self.apply_to_all = xml.get_widget("apply_to_all")
self.label = xml.get_widget("disk_label")
text = "<b>%s</b>\n%s MB\t%s" % (description, size, path)
self.label.set_markup(text)
if parent:
self.dialog.set_transient_for(parent)
self.dialog.set_position(gtk.WIN_POS_CENTER)
if flags.debug:
widget = self.dialog.add_button(_("_Debug"), 2)
self.debugRid = 2
defaultchoice = 0 #no
self.dialog.set_default_response(defaultchoice)
if run:
self.run(destroyAfterRun)
def run(self, destroy=False):
MessageWindow.run(self, destroy)
apply_all = self.apply_to_all.get_active()
        # doCustom is false, so self.rc will be set as follows:
# if "Yes, discard" was clicked - self.rc = 1
# if "No, keep" was clicked - self.rc = 0
if self.rc == 1: #yes
self.rc = 3 if apply_all else 2
elif self.rc == 0: #no
self.rc = 1 if apply_all else 0
class DetailedMessageWindow(MessageWindow):
def __init__(self, title, text, longText=None, type="ok", default=None, custom_buttons=None, custom_icon=None, run=True, parent=None, destroyAfterRun=True, expanded=False):
self.title = title
if flags.autostep:
self.rc = 1
return
self.debugRid = None
self.rc = None
self.framed = False
self.doCustom = False
if type == 'ok':
buttons = ["gtk-ok"]
elif type == 'warning':
buttons = ["gtk-ok"]
elif type == 'okcancel':
buttons = ["gtk-ok", "gtk-cancel"]
elif type == 'yesno':
buttons = ["gtk-yes", "gtk-no"]
elif type == 'custom':
self.doCustom = True
buttons = custom_buttons
xml = gtk.glade.XML(findGladeFile("detailed-dialog.glade"), domain="anaconda")
self.dialog = xml.get_widget("detailedDialog")
self.mainVBox = xml.get_widget("mainVBox")
self.hbox = xml.get_widget("hbox1")
self.info = xml.get_widget("info")
self.detailedExpander = xml.get_widget("detailedExpander")
self.detailedView = xml.get_widget("detailedView")
self.detailedExpander.set_expanded(expanded)
if parent:
self.dialog.set_transient_for(parent)
if custom_icon:
img = gtk.Image()
img.set_from_file(custom_icon)
self.hbox.pack_start(img)
self.hbox.reorder_child(img, 0)
rid = 0
for button in buttons:
self.dialog.add_button(button, rid)
rid += 1
if self.doCustom:
defaultchoice = rid-1
            if flags.debug and _("_Debug") not in buttons:
self.dialog.add_button(_("_Debug"), rid)
self.debugRid = rid
rid += 1
else:
if default == "no":
defaultchoice = 0
elif default == "yes" or default == "ok":
defaultchoice = 1
else:
defaultchoice = 0
self.info.set_text(text)
if longText:
textbuf = gtk.TextBuffer()
iter = textbuf.get_start_iter()
for line in longText:
if __builtins__.get("type")(line) != unicode:
try:
line = unicode(line, encoding='utf-8')
except UnicodeDecodeError, e:
log.error("UnicodeDecodeException: line = %s" % (line,))
log.error("UnicodeDecodeException: %s" % (str(e),))
textbuf.insert(iter, line)
self.detailedView.set_buffer(textbuf)
else:
self.mainVBox.remove(self.detailedExpander)
self.dialog.set_position (gtk.WIN_POS_CENTER)
self.dialog.set_default_response(defaultchoice)
if run:
self.run(destroyAfterRun)
class EntryWindow(MessageWindow):
def __init__ (self, title, text, prompt, entrylength = None):
mainWindow = None
MessageWindow.__init__(self, title, text, type = "okcancel", custom_icon="question", run = False)
self.entry = gtk.Entry()
if entrylength:
self.entry.set_width_chars(entrylength)
self.entry.set_max_length(entrylength)
# eww, eww, eww... but if we pack in the vbox, it goes to the right
# place!
self.dialog.child.pack_start(self.entry)
def run(self):
MessageWindow.run(self)
if self.rc == 0:
return None
t = self.entry.get_text()
        t = t.strip()
if len(t) == 0:
return None
return t
def destroy(self):
self.dialog.destroy()
class InstallInterface(InstallInterfaceBase):
def __init__ (self):
InstallInterfaceBase.__init__(self)
self.icw = None
root = gtk.gdk.get_default_root_window()
cursor = gtk.gdk.Cursor(gtk.gdk.LEFT_PTR)
root.set_cursor(cursor)
self._initLabelAnswers = {}
self._inconsistentLVMAnswers = {}
def __del__ (self):
pass
def shutdown (self):
pass
def suspend(self):
pass
def resume(self):
pass
# just_setup is used for [Configure Network] button
def enableNetwork(self, just_setup=False):
if len(self.anaconda.id.network.netdevices) == 0:
return False
nm_controlled_devices = [devname for (devname, dev)
in self.anaconda.id.network.netdevices.items()
if not dev.usedByFCoE(self.anaconda)]
if not just_setup and not nm_controlled_devices:
return False
from network_gui import (runNMCE,
selectInstallNetDeviceDialog)
networkEnabled = False
while not networkEnabled:
if just_setup:
install_device = None
else:
install_device = selectInstallNetDeviceDialog(self.anaconda.id.network,
nm_controlled_devices)
if not install_device:
break
# update ifcfg files for nm-c-e
self.anaconda.id.network.setNMControlledDevices(nm_controlled_devices)
self.anaconda.id.network.writeIfcfgFiles()
network.logIfcfgFiles(message="Dump before nm-c-e (can race "
"with ifcfg updating). ")
runNMCE(self.anaconda)
network.logIfcfgFiles(message="Dump after nm-c-e. ")
self.anaconda.id.network.update()
if just_setup:
waited_devs = self.anaconda.id.network.getOnbootControlledIfaces()
else:
waited_devs = [install_device]
self.anaconda.id.network.updateActiveDevices([install_device])
self.anaconda.id.network.write()
if waited_devs:
w = WaitWindow(_("Waiting for NetworkManager"),
_("Waiting for NetworkManager to activate "
"these devices: %s" % ",".join(waited_devs)))
failed_devs = self.anaconda.id.network.waitForDevicesActivation(waited_devs)
w.pop()
if just_setup:
if failed_devs:
self._handleDeviceActivationFail(failed_devs)
else:
networkEnabled = install_device not in failed_devs
if not networkEnabled:
self._handleNetworkError(install_device)
if just_setup:
break
return networkEnabled
def _handleDeviceActivationFail(self, devices):
d = gtk.MessageDialog(None, 0, gtk.MESSAGE_ERROR,
gtk.BUTTONS_OK,
_("Failed to activate these "
"network interfaces: %s" %
",".join(devices)))
d.set_title(_("Network Configuration"))
d.set_position(gtk.WIN_POS_CENTER)
addFrame(d)
d.run()
d.destroy()
def _handleNetworkError(self, field):
d = gtk.MessageDialog(None, 0, gtk.MESSAGE_ERROR,
gtk.BUTTONS_OK,
_("An error occurred trying to bring up the "
"%s network interface.") % (field,))
d.set_title(_("Error Enabling Network"))
d.set_position(gtk.WIN_POS_CENTER)
addFrame(d)
d.run()
d.destroy()
def setPackageProgressWindow (self, ppw):
self.ppw = ppw
def waitWindow (self, title, text):
if self.icw:
return WaitWindow (title, text, self.icw.window)
else:
return WaitWindow (title, text)
def progressWindow (self, title, text, total, updpct = 0.05, pulse = False):
if self.icw:
return ProgressWindow (title, text, total, updpct,
parent = self.icw.window, pulse = pulse)
else:
return ProgressWindow (title, text, total, updpct, pulse = pulse)
def messageWindow(self, title, text, type="ok", default = None,
custom_buttons=None, custom_icon=None):
if self.icw:
parent = self.icw.window
else:
parent = None
rc = MessageWindow (title, text, type, default,
custom_buttons, custom_icon, run=True, parent=parent).getrc()
return rc
def reinitializeWindow(self, title, path, size, description, details):
if self.icw:
parent = self.icw.window
else:
parent = None
rc = ReinitializeWindow(title, path, size, description, details,
parent=parent).getrc()
return rc
def createRepoWindow(self):
from task_gui import RepoCreator
dialog = RepoCreator(self.anaconda)
dialog.createDialog()
dialog.run()
def editRepoWindow(self, repoObj):
from task_gui import RepoEditor
dialog = RepoEditor(self.anaconda, repoObj)
dialog.createDialog()
dialog.run()
def methodstrRepoWindow(self, methodstr, exception):
from task_gui import RepoMethodstrEditor
self.messageWindow(
_("Error Setting Up Repository"),
_("The following error occurred while setting up the "
"installation repository:\n\n%(e)s\n\nPlease provide the "
"correct information for installing %(productName)s.")
% {'e': exception, 'productName': productName})
dialog = RepoMethodstrEditor(self.anaconda, methodstr)
dialog.createDialog()
return dialog.run()
def entryWindow(self, title, text, type="ok", entrylength = None):
d = EntryWindow(title, text, type, entrylength)
rc = d.run()
d.destroy()
return rc
def detailedMessageWindow(self, title, text, longText=None, type="ok",
default=None, custom_buttons=None,
custom_icon=None, expanded=False):
if self.icw:
parent = self.icw.window
else:
parent = None
rc = DetailedMessageWindow (title, text, longText, type, default,
custom_buttons, custom_icon, run=True,
parent=parent, expanded=expanded).getrc()
return rc
def mainExceptionWindow(self, shortText, longTextFile):
from meh.ui.gui import MainExceptionWindow
log.critical(shortText)
win = MainExceptionWindow (shortText, longTextFile)
addFrame(win.dialog)
return win
def saveExceptionWindow(self, accountManager, signature):
from meh.ui.gui import SaveExceptionWindow
network.saveExceptionEnableNetwork(self)
win = SaveExceptionWindow (accountManager, signature)
win.run()
def exitWindow(self, title, text):
if self.icw:
parent = self.icw.window
else:
parent = None
rc = MessageWindow (title, text, type="custom",
custom_icon="info", parent=parent,
custom_buttons=[_("_Exit installer")]).getrc()
return rc
def getLuksPassphrase(self, passphrase = "", preexist = False):
if self.icw:
parent = self.icw.window
else:
parent = None
d = luksPassphraseWindow(passphrase, parent = parent,
preexist = preexist)
rc = d.run()
passphrase = d.getPassphrase()
isglobal = d.getGlobal()
d.destroy()
return (passphrase, isglobal)
def passphraseEntryWindow(self, device):
if self.icw:
parent = self.icw.window
else:
parent = None
d = PassphraseEntryWindow(device, parent = parent)
rc = d.run()
d.destroy()
return rc
def resetInitializeDiskQuestion(self):
self._initLabelAnswers = {}
def questionInitializeDisk(self, path, description, size, details=""):
retVal = False # The less destructive default
if not path:
return retVal
# we are caching answers so that we don't
# ask in each storage.reset() again
if path in self._initLabelAnswers:
log.info("UI not asking about disk initialization, "
"using cached answer: %s" % self._initLabelAnswers[path])
return self._initLabelAnswers[path]
elif "all" in self._initLabelAnswers:
log.info("UI not asking about disk initialization, "
"using cached answer: %s" % self._initLabelAnswers["all"])
return self._initLabelAnswers["all"]
rc = self.reinitializeWindow(_("Storage Device Warning"),
path, size, description, details)
if rc == 0:
retVal = False
elif rc == 1:
path = "all"
retVal = False
elif rc == 2:
retVal = True
elif rc == 3:
path = "all"
retVal = True
self._initLabelAnswers[path] = retVal
return retVal
def resetReinitInconsistentLVMQuestion(self):
self._inconsistentLVMAnswers = {}
def questionReinitInconsistentLVM(self, pv_names=None, lv_name=None, vg_name=None):
retVal = False # The less destructive default
allSet = frozenset(["all"])
if not pv_names or (lv_name is None and vg_name is None):
return retVal
# We are caching answers so that we don't ask for ignoring
# in each storage.reset() again (note that reinitialization is
# done right after confirmation in dialog, not as a planned
# action).
key = frozenset(pv_names)
if key in self._inconsistentLVMAnswers:
log.info("UI not asking about disk initialization, "
"using cached answer: %s" % self._inconsistentLVMAnswers[key])
return self._inconsistentLVMAnswers[key]
elif allSet in self._inconsistentLVMAnswers:
log.info("UI not asking about disk initialization, "
"using cached answer: %s" % self._inconsistentLVMAnswers[allSet])
return self._inconsistentLVMAnswers[allSet]
if vg_name is not None:
message = "Volume Group %s" % vg_name
elif lv_name is not None:
message = "Logical Volume %s" % lv_name
na = {'msg': message, 'pvs': ", ".join(pv_names)}
rc = self.messageWindow(_("Warning"),
_("Error processing LVM.\n"
"There is inconsistent LVM data on %(msg)s. You can "
"reinitialize all related PVs (%(pvs)s) which will erase "
"the LVM metadata, or ignore which will preserve the "
"contents. This action may also be applied to all other "
"PVs with inconsistent metadata.") % na,
type="custom",
custom_buttons = [ _("_Ignore"),
_("Ignore _all"),
_("_Re-initialize"),
_("Re-ini_tialize all") ],
custom_icon="question")
if rc == 0:
retVal = False
elif rc == 1:
key = allSet
retVal = False
elif rc == 2:
retVal = True
elif rc == 3:
key = allSet
retVal = True
self._inconsistentLVMAnswers[key] = retVal
return retVal
def beep(self):
gtk.gdk.beep()
def kickstartErrorWindow(self, text):
s = _("The following error was found while parsing the "
"kickstart configuration file:\n\n%s") %(text,)
return self.messageWindow(_("Error Parsing Kickstart Config"),
s,
type = "custom",
custom_buttons = [_("_Exit installer")],
custom_icon = "error")
def getBootdisk (self):
return None
def run(self, anaconda):
self.anaconda = anaconda
# XXX x_already_set is a hack
if anaconda.id.keyboard and not anaconda.id.x_already_set:
anaconda.id.keyboard.activate()
self.icw = InstallControlWindow (self.anaconda)
self.icw.run ()
def setSteps(self, anaconda):
pass
class InstallControlWindow:
def setLanguage (self):
if not self.__dict__.has_key('window'): return
self.reloadRcQueued = 1
# need to reload our widgets
self.setLtR()
# reload the glade file, although we're going to keep our toplevel
self.loadGlade()
self.window.destroy()
self.window = self.mainxml.get_widget("mainWindow")
self.createWidgets()
self.connectSignals()
self.setScreen()
self.window.show()
# calling present() will focus the window in the window manager so
# the mnemonics work without additional clicking
self.window.present()
def setLtR(self):
ltrrtl = gettext.dgettext("gtk20", "default:LTR")
if ltrrtl == "default:RTL":
gtk.widget_set_default_direction (gtk.TEXT_DIR_RTL)
elif ltrrtl == "default:LTR":
gtk.widget_set_default_direction (gtk.TEXT_DIR_LTR)
else:
log.error("someone didn't translate the ltr bits right: %s" %(ltrrtl,))
gtk.widget_set_default_direction (gtk.TEXT_DIR_LTR)
def prevClicked (self, *args):
try:
self.currentWindow.getPrev ()
except StayOnScreen:
return
self.anaconda.dispatch.gotoPrev()
self.setScreen ()
def nextClicked (self, *args):
try:
rc = self.currentWindow.getNext ()
except StayOnScreen:
return
self.anaconda.dispatch.gotoNext()
self.setScreen ()
def debugClicked (self, *args):
try:
# switch to VC1 so we can debug
isys.vtActivate (1)
except SystemError:
pass
import pdb
try:
pdb.set_trace()
except:
sys.exit(-1)
try:
# switch back
isys.vtActivate (6)
except SystemError:
pass
def handleRenderCallback(self):
self.currentWindow.renderCallback()
if flags.autostep:
if flags.autoscreenshot:
# let things settle down graphically
processEvents()
time.sleep(1)
takeScreenShot()
self.nextClicked()
else:
gobject.source_remove(self.handle)
def setScreen (self):
(step, anaconda) = self.anaconda.dispatch.currentStep()
if step is None:
gtk.main_quit()
return
if not stepToClass[step]:
if self.anaconda.dispatch.dir == DISPATCH_FORWARD:
return self.nextClicked()
else:
return self.prevClicked()
(file, className) = stepToClass[step]
newScreenClass = None
while True:
try:
found = imputil.imp.find_module(file)
loaded = imputil.imp.load_module(className, found[0], found[1],
found[2])
newScreenClass = loaded.__dict__[className]
break
except ImportError, e:
print(e)
win = MessageWindow(_("Error!"),
_("An error occurred when attempting "
"to load an installer interface "
"component.\n\nclassName = %s")
% (className,),
type="custom", custom_icon="warning",
custom_buttons=[_("_Exit"),
_("_Retry")])
if not win.getrc():
msg = _("The system will now reboot.")
buttons = [_("_Reboot")]
MessageWindow(_("Exiting"),
msg,
type="custom",
custom_icon="warning",
custom_buttons=buttons)
sys.exit(0)
ics = InstallControlState (self)
ics.setPrevEnabled(self.anaconda.dispatch.canGoBack())
self.destroyCurrentWindow()
self.currentWindow = newScreenClass(ics)
new_screen = self.currentWindow.getScreen(anaconda)
# If the getScreen method returned None, that means the screen did not
# want to be displayed for some reason and we should skip to the next
# step. However, we do not want to remove the current step from the
# list as later events may cause the screen to be displayed.
if not new_screen:
if self.anaconda.dispatch.dir == DISPATCH_FORWARD:
self.anaconda.dispatch.gotoNext()
else:
self.anaconda.dispatch.gotoPrev()
return self.setScreen()
self.update (ics)
self.installFrame.add(new_screen)
self.installFrame.show_all()
self.currentWindow.focus()
self.handle = gobject.idle_add(self.handleRenderCallback)
if self.reloadRcQueued:
self.window.reset_rc_styles()
self.reloadRcQueued = 0
def destroyCurrentWindow(self):
children = self.installFrame.get_children ()
if children:
child = children[0]
self.installFrame.remove (child)
child.destroy ()
self.currentWindow = None
def update (self, ics):
self.mainxml.get_widget("backButton").set_sensitive(ics.getPrevEnabled())
self.mainxml.get_widget("nextButton").set_sensitive(ics.getNextEnabled())
if ics.getGrabNext():
self.mainxml.get_widget("nextButton").grab_focus()
self.mainxml.get_widget("nextButton").set_flags(gtk.HAS_DEFAULT)
def __init__ (self, anaconda):
self.reloadRcQueued = 0
self.currentWindow = None
self.anaconda = anaconda
self.handle = None
def keyRelease (self, window, event):
if ((event.keyval == gtk.keysyms.KP_Delete
or event.keyval == gtk.keysyms.Delete)
and (event.state & (gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK))):
self._doExit()
# XXX hack: remove me when the accelerators work again.
elif (event.keyval == gtk.keysyms.F12
and self.currentWindow.getICS().getNextEnabled()):
self.nextClicked()
elif event.keyval == gtk.keysyms.Print:
takeScreenShot()
def _doExit (self, *args):
gtk.main_quit()
os._exit(0)
def _doExitConfirm (self, win = None, *args):
# FIXME: translate the string
win = MessageWindow(_("Exit installer"),
_("Are you sure you wish to exit the installer?"),
type="custom", custom_icon="question",
custom_buttons = [_("Cancel"), _("_Exit installer")],
parent = win)
if win.getrc() == 0:
return True
self._doExit()
def createWidgets (self):
self.window.set_title(_("%s Installer") %(productName,))
i = self.mainxml.get_widget("headerImage")
p = readImageFromFile("anaconda_header.png",
dither = False, image = i)
if p is None:
print(_("Unable to load title bar"))
if flags.livecdInstall:
i.hide()
self.window.set_resizable(True)
self.window.maximize()
elif flags.preexisting_x11:
# Forwarded X11, don't take over their whole screen
i.hide()
self.window.set_resizable(True)
else:
# Normal install, full screen
self.window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DESKTOP)
if gtk.gdk.screen_height() != 600:
i.hide()
width = None
height = None
lines = iutil.execWithCapture("xrandr", ["-q"], stderr="/dev/tty5")
lines = lines.splitlines()
xrandr = filter(lambda x: "current" in x, lines)
if xrandr and len(xrandr) == 1:
fields = xrandr[0].split()
pos = fields.index('current')
if len(fields) > pos + 3:
width = int(fields[pos + 1])
height = int(fields[pos + 3].replace(',', ''))
if width and height:
self.window.set_size_request(min(width, 800), min(height, 600))
self.window.maximize()
self.window.show()
if flags.debug:
self.mainxml.get_widget("debugButton").show_now()
self.installFrame = self.mainxml.get_widget("installFrame")
def connectSignals(self):
sigs = { "on_nextButton_clicked": self.nextClicked,
"on_rebootButton_clicked": self.nextClicked,
"on_closeButton_clicked": self._doExit,
"on_backButton_clicked": self.prevClicked,
"on_debugButton_clicked": self.debugClicked,
"on_mainWindow_key_release_event": self.keyRelease,
"on_mainWindow_delete_event": self._doExitConfirm, }
self.mainxml.signal_autoconnect(sigs)
def loadGlade(self):
self.mainxml = gtk.glade.XML(findGladeFile("anaconda.glade"),
domain="anaconda")
def setup_window (self):
self.setLtR()
self.loadGlade()
self.window = self.mainxml.get_widget("mainWindow")
self.createWidgets()
self.connectSignals()
self.setScreen()
self.window.show()
        # calling present() will focus the window in the window manager so
# the mnemonics work without additional clicking
self.window.present()
def busyCursorPush(self):
rootPushBusyCursor()
def busyCursorPop(self):
rootPopBusyCursor()
def run (self):
self.setup_window()
gtk.main()
class InstallControlState:
def __init__ (self, cw):
self.cw = cw
self.prevEnabled = True
self.nextEnabled = True
self.title = _("Install Window")
self.grabNext = True
def setTitle (self, title):
self.title = title
self.cw.update (self)
def getTitle (self):
return self.title
def setPrevEnabled (self, value):
if value == self.prevEnabled: return
self.prevEnabled = value
self.cw.update (self)
def getPrevEnabled (self):
return self.prevEnabled
def setNextEnabled (self, value):
if value == self.nextEnabled: return
self.nextEnabled = value
self.cw.update (self)
def getNextEnabled (self):
return self.nextEnabled
def setScreenPrev (self):
self.cw.prevClicked ()
def setScreenNext (self):
self.cw.nextClicked ()
def setGrabNext (self, value):
self.grabNext = value
self.cw.update (self)
def getGrabNext (self):
return self.grabNext
def getICW (self):
return self.cw
| bsd-2-clause | 3,909,030,833,548,092,400 | 32.299523 | 176 | 0.554256 | false |
Kotaimen/awscfncli | awscfncli2/cli/utils/pprint.py | 1 | 15613 | """Proxy interfaces for cli print."""
import difflib
import json
import backoff
import botocore.exceptions
import click
import yaml
from .colormaps import CHANGESET_STATUS_TO_COLOR, CHANGESET_ACTION_TO_COLOR, \
CHANGESET_REPLACEMENT_TO_COLOR, DRIFT_STATUS_TO_COLOR, \
STACK_STATUS_TO_COLOR, CHANGESET_RESOURCE_REPLACEMENT_TO_COLOR
from .common import is_rate_limited_exception, is_not_rate_limited_exception
from .events import start_tail_stack_events_daemon
from .pager import custom_paginator
def echo_pair(key, value=None, indent=0,
value_style=None, key_style=None,
sep=': '):
"""Pretty print a key value pair
:param key: The key
:param value: The value
:param indent: Number of leading spaces
    :param value_style: click.style parameters of the value as a dict, default is None
    :param key_style: click.style parameters of the key as a dict, default is bold text
:param sep: separator between key and value
"""
assert key
key = ' ' * indent + key + sep
if key_style is None:
click.secho(key, bold=False, nl=False)
else:
click.secho(key, nl=False, **key_style)
if value is None:
click.echo('')
else:
if value_style is None:
click.echo(value)
else:
click.secho(value, **value_style)
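# Usage sketch (illustrative): styles are passed straight to click.secho.
#
#   echo_pair('Status', 'CREATE_COMPLETE',
#             value_style={'fg': 'green'}, indent=2)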
def echo_pair_if_exists(data, key, value, indent=2, key_style=None,
value_style=None):
if value in data:
echo_pair(key, data[value], indent=indent,
key_style=key_style, value_style=value_style, )
class StackPrettyPrinter(object):
"""Pretty print stack parameter, status and events
Calls click.secho to do the heavy lifting.
"""
def __init__(self, verbosity=0):
self.verbosity = verbosity
def secho(self, text, nl=True, err=False, color=None, **styles):
click.secho(text, nl=nl, err=err, color=color, **styles)
def echo_pair(self, key, value=None, indent=0,
value_style=None, key_style=None,
sep=': '):
echo_pair(key, value=value, indent=indent, value_style=value_style,
key_style=key_style, sep=sep)
def confirm(self, *args, **argv):
return click.confirm(*args, **argv)
def pprint_stack_name(self, qualified_name, stack_name, prefix=None):
"""Print stack qualified name"""
if prefix:
click.secho(prefix, bold=True, nl=False)
click.secho(qualified_name, bold=True)
echo_pair('StackName', stack_name)
def pprint_session(self, session, retrieve_identity=True):
"""Print boto3 session"""
echo_pair('Profile', session.profile_name)
echo_pair('Region', session.region_name)
if retrieve_identity:
sts = session.client('sts')
identity = sts.get_caller_identity()
echo_pair('Account', identity['Account'])
echo_pair('Identity', identity['Arn'])
def pprint_metadata(self, metadata):
"""Print stack metadata"""
if self.verbosity > 0:
click.secho('--- Stack Metadata ---', fg='white', dim=True)
for k, v in metadata.items():
echo_pair(k, repr(v),
key_style={'fg': 'white', 'dim': True},
value_style={'fg': 'white', 'dim': True}
)
def pprint_parameters(self, parameters):
"""Print stack parameters"""
if self.verbosity > 0:
click.secho('--- Stack Creation Parameters ---', fg='white',
dim=True)
for k, v in parameters.items():
if k not in ('TemplateBody', 'StackPolicyBody'):
echo_pair(k, repr(v),
key_style={'fg': 'white', 'dim': True},
value_style={'fg': 'white', 'dim': True}
)
else:
click.secho('--- start of {} ---'.format(k), fg='white',
dim=True)
click.secho(v, fg='white', dim=True)
click.secho('--- end of {} ---'.format(k), fg='white',
dim=True)
def pprint_stack(self, stack, status=False):
"""Pretty print stack status"""
# echo_pair('StackName', stack.stack_name)
if status:
echo_pair('Status', stack.stack_status,
value_style=STACK_STATUS_TO_COLOR[stack.stack_status])
if stack.stack_status == 'STACK_NOT_FOUND':
return
echo_pair('StackID', stack.stack_id)
# echo_pair('Description', stack.description)
echo_pair('Created', stack.creation_time)
if stack.last_updated_time:
echo_pair('Last Updated', stack.last_updated_time)
if stack.capabilities:
echo_pair('Capabilities', ', '.join(stack.capabilities),
value_style={'fg': 'yellow'})
echo_pair('TerminationProtection',
str(stack.enable_termination_protection),
value_style={
'fg': 'red'} if stack.enable_termination_protection else None
)
drift_status = stack.drift_information['StackDriftStatus']
drift_timestamp = stack.drift_information.get('LastCheckTimestamp')
echo_pair('Drift Status', drift_status,
value_style=DRIFT_STATUS_TO_COLOR[drift_status])
if drift_timestamp:
echo_pair('Lasted Checked', drift_timestamp)
def pprint_stack_parameters(self, stack):
if stack.parameters:
echo_pair('Parameters')
for p in stack.parameters:
if 'ResolvedValue' in p:
# SSM parameter
echo_pair(
'%s (%s)' % (p['ParameterKey'], p['ParameterValue']),
p['ResolvedValue'], indent=2)
else:
echo_pair(p['ParameterKey'], p['ParameterValue'], indent=2)
if stack.outputs:
echo_pair('Outputs')
for o in stack.outputs:
echo_pair(o['OutputKey'], o['OutputValue'], indent=2)
if stack.tags:
echo_pair('Tags')
for t in stack.tags:
echo_pair(t['Key'], t['Value'], indent=2)
def pprint_stack_resources(self, stack):
echo_pair('Resources')
for res in stack.resource_summaries.all():
logical_id = res.logical_resource_id
physical_id = res.physical_resource_id
res_type = res.resource_type
status = res.resource_status
status_reason = res.resource_status_reason
drift_status = res.drift_information.get('StackResourceDriftStatus')
drift_timestamp = res.drift_information.get('LastCheckTimestamp',
None)
last_updated = res.last_updated_timestamp
echo_pair('{} ({})'.format(logical_id, res_type), indent=2)
echo_pair('Physical ID', physical_id, indent=4)
echo_pair('Last Updated', last_updated, indent=4)
echo_pair('Status', status,
value_style=STACK_STATUS_TO_COLOR[status],
indent=4)
if status_reason:
echo_pair('Reason', status_reason, indent=6)
echo_pair('Drift Status', drift_status,
value_style=DRIFT_STATUS_TO_COLOR[drift_status], indent=4)
if drift_timestamp:
echo_pair('Lasted Checked', drift_timestamp, indent=6)
def pprint_stack_exports(self, stack, session):
client = session.client('cloudformation')
echo_pair('Exports')
for export in custom_paginator(client.list_exports, 'Exports'):
if export['ExportingStackId'] == stack.stack_id:
echo_pair(export['Name'], export['Value'], indent=2)
try:
for import_ in custom_paginator(client.list_imports,
'Imports',
ExportName=export[
'Name']):
echo_pair('Imported By', import_, indent=4)
                except botocore.exceptions.ClientError:
                    pass
def pprint_changeset(self, result):
status = result['Status']
status_reason = result.get('StatusReason', None)
echo_pair('ChangeSet Status', status,
value_style=CHANGESET_STATUS_TO_COLOR[status])
if status_reason:
echo_pair('Status Reason', status_reason)
echo_pair('Resource Changes')
for change in result['Changes']:
logical_id = change['ResourceChange']['LogicalResourceId']
res_type = change['ResourceChange']['ResourceType']
action = change['ResourceChange']['Action']
replacement = change['ResourceChange'].get('Replacement', None)
change_res_id = change['ResourceChange'].get('PhysicalResourceId',
None)
change_scope = change['ResourceChange'].get('Scope', None)
change_details = {}
            for detail in change['ResourceChange'].get('Details', []):
if detail['Target'].get('Name', None):
if detail['Target']['Name'] not in change_details or detail[
'Evaluation'] == 'Static':
change_details[detail['Target']['Name']] = detail
echo_pair('{} ({})'.format(logical_id, res_type), indent=2)
echo_pair('Action', action,
value_style=CHANGESET_ACTION_TO_COLOR[action], indent=4)
if replacement:
echo_pair('Replacement', replacement,
value_style=CHANGESET_REPLACEMENT_TO_COLOR[
replacement],
indent=4)
if change_res_id:
echo_pair('Physical Resource', change_res_id, indent=4)
if change_scope:
echo_pair('Change Scope', ','.join(change_scope), indent=4)
if len(change_details):
echo_pair('Changed Properties', '', indent=4)
for k, v in change_details.items():
echo_pair(k, indent=6)
echo_pair('Requires Recreation',
v['Target']['RequiresRecreation'],
value_style=
CHANGESET_RESOURCE_REPLACEMENT_TO_COLOR[
v['Target']['RequiresRecreation']], indent=8)
if v.get('CausingEntity', None):
echo_pair('Causing Entity', v['CausingEntity'],
indent=8)
                    echo_pair('Change Source', v['ChangeSource'], indent=8)
def pprint_stack_drift(self, drift):
detection_status = drift['DetectionStatus']
drift_status = drift['StackDriftStatus']
drifted_resources = drift['DriftedStackResourceCount']
timestamp = drift['Timestamp']
echo_pair('Drift Detection Status',
detection_status,
value_style=DRIFT_STATUS_TO_COLOR[detection_status])
echo_pair('Stack Drift Status',
drift_status,
value_style=DRIFT_STATUS_TO_COLOR[drift_status])
echo_pair('Drifted resources',
drifted_resources)
echo_pair('Timestamp', timestamp)
def pprint_resource_drift(self, status):
logical_id = status['LogicalResourceId']
res_type = status['ResourceType']
physical_id = status['PhysicalResourceId']
physical_resource_context = status.get('PhysicalResourceIdContext', [])
drift_status = status['StackResourceDriftStatus']
timestamp = status['Timestamp']
echo_pair('{} ({})'.format(logical_id, res_type), indent=2)
echo_pair('Physical Id', physical_id, indent=4)
for context in physical_resource_context:
echo_pair(context['Key'], context['Value'], indent=4)
echo_pair('Drift Status', drift_status,
value_style=DRIFT_STATUS_TO_COLOR[drift_status], indent=4)
echo_pair('Timestamp', timestamp, indent=4)
if 'ExpectedProperties' not in status:
return
echo_pair('Property Diff', '>', indent=4)
expected = yaml.safe_dump(
json.loads(status['ExpectedProperties']),
default_flow_style=False)
actual = yaml.safe_dump(
json.loads(status['ActualProperties']),
default_flow_style=False)
diff = difflib.unified_diff(
expected.splitlines(), actual.splitlines(),
'Expected', 'Actual', n=5)
for n, line in enumerate(diff):
# skip file names and diff stat
if n < 5: continue
if line.startswith('-'):
click.secho(' ' + line, fg='red')
elif line.startswith('+'):
click.secho(' ' + line, fg='green')
else:
click.secho(' ' + line)
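    # The waiters below retry with exponential backoff, but only for
    # rate-limited WaiterErrors (see is_not_rate_limited_exception);
    # genuine stack failures are re-raised to the caller immediately.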
@backoff.on_exception(backoff.expo, botocore.exceptions.WaiterError, max_tries=10,
giveup=is_not_rate_limited_exception)
def wait_until_deploy_complete(self, session, stack, disable_tail_events=False):
if not disable_tail_events:
start_tail_stack_events_daemon(session, stack, latest_events=0)
waiter = session.client('cloudformation').get_waiter(
'stack_create_complete')
waiter.wait(StackName=stack.stack_id)
@backoff.on_exception(backoff.expo, botocore.exceptions.WaiterError, max_tries=10,
giveup=is_not_rate_limited_exception)
def wait_until_delete_complete(self, session, stack):
start_tail_stack_events_daemon(session, stack)
waiter = session.client('cloudformation').get_waiter(
'stack_delete_complete')
waiter.wait(StackName=stack.stack_id)
@backoff.on_exception(backoff.expo, botocore.exceptions.WaiterError, max_tries=10,
giveup=is_not_rate_limited_exception)
def wait_until_update_complete(self, session, stack, disable_tail_events=False):
if not disable_tail_events:
start_tail_stack_events_daemon(session, stack)
waiter = session.client('cloudformation').get_waiter(
'stack_update_complete')
waiter.wait(StackName=stack.stack_id)
@backoff.on_exception(backoff.expo, botocore.exceptions.WaiterError, max_tries=10,
giveup=is_not_rate_limited_exception)
def wait_until_changset_complete(self, client, changeset_id):
waiter = client.get_waiter('change_set_create_complete')
try:
waiter.wait(ChangeSetName=changeset_id)
except botocore.exceptions.WaiterError as e:
if is_rate_limited_exception(e):
                # the change set might have been created successfully, but we
                # got a throttling error; a retry is needed, so re-raise
raise
click.secho('ChangeSet create failed.', fg='red')
else:
click.secho('ChangeSet create complete.', fg='green')
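# Usage sketch (illustrative, not part of the module): a printer is
# typically created once per CLI invocation and drives both output and
# waiting, e.g.:
#
#   ppt = StackPrettyPrinter(verbosity=1)
#   ppt.pprint_session(session)
#   ppt.wait_until_deploy_complete(session, stack)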
| mit | 8,779,636,657,920,822,000 | 41.542234 | 125 | 0.554538 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/_operations.py | 1 | 4729 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Lists all of the available Storage Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2017_06_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Storage/operations'} # type: ignore
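# Usage sketch (illustrative, not generated code; client setup is assumed):
# the returned ItemPaged is lazy, so HTTP calls happen during iteration.
#
#   for op in client.operations.list():
#       print(op.name)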
| mit | -814,979,723,569,684,100 | 42.385321 | 133 | 0.639459 | false |
rhyolight/nupic.son | tests/app/soc/modules/seeder/logic/ndb_models.py | 1 | 1269 | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ndb model classes for seeder testing."""
from google.appengine.ext import ndb
from melange.appengine import db as db_util
class NdbDummyModel(ndb.Model):
"""A ndb dummy model class for seeder testing."""
boolean = ndb.BooleanProperty(required=True)
name = ndb.StringProperty(required=True)
link = ndb.StringProperty(required=True, validator=db_util.link_validator)
email = ndb.StringProperty(required=True, validator=db_util.email_validator)
numbers = ndb.IntegerProperty(repeated=True)
class NdbKeyProperty(ndb.Model):
"""A ndb model class with KeyProperty for seeder testing."""
name = ndb.StringProperty(required=True)
key = ndb.KeyProperty(required=True)
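# Usage sketch (illustrative): seeded entities must pass the validators
# declared above, e.g.:
#
#   NdbDummyModel(boolean=True, name='dummy',
#                 link='http://example.com', email='test@example.com',
#                 numbers=[1, 2, 3]).put()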
| apache-2.0 | -8,367,067,377,884,833,000 | 36.323529 | 78 | 0.764381 | false |
ruebenramirez/RedisMaxOut | redisMaxOut.py | 1 | 2153 | #!/usr/bin/python
import os
import datetime
import hashlib
import redis
class MaxOutConfig():
def __init__(self):
self.read_config()
def read_config(self):
# connection settings
self.host = os.environ['REDIS_MAXOUT_HOST']
self.port = os.environ['REDIS_MAXOUT_PORT']
self.password = os.environ['REDIS_MAXOUT_PASSWORD']
# TODO: do we even need these?
# loop handler settings
self.iterations = os.environ['REDIS_MAXOUT_LOOP_ITERATIONS']
self.value_multiplier = os.environ['REDIS_MAXOUT_LOOP_VALUE_MULTIPLIER']
self.print_iter = os.environ['REDIS_MAXOUT_LOOP_PRINT_ITER']
# check config settings
self.validate_config()
def validate_config(self):
if self.host is None:
raise Exception('Please specify a Redis host')
if self.port is None:
raise Exception('Please specify a Redis port')
if not self.port.isdigit():
raise Exception('Please specify numeric Redis port')
class MaxOut():
def __init__(self, config):
self.host = config.host
self.port = config.port
self.password = config.password
        # env values arrive as strings; cast the loop settings to int so the
        # range()/multiplication/modulo below don't fail
        self.iterations = int(config.iterations)
        self.value_multiplier = int(config.value_multiplier)
        self.print_iter = int(config.print_iter)
self.connect()
def connect(self):
self.r_server = redis.Redis(self.host, port=self.port,
password=self.password)
def flush(self):
self.r_server.flushall()
def max_out(self):
for x in range(0, self.iterations):
m = hashlib.md5()
my_date = datetime.datetime.today()
m.update(str(my_date))
value = str(m.hexdigest()) * self.value_multiplier
self.r_server.set(my_date, value)
if(x % self.print_iter == 0):
redis_memory_used = self.r_server.info()['used_memory']
print str(x) + ": " + str(redis_memory_used) + " bytes"
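# Usage sketch (illustrative): the script reads its settings from the
# environment, so something like the following is expected before running:
#
#   export REDIS_MAXOUT_HOST=localhost
#   export REDIS_MAXOUT_PORT=6379
#   export REDIS_MAXOUT_PASSWORD=secret
#   export REDIS_MAXOUT_LOOP_ITERATIONS=100000
#   export REDIS_MAXOUT_LOOP_VALUE_MULTIPLIER=10
#   export REDIS_MAXOUT_LOOP_PRINT_ITER=1000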
if __name__ == '__main__':
    max_out_config = MaxOutConfig()
    redisTorture = MaxOut(max_out_config)
    redisTorture.flush()
    redisTorture.max_out()
| mit | 4,600,135,915,318,750,700 | 29.323944 | 80 | 0.601951 | false |
spectresearch/detectem | detectem/response.py | 1 | 6146 | import base64
import json
import logging
import re
import urllib.parse
from string import Template
import pkg_resources
import requests
from detectem.exceptions import SplashError
from detectem.settings import SPLASH_TIMEOUT, SPLASH_URL
from detectem.utils import docker_container
DEFAULT_CHARSET = "iso-8859-1"
ERROR_STATUS_CODES = [400, 504]
logger = logging.getLogger("detectem")
def is_url_allowed(url):
""" Return ``True`` if ``url`` is not in ``blacklist``.
:rtype: bool
"""
blacklist = [
r"\.ttf",
r"\.woff",
r"fonts\.googleapis\.com",
r"\.png",
r"\.jpe?g",
r"\.gif",
r"\.svg",
]
for ft in blacklist:
if re.search(ft, url):
return False
return True
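# Usage sketch (illustrative): font and image requests are dropped before
# their HAR entries are processed.
#
#   is_url_allowed('http://example.com/app.js')    # -> True
#   is_url_allowed('http://example.com/logo.png')  # -> False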
def is_valid_mimetype(response):
""" Return ``True`` if the mimetype is not blacklisted.
:rtype: bool
"""
blacklist = ["image/"]
mimetype = response.get("mimeType")
if not mimetype:
return True
for bw in blacklist:
if bw in mimetype:
return False
return True
def get_charset(response):
""" Return charset from ``response`` or default charset.
:rtype: str
"""
# Set default charset
charset = DEFAULT_CHARSET
m = re.findall(r";charset=(.*)", response.get("mimeType", ""))
if m:
charset = m[0]
return charset
def create_lua_script(plugins):
""" Return script template filled up with plugin javascript data.
:rtype: str
"""
lua_template = pkg_resources.resource_string("detectem", "script.lua")
template = Template(lua_template.decode("utf-8"))
javascript_data = to_javascript_data(plugins)
return template.substitute(js_data=json.dumps(javascript_data))
def to_javascript_data(plugins):
"""
    Return a list of dicts with all JavaScript matchers. Quotes are escaped.

    :rtype: list
"""
def escape(v):
return re.sub(r'"', r'\\"', v)
def dom_matchers(p):
dom_matchers = p.get_matchers("dom")
escaped_dom_matchers = []
for dm in dom_matchers:
check_statement, version_statement = dm
escaped_dom_matchers.append(
{
"check_statement": escape(check_statement),
# Escape '' and not None
"version_statement": escape(version_statement or ""),
}
)
return escaped_dom_matchers
return [
{"name": p.name, "matchers": dom_matchers(p)}
for p in plugins.with_dom_matchers()
]
def get_response(url, plugins, timeout=SPLASH_TIMEOUT):
"""
    Return response with HAR, inline scripts and software detected by JS matchers.
:rtype: dict
"""
lua_script = create_lua_script(plugins)
lua = urllib.parse.quote_plus(lua_script)
page_url = f"{SPLASH_URL}/execute?url={url}&timeout={timeout}&lua_source={lua}"
try:
with docker_container():
logger.debug("[+] Sending request to Splash instance")
res = requests.get(page_url)
except requests.exceptions.ConnectionError:
raise SplashError("Could not connect to Splash server {}".format(SPLASH_URL))
logger.debug("[+] Response received")
json_data = res.json()
if res.status_code in ERROR_STATUS_CODES:
raise SplashError(get_splash_error(json_data))
softwares = json_data["softwares"]
scripts = json_data["scripts"].values()
har = get_valid_har(json_data["har"])
js_error = get_evaljs_error(json_data)
if js_error:
logger.debug("[+] WARNING: failed to eval JS matchers: %(n)s", {"n": js_error})
else:
logger.debug("[+] Detected %(n)d softwares from the DOM", {"n": len(softwares)})
logger.debug("[+] Detected %(n)d scripts from the DOM", {"n": len(scripts)})
logger.debug("[+] Final HAR has %(n)d valid entries", {"n": len(har)})
return {"har": har, "scripts": scripts, "softwares": softwares}
def get_splash_error(json_data):
msg = json_data["description"]
if "info" in json_data and "error" in json_data["info"]:
error = json_data["info"]["error"]
if error.startswith("http"):
msg = "Request to site failed with error code {0}".format(error)
elif error.startswith("network"):
# see http://doc.qt.io/qt-5/qnetworkreply.html
qt_errors = {
"network1": "ConnectionRefusedError",
"network2": "RemoteHostClosedError",
"network3": "HostNotFoundError",
"network4": "TimeoutError",
"network5": "OperationCanceledError",
"network6": "SslHandshakeFailedError",
}
error = qt_errors.get(error, "error code {0}".format(error))
msg = "Request to site failed with {0}".format(error)
else:
msg = "{0}: {1}".format(msg, error)
return msg
def get_evaljs_error(json_data):
error = None
if "errors" in json_data and "evaljs" in json_data["errors"]:
res = json_data["errors"]["evaljs"]
if isinstance(res, str):
m = re.search(r"'message': '(.*?)'[,}]", res)
if m:
error = bytes(m.group(1), "utf-8").decode("unicode_escape")
return error
def get_valid_har(har_data):
""" Return list of valid HAR entries.
:rtype: list
"""
new_entries = []
entries = har_data.get("log", {}).get("entries", [])
logger.debug("[+] Detected %(n)d entries in HAR", {"n": len(entries)})
for entry in entries:
url = entry["request"]["url"]
if not is_url_allowed(url):
continue
response = entry["response"]["content"]
if not is_valid_mimetype(response):
continue
if response.get("text"):
charset = get_charset(response)
response["text"] = base64.b64decode(response["text"]).decode(charset)
else:
response["text"] = ""
new_entries.append(entry)
logger.debug("[+] Added URL: %(url)s ...", {"url": url[:100]})
return new_entries
| mit | 6,745,631,188,370,809,000 | 25.606061 | 88 | 0.582981 | false |
nagyistoce/geokey | geokey/contributions/views/base.py | 1 | 1199 | from geokey.projects.models import Project
from geokey.contributions.models import Observation
class SingleAllContribution(object):
"""
Base class for single contributions on the all contributions endpoints
"""
def get_object(self, user, project_id, observation_id):
"""
        Returns a single Observation
Parameters
----------
user : geokey.users.models.User
User requesting the contribution
project_id : int
identifies the project in the data base
observation_id : int
identifies the observation in the data base
Returns
-------
geokey.contributions.models.Observation
Raises
------
Observation.DoesNotExist
            If the observation was not found or is not accessible to the user
"""
project = Project.objects.get_single(user, project_id)
if project.can_moderate(user):
return project.get_all_contributions(
user).for_moderator(user).get(pk=observation_id)
else:
return project.get_all_contributions(
user).for_viewer(user).get(pk=observation_id)
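# Usage sketch (illustrative; the view wiring is hypothetical): subclasses
# resolve a contribution for the requesting user and catch the documented
# error.
#
#   try:
#       observation = self.get_object(request.user, project_id, observation_id)
#   except Observation.DoesNotExist:
#       raise NotFound('Observation not found')  # hypothetical handler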
| apache-2.0 | -8,226,288,109,237,756,000 | 30.552632 | 78 | 0.615513 | false |
3dfxsoftware/cbss-addons | account_bank_balance_report/report/account_bank_balance_report.py | 1 | 13133 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from tools.translate import _
import pooler
from openerp.addons.account_report_lib.account_report_base import accountReportbase
class Parser(accountReportbase):
def __init__(self, cursor, uid, name, context):
super(Parser, self).__init__(cursor, uid, name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({
'storage':{},
'cumul_balance': 0.0,
'get_bank_account': self.get_bank_account,
'get_period': self.get_period,
'display_account_name': self.display_account_name,
'account_has_move_lines': self.account_has_move_lines,
'messages': self.messages,
'return_balance_account':self.return_balance_account,
'display_symbol_account': self.display_symbol_account,
'update_cumul_balance': self.update_cumul_balance,
'reset_data': self.reset_data,
'get_cumul_balance':self.get_cumul_balance,
})
#=================== DISPLAY DATA ===================================
def messages(self):
message = _("For this account, doesn't exist move lines")
return message
def account_has_move_lines(self, account_id):
if account_id in self.localcontext['storage']['result'].keys():
if len(self.localcontext['storage']['result'][account_id]) > 0:
return True
else:
return False
def display_account_name(self, data, account_id):
str_name = ''
bank_account = self.get_bank_account(data)
if bank_account.default_credit_account_id and bank_account.default_debit_account_id:
if bank_account.default_credit_account_id.id == bank_account.default_debit_account_id.id:
str_name = bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
else:
if bank_account.default_credit_account_id:
if bank_account.default_credit_account_id.id == account_id:
str_name = _('Default credit account: ') + bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
elif bank_account.default_debit_account_id:
if bank_account.default_debit_account_id.id == account_id:
str_name = _('Default debit account: ') + bank_account.default_debit_account_id.code + ' - ' + bank_account.default_debit_account_id.name + ' - ' + bank_account.default_debit_account_id.currency_id.name
else:
if bank_account.default_credit_account_id:
if bank_account.default_credit_account_id.id == account_id:
str_name = _('Default credit account: ') + bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
elif bank_account.default_debit_account_id:
if bank_account.default_debit_account_id.id == account_id:
str_name = _('Default debit account: ') + bank_account.default_debit_account_id.code + ' - ' + bank_account.default_debit_account_id.name + ' - ' + bank_account.default_debit_account_id.currency_id.name
return str_name
def display_symbol_account(self, account_id):
account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
if account.currency_id:
return account.currency_id.symbol
else:
return ''
#=============== SET AND GET DATA ====================================#
def reset_data(self):
self.localcontext['storage']['cumul_balance'] = 0.0
return False
def get_cumul_balance(self):
return self.localcontext['storage']['cumul_balance']
def get_bank_account(self, data):
return self._get_info(data, 'res_partner_bank_ids', 'res.partner.bank')
def get_period(self, data):
return self._get_info(data, 'period_ids', 'account.period')
def get_currency_company(self):
return self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.currency_id
def different_currency(self, currency_id):
currency_company = self.get_currency_company()
if currency_company.id != currency_id:
return True
else:
return False
# Update cumul_balance as each line is processed
def update_cumul_balance(self, line):
cumul_balance = self.localcontext['storage']['cumul_balance']
if line.currency_id:
if line.currency_id.id == self.get_currency_company().id:
cumul_balance = self.localcontext['storage']['cumul_balance'] + line.debit - line.credit
dict_update = {'cumul_balance': cumul_balance}
self.localcontext['storage'].update(dict_update)
else:
cumul_balance = self.localcontext['storage']['cumul_balance'] + line.amount_currency
dict_update = {'cumul_balance': cumul_balance}
self.localcontext['storage'].update(dict_update)
return cumul_balance
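# Worked example (illustrative values only): starting from a cumulative
# balance of 100.0, a company-currency line with debit=250.0 and credit=50.0
# yields 100.0 + 250.0 - 50.0 = 300.0, while a foreign-currency line
# contributes its amount_currency instead, e.g. amount_currency=-20.0
# gives 100.0 - 20.0 = 80.0.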
def set_data_template(self, data):
#Main dictionary
res = self.classified_move_lines(data)
dict_update = {'result': res,}
self.localcontext['storage'].update(dict_update)
return False
def return_balance_account(self, data, account_id):
# Depending on the account currency, return either the balance or the foreign balance
balance = self.get_initial_balance(data, account_id)
account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
currency_company = self.get_currency_company()
if account.currency_id:
if account.currency_id == currency_company:
#initialize cum_balance
dict_update = {'cumul_balance': balance[account_id]['balance']}
self.localcontext['storage'].update(dict_update)
return balance[account_id]['balance']
else:
#initialize cum_balance
dict_update = {'cumul_balance': balance[account_id]['foreign_balance']}
self.localcontext['storage'].update(dict_update)
return balance[account_id]['foreign_balance']
#=====================================================================#
#===================================================================
    # Find move lines whose account matches default_credit_account_id or
    # default_debit_account_id, whose state is 'valid', and whose period
    # is the one selected in the wizard
#===================================================================
def process_move_lines(self, data):
account_ids = []
period = self.get_period(data)
bank_account = self.get_bank_account(data)
if bank_account.default_credit_account_id and bank_account.default_debit_account_id:
if bank_account.default_credit_account_id.id == bank_account.default_debit_account_id.id:
account_ids.append(bank_account.default_debit_account_id.id)
else:
account_ids.append(bank_account.default_credit_account_id.id)
account_ids.append(bank_account.default_debit_account_id.id)
elif bank_account.default_credit_account_id:
account_ids.append(bank_account.default_credit_account_id.id)
elif bank_account.default_debit_account_id:
account_ids.append(bank_account.default_debit_account_id.id)
move_lines_ids = self.pool.get('account.move.line').search(self.cr, self.uid, [('account_id','in',account_ids),('state', '=', 'valid'),('period_id','=',period.id)])
move_lines = self.pool.get('account.move.line').browse(self.cr, self.uid, move_lines_ids)
return move_lines
#=======================================================================
    # Create a dictionary keyed by account, where each account has an
    # associated list of move lines
#=======================================================================
def classified_move_lines(self, data):
res = {}
#Get move_lines
move_lines = self.process_move_lines(data)
for line in move_lines:
#lines must have a account if they are included in list
#It is not necessary included a check with account
if line.account_id.id not in res:
res[line.account_id.id] = []
res[line.account_id.id].append(line)
return res
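# The resulting structure maps account ids to their move lines, for example
# (hypothetical ids): {1050: [<move line 7>, <move line 9>], 1051: [<move line 12>]}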
#=======================================================================
    # Create a dictionary keyed by account, where each account has an
    # associated (initial) balance
#=======================================================================
def get_initial_balance(self, data, account_id):
account_balance = 0.0
library_obj = self.pool.get('account.webkit.report.library')
fiscal_year = self.get_fiscalyear(data)
account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
period = self.get_period(data)
currency_company = self.get_currency_company()
# Get the initial balance using the period immediately before the selected one
previous_period = self.pool.get('account.period').get_start_previous_period(self.cr, self.uid, start_period=period, fiscal_year=fiscal_year)
if account.currency_id:
# Compare currencies: if the account currency differs from the company currency, get foreign_balance
if account.currency_id.id == currency_company:
account_balance = library_obj.get_account_balance(self.cr, self.uid,
[account_id],
['balance'],
initial_balance=True,
fiscal_year_id=fiscal_year.id,
start_period_id=previous_period,
end_period_id=previous_period,
filter_type='filter_period')
else:
account_balance = library_obj.get_account_balance(self.cr, self.uid,
[account_id],
['foreign_balance'],
initial_balance=True,
fiscal_year_id=fiscal_year.id,
start_period_id=previous_period,
end_period_id=previous_period,
filter_type='filter_period')
else:
account_balance = 0.0
return account_balance | gpl-2.0 | 6,034,979,615,341,734,000 | 51.536 | 230 | 0.521587 | false |
denisenkom/django-sqlserver | broken-tests/schema/tests.py | 1 | 101650 | import datetime
import itertools
import unittest
from copy import copy
import django
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.deletion import CASCADE, PROTECT
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField,
CharField, DateField, DateTimeField, IntegerField, PositiveIntegerField,
SlugField, TextField, TimeField,
)
if django.VERSION >= (1, 10, 0):
from django.db.models.fields import BigAutoField
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
if django.VERSION >= (1, 11, 0):
from django.db.models.indexes import Index
from django.db.transaction import TransactionManagementError, atomic
from django.test import (
TransactionTestCase, mock, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
if django.VERSION >= (1, 10, 0):
from django.test.utils import isolate_apps
from django.utils import timezone
from .fields import (
CustomManyToManyField, InheritedManyToManyField, MediumBlobField,
)
from .models import (
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName,
AuthorWithIndexedName, Book, BookForeignObj, BookWeak, BookWithLongName,
BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note,
NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing,
UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests for the schema-alteration code.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Node,
Note, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
]
# Utility functions
def setUp(self):
# local_models should contain test dependent model classes that will be
# automatically removed from the app cache on test tear down.
self.local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if 'schema' in new_apps.all_models:
for model in self.local_models:
for many_to_many in model._meta.many_to_many:
through = many_to_many.remote_field.through
if through and through._meta.auto_created:
del new_apps.all_models['schema'][through._meta.model_name]
del new_apps.all_models['schema'][model._meta.model_name]
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.table_name_converter
with connection.schema_editor() as editor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if tbl in table_names:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
# SQLite also doesn't error properly
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
def get_primary_key(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_primary_key_column(cursor, table)
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return [
c['columns'][0]
for c in connection.introspection.get_constraints(cursor, table).values()
if c['index'] and len(c['columns']) == 1
]
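# get_indexes() returns just the column names of single-column indexes,
# e.g. ['title'] for a table whose only such index is on "title"
# (illustrative value, not tied to a specific model here).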
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details['columns'] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def check_added_field_default(self, schema_editor, model, field, field_name, expected_default,
cast_function=None):
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table))
database_default = cursor.fetchall()[0][0]
if cast_function and not type(database_default) == type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
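# Illustrative invocation from within a test (model and field names are
# assumptions here):
#
#     with connection.schema_editor() as editor:
#         self.check_added_field_default(editor, Author, new_field, 'age', 42)
#
# i.e. add the field, read the column back with raw SQL, and verify the
# database-side default matches the expected value.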
def get_constraints_count(self, table, column, fk_to):
"""
Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {'fks': 0, 'uniques': 0, 'indexes': 0}
for c in constraints.values():
if c['columns'] == [column]:
if c['foreign_key'] == fk_to:
counts['fks'] += 1
if c['unique']:
counts['uniques'] += 1
elif c['index']:
counts['indexes'] += 1
return counts
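# For instance, a plain ForeignKey column on a backend with foreign-key
# support typically yields {'fks': 1, 'uniques': 0, 'indexes': 1}, while a
# OneToOneField column yields {'fks': 1, 'uniques': 1, 'indexes': 0}
# (values mirror the assertions in the alter-field tests below).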
def assertIndexOrder(self, table, index, order):
constraints = self.get_constraints(table)
self.assertIn(index, constraints)
index_orders = constraints[index]['orders']
self.assertTrue(all(val == expected for val, expected in zip(index_orders, order)))
def assertForeignKeyExists(self, model, column, expected_fk_table):
"""
Fail if the FK constraint on `model.Meta.db_table`.`column` to
`expected_fk_table`.id doesn't exist.
"""
constraints = self.get_constraints(model._meta.db_table)
constraint_fk = None
for name, details in constraints.items():
if details['columns'] == [column] and details['foreign_key']:
constraint_fk = details['foreign_key']
break
self.assertEqual(constraint_fk, (expected_fk_table, 'id'))
def assertForeignKeyNotExists(self, model, column, expected_fk_table):
with self.assertRaises(AssertionError):
self.assertForeignKeyExists(model, column, expected_fk_table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# The table is there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# The table is gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature('supports_foreign_keys')
def test_fk(self):
"Creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
self.assertForeignKeyExists(Book, 'author_id', 'schema_tag')
@skipUnlessDBFeature('supports_foreign_keys')
def test_fk_to_proxy(self):
"Creating a FK to a proxy model creates database constraints."
class AuthorProxy(Author):
class Meta:
app_label = 'schema'
apps = new_apps
proxy = True
class AuthorRef(Model):
author = ForeignKey(AuthorProxy, on_delete=CASCADE)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [AuthorProxy, AuthorRef]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorRef)
self.assertForeignKeyExists(AuthorRef, 'author_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_fk_db_constraint(self):
"The db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author')
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag')
# Alter to one with a constraint
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
self.assertForeignKeyExists(Author, 'tag_id', 'schema_tag')
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag')
if django.VERSION >= (1, 10, 0):
@isolate_apps('schema')
def test_no_db_constraint_added_during_primary_key_change(self):
"""
When a primary key that's pointed to by a ForeignKey with
db_constraint=False is altered, a foreign key constraint isn't added.
"""
class Author(Model):
class Meta:
app_label = 'schema'
class BookWeak(Model):
author = ForeignKey(Author, CASCADE, db_constraint=False)
class Meta:
app_label = 'schema'
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWeak)
self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author')
old_field = Author._meta.get_field('id')
new_field = BigAutoField(primary_key=True)
new_field.model = Author
new_field.set_attributes_from_name('id')
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author')
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
self.assertForeignKeyNotExists(new_field.remote_field.through, 'tag_id', 'schema_tag')
@skipUnlessDBFeature('supports_foreign_keys')
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@skipUnlessDBFeature('supports_foreign_keys')
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@skipUnlessDBFeature('supports_foreign_keys')
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with CaptureQueriesContext(connection) as ctx, connection.schema_editor() as editor:
editor.add_field(Author, new_field)
drop_default_sql = editor.sql_alter_column_no_default % {
'column': editor.quote_name(new_field.name),
}
self.assertFalse(any(drop_default_sql in query['sql'] for query in ctx.captured_queries))
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['age'][0], "IntegerField")
self.assertEqual(columns['age'][1][6], True)
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['surname'][0], "CharField")
self.assertEqual(columns['surname'][1][6],
connection.features.interprets_empty_strings_as_nulls)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# BooleanFields are stored as TINYINT(1) on MySQL.
field_type = columns['awesome'][0]
self.assertEqual(
field_type,
connection.features.introspected_boolean_field_type(new_field, created_separately=True)
)
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add the field with a default it needs to cast (to string in this case)
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns['thing']
self.assertEqual(field_type, 'IntegerField')
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# MySQL annoyingly uses the same backend, so it'll come back as one of
# these two types.
self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))
@unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b'123')
new_field.set_attributes_from_name('bits')
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns['bits'][0], "TextField")
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(columns['name'][1][6], True)
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value"
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature('can_defer_constraint_checks', 'can_rollback_ddl')
def test_alter_fk_checks_deferred_constraints(self):
"""
#25492 - Altering a foreign key's structure and data in the same
transaction.
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field('parent')
new_field = ForeignKey(Node, CASCADE)
new_field.set_attributes_from_name('parent')
parent = Node.objects.create()
with connection.schema_editor() as editor:
# Update the parent FK to create a deferred constraint check.
Node.objects.update(parent=parent)
editor.alter_field(Node, old_field, new_field, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='1988-05-05')
old_field = Note._meta.get_field('info')
new_field = DateField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='1988-05-05 3:16:17.4567')
old_field = Note._meta.get_field('info')
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='3:16:17.4567')
old_field = Note._meta.get_field('info')
new_field = TimeField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='aaa')
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='bbb')
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns['height'][1][6])
# Create some test data
Author.objects.create(name='Not null author', height=12)
Author.objects.create(name='Null author')
# Verify null value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertIsNone(Author.objects.get(name='Null author').height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertFalse(columns['height'][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertEqual(Author.objects.get(name='Null author').height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_null when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field('name')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_null when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field('info')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature('supports_combined_alters')
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns['height'][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns['height'][1][6])
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for name, details in constraints.items():
if details['foreign_key']:
self.fail('Found an unexpected FK constraint to %s' % details['columns'])
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
self.assertForeignKeyExists(LocalBook, 'author_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
BookWithO2O.objects.all().delete()
self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author')
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique anymore
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
Book.objects.all().delete()
self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique now
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author')
def test_alter_field_fk_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
old_field = Book._meta.get_field('author')
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index on ForeignKey is replaced with a unique constraint for OneToOneField.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
def test_alter_field_fk_keeps_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
old_field = Book._meta.get_field('author')
# on_delete changed from CASCADE.
new_field = ForeignKey(Author, PROTECT)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index remains.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
def test_alter_field_o2o_to_fk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
old_field = BookWithO2O._meta.get_field('author')
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint on OneToOneField is replaced with an index for ForeignKey.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
def test_alter_field_o2o_keeps_unique(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
old_field = BookWithO2O._meta.get_field('author')
# on_delete changed from CASCADE.
new_field = OneToOneField(Author, PROTECT)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint remains.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
def test_alter_db_table_case(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Alter the case of the table
old_table_name = Author._meta.db_table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name='Foo')
Author.objects.create(name='Bar')
def test_alter_int_pk_to_autofield_pk(self):
"""
Should be able to rename an IntegerField(primary_key=True) to
AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field('i')
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_int_unique(self):
"""
Should be able to rename an IntegerField(primary_key=True) to
IntegerField(unique=True).
"""
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = 'INTEGERPK'
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# model requires a new PK
old_field = IntegerPK._meta.get_field('j')
new_field = IntegerField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('j')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
old_field = IntegerPK._meta.get_field('i')
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['display_name'][0], "CharField")
self.assertNotIn("name", columns)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns['detail_info'][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = 'schema'
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(columns['book_id'][0], "IntegerField")
self.assertEqual(columns['tag_id'][0], "IntegerField")
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Make sure the model state is coherent with the table one now that
# we've removed the tags field.
opts = LocalAuthorWithM2M._meta
opts.local_many_to_many.remove(new_field)
del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name]
opts._expire_cache()
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = 'schema'
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field, strict=True)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
LocalBookWithM2M._meta.get_field("tags").remote_field.through,
'tagm2mtest_id',
'schema_tagm2mtest',
)
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
# Ensure old M2M is gone
with self.assertRaises(DatabaseError):
self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
# This model looks like the new model and is used for teardown.
opts = LocalBookWithM2M._meta
opts.local_many_to_many.remove(old_field)
# Ensure the new M2M exists and points to UniqueTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(new_field.remote_field.through, 'uniquetest_id', 'schema_uniquetest')
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@skipUnlessDBFeature('supports_column_check_constraints')
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
with self.assertRaises(IntegrityError):
TagUniqueRename.objects.create(title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no unique_together to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name('author')
editor.add_field(BookWithoutAuthor, new_field)
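        # Book and BookWithoutAuthor map to the same db_table here, so Book
        # can drive the unique_together alteration below.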
        # Ensure there's no unique_together to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure there's no index on the year/slug columns first
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no index_together to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [['author', 'title']], [])
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
# Ensure there is an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c['columns'] == ["slug", "title"]
),
)
def test_db_table(self):
"""
Tests renaming of the table
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
# Ensure the table is there afterwards
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table again
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there and has no index
self.assertNotIn('title', self.get_indexes(Author._meta.db_table))
# Add the index
index = Index(fields=['name'], name='author_title_idx')
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn('name', self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn('name', self.get_indexes(Author._meta.db_table))
def test_remove_db_index_doesnt_remove_custom_indexes(self):
"""
Changing db_index to False doesn't remove indexes from Meta.indexes.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedName)
# Ensure the table has its index
self.assertIn('name', self.get_indexes(AuthorWithIndexedName._meta.db_table))
# Add the custom index
index = Index(fields=['-name'], name='author_name_idx')
author_index_name = index.name
with connection.schema_editor() as editor:
db_index_name = editor._create_index_name(
model=AuthorWithIndexedName,
column_names=('name',),
)
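        # Some backends (e.g. Oracle) report upper-cased identifiers, so
        # normalize the expected names before comparing against constraints.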
if connection.features.uppercases_column_names:
author_index_name = author_index_name.upper()
db_index_name = db_index_name.upper()
try:
AuthorWithIndexedName._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedName, index)
old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertIn(author_index_name, old_constraints)
self.assertIn(db_index_name, old_constraints)
# Change name field to db_index=False
old_field = AuthorWithIndexedName._meta.get_field('name')
new_field = CharField(max_length=255)
new_field.set_attributes_from_name('name')
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithIndexedName, old_field, new_field, strict=True)
new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertNotIn(db_index_name, new_constraints)
# The index from Meta.indexes is still in the database.
self.assertIn(author_index_name, new_constraints)
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(AuthorWithIndexedName, index)
finally:
AuthorWithIndexedName._meta.indexes = []
def test_order_index(self):
"""
        Indexes can be defined with ordering (ASC/DESC) on columns.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
# The table doesn't have an index
self.assertNotIn('title', self.get_indexes(Author._meta.db_table))
index_name = 'author_name_idx'
# Add the index
index = Index(fields=['name', '-weight'], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(Author, index)
if connection.features.supports_index_column_ordering:
if connection.features.uppercases_column_names:
index_name = index_name.upper()
self.assertIndexOrder(Author._meta.db_table, index_name, ['ASC', 'DESC'])
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Add a unique column, verify that creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'id')
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
'id',
self.get_indexes(Tag._meta.db_table),
)
self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'slug')
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@skipIfDBFeature('can_rollback_ddl')
def test_unsupported_transactional_ddl_disallowed(self):
message = (
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
with atomic(), connection.schema_editor() as editor:
with self.assertRaisesMessage(TransactionManagementError, message):
editor.execute(editor.sql_create_table % {'table': 'foo', 'definition': ''})
@skipUnlessDBFeature('supports_foreign_keys')
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@skipUnlessDBFeature('supports_foreign_keys')
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Add a second FK, this would fail due to long ref name before the fix
new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something")
new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id'])
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
Tries creating a model's table, and then deleting it when it has a
SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail("Errors when applying initial migration for a model "
"with a table named after an SQL reserved word: %s" % e)
# The table is there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# The table is gone
with self.assertRaises(DatabaseError):
list(Thing.objects.all())
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, **kwargs):
kwargs['db_column'] = "CamelCase"
field = kwargs.pop('field_class', IntegerField)(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
constraint_name = "CamelCaseIndex"
editor.execute(
editor.sql_create_index % {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"using": "",
"columns": editor.quote_name(column),
"extra": "",
}
)
if connection.features.uppercases_column_names:
constraint_name = constraint_name.upper()
self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
editor.alter_field(model, get_field(db_index=True), field, strict=True)
self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))
constraint_name = "CamelCaseUniqConstraint"
editor.execute(
editor.sql_create_unique % {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"columns": editor.quote_name(field.column),
}
)
if connection.features.uppercases_column_names:
constraint_name = constraint_name.upper()
self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
editor.alter_field(model, get_field(unique=True), field, strict=True)
self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))
if connection.features.supports_foreign_keys:
constraint_name = "CamelCaseFKConstraint"
editor.execute(
editor.sql_create_fk % {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
"deferrable": connection.ops.deferrable_sql(),
}
)
if connection.features.uppercases_column_names:
constraint_name = constraint_name.upper()
self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True)
self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField to ensure default will be used from effective_default
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '')
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default='surname default')
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], 'surname default')
# And that the default is no longer set in the database.
field = next(
f for f in connection.introspection.get_table_description(cursor, "schema_author")
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name='Anonymous1')
self.assertIsNone(Author.objects.get().height)
old_field = Author._meta.get_field('height')
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name('height')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f for f in connection.introspection.get_table_description(cursor, "schema_author")
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name='Anonymous1')
# Create a field that has an unhashable default
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_add_indexed_charfield(self):
field = CharField(max_length=255, db_index=True)
field.set_attributes_from_name('nom_de_plume')
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
# Should create two indexes; one for like operator.
self.assertEqual(
self.get_constraints_for_column(Author, 'nom_de_plume'),
['schema_author_nom_de_plume_7570a851', 'schema_author_nom_de_plume_7570a851_like'],
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_add_unique_charfield(self):
field = CharField(max_length=255, unique=True)
field.set_attributes_from_name('nom_de_plume')
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
# Should create two indexes; one for like operator.
self.assertEqual(
self.get_constraints_for_column(Author, 'nom_de_plume'),
['schema_author_nom_de_plume_7570a851_like', 'schema_author_nom_de_plume_key']
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_index_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Author._meta.get_field('name')
new_field = CharField(max_length=255, db_index=True)
new_field.set_attributes_from_name('name')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, 'name'),
['schema_author_name_1fbc5617', 'schema_author_name_1fbc5617_like']
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
# Alter to add unique=True and create 2 indexes.
old_field = Author._meta.get_field('name')
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name('name')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, 'name'),
['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq']
)
# Remove unique=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_index_to_textfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Note)
self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Note._meta.get_field('info')
new_field = TextField(db_index=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Note, 'info'),
['schema_note_info_4b0ea695', 'schema_note_info_4b0ea695_like']
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield_with_db_index(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field('title')
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
)
# Alter to remove unique=True (should drop unique index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_remove_unique_and_db_index_from_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field('title')
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
)
# Alter to remove both unique=True and db_index=True (should drop all indexes)
new_field2 = CharField(max_length=100)
new_field2.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(self.get_constraints_for_column(BookWithoutAuthor, 'title'), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_swap_unique_and_db_index_with_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
# Alter to set unique=True and remove db_index=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field('title')
new_field = CharField(max_length=100, unique=True)
new_field.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
)
# Alter to set db_index=True and remove unique=True (should restore index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_db_index_to_charfield_with_unique(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Tag)
self.assertEqual(
self.get_constraints_for_column(Tag, 'slug'),
['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
)
# Alter to add db_index=True
old_field = Tag._meta.get_field('slug')
new_field = SlugField(db_index=True, unique=True)
new_field.set_attributes_from_name('slug')
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, 'slug'),
['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
)
# Alter to remove db_index=True
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name('slug')
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, 'slug'),
['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
)
def test_alter_field_add_index_to_integerfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])
# Alter to add db_index=True and create index.
old_field = Author._meta.get_field('weight')
new_field = IntegerField(null=True, db_index=True)
new_field.set_attributes_from_name('weight')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
expected = 'schema_author_weight_587740f9'
if connection.features.uppercases_column_names:
expected = expected.upper()
self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [expected])
# Remove db_index=True to drop index.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])
def test_alter_pk_with_self_referential_field(self):
"""
Changing the primary key field name of a model with a self-referential
foreign key (#26384).
"""
if connection.vendor == 'mysql' and connection.mysql_version < (5, 6, 6):
self.skipTest('Skip known bug renaming primary keys on older MySQL versions (#24995).')
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field('node_id')
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name('id')
with connection.schema_editor() as editor:
editor.alter_field(Node, old_field, new_field, strict=True)
@mock.patch('django.db.backends.base.schema.datetime')
@mock.patch('django.db.backends.base.schema.timezone')
def test_add_datefield_and_datetimefield_use_effective_default(self, mocked_datetime, mocked_tz):
"""
effective_default() should be used for DateField, DateTimeField, and
        TimeField if auto_now or auto_now_add is set (#25005).
"""
now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1)
now_tz = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1, tzinfo=timezone.utc)
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check auto_now/auto_now_add attributes are not defined
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name('dob_auto_now')
self.check_added_field_default(
editor, Author, dob_auto_now, 'dob_auto_now', now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name('dob_auto_now_add')
self.check_added_field_default(
editor, Author, dob_auto_now_add, 'dob_auto_now_add', now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name('dtob_auto_now')
self.check_added_field_default(
editor, Author, dtob_auto_now, 'dtob_auto_now', now,
)
dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True)
dt_tm_of_birth_auto_now_add.set_attributes_from_name('dtob_auto_now_add')
self.check_added_field_default(
editor, Author, dt_tm_of_birth_auto_now_add, 'dtob_auto_now_add', now,
)
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name('tob_auto_now')
self.check_added_field_default(
editor, Author, tob_auto_now, 'tob_auto_now', now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name('tob_auto_now_add')
self.check_added_field_default(
editor, Author, tob_auto_now_add, 'tob_auto_now_add', now.time(),
cast_function=lambda x: x.time(),
)
@unittest.skipUnless(connection.vendor == 'oracle', 'Oracle specific db_table syntax')
def test_creation_with_db_table_double_quotes(self):
oracle_user = connection.creation._test_database_user()
class Student(Model):
name = CharField(max_length=30)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user
class Document(Model):
name = CharField(max_length=30)
students = ManyToManyField(Student)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user
self.local_models = [Student, Document]
with connection.schema_editor() as editor:
editor.create_model(Student)
editor.create_model(Document)
doc = Document.objects.create(name='Test Name')
student = Student.objects.create(name='Some man')
doc.students.add(student)
| mit | 101,262,551,295,524,510 | 43.061552 | 114 | 0.621702 | false |
twitter/pants | src/python/pants/backend/native/subsystems/native_build_settings.py | 1 | 1424 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.build_graph.mirrored_target_option_mixin import MirroredTargetOptionMixin
from pants.subsystem.subsystem import Subsystem
class NativeBuildSettings(Subsystem, MirroredTargetOptionMixin):
"""Settings which affect both the compile and link phases."""
options_scope = 'native-build-settings'
mirrored_target_option_actions = {
'strict_deps': lambda tgt: tgt.strict_deps,
}
@classmethod
def register_options(cls, register):
super(NativeBuildSettings, cls).register_options(register)
# TODO: rename this so it's clear it is not the same option as JVM strict deps!
register('--strict-deps', type=bool, default=True, fingerprint=True, advanced=True,
help="Whether to include only dependencies directly declared in the BUILD file "
"for C and C++ targets by default. If this is False, all transitive dependencies "
"are used when compiling and linking native code. C and C++ targets may override "
"this behavior with the strict_deps keyword argument as well.")
def get_strict_deps_value_for_target(self, target):
return self.get_scalar_mirrored_target_option('strict_deps', target)
| apache-2.0 | 2,196,205,430,313,482,000 | 44.935484 | 100 | 0.728933 | false |
mcxiaoke/python-labs | archives/learning/security/otp.py | 1 | 4777 | # -*- coding: UTF-8 -*-
"""
otpauth
~~~~~~~
Implements two-step verification of HOTP/TOTP.
:copyright: (c) 2013 - 2014 by Hsiaoming Yang.
:license: BSD, see LICENSE for more details.
"""
import base64
import hashlib
import hmac
import struct
import sys
import time
import warnings
if sys.version_info[0] == 3:
python_version = 3
string_type = str
else:
python_version = 2
string_type = unicode
range = xrange
class OTPAuth(object):
"""One Time Password Authentication.
:param secret: A secret token for the authentication.
"""
def __init__(self, secret):
self.secret = secret
def hotp(self, counter=4):
"""Generate a HOTP code.
:param counter: HOTP is a counter based algorithm.
"""
return generate_hotp(self.secret, counter)
def totp(self, period=30):
"""Generate a TOTP code.
A TOTP code is an extension of HOTP algorithm.
:param period: A period that a TOTP code is valid in seconds
"""
return generate_totp(self.secret, period)
def valid_hotp(self, code, last=0, trials=100):
"""Valid a HOTP code.
:param code: A number that is less than 6 characters.
        :param last: Guess HOTP codes starting from last + 1.
        :param trials: Guess HOTP codes up to last + trials + 1.
"""
if not valid_code(code):
return False
code = int(code)
for i in range(last + 1, last + trials + 1):
if self.hotp(counter=i) == code:
return i
return False
def valid_totp(self, code, period=30):
"""Valid a TOTP code.
:param code: A number that is less than 6 characters.
:param period: A period that a TOTP code is valid in seconds
"""
return valid_code(code) and self.totp(period) == int(code)
def to_uri(self, type, label, issuer, counter=None):
"""Generate the otpauth protocal string.
:param type: Algorithm type, hotp or totp.
:param label: Label of the identifier.
:param issuer: The company, the organization or something else.
:param counter: Counter of the HOTP algorithm.
"""
type = type.lower()
if type not in ('hotp', 'totp'):
raise ValueError('type must be hotp or totp')
if type == 'hotp' and not counter:
            raise ValueError('HOTP type authentication needs a counter')
secret = base64.b32encode(to_bytes(self.secret))
# bytes to string
secret = secret.decode('utf-8')
# remove pad string
secret = secret.strip('=')
# https://code.google.com/p/google-authenticator/wiki/KeyUriFormat
url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s'
'&issuer=%(issuer)s')
dct = dict(
type=type, label=label, issuer=issuer,
secret=secret, counter=counter
)
ret = url % dct
if type == 'hotp':
ret = '%s&counter=%s' % (ret, counter)
return ret
def to_google(self, type, label, issuer, counter=None):
"""Generate the otpauth protocal string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead.
"""
warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
return self.to_uri(type, label, issuer, counter)
def generate_hotp(secret, counter=4):
"""Generate a HOTP code.
:param secret: A secret token for the authentication.
:param counter: HOTP is a counter based algorithm.
"""
# https://tools.ietf.org/html/rfc4226
msg = struct.pack('>Q', counter)
digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()
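    # Dynamic truncation (RFC 4226, section 5.3): the low 4 bits of the last
    # digest byte select an offset; the 4 bytes at that offset, masked to 31
    # bits, are reduced modulo 10**6 to yield the 6-digit code.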
ob = digest[19]
if python_version == 2:
ob = ord(ob)
pos = ob & 15
base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
token = base % 1000000
return token
def generate_totp(secret, period=30):
"""Generate a TOTP code.
A TOTP code is an extension of HOTP algorithm.
:param secret: A secret token for the authentication.
:param period: A period that a TOTP code is valid in seconds
"""
counter = int(time.time()) // period
return generate_hotp(secret, counter)
def to_bytes(text):
if isinstance(text, string_type):
# Python3 str -> bytes
# Python2 unicode -> str
text = text.encode('utf-8')
return text
def valid_code(code):
code = string_type(code)
return code.isdigit() and len(code) <= 6
if __name__ == '__main__':
    gotp = OTPAuth('xjom6zpducm4mltk5stxcogv3wcvq7do')
    print(gotp.totp())
    dotp = OTPAuth('PBFCKI5CSTEGFKDV4RHCLFZSCU')
    print(dotp.totp())
| apache-2.0 | 2,432,917,757,027,583,500 | 26.454023 | 75 | 0.601005 | false |
aESeguridad/GERE | venv/lib/python2.7/site-packages/flask_weasyprint/__init__.py | 1 | 7726 | # coding: utf8
"""
flask_weasyprint
~~~~~~~~~~~~~~~~
Flask-WeasyPrint: Make PDF in your Flask app with WeasyPrint.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
import weasyprint
from flask import request, current_app
from werkzeug.test import Client, ClientRedirectError
from werkzeug.wrappers import Response
try:
import urlparse
except ImportError: # Python 3
from urllib import parse as urlparse
try:
unicode
except NameError: # Python 3
unicode = str
VERSION = '0.5'
__all__ = ['VERSION', 'make_flask_url_dispatcher', 'make_url_fetcher',
'HTML', 'CSS', 'render_pdf']
DEFAULT_PORTS = frozenset([('http', 80), ('https', 443)])
def make_flask_url_dispatcher():
"""Return an URL dispatcher based on the current :ref:`request context
<flask:request-context>`.
You generally don’t need to call this directly.
The context is used when the dispatcher is first created but not
afterwards. It is not required after this function has returned.
Dispatch to the context’s app URLs below the context’s root URL.
If the app has a ``SERVER_NAME`` :ref:`config <flask:config>`, also
accept URLs that have that domain name or a subdomain thereof.
"""
def parse_netloc(netloc):
"""Return (hostname, port)."""
parsed = urlparse.urlsplit('http://' + netloc)
return parsed.hostname, parsed.port
app = current_app._get_current_object()
root_path = request.script_root
server_name = app.config.get('SERVER_NAME')
if server_name:
hostname, port = parse_netloc(server_name)
def accept(url):
"""Accept any URL scheme; also accept subdomains."""
return url.hostname is not None and (
url.hostname == hostname or
url.hostname.endswith('.' + hostname))
else:
scheme = request.scheme
hostname, port = parse_netloc(request.host)
if (scheme, port) in DEFAULT_PORTS:
port = None
def accept(url):
"""Do not accept subdomains."""
return (url.scheme, url.hostname) == (scheme, hostname)
def dispatch(url_string):
if isinstance(url_string, bytes):
url_string = url_string.decode('utf8')
url = urlparse.urlsplit(url_string)
url_port = url.port
if (url.scheme, url_port) in DEFAULT_PORTS:
url_port = None
if accept(url) and url_port == port and url.path.startswith(root_path):
netloc = url.netloc
if url.port and not url_port:
netloc = netloc.rsplit(':', 1)[0] # remove default port
base_url = '%s://%s%s' % (url.scheme, netloc, root_path)
path = url.path[len(root_path):]
if url.query:
path += '?' + url.query
# Ignore url.fragment
return app, base_url, path
return dispatch
def make_url_fetcher(dispatcher=None,
next_fetcher=weasyprint.default_url_fetcher):
"""Return an function suitable as a ``url_fetcher`` in WeasyPrint.
You generally don’t need to call this directly.
If ``dispatcher`` is not provided, :func:`make_flask_url_dispatcher`
is called to get one. This requires a request context.
    Otherwise, it must be a callable that takes a URL and returns either
    ``None`` or a ``(wsgi_callable, base_url, path)`` tuple. For ``None``,
    ``next_fetcher`` is used (by default, fetching normally over the
    network). For a tuple, the request is made at the WSGI level.
``wsgi_callable`` must be a Flask application or another WSGI callable.
``base_url`` is the root URL for the application while ``path``
is the path within the application.
Typically ``base_url + path`` is equal or equivalent to the passed URL.
"""
if dispatcher is None:
dispatcher = make_flask_url_dispatcher()
def flask_url_fetcher(url):
redirect_chain = set()
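        # Track every URL seen in this redirect chain so a redirect cycle
        # raises ClientRedirectError instead of looping forever.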
while 1:
result = dispatcher(url)
if result is None:
return next_fetcher(url)
app, base_url, path = result
client = Client(app, response_wrapper=Response)
if isinstance(path, unicode):
                # TODO: double-check this. Apparently Werkzeug %-unquotes bytes
# but not Unicode URLs. (IRI vs. URI or something.)
path = path.encode('utf8')
response = client.get(path, base_url=base_url)
if response.status_code == 200:
return dict(
string=response.data,
mime_type=response.mimetype,
encoding=response.charset,
redirected_url=url)
# The test client can follow redirects, but do it ourselves
# to get access to the redirected URL.
elif response.status_code in (301, 302, 303, 305, 307):
redirect_chain.add(url)
url = response.location
if url in redirect_chain:
raise ClientRedirectError('loop detected')
else:
raise ValueError('Flask-WeasyPrint got HTTP status %s for %s%s'
% (response.status, base_url, path))
return flask_url_fetcher
def _wrapper(class_, *args, **kwargs):
if args:
guess = args[0]
args = args[1:]
else:
guess = kwargs.pop('guess', None)
if guess is not None and not hasattr(guess, 'read'):
# Assume a (possibly relative) URL
guess = urlparse.urljoin(request.url, guess)
if 'string' in kwargs and 'base_url' not in kwargs:
# Strings do not have an "intrinsic" base URL, use the request context.
kwargs['base_url'] = request.url
kwargs['url_fetcher'] = make_url_fetcher()
return class_(guess, *args, **kwargs)
def HTML(*args, **kwargs):
"""Like `weasyprint.HTML()
<http://weasyprint.org/using/#the-weasyprint-html-class>`_ but:
* :func:`make_url_fetcher` is used to create an ``url_fetcher``
* If ``guess`` is not a file object, it is an URL relative to the current
request context.
This means that you can just pass a result from :func:`flask.url_for`.
* If ``string`` is passed, ``base_url`` defaults to the current
request’s URL.
This requires a Flask request context.
"""
return _wrapper(weasyprint.HTML, *args, **kwargs)
def CSS(*args, **kwargs):
return _wrapper(weasyprint.CSS, *args, **kwargs)
CSS.__doc__ = HTML.__doc__.replace('HTML', 'CSS').replace('html', 'css')
def render_pdf(html, stylesheets=None, download_filename=None):
"""Render a PDF to a response with the correct ``Content-Type`` header.
:param html:
Either a :class:`weasyprint.HTML` object or an URL to be passed
to :func:`flask_weasyprint.HTML`. The latter case requires
a request context.
:param stylesheets:
A list of user stylesheets, passed to
:meth:`~weasyprint.HTML.write_pdf`
:param download_filename:
If provided, the ``Content-Disposition`` header is set so that most
web browser will show the "Save as…" dialog with the value as the
default filename.
:returns: a :class:`flask.Response` object.
"""
if not hasattr(html, 'write_pdf'):
html = HTML(html)
pdf = html.write_pdf(stylesheets=stylesheets)
response = current_app.response_class(pdf, mimetype='application/pdf')
if download_filename:
response.headers.add('Content-Disposition', 'attachment',
filename=download_filename)
return response
| gpl-3.0 | 6,181,784,262,939,823,000 | 35.046729 | 79 | 0.612004 | false |
maheshp/novatest | nova/virt/xenapi/vmops.py | 1 | 74575 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import functools
import itertools
import time
from eventlet import greenthread
import netaddr
from oslo.config import cfg
from nova import block_device
from nova.compute import api as compute
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.StrOpt('xenapi_image_upload_handler',
default='nova.virt.xenapi.imageupload.glance.GlanceStore',
help='Object Store Driver used to handle image uploads.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
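# Fixed VBD userdevice slots assigned to each disk role attached to a guest.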
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '3'
DEVICE_CD = '4'
DEVICE_CONFIGDRIVE = '5'
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
# Compare each individual portion of both version strings
for va, vb in zip(a, b):
ret = int(va) - int(vb)
if ret:
return ret
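    # zip() stops at the shorter version, so e.g. '1.2' vs. '1.2.1' reaches
    # the length comparison below and the longer version wins.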
# Fallback to comparing length last
return len(a) - len(b)
def make_step_decorator(context, instance, instance_update):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
...
@step
def step2():
...
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
the current-step-count by 1, so after::
step1()
the current-step-count would be 1 giving a progress of ``1 / 2 *
100`` or 50%.
"""
step_info = dict(total=0, current=0)
def bump_progress():
step_info['current'] += 1
progress = round(float(step_info['current']) /
step_info['total'] * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
instance_update(context, instance['uuid'], {'progress': progress})
def step_decorator(f):
step_info['total'] += 1
@functools.wraps(f)
def inner(*args, **kwargs):
rv = f(*args, **kwargs)
bump_progress()
return rv
return inner
return step_decorator
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
msg = _("Importing image upload handler: %s")
LOG.debug(msg % CONF.xenapi_image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenapi_image_upload_handler)
@property
def agent_enabled(self):
return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def list_instances(self):
"""List VM instances."""
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls..
name_labels = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
def confirm_migration(self, migration, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info=network_info)
def _attach_mapped_block_devices(self, instance, block_device_info):
# We are attaching these volumes before start (no hotplugging)
# because some guests (windows) don't load PV drivers quickly
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'],
mount_device,
hotplug=False)
def finish_revert_migration(self, instance, block_device_info=None):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
# NOTE(danms): if we're reverting migration in the failure case,
# make sure we don't have a conflicting vm still running here,
# as might be the case in a failed migrate-to-same-host situation
new_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is not None:
if new_ref is not None:
self._destroy(instance, new_ref)
# Remove the '-orig' suffix (which was added in case the
# resized VM ends up on the source host, common during
# testing)
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
self._attach_mapped_block_devices(instance, block_device_info)
elif new_ref is not None:
# We crashed before the -orig backup was made
vm_ref = new_ref
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
root_vdi = vm_utils.move_disks(self._session, instance, disk_info)
if resize_instance:
self._resize_instance(instance, root_vdi)
# Check if kernel and ramdisk are external
kernel_file = None
ramdisk_file = None
name_label = instance['name']
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
disk_image_type = vm_utils.determine_disk_image_type(image_meta)
vm_ref = self._create_vm(context, instance, instance['name'],
{'root': root_vdi},
disk_image_type, network_info, kernel_file,
ramdisk_file)
self._attach_mapped_block_devices(instance, block_device_info)
# 5. Start VM
self._start(instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
# Attached volumes that have become non-responsive will prevent a VM
# from starting, so scan for these before attempting to start
#
# In order to make sure this detach is consistent (virt, BDM, cinder),
# we only detach in the virt-layer if a callback is provided.
if bad_volumes_callback:
bad_devices = self._volumeops.find_bad_volumes(vm_ref)
for device_name in bad_devices:
self._volumeops.detach_volume(
None, instance['name'], device_name)
self._session.call_xenapi('VM.start_on', vm_ref,
self._session.get_xenapi_host(),
False, False)
# Allow higher-layers a chance to detach bad-volumes as well (in order
# to cleanup BDM entries and detach in Cinder)
if bad_volumes_callback and bad_devices:
bad_volumes_callback(bad_devices)
def _create_disks(self, context, instance, name_label, disk_image_type,
image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
image_meta.get('id'),
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
for vdi in vdis.itervalues():
vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
vdi['uuid'])
root_vdi = vdis.get('root')
if root_vdi:
self._resize_instance(instance, root_vdi)
return vdis
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
name_label=None, rescue=False):
if name_label is None:
name_label = instance['name']
step = make_step_decorator(context, instance,
self._virtapi.instance_update)
@step
def determine_disk_image_type_step(undo_mgr):
return vm_utils.determine_disk_image_type(image_meta)
@step
def create_disks_step(undo_mgr, disk_image_type, image_meta):
vdis = self._create_disks(context, instance, name_label,
disk_image_type, image_meta,
block_device_info=block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
if not vdi.get('osvol')]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
@step
def create_kernel_ramdisk_step(undo_mgr):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
def undo_create_kernel_ramdisk():
if kernel_file or ramdisk_file:
LOG.debug(_("Removing kernel/ramdisk files from dom0"),
instance=instance)
vm_utils.destroy_kernel_ramdisk(
self._session, kernel_file, ramdisk_file)
undo_mgr.undo_with(undo_create_kernel_ramdisk)
return kernel_file, ramdisk_file
@step
def create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file):
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
def undo_create_vm():
self._destroy(instance, vm_ref, network_info=network_info)
undo_mgr.undo_with(undo_create_vm)
return vm_ref
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type, admin_password,
injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
# booting the VM, since we can't hotplug block devices
# on non-PV guests
@step
def attach_root_disk_step(undo_mgr, vm_ref):
orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
DEVICE_RESCUE, bootable=False)
@step
def setup_network_step(undo_mgr, vm_ref, vdis):
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
@step
def inject_metadata_step(undo_mgr, vm_ref):
self.inject_instance_metadata(instance, vm_ref)
@step
def prepare_security_group_filters_step(undo_mgr):
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
# NOTE(salvatore-orlando): setup_basic_filtering might be
# empty or not implemented at all, as basic filter could
# be implemented with VIF rules created by xapi plugin
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
@step
def boot_instance_step(undo_mgr, vm_ref):
self._boot_new_instance(instance, vm_ref, injected_files,
admin_password)
@step
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
@step
def bdev_set_default_root(undo_mgr):
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
undo_mgr = utils.UndoManager()
try:
# NOTE(sirp): The create_disks() step will potentially take a
# *very* long time to complete since it has to fetch the image
# over the network and images can be several gigs in size. To
# avoid progress remaining at 0% for too long, make sure the
# first step is something that completes rather quickly.
bdev_set_default_root(undo_mgr)
disk_image_type = determine_disk_image_type_step(undo_mgr)
vdis = create_disks_step(undo_mgr, disk_image_type, image_meta)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type)
setup_network_step(undo_mgr, vm_ref, vdis)
inject_metadata_step(undo_mgr, vm_ref)
prepare_security_group_filters_step(undo_mgr)
if rescue:
attach_root_disk_step(undo_mgr, vm_ref)
boot_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
except Exception:
msg = _("Failed to spawn, rolling back")
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _create_vm(self, context, instance, name_label, vdis,
disk_image_type, network_info, kernel_file=None,
ramdisk_file=None, rescue=False):
"""Create VM instance."""
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
# NOTE(mikal): file injection only happens if we are _not_ using a
# configdrive.
if not configdrive.required_by(instance):
self.inject_instance_metadata(instance, vm_ref)
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
rescue):
# Alter the image before VM start for network injection.
if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
self._create_vifs(vm_ref, instance, network_info)
self.inject_network_info(instance, network_info, vm_ref)
hostname = instance['hostname']
if rescue:
hostname = 'RESCUE-%s' % hostname
self.inject_hostname(instance, vm_ref, hostname)
def _create_vm_record(self, context, instance, name_label, vdis,
disk_image_type, kernel_file, ramdisk_file):
"""Create the VM record in Xen, making sure that we do not create
a duplicate name-label. Also do a rough sanity check on memory
to try to short-circuit a potential failure later. (The memory
check only accounts for running VMs, so it can miss other builds
that are in progress.)
"""
vm_ref = vm_utils.lookup(self._session, name_label)
if vm_ref is not None:
raise exception.InstanceExists(name=name_label)
# Ensure enough free memory is available
if not vm_utils.ensure_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
mode = vm_mode.get_from_instance(instance)
if mode == vm_mode.XEN:
use_pv_kernel = True
elif mode == vm_mode.HVM:
use_pv_kernel = False
else:
use_pv_kernel = vm_utils.determine_is_pv(self._session,
vdis['root']['ref'], disk_image_type, instance['os_type'])
            mode = vm_mode.XEN if use_pv_kernel else vm_mode.HVM
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
self._virtapi.instance_update(context,
instance['uuid'], {'vm_mode': mode})
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
return vm_ref
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = instance_types.extract_instance_type(instance)
# Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
LOG.debug(_("Detected ISO image type, creating blank VM "
"for install"), instance=instance)
cd_vdi = vdis.pop('root')
root_vdi = vm_utils.fetch_blank_disk(self._session,
instance_type['id'])
vdis['root'] = root_vdi
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=False)
vm_utils.create_vbd(self._session, vm_ref, cd_vdi['ref'],
DEVICE_CD, vbd_type='CD', bootable=True)
else:
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
vm_utils.auto_configure_disk(self._session,
root_vdi['ref'],
instance_type['root_gb'])
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
# Attach (optional) additional block-devices
for type_, vdi_info in vdis.items():
# Additional block-devices for boot use their device-name as the
# type.
if not type_.startswith('/dev'):
continue
# Convert device name to userdevice number, e.g. /dev/xvdb -> 1
userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
userdevice, bootable=False,
osvol=vdi_info.get('osvol'))
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
vm_utils.generate_swap(self._session, instance, vm_ref,
DEVICE_SWAP, name_label, swap_mb)
# Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
vm_utils.generate_ephemeral(self._session, instance, vm_ref,
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
# Attach (optional) configdrive v2 disk
if configdrive.required_by(instance):
vm_utils.generate_configdrive(self._session, instance, vm_ref,
DEVICE_CONFIGDRIVE,
admin_password=admin_password,
files=files)
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password):
"""Boot a new instance and configure it."""
LOG.debug(_('Starting VM'), instance=instance)
self._start(instance, vm_ref)
ctx = nova_context.get_admin_context()
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
if self.agent_enabled:
agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
else:
LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
'hypervisor': 'xen',
'os': instance['os_type'],
'architecture': instance['architecture']})
# Update agent, if necessary
# This also waits until the agent starts
agent = self._get_agent(instance, vm_ref)
version = agent.get_agent_version()
if version:
LOG.info(_('Instance agent version: %s'), version,
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
agent.agent_update(agent_build)
# if the guest agent is not available, configure the
# instance, but skip the admin password configuration
no_agent = version is None
# Inject ssh key.
agent.inject_ssh_key()
# Inject files, if necessary
if injected_files:
# Inject any files, if specified
for path, contents in injected_files:
agent.inject_file(path, contents)
# Set admin password, if necessary
if admin_password and not no_agent:
agent.set_admin_password(admin_password)
# Reset network config
agent.resetnetwork()
# Set VCPU weight
instance_type = instance_types.extract_instance_type(instance)
vcpu_weight = instance_type['vcpu_weight']
if vcpu_weight is not None:
LOG.debug(_("Setting VCPU weight"), instance=instance)
self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
'weight', str(vcpu_weight))
def _get_vm_opaque_ref(self, instance):
"""Get xapi OpaqueRef from a db record."""
vm_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is None:
raise exception.NotFound(_('Could not find VM with name %s') %
instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-data-store: Once coalesced, we call a plugin on the
XenServer that will bundle the VHDs together and then push the
bundle. Depending on the configured value of
'xenapi_image_upload_handler', image data may be pushed to
Glance or the specified data store.
"""
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label,
update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self.image_upload_handler.upload_image(context,
self._session,
instance,
vdi_uuids,
image_id)
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
locals(), instance=instance)
instance_uuid = instance['uuid']
try:
self._session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=instance_uuid, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except self._session.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the _create_disks step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
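        # e.g. step 2 of 5 -> progress 40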
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
# 1. NOOP since we're not transmitting the base-copy separately
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
vdi_uuid = vm_vdi_rec['uuid']
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
LOG.debug(_("Resizing down VDI %(vdi_uuid)s from "
"%(old_gb)dGB to %(new_gb)dGB"), locals(),
instance=instance)
# 2. Power down the instance before resizing
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Copy VDI, resize partition and filesystem, forget VDI,
# truncate VHD
new_ref, new_uuid = vm_utils.resize_disk(self._session,
instance,
vdi_ref,
instance_type)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the new VHD
self._migrate_vhd(instance, new_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_ref)
def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
sr_path):
# 1. Create Snapshot
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label) as vdi_uuids:
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Transfer the immutable VHDs (base-copies)
#
# The first VHD will be the leaf (aka COW) that is being used by
# the VM. For this step, we're only interested in the immutable
# VHDs which are all of the parents of the leaf VHD.
for seq_num, vdi_uuid in itertools.islice(
enumerate(vdi_uuids), 1, None):
self._migrate_vhd(instance, vdi_uuid, dest, sr_path, seq_num)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the COW VHD
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid']
self._migrate_vhd(instance, cow_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type):
"""Copies a VHD from one host machine to another, possibly
        resizing the filesystem beforehand.
:param instance: the instance that owns the VHD in question.
:param dest: the destination host machine.
:param instance_type: instance_type to resize to
"""
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
resize_down = instance['root_gb'] > instance_type['root_gb']
if resize_down and not instance['auto_disk_config']:
reason = _('Resize down not allowed without auto_disk_config')
raise exception.ResizeError(reason=reason)
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
        # that exist until confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
if resize_down:
self._migrate_disk_resizing_down(
context, instance, dest, instance_type, vm_ref, sr_path)
else:
self._migrate_disk_resizing_up(
context, instance, dest, vm_ref, sr_path)
# NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
# uses a staging-area (/images/instance<uuid>) and sequence-numbered
# VHDs to figure out how to reconstruct the VDI chain after syncing
disk_info = {}
return disk_info
def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
if not new_disk_size:
return
# Get current size of VDI
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
new_gb = instance['root_gb']
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"), locals(), instance=instance)
resize_func_name = self.check_resize_func_name()
self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
def check_resize_func_name(self):
"""Check the function name used to resize an instance based
on product_brand and product_version."""
brand = self._session.product_brand
version = self._session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
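        # e.g. XCP 1.1 and XenServer 5.x only support VDI.resize_online;
        # newer releases use VDI.resize.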
if bool(version) and bool(brand):
xcp = brand == 'XCP'
r1_2_or_above = (
(
version[0] == 1
and version[1] > 1
)
or version[0] > 1)
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
def reboot(self, instance, reboot_type, bad_volumes_callback=None):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
# remove existing filters
vm_ref = self._get_vm_opaque_ref(instance)
try:
if reboot_type == "HARD":
self._session.call_xenapi('VM.hard_reboot', vm_ref)
else:
self._session.call_xenapi('VM.clean_reboot', vm_ref)
        except self._session.XenAPI.Failure as exc:
details = exc.details
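            # XenAPI failure details carry the error name first and the
            # VM's actual power state last.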
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
" volumes and starting halted instance"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
else:
raise
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.set_admin_password(new_pass)
else:
raise NotImplementedError()
def inject_file(self, instance, path, contents):
"""Write a file to the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.inject_file(path, contents)
else:
raise NotImplementedError()
@staticmethod
def _sanitize_xenstore_key(key):
"""
Xenstore only allows the following characters as keys:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789-/_@
So convert the others to _
Also convert / to _, because that is somewhat like a path
separator.
"""
allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_@")
        return ''.join([x if x in allowed_chars else '_' for x in key])
def inject_instance_metadata(self, instance, vm_ref):
"""Inject instance metadata into xenstore."""
def store_meta(topdir, data_list):
for item in data_list:
key = self._sanitize_xenstore_key(item['key'])
value = item['value'] or ''
self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key),
jsonutils.dumps(value))
# Store user metadata
store_meta('vm-data/user-metadata', instance['metadata'])
def change_instance_metadata(self, instance, diff):
"""Apply changes to instance metadata to xenstore."""
vm_ref = self._get_vm_opaque_ref(instance)
for key, change in diff.items():
key = self._sanitize_xenstore_key(key)
location = 'vm-data/user-metadata/%s' % key
if change[0] == '-':
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
elif change[0] == '+':
self._add_to_param_xenstore(vm_ref, location,
jsonutils.dumps(change[1]))
try:
self._write_to_xenstore(instance, location, change[1],
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
return None
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
if not vdi_refs:
return
for vdi_ref in vdi_refs:
try:
vm_utils.destroy_vdi(self._session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
1. We have neither a ramdisk nor a kernel, in which case we are a
RAW image and can omit this step
2. We have one or the other, in which case, we should flag as an
error
3. We have both, in which case we safely remove both the kernel
and the ramdisk.
"""
instance_uuid = instance['uuid']
if not instance['kernel_id'] and not instance['ramdisk_id']:
# 1. No kernel or ramdisk
LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
"deletion"), instance=instance)
return
if not (instance['kernel_id'] and instance['ramdisk_id']):
# 2. We only have kernel xor ramdisk
raise exception.InstanceUnacceptable(instance_id=instance_uuid,
reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
vm_ref)
if kernel or ramdisk:
vm_utils.destroy_kernel_ramdisk(self._session, kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
"""Destroy a rescue instance."""
# Shutdown Rescue VM
vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
state = vm_utils.compile_info(vm_rec)['state']
if state != power_state.SHUTDOWN:
self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
# Destroy Rescue VDIs
vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
LOG.info(_("Destroying VM"), instance=instance)
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
# vm_ref is checked correctly where necessary.
vm_ref = vm_utils.lookup(self._session, instance['name'])
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
# NOTE(sirp): `block_device_info` is not used, information about which
# volumes should be detached is determined by the
# VBD.other_config['osvol'] attribute
return self._destroy(instance, vm_ref, network_info=network_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
2. Destroying associated VDIs.
3. Destroying kernel and ramdisk files (if necessary).
        4. Destroying the actual VM record.
"""
if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."),
instance=instance)
return
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
if destroy_disks:
self._volumeops.detach_all(vm_ref)
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(
instance, network_info=network_info)
def pause(self, instance):
"""Pause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.pause', vm_ref)
def unpause(self, instance):
"""Unpause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.unpause', vm_ref)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._acquire_bootlock(vm_ref)
self._session.call_xenapi('VM.suspend', vm_ref)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._session.call_xenapi('VM.resume', vm_ref, False, True)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
- shutdown the instance VM.
- set 'bootlock' to prevent the instance from starting in rescue.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
rescue_name_label = '%s-rescue' % instance['name']
rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
def unrescue(self, instance):
"""Unrescue the specified instance.
- unplug the instance VM's disk from the rescue VM.
- teardown the rescue VM.
- release the bootlock to allow the instance VM to start.
"""
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(
instance_id=instance['uuid'])
original_vm_ref = self._get_vm_opaque_ref(instance)
self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
"""Restore the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._start(instance, vm_ref)
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._start(instance, vm_ref)
def _cancel_stale_tasks(self, timeout, task):
"""Cancel the given tasks that are older than the given timeout."""
task_refs = self._session.call_xenapi("task.get_by_name_label", task)
for task_ref in task_refs:
task_rec = self._session.call_xenapi("task.get_record", task_ref)
task_created = timeutils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
reboot state for >= the given timeout
"""
# NOTE(jk0): All existing clean_reboot tasks must be cancelled before
# we can kick off the hard_reboot tasks.
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
def _get_vif_device_map(self, vm_rec):
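        """Return a dict mapping each VIF's device number to its MAC."""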
vif_map = {}
for vif in [self._session.call_xenapi("VIF.get_record", vrec)
for vrec in vm_rec['VIFs']]:
vif_map[vif['device']] = vif['MAC']
return vif_map
def get_all_bw_counters(self):
"""Return running bandwidth counter for each interface on each
running VM"""
counters = vm_utils.fetch_bandwidth(self._session)
bw = {}
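        # Shape of the result: {vm_name_label: {mac_address: vif_data}}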
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
vif_map = self._get_vif_device_map(vm_rec)
name = vm_rec['name_label']
if 'nova_uuid' not in vm_rec['other_config']:
continue
dom = vm_rec.get('domid')
if dom is None or dom not in counters:
continue
vifs_bw = bw.setdefault(name, {})
for vif_num, vif_data in counters[dom].iteritems():
mac = vif_map[vif_num]
vif_data['mac_address'] = mac
vifs_bw[mac] = vif_data
return bw
def get_console_output(self, instance):
"""Return snapshot of console."""
# TODO(armando-migliaccio): implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
# The compute manager expects InstanceNotFound for this case.
raise exception.InstanceNotFound(instance_id=instance['uuid'])
session_id = self._session.get_session_id()
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
        # NOTE: XS5.6sp2+ uses http over port 80 for xenapi communication
return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
"""convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
return None
return ip['address']
def fixed_ip_dict(ip, subnet):
if ip['version'] == 4:
netmask = str(subnet.as_netaddr().netmask)
else:
netmask = subnet.as_netaddr()._prefixlen
return {'ip': ip['address'],
'enabled': '1',
'netmask': netmask,
'gateway': get_ip(subnet['gateway'])}
def convert_route(route):
return {'route': str(netaddr.IPNetwork(route['cidr']).network),
'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
'gateway': get_ip(route['gateway'])}
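        # Build the dict that inject_network_info() writes to
        # vm-data/networking/<mac> in the xenstore.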
network = vif['network']
v4_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 4]
v6_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 6]
# NOTE(tr3buchet): routes and DNS come from all subnets
routes = [convert_route(route) for subnet in network['subnets']
for route in subnet['routes']]
dns = [get_ip(ip) for subnet in network['subnets']
for ip in subnet['dns']]
info_dict = {'label': network['label'],
'mac': vif['address']}
if v4_subnets:
# NOTE(tr3buchet): gateway and broadcast from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway'] = get_ip(v4_subnets[0]['gateway'])
info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast)
info_dict['ips'] = [fixed_ip_dict(ip, subnet)
for subnet in v4_subnets
for ip in subnet['ips']]
if v6_subnets:
# NOTE(tr3buchet): gateway from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway'])
info_dict['ip6s'] = [fixed_ip_dict(ip, subnet)
for subnet in v6_subnets
for ip in subnet['ips']]
if routes:
info_dict['routes'] = routes
if dns:
info_dict['dns'] = list(set(dns))
return info_dict
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
        vm_ref can be passed in because it will sometimes be different from
        what vm_utils.lookup(session, instance['name']) will find (ex: rescue).
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
for vif in network_info:
xs_data = self._vif_xenstore_data(vif)
location = ('vm-data/networking/%s' %
vif['address'].replace(':', ''))
self._add_to_param_xenstore(vm_ref,
location,
jsonutils.dumps(xs_data))
try:
self._write_to_xenstore(instance, location, xs_data,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _create_vifs(self, vm_ref, instance, network_info):
"""Creates vifs for an instance."""
LOG.debug(_("Creating vifs"), instance=instance)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.call_xenapi("VM.get_record", vm_ref)
for device, vif in enumerate(network_info):
vif_rec = self.vif_driver.plug(instance, vif,
vm_ref=vm_ref, device=device)
network_ref = vif_rec['network']
LOG.debug(_('Creating VIF for network %(network_ref)s'),
locals(), instance=instance)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
locals(), instance=instance)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def reset_network(self, instance):
"""Calls resetnetwork method in agent."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.resetnetwork()
else:
raise NotImplementedError()
def inject_hostname(self, instance, vm_ref, hostname):
"""Inject the hostname of the instance into the xenstore."""
if instance['os_type'] == "windows":
# NOTE(jk0): Windows hostnames can only be <= 15 chars.
hostname = hostname[:15]
LOG.debug(_("Injecting hostname to xenstore"), instance=instance)
self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname)
def _write_to_xenstore(self, instance, path, value, vm_ref=None):
"""
Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
return self._make_plugin_call('xenstore.py', 'write_record', instance,
vm_ref=vm_ref, path=path,
value=jsonutils.dumps(value))
def _delete_from_xenstore(self, instance, path, vm_ref=None):
"""
Deletes the value from the xenstore record for the given VM at
the specified location. A XenAPIPlugin.PluginError will be
raised if any error is encountered in the delete process.
"""
return self._make_plugin_call('xenstore.py', 'delete_record', instance,
vm_ref=vm_ref, path=path)
def _make_plugin_call(self, plugin, method, instance, vm_ref=None,
**addl_args):
"""
Abstracts out the process of calling a method of a xenapi plugin.
        Any errors raised by the plugin are logged and translated into a
        dict with 'returncode' and 'message' keys.
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
args = {'dom_id': vm_rec['domid']}
args.update(addl_args)
try:
return self._session.call_plugin(plugin, method, args)
        except self._session.XenAPI.Failure as e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. args=%(args)r'),
locals(), instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'error', 'message': err_msg}
return None
def _add_to_param_xenstore(self, vm_ref, key, val):
"""
Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
it is overwritten
"""
self._remove_from_param_xenstore(vm_ref, key)
self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val)
def _remove_from_param_xenstore(self, vm_ref, key):
"""
Takes a single key and removes it from the xenstore parameter
record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def unfilter_instance(self, instance_ref, network_info):
"""Removes filters for each VIF of the specified instance."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
current_aggregate = self._virtapi.aggregate_get_by_host(
context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
try:
return current_aggregate.metadetails[hostname]
except KeyError:
reason = _('Destination host:%(hostname)s must be in the same '
'aggregate as the source server')
raise exception.MigrationError(reason=reason % locals())
def _ensure_host_in_aggregate(self, context, hostname):
self._get_host_uuid_from_aggregate(context, hostname)
def _get_host_opaque_ref(self, context, hostname):
host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
return self._session.call_xenapi("host.get_by_uuid", host_uuid)
def _migrate_receive(self, ctxt):
destref = self._session.get_xenapi_host()
        # Get the network to use for the migration.
# This is the one associated with the pif marked management. From cli:
# uuid=`xe pif-list --minimal management=true`
# xe pif-param-get param-name=network-uuid uuid=$uuid
expr = 'field "management" = "true"'
pifs = self._session.call_xenapi('PIF.get_all_records_where',
expr)
if len(pifs) != 1:
            raise exception.MigrationError(_('No suitable network for migrate'))
nwref = pifs[pifs.keys()[0]]['network']
try:
options = {}
migrate_data = self._session.call_xenapi("host.migrate_receive",
destref,
nwref,
options)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Receive failed'))
return migrate_data
def check_can_live_migrate_destination(self, ctxt, instance_ref,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
if block_migration:
migrate_send_data = self._migrate_receive(ctxt)
destination_sr_ref = vm_utils.safe_find_sr(self._session)
dest_check_data = {
"block_migration": block_migration,
"migrate_data": {"migrate_send_data": migrate_send_data,
"destination_sr_ref": destination_sr_ref}}
return dest_check_data
else:
src = instance_ref['host']
self._ensure_host_in_aggregate(ctxt, src)
# TODO(johngarbutt) we currently assume
# instance is on a SR shared with other destination
# block migration work will be able to resolve this
return None
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it's possible to execute live migration on the source side.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
destination, includes block_migration flag
"""
if dest_check_data and 'migrate_data' in dest_check_data:
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = dest_check_data['migrate_data']
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
return dest_check_data
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
                raise exception.MigrationError(_('VM.assert_can_migrate '
                                                 'failed'))
def _generate_vdi_map(self, destination_sr_ref, vm_ref):
"""generate a vdi_map for _call_live_migrate_command."""
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
vif_map = {}
options = {}
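        # The 'True' positional argument is the live flag for
        # VM.migrate_send / VM.assert_can_migrate.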
self._session.call_xenapi(command_name, vm_ref,
migrate_send_data, True,
vdi_map, vif_map, options)
def live_migrate(self, context, instance, destination_hostname,
post_method, recover_method, block_migration,
migrate_data=None):
try:
vm_ref = self._get_vm_opaque_ref(instance)
if block_migration:
if not migrate_data:
                    raise exception.InvalidParameterValue(_('Block Migration '
                        'requires migrate data from destination'))
try:
self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Send failed'))
else:
host_ref = self._get_host_opaque_ref(context,
destination_hostname)
self._session.call_xenapi("VM.pool_migrate", vm_ref,
host_ref, {})
post_method(context, instance, destination_hostname,
block_migration)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
usage = {}
def _is_active(vm_rec):
power_state = vm_rec['power_state'].lower()
return power_state in ['running', 'paused']
def _get_uuid(vm_rec):
other_config = vm_rec['other_config']
return other_config.get('nova_uuid', None)
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage
| apache-2.0 | -5,872,085,318,642,051,000 | 41.73639 | 79 | 0.55996 | false |
iEngage/python-sdk | iengage_client/models/idea.py | 1 | 20100 | # coding: utf-8
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Idea(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, idea_id=None, idea_title=None, group=None, idea_description=None, ideator=None, idea_creation_date=None, last_modified_date=None, idea_stage=None, domain=None, technology=None, access_type=None, video_id=None, active_status=None, team_status=None, project_status=None, total_followers=None, total_comments=None, total_blogs=None, average_rating_score=None, number_of_ratings=None, current_user_following=False, current_user_rating=None, idea_file_url=None, attachment_list=None, sentiment=None, sentiment_details=None, sentiment_weightage=None, entity=None):
"""
Idea - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'idea_id': 'int',
'idea_title': 'str',
'group': 'Group',
'idea_description': 'str',
'ideator': 'User',
'idea_creation_date': 'datetime',
'last_modified_date': 'datetime',
'idea_stage': 'str',
'domain': 'str',
'technology': 'str',
'access_type': 'str',
'video_id': 'str',
'active_status': 'str',
'team_status': 'str',
'project_status': 'str',
'total_followers': 'int',
'total_comments': 'int',
'total_blogs': 'int',
'average_rating_score': 'float',
'number_of_ratings': 'int',
'current_user_following': 'bool',
'current_user_rating': 'float',
'idea_file_url': 'str',
'attachment_list': 'list[Multimedia]',
'sentiment': 'str',
'sentiment_details': 'Sentiment',
'sentiment_weightage': 'float',
'entity': 'list[NER]'
}
self.attribute_map = {
'idea_id': 'ideaId',
'idea_title': 'ideaTitle',
'group': 'group',
'idea_description': 'ideaDescription',
'ideator': 'ideator',
'idea_creation_date': 'ideaCreationDate',
'last_modified_date': 'lastModifiedDate',
'idea_stage': 'ideaStage',
'domain': 'domain',
'technology': 'technology',
'access_type': 'accessType',
'video_id': 'videoId',
'active_status': 'activeStatus',
'team_status': 'teamStatus',
'project_status': 'projectStatus',
'total_followers': 'totalFollowers',
'total_comments': 'totalComments',
'total_blogs': 'totalBlogs',
'average_rating_score': 'averageRatingScore',
'number_of_ratings': 'numberOfRatings',
'current_user_following': 'currentUserFollowing',
'current_user_rating': 'currentUserRating',
'idea_file_url': 'ideaFileURL',
'attachment_list': 'attachmentList',
'sentiment': 'sentiment',
'sentiment_details': 'sentimentDetails',
'sentiment_weightage': 'sentimentWeightage',
'entity': 'entity'
}
self._idea_id = idea_id
self._idea_title = idea_title
self._group = group
self._idea_description = idea_description
self._ideator = ideator
self._idea_creation_date = idea_creation_date
self._last_modified_date = last_modified_date
self._idea_stage = idea_stage
self._domain = domain
self._technology = technology
self._access_type = access_type
self._video_id = video_id
self._active_status = active_status
self._team_status = team_status
self._project_status = project_status
self._total_followers = total_followers
self._total_comments = total_comments
self._total_blogs = total_blogs
self._average_rating_score = average_rating_score
self._number_of_ratings = number_of_ratings
self._current_user_following = current_user_following
self._current_user_rating = current_user_rating
self._idea_file_url = idea_file_url
self._attachment_list = attachment_list
self._sentiment = sentiment
self._sentiment_details = sentiment_details
self._sentiment_weightage = sentiment_weightage
self._entity = entity
@property
def idea_id(self):
"""
Gets the idea_id of this Idea.
:return: The idea_id of this Idea.
:rtype: int
"""
return self._idea_id
@idea_id.setter
def idea_id(self, idea_id):
"""
Sets the idea_id of this Idea.
:param idea_id: The idea_id of this Idea.
:type: int
"""
self._idea_id = idea_id
@property
def idea_title(self):
"""
Gets the idea_title of this Idea.
:return: The idea_title of this Idea.
:rtype: str
"""
return self._idea_title
@idea_title.setter
def idea_title(self, idea_title):
"""
Sets the idea_title of this Idea.
:param idea_title: The idea_title of this Idea.
:type: str
"""
self._idea_title = idea_title
@property
def group(self):
"""
Gets the group of this Idea.
:return: The group of this Idea.
:rtype: Group
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this Idea.
:param group: The group of this Idea.
:type: Group
"""
self._group = group
@property
def idea_description(self):
"""
Gets the idea_description of this Idea.
:return: The idea_description of this Idea.
:rtype: str
"""
return self._idea_description
@idea_description.setter
def idea_description(self, idea_description):
"""
Sets the idea_description of this Idea.
:param idea_description: The idea_description of this Idea.
:type: str
"""
self._idea_description = idea_description
@property
def ideator(self):
"""
Gets the ideator of this Idea.
:return: The ideator of this Idea.
:rtype: User
"""
return self._ideator
@ideator.setter
def ideator(self, ideator):
"""
Sets the ideator of this Idea.
:param ideator: The ideator of this Idea.
:type: User
"""
self._ideator = ideator
@property
def idea_creation_date(self):
"""
Gets the idea_creation_date of this Idea.
:return: The idea_creation_date of this Idea.
:rtype: datetime
"""
return self._idea_creation_date
@idea_creation_date.setter
def idea_creation_date(self, idea_creation_date):
"""
Sets the idea_creation_date of this Idea.
:param idea_creation_date: The idea_creation_date of this Idea.
:type: datetime
"""
self._idea_creation_date = idea_creation_date
@property
def last_modified_date(self):
"""
Gets the last_modified_date of this Idea.
:return: The last_modified_date of this Idea.
:rtype: datetime
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""
Sets the last_modified_date of this Idea.
:param last_modified_date: The last_modified_date of this Idea.
:type: datetime
"""
self._last_modified_date = last_modified_date
@property
def idea_stage(self):
"""
Gets the idea_stage of this Idea.
:return: The idea_stage of this Idea.
:rtype: str
"""
return self._idea_stage
@idea_stage.setter
def idea_stage(self, idea_stage):
"""
Sets the idea_stage of this Idea.
:param idea_stage: The idea_stage of this Idea.
:type: str
"""
self._idea_stage = idea_stage
@property
def domain(self):
"""
Gets the domain of this Idea.
:return: The domain of this Idea.
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""
Sets the domain of this Idea.
:param domain: The domain of this Idea.
:type: str
"""
self._domain = domain
@property
def technology(self):
"""
Gets the technology of this Idea.
:return: The technology of this Idea.
:rtype: str
"""
return self._technology
@technology.setter
def technology(self, technology):
"""
Sets the technology of this Idea.
:param technology: The technology of this Idea.
:type: str
"""
self._technology = technology
@property
def access_type(self):
"""
Gets the access_type of this Idea.
:return: The access_type of this Idea.
:rtype: str
"""
return self._access_type
@access_type.setter
def access_type(self, access_type):
"""
Sets the access_type of this Idea.
:param access_type: The access_type of this Idea.
:type: str
"""
self._access_type = access_type
@property
def video_id(self):
"""
Gets the video_id of this Idea.
:return: The video_id of this Idea.
:rtype: str
"""
return self._video_id
@video_id.setter
def video_id(self, video_id):
"""
Sets the video_id of this Idea.
:param video_id: The video_id of this Idea.
:type: str
"""
self._video_id = video_id
@property
def active_status(self):
"""
Gets the active_status of this Idea.
:return: The active_status of this Idea.
:rtype: str
"""
return self._active_status
@active_status.setter
def active_status(self, active_status):
"""
Sets the active_status of this Idea.
:param active_status: The active_status of this Idea.
:type: str
"""
self._active_status = active_status
@property
def team_status(self):
"""
Gets the team_status of this Idea.
:return: The team_status of this Idea.
:rtype: str
"""
return self._team_status
@team_status.setter
def team_status(self, team_status):
"""
Sets the team_status of this Idea.
:param team_status: The team_status of this Idea.
:type: str
"""
self._team_status = team_status
@property
def project_status(self):
"""
Gets the project_status of this Idea.
:return: The project_status of this Idea.
:rtype: str
"""
return self._project_status
@project_status.setter
def project_status(self, project_status):
"""
Sets the project_status of this Idea.
:param project_status: The project_status of this Idea.
:type: str
"""
self._project_status = project_status
@property
def total_followers(self):
"""
Gets the total_followers of this Idea.
:return: The total_followers of this Idea.
:rtype: int
"""
return self._total_followers
@total_followers.setter
def total_followers(self, total_followers):
"""
Sets the total_followers of this Idea.
:param total_followers: The total_followers of this Idea.
:type: int
"""
self._total_followers = total_followers
@property
def total_comments(self):
"""
Gets the total_comments of this Idea.
:return: The total_comments of this Idea.
:rtype: int
"""
return self._total_comments
@total_comments.setter
def total_comments(self, total_comments):
"""
Sets the total_comments of this Idea.
:param total_comments: The total_comments of this Idea.
:type: int
"""
self._total_comments = total_comments
@property
def total_blogs(self):
"""
Gets the total_blogs of this Idea.
:return: The total_blogs of this Idea.
:rtype: int
"""
return self._total_blogs
@total_blogs.setter
def total_blogs(self, total_blogs):
"""
Sets the total_blogs of this Idea.
:param total_blogs: The total_blogs of this Idea.
:type: int
"""
self._total_blogs = total_blogs
@property
def average_rating_score(self):
"""
Gets the average_rating_score of this Idea.
:return: The average_rating_score of this Idea.
:rtype: float
"""
return self._average_rating_score
@average_rating_score.setter
def average_rating_score(self, average_rating_score):
"""
Sets the average_rating_score of this Idea.
:param average_rating_score: The average_rating_score of this Idea.
:type: float
"""
self._average_rating_score = average_rating_score
@property
def number_of_ratings(self):
"""
Gets the number_of_ratings of this Idea.
:return: The number_of_ratings of this Idea.
:rtype: int
"""
return self._number_of_ratings
@number_of_ratings.setter
def number_of_ratings(self, number_of_ratings):
"""
Sets the number_of_ratings of this Idea.
:param number_of_ratings: The number_of_ratings of this Idea.
:type: int
"""
self._number_of_ratings = number_of_ratings
@property
def current_user_following(self):
"""
Gets the current_user_following of this Idea.
:return: The current_user_following of this Idea.
:rtype: bool
"""
return self._current_user_following
@current_user_following.setter
def current_user_following(self, current_user_following):
"""
Sets the current_user_following of this Idea.
:param current_user_following: The current_user_following of this Idea.
:type: bool
"""
self._current_user_following = current_user_following
@property
def current_user_rating(self):
"""
Gets the current_user_rating of this Idea.
:return: The current_user_rating of this Idea.
:rtype: float
"""
return self._current_user_rating
@current_user_rating.setter
def current_user_rating(self, current_user_rating):
"""
Sets the current_user_rating of this Idea.
:param current_user_rating: The current_user_rating of this Idea.
:type: float
"""
self._current_user_rating = current_user_rating
@property
def idea_file_url(self):
"""
Gets the idea_file_url of this Idea.
:return: The idea_file_url of this Idea.
:rtype: str
"""
return self._idea_file_url
@idea_file_url.setter
def idea_file_url(self, idea_file_url):
"""
Sets the idea_file_url of this Idea.
:param idea_file_url: The idea_file_url of this Idea.
:type: str
"""
self._idea_file_url = idea_file_url
@property
def attachment_list(self):
"""
Gets the attachment_list of this Idea.
:return: The attachment_list of this Idea.
:rtype: list[Multimedia]
"""
return self._attachment_list
@attachment_list.setter
def attachment_list(self, attachment_list):
"""
Sets the attachment_list of this Idea.
:param attachment_list: The attachment_list of this Idea.
:type: list[Multimedia]
"""
self._attachment_list = attachment_list
@property
def sentiment(self):
"""
Gets the sentiment of this Idea.
:return: The sentiment of this Idea.
:rtype: str
"""
return self._sentiment
@sentiment.setter
def sentiment(self, sentiment):
"""
Sets the sentiment of this Idea.
:param sentiment: The sentiment of this Idea.
:type: str
"""
self._sentiment = sentiment
@property
def sentiment_details(self):
"""
Gets the sentiment_details of this Idea.
:return: The sentiment_details of this Idea.
:rtype: Sentiment
"""
return self._sentiment_details
@sentiment_details.setter
def sentiment_details(self, sentiment_details):
"""
Sets the sentiment_details of this Idea.
:param sentiment_details: The sentiment_details of this Idea.
:type: Sentiment
"""
self._sentiment_details = sentiment_details
@property
def sentiment_weightage(self):
"""
Gets the sentiment_weightage of this Idea.
:return: The sentiment_weightage of this Idea.
:rtype: float
"""
return self._sentiment_weightage
@sentiment_weightage.setter
def sentiment_weightage(self, sentiment_weightage):
"""
Sets the sentiment_weightage of this Idea.
:param sentiment_weightage: The sentiment_weightage of this Idea.
:type: float
"""
self._sentiment_weightage = sentiment_weightage
@property
def entity(self):
"""
Gets the entity of this Idea.
:return: The entity of this Idea.
:rtype: list[NER]
"""
return self._entity
@entity.setter
def entity(self, entity):
"""
Sets the entity of this Idea.
:param entity: The entity of this Idea.
:type: list[NER]
"""
self._entity = entity
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, Idea):
            return False
        return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
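# Example usage (sketch; assumes the generated no-argument constructor and
# hypothetical field values):
#     idea = Idea()
#     idea.total_followers = 10
#     idea.sentiment = 'positive'
#     payload = idea.to_dict()  # nested models are serialized recursively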
| apache-2.0 | 2,531,994,326,339,373,000 | 25.447368 | 581 | 0.558806 | false |
rodrigoancavil/repy | parseurl.py | 1 | 1217 | #!/usr/bin/python
# example 1: parse a url with the re module
# this takes a string with a url and splits it into the protocol, ip or nameserver, and port
# if the port doesn't exist and the protocol is http, the port is 80.
import re
url = raw_input('url : ')
# check if the url has the form protocol://hostname:port
# protocol is character set [a-zA-Z]{2,}: ftp, http, mongodb, ssh, etc.
# hostname is character set [a-zA-Z0-9\\.\-]: www.server.com, 127.0.0.1, etc.
# port is numeric [0-9]: 80, 8080, 21, 22, 25, 27001, etc. If you omit the port, assume 80 if the protocol is http.
# ToDo:
#     - If protocol is ftp and you omit the port, set port number 21
#     - If protocol is ssh and you omit the port, set port number 22
#     Etc... (see the sketch at the end of this script)
parser = re.compile('[a-zA-Z]{2,}://[a-zA-Z0-9\\.\-]+(?::[0-9]{2,}|:?)$')
if bool(parser.search(url)):
print 'The url is valid << %s >>'%url
protocol = re.search('\w+(?=://)',url).group()
hostname = re.search('(?<=://)[a-zA-Z0-9\\.\-]+(?=:)*',url).group()
port = re.search('(?<=:)[0-9]{2,}',url)
print protocol
print hostname
if port != None:
print port.group()
else:
if protocol == 'http': print '80'
else:
print 'The url is invalid << %s >>'%url
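# Sketch of the ToDo above (assumed default ports, not part of the original
# flow; a protocol -> port table removes the http special case):
#     DEFAULT_PORTS = {'http': '80', 'https': '443', 'ftp': '21', 'ssh': '22'}
#     if port is not None:
#         print port.group()
#     else:
#         print DEFAULT_PORTS.get(protocol, 'unknown port')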
| gpl-2.0 | -8,859,035,382,673,495,000 | 33.771429 | 110 | 0.601479 | false |
ebrensi/registry-frontend | ff.py | 1 | 1240 | #!/usr/bin/env python
# This script is for testing without having to host the flask app.
import folium
import pandas as pd
import os
from sqlalchemy import create_engine
import geojson
DATABASE_URL = os.environ["DATABASE_URL"]
STATES_GEOJSON_PATH = "static/us-states.json"
engine = create_engine(DATABASE_URL)
with engine.connect() as db:
query = "Select state, count(*) From registry Group By state;"
df = pd.read_sql_query(query, db)
with open(STATES_GEOJSON_PATH, "r") as file:
gj = geojson.load(file)
# Folium choropleth requires a one-to-one correspondence between GeoJSON
# features (state definitions) and shade values, so we will make a new
# GeoJSON object that is a FeatureCollection of only the states that we
# have data for.
relevant_features = [feature for feature in gj["features"]
if ("id" in feature) and
(feature["id"] in df["state"].values)]
gj_relevant = geojson.FeatureCollection(relevant_features)
geo_str = geojson.dumps(gj_relevant)
base_map = folium.Map([43, -100], zoom_start=5)
base_map.choropleth(
geo_str=geo_str,
data=df,
columns=['state', 'count'],
key_on='feature.id',
fill_color='PuBuGn',
)
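# Note: `geo_str` is the older folium keyword; if this breaks on a newer
# folium (>= 0.5), the call is presumably spelled with `geo_data` instead:
#     base_map.choropleth(geo_data=geo_str, data=df,
#                         columns=['state', 'count'],
#                         key_on='feature.id', fill_color='PuBuGn')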
base_map.save("map.html")
| mit | -3,929,384,207,766,329,000 | 25.956522 | 72 | 0.691935 | false |
Vauxoo/stock-logistics-warehouse | stock_cycle_count/reports/report_stock_location_accuracy.py | 1 | 1289 | # Copyright 2017 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, models
class LocationAccuracyReport(models.AbstractModel):
_name = "report.stock_location_accuracy"
_description = "Location Accuracy Report"
@api.model
def _get_inventory_domain(self, loc_id, exclude_sublocation=True):
return [('location_id', '=', loc_id),
('exclude_sublocation', '=', exclude_sublocation),
('state', '=', 'done')]
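    # e.g. _get_inventory_domain(42) yields the Odoo search domain:
    #   [('location_id', '=', 42),
    #    ('exclude_sublocation', '=', True),
    #    ('state', '=', 'done')]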
@api.model
def _get_location_data(self, locations):
data = dict()
inventory_obj = self.env["stock.inventory"]
for loc in locations:
counts = inventory_obj.search(self._get_inventory_domain(loc.id))
data[loc] = counts
return data
@api.multi
def render_html(self, data=None):
report_obj = self.env["report"]
locs = self.env["stock.location"].browse(self._ids)
data = self._get_location_data(locs)
docargs = {
"doc_ids": locs._ids,
"docs": locs,
"data": data,
}
return report_obj.render(
"stock_cycle_count.stock_location_accuracy", docargs)
| agpl-3.0 | 6,055,364,514,302,601,000 | 32.921053 | 77 | 0.596587 | false |
TheVirtualLtd/bda.plone.orders | src/bda/plone/orders/upgrades.py | 1 | 14529 | # -*- coding: utf-8 -*-
from bda.plone.cart import get_object_by_uid
from bda.plone.orders import message_factory as _
from bda.plone.orders.common import acquire_vendor_or_shop_root
from bda.plone.orders.common import calculate_order_salaried
from bda.plone.orders.common import calculate_order_state
from bda.plone.orders.common import get_bookings_soup
from bda.plone.orders.common import get_order
from bda.plone.orders.common import get_orders_soup
from bda.plone.orders.common import OrderData
from bda.plone.orders.contacts import get_contacts_soup
from bda.plone.orders.interfaces import ITrading
from bda.plone.payment import Payments
from bda.plone.shipping.interfaces import IShippingItem
from decimal import Decimal
from node.ext.zodb.utils import reset_odict
from plone.uuid.interfaces import IUUID
from zope.component.hooks import getSite
import logging
import uuid
logger = logging.getLogger('bda.plone.orders UPGRADE')
def fix_bookings_vendor_uid(ctx=None):
"""Add vendor_uid attribute to booking records.
"""
portal = getSite()
soup = get_bookings_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
update = False
try:
item.attrs['vendor_uid']
if not isinstance(item.attrs['vendor_uid'], uuid.UUID):
update = True
except KeyError:
update = True
if not update:
continue
buyable_uid = item.attrs['buyable_uid']
obj = get_object_by_uid(portal, buyable_uid)
if not obj:
shop = acquire_vendor_or_shop_root(portal)
else:
shop = acquire_vendor_or_shop_root(obj)
vendor_uid = uuid.UUID(IUUID(shop))
item.attrs['vendor_uid'] = vendor_uid
need_rebuild = True
logging.info(
u"Added vendor_uid to booking {0}".format(item.attrs['uid'])
)
if need_rebuild:
soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_orders_vendor_uids(ctx=None):
"""Add vendor_uids attribute to order records.
"""
portal = getSite()
soup = get_orders_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
update = False
try:
item.attrs['vendor_uids']
if not isinstance(item.attrs['vendor_uids'], list)\
or not item.attrs['vendor_uids']:
update = True
except KeyError:
update = True
if not update:
continue
order_data = OrderData(portal, order=item)
vendor_uids = set()
for booking in order_data.bookings:
vendor_uids.add(booking.attrs['vendor_uid'])
item.attrs['vendor_uids'] = list(vendor_uids)
need_rebuild = True
logging.info(
u"Added vendor_uids to order {0}".format(item.attrs['uid'])
)
if need_rebuild:
soup.rebuild()
logging.info("Rebuilt orders catalog")
def fix_bookings_state_salaried_tid(ctx=None):
portal = getSite()
soup = get_orders_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
order_data = OrderData(portal, order=item)
try:
state = item.attrs['state']
state_exists = True
except KeyError:
state = None
state_exists = False
try:
salaried = item.attrs['salaried']
salaried_exists = True
except KeyError:
salaried = None
salaried_exists = False
try:
tid = item.attrs['tid']
tid_exists = True
except KeyError:
tid = 'none' # tid default in b.p.payment
tid_exists = False
for booking in order_data.bookings:
# add too booking node
try:
booking.attrs['state']
except KeyError:
booking.attrs['state'] = state
need_rebuild = True
logging.info(
u"Added state {0} to booking {1}".format(
state, item.attrs['uid']
)
)
try:
booking.attrs['salaried']
except KeyError:
booking.attrs['salaried'] = salaried
need_rebuild = True
logging.info(
u"Added salaried {0} to booking {1}".format(
salaried, item.attrs['uid']
)
)
try:
booking.attrs['tid']
except KeyError:
booking.attrs['tid'] = tid
need_rebuild = True
logging.info(
u"Added tid {0} to booking {1}".format(
tid, item.attrs['uid']
)
)
# now, delete from order node
if state_exists:
del item.attrs['state']
if salaried_exists:
del item.attrs['salaried']
if tid_exists:
del item.attrs['tid']
if need_rebuild:
bookings_soup = get_bookings_soup(portal)
bookings_soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_discount_attrs(ctx=None):
portal = getSite()
# discount attrs on order
orders_soup = get_orders_soup(portal)
need_rebuild = False
data = orders_soup.storage.data
for item in data.values():
try:
item.attrs['cart_discount_net']
except KeyError:
need_rebuild = True
item.attrs['cart_discount_net'] = Decimal(0)
logging.info(
u"Added cart_discount_net to order {0}".format(
item.attrs['uid']
)
)
try:
item.attrs['cart_discount_vat']
except KeyError:
need_rebuild = True
item.attrs['cart_discount_vat'] = Decimal(0)
logging.info(
u"Added cart_discount_vat to order {0}".format(
item.attrs['uid']
)
)
if need_rebuild:
orders_soup.rebuild()
logging.info("Rebuilt orders catalog")
# discount attrs on bookings
bookings_soup = get_bookings_soup(portal)
need_rebuild = False
data = bookings_soup.storage.data
for item in data.values():
try:
item.attrs['discount_net']
except KeyError:
need_rebuild = True
item.attrs['discount_net'] = Decimal(0)
logging.info(
u"Added discount_net to booking {0}".format(item.attrs['uid'])
)
if need_rebuild:
bookings_soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_shipping_attrs(ctx=None):
portal = getSite()
orders_soup = get_orders_soup(portal)
data = orders_soup.storage.data
for item in data.values():
try:
item.attrs['shipping_method']
except KeyError:
item.attrs['shipping_method'] = 'unknown'
logging.info(
u"Added shipping_method {0} to booking {1}".format(
'unknown', item.attrs['uid']
)
)
try:
item.attrs['shipping_label']
except KeyError:
item.attrs['shipping_label'] = _('unknown', default=u'Unknown')
logging.info(
u"Added shipping_label {0} to booking {1}".format(
'unknown', item.attrs['uid']
)
)
try:
item.attrs['shipping_description']
except KeyError:
item.attrs['shipping_description'] = \
_('unknown', default=u'Unknown')
logging.info(
u"Added shipping_description {0} to booking {1}".format(
'unknown', item.attrs['uid']
)
)
try:
item.attrs['shipping_net']
except KeyError:
item.attrs['shipping_net'] = item.attrs['shipping']
logging.info(
u"Added shipping_net {0} to booking {1}".format(
item.attrs['shipping'], item.attrs['uid']
)
)
try:
item.attrs['shipping_vat']
except KeyError:
item.attrs['shipping_vat'] = Decimal(0)
logging.info(
u"Added shipping_vat {0} to booking {1}".format(
Decimal(0), item.attrs['uid']
)
)
def fix_payment_attrs(ctx=None):
portal = getSite()
payments = Payments(portal)
orders_soup = get_orders_soup(portal)
data = orders_soup.storage.data
for item in data.values():
try:
item.attrs['payment_method']
item.attrs['payment_label']
continue
except KeyError:
payment_method = item.attrs['payment_selection.payment']
payment = payments.get(payment_method)
if payment:
payment_label = payment.label
else:
payment_label = _('unknown', default=u'Unknown')
item.attrs['payment_method'] = payment_method
logging.info(
u"Added payment_method {0} to booking {1}".format(
payment_method, item.attrs['uid']
)
)
item.attrs['payment_label'] = payment_label
logging.info(
u"Added payment_label {0} to booking {1}".format(
payment_label, item.attrs['uid']
)
)
def fix_bookings_shippable(ctx=None):
portal = getSite()
soup = get_bookings_soup(portal)
data = soup.storage.data
need_rebuild = False
for booking in data.values():
try:
booking.attrs['shippable']
except KeyError:
obj = get_object_by_uid(portal, booking.attrs['buyable_uid'])
shippable = True
if obj:
shippable = IShippingItem(obj).shippable
booking.attrs['shippable'] = shippable
need_rebuild = True
logging.info(
u"Added shippable {0} to booking {1}".format(
shippable, booking.attrs['uid']
)
)
if need_rebuild:
bookings_soup = get_bookings_soup(portal)
bookings_soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_bookings_trading(ctx=None):
portal = getSite()
soup = get_bookings_soup(portal)
data = soup.storage.data
need_rebuild = False
for booking in data.values():
try:
booking.attrs['item_number']
except KeyError:
obj = get_object_by_uid(portal, booking.attrs['buyable_uid'])
if obj:
trading = ITrading(obj)
item_number = trading.item_number
gtin = trading.gtin
else:
item_number = ''
gtin = ''
need_rebuild = True
booking.attrs['item_number'] = item_number
logging.info(
u"Added item_number {0} to booking {1}".format(
item_number, booking.attrs['uid']
)
)
booking.attrs['gtin'] = gtin
logging.info(
u"Added gtin {0} to booking {1}".format(
gtin, booking.attrs['uid']
)
)
if need_rebuild:
bookings_soup = get_bookings_soup(portal)
bookings_soup.rebuild()
logging.info("Rebuilt bookings catalog")
def reset_records(ctx=None):
def ignore_key(key):
return key.startswith('____')
portal = getSite()
soup = get_orders_soup(portal)
data = soup.storage.data
for order in data.values():
reset_odict(order.attrs.storage, ignore_key=ignore_key)
logging.info(
u'Reset attributes storage on order {0}'.format(
order.attrs['uid'],
)
)
soup = get_bookings_soup(portal)
data = soup.storage.data
for booking in data.values():
reset_odict(booking.attrs.storage, ignore_key=ignore_key)
logging.info(
u"Reset attributes storage on booking {0}".format(
booking.attrs['uid']
)
)
def fix_bookings_email(ctx=None):
"""Add email attribute to booking records from the corresponding order.
"""
portal = getSite()
soup = get_bookings_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
update = False
try:
item.attrs['email']
except KeyError:
update = True
if not update:
continue
order = get_order(portal, item.attrs['order_uid'])
email = order.attrs.get('personal_data.email', 'n/a')
item.attrs['email'] = email
need_rebuild = True
logging.info(
u"Added email to booking {0}".format(item.attrs['uid'])
)
if need_rebuild:
soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_contacts_email(ctx=None):
"""Add email attribute to contact records.
"""
portal = getSite()
soup = get_contacts_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
update = False
try:
item.attrs['email']
except KeyError:
update = True
if not update:
continue
email = item.attrs.get('personal_data.email', 'n/a')
item.attrs['email'] = email
need_rebuild = True
logging.info(
u"Added email to contact {0}".format(item.attrs['uid'])
)
if need_rebuild:
soup.rebuild()
logging.info("Rebuilt contacts catalog")
def fix_order_state_and_salaried(ctx=None):
"""Re-add state and salaried on order, needed for sorting in orders table
"""
portal = getSite()
soup = get_orders_soup(portal)
data = soup.storage.data
for order in data.values():
order_data = OrderData(portal, uid=order.attrs['uid'])
bookings = order_data.bookings
order.attrs['state'] = calculate_order_state(bookings)
order.attrs['salaried'] = calculate_order_salaried(bookings)
soup.rebuild()
| bsd-3-clause | -2,279,739,217,219,872,300 | 31.286667 | 78 | 0.542157 | false |
greenlin/universal-portfolios | universal/result.py | 1 | 10866 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from universal import tools
class PickleMixin(object):
def save(self, filename):
""" Save object as a pickle """
with open(filename, 'wb') as f:
pickle.dump(self, f, -1)
@classmethod
def load(cls, filename):
""" Load pickled object. """
with open(filename, 'rb') as f:
return pickle.load(f)
class AlgoResult(PickleMixin):
""" Results returned by algo's run method. The class containts useful
metrics such as sharpe ratio, mean return, drawdowns, ... and also
many visualizations.
You can specify transactions by setting AlgoResult.fee. Fee is
expressed in a percentages as a one-round fee.
"""
def __init__(self, X, B):
"""
:param X: Price relatives.
:param B: Weights.
"""
# set initial values
self._fee = 0.
self._B = B
self.rf_rate = 0.
self._X = X
# update logarithms, fees, etc.
self._recalculate()
@property
def X(self):
return self._X
@X.setter
def X(self, _X):
self._X = _X
self._recalculate()
@property
def B(self):
return self._B
@B.setter
def B(self, _B):
self._B = _B
self._recalculate()
@property
def fee(self):
return self._fee
@fee.setter
def fee(self, value):
""" Set transaction costs. Fees can be either float or Series
of floats for individual assets with proper indices. """
if isinstance(value, dict):
value = pd.Series(value)
if isinstance(value, pd.Series):
missing = set(self.X.columns) - set(value.index)
assert len(missing) == 0, 'Missing fees for {}'.format(missing)
self._fee = value
self._recalculate()
def _recalculate(self):
# calculate return for individual stocks
r = (self.X - 1) * self.B
self.asset_r = r + 1
self.r = r.sum(axis=1) + 1
# stock went bankrupt
self.r[self.r < 0] = 0.
# add fees
if not isinstance(self._fee, float) or self._fee != 0:
fees = (self.B.shift(-1).mul(self.r, axis=0) - self.B * self.X).abs()
fees.iloc[0] = self.B.ix[0]
fees.iloc[-1] = 0.
fees *= self._fee
self.asset_r -= fees
self.r -= fees.sum(axis=1)
self.r_log = np.log(self.r)
@property
def weights(self):
return self.B
@property
def equity(self):
return self.r.cumprod()
@property
def equity_decomposed(self):
""" Return equity decomposed to individual assets. """
return self.asset_r.cumprod()
@property
def asset_equity(self):
return self.X.cumprod()
@property
def total_wealth(self):
return self.r.prod()
@property
def profit_factor(self):
x = self.r_log
up = x[x > 0].sum()
down = -x[x < 0].sum()
return up / down if down != 0 else np.inf
@property
def sharpe(self):
""" Compute annualized sharpe ratio from log returns. If data does
not contain datetime index, assume daily frequency with 252 trading days a year.
"""
return tools.sharpe(self.r_log, rf_rate=self.rf_rate, freq=self.freq())
@property
def information(self):
""" Information ratio benchmarked against uniform CRP portfolio. """
s = self.X.mean(axis=1)
x = self.r_log - np.log(s)
mu, sd = x.mean(), x.std()
freq = self.freq()
if sd > 1e-8:
return mu / sd * np.sqrt(freq)
elif mu > 1e-8:
return np.inf * np.sign(mu)
else:
return 0.
@property
def growth_rate(self):
return self.r_log.mean() * self.freq()
@property
def volatility(self):
return np.sqrt(self.freq()) * self.r_log.std()
@property
def annualized_return(self):
return np.exp(self.r_log.mean() * self.freq()) - 1
@property
def annualized_volatility(self):
return np.exp(self.r_log).std() * np.sqrt(self.freq())
@property
def drawdown_period(self):
''' Returns longest drawdown perid. Stagnation is a drawdown too. '''
x = self.equity
period = [0.] * len(x)
peak = 0
for i in range(len(x)):
# new peak
if x[i] > peak:
peak = x[i]
period[i] = 0
else:
period[i] = period[i-1] + 1
return max(period) * 252. / self.freq()
@property
def max_drawdown(self):
''' Returns highest drawdown in percentage. '''
x = self.equity
return max(1. - x / x.cummax())
@property
def winning_pct(self):
x = self.r_log
win = (x > 0).sum()
all_trades = (x != 0).sum()
return float(win) / all_trades
def freq(self, x=None):
""" Number of data items per year. If data does not contain
datetime index, assume daily frequency with 252 trading days a year."""
        # `x or self.r` would raise for a pandas Series (ambiguous truth value)
        x = self.r if x is None else x
return tools.freq(x.index)
def summary(self, name=None):
return """Summary{}:
Profit factor: {:.2f}
Sharpe ratio: {:.2f}
Information ratio (wrt UCRP): {:.2f}
Annualized return: {:.2f}%
Annualized volatility: {:.2f}%
Longest drawdown: {:.0f} days
Max drawdown: {:.2f}%
Winning days: {:.1f}%
""".format(
'' if name is None else ' for ' + name,
self.profit_factor,
self.sharpe,
self.information,
100 * self.annualized_return,
100 * self.annualized_volatility,
self.drawdown_period,
100 * self.max_drawdown,
100 * self.winning_pct
)
def plot(self, weights=True, assets=True, portfolio_label='PORTFOLIO', **kwargs):
""" Plot equity of all assets plus our strategy.
:param weights: Plot weights as a subplot.
:param assets: Plot asset prices.
:return: List of axes.
"""
res = ListResult([self], [portfolio_label])
if not weights:
ax1 = res.plot(assets=assets, **kwargs)
return [ax1]
else:
plt.figure(1)
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
res.plot(assets=assets, ax=ax1, **kwargs)
ax2 = plt.subplot2grid((3, 1), (2, 0), sharex=ax1)
# plot weights as lines
if self.B.values.min() < -0.01:
self.B.plot(ax=ax2, ylim=(min(0., self.B.values.min()), max(1., self.B.values.max())),
legend=False, colormap=plt.get_cmap('jet'))
else:
# fix rounding errors near zero
if self.B.values.min() < 0:
B = self.B - self.B.values.min()
else:
B = self.B
B.plot(ax=ax2, ylim=(0., max(1., B.values.max())),
legend=False, colormap=plt.get_cmap('jet'), kind='area', stacked=True)
plt.ylabel('weights')
return [ax1, ax2]
def hedge(self, result=None):
""" Hedge results with results of other strategy (subtract weights).
:param result: Other result object. Default is UCRP.
:return: New AlgoResult object.
"""
if result is None:
from algos import CRP
result = CRP().run(self.X.cumprod())
return AlgoResult(self.X, self.B - result.B)
def plot_decomposition(self, **kwargs):
""" Decompose equity into components of individual assets and plot
them. Does not take fees into account. """
ax = self.equity_decomposed.plot(**kwargs)
return ax
@property
def importance(self):
ws = self.weights.sum()
return ws / sum(ws)
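# Example usage (sketch): `X` (price relatives) and `B` (weights) are assumed
# to be pandas DataFrames with matching index/columns:
#     result = AlgoResult(X, B)
#     result.fee = 0.0005  # one-round transaction cost of 5 bps
#     print(result.summary('my strategy'))
#     result.plot(weights=True)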
class ListResult(list, PickleMixin):
""" List of AlgoResults. """
def __init__(self, results=None, names=None):
results = results if results is not None else []
names = names if names is not None else []
super(ListResult, self).__init__(results)
self.names = names
def append(self, result, name):
super(ListResult, self).append(result)
self.names.append(name)
def to_dataframe(self):
""" Calculate equities for all results and return one dataframe. """
eq = {}
for result, name in zip(self, self.names):
eq[name] = result.equity
return pd.DataFrame(eq)
def save(self, filename, **kwargs):
# do not save it with fees
#self.fee = 0.
#self.to_dataframe().to_pickle(*args, **kwargs)
with open(filename, 'wb') as f:
pickle.dump(self, f, -1)
@classmethod
def load(cls, filename):
# df = pd.read_pickle(*args, **kwargs)
# return cls([df[c] for c in df], df.columns)
with open(filename, 'rb') as f:
return pickle.load(f)
@property
def fee(self):
return {name: result.fee for result, name in zip(self, self.names)}
@fee.setter
def fee(self, value):
for result in self:
result.fee = value
def summary(self):
return '\n'.join([result.summary(name) for result, name in zip(self, self.names)])
def plot(self, ucrp=False, bah=False, assets=False, **kwargs):
""" Plot strategy equity.
:param ucrp: Add uniform CRP as a benchmark.
:param bah: Add Buy-And-Hold portfolio as a benchmark.
:param assets: Add asset prices.
:param kwargs: Additional arguments for pd.DataFrame.plot
"""
# NOTE: order of plotting is important because of coloring
# plot portfolio
d = self.to_dataframe()
portfolio = d.copy()
ax = portfolio.plot(linewidth=3., legend=False, **kwargs)
kwargs['ax'] = ax
ax.set_ylabel('Total wealth')
# plot uniform constant rebalanced portfolio
if ucrp:
from algos import CRP
crp_algo = CRP().run(self[0].X.cumprod())
crp_algo.fee = self[0].fee
d['UCRP'] = crp_algo.equity
d[['UCRP']].plot(**kwargs)
# add bah
if bah:
from algos import BAH
bah_algo = BAH().run(self[0].X.cumprod())
bah_algo.fee = self[0].fee
d['BAH'] = bah_algo.equity
d[['BAH']].plot(**kwargs)
# add individual assets
if assets:
self[0].asset_equity.plot(colormap=plt.get_cmap('jet'), **kwargs)
# plot portfolio again to highlight it
kwargs['color'] = 'blue'
portfolio.plot(linewidth=3., **kwargs)
return ax
| mit | -8,270,731,566,286,813,000 | 28.769863 | 102 | 0.544543 | false |
Arzaroth/virtualnetwork | network/mapParser.py | 1 | 4727 | #!/usr/bin/python3.3 -O
from pyrser import grammar,meta
from pyrser.directives import ignore
from network import Host, Router
import sys
def insensitiveCase(s):
return "[" + " ".join("['" + "'|'".join(x) + "']" for x in map((lambda each: [each.lower(), each.upper()]), s)) + "]"
class MapParser(grammar.Grammar):
entry = "Map"
grammar = """
Map = [#init_map(_) @ignore("null") [[[Hosts:h #add_host(_, h)] | [Routers:r #add_router(_, r)]] eol*]+
#link_hosts(_) eof]
Hosts = [#init_host(_) '[' ws {host} ws ']' eol+ [[Name | Ip | TTL | Route]:r #add_fhost(_, r)]+]
Routers = [#init_router(_) '[' ws {router} ws ']' eol+ [[Name | Ip | TTL | Route]:r #add_frouter(_, r)]+]
Name = [ws {name} ws '=' ws id:i #ret_f(_, "id", i) ws eol+]
Ip = [ws {ip} ws '=' ws cidraddr:c #ret_f(_, "ip", c) ws eol+]
TTL = [ws {ttl} ws '=' ws num:n #ret_f(_, "ttl", n) ws eol+]
Route = [ws {route} ws '=' ws [[{default}:c ws id:i #ret_f(_, "route", c, i)]
| [cidraddr:c ws id:i #ret_f(_, "route", c, i)]] ws eol+]
cidraddr = [num '.' num '.' num '.' num '/' num]
ws = [[' ' | '\r' | '\t']*]
""".format(host = insensitiveCase("Host"),
router = insensitiveCase("Router"),
route = insensitiveCase("Route"),
ip = insensitiveCase("IP"),
ttl = insensitiveCase("TTL"),
name = insensitiveCase("Name"),
default = insensitiveCase("Default"),
internet = insensitiveCase("Internet"))
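# Example input accepted by this grammar (hypothetical network map):
#     [Host]
#     name = pc1
#     ip = 192.168.0.10/24
#     route = default r1
#     [Router]
#     name = r1
#     ip = 192.168.0.1/24
#     ip = 10.0.0.1/8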
@meta.hook(MapParser)
def init_map(self, ast):
ast.network = {}
ast.routes = {}
return True
@meta.hook(MapParser)
def init_host(self, ast):
self.init_map(ast)
ast.network["route"] = []
return True
@meta.hook(MapParser)
def init_router(self, ast):
self.init_host(ast)
ast.network["ips"] = []
return True
@meta.hook(MapParser)
def link_hosts(self, ast):
for k,v in ast.routes.items():
for x in v:
if x[1] not in ast.network:
raise Exception("Unknown host ({0}) for {1} route.".format(x[1], k))
ast.network[k].addRoute(ast.network[x[1]], x[0])
return True
def base_add(parser, ast, h):
    # `parser` is the grammar instance; the original referenced an undefined
    # `self` here, which raised a NameError instead of the intended message
    if "name" not in h.network:
        raise Exception("Missing name field for given host:\n{0}".format(parser.value(h)))
    if h.network["name"] in ast.network:
        raise Exception("Redefinition of {0}.".format(h.network["name"]))
    ast.routes[h.network["name"]] = h.network["route"][::]
@meta.hook(MapParser)
def add_host(self, ast, h):
    base_add(self, ast, h)
if "ip" not in h.network:
raise Exception("Missing ip field for given host:\n{0}".format(self.value(h)))
if "ttl" in h.network:
ast.network[h.network["name"]] = Host(h.network["name"],
h.network["ip"], h.network["ttl"])
else:
ast.network[h.network["name"]] = Host(h.network["name"],
h.network["ip"])
return True
@meta.hook(MapParser)
def add_router(self, ast, h):
    base_add(self, ast, h)
if not h.network["ips"]:
raise Exception("Missing ip field for given host")
if "ttl" in h.network:
ast.network[h.network["name"]] = Router(h.network["name"],
*h.network["ips"], ttl = h.network["ttl"])
else:
ast.network[h.network["name"]] = Router(h.network["name"],
*h.network["ips"])
return True
@meta.hook(MapParser)
def ret_f(self, ast, *args):
ast.retvals = [args[0]]
ast.retvals.extend([self.value(x) for x in args[1:]])
return True
@meta.hook(MapParser)
def add_fhost(self, ast, r):
def reg_name(ast, name):
ast.network["name"] = name[0]
def reg_ip(ast, ip):
ast.network["ip"] = ip[0]
def reg_ttl(ast, ttl):
ast.network["ttl"] = ttl[0]
def reg_route(ast, route):
ast.network["route"].append(route)
fmap = {'id' : reg_name,
'ip' : reg_ip,
'ttl' : reg_ttl,
'route' : reg_route}
if r.retvals[0] in fmap:
fmap[r.retvals[0]](ast, r.retvals[1:])
return True
@meta.hook(MapParser)
def add_frouter(self, ast, r):
def reg_name(ast, name):
ast.network["name"] = name[0]
def reg_ip(ast, ip):
ast.network["ips"].append(ip[0])
def reg_ttl(ast, ttl):
ast.network["ttl"] = ttl[0]
def reg_route(ast, route):
ast.network["route"].append(route)
fmap = {'id' : reg_name,
'ip' : reg_ip,
'ttl' : reg_ttl,
'route' : reg_route}
if r.retvals[0] in fmap:
fmap[r.retvals[0]](ast, r.retvals[1:])
return True
| gpl-3.0 | 5,205,902,679,381,287,000 | 31.826389 | 121 | 0.533108 | false |
KamilSzot/365_programs | 2017-04-02/sdl.py | 1 | 2280 | import os
import time
import ctypes
from math import sin, cos, pi
os.environ["PYSDL2_DLL_PATH"] = "C:\\Program Files\\Python35\\other"
import sdl2
import sdl2.ext
import sdl2.sdlttf as sdlttf
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
sdl2.ext.init()
window = sdl2.ext.Window("Game (not really)", size=(SCREEN_WIDTH, SCREEN_HEIGHT), flags=sdl2.SDL_WINDOW_BORDERLESS)
renderer = sdl2.ext.Renderer(window, flags=sdl2.SDL_RENDERER_ACCELERATED)# | sdl2.SDL_RENDERER_PRESENTVSYNC
factory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE, renderer=renderer)
spriterenderer = factory.create_sprite_render_system()
fontmanager = sdl2.ext.FontManager('Roboto-Regular.ttf')
sp = factory.from_image('heart-outline.png')
sp.x = (SCREEN_WIDTH - sp.size[0]) // 2
sp.y = (SCREEN_HEIGHT - sp.size[1]) // 2
window.show()
#sdl2.SDL_RaiseWindow(window.window)
renderer.clear((0, 0, 0, 255))
#sdl2.render.SDL_RenderPresent(spriterenderer.sdlrenderer)
black = True
frames = 0
start_time = time.time()-0.0001
msg = None
running = True
while running:
    for event in sdl2.ext.get_events():
        if event.type == sdl2.SDL_QUIT:
            # 'break' here would only exit the event loop, not the
            # outer while loop, so flag the main loop to stop instead
            running = False
        elif event.type == sdl2.SDL_KEYDOWN and event.key.repeat == 0:
            print("Key was pressed")
        elif event.type == sdl2.SDL_KEYUP and event.key.repeat == 0:
            print("Key was released")
    keystatus = sdl2.SDL_GetKeyboardState(None)
    if keystatus[sdl2.SDL_SCANCODE_ESCAPE]:
        print("the Esc key was pressed")
        break
renderer.clear((80, 80, 80, 255))
# renderer.fill([(0,0,SCREEN_WIDTH,SCREEN_HEIGHT)],(80,80,80))
t = time.time()
black = not black
frames += 1
fps = 0
    dt = t - start_time
    if dt > 0:
        fps = frames / dt
msg = factory.from_text('{}'.format(fps), fontmanager=fontmanager)
sp.x = (SCREEN_WIDTH - sp.size[0]) // 2 + int(sin(2*pi*t/3)*(SCREEN_WIDTH/4))
sp.y = (SCREEN_HEIGHT - sp.size[1]) // 2 + int(cos(2*pi*t/3)*(SCREEN_WIDTH/4))
spriterenderer.render([sp,msg])
# time.sleep(0.01)
# sdl2.render.SDL_RenderPresent(spriterenderer.sdlrenderer)
# sdl2.render.SDL_RenderPresent(spriterenderer.sdlrenderer)
sdl2.ext.quit()
| unlicense | -7,732,825,467,773,092,000 | 26.860759 | 115 | 0.639474 | false |
kubeflow/kfserving | python/kfserving/test/test_v1alpha2_tensorflow_spec.py | 1 | 1476 | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kfserving
from kfserving.models.v1alpha2_tensorflow_spec import V1alpha2TensorflowSpec # noqa: E501
from kfserving.rest import ApiException
class TestV1alpha2TensorflowSpec(unittest.TestCase):
"""V1alpha2TensorflowSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha2TensorflowSpec(self):
"""Test V1alpha2TensorflowSpec"""
# FIXME: construct object with mandatory attributes with example values
# model = kfserving.models.v1alpha2_tensorflow_spec.V1alpha2TensorflowSpec() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,852,714,055,918,691,800 | 26.333333 | 98 | 0.722222 | false |
dstufft/fastly-py | tests/test_core.py | 1 | 1153 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from fastly.auth import KeyAuth, SessionAuth
from fastly.core import Fastly
def test_fastly_key():
api = Fastly("1234")
assert isinstance(api._session.auth, KeyAuth)
assert api._session.auth.key == "1234"
def test_fastly_session():
api = Fastly("test@example.com", "password")
assert isinstance(api._session.auth, SessionAuth)
assert api._session.auth.user == "test@example.com"
assert api._session.auth.password == "password"
assert api._session.auth.session is api._session
| apache-2.0 | -7,275,991,855,456,668,000 | 33.939394 | 74 | 0.740676 | false |
rjwvandenberg/PlebNet | plebnet/agent/strategies/strategy.py | 1 | 3275 | from abc import ABCMeta, abstractmethod
from plebnet.agent.config import PlebNetConfig
from plebnet.controllers import market_controller
from plebnet.controllers.cloudomate_controller import calculate_price, calculate_price_vpn
from plebnet.settings import plebnet_settings
from plebnet.utilities import logger
from plebnet.utilities.btc import btc_to_satoshi
log_name = "agent.strategies.strategy"
BTC_FLUCTUATION_MARGIN = 1.15
class Strategy():
__metaclass__ = ABCMeta
def __init__(self):
self.config = PlebNetConfig()
@abstractmethod
def apply(self):
"""
Performs the whole strategy step for one plebnet check iteration
:return:
"""
pass
@abstractmethod
def sell_reputation(self):
"""
Sells or holds current reputation (MB) depending on the implementing strategy
:return:
"""
pass
@abstractmethod
def create_offer(self, amount_mb, timeout):
"""
Creates a new order in the market, with parameters depending on the implementing strategy
:return:
"""
pass
def get_available_mb(self):
return market_controller.get_balance('MB')
@staticmethod
def get_replication_price(vps_provider, option, vpn_provider='azirevpn'):
return (calculate_price(vps_provider, option) + calculate_price_vpn(vpn_provider)) * BTC_FLUCTUATION_MARGIN
def update_offer(self, mb_amount, timeout=plebnet_settings.TIME_IN_HOUR):
"""
Check if "timeout" has passed since the last offer made, if passed create a new offer.
"""
if self.config.time_since_offer() > timeout:
logger.log("Calculating new offer", log_name)
self.config.save()
return self.create_offer(mb_amount, timeout)
def place_offer(self, mb_amount, chosen_est_price, timeout, config):
"""
Sells the received MB amount for the chosen estimated price on the Tribler market.
:param mb_amount: Amount of MB to sell
:param config: config
:param timeout: timeout of the offer to place
:param chosen_est_price: Target amount of BTC to receive
:return: success of offer placement
"""
if chosen_est_price == 0 or mb_amount == 0:
return False
config.bump_offer_date()
coin = 'TBTC' if plebnet_settings.get_instance().wallets_testnet() else 'BTC'
config.set('last_offer', {coin: chosen_est_price, 'MB': mb_amount})
if coin == 'TBTC':
return market_controller.put_ask(first_asset_amount=mb_amount,
first_asset_type='MB',
second_asset_amount=btc_to_satoshi(chosen_est_price),
second_asset_type=coin,
timeout=timeout)
return market_controller.put_bid(first_asset_amount=btc_to_satoshi(chosen_est_price),
first_asset_type=coin,
second_asset_amount=mb_amount,
second_asset_type='MB',
timeout=timeout)
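# Sketch of a concrete subclass (hypothetical, for illustration only):
#     class SimpleStrategy(Strategy):
#         def apply(self):
#             self.sell_reputation()
#         def sell_reputation(self):
#             self.update_offer(self.get_available_mb())
#         def create_offer(self, amount_mb, timeout):
#             price = amount_mb * 1e-5  # naive flat MB price in BTC
#             return self.place_offer(amount_mb, price, timeout, self.config)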
| lgpl-3.0 | -3,676,003,800,803,998,000 | 36.643678 | 115 | 0.594809 | false |
flavour/RedHat | languages/ne.py | 1 | 327292 | # -*- coding: utf-8 -*-
{
'# of International Staff': '# अन्तराष्ट्रिय स्टाफ',
'# of National Staff': '# राष्ट्रिय स्टाफ',
'# selected': '# छानियो',
'%(app)s not installed. Ask the Server Administrator to install on Server.': '%(app)s इन्स्टल हुन सकेन । सर्भरलाई इन्स्टल गर्नको निम्ति सर्भरको एडमिनिस्ट्राटरलाई सोध्नुहोस् ।',
'%(count)s Roles of the user removed': '%(count)s प्रयोगकर्ताको भूमिकाहरू रद्द गरियो ।',
'%(count)s Users removed from Role': '%(count)s प्रयोगकर्ताहरूलाई भूमिकाबाट निकालियो ।',
'%(label)s contains %(values)s': '%(label)s मा %(values)s',
'%(label)s contains any of %(values)s': '%(label)s कुनै मा %(values)s',
'%(label)s does not contain %(values)s': '%(label)s छैन %(values)s',
'%(label)s is %(values)s': '%(label)s %(values)s हो',
'%(label)s like %(values)s': '%(label)s जस्तै %(values)s',
'%(label)s not like %(values)s': '%(label)s अमिल्दो %(values)s',
'%(module)s not installed': '%(module)s इन्स्टल भएको छैन ।',
'%(pe)s in %(location)s': '%(pe)s मा %(location)s',
'%(proj4js)s definition': '%(proj4js)s परिभाषा',
'%(resource)s Filter': '%(resource)s फिल्टर',
'%(site_label)s Status': '%(site_label)s स्टाटस्',
'%(site_label)s Status added': '%(site_label)s स्टाटस् संचित भयो',
'%(site_label)s Status deleted': '%(site_label)s स्टाटस् हटाइयो ',
'%(site_label)s Status updated': '%(site_label)s स्टाटस् परिमार्जन गरियो ',
'%(system_name)s - New User Registered': '%(system_name)s - नयाँ प्रयोगकर्ता दर्ता भयो ',
'%(system_name)s - New User Registration Approval Pending': '%(system_name)s - नयाँ प्रयोगकर्ता दर्ताको प्रमाणिकरण हुन बाँकी',
'%(system_name)s has sent an email to %(email)s to verify your email address.\\nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': '%(system_name)s ले एउटा इमेल %(email)s लाई तपाईँको इमेल ठेगाना प्रमाणित गर्नको निम्ती पठाएको छ । \\n कृपया यो ठेगानालाई प्रमाणित गर्नको निम्ति तपाईंको इमेल हेर्नुहोस् । तपाईंले यो इमेल प्राप्त गर्नु भएन भने कृपय जंक इमेलवा स्पाम फिल्टरमा हेर्नुहोला ।',
'%s and %s': '%s र %s',
'%s AND %s': '%s र %s',
'%s or %s': '%s अथवा %s',
'%s OR %s': '%s अथवा %s',
'& then click on the map below to adjust the Lat/Lon fields': 'त्यसपछी Lat/Lon फिल्डहरूलाई मिलाउनको निम्ति तलको नक्सामा क्लीक गर्नुहोस् ।',
'(filtered from _MAX_ total entries)': '(जम्मा _MAX_ भर्नाहरूबाट छानिएको)',
'* Required Fields': '* आवश्यक ठाउँहरू',
'...or add a new bin': '...वा नयाँ बिन राख्नुहोस् ।',
'1 location, shorter time, can contain multiple Tasks': '१ स्थान, छोटो समय, मा बहु कार्यहरू हुनसक्छन् ।',
'1. Fill the necessary fields in BLOCK CAPITAL letters.': '१. ठूलो अक्षर प्रयोग गरि दिएको खालि ठाउँ भर्नुहोस् ।',
'2. Always use one box per letter and leave one box space to separate words.': '२. एउटा अक्षरको निम्ति एउटा कोठा प्रयोग गर्नुहोस् र प्रत्येक शब्द पछि एउटा कोठा खालि छाड्नुहोस् ।',
'3. Fill in the circles completely.': '३. गोलाकारमा पूर्णरूपले भर्नुहोस् ।',
'3W': '३ डब्लू',
'3W Report': '३ डब्लू रिपोर्ट',
'A brief description of the group (optional)': 'समूहको संक्षिप्त विवरण (एच्छिक) ',
'A file in GPX format taken from a GPS.': 'जि.पि.एस. बाट जि.पि.एक्स्. फाइल लिइयो ।',
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "यो क्षेत्रको भौगोलिक क्षेत्रलाई देखाउने स्थान । यो एउटा स्थान बनावटबाटको ठाउँ, वा 'समूह स्थान', वा स्थान जस्को क्षेत्रको निम्ति घेरा हुन सक्दछ ।",
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'विशेषता समूहमा राखिएको चिन्हलाई पुन:लेखन गर्नु परेमा कुनै स्थानमा प्रयोग गरिएको चिन्ह राखिनेछ ।',
'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'एउटा परियोजना उद्देश्य चिन्हको पात्रोमा मुख्य मिति हुँदछ जस्ले सम्पूर्ण लक्षमा गरिएको प्रअन्तिमि विवरण देखाउँदछ ।',
'A strict location hierarchy cannot have gaps.': 'बाक्लो स्थान बनावटमा खालि ठाउँ हुँदैन',
'A task is a piece of work that an individual or team can do in 1-2 days.': 'कुनै पनि सानो काम ठुलो कामको टुक्रा हो, जसलाई एकजना ब्यक्तिले तथा समूहले १-२ दिनमा पुरा गर्न सक्दछ ।',
"A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year": 'यदि कुनै स्वयम्-सेवकले अन्तिम वर्ष ८ वा सो भन्दा बढि समय (घण्टा) प्रति महिना सम्म कार्यक्रम वा तालिमहरूमा भाग लिएको छ भने उसलाई सकृय भनेर परिभाषित गर्न सकिन्छ ।',
'Abbreviation': 'संक्षेप',
'About': 'बारेमा',
'About Us': 'हाम्रो बारेमा',
'Academic': 'शैक्षिक',
'Access denied': 'प्रकृया रोकावट गरिएको छ ',
'Account Registered - Please Check Your Email': 'एकाउन्ट रजिष्टर भएको छ- कृपया तपाईंको इमेल हेर्नुहोस् ।',
'Acronym': 'छोटकरी रुप',
"Acronym of the organization's name, eg. IFRC.": 'संस्थाको नामको छोटकारी शब्द, जस्तै. आइ.एफ.आर.सी.',
'ACTION REQUIRED': 'कामको आवश्यकता पर्छ',
'Activate': 'सुचारू',
'activate to sort column ascending': 'छोटो कोलममा सानो देखि ठूलो मिलाउन सक्रिय गर्नुहोस्',
'activate to sort column descending': 'छोटो कोलममा ठूलो देखि सानो मिलाउन सक्रिय गर्नुहोस्',
'Active': 'सुचारित',
'Active Missions': 'सुचारु मिस्सनहरू',
'Active?': 'सुचारु?',
'Activities': 'कृयाकलापहरू',
'Activities matching Assessments': 'निर्धारण गरिएकोसँग मिल्दो कृयाकलापहरू',
'Activity': 'कृयाकलाप ',
'Activity Added': 'कृयाकलाप राखियो',
'Activity Deleted': 'कृयाकलाप हटाइयो ',
'Activity Details': 'कृयाकलाप विवरण',
'Activity Organization': 'कृयाकलाप वनावट',
'Activity Organization Added': 'कृयाकलाप वनावट राखियो ',
'Activity Organization Deleted': 'कृयाकलाप वनावट हटाइयो ',
'Activity Organization Updated': 'कृयाकलाप वनावट परिमार्जन गरियो ',
'Activity Organizations': 'कृयाकलाप वनावटहरू',
'Activity Report': 'कृयाकलाप प्रतिवेदन',
'Activity Type': 'कृयाकलापको प्रकार',
'Activity Type Added': 'कृयाकलापको प्रकार राखियो',
'Activity Type added to Activity': 'कृयाकलापमा, कृयाकलापको प्रकार राखियो ',
'Activity Type added to Project Location': 'परियोजनाको स्थानमा कृयाकलापको प्रकार राखियो ',
'Activity Type Deleted': 'कृयाकलापको प्रकार हटाइयो',
'Activity Type removed from Activity': 'कृयाकलापबाट, कृयाकलापको प्रकार हटाइयो ',
'Activity Type removed from Project Location': 'परियोजनाको स्थानबाट, कृयाकलापको प्रकार हटाइयो ',
'Activity Type Updated': 'कृयाकलापको प्रकार परिमार्जन गरियो',
'Activity Types': 'कृयाकलापका प्रकारहरू',
'Activity Updated': 'कृयाकलाप परिमार्जन गरियो ',
'Add': 'थप्ने',
'Add %(site_label)s Status': 'थप्ने %(site_label)s अवस्था',
'Add a new certificate to the catalog.': 'तालिकामा नयाँ प्रमाणपत्र राख्नुहोस्',
'Add a new competency rating to the catalog.': 'तालिकामा नयाँ प्रतिस्पर्धा स्तर राख्नुहोस्',
'Add a new membership type to the catalog.': 'तालिकामा नयाँ सदस्यता प्रकार राख्नुहोस्',
'Add a new program to the catalog.': 'तालिकामा नयाँ कार्यक्रम राख्नुहोस्',
'Add a new skill type to the catalog.': 'तालिकामा नयाँ सिप प्रकार राख्नुहोस्',
'Add a Person': 'ब्यक्ति राख्नुहोस्',
'Add Activity Type': 'नयाँ कृयाकलाप प्रकार',
'Add Activity Type to Activity': 'कृयाकलापमा, कृयाकलापको प्रकार राख्नुहोस्',
'Add Activity Type to Project Location': 'परियोजनाको स्थानमा कृयाकलापको प्रकार राख्नुहोस् ',
'Add Address': 'ठेगाना राख्नुहोस् ',
'Add Affiliation': 'स्विकृती राख्नुहोस् ',
'Add all organizations which are involved in different roles in this project': 'यस परियोजनामा फरक-फरक भूमिका निभाउने संस्थाहरू राख्नुहोस्',
'Add Annual Budget': 'नयाँ वार्षिक बजेट',
'Add Appraisal': 'मुल्यङकन राख्नुहोस् ',
'Add Award': 'पुरस्कार राख्नुहोस् ',
'Add Beneficiaries': 'भागिदारहरू राख्नुहोस् ',
'Add Branch Organization': 'शाखा संघ राख्नुहोस् ',
'Add Certificate for Course': 'पाठ्यक्रम प्रमाणपत्र राख्नुहोस् ',
'Add Certification': 'प्रमाणिकरण राख्नुहोस् ',
'Add Contact': 'सम्पर्क राख्नुहोस् ',
'Add Contact Information': 'सम्पर्क जानकारी राख्नुहोस् ',
'Add Credential': 'कागजात राख्नुहोस् ',
'Add Data to Theme Layer': 'स्वरूप (थिम) को तहमा आंकडा राख्नुहोस् ',
'Add Deployment': 'विकास राख्नुहोस् ',
'Add Education Detail': 'शैक्षिक विवरण राख्नुहोस् ',
'Add Group Member': 'समूह सदस्य राख्नुहोस् ',
'Add Hazard to Project': 'परियोजनामा खतरा राख्नुहोस्',
'Add Hours': 'घण्टा राख्नुहोस्',
'Add Identity': 'परिचय राख्नुहोस्',
'Add Image': 'तस्बिर राख्नुहोस् ',
'Add Keyword': 'मुख्यशब्द राख्नुहोस् ',
'Add Layer from Catalog': 'तालिकाबाट सतह राख्नुहोस् ',
'Add Layer to this Profile': 'यो प्रोफाइलमा सतह राख्नुहोस् ',
'Add Line': 'धर्का राख्नुहोस् ',
'Add Location to Organization': 'संस्थामा स्थान राख्नुहोस् ',
'Add Log Entry': 'तालिका प्रवेश राख्नुहोस् ',
'Add Member': 'सदस्य राख्नुहोस् ',
'Add Membership': 'सदस्यता राख्नुहोस्',
'Add new and manage existing members.': 'नयाँ थप र भैरहेको सदस्यहरुलाई व्यवस्थापन गर्न',
'Add new and manage existing staff.': 'नयाँ थप र भैरहेको कर्मचारीहरुलाई व्यवस्थापन गर्न',
'Add new and manage existing volunteers.': 'नयाँ थप र भैरहेको स्वयंसेवकहरुलाई व्यवस्थापन गर्न',
'Add New Address': 'नयाँ ठेगाना राख्नुहोस्',
'Add New Affiliation': 'नयाँ स्वीकृती राख्नुहोस्',
'Add New Appraisal': 'नयाँ मुल्याङ्कन राख्नुहोस्',
'Add New Award': 'नयाँ पुरस्कार राख्नुहोस्',
'Add New Beneficiaries': 'नयां भागिदारहरू राख्नुहोस् ',
'Add New Beneficiary Type': 'नयाँ भागिदारको प्रकार राख्नुहोस् ',
'Add New Branch': 'नयाँ शाखा राख्नुहोस् ',
'Add New Branch Organization': 'नयाँ शाखा संस्था राख्नुहोस् ',
'Add New Campaign': 'नयाँ क्याम्पिन राख्नुहोस्',
'Add New Certificate': 'नयाँ प्रमाणपत्र राख्नुहोस्',
'Add New Certification': 'नयाँ प्रमाणिकरण राख्नुहोस्',
'Add New Cluster': 'नयाँ समूह राख्नुहोस्',
'Add New Coalition': 'नयाँ संघ राख्नुहोस्',
'Add New Community': 'नयाँ समूदाय राख्नुहोस्',
'Add New Competency Rating': 'नयाँ प्रतिस्पर्धाको स्तर राख्नुहोस्',
'Add New Contact': 'नयाँ संम्पर्क राख्नुहोस्',
'Add New Course': 'नयाँ पाठ्यक्रम राख्नुहोस्',
'Add New Course Certificate': 'नहाँ पाठ्यक्रम प्रमाणपत्र राख्नुहोस्',
'Add New Credential': 'नयाँ कागजात राख्नुहोस्',
'Add New Data to Theme Layer': 'स्वरूपको तहमा नयाँ आंकडा राख्नुहोस्',
'Add New Department': 'नयाँ मन्त्रालय राख्नुहोस्',
'Add New Deployment': 'नयाँ कार्य राख्नुहोस्',
'Add New Donor': 'नयाँ दाता राख्नुहोस्',
'Add New Entry': 'नयाँ प्रवेश राख्नुहोस्',
'Add New Facility': 'नयाँ सूविधा राख्नुहोस्',
'Add New Facility Type': 'नयाँ सूविधाको प्रकार राख्नुहोस्',
'Add New Feature Layer': 'नयाँ विशेषता तह राख्नुहोस्',
'Add New Group': 'नयाँ समूह राख्नुहोस्',
'Add New Hazard': 'नयाँ खतरा राख्नुहोस्',
'Add New Hours': 'नयाँ घण्टाहरू राख्नुहोस्',
'Add New Identity': 'नयाँ परिचय राख्नुहोस्',
'Add New Image': 'नयाँ तस्विर राख्नुहोस्',
'Add New Job Title': 'नयाँ कामको पद राख्नुहोस्',
'Add New Keyword': 'नयाँ मुख्यशब्द राख्नुहोस्',
'Add New Layer': 'नयाँ तह राख्नुहोस्',
'Add New Layer to Symbology': 'चिन्हतामा नयाँ तह राख्नुहोस्',
'Add New Location': 'नयाँ स्थान राख्नुहोस्',
'Add New Location Hierarchy': 'नयाँ स्थान बनावट राख्नुहोस्',
'Add New Log Entry': 'नयाँ प्रवेश तालिका राख्नुहोस्',
'Add New Mailing List': 'नयाँ ठेगाना तालिका राख्नुहोस्',
'Add New Map Profile': 'नयाँ नक्सा बनावट राख्नुहोस्',
'Add New Marker': 'नयाँ चिन्ह राख्नुहोस्',
'Add New Member': 'नयाँ सदस्य राख्नुहोस्',
'Add New Membership': 'नयाँ सदस्यता राख्नुहोस्',
'Add New Membership Type': 'नयाँ सदस्यता प्रकार राख्नुहोस्',
'Add New Milestone': 'नयाँ उद्देश्य राख्नुहोस्',
'Add New Network': 'नयाँ नेटवर्क राख्नुहोस्',
'Add New Office': 'नयाँ कार्यलय राख्नुहोस्',
'Add New Office Type': 'नयाँ कार्यलय प्रकार राख्नुहोस्',
'Add New Organization': 'नयाँ संस्था राख्नुहोस्',
'Add New Organization Type': 'नयाँ संस्थाको प्रकार राख्नुहोस्',
'Add New Output': 'नयाँ नतिजा राख्नुहोस्',
'Add New Participant': 'नयाँ सहभागी राख्नुहोस्',
"Add New Person's Details": 'नयाँ ब्यक्तिको विवरण राख्नुहोस्',
'Add New PoI Type': 'नयाँ पोलको प्रकार राख्नुहोस्',
'Add New Point of Interest': 'नयाँ रुचीको बुँदा राख्नहोस्',
'Add New Policy or Strategy': 'नयाँ नियम तथा लक्ष राख्नुहोस्',
'Add New Professional Experience': 'नयाँ ब्यक्तिअन्तिम अनुभव राख्नुहोस्',
'Add New Profile Configuration': 'नयाँ प्रोफाइल बनावट राख्नुहोस्',
'Add New Program': 'नयाँ कार्यक्रम राख्नुहोस्',
'Add New Project': 'नयाँ परियोजना राख्नुहोस्',
'Add New Projection': 'नयाँ योजना राख्नुहोस्',
'Add New Record': 'नयाँ विवरण राख्नुहोस्',
'Add New Region': 'नया क्षेत्र राख्नुहोस',
'Add New Resource': 'नयाँ स्रोत राख्नुहोस्',
'Add New Response Summary': 'नयाँ प्रतिकृया संक्षेप राख्नुहोस्',
'Add New Role': 'नयाँ भूमिका राख्नुहोस्',
'Add New Room': 'नयाँ कोठा राख्नुहोस्',
'Add New Sector': 'नयाँ क्षेत्र राख्नुहोस्',
'Add New Service': 'नयाँ सेवा राख्नुहोस्',
'Add New Skill': 'नयाँ सिप राख्नुहोस्',
'Add New Skill Equivalence': 'नयाँ सिप सरह राख्नुहोस्',
'Add New Skill Type': 'नयाँ सिपको प्रकार राख्नुहोस्',
'Add New Staff Assignment': 'नयाँ कर्मचारीको काम राख्नुहोस्',
'Add New Staff Member': 'नयाँ कर्मचारी सदस्य राख्नुहोस्',
'Add New Status': 'नयाँ अवस्था राख्नुहोस्',
'Add New Symbology': 'नयाँ चिन्हता राख्नुहोस्',
'Add New Symbology for Layer': 'तहको लागि नयाँ चिन्हता राख्नुहोस्',
'Add New Task': 'नयाँ काम राख्नुहोस्',
'Add New Team': 'नयाँ समूह राख्नुहोस्',
'Add New Team Member': 'नयाँ समूह सदस्य राख्नुहोस्',
'Add New Theme': 'नयाँ स्वरूप राख्नुहोस्',
'Add New Training': 'नयाँ तालिम राख्नुहोस्',
'Add New Training Event': 'नयाँ तालिम कार्यक्रम राख्नुहोस्',
'Add New Volunteer': 'नयाँ स्वयम सेवक राख्नुहोस्',
'Add New Volunteer Cluster': 'नयाँ स्वयम सेवक कागजात राख्नुहोस्',
'Add New Volunteer Cluster Position': 'नयाँ स्वयम सेवकको पद कागजात राख्नुहोस्',
'Add New Volunteer Cluster Type': 'नयाँ स्वयम सेवक कागजातको प्रकार राख्नुहोस्',
'Add New Volunteer Role': 'नयाँ स्वयम सेवक भूमिका राख्नुहोस्',
'Add Office': 'कार्यलय राख्नुहोस्',
'Add Organization': 'संस्था राख्नुहोस्',
'Add Organization to Activity': 'कृयाकलापको बनावट राख्नुहोस्',
'Add Organization to Project': 'परियोजनामा संस्था राख्नुहोस्',
'Add Participant': 'सहभागी राख्नुहोस्',
'Add Person': 'ब्यक्ति राख्नुहोस्',
"Add Person's Details": 'ब्यक्तिको विवरण राख्नुहोस्',
'Add PoI Type': 'पोलको प्रकार राख्नुहोस्',
'Add Point': 'बुँदा राख्नुहोस्',
'Add Point of Interest': 'रूचीको बँदा राख्नुहोस्',
'Add Policy or Strategy': 'नियम तथा लक्ष राख्नुहोस्',
'Add Polygon': 'बहुभुजा राख्नुहोस्',
'Add Professional Experience': 'व्यबसायिक अनुभव राख्नुहोस्',
'Add Profile Configuration': 'प्रोफाइल बनावट राख्नुहोस्',
'Add Profile Configuration for this Layer': 'यो तहको लागि प्रोफाइल बनावट राख्नुहोस्',
'Add Project': 'परियोजना राख्नुहोस्',
'Add Response Summary': 'प्रतिकृया संक्षेप राख्नुहोस्',
'Add Role': 'भूमिका राख्नुहोस्',
'Add Room': 'कोठा राख्नुहोस्',
'Add saved search': 'संचित खोजी राख्नुहोस्',
'Add search': 'खोजी राख्नुहोस्',
'Add Sector': 'क्षेत्र राख्नुहोस्',
'Add Sector to Organization': 'संस्थामा क्षेत्र राख्नुहोस्',
'Add Sector to Project': 'परियोजनामा क्षेत्र राख्नुहोस्',
'Add Sector to Theme': 'स्वरूपमा क्षेत्र राख्नुहोस्',
'Add Service': 'सेवा राख्नुहोस्',
'Add Service to Organization': 'संस्थामा सेवा राख्नुहोस्',
'Add Skill': 'सिप राख्नुहोस्',
'Add Skill Equivalence': 'सिप सरह राख्नुहोस्',
'Add Skill Type': 'सिपको प्रकार राख्नुहोस्',
'Add Staff Assignment': 'कर्मचारीको काम राख्नुहोस्',
'Add Staff Member to Project': 'परियोजनामा कर्मचारी सदस्य राख्नुहोस्',
'Add Status': 'अवस्था राख्नुहोस्',
'Add Symbology': 'चिन्हता राख्नुहोस्',
'Add Symbology for Layer': 'तहको लागि चिन्हता राख्नुहोस्',
'Add Task': 'काम राख्नुहोस् ',
'Add Team': 'समूह राख्नुहोस् ',
'Add Team Member': 'समूह सदस्य राख्नुहोस् ',
'Add Theme': 'स्वरूप राख्नुहोस्',
'Add Theme to Activity': 'कृयाकलापमा स्वरूप राख्नुहोस्',
'Add Theme to Project': 'परियोजनामा स्वरूप राख्नुहोस्',
'Add Theme to Project Location': 'परियोजना स्थानमा स्वरूप राख्नुहोस्',
'Add this entry': 'यो प्रवेश राख्नुहोस',
'Add to a Team': 'समूहमा राख्नुहोस्',
'Add Training': 'तालिम राख्नुहोस्',
'Add...': 'राख्नुहोस्…',
'Address': 'ठेगाना',
'Address added': 'ठेगाना संचित गरियो',
'Address deleted': 'ठेगाना हटाइयो',
'Address Details': 'ठेगाना विवरण',
'Address Mapped': 'ठेगाना नक्सा',
'Address NOT Mapped': 'नक्सामा नदेखाइएको ठेगाना',
"Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": ' महत्वपूर्ण ब्यक्तिको निम्ति यो तहको लागि तस्विरको ठेगाना । सर्भरले जे देखाउँछ त्यसैलाई देखाउन (जुन जिओवेबकेचको माध्यमबाट कुनैपनि हालतमा काम गर्दैन) को साटो यस्ले नियन्त्रित स्थिर तस्विर प्रयोग गर्न सकिन्छ ।',
'Address Type': 'ठेगाना प्रकार',
'Address updated': 'ठेगाना परिमार्जन गरियो',
'Addresses': 'ठेगानाहरू',
'Adjust Stock Levels': 'भंडारको स्तर मिलाउनुहोस्',
'Admin': 'संचालक',
'Admin Assistant': 'संचालक सहयोगी',
'Administrador Database': 'संचालक तथा डाटाबेस',
'Administration': 'प्रशासन',
'Adolescent (12-20)': 'किशोर, किशोरी (१२-२०)',
'Adult (21-50)': 'जवान (२१-५०)',
'Advanced Search': 'बृहत खोजी',
'Advocacy': 'वकालत',
'Affiliation added': 'स्वीकृती संचित गरियो',
'Affiliation deleted': 'स्वीकृती हटाइयो',
'Affiliation Details': 'स्वीकृती विवरण',
'Affiliation updated': 'स्वीकृती परिमार्जन गरियो',
'Affiliations': 'स्वीकृतीहरू',
'Age': 'उमेर',
'Age Group': 'उमेर समूह',
'Airport': 'बिमान स्थल',
'Alerts': 'सचेतनाहरू',
'All': 'सबै',
'All Entities': 'सबै समूहहरू',
'All Open Tasks': 'सबै खुला कामहरू',
'All Records': 'सबै रेकर्डहरू',
'All selected': 'सबै छानियो',
'All Tasks': 'सबै कामहरू',
'Amount': 'मात्रा',
'Amount of the Project Budget spent at this location': 'यो स्थानमा खर्च गरिएको परियोजना बजेटको मात्रा',
'An error occured, please %(reload)s the page.': 'गल्ति भएको छ, कृपया पेजलाई %(reload)s गर्नुहोस् ।',
'An ESRI Shapefile (zipped)': 'ए.एस.आर.आइ. आकार फाइल (जिप गरिएको)',
'an individual/team to do in 1-2 days': '१-२ दिन(हरू)मा एक व्यक्ति/समूहले गर्नु पर्ने',
'and': 'र',
'Annual Budget': 'वार्षिक बजेट',
'Annual Budget deleted': 'वार्षिक बजेट हटाइएको छ',
'Annual Budget updated': 'वार्षिक बजेट परिमार्जन गरिएको छ',
'Annual Budgets': 'वार्षिक बजेटहरू',
'Anonymous': 'विविध',
'anonymous user': 'नामरहितको प्रयोगकर्ता',
'ANY': 'कुनैपनि',
'Any': 'कुनैपनि',
'Appeal Code': 'अपिल कोड',
'Applicable to projects in Pacific countries only': 'प्यसिफिक देशहरूको परियोजनामा मात्र लागु हुने',
'Application': 'लागु',
'Application Permissions': 'लागु अनुमतिहरू',
'Appraisal added': 'मुल्याङ्कन संचित गरियो',
'Appraisal deleted': 'मुल्याङ्कन हटाइयो',
'Appraisal Details': 'मुल्याङ्कन विवरण',
'Appraisal updated': 'मुल्याङ्कन परिमार्जन गरियो',
'Appraisals': 'मुल्याङ्कनहरू',
'Approve': 'प्रमाणित',
'Approver': 'प्रमाणित गर्ने',
'ArcGIS REST Layer': 'एर्क जि.आइ.एस. आर.इ.एस.टि. तह',
'Are you sure you want to delete this record?': 'तपाईं यो रेकर्ड हटाउने कुरामा निश्चित हुनुहुन्छ?',
'Assessment': 'लेखाजोखा',
'Assessment and Community/Beneficiary Identification': 'लेखाजोखा र सामुदायिक/ लाभान्वितहरुको पहिचान',
'Assessment Templates': 'लेखाजोखा फाराम',
'Assessments': 'लेखाजोखाहरु',
'Asset': 'सामाग्री',
'Asset Number': 'सामग्रीको संख्या',
'Assets': 'सामाग्रीहरु',
'Assign %(staff)s': 'काम %(staff)s',
'Assign another Role': 'अर्को भूमिका मुल्यङ्कन',
'Assign Asset': 'मुल्याङ्कन मा',
'Assign Role to a User': 'प्रयोगकर्ताको मुल्याङ्कन भूमिका',
'Assign Staff': 'मुल्याङ्कन कर्मचारी',
'Assigned': 'काममा सहभागी गरियो',
'Assigned To': 'को लागि सहभागी गरियो',
'Assigned to': 'को लागि सहभागी गरियो',
'Association': 'संघ',
'Attachments': 'अटाच्मेन्स्',
'Attributes': 'विशेषताहरू',
'Attribution': 'विशेषता',
'Australian Dollars': 'अष्ट्रेलियन डलर',
'Authentication Required': 'प्रमाणिकरण आवश्यक',
'Auxiliary Role': 'सहायक भूमिका',
'Availability': 'उपलब्धता',
'Available Forms': 'उपलब्ध फारम',
'Available in Viewer?': 'हेर्नेको लागि उपलब्ध?',
'Avalanche': 'हिमपहिरो',
'average': 'साधारण',
'Average Rating': 'दित्तिय भूमिका',
'Award': 'पुरस्कार',
'Award added': 'पुरस्कार संचित गरियो',
'Award deleted': 'पुरस्कार हटाइयो',
'Award updated': 'पुरस्कार परिमार्जन गरियो',
'Awards': 'पुरस्कारहरू',
'Awareness Raising': 'जनचेतना अभिवृद्धि',
'Back to Roles List': 'पछाडि भूमिका तालिकामा',
'Back to Top': 'पछाडि सिरानमा',
'Back to Users List': 'पछाडि प्रयोगकर्ता तालिकामा',
'Background Color': 'पृष्ठभूमी रंग',
'Bahai': 'बहाइ',
'Baldness': 'खुइलेपन',
'Base Layer?': 'आधारभूत तह?',
'Base Layers': 'आधारभूत तहहरू',
'Base Location': 'आधारभुत स्थान',
'Basic Details': 'आधारभुत विवरण',
'Basic Search': 'आधारभुत खोजी',
'Bdrt (Branch Disaster Response Teams)': 'शाखा प्रकोप प्रतिकृया समूहहरू',
'Behaviour Change Communication': 'व्यवहार परिवर्तन संचार',
'Beneficiaries': 'भागिदारहरू',
'Beneficiaries Added': 'भागिदारहरू संचित गरियो',
'Beneficiaries Deleted': 'भागिदारहरू हटाइयो',
'Beneficiaries Details': 'भागिदारहरू विवरण',
'Beneficiaries Updated': 'भागिदारहरू परिमार्जन गरियो ',
'Beneficiary Report': 'भागिदार प्रतिवेदन',
'Beneficiary Type': 'भागिदार प्रकार',
'Beneficiary Type Added': 'भागिदार प्रकार राखियो',
'Beneficiary Type Deleted': 'भागिदार प्रकार हटाइयो',
'Beneficiary Type Updated': 'भागिदार प्रकार परिमार्जन गरियो',
'Beneficiary Types': 'भागिदार प्रकारहरू',
'Better Programming Initiative Guidance': 'उपयुक्त योजना पहल निर्देशन',
'Bilateral': 'द्विपक्षीय',
'Bio data': 'बायोडाटा',
'Bing Layer': 'बिंग तह',
'black': 'कालो',
'Blocked': 'रोकावट गरिएको',
'blond': 'खैरो',
'Blood Banking': 'रगत बैंकिङ',
'Blood Donation and Services': 'रक्तदान सेवा',
'Blood Donor Recruitment': 'रक्तदाता नियुक्ती',
'Blood Type (AB0)': 'रगतको प्रकार (ए.बि.ओ.)',
'blue': 'निलो',
'Body': 'शरिर',
'Body Hair': 'शरिरीक रौं',
'Boq and Cost Estimation': 'बग तथा खर्च अडकल',
'Both': 'दुवै',
'Branch': 'शाखा',
'Branch Coordinator': 'शाखा सहकर्ता',
'Branch Organization added': 'शाखा संस्था संचित गरियो',
'Branch Organization deleted': 'शाखा संस्था हटाइयो ',
'Branch Organization Details': 'शाखा संस्था विवरणहरू',
'Branch Organization updated': 'शाखा संस्था परिमार्जन गरियो',
'Branch Organizations': 'शाखा संस्थाहरू',
'Branch Planning': 'शाखा योजना',
'Branches': 'शाखाहरू',
'Breakdown': 'फुट',
'brown': 'खैरो',
'Buddhist': 'बौद्दिस्ट',
'Budget': 'बजेट',
'Buffer': 'बफर',
'Building Name': 'भवन नाम',
'by': 'अनुसार',
'by %(person)s': '%(person)s अनुसार',
'By selecting this you agree that we may contact you.': 'यसलाइ छान्नुको अर्थ हामीले तपाईँलाई सम्पर्क गर्न सक्छौँ भन्नेमा सहमती हुनुभयो',
'Calendar': 'पात्रो',
'Camp': 'क्याम्प',
'Campaign': 'क्याम्पिन',
'Campaign Added': 'क्याम्पिन राखियो',
'Campaign Deleted': 'क्याम्पिन हटाइयो',
'Campaign Message': 'क्याम्पिन संदेश',
'Campaign Updated': 'क्याम्पिन परिमार्जन गरियो',
'Campaigns': 'क्याम्पिनहरू',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'खुलासडकनक्सा (.ओ.एस.एम.) वा बिकल्प बाट धुर्व पढ्न सक्छ ।',
'Canadian Dollars': 'क्यानडियन डलर',
'Cancel': 'रद्द',
'Cancel Crop': 'काटाइ रद्द',
'Cancel editing': 'बनाउन रद्द',
'Canceled': 'रद्द गरियो',
'cannot be deleted.': 'हटाउन सकिदैन',
'Cannot make an Organization a branch of itself!': 'संस्थालाई आफ्नै शाखा बनाउन सकिँदैन !',
'Cannot open created OSM file!': 'रचना गरिएको खुलासडकनक्सा फाइल खोल्न सकिँदैन !',
'Cannot read from file: %(filename)s': 'फाइलबाट पढ्न सकिएन: %(filename)s',
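# Note on placeholders: tokens of the form %(name)s in these strings are
# Python string-interpolation keys that the application fills in at runtime
# (in web2py-style language files, roughly):
#
#     T("Cannot read from file: %(filename)s") % {"filename": path}
#
# The key name must be copied into the translation verbatim; a partially
# translated key such as %(fileनाम)s would raise KeyError when the string
# is formatted. ("path" above is an illustrative variable, not one defined
# in this file.)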
'Capacity Building': 'क्षमता विकास',
'Capacity Building of Governance': 'अधिकारको क्षमता निर्माण',
'Capacity Building of Management Staff': 'ब्यबस्थापन कर्मचारीको क्षमता निर्माण',
'Capacity Building of Staff': 'कर्मचारीको क्षमता विकास',
'Capacity Building of Volunteers': 'स्वयमसेवकहरुको क्षमता विकास',
'Capacity Development': 'क्षमता विकास',
'Catalogs': 'तालिकाहरू',
'Catchment Protection': 'जलाधार संरक्षण',
'Category': 'वर्गिकरण',
'caucasoid': 'ककेसोइड',
'CDRT (Community Disaster Response Teams)': 'समूदाय प्रकोप प्रतिकृया समूहहरू',
'Cell Tower': 'सेल टावर',
'Central': 'केन्द्रीय',
'Certificate': 'प्रमाणपत्र',
'Certificate added': 'प्रमाणपत्र राखियो',
'Certificate Catalog': 'प्रमाणपत्र तालिका',
'Certificate deleted': 'प्रमाणपत्र हटाइयो',
'Certificate Details': 'प्रमाणपत्र विवरण',
'Certificate List': 'प्रमाणपत्र सूची',
'Certificate updated': 'प्रमाणपत्र परिमार्जन गरियो',
'Certificates': 'प्रमाणपत्रहरू',
'Certification added': 'प्रमाणीकरण संचित गरियो',
'Certification deleted': 'प्रमाणीकरण हटाइयो',
'Certification Details': 'प्रमाणीकरण विवरणहरू',
'Certification updated': 'प्रमाणीकरण परिमार्जन गरियो',
'Certifications': 'प्रमाणीकरणहरू',
'Certifying Organization': 'प्रमाणीत गर्ने संस्थाहरू',
'Chairman': 'अध्यक्ष',
'Change Password': 'पासवर्ड परिवर्तन',
'Chapter': 'अध्याय',
'Check all': 'सबैलाई छान्नुहोस्',
'Check this to make your search viewable by others.': 'तपाईंको खोजी अरूले हेर्न मिल्ने बनाउनको निम्ति यसलाई छान्नुहोस्',
'Check-In': 'प्रवेश',
'Check-Out': 'बाहिरिनु',
'Child (2-11)': 'बालबालिका (२-११)',
"Children's Education": 'वालवालिकाको शिक्षा',
'Choose Country': 'देश छान्नुहोस्',
'Christian': 'इसाइ',
'Civil Society/NGOs': 'नागरीक समाज/गैरसरकारी संस्थाहरु',
'Clean-Up Campaign': 'सफाइ क्याम्पिन',
'Cleaner': 'सफा गर्ने',
'Clear': 'सफाइ',
'clear': 'सफाइ',
'Clear all': 'सबै सफाई',
'Clear filter': 'क्लियर फिल्टर',
'Clear selection': 'छानिएको सफाइ',
'Click anywhere on the map for full functionality': 'पूर्ण कार्यप्रस्तुतिको लागि नक्साको जुनसुकै भागमा क्लिक गर्नुहोस्',
'click here': 'यहाँ क्लिक गर्नुहोस्',
'Click on the link': 'लिन्क छान्नुहोस्',
'Click on the slider to choose a value': 'छान्नको निम्ति स्लाइडरमा छान्नुहोस्',
'Click to edit': 'परिमार्जन गर्नको निम्ति क्लिक गर्नुहोस्',
'Click where you want to open Streetview': 'स्ट्रट भिउ खोल्न चहानुभएको जुनसुकै ठाउमा क्लिक गर्नुहोस्',
'Climate Change': 'जलवायु परिवर्तन',
'Climate Change Adaptation ': 'जलवायु परिवर्तन अनुकुलता',
'Climate Change Mitigation': 'जलवायु परिवर्तन अल्पीकरण',
'Close': 'बन्द',
'Close map': 'नक्सा बन्द',
'Closed': 'वन्द',
'Club 25 / Pledge 25': 'संघ २५/सपथ२५',
'Cluster': 'समूह',
'Cluster added': 'समूह संचित गरियो',
'Cluster Attribute': 'समूह बनावट',
'Cluster deleted': 'समूह हटाइयो',
'Cluster Details': 'समूह विवरण',
'Cluster Distance': 'समूह दुरी',
'Cluster Threshold': 'समूह चावी',
'Cluster updated': 'समूह परिमार्जन गरियो',
'Clusters': 'समूहहरू',
'Coalition added': 'सहकारी संचित गरियो',
'Coalition Details': 'सहकारी विवरण',
'Coalition removed': 'सहकारी हटाइयो',
'Coalition updated': 'सहकारी परिमार्जन गरियो',
'Coalitions': 'सहकारीहरू',
'Coastal Conservation ': 'तटीय संरक्षण',
'Code': 'कोड',
'Cold Wave': 'शित लहर',
'Comment': 'टिका टिप्पणी',
'Comments': 'टिका टिप्पणी',
'Commitments': 'समर्पणतहरू',
'Communicable Diseases': 'सरुवा रोगहरु',
'Communication': 'संचार',
'Communication Officer': 'संचार कर्मचारी',
'Communities': 'समूदायहरू',
'Community': 'समूदाय',
'Community Action Planning': 'सामुदायिक कार्य योजना र्तजुमा',
'Community Added': 'समूदाय थपिएको छ',
'Community-based DRR': 'समुदायमा आधारित विपद् जोखिम न्यूनीकरण',
'Community Based Health and First Aid (CBHFA)': 'समूदायमा आधारित स्वास्थ्य तथा प्राथमिक उपचार (सि.बि.एच.एफ.ए.)',
'Community Contacts': 'समूदाय सम्पर्कहरू',
'Community Deleted': 'समूदाय हटाइयो',
'Community Details': 'समूदाय विवरण',
'Community Disaster Awareness': 'समूदाय प्रकोप जागरण',
'Community Early Warning Systems': 'समूदाय पूर्वचेतावनी प्रणालीहरू',
'Community Health': 'सामुदायिक स्वास्थ्य',
'Community Health Committees': 'समूदाय स्वास्थ्य समाजहरू',
'Community Health Initiative/Projects': 'समूदाय स्वास्थ्य पहल/परियोजनाहरू',
'Community Health Risk Assessments': 'समूदाय स्वास्थ्य खतरा मुल्याङ्कनहरू',
'Community Mobilisation': 'सामुदायिक परिचालन',
'Community Mobilization': 'सामुदायिक परिचालन',
'Community Organisation': 'सामुदायिक संस्था',
'Community Organization': 'सामुदायिक संस्था',
'Community Preparedness': 'समूदाय पुर्वतयारी',
'Community Updated': 'समूदाय परिमार्जन गरियो',
'Company': 'कम्पनी',
'Competency': 'प्रतिस्पर्धा',
'Competency Rating': 'प्रतिस्पर्धा स्तर',
'Competency Rating added': 'प्रतिस्पर्धा स्तर संचित गरियो',
'Competency Rating Catalog': 'प्रतिस्पर्धा स्तर तालिका',
'Competency Rating deleted': 'प्रतिस्पर्धा स्तर हटाइयो',
'Competency Rating Details': 'प्रतिस्पर्धा स्तर विवरण',
'Competency Rating updated': 'प्रतिस्पर्धा स्तर परिमार्जन गरियो',
'Completed': 'पुरा भयो',
'Complex Emergency': 'जटिल आपत्काल',
'Complexion': 'छालाको रंग',
'Compromised': 'सम्झौता गरिएको',
'Config not found!': 'बनावट प्राप्त भएन!',
'Configuration': 'बनावट',
'Configure Layer for this Symbology': 'यो चिन्हताको लागि बनावट रुप',
'Confirmed': 'निश्चित गरियो',
'Confirming Organization': 'निश्चित गर्ने संस्था',
'Construction Activities': 'निर्माणकार्य सम्बन्धित कृयाकलापहरू',
'Construction of Transitional Shelter': 'संक्रमणकालिन आवासको निर्माण',
'Construction of Water Supply Systems': 'पानी आपूर्ति प्रणालीहरूको निर्माण',
'Contact': 'सम्पर्क',
'Contact added': 'सम्पर्क राखियो',
'Contact Added': 'सम्पर्क राखियो',
'Contact Data': 'सम्पर्क डाटा',
'Contact deleted': 'सम्पर्क हटाइयो',
'Contact Deleted': 'सम्पर्क हटाइयो',
'Contact Details': 'सम्पर्क विवरण',
'Contact Details updated': 'सम्पर्क विवरण परिमार्जन गरियो',
'Contact Info': 'सम्पर्क जानकारी',
'Contact Information': 'सम्पर्क जानकारी',
'Contact Information Added': 'सम्पर्क जानकारी राखियो',
'Contact Information Deleted': 'सम्पर्क जानकारी हटाइयो',
'Contact Information Updated': 'सम्पर्क जानकारी परिमार्जन गरियो',
'Contact Method': 'सम्पर्क तरिका',
'Contact People': 'सम्पर्क मानिसहरू',
'Contact Person': 'सम्पर्क ब्यक्ति',
'Contact Updated': 'सम्पर्क परिमार्जन गरियो',
'Contact Us': 'हामीलाई सम्पर्क गर्नुहोस्',
'Contact us': 'हामीलाई सम्पर्क गर्नुहोस्',
'Contacts': 'सम्पर्कहरू',
'Context': 'अवस्था',
'Contingency/Preparedness Planning': 'अपतकालिन पूर्वतयारी योजना',
'Contract End Date': 'सम्झौता सकिने मिति',
'Contractual Agreements (Community/Individual)': 'सम्झौता सहमतिहरू (समूदाय/ब्यक्तिगत)',
'Contractual Agreements (Governmental)': 'सम्झौता सहमतिहरू (सरकारी)',
'Controller': 'नियन्त्रक',
'Cook Islands': 'कुक आइस्ल्याण्ड',
'Coordinate Layer': 'सहकर्ता तह',
'Coordination and Partnerships': 'समन्वय र साझेदारी',
'Coordinator': 'सहकर्ता',
'COPY': 'कपी',
'Corporate Entity': 'सहकारी अंग',
'Could not add person record': 'ब्यक्तिको विवरण राख्न सकिएन',
'Could not create record.': 'विवरण बन्न सकेन',
'Could not generate report': 'विवरण आउन सकेन',
'Could not merge records. (Internal Error: %s)': 'विवरणहरू एकिकृत गर्न सकिएन । (आन्तरिक कारण: %s)',
"Couldn't open %s!": 'खुल्न सकेन %s!',
'Country': 'देश',
'Country Code': 'देश कोड नम्बर',
'Country is required!': 'देश आवश्यक छ!',
'Course': 'पाठ्यक्रम',
'Course added': 'पाठ्यक्रम संचित गरियो',
'Course Catalog': 'पाठ्यक्रम तालिका',
'Course Certificate added': 'पाठ्यक्रम प्रमाण-पत्र संचित गरियो',
'Course Certificate deleted': 'पाठ्यक्रम प्रमाण-पत्र हटाइयो',
'Course Certificate Details': 'पाठ्यक्रम प्रमाण-पत्र विवरण',
'Course Certificate updated': 'पाठ्यक्रम प्रमाण-पत्र परिमार्जन गरियो',
'Course Certificates': 'पाठ्यक्रम प्रमाण-पत्रहरू',
'Course deleted': 'पाठ्यक्रम हटाइयो',
'Course Details': 'पाठ्यक्रम विवरण',
'Course updated': 'पाठ्यक्रम परिमार्जन गरियो',
'CREATE': 'बनाउनुहोस',
'Create': 'बनाउनुहोस',
"Create 'More Info'": "थप जानकारी' बनाउनुहोस्",
'Create a new facility or ensure that you have permissions for an existing facility.': 'नयाँ सुविधा बनाउनुहोस् वा हालको सुविधामा तपाईंलाई स्वीकृती छ भन्ने निश्चित गर्नुहोस्',
'Create a new Group.': 'नयाँ समूह बनाउनुहोस्',
'Create a new organization or ensure that you have permissions for an existing organization.': 'नयाँ संस्था बनाउनुहोस् वा हालको संस्थामा तपाईंलाई स्वीकृती छ भन्ने निश्चित गर्नुहोस्',
'Create a new Team.': 'नयाँ समूह बनाउनुहोस्',
'Create Activity': 'कृयाकलाप राख्नुहोस्',
'Create Activity Type': 'कृयाकलापको प्रकार राख्नुहोस् ',
'Create Award': 'पुरस्कार राख्नुहोस् ',
'Create Beneficiary Type': 'भागिदारको प्रकार राख्नुहोस् ',
'Create Campaign': 'क्याम्पिन राख्नुहोस् ',
'Create Certificate': 'प्रमाणपत्र राख्नुहोस् ',
'Create Cluster': 'समूह राख्नुहोस् ',
'Create Coalition': 'संघ राख्नुहोस् ',
'Create Community': 'समूदाय राख्नुहोस् ',
'Create Competency Rating': 'प्रतिस्पर्धाको स्तर राख्नुहोस् ',
'Create Contact': 'सम्पर्क राख्नुहोस् ',
'Create Course': 'पाठ्यक्रम राख्नुहोस् ',
'Create Department': 'मन्त्रालय राख्नुहोस् ',
'Create Facility': 'सूविधा राख्नुहोस् ',
'Create Facility Type': 'सूविधाको प्रकार राख्नुहोस् ',
'Create Feature Layer': 'बिशेसता तह राख्नुहोस् ',
'Create Group': 'समूह राख्नुहोस् ',
'Create Hazard': 'खतरा राख्नुहोस् ',
'Create Job': 'काम राख्नुहोस् ',
'Create Job Title': 'कामको पद राख्नुहोस् ',
'Create Layer': 'तह राख्नुहोस् ',
'Create Location': 'स्थान राख्नुहोस् ',
'Create Location Hierarchy': 'स्थानको बनावट राख्नुहोस् ',
'Create Mailing List': 'ठेगाना तालिका राख्नुहोस् ',
'Create Map Profile': 'नक्साको बनावट राख्नुहोस् ',
'Create Marker': 'चिन्ह राख्नुहोस् ',
'Create Member': 'सदस्य राख्नुहोस् ',
'Create Membership Type': 'सदस्यताको प्रकार राख्नुहोस् ',
'Create Milestone': 'उद्देश्य राख्नुहोस् ',
'Create National Society': 'राष्ट्रिय समूदाय राख्नुहोस् ',
'Create Network': 'नेटवर्क राख्नुहोस् ',
'Create Office': 'कार्यालयको विवरण बनाउनुहोस्',
'Create Office Type': 'कार्यलयको प्रकार राख्नुहोस्',
'Create Organization Type': 'संस्थाको प्रकार राख्नुहोस्',
'Create Partner Organization': 'सहकारी संस्था राख्नुहोस्',
'Create Program': 'कार्यक्रम राख्नुहोस्',
'Create Project': 'परियोजनाहरु बनाउनुहोस्',
'Create Projection': 'योजना राख्नुहोस्',
'Create Record': 'रेकर्ड राख्नुहोस्',
'Create Region': 'क्षेत्र राख्नुहोस्',
'Create Resource': 'स्रोत राख्नुहोस्',
'Create Resource Type': 'स्रोत प्रकार राख्नुहोस्',
'Create Role': 'नयाँ भूमिका बनाउनुहोस्',
'Create Sector': 'क्षेत्र राख्नुहोस्',
'Create Staff Member': 'कर्मचारीको विवरण राख्नुहोस्',
'Create Team': 'समूह बनाउनुहोस्',
'Create Training Event': 'तालिम विवरण राख्नुहोस्',
'Create User': 'नयाँ प्रयोगकर्ता बनाउनुहोस्',
'Create Volunteer': 'स्वयम् सेवक राख्नुहोस्',
'Create Volunteer Cluster': 'स्वयम् सेवक कागजात राख्नुहोस्',
'Create Volunteer Cluster Position': 'स्वयम् सेवक पद कागजात राख्नुहोस्',
'Create Volunteer Cluster Type': 'स्वयम् सेवकको कागजात प्रकार राख्नुहोस्',
'Create Volunteer Role': 'स्वयम सेवकको भूमिका राख्नुहोस्',
'Create Volunteer to Project': 'परियोजनामा स्वयम सेवक राख्नुहोस्',
'created': 'बनाइयो',
'Created By': 'द्वारा बनाइएको',
'Created on %s': '%s मा बनाइएको',
'Created on %s by %s': '%s मा %s द्वारा बनाइएको',
'Credential': 'कागजात',
'Credential added': 'कागजात संचित गरियो',
'Credential deleted': 'कागजात हटाइयो',
'Credential Details': 'कागजात विवरण',
'Credential updated': 'कागजात परिमार्जन गरियो',
'Credentialling Organization': 'कागजात व्यबस्थापन',
'Credentials': 'कागजातहरू',
'Critical Infrastructure': 'जोखिमयुक्त भौतिक पूर्वाधार',
'Crop Image': 'तस्विर काट्नुहोस्',
'curly': 'घुम्रिएको',
'Currency': 'मुद्रा',
'Current': 'हाल',
'current': 'वर्तमान',
'Current Home Address': 'हालको घरको ठेगाना',
'Current Location': 'हालको स्थान',
'Currently no Appraisals entered': 'हाल कुनै मुल्यांकन राखिएको छैन',
'Currently no Certifications registered': 'हाल कुनै प्रमाणीकरण राखिएको छैन',
'Currently no Course Certificates registered': 'हाल कुनैपनि पाठ्यक्रम प्रमाण-पत्रहरू दर्ता गरिएको छैन',
'Currently no Credentials registered': 'हाल कुनैपनि कागजातहरू दर्ता गरिएको छैन',
'Currently no entries in the catalog': 'हाल तालिकामा कुनैपनि कुरा राखिएको छैन',
'Currently no hours recorded for this volunteer': 'हाल यो स्वयम्-सेवकको कुनै पनि समय राखिएको छैन',
'Currently no Participants registered': 'हाल कुनैपनि सहभागीहरू दर्ता गरिएको छैन',
'Currently no Professional Experience entered': 'हाल कुनैपनि ब्यबसायिक अनुभव राखिएको छैन',
'Currently no programs registered': 'हाल कुनैपनि कार्यक्रम दर्ता गरिएको छैन',
'Currently no Skill Equivalences registered': 'हाल कुनैपनि सिप हरह दर्ता गरिएको छैन',
'Currently no Skills registered': 'हाल कुनैपनि सिपहरू दर्ता गरिएको छैन',
'Currently no staff assigned': 'हाल कुनैपनि कर्मचारीलाई काममा लगाइएको छैन',
'Currently no training events registered': 'हाल कुनैपनि तालिम कार्यक्रम दर्ता गरिएको छैन',
'Currently no Trainings registered': 'हाल कुनैपनि तालिहरू दर्ता गरिएको छैन',
'CV': 'बायोडाटा',
'Cyclone': 'भूमरी',
'Daily': 'दैनिक',
'Daily Work': 'दैनिक कार्य',
'dark': 'अँध्यारो',
'Data': 'आंकडा',
'Data added to Theme Layer': 'स्वरूप तहमा आँकडा संचित गरियो',
'Data import error': 'आँकडा राख्नु गल्ती',
'Data Type': 'आँकडा प्रकार',
'Data uploaded': 'आँकडा संचित गरियो',
'Database': 'आँकडासम्बन्धी',
'Database Development': 'डाटावेश विकास',
'Date': 'मिति',
'Date Created': 'बनाइएको मिति',
'Date Due': 'म्याद मिति',
'Date Joined': 'प्रवेश मिति',
'Date Modified': 'मिति परिवर्तन गरियो',
'Date must be %(max)s or earlier!': 'मिति %(max)s वा अघिको हुनैपर्छ!',
'Date must be %(min)s or later!': 'मिति %(min)s वा पछिको हुनैपर्छ!',
'Date must be between %(min)s and %(max)s!': 'मिति %(min)s र %(max)s को बिचमा हुनैपर्छ !',
'Date of Birth': 'जन्म मिति',
'Date Printed': 'मिति प्रीन्ट गरियो',
'Date Received': 'मिति प्राप्त गरियो',
'Date resigned': 'छोडेको मिति',
'Date/Time': 'मिति/समय',
'Day': 'दिन',
'De-duplicate': 'नक्कल हटाउनुहोस्',
'De-duplicate Records': 'विवरणहरूको नक्कल हटाउनुहोस्',
'Dead Body': 'मृत शरिर',
'deceased': 'मृत',
'Deceased': 'मृत',
'Decision': 'निर्णय',
'Default': 'स्वचलानमा रहेको',
'Default Base layer?': 'स्वचलानमा रहेको आधारभुत तह?',
'Default Location': 'स्वचलानमा रहेको स्थान',
'Default Marker': 'स्वचलानमा रहेको चिन्ह',
'Default Realm': 'स्वचलानमा रहेको क्षेत्र',
'Default Realm = All Entities the User is a Staff Member of': 'स्वचलानमा रहेको क्षेत्र = प्रयोगकर्ता कर्मचारी सदस्य रहेका सबै अंगहरू',
'Default?': 'स्वचलानमा रहेको?',
'Defines the icon used for display of features on handheld GPS.': 'हस्त निर्देशित जि.पि.एस.मा डिस्प्ले दिने कार्यको निम्ति प्रयोग गरिएको आइकनलाई परिभाषित गर्दछ ।',
'Defines the icon used for display of features on interactive map & KML exports.': 'इन्टर्याक्टिभ नक्सा तथा के.एम.एल. विवरणमा डिस्प्ले दिने कार्यको निम्ति प्रयोग गरिएको आइकनलाई परिभाषित गर्दछ ।',
'Degrees in a latitude must be between -90 to 90.': 'अक्षांशमा प्रयोग गरिएको डिग्री -९० देखि ९० मध्येमा हुनुपर्छ ।',
'Degrees in a longitude must be between -180 to 180.': 'देशान्तरमा प्रयोग गरिएको डिग्री -१८० देखि १८० मध्येमा हुनुपर्छ ।',
'Degrees must be a number.': 'कोणहरू अंकमा नै हुनुपर्छ ।',
'DELETE': 'डि.इ.एल.इ.टि.इ.',
'Delete': 'हटाउनुहोस्',
'Delete Affiliation': 'स्वीकृति हटाउनुहोस्',
'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'अपलोड भन्दा पहिले प्रयोगकर्तासँग स्वीकृती रहेको यो प्रकारको सम्पूर्ण आँकडा हटाउनुहोस्. अफलाइनमा आँकडा तयार गरि पढ्नको लागि मात्र संचित गर्नको लागि यो बनाइएको हो ।',
'Delete Appraisal': 'मुल्यंकन हटाउनुहोस्',
'Delete Award': 'पुरस्कार हटाउनुहोस्',
'Delete Branch': 'शाखा हटाउनुहोस्',
'Delete Certificate': 'प्रमाण-पत्र हटाउनुहोस्',
'Delete Certification': 'प्रमाणीकरण हटाउनुहोस्',
'Delete Cluster': 'समूह हटाउनुहोस्',
'Delete Competency Rating': 'प्रतिस्पर्धा स्तर हटाउनुहोस्',
'Delete Contact': 'सम्पर्क हटाउनुहोस्',
'Delete Contact Information': 'सम्पर्क जानकारी हटाउनुहोस्',
'Delete Course': 'पाठ्यक्रम हटाउनुहोस्',
'Delete Course Certificate': 'पाठ्यक्रम प्रमाण-पत्र हटाउनुहोस्',
'Delete Credential': 'कागजात हटाउनुहोस्',
'Delete Data from Theme layer': 'स्वरूप तह बाट आँकडा हटाउनुहोस्',
'Delete Department': 'मन्त्रालय हटाउनुहोस्',
'Delete Deployment': 'परियोजन हटाउनुहोस्',
'Delete Donor': 'दाता हटाउनुहोस्',
'Delete Facility': 'सुविधा हटाउनुहोस्',
'Delete Facility Type': 'सुविधाको प्रकार हटाउनुहोस्',
'Delete Feature Layer': 'विशेषता तह हटाउनुहोस्',
'Delete Group': 'समूह हटाउनुहोस्',
'Delete Hazard': 'खतरा हटाउनुहोस्',
'Delete Hours': 'घण्टा हटाउनुहोस्',
'Delete Image': 'तस्विर हटाउनुहोस्',
'Delete Job Title': 'पद हटाउनुहोस्',
'Delete Layer': 'तह हटाउनुहोस्',
'Delete Location': 'स्थान हटाउनुहोस्',
'Delete Location Hierarchy': 'स्थान बनावट हटाउनुहोस्',
'Delete Mailing List': 'ठेगाना तालिका हटाउनुहोस्',
'Delete Map Profile': 'नक्सा बनावट हटाउनुहोस्',
'Delete Marker': 'चिन्ह हटाउनुहोस्',
'Delete Member': 'सदस्य हटाउनुहोस्',
'Delete Membership': 'सदस्यता हटाउनुहोस्',
'Delete Membership Type': 'सदस्यता प्रकार हटाउनुहोस्',
'Delete National Society': 'राष्ट्रिय समाज हटाउनुहोस्',
'Delete Office': 'कार्यलय हटाउनुहोस्',
'Delete Office Type': 'कार्यलय प्रकार हटाउनुहोस्',
'Delete Organization': 'संस्था हटाउनुहोस्',
'Delete Organization Type': 'संस्था प्रकार हटाउनुहोस्',
'Delete Participant': 'सहभागी हटाउनुहोस्',
'Delete Partner Organization': 'साझेदार संस्था हटाउनुहोस्',
'Delete Person': 'ब्यक्ति हटाउनुहोस्',
'Delete PoI Type': 'धुर्व प्रकार हटाउनुहोस्',
'Delete Point of Interest': 'रूचीको बुँदा हटाउनुहोस्',
'Delete Professional Experience': 'ब्यबसायिक अनुभव हटाउनुहोस्',
'Delete Program': 'कार्यक्रम हटाउनुहोस्',
'Delete Project': 'परियोजना हटाउनुहोस्',
'Delete Projection': 'योजना हटाउनुहोस्',
'Delete Record': 'विवरण हटाउनुहोस्',
'Delete Region': 'क्षेत्र हटाउनुहोस्',
'Delete Resource': 'स्रोत हटाउनुहोस्',
'Delete Resource Type': 'स्रोत प्रकार हटाउनुहोस्',
'Delete Role': 'भूमिका हटाउनुहोस्',
'Delete Room': 'कोठा हटाउनुहोस्',
'Delete saved search': 'संचित खोजी हटाउनुहोस्',
'Delete Sector': 'क्षेत्र हटाउनुहोस्',
'Delete Service': 'सेवा हटाउनुहोस्',
'Delete Skill': 'सिप हटाउनुहोस्',
'Delete Skill Equivalence': 'सिप सरह हटाउनुहोस्',
'Delete Skill Type': 'सिप प्रकार हटाउनुहोस्',
'Delete Staff Assignment': 'कर्मचारीको काम हटाउनुहोस्',
'Delete Staff Member': 'कर्मचारी सदस्य हटाउनुहोस्',
'Delete Status': 'अवस्था हटाउनुहोस्',
'Delete Symbology': 'चिन्हता हटाउनुहोस्',
'Delete Theme': 'स्वरूप हटाउनुहोस्',
'Delete this Filter': 'यो फिल्टर हटाउनुहोस्',
'Delete Training': 'तालिम हटाउनुहोस्',
'Delete Training Event': 'तालिम कार्यक्रम हटाउनुहोस्',
'Delete Volunteer': 'स्वयम्-सेवक हटाउनुहोस्',
'Delete Volunteer Cluster': 'स्वयम्-सेवक समूह हटाउनुहोस्',
'Delete Volunteer Cluster Position': 'स्वयम्-सेवक समूह पद हटाउनुहोस्',
'Delete Volunteer Cluster Type': 'स्वयम्-सेवक समूह प्रकार हटाउनुहोस्',
'Delete Volunteer Role': 'स्वयम्-सेवक भूमिका हटाउनुहोस्',
'deleted': 'हटाइयो',
'Demographics': 'जनसांखिकिय',
'Department / Unit': 'विभाग/इकाई',
'Department added': 'मन्त्रालय संचित गरियो',
'Department Catalog': 'विभागीय तालिका',
'Department deleted': 'मन्त्रालय हटाइयो',
'Department Details': 'मन्त्रालय विवरण',
'Department updated': 'मन्त्रालय परिमार्जन गरियो',
'Deployed': 'परियोजन गरियो',
'Deploying NS': 'एन.एस. परियोजन',
'Deployment': 'परियोजन',
'Deployment added': 'परियोजन संचित गरियो',
'Deployment Alert': 'परियोजन सचेतक',
'Deployment Date': 'परियोजन मिति',
'Deployment deleted': 'परियोजन हटाइयो',
'Deployment Details': 'परियोजन विवरण',
'Deployment Details updated': 'परियोजन विवरण परिमार्जन गरियो',
'Deployment Location': 'परियोजन स्थान',
'Deployments': 'परियोजनहरू',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'प्रकृया परिभाषित गर्नुहोस् जसले यो विवरणलाई (जस्तै "मेडिकल परिक्षा") सँग सम्बन्धित गराउँदछ ।',
'Description': 'ब्याख्या',
'Design, deploy & analyze surveys.': 'सर्वेक्षणको ढाँचा तयारी, परिचालन र विश्लेषण',
'Designation': 'पद',
'Desluding ': 'लेदो हटाउने',
'Destination': 'गन्तब्य',
'Detailed Description/URL': 'विस्तृत ब्याख्या/यू.आर.एल.',
'Details': 'विवरण',
'Disable': 'निस्कृय',
'Disaster Law': 'विपद् कानून ',
'Disaster Management System Officer': 'प्रकोप ब्यबस्थापन प्रकृया कर्मचारी',
'Disaster Management Unit Assistant': 'प्रकोप ब्यबस्थापन इकाई सहयोग',
'Disaster Risk Management': 'विपद् जोखिम ब्यवस्थापन',
'Disaster Risk Reduction': 'विपद् जोखिम न्यूनिकरण',
'Disaster Type': 'प्रकोप प्रकार',
'Disease Prevention': 'रोग रोकथाम',
'diseased': 'रोग लागेको',
'displaced': 'विस्थापित',
'Display Polygons?': 'डिस्प्ले बहुभुज?',
'Display Routes?': 'डिस्प्ले मार्गहरू?',
'Display Tracks?': 'डिस्प्ले ट्र्याक?',
'Display Waypoints?': 'डिस्प्ले मार्ग बिन्दुहरू?',
'Distribution of Food': 'खाद्यान्न वितरण',
'Distribution of Non-Food Items': 'गैर खाद्य सामग्री वितरण',
'Distribution of Shelter Repair Kits': 'आवास मर्मत सामग्री वितरण',
'Diversifying Livelihoods': 'जीवीकोपार्जनमा विविधिकरण',
'divorced': 'पारपाचुके',
'DM / Relief': 'डि.एम./सहयोग',
'DM Planning': 'डि.एम योजना',
'Do you really want to approve this record?': 'के तपाईं यो विवरणलाई वास्तबमा नै परिवर्तन गर्न चाहानुहुन्छ?',
'Do you really want to delete these records?': 'के तपाईं यी विवरणहरूलाई वास्तबमा नै हटाउन चाहानुहुन्छ?',
'Do you really want to delete this record? (This action can not be reversed)': 'के तपाईं यो विवरणलाई हटाउन चाहानुहुन्छ? (यसलाई फर्काउन सकिँदैन)',
'Document Scan': 'कागजात स्क्यान',
'Documents': 'कागजातहरू',
'Domain': 'डोमेन',
'Donor': 'दाता',
'Donor added': 'दाता संचित गरियो',
'Donor deleted': 'दाता हटाइयो',
'Donor Details': 'दाता विवरण',
'Donor Driven Housing Reconstruction': 'दाता निर्देशित आवास पूर्ननिर्माण',
'Donor updated': 'दाता परिमार्जन गरियो',
'Donors': 'दाताहरू',
'Donors Report': 'दाताहरूको प्रतिबेदन',
'Download OCR-able PDF Form': 'ओ.सि.आर. भएको पि.डि.एफ. फारम डाउनलोड गर्नुहोस्',
'Download Template': 'नमुना डाउनलोड गर्नुहोस्',
'Draft': 'खाका',
'Draft Features': 'खाका विशेषताहरू',
'Drag an image below to crop and scale it before uploading it:': 'अपलोड गर्न भन्दा पहिले तस्बिरलाई तल घिस्र्याएर काटेर आकार दिनुहोस्:',
'Drainage': 'ढल',
'Draw a square to limit the results to just those within the square.': 'नतिजाहरू वर्गभित्र पर्नेमा मात्र सीमित गर्न एउटा वर्गाकार आकार खिच्नुहोस् ।',
'Driver': 'सवारी चालक',
'Driving License': 'सवारी चालक अनुमती पत्र',
'Drought': 'खडेरी',
'DRR': 'डि.आर.आर.',
'DRRPP Extensions': 'डि.आर.आर.पि.पि. थप कार्यक्रमहरू',
'Duplicate': 'नक्कल',
'Duplicate label selected': 'नक्कल स्तर छानियो',
'Duration': 'अवधी',
'Duration (months)': 'अवधी (महिनाहरू)',
'E-mail': 'इ-मेल',
'Early Warning': 'पूर्वचेतावनी',
'Early Warning Systems': 'पहिलेको सचेत प्रकृयाहरू',
'Earthquake': 'भूकम्प',
'Earthquakes': 'भूकम्प',
'Edit': 'परिवर्तन',
'Edit %(site_label)s Status': '%(site_label)s अवस्था परिवर्तन',
"Edit 'More Info'": "थप जानकारी' परिवर्तन",
'Edit Activity': 'कृयाकलाप परिवर्तन',
'Edit Activity Organization': 'कृयाकलाप ब्यबस्थापन परिवर्तन',
'Edit Activity Type': 'कृयाकलाप प्रकार परिवर्तन',
'Edit Address': 'ठेगाना परिवर्तन',
'Edit Affiliation': 'स्वीकृती परिवर्तन',
'Edit Annual Budget': 'वार्षिक बजेट परिवर्तन',
'Edit Appraisal': 'मुल्यांकन परिवर्तन',
'Edit Award': 'परस्कार परिवर्तन',
'Edit Beneficiaries': 'भागिदारहरू परिवर्तन',
'Edit Beneficiary Type': 'भागिदार प्रकार परिवर्तन',
'Edit Branch Organization': 'शाखा संस्था परिवर्तन',
'Edit Campaign': 'क्याम्पिन परिवर्तन',
'Edit Certificate': 'प्रमाण-पत्र परिवर्तन',
'Edit Certification': 'प्रमाणिकरण परिवर्तन',
'Edit Cluster': 'समूह परिवर्तन',
'Edit Community Details': 'समुदाय विवरण परिवर्तन',
'Edit Competency Rating': 'प्रतिस्पर्धा स्तर परिवर्तन',
'Edit Contact': 'सम्पर्क परिवर्तन',
'Edit Contact Details': 'सम्पर्क विवरण परिवर्तन',
'Edit Contact Information': 'सम्पर्क जानकारी परिवर्तन',
'Edit Course': 'पाठ्यक्रम परिवर्तन',
'Edit Course Certificate': 'पाठ्यक्रम प्रमाण-पत्र परिवर्तन',
'Edit Credential': 'कागजात परिवर्तन',
'Edit Department': 'मन्त्रालय परिवर्तन',
'Edit Deployment Details': 'परियोजन विवरण परिवर्तन',
'Edit Details': 'विवरण परिवर्तन',
'Edit Donor': 'दाता परिवर्तन',
'Edit DRRPP Extensions': 'डि. आर. आर. पि. पि. थप कार्यक्रमहरू परिवर्तन',
'Edit Education Details': 'शिक्षा विवरण परिवर्तन',
'Edit Entry': 'प्रवेश परिवर्तन',
'Edit Experience': 'अनुभव परिवर्तन',
'Edit Facility': 'सुविधा परिवर्तन',
'Edit Facility Type': 'सुविधा प्रकार परिवर्तन',
'Edit Feature Layer': 'विशेषता तह परिवर्तन',
'Edit Group': 'समूह परिवर्तन',
'Edit Hazard': 'खतरा परिवर्तन',
'Edit Hours': 'समय (घण्टा) परिवर्तन',
'Edit Identity': 'परिचय परिवर्तन',
'Edit Image Details': 'तस्विर विवरण परिवर्तन',
'Edit Job': 'काम परिवर्तन',
'Edit Job Title': 'पद परिवर्तन',
'Edit Keyword': 'मुख्यशब्द परिवर्तन',
'Edit Layer': 'तह परिवर्तन',
'Edit Level %d Locations?': 'स्तर %d स्थानहरू परिवर्तन?',
'Edit Location': 'स्थान परिवर्तन',
'Edit Location Details': 'स्थान विवरण परिवर्तन',
'Edit Location Hierarchy': 'स्थान बनावट परिवर्तन',
'Edit Log Entry': 'दर्ताप्रवेश परिवर्तन',
'Edit Logged Time': 'दर्ता गरिएको समय परिवर्तन',
'Edit Mailing List': 'ठेगाना तालिका परिवर्तन',
'Edit Map Profile': 'नक्सा बनावट परिवर्तन',
'Edit Marker': 'चिन्ह परिवर्तन',
'Edit Member': 'सदस्य परिवर्तन',
'Edit Membership': 'सदस्यता परिवर्तन',
'Edit Membership Type': 'सदस्यता प्रकार परिवर्तन',
'Edit Milestone': 'उद्देश्य परिवर्तन',
'Edit National Society': 'राष्ट्रिय समाज परिवर्तन',
'Edit Network': 'नेटवर्क परिवर्तन',
'Edit Office': 'कार्यलय परिवर्तन',
'Edit Office Type': 'कार्यलय प्रकार परिवर्तन',
'Edit Organization': 'संस्था परिवर्तन',
'Edit Organization Type': 'संस्था प्रकार परिवर्तन',
'Edit Output': 'नतिजा परिवर्तन',
'Edit Participant': 'सहभागी परिवर्तन',
'Edit Partner Organization': 'साझेदार संस्था परिवर्तन',
'Edit Permissions for %(role)s': '%(role)s को लागि स्वीकृतीहरू परिवर्तन',
'Edit Person Details': 'ब्यक्ति विवरण परिवर्तन',
"Edit Person's Details": 'ब्यक्तिको विवरण परिवर्तन',
'Edit PoI Type': 'धुर्व प्रकार परिवर्तन',
'Edit Point of Interest': 'रूचीको बुँदा परिवर्तन',
'Edit Policy or Strategy': 'नियम तथा लक्ष परिवर्तन',
'Edit Professional Experience': 'ब्यबसायिक अनुभव परिवर्तन',
'Edit Profile Configuration': 'प्रोफाइल बनावट परिवर्तन',
'Edit Program': 'कार्यक्रम परिवर्तन',
'Edit Project': 'परियोजना परिवर्तन',
'Edit Project Organization': 'परियोजना संस्था परिवर्तन',
'Edit Projection': 'योजना परिवर्तन',
'Edit Record': 'विवरण परिवर्तन',
'Edit Region': 'क्षेत्र परिवर्तन',
'Edit Resource': 'स्रोत परिवर्तन',
'Edit Resource Type': 'स्रोत प्रकार परिवर्तन',
'Edit Response Summary': 'प्रतिकृया संक्षेप परिवर्तन',
'Edit Role': 'भूमिका परिवर्तन',
'Edit Room': 'कोठा परिवर्तन',
'Edit saved search': 'संचित खोजी परिवर्तन',
'Edit Sector': 'क्षेत्र परिवर्तन',
'Edit Service': 'सेवा परिवर्तन',
'Edit Skill': 'सिप परिवर्तन',
'Edit Skill Equivalence': 'सिप सरह परिवर्तन',
'Edit Skill Type': 'सिप प्रकार परिवर्तन',
'Edit Staff Assignment': 'कर्मचारी काम परिवर्तन',
'Edit Staff Member Details': 'कर्मचारी सदस्य विवरण परिवर्तन',
'Edit Status': 'अवस्था परिवर्तन',
'Edit Symbology': 'चिन्हता परिवर्तन',
'Edit Task': 'काम परिवर्तन',
'Edit Team': 'समूह परिवर्तन',
'Edit the OpenStreetMap data for this area': 'यो क्षेत्रको लागि खुलासडकनक्सा आँकडा परिवर्तन',
'Edit Theme': 'स्वरूप परिवर्तन',
'Edit Theme Data': 'स्वरूप आँकडा परिवर्तन',
'Edit this entry': 'यो प्रवेश परिवर्तन',
'Edit Training': 'तालिम परिवर्तन',
'Edit Training Event': 'तालिम कार्यक्रम परिवर्तन',
'Edit Volunteer Cluster': 'स्वयम्-सेवक समूह परिवर्तन',
'Edit Volunteer Cluster Position': 'स्वयम्-सेवक समूह पद परिवर्तन',
'Edit Volunteer Cluster Type': 'स्वयम्-सेवक समूह प्रकार परिवर्तन',
'Edit Volunteer Details': 'स्वयम्-सेवक विवरण परिवर्तन',
'Edit Volunteer Role': 'स्वयम्-सेवक भूमिका परिवर्तन',
'Education': 'शिक्षा',
'Education & Advocacy': 'शिक्षा र वकालत',
'Education & School Safety': 'शिक्षा र विद्यालय सुरक्षा',
'Education Details': 'शिक्षा विवरण',
'Education details added': 'शिक्षा विवरण संचित गरियो',
'Education details deleted': 'शिक्षा विवरण हटाइयो',
'Education details updated': 'शिक्षा विवरण परिमार्जन गरियो',
'Effort Report': 'सामर्थ्य प्रतिवेदन',
'Either a shelter or a location must be specified': 'बसोबास अथवा स्थानमध्ये कुनैपनि पहिचान गरिनै पर्छ',
'Either file upload or image URL required.': 'फाइल अपलोड वा तस्विर यू.आर.एल. आवश्यक पर्छ ।',
'Email': 'इमेल',
'Email Address': 'इमेल ठेगाना',
'Emergency Contacts': 'आपतकालिन सम्पर्कहरू',
'Emergency Health': 'आकस्मिक स्वास्थ्य',
'Emergency Householdwater Treatment and Storage': 'आपतकालिन गृह खानेपानि उपचार तथा भण्डार',
'Emergency Medical Technician': 'आकस्मिक मेडिकल प्राविधिक',
'Emergency Shelter': 'आपतकालिन वसोबास',
'Emergency Telecommunications': 'आपतकालिन टेलिफोन संचार',
'Emergency Water Supply': 'आपतकालिन पानी आपूर्ति',
'Emergency WASH': 'आकस्मिक खानेपानी तथा सरसफाई',
'Empty': 'खाली',
'Enable': 'सक्रिय',
'Enable in Default Config?': 'स्वचलानमा रहेको बनावटलाई सक्रिय गर्न चाहानुहुन्छ?',
'End Date': 'अन्तिम मिति',
"Enter a name to search for. You may use % as wildcard. Press 'Search' without input to list all items.": "खोजीको निम्ति नाम टाइप गर्नुहोस् । तपाईँले वाइल्डकार्डको रूपमा % प्रयोग गर्न सक्नुहुन्छ । सबै वस्तुहरूको तालिका इनपुट बिना नै 'खोजी' थिच्नुहोस् ।",
'Enter a valid email': 'मान्य इमेल राख्नुहोस्',
'Enter a valid phone number': 'मान्य फोन नम्बर राख्नुहोस्',
'enter a value': 'राख्नुहोस्',
'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'स्पेलिङ गल्ति नगरि टाइप गर्नुहोस्, यो क्षेत्र भइरहेको आँकडासँग मिल्नु पर्छ ।',
'enter date and time': 'मिति र समय राख्नुहोस्',
'enter date and time in range %(min)s %(max)s': 'मिति र समय %(min)s %(max)s भित्र राख्नुहोस्',
'enter date and time on or after %(min)s': ' %(min)s वा त्यस्पछि मिति र समय प्रवेश गर्नुहोस्',
'enter date and time on or before %(max)s': '%(max)s वा त्यस्अघि मिति र समय प्रवेश गर्नुहोस्',
'Enter some characters to bring up a list of possible matches': 'सम्भावित मेलहरूको तालिका निकाल्नको निम्ति केहि शब्दहरू प्रवेश गर्नुहोस् ।',
'Enter the same password as above': 'माथीको पासवर्ड पुन राख्नुहोस्',
'Enter your first name': 'तपाईंको पहिलो नाम राख्नुहोस्',
'Enter your organization': 'तपाईंको संस्था राख्नुहोस्',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'फोन नम्बर राख्नु भनेको स्वेच्छिक हो, तर सो राख्नुभएको खण्डमा तपाईले एस.एम.एस. संदेश प्राप्त गर्नुहुनेछ ।',
'Enterprise Development Training ': 'उद्यम विकास तालिम',
'Entity': 'अंग',
'Environment': 'वातावरण',
'Epidemic': 'महामारी',
'Epidemic/Pandemic Preparedness': 'महामारी/ विश्वब्यापी महामारी पूर्वतयारी',
'Errors': 'गल्तीहरू',
'ESRI Shape File': 'इ.एस.आर.आइ. आकार फाइल',
'Essential Staff?': 'अतिआवश्यक कर्मचारी?',
'Estimated Reopening Date': 'पुन: खुलाहुने अन्दाजि मिति',
'Ethnicity': 'प्रजातिय',
'Euros': 'यूरो',
'Evacuating': 'खाली गर्नु',
'Evacuation Drills': 'खाली गर्ने अभ्यासहरू',
'Events': 'कार्यक्रमहरू',
'Excellent': 'उत्कृष्ट',
'Exercise': 'अभ्यास',
'Excreta Disposal': 'दिसा विसर्जन',
'Experience': 'अनुभव',
'expired': 'मिति समाप्त',
'Expiring Staff Contracts Report': 'म्याद सकिन लागेको कर्मचारीको सम्झौता प्रतिवेदन',
'Expiry (months)': 'म्याद सकिने (महिनाहरू)',
'Expiry Date': 'म्याद सकिने मिति',
'Export as': 'को रूपमा निर्यात',
'Export in %(format)s format': '%(format)s प्रकारमा निर्यात',
'Export in GPX format': 'जि.पि.एप्क्स.प्रकारमा निर्यात',
'Export in KML format': 'के.एम.एल. प्रकारमा निर्यात',
'Export in OSM format': 'ओ.एस.एम. प्रकारमा निर्यात',
'Eye Color': 'आँखाको रंग',
'Facial hair, color': 'अनुहारको रौं, रंग',
'Facial hair, comment': 'अनुहारको रौं, टिप्पणी',
'Facial hair, length': 'अनुहारको रौं, लम्बाइ',
'Facial hair, type': 'अनुहारको रौं, प्रकार',
'Facilities': 'सूबिधाहरू',
'Facility': 'सुविधा',
'Facility added': 'सुविधा संचित गरियो',
'Facility Contact': 'सुविधा सम्पर्क',
'Facility deleted': 'सुविधा हटाइयो',
'Facility Details': 'सुविधा विवरण',
'Facility Status': 'सुविधा अवस्था',
'Facility Type': 'सुविधा प्रकार',
'Facility Type added': 'सुविधा प्रकार संचित गरियो',
'Facility Type deleted': 'सुविधा प्रकार हटाइयो',
'Facility Type Details': 'सुविधा प्रकार विवरण',
'Facility Type updated': 'सुविधा प्रकार परिमार्जन गरियो',
'Facility Types': 'सुविधा प्रकारहरू',
'Facility updated': 'सुविधा परिमार्जन गरियो',
'Fail': 'असफल',
'Fair': 'उचित',
'Family': 'परिवार',
'fat': 'मोटो',
'Fax': 'फ्याक्स',
'Feature Info': 'विशेषता जानकारी',
'Feature Layer': 'विशेषता तह',
'Feature Layer added': 'विशेषता तह संचित गरियो',
'Feature Layer deleted': 'विशेषता तह हटाइयो',
'Feature Layer Details': 'विशेषता तह विवरण',
'Feature Layer updated': 'विशेषता तह परिमार्जन गरियो',
'Feature Layers': 'विशेषता तहहरू',
'Feature Namespace': 'विशेषता नाम स्थान',
'Feature Type': 'विशेषता प्रकार',
'Features Include': 'विशेषताहरू भन्नाले',
'Feedback': 'प्रतिकृया',
'Feeding Programmes': 'खुवाउने कार्यक्रम',
'female': 'महिला',
'Field': 'क्षेत्र',
'File': 'फाइल',
'Files': 'फाइलहरू',
'fill in order: day(2) month(2) year(4)': 'खालि ठाँउ भर्नुहोस्: दिन(२) महिना(२) वर्ष(४)',
'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'खालि ठाँउ भर्नुहोस्: घण्टा(२) कम्तिमा (२) दिन(२) महिना(२) वर्ष(४)',
'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'खालि ठाँउ भर्नुहोस्: घण्टा(२) कम्तिमा (२) महिना(२) दिन(२) वर्ष(४)',
'fill in order: month(2) day(2) year(4)': 'खालि ठाँउ भर्नुहोस्: महिना(२) दिन(२) वर्ष(४)',
'Filter': 'फिल्टर',
'Filter by Location': 'स्थान को आधारमा फिल्टर',
'Filter Options': 'फिल्टर विकल्पहरू',
'Filter type': 'फिल्टर प्रकार',
'Filter type ': 'फिल्टर प्रकार ',
'Finance / Admin': 'वित्तिय / संचालक',
'Finance Officer': 'वित्तिय कर्मचारी',
'Financial Risk Sharing ': 'वित्तिय खतरा बाँडफाँड',
'Financial Services': 'वित्तिय सेवाहरू',
'Financial System Development': 'वित्तिय प्रणाली विकास',
'Find more': 'थप प्राप्त गर्नुहोस्',
'Find on Map': 'नक्सामा प्राप्त गर्नुहोस्',
'Fingerprint': 'औँठाछाप',
'Fire': 'आगलागी',
'First': 'पहिलो',
'First Aid': 'प्राथमिक उपचार',
'First Name': 'पहिलो नाम',
'Fleet Manager': 'सवारी साधन व्यवस्थापक',
'Flood': 'बाढी',
'Focal Person': 'मुख्य ब्यक्ति',
'Folder': 'फोल्डर',
'Food Security': 'खाद्य सुरक्षा',
'Food Supplementation': 'खानेकुरा बाँडफाँड',
'For Entity': 'अंगको लागि',
'For live help from the Sahana community on using this application, go to': 'यो एप्लिकेशन प्रयोग गर्ने सम्बन्धमा साहाना समुदायबाट प्रत्यक्ष सहयोग प्राप्त गर्नको लागि, जानुहोस्',
'For more details on the Sahana Eden system, see the': 'साहाना इदेन प्रकृयाको बारेमा थप विवरण को लागि, हेर्नुहोस्',
'forehead': 'निधार',
'form data': 'फारम आँकडा',
'Form Settings': 'फारम सेटिङ',
'Format': 'नमुना',
'found': 'प्राप्त भयो',
'Frequency': 'फ्रीक्वेन्सी',
'Full beard': 'पूर्ण दारी',
'Fullscreen Map': 'पूर्णस्क्रिन नक्सा',
'Function': 'कार्य',
'Function Permissions': 'कार्य स्वीकृतीहरू',
'Funding': 'अनुदान कार्यक्रम',
'Funding Report': 'अनुदान कार्यक्रम प्रतिवेदन',
'Funds Contributed': 'दिइएको अनुदान',
'Gap Analysis Map': 'दुरी अनुसन्धान नक्सा',
'Gap Analysis Report': 'दुरी अनुसन्धान प्रतिवेदन',
'Gender': 'लिङ्ग',
'Generator': 'जेनेरेटर',
'Geocode': 'जिओ कोड',
'Geocoder Selection': 'जिओ कोड छान्ने',
'GeoJSON Layer': 'जिओ जे.एस.एन.तह',
'Geometry Name': 'ज्यामिती नाम',
'GeoRSS Layer': 'जिओ आर.एस.एस. तह',
'Get Feature Info': 'विशेषता जानकारी प्राप्त गर्नुहोस्',
'getting': 'नजिकिँदै',
'GIS & Mapping': 'भौगोलिक सूचना प्रणाली र नक्सांकन',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'तस्विरको विस्तृत विवरण दिनुहोस्, जस्तै तस्बिरको कहाँ के देख्न सकिन्छ (वैकल्पिक).',
'Go': 'जानुहोस्',
"Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": "%(url)s जानुहोस्, खोल्नुहोस र तपाईंको एप्लिकेसन दर्ता गर्नुहोस् । तपाईंले जुनसुकै यूआरएल राख्न सक्नुहुन्छ र तपाईंले खालि 'नक्सा परिवर्तन' स्वीकृती मा क्लिक गर्न सक्नुहुन्छ ।",
'Go to Functional Map': 'कार्यात्मक नक्सामा जानुहोस्',
'Goatee': 'गोटि',
'Good': 'राम्रो',
'Google Layer': 'गुगल तह',
'Google Maps': 'गुगल नक्सा(हरु)',
'Google Satellite': 'गुगल उपग्रह',
'Governance': 'कार्यकारिणी',
'Government': 'सरकार',
'GPS Marker': 'जि.पि.एस. चिन्ह',
'GPS Track': 'जि.पि.एस. ट्र्याक',
'GPS Track File': 'जि.पि.एस. ट्र्याक फाइल',
'GPX Layer': 'जि.पि.एक्स्. तह',
'Grade': 'कक्षा',
'Graph': 'ग्राफ',
'Great British Pounds': 'ब्रिटिस पाउण्ड',
'Greater than 10 matches. Please refine search further': '१० भन्दा धेरै मिल्यो । कृपया खोजी अझै मिलाउनुहोस्',
'green': 'हरियो',
'grey': 'खैरो',
'Grid': 'ग्रीड',
'Group': 'समूह',
'Group added': 'समूह संचित गरियो',
'Group deleted': 'समूह हटाइयो',
'Group description': 'समूह ब्याख्या',
'Group Description': 'समूह ब्याख्या',
'Group Details': 'समूह विवरण',
'Group Head': 'समूह प्रमुख',
'Group Leader': 'समूह अगुवा',
'Group Member added': 'समूह सदस्य संचित गरियो',
'Group Members': 'समूह सदस्यहरू',
'Group Name': 'समूह नाम',
'Group Type': 'समूह प्रकार',
'Group updated': 'समूह परिमार्जन गरियो',
'Grouped by': 'अनुसार समूह निर्धारण गरियो',
'Groups': 'समूहहरू',
'Hair Color': 'रौं रंग',
'Hair Comments': 'रौं टिप्पणीहरू',
'Hair Length': 'रौं लम्बाइ',
'Hair Style': 'रौं बनावट',
'Hand Washing Facilities': 'हात धुने सूबिधाहरू',
'Hazard': 'खतरा',
'Hazard added': 'खतरा संचित गरियो',
'Hazard added to Project': 'परियोजनामा खतरा संचित गरियो ',
'Hazard deleted': 'खतरा हटाइयो',
'Hazard Details': 'खतरा विवरण',
'Hazard removed from Project': 'परियोजनाबाट खतरा हटाइयो',
'Hazard updated': 'खतरा परिमार्जन गरियो',
'Hazards': 'खतराहरु',
'Headquarters': 'प्रधान कार्यालयहरू',
'Health': 'स्वास्थ्य',
'Health & Health Facilities': 'स्वास्थ्य र स्वास्थ्य सुविधाहरु',
'Health Awareness, Promotion': 'स्वास्थ्य जनचेतना, बढुवा',
'Health Facilities - Construction and Operation': 'स्वास्थ्य सूबिधाहरू - निर्माण र कृयाकलाप',
'Health Policy, Strategy Development': 'स्वास्थ्य नियम, उद्देश्य विकास',
'Heat Wave': 'लु',
'Height': 'उचाइ',
'Height (cm)': 'उचाइ (सेमि)',
'Heliport': 'हेलिपोर्ट',
'Help': 'सहयोग',
'HFA': 'एच.एफ.ए.',
'HFA Priorities': 'एच.एफ.ए. प्रमुखताहरू',
'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'एच.एफ.ए.१: प्रकोप खतरा न्यूनिकरण एक राष्ट्रिय तथा स्थानिय प्रमुखता हो जस्लाई लागु गर्नको निम्ति बलियो संस्थागत आधार रहेको छ ।',
'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'एच.एफ.ए.२: प्रकोप खतराहरूको पहिचान, मुल्याङ्कन, नियन्त्रण र पुर्व चेतावनीलाई बृहत बनाउने ।',
'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': 'एच.एफ.ए.३: सबै तहमा सहजता तथा सुरक्षाको वातावरण निर्माण गर्नको निम्ति ज्ञान, बुद्दि तथा शिक्षाको प्रयोग गर्नु ।',
'HFA4: Reduce the underlying risk factors.': 'एच.एफ.ए.४: हालको खतराका कारणहरूलाई कम गर्नु ।',
'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'एच.एफ.ए.५: सबै तहमार प्रभावकारी प्रतिकृयाको निम्ति प्रकोप पुर्व तयारीलाई बलियो बनाउने ।',
'Hide': 'लुकाउनुहोस्',
'Hide Chart': 'तालिका लुकाउनुहोस्',
'Hide Pivot Table': 'वृत्त तालिका लुकाउनुहोस्',
'Hide Table': 'तालिका लुकाउनुहोस्',
'Hierarchy': 'बनावट',
'Hierarchy Level 1 Name (e.g. State or Province)': 'बनावट स्तर १ नाम (जस्तै, राज्य वा अंचल)',
'Hierarchy Level 2 Name (e.g. District or County)': 'बनावट स्तर २ नाम (जस्तै, जिल्ला वा क्षेत्र)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'बनावट स्तर ३ नाम (जस्तै, शहर / नगर / गाउँ)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'बनावट स्तर ४ नाम (जस्तै, छिमेक)',
'Hierarchy Level 5 Name': 'बनावट स्तर ५ नाम',
'High': 'उच्च',
'Highest Priority Open Requests': 'उच्च प्राथमिकता खुला अनुरोधहरू',
'Hindu': 'हिन्दु',
'Home Address': 'गृह ठेगाना',
'Home Country': 'गृह देश',
'Home Phone': 'गृह फोन',
'Honorary': 'मानार्थ',
'Hospital': 'अस्पताल',
'Hospitals': 'अस्पतालहरू',
'Host': 'संचालक',
'Host National Society': 'संचालक राष्ट्रिय सोसाइटी',
'Hour': 'घण्टा',
'Hourly': 'प्रति घण्टा',
'Hours': 'समय (घण्टा)',
'hours': 'समय (घण्टा)',
'Hours added': 'समय (घण्टा) संचित गरियो',
'Hours by Program Report': 'कार्यक्रम प्रतिवेदनमा समय (घण्टा)',
'Hours by Role Report': 'भूमिका प्रतिवेदनमा समय (घण्टा)',
'Hours deleted': 'समय (घण्टा) हटाइयो',
'Hours Details': 'समय (घण्टा) विवरण',
'Hours updated': 'समय (घण्टा) परिमार्जन गरियो',
'House Design': 'घर डिजाइन',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'कति विवरण देखियो । उच्च जुम स्तरको अर्थ विस्तृत विवरण, तर धेरै क्षेत्रको होइन । थोरै जुम गर्दा धेरै क्षेत्र देखिन्छ तर विस्तृत विवरण कम हुन्छ ।',
'How often you want to be notified. If there are no changes, no notification will be sent.': 'कति पटक तपाईंलाई जानकारि दिइएको चाहानुहुन्छ । कुनै परिवर्तन गरिएन भने, कुनै जानकारी पठाइने छैन ।',
'How you want to be notified.': 'तपाईं कसरी जानकारी प्राप्त गर्न चाहानुहुन्छ?',
'HTML': 'एच.टि.एम.एल.',
'Human Resource': 'मानव स्रोत',
'Human Resource Development': 'जनशक्ति विकास',
'Human Resources': 'मानव स्रोतहरू',
'Hygiene Promotion': 'स्वच्छता प्रबर्धन',
'Hyogo Framework for Action (HFA)': 'ह्योगो कार्यसंरचना (एच.एफ.ए.)',
'I agree to the %(terms_of_service)s': '%(terms_of_service)s मा सहमत छु ।',
'ICBRR Staff': 'आइ.सि.बि.आर.आर. कर्मचारी',
'ID': 'आइ.डि.',
'ID Tag Number': 'आइ.डि. ट्याग संख्या',
'ID type': 'आइ.डि. प्रकार',
'Identities': 'आइ.डि. अंगहरू',
'Identity': 'परिचय',
'Identity added': 'परिचय संचित गरियो',
'Identity deleted': 'परिचय हटाइयो',
'Identity Details': 'परिचय विवरण',
'Identity updated': 'परिचय परिमार्जन गरियो',
'IEC Materials': 'सूचना, शिक्षा र संचार सामग्री',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'यदि कुनै प्रयोगकर्ताले यो डोमेन भएको इमेल ठेगाना आफ्नै हो भनेर प्रमाणित गर्छ भने, थप स्वीकृती आवश्यक छ कि छैन र कस्ले दिनुपर्छ भन्ने निर्धारण गर्न प्रमाणित गर्ने क्षेत्र प्रयोग गरिन्छ ।',
'If checked, the notification will contain all modified records. If not checked, a notification will be send for each modified record.': 'जाँच गरिएको खण्डमा जानकारीमा परिवर्तन गरिएको सम्पूर्ण विवरण हुन्छ । जाँच नगरिएको खण्डमा परिवर्तन गरिएको प्रत्येक विवरणको लागि छुट्टाछुट्टै जानकारी पठाइनेछ ।',
'If it is a URL leading to HTML, then this will downloaded.': 'यदि यो यू.आर.एल., एच.टि.एम.एल.ले प्रतिनिधित्व गरेको छ भने, यो डाउनलोड हुनेछ ।',
'If neither are defined, then the Default Marker is used.': 'कुनै पनि परिभाषित भएन भने, स्वचलानमा रहेको चिन्ह प्रयोग हुनेछ ।',
'If not found, you can have a new location created.': 'यदि प्राप्त भएन भने, तपाईंले नयाँ क्षेत्र बनाईएको पाउनुहुनेछ ।',
'If the location is a geographic area, then state at what level here.': 'यदि स्थान एउटा भौगोलिक स्थान हो भने, यहाँ कुन स्तरमा छ बताउनुहोस् ।',
'If the person counts as essential staff when evacuating all non-essential staff.': 'सम्पूर्ण अनावश्यक कर्मचारीलाई निकाला गर्ने क्रममा, यदि ब्यक्ति एक अति आवश्यक कर्मचारीको रूपमा लिइन्छ भने ।',
'If there are multiple configs for a person, which should be their default?': 'यदि कुनै ब्यक्तिको लागि बहुमुखिय बनावटहरू छन भने, उनिहरूको स्वचलानमा रहेको कुन् हो?',
"If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'यदि यो बनावट जि.आइ.एस. बनावट मेनु मा देखाईयो भने, मेनुलाई प्रयोग गर्नको निम्ति एउटा नाम दिनुहोस् । ब्यक्तिअन्तिम नक्सा को नाम बनावट द्वारा स्चालितरूपमा प्रयोग कर्ताको नाम राख्नेछ ।',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'डोमेन नमिलेको खण्डमा बाहेक, यो क्षेत्रमा एकाउण्ट बनाउदा धेरै जनसंख्या चाप भएमा प्रयोग कर्ता जस्ले संस्थाको रूपमा आफूलाई प्रतिनिधित्व गरेको हुन्छ भने उसलाई संस्थाको कर्मचारीकोरूपमा कार्य दिइन्छ ।',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'यदि यो क्षेत्रमा चाप भएमा ब्यक्तिको डोमेनलाई आधार मानि स्वचालितरूपमा नै ब्यक्तिलाई यो संस्थाको कर्मचारीकोरूपमा काम दिइनेछ ।',
'If this record should be restricted then select which role is required to access the record here.': 'यदि यो विवरणलाई सुरक्षित गर्न पर्छ भने, यसमा पहुँचको लागि कुन भूमिका आवस्यक पर्छ छान्नुहोस् ।',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'यदि यो विवरणलाई सुरक्षित गर्न पर्छ भने, यसमा भएको विवरणमा पहुँचको लागि कुन-कुन् भूमिका(हरू) लाई स्वीकृति दिइएको छ छान्नुहोस् ।',
"If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ कृयाकलाप राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ कृयाकलाप राख्न सक्नुहुन्छ ।',
"If you don't see the beneficiary in the list, you can add a new one by clicking link 'Create Beneficiary'.": 'यदि तालिकामा भागिदार देख्नुहुन्न भने, तपाईं "नयाँ भागिदार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ भागिदार राख्न सक्नुहुन्छ ।',
"If you don't see the campaign in the list, you can add a new one by clicking link 'Create Campaign'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ क्याम्पिन राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ क्याम्पिन राख्न सक्नुहुन्छ ।',
"If you don't see the Cluster in the list, you can add a new one by clicking link 'Create Cluster'.": 'यदि तालिकामा समूह देख्नुहुन्न भने, तपाईं "नयाँ समूह राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ समूह राख्न सक्नुहुन्छ ।',
"If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ समूदाय राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ समूदाय राख्न सक्नुहुन्छ ।',
"If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": 'यदि तालिकामा स्थान देख्नुहुन्न भने, तपाईं "नयाँ स्थान राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ स्थान राख्न सक्नुहुन्छ ।',
"If you don't see the milestone in the list, you can add a new one by clicking link 'Create Milestone'.": 'यदि तालिकामा उद्देश्य देख्नुहुन्न भने, तपाईं "नयाँ उद्देश्य राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ उद्देश्य राख्न सक्नुहुन्छ ।',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'यदि तालिकामा संस्था देख्नुहुन्न भने, तपाईं "नयाँ संस्था राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ संस्था राख्न सक्नुहुन्छ ।',
"If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": 'यदि तालिकामा परियोजना देख्नुहुन्न भने, तपाईं "नयाँ परियोजना राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ परियोजना राख्न सक्नुहुन्छ ।',
"If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": 'यदि तालिकामा क्षेत्र देख्नुहुन्न भने, तपाईं "नयाँ क्षेत्र राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ क्षेत्र राख्न सक्नुहुन्छ ।',
"If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ कृयाकलाप प्रकार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ कृयाकलाप राख्न सक्नुहुन्छ ।',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ सुविधा प्रकार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ सूविधा राख्न सक्नुहुन्छ ।',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ संस्था प्रकार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ प्रकार राख्न सक्नुहुन्छ ।',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": 'यदि तालिकामा संस्था देख्नुहुन्न भने, तपाईं "नयाँ संस्था प्रकार राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ संस्था राख्न सक्नुहुन्छ ।',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Region'.": 'यदि तालिकामा प्रकार देख्नुहुन्न भने, तपाईं "नयाँ क्षेत्र राख्न" भन्ने लिङ्कमा क्लिक गरेर नयाँ क्षेत्र राख्न सक्नुहुन्छ ।',
"If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": "यदि तपाईं फोल्डरनाम राख्नुहुन्छ भने, उक्त फोल्डरमा नक्साको तह खुल्ने एउटा तह देखा पर्नेछ । '/' ले नाम छुट्याएर यसमा सहायोग फोल्डर बनाउन सकिनेछ ।",
'If you have any questions or need support, please see': 'यदि तपाईंसँग कुनै प्रश्न छ वा सहयोगको आवश्यकता छ भने, कृपया हेर्नुहोस्',
'If you would like to help, then please %(sign_up_now)s': 'यदि तपाईं सहयोग गर्न चाहानुहुन्छ भने कृपया %(sign_up_now)s',
'ignore': 'अस्विकार',
'Ignore Errors?': 'गल्तीहरूलाई बेवास्ता गर्नुहुन्छ?',
'Image': 'तस्विर',
'Image added': 'तस्विर संचित गरियो',
'Image deleted': 'तस्विर हटाइयो',
'Image Details': 'तस्विर विवरण',
'Image File(s), one image per page': 'तस्विर फाइल(हरू), प्रत्येक पेजको लागि एउटा तस्विर',
'Image Type': 'तस्विर प्रकार',
'Image updated': 'तस्विर परिमार्जन गरियो',
'Images': 'तस्विरहरू',
'Immediately': 'तत्कालै',
'Immunisation Campaigns': 'खोप क्याम्पिनहरू',
'Import': 'आयात',
'Import Activity Data': 'आयात कृयाकलाप आँकडा',
'Import Activity Type data': 'आयात कृयाकलाप प्रकार आँकडा',
'Import Annual Budget data': 'आयात वार्षिक बजेट आँकडा',
'Import Awards': 'आयात परस्कारहरू',
'Import Certificates': 'आयात प्रमाण-पत्रहरू',
'Import Community Data': 'आयात समुदाय आँकडा',
'Import Contacts': 'आयात सम्पर्क',
'Import Courses': 'आयात पाठ्यक्रम',
'Import Data': 'आयात आँकडा',
'Import Data for Theme Layer': 'स्वरूप तहको लागि आयात आँकडा',
'Import Departments': 'आयात मन्त्रालयहरू',
'Import Deployments': 'आयात परियोजन',
'Import Facilities': 'आयात सूबिधाहरू',
'Import Facility Types': 'आयात सुविधा प्रकार',
'Import from CSV': 'सि.एस.भि. बाटको आयात',
'Import from OpenStreetMap': 'खुलासडकनक्सा बाटको आयात',
'Import Hazard data': 'आयात खतरा आँकडा',
'Import Hazards': 'आयात खतरा',
'Import Hours': 'आयात समय ',
'Import Layers': 'आयात तह',
'Import Location Data': 'आयात स्थान आँकडा',
'Import Location data': 'आयात स्थान आँकडा',
'Import Locations': 'आयात स्थान',
'Import Logged Time data': 'आयात दर्ता गरिएको समय आँकडा',
'Import Members': 'आयात सदस्य',
'Import Membership Types': 'आयात सदस्यता प्रकार',
'Import Offices': 'आयात कार्यलय',
'Import Organizations': 'आयात संस्था',
'Import Participant List': 'सहभागीको सूची समावेश',
'Import Participants': 'आयात सहभागी',
'Import Partner Organizations': 'आयात साझेदार संस्था',
'Import PoI Types': 'आयात पोल प्रकार(हरू)',
'Import Points of Interest': 'आयात रूचीको बुँदा',
'Import Policies & Strategies': 'आयात नियम तथा उद्देश्य',
'Import Project Organizations': 'आयात परियोजना संस्था',
'Import Projects': 'आयात परियोजना',
'Import Red Cross & Red Crescent National Societies': 'आयात रेड क्रस तथा रेड क्रिसेन्ट राष्ट्रिय सोसाइटि',
'Import Resource Types': 'आयात स्रोत प्रकार(हरू)',
'Import Resources': 'आयात स्रोत',
'Import Sector data': 'आयात क्षेत्र आँकडा',
'Import Service data': 'आयात सेवा आँकडा',
'Import Services': 'आयात सेवा',
'Import Staff': 'कर्मचारीको फाईल आयात',
'Import Tasks': 'आयात काम',
'Import Theme data': 'आयात स्वरूप आँकडा',
'Import Training Events': 'आयात तालिम कार्यक्रम',
'Import Training Participants': 'तालिमका सहभागीहरुका सूची आयात गर्नुहोस्',
'Import Volunteer Cluster Positions': 'आयात स्वयम्-सेवक समूह पद',
'Import Volunteer Cluster Types': 'आयात स्वयम्-सेवक समूह प्रकार',
'Import Volunteer Clusters': 'आयात स्वयम्-सेवक समूह',
'Import Volunteers': 'आयात स्वयम्-सेवक',
'Improved Production Techniques': 'सुधारिएको उत्पादन उपाय',
'In error': 'गल्तीमा',
'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': '%(name_short)s भित्रबाट खुलासडकनक्सा आँकडा परिवर्तन गर्न सक्नको निम्ति, तपाईंले खुलासडकनक्सा सर्भरमा एउटा एकाउन्ट दर्ता गर्नु पर्दछ ।',
'Inactive': 'निस्कृय',
'InBox': 'इनबक्स',
'Incident': 'घटनाहरु',
'Incident Categories': 'घटना प्रकारहरू',
'Incident Reports': 'घटनाहरुको प्रतिवेदन ',
'Incidents': 'घटनाहरु',
'Incorrect parameters': 'गलत प्यारामिटर(हरू)',
'Infant (0-1)': 'नवालक (०-१)',
'Infant and Young Child Feeding': 'नवालक र ठूलो बच्चा खुवाउने',
'Information Management': 'सूचना व्यवस्थापन',
'Information Technology': 'सूचना प्रविधि',
'Infrastructure Development': 'पूर्वाधार विकास',
'Inherited?': 'उत्तराधिकारमा प्राप्त?',
'Initials': 'नामका सुरुका अक्षरहरू',
'injured': 'घाइते',
'input': 'इनपुट',
'Insect Infestation': 'किराबाट हुने संक्रमण',
'Installation of Rainwater Harvesting Systems': 'वर्षा पानि बटुल्ने प्रकृया(हरू) इन्सटलेसन्',
'Instructor': 'सिकाउने',
'insufficient number of pages provided': 'अपर्याप्त पृष्ठ संख्या प्रदान',
'Insufficient Privileges': 'अपर्याप्त अधिकार(हरू)',
'Insufficient vars: Need module, resource, jresource, instance': 'अपर्याप्त भेरिएबलहरू: module, resource, jresource, instance आवश्यक',
'Insurance ': 'बिमा',
'Integrity error: record can not be deleted while it is referenced by other records': 'अखण्डता गल्ती: अन्य विवरणहरूले उल्लेख गरेको अवस्थामा विवरण हटाउन सकिँदैन ।',
'Intergovernmental': 'सरकारको आन्तरिक',
'Invalid data: record %(id)s not accessible in table %(table)s': 'अमान्य आँकडा: तालिकामा विवरण %(id)s पहुँच हुन नसक्ने %(table)s',
'Invalid form (re-opened in another window?)': 'अमान्य फारम (अर्को विण्डोमा पुन खुला गरिएको छ?)',
'Invalid Location!': 'अमान्य स्थान!',
'Invalid phone number': 'अमान्य फोन नम्बर',
'Invalid phone number!': 'अमान्य फोन नम्बर!',
'Invalid request': 'अमान्य अनुरोध',
'Invalid Site!': 'अमान्य क्षेत्र!',
'Invalid source': 'अमान्य स्रोत',
'Inventory': 'लेखा विवरण',
'Inventory Items': 'लेखा विवरण वस्तु(हरू)',
'Irrigation and Watershed Management': 'सिँचाई र पानि बाँडफाँड ब्यबस्थापन',
'Is editing level L%d locations allowed?': 'के परिवर्तन स्तर L%d स्थान(हरू) अनुमति दिइएको हो?',
'Is this a strict hierarchy?': 'के यो कडा बनावट हो?',
'Issuing Authority': 'जारी गर्ने निकाय',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'सकृय भएको स्थानहरू मात्र होइन, यसले परियोजना क्षेत्रको पहुँचमा उपलब्ध गराइएको क्षेत्रको जानकारि पनि लिँदछ ।',
'IT Telecom': 'आइ.टि. टेलिकम',
'Item': 'वस्तु',
'Item Categories': 'वस्तु प्रकारहरू',
'Items': 'वस्तु(हरू)',
'Jewish': 'यहुदि',
'JNAP Priorities': 'जे.एन.ए.पि. प्राथमिकताहरू',
'JNAP-1: Strategic Area 1: Governance': 'जे.एन.ए.पि.-१: लक्षात्मक क्षेत्र १: शासन',
'JNAP-2: Strategic Area 2: Monitoring': 'जे.एन.ए.पि.-२: लक्षात्मक क्षेत्र २: अनुगमन',
'JNAP-3: Strategic Area 3: Disaster Management': 'जे.एन.ए.पि.-३: लक्षात्मक क्षेत्र ३: प्रकोप ब्यबस्थापन',
'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation': 'जे.एन.ए.पि.-४: लक्षात्मक क्षेत्र ४: खतरा न्यूनिकरण र मौसम परिवर्तन लागु गर्ने कार्य',
'Job added': 'काम संचित गरियो',
'Job deleted': 'काम हटाइयो',
'Job Schedule': 'काम तालिका',
'Job Title': 'पद',
'Job Title added': 'पद संचित गरियो',
'Job Title Catalog': 'पदहरुको विवरण क्याटलग',
'Job Title deleted': 'पद हटाइयो',
'Job Title Details': 'पद विवरण',
'Job Title updated': 'पद परिमार्जन गरियो',
'Job Titles': 'पदहरू',
'Job updated': 'काम परिमार्जन गरियो',
'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only': ' प्रकोप खतरा ब्यबस्थापन र मौसम परिवर्तन लागु गर्ने कार्यको लागि संयुक्त राष्ट्रिय कार्य योजना । कुक आइस्ल्याण्डको लागि मात्र लागु हुने ।',
'Journal': 'लेख',
'Journal entry added': 'लेख प्रवेश संचित गरियो',
'Journal entry deleted': 'लेख प्रवेश हटाइयो',
'Journal Entry Details': 'लेख प्रवेश विवरण',
'Journal entry updated': 'लेख प्रवेश परिमार्जन गरियो',
'JS Layer': 'जे.एस. तह',
'Keep Duplicate': 'नक्कल प्रति राख्नुहोस्',
'Keep Original': 'सक्कल प्रति राख्नुहोस्',
'Key': 'चाबि',
'Key Value pairs': 'चाबि महत्व जोडी(हरू)',
'Keyword': 'मुख्यशब्द',
'Keyword Added': 'मुख्यशब्द संचित गरियो',
'Keyword Deleted': 'मुख्यशब्द हटाइयो',
'Keyword Updated': 'मुख्यशब्द परिमार्जन गरियो',
'Keywords': 'मुख्यशब्द(हरू)',
'Kit': 'किट',
'KML Layer': 'के.एम.एल. तह',
'Knowledge Management': 'ज्ञान व्यवस्थापन',
'Land Slide': 'पहिरो',
'Language': 'भाषा',
'Last': 'अन्तिम',
'Last Checked': 'अन्तिम जाँच',
'Last Contacted': 'अन्तिममा सम्पर्क गरिएको',
'Last known location': 'अन्तिम थाहा भएको स्थान',
"Last Month's Work": 'अन्तिम महिनाको काम',
'Last Name': 'अन्तिम नाम',
'Last run': 'अन्तिम प्रयोग',
'Last status': 'अन्तिम अवस्था',
'Last updated': 'अन्तिम परिमार्जन गरियो ',
"Last Week's Work": 'अन्तिम हप्ताको काम',
'Latitude': 'अक्षांश',
'Latitude & Longitude': 'अक्षांश र देशान्तर',
'Latitude and Longitude are required': 'अक्षांश र देशान्तर आवश्यक पर्ने',
'Latitude is Invalid!': 'अक्षांश अमान्य!',
'Latitude is North - South (Up-Down).': 'अक्षांश उत्तर - दक्षिण (माथी-तल)।',
'Latitude is North-South (Up-Down).': 'अक्षांश उत्तर-दक्षिण (माथी-तल)।',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'इक्वेटरमा अक्षांश सुन्य र उत्तरी गोलार्धमा सकरात्मक र दक्षिणि गोलार्धमा नकरात्मक',
'Latitude must be between -90 and 90.': 'अक्षांश -९0 र ९0 मध्येमा हुनुपर्छ ।',
'Latitude of far northern end of the region of interest.': 'उत्तरको अन्त्य रूचीको क्षेत्रको अक्षांश',
'Latitude of far southern end of the region of interest.': 'दक्षिणको अन्त्य रूचीको क्षेत्रको अक्षांश',
'Latitude of Map Center': 'मध्य नक्साको अक्षांश ',
'Latitude should be between': 'अक्षांश मध्ये हुनुपर्छ',
'Latrine Construction': 'चर्पि निर्माण',
'Layer': 'तह',
'Layer added': 'तह संचित गरियो',
'Layer deleted': 'तह हटाइयो',
'Layer Details': 'तह विवरण',
'Layer has been Disabled': 'तह निस्कृय गरियो',
'Layer has been Enabled': 'तह सकृय गरियो',
'Layer Name': 'तह नाम',
'Layer Properties': 'तह प्रोपर्टिज्',
'Layer removed from Symbology': 'तह चिन्हताबाट हटाइयो',
'Layer updated': 'तह परिमार्जन गरियो',
'Layers': 'तह(हरू)',
'Lead Implementer': 'मुख्य लागुकर्ता',
'Lead Implementer for this project is already set, please choose another role.': 'यो परियोजनाको मुख्य लागुकर्ता पहिले नै राखिएको छ, कृपय अर्को भूमिका छान्नुहोस्',
'Leader': 'अगुवा',
'Left-side is fully transparent (0), right-side is opaque (1.0).': 'वायाँ-तर्फ पूर्ण छर्लङ्ग छ(0), दायाँ-तर्फ छर्लङ्ग छैन (१.0)।',
'Legal Approvals': 'कानूनी प्रमाणिकरणहरू',
'Legend': 'संकेत सूची',
'Legend URL': 'संकेत सूची यू.आर.एल.',
'less': 'थोरै',
'Less Options': 'कम्ति विकल्पहरू',
'Level': 'स्तर',
"Level is higher than parent's": 'परिवारको भन्दा स्तर माथि',
'Level of Award': 'पुरस्कारको स्तर',
'Level of competency this person has with this skill.': 'यो ब्यक्तिसँग भएको सिपको आधारमा प्रतिस्पर्धा को स्तर ।',
'License Number': 'अनुमति-पत्र संख्या',
'light': 'लाइट',
'Link to this result': 'यो नतिजामा जोड्नुहोस्',
'List': 'तालिका',
'List %(site_label)s Status': '%(site_label)s अवस्थालाई तालिकामा राख्नुहोस्',
'List Activities': 'कृयाकलापहरूलाई तालिकामा राख्नुहोस्',
'List Activity Organizations': 'कृयाकलाप ब्यबस्थानहरूलाई तालिकामा राख्नुहोस्',
'List Activity Types': 'कृयाकलाप प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Addresses': 'ठेगाना(हरू)लाई तालिकामा राख्नुहोस्',
'List Affiliations': 'स्वीकृती(हरू)लाई तालिकामा राख्नुहोस्',
'List All': 'सबैलाई तालिकामा राख्नुहोस्',
'List All Community Contacts': 'सम्पूर्ण समुदाय सम्पर्कहरूलाई तालिकामा राख्नुहोस्',
'List Annual Budgets': 'वार्षिक बजेटहरूलाई तालिकामा राख्नुहोस्',
'List Awards': 'पुरस्कारहरूलाई तालिकामा राख्नुहोस्',
'List Beneficiaries': 'भागिदारहरूलाई तालिकामा राख्नुहोस्',
'List Beneficiary Types': 'भागिदार प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Branch Organizations': 'शाखा संस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Campaigns': 'क्याम्पिन(हरू)लाई तालिकामा राख्नुहोस्',
'List Certificates': 'प्रमाण-पत्र(हरू)लाई तालिकामा राख्नुहोस्',
'List Certifications': 'प्रमाणिकरण(हरू)लाई तालिकामा राख्नुहोस्',
'List Clusters': 'समूह(हरू)लाई तालिकामा राख्नुहोस्',
'List Coalitions': 'संस्थाहरूलाई तालिकामा राख्नुहोस्',
'List Communities': 'समुदाय(हरू)लाई तालिकामा राख्नुहोस्',
'List Competency Ratings': 'प्रतिस्पर्धा स्तर(हरू)लाई तालिकामा राख्नुहोस्',
'List Contact Information': 'सम्पर्क जानकारीलाई तालिकामा राख्नुहोस्',
'List Contacts': 'सम्पर्क(हरू)लाई तालिकामा राख्नुहोस्',
'List Course Certificates': 'पाठ्यक्रम प्रमाण-पत्र(हरू)लाई तालिकामा राख्नुहोस्',
'List Courses': 'पाठ्यक्रम(हरू)लाई तालिकामा राख्नुहोस्',
'List Credentials': 'कागजात(हरू)लाई तालिकामा राख्नुहोस्',
'List Data in Theme Layer': 'स्वरूप तहको आँकडालाई तालिकामा राख्नुहोस्',
'List Departments': 'मन्त्रालय(हरू)लाई तालिकामा राख्नुहोस्',
'List Deployments': 'परियोजनहरूलाई तालिकामा राख्नुहोस्',
'List Donors': 'दाता(हरू)लाई तालिकामा राख्नुहोस्',
'List Education Details': 'शिक्षा विवरणलाई तालिकामा राख्नुहोस्',
'List Facilities': 'सूबिधाहरूलाई तालिकामा राख्नुहोस्',
'List Facility Types': 'सुविधा प्रकारहरूलाई तालिकामा राख्नुहोस्',
'List Feature Layers': 'विशेषता तहज(हरू)लाई तालिकामा राख्नुहोस्',
'List Groups': 'समूह(हरू)लाई तालिकामा राख्नुहोस्',
'List Hazards': 'खतराहरूलाई तालिकामा राख्नुहोस्',
'List Hours': 'समय (घण्टा)लाई तालिकामा राख्नुहोस्',
'List Identities': 'परिचयहरूलाई तालिकामा राख्नुहोस्',
'List Images': 'तस्विर(हरू)लाई तालिकामा राख्नुहोस्',
'List Job Titles': 'पदहरूलाई तालिकामा राख्नुहोस्',
'List Jobs': 'कामहरूलाई तालिकामा राख्नुहोस्',
'List Keywords': 'मुख्यशब्द(हरू)लाई तालिकामा राख्नुहोस्',
'List Layers': 'तह(हरू)लाई तालिकामा राख्नुहोस्',
'List Layers in Profile': 'प्रोफाइलको तहहरूलाई तालिकामा राख्नुहोस्',
'List Layers in Symbology': 'चिन्हताको तहहरूलाई तालिकामा राख्नुहोस्',
'List Location Hierarchies': 'स्थान संरचनाहरूलाई तालिकामा राख्नुहोस्',
'List Locations': 'स्थान(हरू)लाई तालिकामा राख्नुहोस्',
'List Log Entries': 'दर्ता भर्नालाई तालिकामा राख्नुहोस्',
'List Logged Time': 'समय विवरणहरूलाई तालिकामा राख्नुहोस्',
'List Mailing Lists': 'ठेगाना सूचीहरूलाई तालिकामा राख्नुहोस्',
'List Map Profiles': 'नक्सा बनावटहरूलाई तालिकामा राख्नुहोस्',
'List Markers': 'चिन्हहरूलाई तालिकामा राख्नुहोस्',
'List Members': 'सदस्यहरूलाई तालिकामा राख्नुहोस्',
'List Membership Types': 'सदस्यता प्रकारहरूलाई तालिकामा राख्नुहोस्',
'List Memberships': 'सदस्यताहरूलाई तालिकामा राख्नुहोस्',
'List Milestones': 'उद्देश्य(हरू)लाई तालिकामा राख्नुहोस्',
'List Networks': 'नेटवर्क(हरू)लाई तालिकामा राख्नुहोस्',
'List of Appraisals': 'मुल्यांकन(हरू)को तालिका',
'List of Facilities': 'सूबिधाहरूको तालिका',
'List of Professional Experience': 'ब्यबसायिक अनुभवको तालिका',
'List of Roles': 'भूमिका(हरू)को तालिका',
'List Office Types': 'कार्यलय प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Offices': 'कार्यलय(हरू)लाई तालिकामा राख्नुहोस्',
'List Organization Types': 'संस्था प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Organizations': 'संस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Outputs': 'नतिजा(हरू)लाई तालिकामा राख्नुहोस्',
'List Participants': 'सहभागी(हरू)लाई तालिकामा राख्नुहोस्',
'List Partner Organizations': 'साझेदार संस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Persons': 'ब्यक्ति(हरू)लाई तालिकामा राख्नुहोस्',
"List Persons' Details": 'ब्यक्तिहरूको विवरणलाई तालिकामा राख्नुहोस्',
'List PoI Types': 'रूचीको बुँदा प्रकारहरूलाई तालिकामा राख्नुहोस्',
'List Points of Interest': 'रूचीको बुँदालाई तालिकामा राख्नुहोस्',
'List Policies & Strategies': 'नियम तथा उद्देश्य(हरू)लाई तालिकामा राख्नुहोस्',
'List Profiles configured for this Layer': 'यो तहको लागि प्रोफाइलहरूको बनावटलाई तालिकामा राख्नुहोस्',
'List Programs': 'कार्यक्रम(हरू)लाई तालिकामा राख्नुहोस्',
'List Project Organizations': 'परियोजना संस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Projections': 'योजना(हरू)लाई तालिकामा राख्नुहोस्',
'List Projects': 'परियोजना(हरू)लाई तालिकामा राख्नुहोस्',
'List Records': 'विवरण(हरू)लाई तालिकामा राख्नुहोस्',
'List Red Cross & Red Crescent National Societies': 'रेड क्रस र रेड क्रिसेन्ट राष्ट्रिय सोसाइटि(हरू)लाई तालिकामा राख्नुहोस्',
'List Regions': 'क्षेत्र(हरू)लाई तालिकामा राख्नुहोस्',
'List Response Summaries': 'प्रतिकृया संक्षेप(हरू)लाई तालिकामा राख्नुहोस्',
'List Roles': 'भूमिका(हरू)लाई तालिकामा राख्नुहोस्',
'List Rooms': 'कोठा(हरू)लाई तालिकामा राख्नुहोस्',
'List saved searches': 'संचित खोजीहरूको तालिका',
'List Sectors': 'क्षेत्र(हरू)लाई तालिकामा राख्नुहोस्',
'List Services': 'सेवा(हरू)लाई तालिकामा राख्नुहोस्',
'List Skill Equivalences': 'सिप सरह(हरू)लाई तालिकामा राख्नुहोस्',
'List Skill Types': 'सिप प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Skills': 'सिप(हरू)लाई तालिकामा राख्नुहोस्',
'List Staff & Volunteers': 'कर्मचारी तथा स्वयम्-सेवक(हरू)लाई तालिकामा राख्नुहोस्',
'List Staff Assignments': 'कर्मचारी काम(हरू)लाई तालिकामा राख्नुहोस्',
'List Staff Members': 'कर्मचारी सदस्य(हरू)लाई तालिकामा राख्नुहोस्',
'List Statuses': 'अवस्था(हरू)लाई तालिकामा राख्नुहोस्',
'List Symbologies': 'चिन्हताहरूलाई तालिकामा राख्नुहोस्',
'List Symbologies for Layer': 'तहको चिन्हता(हरू)लाई तालिकामा राख्नुहोस्',
'List Tasks': 'काम(हरू)लाई तालिकामा राख्नुहोस्',
'List Teams': 'समूह(हरू)लाई तालिकामा राख्नुहोस्',
'List Themes': 'स्वरूप(हरू)लाई तालिकामा राख्नुहोस्',
'List Training Events': 'तालिम कार्यक्रम(हरू)लाई तालिकामा राख्नुहोस्',
'List Trainings': 'तालिम(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteer Cluster Positions': 'स्वयम्-सेवक समूह पद(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteer Cluster Types': 'स्वयम्-सेवक समूह प्रकार(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteer Clusters': 'स्वयम्-सेवक समूह(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteer Roles': 'स्वयम्-सेवक भूमिका(हरू)लाई तालिकामा राख्नुहोस्',
'List Volunteers': 'स्वयम्-सेवक(हरू)लाई तालिकामा राख्नुहोस्',
'Live Help': 'प्रत्यक्ष सहयोग',
'Livelihood / CTP': 'जिविका / सि.टि.पि.',
'Livelihood Manager': 'जिविका व्यवस्थापक',
'Livelihoods': 'जिविका(हरू)',
'Load': 'लोड गर्नुहोस्',
'Load Cleaned Data into Database': 'स्पष्ट आँकडालाई आँकडा डाटामा लोड गर्नुहोस्',
'Load Raw File into Grid': 'कच्चा फाइललाई ग्रिडमा लोड गर्नुहोस्',
'Loading': 'लोड हुँदैछ',
'Local Currency': 'स्थानिय मुद्रा',
'Local Name': 'स्थानिय नाम',
'Local Names': 'स्थानिय नाम(हरू)',
'Location': 'स्थान',
'Location added': 'स्थान संचित गरियो',
'Location Added': 'स्थान संचित गरियो',
'Location added to Organization': 'संस्थामा स्थान संचित गरियो',
'Location deleted': 'स्थान हटाइयो',
'Location Deleted': 'स्थान हटाइयो',
'Location Detail': 'स्थानको अक्षांश, देशान्तर',
'Location Details': 'स्थान विवरण',
'Location Found': 'स्थान भेटियो',
'Location Group': 'स्थान समूह',
'Location Hierarchies': 'स्थान संरचनाहरू',
'Location Hierarchy': 'स्थान बनावट',
'Location Hierarchy added': 'स्थान बनावट संचित गरियो',
'Location Hierarchy deleted': 'स्थान बनावट हटाइयो',
'Location Hierarchy Level 1 Name': 'स्थान बनावट स्तर १ नाम',
'Location Hierarchy Level 2 Name': 'स्थान बनावट स्तर २ नाम',
'Location Hierarchy Level 3 Name': 'स्थान बनावट स्तर ३ नाम',
'Location Hierarchy Level 4 Name': 'स्थान बनावट स्तर ४ नाम',
'Location Hierarchy Level 5 Name': 'स्थान बनावट स्तर ५ नाम',
'Location Hierarchy updated': 'स्थान बनावट परिमार्जन गरियो',
'Location is Required!': 'स्थान आवश्यक छ!',
'Location needs to have WKT!': 'स्थानमा डब्लु.के.टि. आवश्यक छ!',
'Location NOT Found': 'स्थान भेटिएन',
'Location removed from Organization': 'संस्थाबाट स्थान हटाइयो ',
'Location updated': 'स्थान परिमार्जन गरियो',
'Locations': 'स्थान(हरू)',
'Locations of this level need to have a parent of level': 'यो स्तरको स्थान(हरू) स्तरसँग सम्बन्धित हुन आवस्यक छ',
'Log entry added': 'दर्ताप्रवेश संचित गरियो',
'Log Entry Deleted': 'दर्ताप्रवेश हटाइयो',
'Log Entry Details': 'दर्ताप्रवेश विवरण',
'Log entry updated': 'दर्ताप्रवेश परिमार्जन गरियो',
'Log New Time': 'नयाँ समय दर्ता गर्नुहोस्',
'Log Time Spent': 'खर्च भएको समय दर्ता गर्नुहोस्',
'Logged Time': 'समय तालिका',
'Logged Time Details': 'समय तालिका विवरण',
'Login': 'लग-इन',
'login': 'लगिन',
'Login using Facebook account': 'फेसबुक एकाउन्ट प्रयोग गरि लग-इन गर्नुहोस्',
'Login using Google account': 'गुगल एकाउन्ट प्रयोग गरि लग-इन गर्नुहोस्',
'Login with Facebook': 'फेसबुक एकाउन्टद्वारा लग-इन गर्नुहोस्',
'Login with Google': 'गुगल एकाउन्टद्वारा लग-इन गर्नुहोस्',
'Logistics & Warehouses': 'वन्दोवस्ती र गोदामघर',
'Logo': 'लोगो',
'Logout': 'बाहिरीनु',
'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'संस्थाको लोगो । यो png वा jpeg फाइलमा हुनुपर्नेछ र यो ४00x४00 भन्दा ठूलो हुनुहुँदैन ।',
'long': 'लामो',
'Long Name': 'लामो नाम',
'long>12cm': 'लामो>१२ से.मी.',
'Longitude': 'देशान्तर',
'Longitude is Invalid!': 'देशान्तर अमान्य!',
'Longitude is West - East (sideways).': 'देशान्तर पश्चिम-पूर्व (साइडवेज्).',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'केन्द्रबिन्दुमा (ग्रिन्वीच, बेलायत) देशान्तर सून्य हुन्छ र पूर्व तर्फ, यूरोप र एसियामा सकरात्मक हुन्छ । पश्चिमतर्फ एथ्लान्टिक र अमेरीका तर्फ देशान्तर नकरात्मक हुन्छ ।',
'Longitude must be between -180 and 180.': 'देशान्तर -१८0 र १८0 भित्र हुनुपर्छ',
'Longitude of far eastern end of the region of interest.': 'चाहेको पुर्विय क्षेत्रको देशान्तर',
'Longitude of far western end of the region of interest.': 'चाहेको पश्चिमी क्षेत्रको देशान्तर',
'Longitude of Map Center': 'नक्साको केन्द्रबिन्दुको देशान्तर',
'Longitude should be between': 'देशान्तर को मध्येमा हुनुपर्छ',
'Lost': 'हरायो',
'Lost Password': 'पासवर्ड हरायो',
'Low': 'तल्लो',
'Mailing list': 'ठेगाना तालिका',
'Mailing list added': 'ठेगाना तालिका संचित गरियो',
'Mailing list deleted': 'ठेगाना तालिका हटाइयो',
'Mailing List Details': 'ठेगाना तालिका विवरण',
'Mailing List Name': 'ठेगाना तालिका नाम',
'Mailing list updated': 'ठेगाना तालिका परिमार्जन गरियो',
'Mailing Lists': 'ठेगाना तालिकाहरू',
'Main Duties': 'मुख्य जिम्मेवारी(हरू)',
'Main?': 'मुख्य?',
'Mainstreaming DRR': 'विपद् जोखिम न्यूनीकरण मूलप्रवाहीकरण',
'Major': 'प्रमुख',
'male': 'पुरुष',
'Manage Layers in Catalog': 'क्याटलगको तह ब्यबस्थापन',
'Manage National Society Data': 'राष्ट्रिय सोसाइटीको तथ्यांक व्यवस्थापन',
'Manage Offices Data': 'कार्यालयहरुको तथ्यांक व्यवस्थापन',
'Manage office inventories and assets.': 'कार्यालय सामग्रीहरुको व्यवस्थापन',
'Manage Staff Data': 'कर्मचारीको तथ्यांक ब्यवस्थापन',
'Manage Teams Data': 'समूहको तथ्यांक व्यवस्थापन',
'Manage Your Facilities': 'तपाईंको सूबिधाहरू ब्यबस्थापन गर्नुहोस्',
'Managing material and human resources together to better prepare for future hazards and vulnerabilities.': 'भविष्यमा हुने जोखिम र संकटासन्नताको लागि तयार हुन मानव स्रोत तथा सामग्रीहरुको व्यवस्थापन',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'आवश्यक. जिओ सर्भरमा, यो तह नाम हो । डब्ल्यू.एफ.एस. भित्र क्षमता प्राप्त गर्नुहोस्, चिन्हपछीको नाम विशेषताप्रकारको हो (:)।',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'आवश्यक. सेवामा पहुँचको निम्ति आधारभुत यू.आर.एल. जस्तै, http://host.domain/geoserver/wfs?',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'आवश्यक. सेवामा पहुँचको निम्ति आधारभुत यू.आर.एल. जस्तै, http://host.domain/geoserver/wms?',
'Map': 'नक्सा',
'Map cannot display without prepop data!': 'आँकडाबिना नक्सा देखाउन सकिँदैन !',
'Map Center Latitude': 'नक्सा केन्द्रिय अक्षांश',
'Map Center Longitude': 'नक्सा केन्द्रिय देशान्तर',
'Map Profile': 'नक्सा बनावट',
'Map Profile added': 'नक्सा बनावट संचित गरियो',
'Map Profile deleted': 'नक्सा बनावट हटाइयो',
'Map Profile updated': 'नक्सा बनावट परिमार्जन गरियो',
'Map Profiles': 'नक्सा बनावट(हरू)',
'Map has been copied and set as Default': 'नक्सा कपि गरिएको छ र स्वचलानमा रहेको छ',
'Map has been set as Default': 'नक्सा स्वचलानमा रहेको छ',
'Map is already your Default': 'नक्सा पहिलेनै स्वचलानमा रहेको छ',
'Map not available: Cannot write projection file - %s': 'नक्सा उपलब्ध छैन: योजना फाइल राख्न सकिँदैन- %s',
'Map not available: No Projection configured': 'नक्सा उपलब्ध छैन: कुनैपनि योजना बनावट छैन',
'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'नक्सा उपलब्ध छैन: योजना %(projection)s मिलेन - कृपया %(path)s मा परिभाषा राख्नुहोस्',
'Map of Communities': 'समुदाय(हरू)को नक्सा',
'Map of Facilities': 'सूबिधाहरूको नक्सा',
'Map of Offices': 'कार्यलयहरूको नक्सा',
'Map of Projects': 'परियोजनाहरूको नक्सा',
'Map of Resources': 'स्रोत(हरू)को नक्सा',
'Map Settings': 'नक्सा सेटिङ(हरू)',
'Map Viewing Client': 'नक्सा हेर्ने प्रयोगकर्ता',
'Map Zoom': 'नक्सा जुम',
'Marital Status': 'वैवाहिक अवस्था',
'Mark as duplicate': 'नक्कल प्रतिकोरूपमा चिन्ह लगाउनुहोस्',
'Marker': 'चिन्ह',
'Marker added': 'चिन्ह संचित गरियो',
'Marker deleted': 'चिन्ह हटाइयो',
'Marker Details': 'चिन्ह विवरण',
'Marker updated': 'चिन्ह परिमार्जन गरियो',
'Markers': 'चिन्ह(हरू)',
'Markets/Marketing Analysis, Linkages and Support': 'बजार(हरू)/बजारिकरण अनुसन्धान, सम्पर्कहरू र सहयोग',
'married': 'विवाहित',
'Matching Records': 'मिलेको विवरण(हरू)',
'Max': 'बढीमा',
'Maximum Extent': 'बढी मात्रा',
'Maximum Location Latitude': 'बढी स्थान अक्षांश',
'Maximum Location Longitude': 'बढी स्थान देशान्तर',
'Maximum must be greater than minimum': 'बढि, कम्तिभन्दा धेरै हुनुपर्छ',
'Measure Area: Click the points around the polygon & end with a double-click': 'नाप क्षेत्र: बहुभुजा वरिपरिको बिन्दुहरूमा क्लिक गर्नुहोस् र दूइपटक क्लीक गरेर अन्त्य गर्नुहोस्',
'Measure Length: Click the points along the path & end with a double-click': 'नाप लम्बाइ: बाटोको वरिपरिको बिन्दुहरूमा क्लिक गर्नुहोस् र दूइपटक क्लीक गरेर अन्त्य गर्नुहोस्',
'Medical Conditions': 'मेडिकल अवस्था(हरू)',
'Medical Services': 'चिकित्सा सेवा',
'Medical Supplies and Equipment': 'मेडिकल पुर्ती(हरू) र सामाग्री',
'Media': 'सञ्चार माध्यम',
'medium': 'मध्य',
'Medium': 'मध्य',
'medium<12cm': 'मध्य<१२cm',
'Member': 'सदस्य',
'Member added': 'सदस्य संचित गरियो',
'Member deleted': 'सदस्य हटाइयो',
'Member Details': 'सदस्य विवरण',
'Member ID': 'सदस्य आइ.डि.',
'Member Organizations': 'सदस्य संस्थाहरू',
'Member updated': 'सदस्य परिमार्जन गरियो',
'Members': 'सदस्यहरू',
'Membership': 'सदस्यता',
'Membership added': 'सदस्यता संचित गरियो',
'Membership Approved': 'सदस्यता स्वीकृत गरियो',
'Membership deleted': 'सदस्यता हटाइयो',
'Membership Details': 'सदस्यता विवरण',
'Membership Fee': 'सदस्यता शुल्क',
'Membership Type added': 'सदस्यता प्रकार संचित गरियो',
'Membership Type deleted': 'सदस्यता प्रकार हटाइयो',
'Membership Type Details': 'सदस्यता प्रकार विवरण',
'Membership Type updated': 'सदस्यता प्रकार परिमार्जन गरियो',
'Membership Types': 'सदस्यता प्रकार(हरू)',
'Membership updated': 'सदस्यता परिमार्जन गरियो',
'Memberships': 'सदस्यता(हरू)',
'Menu': 'मेनु',
'Merge': 'एकै गर्नुहोस्',
'Merge records': 'विवरण(हरू) एकै गर्नुहोस्',
'Message': 'सन्देश',
'Method disabled': 'शैली निस्कृय गरियो',
'MGRS Layer': 'एम.जि.आर.एस. तह',
'Middle Name': 'बीचको नाम',
'Milestone': 'उद्देश्य',
'Milestone Added': 'उद्देश्य संचित गरियो',
'Milestone Deleted': 'उद्देश्य हटाइयो',
'Milestone Details': 'उद्देश्य विवरण',
'Milestone Updated': 'उद्देश्य परिमार्जन गरियो',
'Milestones': 'उद्देश्य(हरू)',
'Military': 'सैनिक',
'Min': 'कम्ति',
'Minimum Location Latitude': 'कम्ति स्थान अक्षांश',
'Minimum Location Longitude': 'कम्ति स्थान देशान्तर',
'Minute': 'मिनेट',
'Minutes must be a number.': 'मिनेट संख्यामा नै हुनुपर्छ ।',
'Minutes must be less than 60.': 'मिनेट ६0 भन्दा कम हुनुपर्छ ।',
'Missing': 'हराइरहेको',
'missing': 'हराएको',
'Mission': 'मिसन',
'Missions': 'मिसन(हरू)',
'Mobile': 'मोबाइल',
'Mobile Health Units': 'मोबाइल स्वास्थ्य इकाई(हरू)',
'Mobile Phone': 'मोबाइल फोन',
'Mobile Phone Number': 'मोबाइल फोन नम्बर',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'विशेषता परिवर्तन: परिवर्तन गर्न चाहानुभएको फारमलाई छान्नुहोस् र बिन्दुमध्येबाट एउटालाई घिस्याएर तपाईँले चाहे अनुसार राख्नुहोस् ।',
'mongoloid': 'मंगोलोइड',
'Monitoring and Evaluation': 'अनुगमन तथा मूल्यांकन',
'Month': 'महिना',
'Monthly': 'महिनावरी',
'more': 'थप',
'More Info': 'थप जानकारी',
'More Options': 'थप विकल्प(हरू)',
'more...': 'थप...',
'Morgue': 'मुर्दाघर',
'Moustache': 'जुँगा',
'Move Feature: Drag feature to desired location': 'विशेषता सार्नुहोस्: विशेषतालाई चाहेको स्थानमा घिसार्नुहोस्',
'Multiple': 'बहुमुखी',
'Muslim': 'मुस्लिम',
'Must a location have a parent location?': 'स्थानको पारिवारिक स्थान हुनुपर्छ?',
'My Logged Hours': 'मेरो समय (घण्टा) तालिका',
'My Maps': 'मेरो नक्साहरू',
'My Open Tasks': 'मेरो खुला कामहरू',
'My Profile': 'मेरो प्रोफाइल',
'My Tasks': 'मेरो कामहरू',
'Name': 'नाम',
'Name and/or ID': 'नाम र/वा आइ.डि.',
'Name field is required!': 'नाम क्षेत्र आवश्यक छ!',
'Name of a programme or another project which this project is implemented as part of': 'कार्यक्रमको नाम वा अर्को परियोजना जस्मा यो परियोजना एउटा भागको रूपमा समावेस छ',
'Name of Award': 'पुरस्कारको नाम',
'Name of Father': 'बाबुको नाम',
'Name of Institute': 'शैक्षिक संस्थाको नाम',
'Name of Map': 'नक्साको नाम',
'Name of Mother': 'आमाको नाम',
'Name of the person in local language and script (optional).': 'स्थानिय भाषा तथा लिपिमा ब्यक्तिको नाम (वैकल्पिक) ।',
'National': 'राष्ट्रिय',
'National ID Card': 'राष्ट्रिय आइ.डि. कार्ड',
'National Societies': 'राष्ट्रिय सोसाइटी(हरू)',
'National Society': 'राष्ट्रिय सोसाइटी',
'National Society / Branch': 'राष्ट्रिय सोसाइटी / शाखा',
'National Society added': 'राष्ट्रिय सोसाइटी संचित गरियो',
'National Society deleted': 'राष्ट्रिय सोसाइटी हटाइयो',
'National Society Details': 'राष्ट्रिय सोसाइटी विवरण',
'National Society updated': 'राष्ट्रिय सोसाइटी परिमार्जन गरियो',
'Nationality': 'राष्ट्रियता',
'Nationality of the person.': 'ब्यक्तिको राष्ट्रियता',
'NDRT (National Disaster Response Teams)': 'राष्ट्रिय विपद् प्रतिकार्य समूहहरु',
"Need a 'url' argument!": "यू.आर.एल.' को विश्लेषण !",
'Needs': 'आवश्यकताहरू',
'negroid': 'नेग्रोइड',
'Network added': 'नेटवर्क संचित गरियो',
'Network Details': 'नेटवर्क विवरण',
'Network removed': 'नेटवर्क हटाइयो',
'Network updated': 'नेटवर्क परिमार्जन गरियो',
'Networks': 'नेटवर्क(हरू)',
'Never': 'कहिल्यै पनि होइन',
'New': 'नयाँ',
'new ACL': 'नयाँ एसिएल',
'New Annual Budget created': 'नयाँ वार्षिक बजेट बनाइयो',
'New Deployment': 'नयाँ परियोजन',
'New Entry': 'नयाँ प्रवेश',
'New Hazard': 'नयाँ खतरा',
'New Location': 'नयाँ स्थान',
'New Organization': 'नयाँ संस्था',
'New Output': 'नयाँ नतिजा',
'New Post': 'नयाँ लेख',
'New Records': 'नयाँ विवरण(हरू)',
'New Role': 'नयाँ भूमिका',
'New Sector': 'नयाँ क्षेत्र',
'New Service': 'नयाँ सेवा',
'New Theme': 'नयाँ स्वरूप',
'New updates are available.': 'नयाँ परिमार्जनहरू उपलब्ध छन्',
'News': 'समाचारहरू',
'Next': 'अर्को',
'Next run': 'अर्को रन',
'Next View': 'अर्को दृश्य',
'NGO': 'गैर-सरकारी संस्था',
'no': 'छैन/होइन',
'No': 'छैन/हुँदैन/पर्दैन',
'No access to this record!': 'यो विवरणमा कुनै पहुँच छैन!',
'No Activities Found': 'कुनै कृयाकलापहरू प्राप्त भएन',
'No Activity Organizations Found': 'कुनैपनि कृयाकलाप ब्यबस्थापन प्राप्त भएन',
'No Activity Types Found': 'कुनैपनि कृयाकलाप प्रकारहरू प्राप्त भएन',
'No Activity Types found for this Activity': 'यसको लागि कुनैपनि कृयाकलाप प्रकारहरू प्राप्त भएन',
'No Activity Types found for this Project Location': 'यो परियोजना स्थानको लागि कुनैपनि कृयाकलाप प्रकारहरू प्राप्त भएन',
'No Affiliations defined': 'कुनैपनि स्वीकृती परिभाषित गर्न सकिएन',
'No annual budgets found': 'कुनैपनि वार्षिक बजेट(हरू) प्राप्त भएन',
'No Appraisals found': 'कुनैपनि मुल्यांकनहरू प्राप्त भएन',
'No Awards found': 'कुनैपनि पुरस्कारहरू प्राप्त भएन',
'No Base Layer': 'कुनैपनि आधारभुत तह छैन',
'No Beneficiaries Found': 'कुनैपनि भागिदारहरू प्राप्त भएन',
'No Beneficiary Types Found': 'कुनैपनि भागिदार प्रकारहरू प्राप्त भएन',
'No Branch Organizations currently registered': 'कुनैपनि शाखा संस्थाहरू हाल दर्ता गरिएको छैन',
'No Campaigns Found': 'कुनैपनि क्याम्पिनहरू प्राप्त भएन',
'No Clusters currently registered': 'कुनैपनि समूहहरू हाल दर्ता गरिएको छैन',
'No Coalitions currently recorded': 'हाल कुनैपनि संस्थाहरूको विवरण राखिएको छैन',
'No Communities Found': 'कुनैपनि समुदाय(हरू) प्राप्त भएन',
'No contact information available': 'कुनैपनि सम्पर्क जानकारी उपलब्ध छैनन्',
'No contact method found': 'कुनैपनि सम्पर्क शैली प्राप्त भएन',
'No Contacts currently registered': 'हाल कुनैपनि सम्पर्कहरू दर्ता गरिएको छैन',
'No Contacts Found': 'कुनैपनि सम्पर्कहरू प्राप्त भएन',
'No data available': 'कुनैपनि आँकडा उपलब्ध छैन',
'No data available in table': 'तालिकामा कुनैपनि आँकडा उपलब्ध छैन',
'No Data currently defined for this Theme Layer': 'यो स्वरूप तहको लागि हाल कुनैपनि आँकडाहरू परिभाषित गरिएका छैनन्',
'No Deployments currently registered': 'हाल कुनैपनि परियोजनाहरू दर्ता गरिएको छैन',
'No Donors currently registered': 'हाल कुनैपनि दाताहरू दर्ता गरिएको छैन',
'No education details currently registered': 'हाल कुनैपनि शिक्षा विवरण हाल दर्ता गरिएको छैन',
'No entries currently available': 'हाल कुनैपनि डाटा छैन',
'No entries found': 'कुनैपनि डाटा प्राप्त भएन',
'No entry available': 'कुनैपनि प्रवेश उपलब्ध छैनन्',
'No Facilities currently registered': 'हाल कुनैपनि सूबिधाहरू दर्ता गरिएको छैन',
'No Facility Types currently registered': 'हाल कुनैपनि सुविधा प्रकारहरू दर्ता गरिएको छैन',
'No Feature Layers currently defined': 'हाल कुनैपनि विशेषता तहहरू हाल परिभाषित गरिएको छैन',
'No forms to the corresponding resource have been downloaded yet.': 'अहिलेसम्म सम्बन्धित स्रोतको कुनैपनि फारमहरू डाउनलोड गरिएको छैन',
'No further users can be assigned.': 'थप प्रयोगकर्ता समावेस गर्न सकिँदैन',
'No Groups currently registered': 'हाल कुनैपनि समूहहरू हाल दर्ता गरिएको छैन',
'No Hazards currently registered': 'हाल कुनैपनि खतराहरू हाल दर्ता गरिएको छैन',
'No Hazards found for this Project': 'हाल यस परियोजनाको लागि कुनैपनि खतराहरू छैनन्',
'No Identities currently registered': 'हाल कुनैपनि परिचयहरू दर्ता गरिएको छैन',
'No Images currently registered': 'हाल कुनैपनि तस्विरहरू दर्ता गरिएको छैन',
'No jobs configured': 'कुनैपनि कामहरू मिलाइएको छैन',
'No jobs configured yet': 'हालसम्म कुनैपनि कामहरू मिलाइएको छैन',
'No Keywords Found': 'कुनैपनि मुख्यशब्द(हरू) प्राप्त भएन ',
'No Layers currently configured in this Profile': 'हाल यो प्रोफाइलको लागि हाल कुनैपनि तहहरू बनावट रहेको छैन',
'No Layers currently defined': 'हाल कुनैपनि तहहरू परिभाषित गरिएको छैन',
'No Layers currently defined in this Symbology': 'यो चिन्हताको लागि हाल कुनैपनि तहहरू परिभाषित गरिएको छैन',
'No Location Hierarchies currently defined': 'हाल कुनैपनि स्थान संरचनाहरू परिभाषित गरिएको छैन',
'No location information defined!': 'कुनैपनि स्थान जानकारी परिभाषित गरिएको छैन!',
'No Locations currently available': 'हाल कुनैपनि स्थानहरू उपलब्ध हुन सकेन',
'No Locations Found': 'कुनैपनि स्थानहरू प्राप्त भएन',
'No Locations found for this Organization': 'यो संस्थाको लागि कुनैपनि स्थानहरू प्राप्त भएन',
'No Mailing List currently established': 'हाल कुनैपनि ठेगाना तालिका राखिएको छैन',
'No Map Profiles currently defined': 'हाल कुनैपनि नक्सा बनावटहरू परिभाषित गरिएको छैन',
'No Markers currently available': 'हाल कुनैपनि चिन्हहरू उपलब्ध छैण',
'No match': 'कुनै मिलेन',
'No matching element found in the data source': 'आँकडा स्रोतमा कुनैपनि मिल्ने कुरा प्राप्त भएको छैन',
'No Matching Records': 'कुनैपनि मिल्दो विवरणहरू छैनन्',
'No matching records found': 'कुनैपनि मिल्ने विवरण(हरू) प्राप्त भएको छैन',
'No Members currently registered': 'हाल कुनैपनि सदस्यहरू दर्ता गरिएको छैन',
'No members currently registered': 'हाल कुनैपनि सदस्यहरू दर्ता गरिएको छैन',
'No membership types currently registered': 'हाल कुनैपनि सदस्यता प्रकार(हरू) दर्ता गरिएको छैन',
'No Memberships currently registered': 'हाल कुनैपनि सदस्यताहरू दर्ता गरिएको छैन',
'No Milestones Found': 'कुनैपनि उद्देश्यहरू प्राप्त भएन',
'No Networks currently recorded': 'हाल कुनैपनि नेटवर्कहरूका विवरण दिइएको छैन',
'No Office Types currently registered': 'हाल कुनैपनि कार्यलय प्रकारहरू दर्ता गरिएको छैन',
'No Offices currently registered': 'हाल कुनैपनि कार्यलयहरू दर्ता गरिएको छैन',
'No Open Tasks for %(project)s': '%(project)s को लागि हाल कुनैपनि खुला कामहरू छैनन्',
'No options available': 'कुनैपनि विकल्पहरू उपलब्ध छैनन्',
'no options available': 'कुनैपनि विकल्पहरू उपलब्द छैनन्',
'No options currently available': 'हाल कुनैपनि विकल्पहरू उपलब्ध छैनन्',
'No Organization Types currently registered': 'हाल कुनैपनि संस्था प्रकारहरू दर्ता गरिएको छैन',
'No Organizations currently registered': 'हाल कुनैपनि संस्थाहरू दर्ता गरिएको छैन',
'No Organizations for Project(s)': 'परियोजना(हरू)को निम्ति कुनैपनि संस्थाहरू छैनन्',
'No Organizations found for this Policy/Strategy': 'यो नियम/उद्देश्यको निम्ति कुनैपनि संस्था(हरू) प्राप्त भएन',
'No outputs found': 'कुनैपनि नतिजाहरू प्राप्त भएन',
'No Partner Organizations currently registered': 'हाल कुनैपनि साझेदार संस्था(हरू) दर्ता गरिएको छैन',
'No Persons currently registered': 'हाल कुनैपनि ब्यक्तिहरू दर्ता गरिएको छैन',
'No PoI Types currently available': 'हाल कुनैपनि रूचीको बुँदा प्रकारहरू उपलब्ध छैनन्',
'No Points of Interest currently available': 'हाल कुनैपनि रूचीको बुँदा उपलब्ध छैनन्',
'No PoIs available.': 'कुनै रूचीको बुँदाहरू उपलब्ध छैनन्',
'No Policies or Strategies found': 'कुनैपनि नियम तथा लक्षहरू प्राप्त भएन',
'No Presence Log Entries currently registered': 'हाल कुनैपनि उपस्थिति दर्ताहरू दर्ता गरिएको छैन',
'No Professional Experience found': 'कुनैपनि ब्यबसायिक अनुभव प्राप्त भएन',
'No Profiles currently have Configurations for this Layer': 'यो तहको लागि हाल कुनैपनि प्रोफाइलहरूको बनावट छैनन्',
'No Projections currently defined': 'हाल कुनैपनि योजनाहरू परिभाषित गरिएको छैन',
'No Projects currently registered': 'हाल कुनैपनि परियोजनाहरू दर्ता गरिएको छैन',
'No Ratings for Skill Type': 'सिप प्रकारको लागि कुनैपनि स्तरहरू छैनन्',
'No Records currently available': 'हाल कुनैपनि विवरणहरू उपलब्ध छैनन्',
'No records found': 'अभिलेख उपलब्ध नभएको',
'No records in this resource': 'यो स्रोतमा कुनैपनि विवरणहरू छैनन्',
'No records in this resource. Add one more records manually and then retry.': 'यो स्रोतमा कुनैपनि विवरण छैनन् । विस्तृतरूपमा थप विवरण राख्नुहोस् र त्यसपछि पुन: प्रयास गर्नुहोस्',
'No records to review': 'पुर्न अवलोकनको लागि कुनै विवरण(हरू) छैनन्',
'No Red Cross & Red Crescent National Societies currently registered': 'हाल कुनैपनि रेड क्रस तथा रेड क्रिसेन्ट राष्ट्रिय सोसाइटि(हरू) दर्ता गरिएको छैन',
'No Regions currently registered': 'हाल कुनैपनि क्षेत्रहरू दर्ता गरिएको छैन',
'No report specified.': 'कुनैपनि प्रतिवेदन उल्लेख गरिएको छैन',
'No Resource Types defined': 'कुनैपनि स्रोत प्रकारहहरू परिभाषित गरिएको छैन',
'No Resources in Inventory': 'लेखा विवरणमा कुनैपनि स्रोतहरू छैनन्',
'No Response': 'कुनै प्रतिकृया छैन',
'No Response Summaries Found': 'कुनैपनि प्रतिकृया संक्षिप्त प्राप्त भएन',
'No Restrictions': 'कुनैपनि बाधाहरू छैनन्',
'No role to delete': 'हटाउनको लागि कुनै भूमिका छैनन्',
'No roles currently assigned to this user.': 'यस प्रयोगकर्ताको लागि हाल कुनैपनि भूमिकाहरू मिलाइएको छैन ।',
'No Roles defined': 'कुनैपनि भूमिकाहरू परिभाषित गरिएका छैनन्',
'No Rooms currently registered': 'हाल कुनैपनि कोठाहरू दर्ता गरिएको छैनन्',
'No Search saved': 'कुनैपनि खोजी संचित भएको छैन',
'No Sectors currently registered': 'हाल कुनैपनि क्षेत्रहरू दर्ता गरिएको छैन',
'No Sectors found for this Organization': 'यो संस्थाको लागि कुनैपनि क्षेत्र(हरू) प्राप्त भएन',
'No Sectors found for this Project': 'यो परियोजनाको लागि कुनैपनि क्षेत्रहहरू छैनन्',
'No Sectors found for this Theme': 'यो स्वरूपको लागि कुनैपनि क्षेत्रहरू प्राप्त भएन',
'No Services currently registered': 'हाल कुनैपनि सेवाहरू दर्ता गरिएको छैन',
'No Services found for this Organization': 'यो संस्थाको लागि कुनैपनि सेवाहरू प्राप्त भएन',
'No Staff currently registered': 'हाल कुनैपनि कर्मचारी दर्ता गरिएको छैन',
'No staff or volunteers currently registered': 'हाल कुनैपनि कर्मचारी वा स्वयम्-सेवकहरू हाल दर्ता गरिएको छैन',
'No Statuses currently registered': 'हाल कुनैपनि अवस्थाहरू दर्ता गरिएको छैन',
'No Symbologies currently defined': 'हाल कुनैपनि चिन्हताहरू परिभाषित गरिएको छैन',
'No Symbologies currently defined for this Layer': 'यो तहको लागि हाल कुनैपनि चिन्हताहरू परिभाषित गरिएको छैन',
'No Tasks Assigned': 'कुनैपनि कामहरू लगाइएको छैन',
'No tasks currently registered': 'हाल कुनैपनि कामहरू दर्ता गरिएको छैन',
'No Teams currently registered': 'हाल कुनैपनि समूहहरू दर्ता गरिएको छैन',
'No Themes currently registered': 'हाल कुनैपनि स्वरूपहरू दर्ता गरिएको छैन',
'No Themes found for this Activity': 'यो कृयाकलापको लागि कुनैपनि स्वरूप प्राप्त भएन',
'No Themes found for this Project': 'यो परियोजनाको निम्ति कुनैपनि स्वरूपहरू प्राप्त भएन',
'No Themes found for this Project Location': 'यो परियोजना स्थानको लागि कुनैपनि स्वरूपहरू प्राप्त भएन',
'No Time Logged': 'कुनैपनि समय सूची छैन',
'No time stamps found in this resource': 'यस स्रोतको लागि कुनैपनि समय छाप प्राप्त भएन',
'No users with this role at the moment.': 'यो भूमिकामा हाल कुनैपनि प्रयोगकर्ता छैनन्',
"No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": "कुनैपनि यू.टि.सि. समस्या प्राप्त भएन । कृपया तपाईंको 'प्रयोगकर्ता प्रोफाइल' विवरणमा यू.टि.सि. समस्या राख्नुहोस् । उदाहारण यू.टि.सि.+0५३0",
'No Volunteer Cluster Positions': 'कुनैपनि स्वयम्-सेवक समूह पदहरू छैनन्',
'No Volunteer Cluster Types': 'कुनैपनि स्वयम्-सेवक समूह प्रकारहरू छैनन्',
'No Volunteer Clusters': 'कुनैपनि स्वयम्-सेवक समूहहरू छैनन्',
'No Volunteers currently registered': 'हाल कुनैपनि स्वयम्-सेवकहरू दर्ता गरिएको छैन',
'Non-Communicable Diseases': 'नसर्ने रोगहरु',
'none': 'कुनैपनि होइन',
'None': 'कुनैपनि',
'NONE': 'खाली',
'None (no such record)': 'कुनैपनि (कुनैपनि मिल्दो विवरण छैन)',
'None of the above': 'माथिको कुनैपनि होइन',
'Nonexistent or invalid resource': 'अस्थित्वमा नभएको वा अमान्य स्रोत',
'Normal': 'साधारण',
'Normal Job': 'साधारण काम',
'NOT %s AND NOT %s': ' %s होइन र %s होइन',
'NOT %s OR NOT %s': ' %s होइन वा %s होइन',
'Not Authorized': 'स्वीकृती गरिएको छैन',
'Not implemented': 'लागु गरिएको छैन',
'Not installed or incorrectly configured.': 'इन्स्टल गरिएको छैन वा गलत बनावट दिइएको ।',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'याद गर्नुहोस्, यो तालिकाले सकृय स्वयम्-सेवकहरू मात्र देखाउँदछ । दर्ता भएको सबैलाई हेर्नको निम्ति, यहाँबाट खोजी गर्नुहोस् ।',
'Note that when using geowebcache, this can be set in the GWC config.': 'याद गर्नुहोस्, जिओवेवकेच प्रयोग गर्दा, यसलाई जिडब्लुसि बनावटमा राख्न सकिन्छ ।',
'Notification frequency': 'सूचनाको आवृत्ति',
'Notification method': 'सूचना दिने शैली',
'Notify': 'सूचित गर्नुहोस्',
'Number': 'संख्या',
'Number of Activities': 'कृयाकलापहरूको संख्या',
'Number of Beneficiaries': 'भागिदारहरूको संख्या',
'Number of Countries': 'देशहरूको संख्या',
'Number of Deployments': 'परियोजनाहरूको संख्या',
'Number of Disaster Types': 'प्रकोप प्रकारहरूको संख्या',
'Number of Facilities': 'सूबिधाहरूको संख्या',
'Number of Missions': 'मिस्सनहरूको संख्या',
'Number of People Affected': 'प्र्रभावितको संख्या',
'Number of People Dead': 'मृतकको संख्या',
'Number of People Injured': 'घाइतेको संख्या',
'Number of Responses': 'प्रतिकृयाहरूको संख्या',
'Number or Label on the identification tag this person is wearing (if any).': 'ब्यक्तिले लगाइराखेको खण्डमा, परिचयपत्रको संख्या वा स्तर',
'Nutrition': 'पोषण',
'Nutritional Assessments': 'पोषण मूल्यांकन(हरू)',
'Object': 'वस्तु',
'Objectives': 'उद्देश्यहरु',
'Observer': 'निरिक्षणकर्ता',
'obsolete': 'प्रचलनमा नरहेको',
'Obsolete': 'प्रचलनमा नरहेको',
'OCR Form Review': 'ओ.सि.आर. फारम पुर्नअवलोकन',
'OCR module is disabled. Ask the Server Administrator to enable it.': 'ओ.सि.आर. भाग निस्कृय गरियो । यसलाई सकृय गर्नको निम्ति सेवा संचालकलाई सम्पर्क गर्नुहोस् ।',
'OCR review data has been stored into the database successfully.': 'आँकडाआधारभुतमा ओ.सि.आर. पुर्नअवलोकन आँकडा पूर्णरूपले संचित भयो ।',
'OD Coordinator': 'ओ.डि. प्रतिनिधी',
'Office': 'कार्यालय',
'Office added': 'कार्यालय संचित गरियो',
'Office Address': 'कार्यालय ठेगाना',
'Office deleted': 'कार्यालय हटाइयो',
'Office Details': 'कार्यालय विवरण',
'Office Phone': 'कार्यालय फोन',
'Office Type': 'कार्यालय प्रकार',
'Office Type added': 'कार्यालय प्रकार संचित गरियो',
'Office Type deleted': 'कार्यालय प्रकार हटाइयो',
'Office Type Details': 'कार्यालय प्रकार विवरण',
'Office Type updated': 'कार्यालय प्रकार परिमार्जन गरियो',
'Office Types': 'कार्यालय प्रकारहरु',
'Office updated': 'कार्यालय परिमार्जन गरियो',
'Offices': 'कार्यालयहरु',
'Office/Warehouse/Facility': 'कार्यालय, गोदामघर, सुविधा',
'OK': 'हुन्छ',
'on %(date)s': ' %(date)s मा',
'On by default?': 'स्वचलानमा रहेको अनुसार खुला?',
'On Hold': 'होल्डमा राखिएको छ',
'Only showing accessible records!': 'पहुँचमा रहेको विवरणहरू मात्र देखाइएको !',
'Opacity': 'अपारदर्शिता',
'Open': 'खुला',
'Open Chart': 'खुला तालिका',
'Open Incidents': 'खुला घटनाहरु',
'Open Map': 'खुला नक्सा',
'Open recent': 'खुला भर्खरैको',
'Open Report': 'खुला प्रतिवेदन',
'Open Table': 'खुला तालिका',
'Open Tasks for %(project)s': '%(project)sको लागि खुला कामहरू',
'Open Tasks for Project': 'परियोजनाको लागि खुला कामहरू',
'Opening Times': 'खुलाहुने समय(हरू)',
'OpenStreetMap Layer': 'खुलासडकनक्सा तह',
'OpenStreetMap OAuth Consumer Key': 'खुलासडकनक्सा ग्राहक चाबि',
'OpenStreetMap OAuth Consumer Secret': 'खुलासडकनक्सा ग्राहक गोप्यता',
'OpenStreetMap (Humanitarian)': 'खुल्ला सडक नक्सा (मानवीय)',
'OpenStreetMap (MapQuest)': 'खुल्ला सडक नक्सा (म्यापक्वेस्ट)',
'OpenWeatherMap Layer': 'खुला मौसम नक्सा तह',
'Operation not permitted': 'कृयाकलाप अनुमति छैन',
'Optional password for HTTP Basic Authentication.': 'एच.टि.टि.पि. आधार्भूत पुष्टिको लागि वैकल्पिक पासवर्ड ।',
'Optional selection of a background color.': 'पृष्ठभूमिको लागि रंग वैकल्पिक छनौट',
'Optional selection of a MapServer map.': 'नक्सासर्वर नक्साको लागि वैकल्पिक छनौट',
'Optional selection of an alternate style.': 'उल्टो तरिकाको लागि वैकल्पिक छनौट',
'Optional username for HTTP Basic Authentication.': 'एच.टि.टि.पि. आधार्भूत पुष्टिको लागि वैकल्पिक प्रयोगकर्ताको नाम',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': 'वैकल्पिक। जिओ सर्वरमा, काम स्थान, नाम स्थान यू.आर.आइ. (नाम होइन!) । डब्ल्यू.एफ.एस. भित्र क्षमता प्राप्त गर्नुहोस्, काम स्थान चिन्ह अगाडिको भाग विशेषत प्रकारको नाम हो (:)।',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'वैकल्पिक । कुनै वस्तुको नाम, जस्मा समावेश कुरा पप्-अपमा राखिएको एउटा तस्विर फाइलको यू.आर.एल. हो ।',
'Optional. The name of an element whose contents should be put into Popups.': 'वैकल्पिक । कुनै वस्तुको नाम जसमा समावेश कुरा पप्-अपमा राखिएको हुन्छ ।',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "वैकल्पिक । ज्यामिती भागको नाम । पोष्ट जि.आइ.एस. मा यो स्वचलानमा 'the_geom' रहेको हुन्छ ।",
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'वैकल्पिक । योजनाको नाम । जियो सर्वरमा यसको फारम हुन्छ । http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.',
'or': 'वा',
'Organisational Preparedness - Nhq and Branches': 'संस्थागत पुर्वतयारी-एन.एच.क्यू. र शाखाहरू',
'Organization': 'संस्था',
'Organization added': 'संस्था संचित गरियो',
'Organization added to Policy/Strategy': 'नियम/उद्देश्यमा संस्था संचित गरियो ',
'Organization added to Project': 'परियोजनामा संस्था संचित गरियो',
'Organization deleted': 'संस्था हटाइयो',
'Organization Details': 'संस्था विवरण',
'Organization group': 'संस्था समूह',
'Organization removed from Policy/Strategy': 'नियम/उद्देश्यबाट संस्था हटाइयो',
'Organization removed from Project': 'परियोजनाबाट संस्था हटाइयो',
'Organization Type': 'संस्था प्रकार',
'Organization Type added': 'संस्था प्रकार संचित गरियो',
'Organization Type deleted': 'संस्था प्रकार हटाइयो',
'Organization Type Details': 'संस्था प्रकार विवरण',
'Organization Type updated': 'संस्था प्रकार परिमार्जन गरियो',
'Organization Types': 'संस्था प्रकारहरू',
'Organization Units': 'संस्था इकाईहरू',
'Organization updated': 'संस्था परिमार्जन गरियो',
'Organization(s)': 'संस्था(हरू)',
'Organization/Branch': 'संस्था/शाखा',
'Organizational Development': 'संस्थागत विकास',
'Organizations': 'संस्थाहरू',
'Organizations / Teams / Facilities': 'संस्थाहरू/ समूहहरू / सूबिधाहरू',
'Origin': 'उत्पत्ति',
'Original': 'सक्कल प्रति',
'OSM file generation failed!': 'ओ.एस.एम. फाइल प्रकृया असफल !',
'OSM file generation failed: %s': 'ओ.एस.एम. फाइल प्रकृया असफल: %s',
'Other': 'अन्य',
'other': 'अन्य',
'Other Address': 'अन्य ठेगाना',
'Other Details': 'अन्य विवरण',
'Other Users': 'अन्य प्रयोगकर्ताहरू',
'Others': 'अन्यहरू',
'Outcomes, Impact, Challenges': 'नतिजा, प्रभाव, चुनौतीहरू',
'Output': 'नतिजा',
'Output added': 'नतिजा संचित गरियो',
'Output removed': 'नतिजा हटाइयो',
'Output updated': 'नतिजा परिमार्जन गरियो',
'Outputs': 'नतिजाहरू',
'Outreach Staff': 'बाहिर खटाइएको कर्मचारी',
'overdue': 'ढिलो',
'Overlays': 'ओभरलेहरू',
'Owned Records': 'प्राप्त विवरणहरू',
'Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only': 'मौसम परिवर्तनको निम्ति कार्यको निम्ति प्यासिफिक आइर्ल्याण्डहरूको प्रयास् । प्यासिफिक देशहरूको परियोजनाहरूमा मात्र लागु हुने ।',
'Page': 'पृष्ठ',
'paid': 'भुक्तानी भयो',
'Paid': 'भुक्तानी भयो',
'Pan Map: keep the left mouse button pressed and drag the map': 'प्यान नक्सा: वायाँ माउस बटन थिचिराख्नुहोस् र नक्सालाई घिसार्नुहोस् ।',
'Parent': 'परिवार',
"Parent level should be higher than this record's level. Parent level is": 'यो विवरणको स्तर भन्दा परिवारको स्तर माथि हुनुपर्छ । परिवार स्तर, हो',
'Parent needs to be of the correct level': 'परिवार सहि स्तरमा हुन आवश्यक छ',
'Parent needs to be set': 'परिवारलाई राखिनुपर्छ',
'Parent needs to be set for locations of level': 'परिवारलाई स्तरको स्थानको निम्ति राखिनुपर्छ',
'Part of the URL to call to access the Features': 'विशेषताहरूमा पहुँचको निम्ति यू.आर.एल.को भाग',
'Participant': 'सहभागी',
'Participant added': 'सहभागी संचित गरियो',
'Participant deleted': 'सहभागी हटाइयो',
'Participant Details': 'सहभागी विवरण',
'Participant updated': 'सहभागी परिमार्जन गरियो',
'Participants': 'सहभागीहरू',
'Participatory Hygiene Promotion': 'सहभागिमुलक स्वास्थ्य बढुवा',
'Partner': 'साझेदार',
'Partner National Society': 'साझेदार राष्ट्रिय समाज',
'Partner Organization added': 'साझेदार संस्था संचित गरियो',
'Partner Organization deleted': 'साझेदार संस्था हटाइयो',
'Partner Organization Details': 'साझेदार संस्था विवरण',
'Partner Organization updated': 'साझेदार संस्था परिमार्जन गरियो',
'Partner Organizations': 'साझेदार संस्थाहरू',
'Partners': 'साझेदारहरू',
'Partnerships': 'साझोदारी',
'Pass': 'पास',
'Passport': 'पास्पोर्ट',
'Password': 'पासवर्ड',
'PDF File': 'पि.डि.एफ. फाइल',
'Peer Support': 'मित्र सहयोग',
'Pending': 'प्रकृयाको क्रममा रहेको',
'per': 'प्रति',
'Percentage': 'प्रतिशत',
'Performance Rating': 'प्रस्तुति स्तर',
'Permanent Home Address': 'स्थायी गृह ठेगाना',
'Person': 'ब्यक्तिको नाम',
'Person added': 'ब्यक्ति संचित गरियो',
'Person deleted': 'ब्यक्ति हटाइयो',
'Person Details': 'ब्यक्ति विवरण',
'Person details updated': 'ब्यक्ति विवरण परिमार्जन गरियो',
'Person Entity': 'ब्यक्ति अंग',
'Person must be specified!': 'ब्यक्ति उल्लेख हुनैपर्छ!',
'Person or OU': 'ब्यक्ति वा ओ.यू.',
'Person Registry': 'ब्यक्ति दर्ता',
'Person who has actually seen the person/group.': 'ब्यक्ति जसले वास्तबमानै ब्यक्ति/समूहलाई देखेको छ ।',
"Person's Details": 'ब्यक्तिको विवरण',
"Person's Details added": 'ब्यक्तिको विवरण संचित गरियो',
"Person's Details deleted": 'ब्यक्तिको विवरण हटाइयो',
"Person's Details updated": 'ब्यक्तिको विवरण परिमार्जन गरियो',
'Personal': 'ब्यक्तिगत',
'Personal Details': 'ब्यक्तिगत विवरण',
'Personal Profile': 'ब्यक्तिगत प्रोफाइल',
'Persons': 'ब्यक्तिहरू',
"Persons' Details": 'ब्यक्तिको विवरण',
'Philippine Pesos': 'फिलिपिनि पिसोस्',
'Phone': 'फोन',
'Phone #': 'फोन #',
'Phone 1': 'फोन १',
'Phone 2': 'फोन २',
'Phone number is required': 'फोन नम्बर आवश्यक छ',
'Photograph': 'फोटो',
'PIFACC Priorities': 'पि.आइ.एफ.ए.सि.सि. प्राथमिकताहरू',
'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures': 'पि.आइ.एफ.ए.सि.सि.-१: भौतिक लागु, तल्लो तहमा लागु गर्ने कार्य मापदण्डहरू',
'PIFACC-2: Governance and Decision Making': 'पि.आइ.एफ.ए.सि.सि.-२: जाँच र निर्णय',
'PIFACC-3: Improving our understanding of climate change': 'पि.आइ.एफ.ए.सि.सि.-३: मौसम परिवर्तनको बारेमा हाम्रो बुझाइ सुधार गर्नु',
'PIFACC-4: Education, Training and Awareness': 'पि.आइ.एफ.ए.सि.सि.-४: शिक्षा, तालिम र जनचेतना',
'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions': 'पि.आइ.एफ.ए.सि.सि.-५: विश्य ग्रिनहाउस ग्यस इमिसनमा सुधार',
'PIFACC-6: Partnerships and Cooperation': 'पि.आइ.एफ.ए.सि.सि.-६: साझेदार र सहकार्य',
'PIL (Python Image Library) not installed': 'पि.आइ.एल. (पाइथन तस्विर लाइब्रेरी) इन्स्टल भएको छैन',
'PIL (Python Image Library) not installed, images cannot be embedded in the PDF report': 'पि.आइ.एल. (पाइथन तस्विर लाइब्रेरी) इन्स्टल भएको छैन, तस्विरहरू पि.डि.एफ. प्रतिवेदनमा समावेश हुन सक्दैनन्',
'Place of Birth': 'जन्म स्थान',
'Place on Map': 'नक्सामा स्थान',
'Planning and Construction of Drainage Systems ': 'ड्रेनएज प्रकृयाहरूको योजना र निर्माण',
'Please choose a type': 'कृपया एउटा प्रकार छान्नुहोस्',
'Please enter a first name': 'कृपया पहिलो नाम टाइप गर्नुहोस्',
'Please enter a last name': 'कृपया अन्तिम नाम टाइप गर्नुहोस्',
'Please enter a number only': 'संख्यामात्र टाइप गर्नुहोस् ',
'Please enter a valid email address': 'कृपया प्रमाणित इमेल ठेगाना टाइप गर्नुहोस्',
'Please fill this!': 'कृपया यसलाई भर्नुहोस्!',
"Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": 'समस्या देखा पर्ने स्थानको यू.आर.एल.(हरू) सहित सकेसम्म धेरै जानकारी प्रदान गर्नुहोस्, वा नयाँ विशेषतामा जानु चाहानुहुन्छ भने ।',
'Please record Beneficiary according to the reporting needs of your project': 'कृपया तपाईंको परियोजनाको आवश्यकता अनुसार हकदारको विवरण राख्नुहोस्',
'Please Select a Facility': 'कृपया एउटा सुविधा छान्नुहोस्',
'Please select a valid image!': 'कृपया मान्य तस्विर राख्नुहोस्!',
'Please select exactly two records': 'कृपया दूइ विवरणहरू छान्नुहोस्',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'कुनैपनि थप जानकारी विवरण राख्नको निम्ति यो क्षेत्र प्रयोग गर्नुहोस्, विवरण परिमार्जन गरिएको छ भने कृपया विवरण इतिहास प्रदान गर्नुहोस् ।',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'कृपया कुनैपनि थप जानकारी राख्नको निम्ति यो क्षेत्र प्रयोग गर्नुहोस्, जस्तै उसाहिदिको उदाहरण आइ.डि.हरू । विवरण परिमार्जन गरिएको छ भने कृपया विवरण इतिहास प्रदान गर्नुहोस् ।',
'PMER': 'पि.एम.इ.आर.',
'PMER Development': 'पि.एम.इ.आर. विकास',
'PoI': 'रूचीको बुँदा',
'PoI Type added': 'रूचीको बुँदा प्रकार संचित गरियो',
'PoI Type deleted': 'रूचीको बुँदा प्रकार हटाइयो',
'PoI Type Details': 'रूचीको बुँदा प्रकार विवरण',
'PoI Type updated': 'रूचीको बुँदा प्रकार परिमार्जन गरियो',
'PoI Types': 'रूचीको बुँदा प्रकारहरू',
'Point of Interest added': 'रूचीको बुँदा संचित गरियो',
'Point of Interest deleted': 'रूचीको बुँदा हटाइयो',
'Point of Interest Details': 'रूचीको बुँदा विवरण',
'Point of Interest updated': 'रूचीको बुँदा परिमार्जन गरियो',
'Points of Interest': 'रूचीको बुँदा',
'PoIs': 'रूचीको बुँदाहरू',
'PoIs successfully imported.': 'रूचीको बुँदाहरू पूर्णरूपले आयात गरियो',
'Policies & Strategies': 'नियम तथा उद्देश्य(हरू)',
'Policy Development': 'नीति निर्माण',
'Policy or Strategy': 'नियम वा उद्देश्य',
'Policy or Strategy added': 'नियम वा उद्देश्य संचित गरियो',
"Policy or Strategy added, awaiting administrator's approval": 'नियम वा उद्देश्य संचित गरियो, प्रतिक्षित संचालकको प्रमाणिकरण',
'Policy or Strategy deleted': 'नियम वा उद्देश्य हटाइयो',
'Policy or Strategy updated': 'नियम वा उद्देश्य परिमार्जन गरियो',
'Polygon': 'बहुभुजा',
'Poor': 'गरिब',
'Population': 'जनसंख्या',
'Population Density 2010 (Persons per km2)': 'जनघनत्व 2010 (प्रति वर्ग कि.मि. ब्यक्ति)',
'Popup Fields': 'पप्-अप क्षेत्रहरू',
'Popup Label': 'पप्-अप स्तर',
'Position': 'पद',
'Positions': 'पदहरू',
'Post Harvest Storage and Management': 'भावि कटनि भण्डारण र ब्यबस्थापन',
'Postcode': 'पोष्ट कोड',
'Power Supply Type': 'विद्युत आपूर्ति प्रकार',
'Powered by': 'द्वारा संचालित',
'Powered by Sahana Eden': 'साहाना इडेनद्वारा संचालित',
'Preferred Name': 'रूचिको नाम',
'Presence': 'उपस्थिति',
'Presence Condition': 'उपस्थिति अवस्था',
'Presence Log': 'उपस्थिति सूची',
'Previous': 'अघिल्लो',
'Previous View': 'अघिल्लो दृश्य',
'Print': 'प्रिन्ट',
'Priority': 'प्राथमिकता',
'Priority from 1 to 9. 1 is most preferred.': '१ देखी ९ सम्म प्राथमिकता । १ सबैभन्दा रूचाइएको',
'Privacy': 'गोप्यता',
'Private': 'ब्यक्तिगत',
'Private-Public Partnerships': 'निजी-सार्वजनिक साझेदारी(हरू)',
'Procedure': 'प्रकृया',
'Processing': 'कार्य प्रकृया',
'Profession': 'ब्यवसाय',
'Professional Experience': 'ब्यबसायिक अनुभव',
'Professional Experience added': 'ब्यबसायिक अनुभव संचित गरियो',
'Professional Experience deleted': 'ब्यबसायिक अनुभव हटाइयो',
'Professional Experience Details': 'ब्यबसायिक अनुभव विवरण',
'Professional Experience updated': 'ब्यबसायिक अनुभव परिमार्जन गरियो',
'Profile': 'प्रोफाइल',
'Profile Configuration': 'प्रोफाइल बनावट',
'Profile Configuration removed': 'प्रोफाइल बनावट हटाइयो',
'Profile Configuration updated': 'प्रोफाइल बनावट परिमार्जन गरियो',
'Profile Configurations': 'प्रोफाइल बनावटहरू',
'Profile Configured': 'प्रोफाइल बनावट मिलाइयो',
'Profile Details': 'प्रोफाइल विवरण',
'Profile Page': 'प्रोफाइल पृष्ठ',
'Profile Picture': 'प्रोफाइल तस्बिर',
'Profile Picture?': 'प्रोफाइल तस्बिर?',
'Profiles': 'प्रोफाइलहरू',
'Program': 'कार्यक्रम',
'Program added': 'कार्यक्रम संचित गरियो',
'Program deleted': 'कार्यक्रम हटाइयो',
'Program Details': 'कार्यक्रम विवरण',
'Program Hours (Month)': 'कार्यक्रम समय (घण्टा) (महिना)',
'Program Hours (Year)': 'कार्यक्रम समय (घण्टा) (वर्ष)',
'Program updated': 'कार्यक्रम परिमार्जन गरियो',
'Programme Manager': 'कार्यक्रम व्यवस्थापक',
'Programme Planning and Management': 'कार्यक्रम योजना तर्जुमा र व्यवस्थापन',
'Programme Preparation and Action Plan, Budget & Schedule': 'योजना तर्जुमा, कार्य तालिका, बजेट',
'Programs': 'कार्यक्रमहरू',
'Project': 'परियोजना',
'Project added': 'परियोजना संचित गरियो',
'Project Assessments and Planning': 'परियोजना लेखाजोखा र तर्जुमा',
'Project Calendar': 'परियोजना पात्रो',
'Project Communities': 'परियोजना संचालित समुदायहरु',
'Project deleted': 'परियोजना हटाइयो',
'Project Details': 'परियोजना विवरण',
'Project Name': 'परियोजना नाम',
'Project not Found': 'परियोजना प्राप्त हुन सकेन',
'Project Officer': 'परियोजना कर्मचारी',
'Project Organization Details': 'परियोजना संस्था विवरण',
'Project Organization updated': 'परियोजना संस्था परिमार्जन गरियो',
'Project Organizations': 'परियोजना संस्थाहरू',
'Project Report': 'परियोजना प्रतिवेदन',
'Project Task': 'परियोजना काम',
'Project Time Report': 'परियोजना समय प्रतिवेदन',
'Project updated': 'परियोजना परिमार्जन गरियो',
'Projection': 'योजना',
'Projection added': 'योजना संचित गरियो',
'Projection deleted': 'योजना हटाइयो',
'Projection Details': 'योजना विवरण',
'Projection Type': 'योजना प्रकार',
'Projection updated': 'योजना परिमार्जन गरियो',
'Projections': 'योजनाहरू',
'Projects': 'परियोजनाहरु',
'Projects Map': 'परियोजनाहरुको नक्सा',
'Proposed': 'प्रस्तावित',
'Protecting Livelihoods': 'जीविकोपार्जन संरक्षण',
'Provide a password': 'पासवर्ड उपलब्ध गर्नुहोस्',
'Provision of Inputs': 'लागतको ब्यबस्था',
'Provision of Tools and Equipment': 'औजार र उपकरणहरुको व्यवस्था',
'Psychosocial Support': 'मनोसामाजिक सहयोग',
'Public': 'सामाजिक',
'Purchase Date': 'खरिद मिति',
'Purchase Price': 'खरिद रकम',
'Python GDAL required for Shapefile support!': 'आकारफाइल सहयोगको लागि पाइथन जि.डि.ए.एल. आवश्यक !',
'Python needs the ReportLab module installed for PDF export': 'पि.डि.एफ. निर्यातको लागि पाइथनलाई रिपोर्टल्याब मोड्यूल इन्स्टल भएको हुनुपर्दछ ।',
'Python needs the xlrd module installed for XLS export': 'एक्स.एल.एस. निर्यातको लागि पाइथनलाई एक्स.एल.आर.डि. मोड्यूल इन्स्टल भएको हुनुपर्दछ ।',
'Python needs the xlwt module installed for XLS export': 'एक्स.एल.एस. निर्यातको लागि पाइथनलाई एक्स.एल.डब्ल्यू.टि. मोड्यूल इन्स्टल भएको हुनुपर्दछ ।',
'Quantity': 'परिमाण',
'Query': 'सोधपुछ',
'Query Feature': 'सोधपुछ विशेषता',
'Queryable?': 'सोधपुछयोग्य ?',
'Race': 'जाति',
'Rainfall - last 1 day (mm)': 'बर्षा – गएको एक दिन (मिमि)',
'Rainfall - last 10 days accumulated (mm)': 'बर्षा – गएको दश दिन (मिमि) जम्मा',
'Rangeland, Fisheries and Forest Management': 'भुमी, माछा-क्षेत्र र वन ब्यबस्थापन',
'Rapid Data Entry': 'लगातार आँकडा प्रवेश',
'Rating': 'स्तर',
'RDRT (Regional Disaster Response Teams)': 'आर.डि.आर.टि. (क्षेत्रिय प्रकोप प्रतिकृया समूहहरू)',
'RDRT Members': 'आर.डि.आर.टि. सदस्यहरू',
'RDRT Type': 'आर.डि.आर.टि. प्रकार',
'READ': 'हेर्नुहोस्',
'Ready': 'तयार',
'Receive %(opt_in)s updates:': ' %(opt_in)s परिमार्जन(हरू) प्राप्त गर्नुहोस्:',
'Receive updates': 'परिमार्जन(हरू) प्राप्त गर्नुहोस्',
'Received Shipments': 'प्राप्त भएका जहाजिकरण(हरू)',
'Record': 'विवरण',
'Record added': 'विवरण संचित गरियो',
'Record already exists': 'विवरण पहिले नै रहेको छ',
'Record approved': 'विवरण प्रमाणित भयो',
'Record could not be approved.': 'विवरण प्रमाणित हुन सकेन',
'Record could not be deleted.': 'विवरण हटाउन सकिएन',
'Record deleted': 'विवरण हटाइयो',
'Record Details': 'विवरण विवरण',
'Record not found': 'विवरण प्राप्त भएन',
'Record not found!': 'विवरण प्राप्त भएन!',
'Record updated': 'विवरण परिमार्जन गरियो',
'Record Updates': 'विवरण परिमार्जन(हरू)',
'Records': 'विवरणहरू',
'records deleted': 'विवरणहरू हटाइयो',
'Records merged successfully.': 'विवरणहरू पूर्णरूपमा एकै गरियो',
'Recovery': 'पूनर्लाभ',
'red': 'रेड',
'Red Cross & Red Crescent National Societies': 'रेडक्रस तथा रेडक्रिसेन्ट सोसाइटीहरु',
'Red Cross / Red Crescent': 'रेड क्रस / रेड क्रिसेन्ट',
'Referral': 'निवेदन',
'Refresh Rate (seconds)': 'रिफ्रेस् दर (सेकेण्ड)',
'Region': 'क्षेत्र',
'Region added': 'क्षेत्र संचित गरियो',
'Region deleted': 'क्षेत्र हटाइयो',
'Region Details': 'क्षेत्र विवरण',
'Region Location': 'क्षेत्र स्थान',
'Region updated': 'क्षेत्र परिमार्जन गरियो',
'Regional': 'क्षेत्रीय',
'Regions': 'क्षेत्रहरू',
'Register': 'दर्ता',
'Register As': 'को रूपमा दर्ता',
'Register for Account': 'एकाउन्टको लागि दर्ता',
'Registered users can %(login)s to access the system': 'दर्ता गरिएको प्रयोगकर्ताहरू सिस्टम पहुँचको लागि %(login)s गर्न सक्छन्',
'Registration not permitted': 'दर्ताकार्य अनुमति छैन',
'Reject': 'अस्वीकार',
'Relationship': 'सम्बन्ध',
'Relief Team': 'राहात समूह',
'Religion': 'धर्म',
'reload': 'पुन:लोड गर्नुहोस्',
'Reload': 'पुन:लोड गर्नुहोस्',
'Remove': 'हटाउनुहोस्',
'Remove Coalition': 'संस्था हटाउनुहोस्',
'Remove existing data before import': 'आयात गर्नुभन्दा पहिले हालको तथ्यांक हटाउनुहोस्',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'विशेषता हटाउनुहोस्: तपाईंले हटाउन चहानुभएको विशेषता छान्नुहोस् र डिलिट कि थिच्नुहोस्',
'Remove Layer from Profile': 'प्रोफाइलबाट तह हटाउनुहोस्',
'Remove Layer from Symbology': 'चिन्हताबाट तह हटाउनुहोस्',
'Remove Network': 'नेटवर्क हटाउनुहोस्',
'Remove Organization from Project': 'परियोजनाबाट संस्था हटाउनुहोस्',
'Remove Profile Configuration for Layer': 'तहको लागि प्रोफाइल बनावट हटाउनुहोस्',
'Remove selection': 'छानिएको हटाउनुहोस्',
'Remove Skill': 'सिप हटाउनुहोस्',
'Remove Symbology from Layer': 'तहबाट चिन्हता हटाउनुहोस्',
'Remove this entry': 'यो प्रवेश हटाउनुहोस्',
'Reopened': 'पुन:खोलियो',
'Repeat': 'दोहोर्याउनुहोस्',
'Repeat your password': 'तपाईंको पासवर्ड दोहोर्याउनुहोस्',
'Replace': 'स्थानान्तर',
'Replacing or Provisioning Livelihoods': 'जीविकोपार्जन प्रतिस्थापन र प्रावधान',
'Reply': 'उत्तर',
'Report': 'प्रतिवेदन',
'Report of': 'को प्रतिवेदन',
'Report on Annual Budgets': 'वार्षिक बजेट(हरू)को प्रतिवेदन',
'Report Options': 'प्रतिवेदन विकल्पहरू',
'Reports': 'प्रतिवेदनहरु',
'representation of the Polygon/Line.': 'बहुभुजा/धर्काको प्रस्तुतिकरण',
'Request': 'अनुरोध',
'Requested By Facility': 'सुविधाद्वारा अनुरोध गरियो',
'Requested Items': 'अनुरोध गरिएका वस्तुहरू',
'Requests': 'अनुरोधहरू',
'Requires Login': 'लगिन गर्न आवश्यक',
'Reset': 'पहिलेको स्थितिमा',
'Reset all filters': 'फिल्टर(हरू) पहिलेको स्थितिमा राख्नुहोस्',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'विशेषता पुन:आकार दिनुहोस्: तपाईंले आकार दिन चाहानुभएको विशेषतालाई छान्नुहोस् र सम्बन्धित बिन्दुलाई तपाईंले चाहेको आकारमा मिलाउनुहोस्',
'Resource added': 'स्रोत संचित गरियो',
'Resource deleted': 'स्रोत हटाइयो',
'Resource Details': 'स्रोत विवरण',
'Resource Inventory': 'स्रोत लेखा विवरण',
'Resource Management System': 'स्रोत ब्यबस्थापन प्रकृया',
'Resource Management System account has been activated': 'स्रोत ब्यबस्थापन प्रकृया एकाउन्ट सकृय गरिएको छ',
'Resource Mobilization': 'श्रोत परिचालन',
'Resource Transfers for Acquiring Assets': 'भएको सम्पतिकोलागि स्रोत पठाउनुहोस्',
'Resource Transfers for Replacing/ Provisioning Or Consumption': 'स्थानान्तरण/व्यवस्थापन वा प्रयोगको लागि स्रोत पठाउनुहोस्',
'Resource Type': 'स्रोत प्रकार',
'Resource Type added': 'स्रोत प्रकार संचित गरियो',
'Resource Type deleted': 'स्रोत प्रकार हटाइयो',
'Resource Type Details': 'स्रोत प्रकार विवरण',
'Resource Type updated': 'स्रोत प्रकार परिमार्जन गरियो',
'Resource Types': 'स्रोत प्रकारहरू',
'Resource updated': 'स्रोत परिमार्जन गरियो',
'Responded': 'प्रतिकृया दिइयो',
'Response': 'प्रतिकृया',
'Response Summaries': 'प्रतिकृया संक्षेप(हरू)',
'Response Summary Added': 'प्रतिकृया संक्षेप संचित गरियो',
'Response Summary Deleted': 'प्रतिकृया संक्षेप हटाइयो',
'Response Summary Details': 'प्रतिकृया संक्षेप विवरण',
'Response Summary Report': 'प्रतिकृया संक्षेप प्रतिवेदन',
'Response Summary Updated': 'प्रतिकृया संक्षेप परिमार्जन गरियो',
'REST Filter': 'REST फिल्टर',
'Restarting Livelihoods': 'जीविकोपार्जन पुनर्शुरुवात',
'Retrieve Password': 'पुन:प्राप्त पासवर्ड',
'retry': 'पुन:प्रयास् गर्नुहोस्',
'Revert Entry': 'उल्टो प्रवेश',
'Review': 'पुर्नअवलोकन',
'RFA Priorities': 'आर.एफ.ए. प्राथमिकताहरू',
'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'आर.एफ.ए.१: जाँच-संस्थागत, शैक्षिक-संस्थागत, नियम र निर्णय तयारी',
'RFA2: Knowledge, Information, Public Awareness and Education': 'आर.एफ.ए.२: ज्ञान, जानकारी, सामाजिक जनचेतना र शिक्षा',
'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'आर.एफ.ए.३: खतरा(हरू)को अनुसन्धान र मुल्याङ्कन, पूर्वतयारी र खतरामा रहेको सामाग्री',
'RFA4: Planning for Effective Preparedness, Response and Recovery': 'आर.एफ.ए.४: प्रभावकारी पुर्वतयारीको निम्ति योजना, प्रतिकृया र सुधार',
'RFA5: Effective, Integrated and People-Focused Early Warning Systems': 'आर.एफ.ए.५: प्रभावकारी, समायोजित र जन-केन्द्रित अग्रिम सचेतना प्रकृयाहरू',
'RFA6: Reduction of Underlying Risk Factors': 'आर.एफ.ए.६: वर्तमान खतराका पक्षहरू न्यूनिकरण',
'Risk Identification & Assessment': 'खतरा पहिचान तथा लेखाजोखा',
'Risk Management and Quality Assurance': 'खतरा ब्यबस्थापन र गुणस्तर सुनिस्चितता',
'Risk Transfer': 'खतरा कटौती',
'RMS': 'आर.एम.एस.',
'RMS Team': 'आर.एम.एस. समूह',
'Road Safety': 'सडक सुरक्षा',
'Role': 'भूमिका',
'Role added': 'भूमिका संचित गरियो',
'Role assigned to User': 'प्रयोगकर्तालाई भूमिका हस्तान्तरण गरियो',
'Role deleted': 'भूमिका हटाइयो',
'Role Details': 'भूमिका विवरण',
'Role Name': 'भूमिका नाम',
'Role Required': 'भूमिका आवश्यक',
'Role updated': 'भूमिका परिमार्जन गरियो',
'Roles': 'भूमिकाहरू',
'Roles currently assigned': 'हाल हस्तान्तरण गरिएको भूमिका',
'Roles of User': 'प्रयोगकर्ताको भूमिका(हरू)',
'Roles Permitted': 'भूमिकाहरू स्विकृति गरियो',
'Roles updated': 'भूमिकाहरू परिमार्जन गरियो',
'Room': 'कोठा',
'Room added': 'कोठा संचित गरियो',
'Room deleted': 'कोठा हटाइयो',
'Room Details': 'कोठा विवरण',
'Room updated': 'कोठा परिमार्जन गरियो',
'Rooms': 'कोठा(हरू)',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'विशेषता अर्को तर्फ राख्नु: तपाईंले अर्को तर्फ राख्न चाहानुभएको विशेषता छान्नुहोस् र सम्बन्धित बिन्दुको माध्यम्द्वारा तापईंले चाहे अनुसार स्थान परिवर्तन गरि राख्नुहोस्',
'Run every': 'हरेक संचालन गर्नुहोस्',
'S3PivotTable unresolved dependencies': 'S3 वृत्ततालिका: समाधान नभएका निर्भरता(हरू)',
'Sahana Community Chat': 'साहाना समुदाय कुराकानी',
'Sahana Eden Humanitarian Management Platform': 'साहाना इदेन मानवतावादि ब्यबस्थापन कार्यक्षेत्र',
'Sahana Eden Website': 'साहाना इदेन वेवसाइट',
'Sanitation': 'सफाई',
'Save': 'संचित गर्नुहोस्',
'Save and Continue Editing': 'परिवर्तन संचित गरेर निरन्तरता दिनुहुन्छ?',
'Save as New Map?': 'नयाँ नक्साको रूपमा संचित गर्नुहुन्छ?',
'Save Map': 'नक्सा संचित गर्नुहोस्',
'Save search': 'खोजी संचित गर्नुहोस्',
'Save this search': 'यस खोजीलाई संचित गर्नुहोस्',
'Save: Default Lat, Lon & Zoom for the Viewport': 'संचित गर्नुहोस्: भिउपोर्टको लागि स्वचलानमा रहेको लाट, लोन तथा जुम',
'Saved': 'संचित',
'Saved Filters': 'संचित फिल्टर(हरू)',
'Saved filters': 'संचित फिल्टर(हरू)',
'Saved Filters...': 'संचित फिल्टर(हरू)...',
'Saved Maps': 'संचित नक्सा(हरू)',
'Saved search added': 'संचित खोजी संचित गरियो',
'Saved search deleted': 'संचित खोजी हटाइयो',
'Saved search details': 'संचित खोजी विवरण',
'Saved search updated': 'संचित खोजी परिमार्जन गरियो',
'Saved Searches': 'संचित खोजी(हरू)',
'Saved searches': 'संचित खोजी(हरू)',
'Scanned Copy': 'स्क्यान गरिएको प्रति',
'Scanned Forms Upload': 'स्क्यान गरिएको फारम(हरू) अपलोड गर्नुहोस्',
'Scheduled Jobs': 'सूचिकृत काम(हरू)',
'Schema': 'योजना',
'School Health': 'विधालय स्वास्थ',
'School Holidays only': 'विद्यालय विदाहरू मात्र',
'School RC Units Development': 'विद्यालय आर.सि.इकाईहरू विकास',
'School Safety and Children Education,': 'विद्यालय सुरक्षा र बाल शिक्षा,',
'Seaport': 'बन्दरगाह',
'Search': 'खोजी',
'Search %(site_label)s Status': '%(site_label)s अवस्था खोजी',
'Search Activities': 'कृयाकलाप(हरू) खोजी',
'Search Activity Types': 'कृयाकलाप प्रकार(हरू) खोजी',
'Search Addresses': 'ठेगाना(हरू) खोजी',
'Search Affiliations': 'स्वीकृती(हरू) खोजी',
'Search Annual Budgets': 'वार्षिक बजेट(हरू) खोजी',
'Search Appraisals': 'मुल्यांकन(हरू) खोजी',
'Search Awards': 'परस्कार(हरू) खोजी',
'Search Beneficiaries': 'भागिदारहरू खोजी',
'Search Beneficiary Types': 'भागिदार प्रकार(हरू) खोजी',
'Search Branch Organizations': 'शाखा संस्था(हरू) खोजी',
'Search by skills': 'सिप(हरू) अनुसार खोजी',
'Search Campaigns': 'क्याम्पिन(हरू) खोजी',
'Search Certificates': 'प्रमाण-पत्र(हरू) खोजी',
'Search Certifications': 'प्रमाणिकरण(हरू) खोजी',
'Search Clusters': 'समूह(हरू) खोजी',
'Search Coalitions': 'संस्था(हरू) खोजी',
'Search Communities': 'समुदाय(हरू) खोजी',
'Search Community Contacts': 'समुदाय सम्पर्क(हरू) खोजी',
'Search Competency Ratings': 'प्रतिस्पर्धा स्तर(हरू) खोजी',
'Search Contact Information': 'सम्पर्क जानकारी खोजी',
'Search Contacts': 'सम्पर्क(हरू) खोजी',
'Search Course Certificates': 'पाठ्यक्रम प्रमाण-पत्र(हरू) खोजी',
'Search Courses': 'पाठ्यक्रम(हरू) खोजी',
'Search Credentials': 'कागजात(हरू) खोजी',
'Search Criteria': 'सिमितता खोजी',
'Search Departments': 'मन्त्रालय(हरू) खोजी',
'Search Deployments': 'परियोजन(हरू) खोजी',
'Search Donors': 'दाता(हरू) खोजी',
'Search Education Details': 'शिक्षा विवरण खोजी',
'Search Entries': 'प्रवेश(हरू) खोजी',
'Search Facilities': 'सूबिधाहरू खोजी',
'Search Facility Types': 'सुविधा प्रकार(हरू) खोजी',
'Search Feature Layers': 'विशेषता तह(हरू) खोजी',
'Search for a Person': 'ब्यक्ति खोजी',
'Search for a Project by name, code, location, or description.': 'नाम, कोड, स्थान, वा ब्याख्याअनुसार परियोजना खोजी',
'Search for a Project by name, code, or description.': 'नाम, कोड, वा ब्याख्याअनुसार परियोजना खोजी',
'Search for a Project Community by name.': 'नामद्वरा परियोजना समुदाय खोजी',
'Search for Activity Organization': 'कृयाकलाप ब्यबस्थापन खोजी',
'Search for Activity Type': 'कृयाकलाप प्रकार खोजी',
'Search for office by organization or branch.': 'संस्था वा शाखाअनुसार कार्यलय खोजी',
'Search for office by organization.': 'संस्थाअनुसार कार्यलय खोजी',
'Search Groups': 'समूह(हरू) खोजी',
'Search Hazards': 'खतरा(हरू) खोजी',
'Search Hours': 'समय (घण्टा) खोजी',
'Search Identity': 'परिचय खोजी',
'Search Images': 'तस्विर(हरू) खोजी',
'Search Job Titles': 'पद खोजी',
'Search Keywords': 'मुख्यशब्द(हरू) खोजी',
'Search Layers': 'तह(हरू) खोजी',
'Search Location': 'स्थान खोजी',
'Search Location Hierarchies': 'स्थान संरचनाहरू खोजी',
'Search location in Geonames': 'भु-नाम अनुसार स्थान खोजी',
'Search Locations': 'स्थान(हरू) खोजी',
'Search Log Entry': 'दर्ताप्रवेश खोजी',
'Search Logged Time': 'सूचिकृत समय खोजी',
'Search Mailing Lists': 'ठेगाना तालिका(हरू) खोजी',
'Search Map Profiles': 'नक्सा बनावट(हरू) खोजी',
'Search Markers': 'चिन्ह(हरू) खोजी',
'Search Member': 'सदस्य खोजी',
'Search Members': 'सदस्य(हरू) खोजी',
'Search Membership': 'सदस्यता खोजी',
'Search Membership Types': 'सदस्यता प्रकार(हरू) खोजी',
'Search Milestones': 'उद्देश्य(हरू) खोजी',
'Search Networks': 'नेटवर्क(हरू) खोजी',
'Search Office Types': 'कार्यलय प्रकार(हरू) खोजी',
'Search Offices': 'कार्यलय(हरू) खोजी',
'Search Open Tasks for %(project)s': ' %(project)s को लागि खुला काम(हरू) खोजी',
'Search Organization Types': 'संस्था प्रकार(हरू) खोजी',
'Search Organizations': 'संस्था(हरू) खोजी',
'Search Participants': 'सहभागी(हरू) खोजी',
'Search Partner Organizations': 'साझेदार संस्था(हरू) खोजी',
"Search Person's Details": 'ब्यक्तिको विवरण खोजी',
'Search Persons': 'ब्यक्ति(हरू) खोजी',
'Search PoI Types': 'धुर्व प्रकार(हरू) खोजी',
'Search Points of Interest': 'रूचीको बुँदा खोजी',
'Search Policies & Strategies': 'नियम तथा उद्देश्य(हरू) खोजी',
'Search Professional Experience': 'ब्यबसायिक अनुभव खोजी',
'Search Programs': 'कार्यक्रम(हरू) खोजी',
'Search Project Organizations': 'परियोजना संस्था(हरू) खोजी',
'Search Projections': 'योजना(हरू) खोजी',
'Search Projects': 'परियोजना(हरू) खोजी',
'Search Records': 'विवरण(हरू) खोजी',
'Search Red Cross & Red Crescent National Societies': 'रेड क्रस तथा रेड क्रिसेन्ट राष्ट्रिय सोसाइटिज् खोजी',
'Search Regions': 'क्षेत्र(हरू) खोजी',
'Search Resource Types': 'स्रोत प्रकार(हरू) खोजी',
'Search Resource Inventory': 'स्रोत लेखा विवरण खोजी',
'Search Response Summaries': 'प्रतिकृया संक्षेप खोजी',
'Search Results': 'नतिजाहरू खोजी',
'Search Roles': 'भूमिका(हरू) खोजी',
'Search Rooms': 'कोठा(हरू) खोजी',
'Search saved searches': 'संचित खोजीहरू अनुसार खोजी',
'Search Sectors': 'क्षेत्र(हरू) खोजी',
'Search Services': 'सेवा(हरू) खोजी',
'Search Shipped Items': 'स्थानान्तर वस्तु(हरू) खोजी',
'Search Skill Equivalences': 'सिप सरह(हरू) खोजी',
'Search Skill Types': 'सिप प्रकार(हरू) खोजी',
'Search Skills': 'सिप(हरू) खोजी',
'Search Staff': 'कर्मचारी खोजी',
'Search Staff & Volunteers': 'कर्मचारी तथा स्वयम्-सेवक(हरू) खोजी',
'Search Staff Assignments': 'कर्मचारी काम(हरू) खोजी',
'Search Symbologies': 'चिन्हताहरू खोजी',
'Search Tasks': 'काम(हरू) खोजी',
'Search Teams': 'समूह(हरू) खोजी',
'Search Theme Data': 'स्वरूप आँकडा खोजी',
'Search Themes': 'स्वरूप(हरू) खोजी',
'Search Training Events': 'तालिम कार्यक्रम(हरू) खोजी',
'Search Training Participants': 'तालिम सहभागी(हरू) खोजी',
'Search Volunteer Cluster Positions': 'स्वयम्-सेवक समूह पद(हरू) खोजी',
'Search Volunteer Cluster Types': 'स्वयम्-सेवक समूह प्रकार(हरू) खोजी',
'Search Volunteer Clusters': 'स्वयम्-सेवक समूह(हरू) खोजी',
'Search Volunteer Roles': 'स्वयम्-सेवक भूमिका(हरू) खोजी',
'Search Volunteers': 'स्वयम्-सेवक(हरू) खोजी',
'Secondary Server (Optional)': 'द्दित्तिय सर्वर (वैकल्पिक)',
'seconds': 'सेकेण्ड',
'Seconds must be a number.': 'सेकेण्ड संख्यामा नै हुनुपर्छ ।',
'Seconds must be less than 60.': 'सेकेण्ड ६० भन्दा कम हुनुपर्छ ।',
'Secretary General': 'प्रमुख',
'Sector': 'क्षेत्र',
'Sector added': 'क्षेत्र संचित गरियो',
'Sector added to Organization': 'संस्थामा क्षेत्र संचित गरियो',
'Sector added to Project': 'परियोजनामा क्षेत्र संचित गरियो',
'Sector added to Theme': 'स्वरूपमा क्षेत्र संचित गरियो',
'Sector deleted': 'क्षेत्र हटाइयो',
'Sector Details': 'क्षेत्र विवरण',
'Sector removed from Organization': 'संस्थाबाट क्षेत्र हटाइयो',
'Sector removed from Project': 'परियोजनाबाट क्षेत्र हटाइयो',
'Sector removed from Theme': 'स्वरूपबाट क्षेत्र हटाइयो',
'Sector updated': 'क्षेत्र परिमार्जन गरियो',
'Sectors': 'क्षेत्र(हरू)',
'Sectors to which this Activity Type can apply': 'यो कृयाकलाप प्रकार लागु गर्न सकिने क्षेत्र(हरू)',
'Sectors to which this Theme can apply': 'यो स्वरूप लागु गर्न सकिने क्षेत्र(हरू)',
'Security': 'सुरक्षा',
'Security Officer': 'सुरक्षा कर्मचारी',
'See All Entries': 'सम्पूण प्रवेश(हरू) हेर्नुहोस्',
'see comment': 'टिप्पणी हेर्नुहोस्',
'see more': 'अझै हेर्नुहोस्',
'Seen': 'हेरियो',
'Select': 'छान्नुहोस्',
'Select %(location)s': '%(location)s छान्नुहोस्',
"Select 2 records from this list, then click 'Merge'.": "यो तालिकाबाट २ विवरणहरू छान्नुहोस्, त्यसपछी 'एकै गर्नुहोस्'मा थिच्नुहोस्",
"Select a Room from the list or click 'Add Room'": "तालिकाबाट एउटा कोठा छान्नुहोस् र 'कोठा राख्नुहोस्'मा क्लिक गर्नुहोस्",
'Select all': 'सबैलाई छान्नुहोस्',
'Select All': 'सबैलाई छान्नुहोस्',
'Select an existing bin': 'हालको बिनलाई छान्नुहोस्',
'Select an image to upload. You can crop this later by opening this record.': 'अपलोड गर्नुको लागि तस्बिर छान्नुहोस् । यो विवरणलाई खोलेर तपाईंले यो तहलाई काट्न सक्नुहुन्छ',
'Select Existing Location': 'हालको स्थान छान्नुहोस्',
'Select from registry': 'दर्ताबाट छान्नुहोस्',
'Select one or more option(s) that apply': 'लागु हुने एक वा थप विकल्प(हरू) छान्नुहोस्',
'Select resources to import': 'राख्नको निम्ति स्रोत छान्नुहोस्',
'Select the default site.': 'स्वचलानमा रहेको क्षेत्र छान्नुहोस्',
'Select the option that applies': 'लागु हुने विकल्प छान्नुहोस्',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'दुरी पत्ता लगाउनको निम्ति मुल्यंकन र कृयाकलाप(हरू)लाई छान्नुहोस्',
'Select the person assigned to this role for this project.': 'यो परियोजनाको लागि खटिएको व्यक्ति छान्नुहोस्',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "यदि निश्चितहरूलाई बनावट स्थलबाट गहिरो स्तरमा पारिवारिक क्षेत्रको आवस्यक पर्छ भने यसलाई छान्नुहोस् । उदाहरणको लागि, यदि बनावटको सबैभन्दा सानो बिभाजन 'जिल्ला' हो भने, तोकिएको सबै स्थानहरू परिवारको रूपमा जिल्ला नै हुनुपर्दछ ।",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'स्थान बनावटमा सम्पूर्ण क्षेत्रहरूलाई परिवार क्षेत्रको आवस्यकता पर्छ भने, यसलाई छान्नुहोस् । प्रभावित क्षेत्रलाई प्रतिनिधित्व गर्दै यसलाई "क्षेत्र" तोकिएर गर्न सकिन्छ ।',
'Select this if you need this resource to be mapped from site_id instead of location_id.': 'स्थान_आइ.डि.को साटो, क्षेत्र_आइ.डि.बाट यो चित्रत भएको चाहानुहुन्छ भने यसलाई छान्नुहोस्।',
'Select This Location': 'यो स्थान छान्नुहोस्',
'Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form.': 'छानिएको ओ.सि.आर. फारमको कुनै पृष्ठ(हरू) छैनन् । अर्को संस्करण प्रयोग गर्नुहोस् वा नयाँ फारम डाउनलोड गरेर नयाँ संस्करण बनाउनुहोस् ।',
'Send a message to this person': 'यो व्यक्तिलाई संदेश पठउनुहोस्',
'Send a message to this team': 'यो समूहलाई संदेश पठाउनुहोस्',
'Send batch': 'व्यच पठाउनुहोस्',
'Send Message': 'संदेश पठाउनुहोस्',
'Send Task Notification': 'काम सूचना घण्टि पठाउनुहोस्',
'Senior (50+)': 'ठूलो (५०+)',
'Sent Shipments': 'जहाजिकरण पठाउनुहोस्',
'separated': 'छुटाईएको',
'separated from family': 'परिवारबाट छुटाईएको',
'Serial Number': 'क्रम संख्या',
'Service': 'सेवा',
'Service added': 'सेवा संचित गरियो',
'Service added to Organization': 'संस्थामा सेवा संचित गरियो',
'Service deleted': 'सेवा हटाइयो',
'Service Details': 'सेवा विवरण',
'Service Record': 'सेवा विवरण',
'Service removed from Organization': 'संस्थाबाट सेवा हटाइयो',
'Service updated': 'सेवा परिमार्जन गरियो',
'Services': 'सेवा(हरू)',
'Set as my Default': 'मेरो स्वचिलत राख्नुहोस्',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'मुख्यब्यक्ति बाहेकका ब्यक्ति(हरू)ले यो स्तरलाई परिवर्तन गर्नको निम्ति "सत्य"मा राख्नुहोस् ।',
'Settings': 'सेटिङ(हरू)',
'Sex': 'लिंग',
'Sexual and Reproductive Health': 'यौन तथा प्रजनन स्वास्थ',
'Shapefile Layer': 'आकारफाइल तह',
'Share': 'बाँड्नुहोस्',
'shaved': 'काटिएको',
'Shelter': 'बसोबास',
'Shelter Repair Kit': 'आवास मर्मत किट',
'short': 'छोटो',
'Short Description': 'छोटो विवरण',
'Short Title / ID': 'छोटो शिर्षक / आइ.डि.',
'short<6cm': 'छोटो<६से.मि.',
'Show': 'देखाउनुहोस्',
'Show _MENU_ entries': '_MENU_ प्रवेश(हरू) देखाउनुहोस्',
'Show on Map': 'नक्सामा देखाउनुहोस्',
'Show Pivot Table': 'वृत्त तालिका देखाउनुहोस्',
'Show Table': 'तालिका देखाउनुहोस्',
'Show totals': 'जम्मा(हरू) देखाउनुहोस्',
'Showing 0 to 0 of 0 entries': '0 देखी 0 मा 0 प्रवेश(हरू) देखाईंदै',
'Showing _START_ to _END_ of _TOTAL_ entries': '_START_ देखी _END_ को _TOTAL_ प्रवेश(हरू) देखाईंदै',
'sides': 'साइडहरू',
'sign-up now': 'अहिले साइनअप गर्नुहोस्',
'Signature': 'सही',
'Simple Search': 'साधारण खोजी',
'Simulation ': 'सिमुलेसन ',
'single': 'एकल',
'Single PDF File': 'एकल पि.डि.एफ. फाइल',
'Site': 'क्षेत्र',
'Site Name': 'क्षेत्र नाम',
'Site Planning': 'क्षेत्र योजना',
'Site Selection': 'क्षेत्र निर्धारण',
'Sitemap': 'क्षेत्रनक्सा',
'Situation': 'अवस्था',
'Situation Monitoring/Community Surveillance': 'अवस्था अनुगमन/समुदाय जाँच',
'Skeleton Example': 'फ्रेम उदाहरण',
'Sketch': 'खाका',
'Skill': 'सिप',
'Skill added': 'सिप संचित गरियो',
'Skill Catalog': 'सिप तालिका',
'Skill deleted': 'सिप हटाइयो',
'Skill Details': 'सिप विवरण',
'Skill Equivalence': 'सिप सरह',
'Skill Equivalence added': 'सिप सरह संचित गरियो',
'Skill Equivalence deleted': 'सिप सरह हटाइयो',
'Skill Equivalence Details': 'सिप सरह विवरण',
'Skill Equivalence updated': 'सिप सरह परिमार्जन गरियो',
'Skill Equivalences': 'सिप सरह(हरू)',
'Skill removed': 'सिप हटाइयो',
'Skill Type': 'सिप प्रकार',
'Skill Type added': 'सिप प्रकार संचित गरियो',
'Skill Type Catalog': 'सिप प्रकार तालिका',
'Skill Type deleted': 'सिप प्रकार हटाइयो',
'Skill Type updated': 'सिप प्रकार परिमार्जन गरियो',
'Skill updated': 'सिप परिमार्जन गरियो',
'Skills': 'सिप(हरू)',
'Skin Marks': 'अनुहारको छाला दाग(हरू)',
'slim': 'पातलो',
'Small Scale Mitigation': 'सानो मात्रा सुधार',
'Social Impacts & Resilience': 'सामाजिक प्रभाव र उत्थानशिलता',
'Social Inclusion / Diversity': 'सामाजिक समावेशीकरण/ विविधता',
'Social Mobilisation': 'सामाजिक परिचालन',
'Solid Waste Management': 'ठोस फोहर ब्यवस्थापन',
'Sops and Guidelines Development': 'स्तरीय संचालन प्रकृया र निर्देशिका निर्माण',
'Sorry location %(location)s appears to be outside the area of parent %(parent)s.': 'माफगर्नुहोस्, स्थान %(location)s परिवार %(parent)s क्षेत्र भन्दा बाहिर परेको जस्तो देखिन्छ ।',
'Sorry location %(location)s appears to be outside the area supported by this deployment.': 'माफगर्नुहोस्, स्थान %(location)s यो परियोजनाले उल्लेख गर्ने क्षेत्र भन्दा बाहिर परेको जस्तो देखिन्छ ।',
'Sorry location appears to be outside the area of parent %(parent)s.': 'माफगर्नुहोस्, स्थान परिवार क्षेत्र भन्दा बाहिर परेको जस्तो देखिन्छ ।',
'Sorry location appears to be outside the area supported by this deployment.': 'माफगर्नुहोस्, स्थान यो परियोजनाले उल्लेख गर्ने क्षेत्र भन्दा बाहिर परेको जस्तो देखिन्छ ।',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'माफगर्नुहोस्, नक्सासंचालक भूमिका प्रयोगकर्ता(हरू)का लागि मात्र यी क्षेत्रमा अनुमति छ ।',
'Sorry, there are no addresses to display': 'माफगर्नुहोस्, देखाउनको लागि कुनैपनि ठेगानाहरू छैनन्',
'Source': 'स्रोत',
'Source Name': 'स्रोत नाम',
'Source URL': 'स्रोत यू.आर.एल.',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'निश्चित क्षेत्र (जस्तै, भवन/कोठा) स्थान भित्र यो ब्यक्ति/समूह देखियो ।',
'Specific locations need to have a parent of level': 'निश्चित स्थान(हरू)को स्तरको परिवार हुन आवश्यक छ ।',
'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'खुलासडकनक्सा/गुगल/विङ्गको आधारभुत तह(हरू)को प्रयोग गर्नको लागि स्फेरिकल मेर्कटर (९००९१३)को आवश्यक पर्छ ।',
'Spraying of Vectors': 'भेक्टोरहरू छरिँदै',
'Staff': 'कर्मचारी',
'Staff & Volunteers': 'कर्मचारी तथा स्वयम्-सेवक(हरू)',
'Staff & Volunteers (Combined)': 'कर्मचारी तथा स्वयम्-सेवक(हरू) (मिलाइएको)',
'Staff Assigned': 'कर्मचारी खटाइएको',
'Staff Assignment Details': 'कर्मचारी काम विवरण',
'Staff Assignment removed': 'कर्मचारी काम हटाइयो',
'Staff Assignment updated': 'कर्मचारी काम परिमार्जन गरियो',
'Staff Assignments': 'कर्मचारी काम(हरू)',
'Staff ID': 'कर्मचारी आइ.डि.',
'Staff Management': 'कर्मचारी ब्यबस्थापन',
'Staff Member added': 'कर्मचारी सदस्य संचित गरियो',
'Staff member added': 'कर्मचारी सदस्य संचित गरियो',
'Staff Member deleted': 'कर्मचारी सदस्य हटाइयो',
'Staff Member Details': 'कर्मचारी सदस्य विवरण',
'Staff Member Details updated': 'कर्मचारी सदस्य विवरण परिमार्जन गरियो',
'Staff Record': 'कर्मचारी विवरण',
'Staff Report': 'कर्मचारी प्रतिवेदन',
'Staff with Contracts Expiring in the next Month': 'अर्को महिनामा सम्झौता(हरू)को म्याद सकिने कर्मचारीहरू',
'Staff/Volunteer Record': 'कर्मचारी/स्वयम्-सेवक विवरण',
'Start Date': 'शुरु मिति',
'Status': 'अवस्था',
"Status 'assigned' requires the %(fieldname)s to not be blank": "खटाइएको' अवस्थालाई खालि नछोड्नको निम्ति %(fieldname)s को आवश्यकता पर्दछ ।",
'Status added': 'अवस्था संचित गरियो',
'Status deleted': 'अवस्था हटाइयो',
'Status Details': 'अवस्था विवरण',
'Status updated': 'अवस्था परिमार्जन गरियो',
'Statuses': 'अवस्था(हरू)',
'Stockpiling, Prepositioning of Supplies': 'भण्डारण, पुर्ती(हरू)को तयारी',
'Stocks and relief items.': 'मौज्दात र राहत सामग्रीहरु',
'Storm Surge': 'हावाहुरी',
'straight': 'सिधा',
'Strategy Development': 'उद्देश्य विकास',
'Street Address': 'सडक ठेगाना',
'Street View': 'सडक दृश्य',
'Strengthening Livelihoods': 'जीवीकोपार्जन सुदृढीकरण',
'String used to configure Proj4js. Can be found from %(url)s': 'बनावट प्रोजे४जे.एस मिलाउनको निम्ति स्ट्रिङ्ग प्रयोग गरिएको छ । %(url)s प्राप्त गर्न सकिन्छ ।',
'Strong': 'बलियो',
'Structural Safety': 'संरचनात्मक सुरक्षा',
'Style': 'तरिका',
'Style invalid': 'तरिका अमान्य',
'Sub Chapter': 'सह अध्याय',
'Sub Regional': 'उपक्षेत्रीय',
'Submission successful - please wait': 'निवेदन सफल - कृपया धैर्य गर्नुहोस्',
'Submit': 'पेश गर्नुहोस्',
'suffered financial losses': 'भोगिएको बित्तिय घाटा(हरू)',
'Supervisor': 'सुपरभाइजर',
'Supplier': 'निर्यातकर्ता',
'Suppliers': 'निर्यातकर्ता(हरू)',
'Supplier/Donor': 'आपूर्तिकर्ता/दाता',
'Swiss Francs': 'स्विजरल्याण्ड फ्र्याङ्क',
'Switch to 3D': '३डि मा जानुहोस्',
'Symbologies': 'चिन्हताहरू',
'Symbology': 'चिन्हता',
'Symbology added': 'चिन्हता संचित गरियो',
'Symbology deleted': 'चिन्हता हटाइयो',
'Symbology Details': 'चिन्हता विवरण',
'Symbology removed from Layer': 'तहबाट चिन्हता हटाइयो',
'Symbology updated': 'चिन्हता परिमार्जन गरियो',
'Table': 'तालिका',
'Table Permissions': 'तालिका स्वीकृती',
'Tablename': 'तालिकानाम',
'Tags': 'ट्याग',
'tall': 'अग्लो',
'Task': 'काम',
'Task added': 'काम संचित गरियो',
'Task deleted': 'काम हटाइयो',
'Task Details': 'काम विवरण',
'Task updated': 'काम परिमार्जन गरियो',
'Tasks': 'काम',
'Team': 'समूह',
'Team added': 'समूह संचित गरियो',
'Team deleted': 'समूह हटाइयो',
'Team Description': 'समूह ब्याख्या',
'Team Details': 'समूह विवरण',
'Team Leader': 'समूह अगुवा',
'Team Member added': 'समूह सदस्य संचित गरियो',
'Team Members': 'समूह सदस्य(हरू)',
'Team Name': 'समूह नाम',
'Team Type': 'समूह प्रकार',
'Team updated': 'समूह परिमार्जन गरियो',
'Teams': 'समूह(हरू)',
'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'भुसर्वर लाई मेटाटिलिङ्ग गर्न निर्देशन दिन्छ जसले नक्कल प्रति स्तर(हरू)लाई कम गर्दछ ।',
'Template': 'ढाँचा',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'पाचौँ-स्तरको लागि देश संचालक शाखा भित्र (जस्तै, भोट प्रकृया वा लेखकोड सह-शाखा)। यो स्तर प्राय प्रयोग हुँदैन',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'चौथो-स्तरको लागि देश संचालक शाखा भित्र (जस्तै, गाउँ, छिमेक वा टोल)',
'Term for the primary within-country administrative division (e.g. State or Province).': 'प्रथमिकको लागि देश संचालक भित्र (जस्तै, राज्य वा क्षेत्र)।',
'Term for the secondary within-country administrative division (e.g. District or County).': 'द्दित्तियको लागि देश संचालक शाखा (जस्तै, जिल्ला वा क्षेत्र) ।',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'तेस्रो-स्तरको लागि देश संचालक शाखा (जस्तै, शहर वा नगर) ।',
'Terms of Service': 'सेवाको निति',
'Tertiary Server (Optional)': 'तृतीय सर्वर (वैकल्पिक)',
'Text': 'टेक्स्ट',
'The area is': 'क्षेत्र हो',
'The Area which this Site is located within.': 'क्षेत्र जसमा यो स्थान रहेको छ ।',
'The attribute used to determine which features to cluster together (optional).': 'कुन विशेषताहरू हरूलाइ सँगै राख्ने सो देखाउनलाई प्रयोग भएको(वैकल्पिक) ',
'The attribute which is used for the title of popups.': 'पप्-अप(हरू)को शिर्षकको लागि प्रयोग भएको',
'The attribute within the KML which is used for the title of popups.': 'के.एम.एल. भित्रको आदेश जुन पप्-अप(हरू)को शिर्षकको लागि प्रयोग हुँदछ',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'के.एम.एल. भित्रको आदेश(हरू) जुन पप्-अप(हरू)को बनावटको लागि प्रयोग भएको छ । (आदेशहरूको बिचमा स्पेस प्रयोग गर्नुहोस्)',
'The body height (crown to heel) in cm.': 'से.मि.मा शरिरको उचाइ (शिर देखि पाइताला सम्म)',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'ब्यक्ति/समूहको हालको स्थान, जुन साधारण(प्रतिवेदनको लागि) वा आकार दिइएको(नक्सामा देखाईएको) । उपलब्ध स्थान(हरू)बाट खोजी गर्नको लागि केहि शब्दहरू प्रवेश गर्नुहोस् ।',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'इमेल ठेगाना जसमा प्रमाणिकरण अनुरोधहरू पठाइएको छ (साधारणतया यसमा इमेल ठेगानाको समूहहरू हुन्छन्, ब्यक्तिगत ठेगाना होइन) । यदि क्षेत्र खालि भएमा, र डोमेन मिलेमा अनुरोधहरू स्वचालितरूपमा नै प्रमाणित हुनेछ ।',
'The facility where this position is based.': 'यो पदरहेको क्षेत्रमा सुविधा',
'The first or only name of the person (mandatory).': 'ब्यक्तिको पहिलो नाम वा मात्र नाम (आवश्यक).',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'यू.आर.एल.को बनावट हुन्छ: http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.',
'The language you wish the site to be displayed in.': 'तपाईंले क्षेत्रमा देखियोस् भनेर चाहानु भएको भाषा ',
'The length is': 'लाम्बाइ हो',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'स्थान, जहाँबाट ब्यक्ति आएको हो, जुन साधारण छ(प्रतिवेदनको लागि) वा आकार दिइएको(नक्सामा देखाईएको) । उपलब्ध स्थान(हरू)बाट खोजी गर्नको लागि केहि शब्दहरू प्रवेश गर्नुहोस् ।',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'स्थान, जहाँ ब्यक्ति जाँदैछ, जुन साधारण छ(प्रतिवेदनको लागि) वा आकार दिइएको(नक्सामा देखाईएको) । उपलब्ध स्थान(हरू)बाट खोजी गर्नको लागि केहि शब्दहरू प्रवेश गर्नुहोस् ।',
'The map will be displayed initially with this latitude at the center.': 'शुरुमा नक्सा यो अक्षांशमा बिचमा देखिनेछ ',
'The map will be displayed initially with this longitude at the center.': 'शुरुमा नक्सा यो देशान्तरमा बिचमा देखिनेछ ',
'The Maximum valid bounds, in projected coordinates': 'नियन्त्रित अवस्थामा बढि मान्य क्षेत्र',
'The minimum number of features to form a cluster. 0 to disable.': 'समूह बनाउनको निम्ति विशेषताहरूको कम्ति संख्या । निस्कृय गर्नको निम्ति 0 ',
'The name to be used when calling for or directly addressing the person (optional).': 'ब्यक्तिलाई सिधै बोलाउँदा प्रयोग गर्दा बोलाईने नाम(वैकल्पिक) ।',
'The number of pixels apart that features need to be before they are clustered.': 'विशेषताहरूलाई समूहमा राख्न भन्दा पहिले पिक्सेलको संख्या ।',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'अपलोड गर्नको निम्ति देखिने नक्साको वरिपरिको टाइलको संख्या । सुन्यको अर्थ, पहिलो पृष्ठ छिटो लोड हुन्छ, ठूलो संख्यको अर्थ ढिलो हुँदै जाने भन्ने हुन्छ ।',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'संस्था दर्ताले क्षेत्रमा काम गरिरहेको सम्पूर्ण राहत संस्थाहरूलाई ट्र्याकमा राख्दछ ।',
"The Project module can be used to record Project Information and generate Who's Doing What Where reports.": 'परियोजना भाग परियोजना जानकारी र को कहाँ के गरिरहेको छ भन्ने प्रतिवेदन(हरू) विवरण राख्नको निम्त प्रयोग गर्न सकिन्छ ।',
"The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server.": "प्रदान गरिएको 'फर्मड' अमान्य छ । यो सर्वरमा उपलब्ध नभएको फारम पुन:अवलोकन तपाईंले छान्नु भएको छ ।",
"The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": "उपलब्ध गरिएको 'फर्मड' अमान्य छ । फारम अपलोड गर्नुहोस् भन्ने भाग अमान्य छ । अपलोड गर्न प्रयास् गर्नुहोस् ।",
"The staff member's official job title": 'कर्मचारी सदस्यको संस्थागत काम शिर्षक',
'The system supports 2 projections by default:': 'प्रणालीले स्वचालितरूपमा २ योजनाहरूलाई सहयोग गर्दछ:',
'The uploaded Form is unreadable, please do manual data entry.': 'संचित गरिएको फारम पढ्न नसकिने, विस्तृ आँकडा प्रवेश गर्नुहोस् ।',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'वेव नक्साको क्षमता प्राप्त गर्नुहोस् पृष्ठको यू.आर.एल. जस्को तह(हरू) तपाईंले नक्सामा ब्राउजरको माध्यमबाट उपलब्ध गराउन चाहानुहुन्छ ।',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'तस्बिर फाइलको यू.आर.एल. । यदि तपाईं तस्विर फाइल अपलोड गर्नुहुन्न भने होस्, यस्को स्थान यहाँ देखाउनुपर्छ ।',
'The URL to access the service.': 'सेवा पहुँचको निम्ति यू.आर.एल. ',
"The volunteer's role": 'स्वयम्-सेवकको भूमिका',
'The weight in kg.': 'तौल केजीमा',
'Theme': 'स्वरूप',
'Theme added': 'स्वरूप संचित गरियो',
'Theme added to Activity': 'कृयाकलापमा स्वरूप संचित गरियो',
'Theme added to Project': 'परियोजनामा स्वरूप संचित गरियो',
'Theme added to Project Location': 'परियोजना स्थानमा स्वरूप संचित गरियो',
'Theme Data': 'स्वरूप आँकडा',
'Theme Data deleted': 'स्वरूप आँकडा हटाइयो',
'Theme Data updated': 'स्वरूप आँकडा परिमार्जन गरियो',
'Theme deleted': 'स्वरूप हटाइयो',
'Theme Details': 'स्वरूप विवरण',
'Theme Layer': 'स्वरूप तह',
'Theme removed from Activity': 'कृयाकलापमा स्वरूप हटाइयो',
'Theme removed from Project': 'परियोजनामा स्वरूप हटाइयो',
'Theme removed from Project Location': 'परियोजना स्थानमा स्वरूप हटाइयो',
'Theme updated': 'स्वरूप परिमार्जन गरियो',
'Themes': 'स्वरूपहरू',
'There are multiple records at this location': 'यस स्थानमा बहु विवरणहरू छन्',
"There are no details for this person yet. Add Person's Details.": 'यस ब्यक्तिको निम्ति अहिले सम्म कुनै विवरण छैन ।',
'There are too many features, please Zoom In': 'धेरै विशेषताहरू छन्, कृपया नजिक ल्याउनुहोस्',
'There is no address for this person yet. Add new address.': 'यो ब्यक्तिको निम्ति अहिलेसम्म कुनै ठेगाना छैन । नयाँ ठेगाना राख्नुहोस्.',
'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': 'अहिलेसम्म यस %(site_label)s को लागि कुनै अवस्था छैन । अवस्था %(site_label)s राख्नुहोस् ।',
'There was a problem, sorry, please try again later.': 'समस्या थियो, माफगर्नुहोला, कृपया पछि प्रयास गर्नुहोला ।',
'These are the filters being used by the search.': 'खोजीद्वारा प्रयोग गरिएको फिल्टरहरू छन् ।',
'These need to be added in Decimal Degrees.': 'यो अंक डिग्रीमा संचित गरिनु पर्छ ।',
'This email-address is already registered.': 'यो इमेल-ठेगाना पहिले नै दर्ता गरिएको छ ।',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'यदि यो स्तर निर्माण प्रकृयामा रहेको छ भने यो सहि हो । यो स्तर पुरा भएपछि, आपतकालिन परिवर्तन हुन नदिनको निम्ति यसलाई गलत भनेर राख्न सक्नुहुन्छ ।',
'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'यो साधारणतया नक्साको तह प्रपटिको स्टाइल ट्यबमा विजेट प्रयोग गरि परिवर्तन गरिएको हो ।',
'This job has already been finished successfully.': 'यो काम पहिलेनै सफलतापुर्वक समाप्त भएको छ ।',
'This level is not open for editing.': 'यो स्तर परिवर्तनको लागि खुला छैन ।',
'This role can not be assigned to users.': 'यो भूमिका प्रयोगकर्ता(हरू)लाई हस्तान्तरण गर्न सकिँदैन ।',
'This should be an export service URL, see': 'यो निर्यात सेवा यू.आर.एल. हुनुपर्छ, हेर्नुहोस्',
'Thunderbolt': 'चट्याङ्ग',
'Tiled': 'टाइल हालिएको',
'Time': 'समय',
'Time Actual': 'वास्तविक समय',
'Time Estimate': 'अडकल समय',
'Time Estimated': 'अडकल गरिएको समय',
'Time Frame': 'समय अवधी',
'Time Log': 'समय सूची',
'Time Log Deleted': 'समय दर्ताहटाइयो',
'Time Log Updated': 'समय दर्तापरिमार्जन गरियो',
'Time Logged': 'सूचिकृत समय',
'Time Taken': 'लागेको समय',
'Timeline': 'समयसीमा',
'times': 'समय(हरू)',
'times (0 = unlimited)': 'समय(हरू) (0 = असिमित)',
'times and it is still not working. We give in. Sorry.': 'समय(हरू) र यो अझै काम गरिरहेको छैन । माफ गर्नुहोला',
'Title': 'शिर्षक',
'Title to show for the Web Map Service panel in the Tools panel.': 'टुल्स् प्यानलमा वेव नक्सा सेवा प्यानलको लागि देखाउने शिर्षक ',
'TMS Layer': 'टि.एम.एस. तह',
'to download a OCR Form.': 'ओ.सि.आर. फारम डाउनलोड गर्न',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'खुलासडकनक्सा परिवर्तन गर्नको निम्ति, तपाईंको नक्सा बनावटमा खुलासडकनक्सा सेटिङको आवस्यकता पर्दछ ।',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'समयसिमा सार्न: माउस्को स्क्रोल ह्विल प्रयोग गर्नुहोस्, एरो कि वा तान्नुहोस् र समयसिमालाई ड्र्याग गर्नुहोस् ।',
'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'प्रिन्ट वा बाँडनको लागि तपाईंले नक्साको स्क्रिनसट लिनु पर्नेहुन्छ । यदि स्क्रिनसट लिनको निम्ति तपाईंलाई सहयोग चाहिन्छ भने, %(windows)s र %(mac)s को लागि यि निर्देशनहरूलाई हेर्नुहोस् ।',
'to reset your password': 'पासवर्ड परिवर्तन गर्न',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "स्थानको खोजीको लागि, नाम प्रवेश गर्नुहोस् । तपाईं % विल्डकार्डकोरूपमा प्रयोग गर्न सक्नुहुन्छ । सम्पूर्ण स्थान(हरू) लाई तालिकामा नराखिकन, 'खोजी' थिच्नुहोस् ।",
"To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": "सदस्य खोजीको निम्ति, ब्यक्ति वा समूहको नामको कुनै भाग टाइप गर्नुहोस् । तपाईं % विल्डकार्डकोरूपमा प्रयोग गर्न सक्नुहुन्छ । सम्पूर्ण स्थान(हरू) लाई तालिकामा नराखिकन, 'खोजी' थिच्नुहोस् ।",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "ब्यक्तिको खोजीको निम्ति, स्पेस्ले छुटाएर पहिलो, बिचको वा अन्तिमको नाम र/वा ब्यक्तिको आइ.डि. संख्या मध्ये कुनै टाइप गर्नुहोस् । तपाईं % विल्डकार्डकोरूपमा प्रयोग गर्न सक्नुहुन्छ । सम्पूर्ण स्थान(हरू) लाई तालिकामा नराखिकन, 'खोजी' थिच्नुहोस् ।",
'tonsure': 'टनस्यूर',
'Tools and Guidelines Development': 'औजार(हरू) र निर्देशन विकास',
'total': 'जम्मा',
'Total': 'जम्मा',
'Total Annual Budget': 'जम्मा वार्षिक बजेट',
'Total Funding (Local Currency)': 'जम्मा अनुदान (स्थानिय मुद्रा)',
'Total Funding Amount': 'जम्मा अनुदान मात्रा',
'Total Persons': 'जम्मा ब्यक्ति(हरू)',
'Total Population': 'जम्मा जनसंख्या',
'Total Records: %(numrows)s': 'जम्मा विवरणहरू: %(numrows)s',
'Tourist Group': 'पर्यटक समूह',
'Trackable': 'ट्र्याक गर्न सकिने',
'Tracking and analysis of Projects and Activities.': 'परियोजना र कार्यक्रमहरुको ट्र्याकिङ्ग',
'Training': 'तालिम',
'Training added': 'तालिम संचित गरियो',
'Training Course Catalog': 'तालिम पाठ्यक्रम तालिका',
'Training Courses': 'तालिम कोर्सहरु',
'Training deleted': 'तालिम हटाइयो',
'Training Details': 'तालिम विवरण',
'Training Event': 'तालिम विवरण',
'Training Event added': 'तालिम विवरण संचित गरियो',
'Training Event deleted': 'तालिम विवरण हटाइयो',
'Training Event Details': 'तालिम विवरण विवरण',
'Training Event updated': 'तालिम विवरण परिमार्जन गरियो',
'Training Events': 'तालिम विवरण',
'Training Facility': 'तालिम सुविधा',
'Training Hours (Month)': 'तालिम समय (घण्टा) (महिना)',
'Training Hours (Year)': 'तालिम समय (घण्टा) (वर्ष)',
'Training of Community/First Responders': 'समुदाय/पहिलो प्रतिकृया दिने ब्यक्तिको तालिम ',
'Training of Master Trainers/Trainers': 'प्रशिक्षक प्रशिक्षण तालिम',
'Training Report': 'तालिम प्रतिवेदन',
'Training updated': 'तालिम परिमार्जन गरियो',
'Trainings': 'तालिम(हरू)',
'Transfer': 'पठाउनुहोस्',
'Transit': 'द्वार',
'Transitional Shelter': 'संक्रमणकालिन आवास',
'Transparent?': 'पारदर्शक?',
'Tree and Mangrove Planting': 'रुख तथा म्यान्ग्रोभ रोपण',
'Type': 'प्रकार',
"Type the first few characters of one of the Participant's names.": 'सहभागीको नामको पहिलो शब्दहरू टाइप गर्नुहोस्',
"Type the first few characters of one of the Person's names.": 'ब्यक्तिको नामको पहिलो शब्दहरू टाइप गर्नुहोस्',
'UN agency': 'यू.एन. एजेन्सि',
'Unable to parse CSV file or file contains invalid data': 'सि.एस.भि. फाइल सुचारु हुन सकेन वा फाइलमा अमान्य आँकडा रहेको',
'Uncheck all': 'सबैको चिन्ह हटाउनुहोस्',
'United States Dollars': 'संयूक्त राज्य डलर',
'Units': 'इकाई(हरू)',
'Unknown': 'थाहा नभएको',
'unknown': 'थाहा नभएको',
'unlimited': 'असिमित',
'Unmark as duplicate': 'नक्कल प्रतिको रूपमा चिन्ह हटाउनुहोस्',
'Unspecified': 'नतोकिएको',
'Unsupported data format': 'नमिल्ने आँकडा नमुना',
'Unsupported method': 'नमिल्ने शैली',
'UPDATE': 'परिमार्जन गर्नुहोस्',
'Update Coalition': 'संस्था परिमार्जन गर्नुहोस्',
'Update Report': 'प्रतिवेदन परिमार्जन गर्नुहोस्',
'Update this entry': 'यो प्रवेश परिमार्जन गर्नुहोस्',
'updated': 'परिमार्जन गरियो',
'Upload an image file (png or jpeg), max. 400x400 pixels!': 'तस्विर फाइल (png वा jpeg), बढिमा ४००x४०० pixels! अपलोड गर्नुहोस्',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'तस्विर फाइल यहाँ अपलोड गर्नुहोस् । तस्विर फाइल अपलोड गर्नु भएन भने, तपाईंले यसको यू.आर.एल. क्षेत्र देखाउनै पर्छ ।',
'Upload Format': 'नमुना अपलोड गर्नुहोस्',
'Upload Scanned OCR Form': 'स्क्यान गरिएको ओ.सि.आर. फारम अपलोड गर्नुहोस् ',
'Upload Shapefile': 'आकारफाइल अपलोड गर्नुहोस् ',
'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'अपलोड गरिएको फाईल पि.डि.एफ. फाइल होइन । सहि पि.डि.एफ. फाइल उपलब्ध गराउनुहोस्',
"Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": "अपलोड गरिएका फाइल(हरू) तस्विर(हरू) होइनन् । लिने तस्विर नमुनाहरू '.png', '.jpg', '.bmp', '.gif' आदि हुन् ।",
'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'संचित गरिएको पि.डि.एफ. फाइलमा आवश्यक भन्दा बढि/कम पृष्ठ संख्या रहेको छ । तपाईंले सहि फारम उपलब्ध गराउनु भयो भएन, वा फारममा भएको पृष्ठ संख्याहरू सहि छन् कि छैनन् जाँच गर्नुहोस् ।',
'Urban Risk & Planning': 'शहरी जोखिम र योजना',
'Urgent': 'तत्काल',
'URL': 'यू.आर.एल.',
'URL to a Google Calendar to display on the project timeline.': 'परियोजना समयमा देखाउनको निम्ति गुगल पात्रोको लागि यू.आर.एल.',
'Use decimal': 'बिन्दु प्रयोग',
'Use default': 'स्वचलान प्रयोग',
'Use deg, min, sec': 'मिनेट, सेकेण्ड प्रयोग',
'Use Geocoder for address lookups?': 'ठेगानाको निम्ति जिओकोड प्रयोग गर्नुहोस् ।',
'Use Site?': 'क्षेत्र प्रयोग?',
'Use this to set the starting location for the Location Selector.': 'स्थान छान्नेको लागि शुरू स्थान राख्नको निम्ति यो प्रयोग गर्नुहोस् ।',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'प्रकारहरूको फरक छुट्याउनको लागि अनहोवर टुलकिट तथा समूह पप्-अपमा प्रयोग गरिएको ।',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'विवरणहरू छुट्याउनको निम्ति अनहोवर टुलकिट बनाउनको लागि प्रयोग भएको र समूह पप्-अपमा पनि प्रयोग भएको',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'प्रवेश गरिएको स्थानको आक्षांश ठिक छ कि छैन भनेर जाँच गर्नको निम्ति प्रयोग गरिएको । स्थानहरू भएको स्रोतहारूको तालिका फिल्टर गर्नको निम्ति प्रयोग गर्न सकिने ।',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'प्रवेश गरिएको स्थानको देशान्तरहरू ठिक छ कि छैन भनेर जाँच गर्नको निम्ति प्रयोग गरिएको । स्थानहरू भएको स्रोतहारूको तालिका फिल्टर गर्नको निम्ति प्रयोग गर्न सकिने ।',
'Used to populate feature attributes which can be used for Styling.': 'स्टाइलिङको लागि प्रयोग हुने विशेष कार्यहरूलाई सहयोग गर्नको निम्ति प्रयोग भएको ।',
'User': 'प्रयोगकर्ता',
'User Account': 'प्रयोगकर्ता एकाउन्ट',
'User added to Role': 'भूमिकामा प्रयोगकर्तालाई संचित गरियो ',
'User Profile': 'प्रयोगकर्ता प्रोफाइल',
'User Roles': 'प्रयोगकर्ता भूमिकाहरू',
'User with Role': 'भूमिकासहितको प्रयोगकर्ता',
'Username': 'प्रयोगकर्तानाम',
'Users': 'प्रयोगकर्ताहरू',
'Users in my Organizations': 'मेरो संस्था(हरू) मा प्रयोगकर्ता(हरू)',
'Users with this Role': 'यस भूमिकाको प्रयोगकर्ता(हरू)',
'Uses the REST Query Format defined in': 'परिभाषित गरिएको सोधपुछ नमुना यथास्थितिमा ल्याउन प्रयोग',
'using default': 'स्वचलित प्रयोग गरिँदै',
'Valid From': 'देखि मान्य',
'Valid Until': 'सम्म मान्य',
'Validation error': 'मान्यता गल्ति',
'Value': 'महत्व',
'Value per Pack': 'प्रति एकाई मूल्य',
'VCA (Vulnerability and Capacity Assessment)': 'संकटासन्नता र क्षमता लेखाजोखा',
'Vector Control': 'भेक्टोर नियन्त्रण',
'Verified': 'प्रमाणित गर्ने',
'Version': 'भर्सन',
'Very Good': 'धेरै राम्रो',
'Very Strong': 'धेरै बलियो',
'Video Tutorials': 'भिडियो ट्युटरियल्सहरू',
'View': 'हेर्नुहोस्',
'View full screen': 'पूर्ण स्क्रिन हेर्नुहोस्',
'View Fullscreen Map': 'पूर्णस्क्रिन नक्सा हेर्नुहोस्',
'View Location Details': 'स्थान विवरण हेर्नुहोस्',
'View on Map': 'नक्सा हेर्नुहोस्',
'Vocational Training and Employment Skills': 'व्यावसायिक तालिम र रोजगार सिप(हरू)',
'Volunteer': 'स्वयम्-सेवक',
'Volunteer added': 'स्वयम्-सेवक संचित गरियो',
'Volunteer and Staff Management': 'स्वयम्सेवक र कर्मचारी ब्यवस्थापन',
'Volunteer Cluster': 'स्वयम्-सेवक समूह',
'Volunteer Cluster added': 'स्वयम्-सेवक समूह संचित गरियो',
'Volunteer Cluster deleted': 'स्वयम्-सेवक समूह हटाइयो',
'Volunteer Cluster Position': 'स्वयम्-सेवक समूह पद',
'Volunteer Cluster Position added': 'स्वयम्-सेवक समूह पद संचित गरियो',
'Volunteer Cluster Position deleted': 'स्वयम्-सेवक समूह पद हटाइयो',
'Volunteer Cluster Position updated': 'स्वयम्-सेवक समूह पद परिमार्जन गरियो',
'Volunteer Cluster Type': 'स्वयम्-सेवक समूह प्रकार',
'Volunteer Cluster Type added': 'स्वयम्-सेवक समूह प्रकार संचित गरियो',
'Volunteer Cluster Type deleted': 'स्वयम्-सेवक समूह प्रकार हटाइयो',
'Volunteer Cluster Type updated': 'स्वयम्-सेवक समूह प्रकार परिमार्जन गरियो',
'Volunteer Cluster updated': 'स्वयम्-सेवक समूह परिमार्जन गरियो',
'Volunteer deleted': 'स्वयम्-सेवक हटाइयो',
'Volunteer Details': 'स्वयम्-सेवक विवरण',
'Volunteer Details updated': 'स्वयम्-सेवक विवरण परिमार्जन गरियो',
'Volunteer Hours': 'स्वयम्-सेवक समय (घण्टा)',
'Volunteer Insurance': 'स्वयम्-सेवक विमा',
'Volunteer Management': 'स्वयम्-सेवक ब्यबस्थापन',
'Volunteer Recognition': 'स्वयम्-सेवक सम्मान',
'Volunteer Record': 'स्वयम्-सेवक विवरण',
'Volunteer Recruitment': 'स्वयम्-सेवक नियुक्ति',
'Volunteer Report': 'स्वयम्-सेवक प्रतिवेदन',
'Volunteer Role': 'स्वयमसेवकको भूमिका',
'Volunteer Role added': 'स्वयमसेवकको भूमिका संचित गरियो',
'Volunteer Role Catalog': 'स्वयमसेवकको भूमिका तालिका',
'Volunteer Role deleted': 'स्वयमसेवकको भूमिका हटाइयो',
'Volunteer Role Details': 'स्वयमसेवकको भूमिका विवरण',
'Volunteer Role updated': 'स्वयमसेवकको भूमिका परिमार्जन गरियो',
'Volunteer Roles': 'स्वयमसेवकको भूमिका',
'Volunteer Service Record': 'स्वयम्-सेवक सेवा विवरण',
'Volunteer Training': 'स्वयम्-सेवक तालिम',
'Volunteering in Emergencies Guidelines/Toolkit': 'आपतकालिन निर्देशन/टुलकिटमा स्वयम्-सेवक कार्य',
'Volunteering in Pandemic Emergency Situations': 'माहामरी आपतकालिन अवस्था(हरू)मा स्वयम्-सेवक कार्य',
'Volunteers': 'स्वयम्-सेवक(हरू)',
'Vulnerability': 'संकटासन्नता',
'Vulnerable Populations': 'संकटासन्नता जनताहरु',
'Warehouse': 'गोदामघर',
'Warehouse Manager': 'भण्डार व्यवस्थापक',
'Warehouse Stock': 'भण्डार सामान',
'Warehouse Type': 'गोदामघर प्रकार',
'Warehouses': 'गोदामघरहरु',
'WARNING': 'चेतावनि',
'Water': 'पानी',
'Water and Sanitation': 'खानेपानी र सरसफाई',
"Water, Sanitation & Hygiene": 'खानेपानी, सरसफाई तथा स्वच्छता',
'Water Supply': 'पानी आपूर्ती',
'Water Testing': 'पानी परिक्षण',
'Watsan': 'वाटसन्',
'Watsan Officer': 'वाटसन् कर्मचारी',
'Watsan Technician': 'वाटसन् प्राविधिक',
'wavy': 'गिलो',
'We have tried': 'हामिले प्रयास गर्यौं',
'Weak': 'कमजोर',
'Weather': 'मौसम',
'Web Map Service': 'वेभ नक्सा सेवा',
'Web Map Service Browser Name': 'वेव नक्सा सेवा ब्राउजर नाम',
'Web Map Service Browser URL': 'वेव नक्सा सेवा ब्राउजर यू.आर.एल.',
'Website': 'वेवसाइट',
'Week': 'हप्ता',
'Weekends only': 'हप्ताको अन्त्यमा मात्र',
'Weekly': 'साप्ताहिक',
'Weight': 'तौल',
'Weight (kg)': 'तौल (केजि)',
'Well-Known Text': 'राम्ररी थाहा भएको-शब्द',
'WFS Layer': 'डब्ल्यू.एफ.एस. तह',
'WGS84 (EPSG 4236) is required for many WMS servers.': 'धेरै डब्लु.एम.एस. सर्वरहरूका लागि डब्ल्यु.जि.एस.८४ (इ.पि.एस.जि. ४२३6) आवश्यक छ',
'What order to be contacted in.': 'कुन तरिकामा सम्पर्क गर्ने',
'When this search was last checked for changes.': 'जब यो खोजी परिवर्तनको लागि अन्तिममा जाँच गरियो ',
'Whether calls to this resource should use this configuration as the default one': 'यो बनावटलाइ स्रोतले स्वचालितरूपमा प्रयोग गर्नकोलागि हो होइन',
'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': ' फरकरूपमा बनावट प्रवेशको साटो अक्षांश तथा देशान्तर स्थान बनावटमाउच्चमा छ कि छैन ।',
'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'आधारभुत स्थान प्रयोग गर्नुको साटो स्रोतलाई एस ३ट्र्याक प्रयोग गरि ट्र्याक गर्ने कि नगर्ने',
'Whiskers': 'दारी',
'white': 'सेतो',
'Who is doing What Where': 'को कहाँ के गर्दैछ',
'wider area, longer term, usually contain multiple Activities': 'ठूलो क्षेत्र, लामो अवधि, साधारणतया बहुमुखी कृयाकलाप(हरू) समाबेश गर्दछ',
'widowed': 'बिधुवा',
'Will create and link your user account to the following records': 'तलको विवरणमा तपाईंको एकाउन्ट जोड्ने र बनाउनेछ ।',
'With best regards': 'स-धन्यवाद',
'WKT is Invalid!': 'डब्लु.के.टि. अमान्य!',
'WMS Layer': 'डब्लु.एम.एस. तह',
'Work': 'काम',
'Work on Program': 'कार्यक्रममा काम',
'X-Ray': 'एक्स-रे',
'XML parse error': 'एक्स्.एम.एल. गल्ती',
'XSLT stylesheet not found': 'एक्स.एस.एल.टि. स्टाइलसिट प्राप्त भएन',
'XSLT transformation error': 'एक्स.एस.एल.टि. परिवर्तन गल्ती',
'XYZ Layer': 'एक्स.वाइ.जेट. तह',
'Year': 'वर्ष',
'Year that the organization was founded': 'संस्था स्थापना गरिएको वर्ष',
'yes': 'हुन्छ',
'Yes': 'हुन्छ',
'You can click on the map below to select the Lat/Lon fields': 'ल्याट/लोन क्षेत्र(हरू)लाई छान्नको लागि तलको नक्सामा क्लिक गर्नुहोस् ।',
"You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": "तपाईले समूह नाम, ब्याख्या वा टिप्पणीहरू र संस्था नाम वा सम्बन्धित नामको आधारमा खोज्न सक्नुहुन्छ । तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । तालिकामा सबै नराखिकन 'खोजी'मा क्लिक गर्नुहोस् ।",
"You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": "पाठ्यक्रम नाम, स्थान नाम वा कार्यक्रम टिप्पणी(हरू)को आधारमा खोजी । तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । सबै कार्यक्रमहरू तालिकामा नहाली 'खोजी' मा थिच्नुहोस् ।",
"You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "काम शिर्षक वा ब्यक्ति नाम अनुसार खोजी गर्न सक्नुहुन्छ - कुनैपनि पहिलो, बीचको वा अन्तिम नामहरू, स्पेसद्वारा छुटाएर टाइप गर्न सक्नुहुन्छ । तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । सबै व्यक्तिहरू तालिकामा नहाली 'खोजी' मा थिच्नुहोस् ।",
'You can search by name, acronym or comments': 'नाम, मिल्दोनाम वा टिप्पणी(हरू)को आधारमा खोजी गर्न सक्नुहुन्छ ।',
'You can search by name, acronym, comments or parent name or acronym.': 'नाम, मिल्दो नाम, टिप्पणी(हरू) वा परिवार नाम वा मिल्दोनामको आधारमा खोजी गर्न सक्नुहुन्छ ।',
"You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "ब्यक्तिको नामको आधारमा खोजी गर्न सक्नुहुन्छ- कुनैपनि पहिलो, बीचको वा अन्तिमको नाम(हरू), स्पेसले छुट्यएर टाइप गर्न सक्नुहुन्छ । तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । सबै व्यक्तिहरू तालिकामा नहाली 'खोजी' मा थिच्नुहोस् ।",
"You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": "तपाईंले तालिम दिने व्यक्तिको नाम, पाठ्यक्रम नाम वा टिप्पणी(हरू). तपाईंले % वाइल्डकार्डको रूपमा प्रयोग गर्न सक्नुहुन्छ । सबै तालिम दिने व्यक्तिहरूली तालिकामा नहाली 'खोजी' मा थिच्नुहोस् ।",
'You can select an area on the image and save to crop it.': 'तपाईंले तस्बिरको क्षेत्रमा छानेर र काट्नको निम्ति संचित गर्न सक्नुहुन्छ ।',
'You can select the Draw tool': 'तपाईंले चित्र बनाउने टुल छान्न सक्नुहुन्छ',
'You can set the modem settings for SMS here.': 'एस.एम.एस.को लागि तपाईंले यहाँ मोडेम मिलाउन सक्नुहुन्छ ।',
'You do not have permission for any facility to perform this action.': 'यो कार्य गर्नको निम्ति तपाईंसँग कुनैपनि सुविधा छैन ।',
'You do not have permission for any organization to perform this action.': 'यो कार्य गर्नको निम्ति तपाईंसँग कुनैपनि संस्था छैन ।',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "तपाईंसँग संचित नगरिएको परिवर्तन(हरू) छन् । रद्द गर्नुहोस् र संचित गर्नको निम्ति 'संचित' मा क्लिक गर्नुहोस् । अस्विकार गर्नको निम्ति 'हुन्छ' मा क्लिक गर्नुहोस् ।",
'You have unsaved changes. You need to press the Save button to save them': "तपाईंसँग संचित नगरिएको परिवर्तन(हरू) छन् । तिनिहरूलाई संचित गर्नको निम्ति 'संचित' बटन थिच्नुहोस् ।",
'You must agree to the Terms of Service': 'तपाईंले सेवाको नियमलाई मान्नै पर्छ ।',
'You must enter a minimum of %d characters': 'तपाईंले कम्तिमा %d अक्षरहरू प्रवेश गर्नैपर्छ ।',
'You need to have at least 2 records in this list in order to merge them.': 'मिलाउनको निम्ति तपाईंसँग तालिकामा कम्तिमा २ विवरणहरू हुनै पर्छ ।',
'Your name for this search. Notifications will use this name.': 'यो खोजीको लागि तपाईंको नाम । सूचना घण्टि(हरू) ले यो नाम प्रयोग गर्नेछ ।',
'Your request for Red Cross and Red Crescent Resource Management System (RMS) has been approved and you can now access the system at': 'रेड क्रस र रेड क्रिसेन्ट स्रोत ब्यबस्थापन प्रकृया (आर.एम.एस.) को लागि तपाईंको अनुरोध प्रमाणित भएको छ र अब तपाईं प्रकृयामा पहुँच प्राप्त गर्न सक्नुहुन्छ ।',
'Youth and Volunteer Development': 'युवा तथा स्वयंसेवक विकास',
'Youth Development': 'युवा विकास',
'Youth Leadership Development': 'जवान अगुवाइ विकास',
'Zonal': 'अञ्चल',
'Zone': 'क्षेत्र',
'Zoom': 'नजिक ल्याउनुहोस्',
'Zoom In': 'नजिक ल्याउनुहोस्',
'Zoom in closer to Edit OpenStreetMap layer': 'खुलासडकनक्सा तहलाई नजिक तान्नुहोस्',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'नजिक ल्याउनुहोस्: नक्सामा क्लिक गर्नुहोस् वा माउसको वायाँ बटन थिच्नुहोस् र चतुर्भुज बनाउनुहोस्',
'Zoom Levels': 'जुम स्तरहरू',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'टाढा लानुहोस्: नक्सामा क्लिक गर्नुहोस् वा माउँसको वायाँ बटम थिच्नुहोस् र चत्तुर्भुजा बनाउनको निम्ति तान्नुहोस्',
'Zoom to Current Location': 'हालको स्थानलाई नजिक तान्नुहोस्',
'Zoom to maximum map extent': 'बढि नक्सा क्षेत्र देखिने गरि नजिक तान्नुहोस्',
}
| mit | -359,877,159,719,963,600 | 56.044527 | 477 | 0.599018 | false |
PySimulator/PySimulator | PySimulator/Plugins/SimulationResult/__init__.py | 1 | 1172 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
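
# Plug-in discovery: every immediate subdirectory of this package (hidden
# directories excluded) is treated as a SimulationResult plug-in and is
# imported dynamically below.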
def get_immediate_subdirectories(directory):
return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name)) and name[0] != '.']
PlugInNames = get_immediate_subdirectories(os.path.abspath(os.path.dirname(__file__)))
plugin = []
for i in range(len(PlugInNames)):
try:
mod = __import__(PlugInNames[i] + "." + PlugInNames[i], locals(), globals(), [PlugInNames[i] + "." + PlugInNames[i]])
plugin.append(mod)
except ImportError as e:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + e.message + "'"
except SyntaxError as e:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + str(e) + "'"
except Exception as e:
info = str(e)
if info == '' or info is None:
print PlugInNames[i] + " plug-in could not be loaded."
else:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + info + "'"
fileExtension = []
description = []
for p in plugin:
fileExtension.append(p.fileExtension)
description.append(p.description)
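
# A minimal usage sketch (hedged): each plug-in module is assumed to expose
# module-level `fileExtension` and `description` attributes, which the loop
# above collects into parallel lists. A caller could then do, for example:
#
#   from Plugins import SimulationResult
#   for ext, desc in zip(SimulationResult.fileExtension,
#                        SimulationResult.description):
#       print ext, "-", desc
#
# The `Plugins.SimulationResult` import path is an assumption based on this
# file's location in the repository.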
| lgpl-3.0 | -7,544,794,805,492,171,000 | 38 | 125 | 0.625641 | false |
useblocks/groundwork | groundwork/plugins/gw_recipes_builder.py | 1 | 3282 | # -*- coding: utf-8 -*-
import os
from click import Argument
from groundwork.patterns import GwCommandsPattern, GwRecipesPattern
class GwRecipesBuilder(GwCommandsPattern, GwRecipesPattern):
"""
Provides commands for listing and building recipes via command line interface.
Provided commands:
* recipe_list
* recipe_build
    It also provides the recipe **gw_package**, which can be used to set up a groundwork-related Python package.
Content of the package:
* setup.py: Preconfigured and ready to use.
* groundwork package structure: Directories for applications, patterns, plugins and recipes.
* Simple, runnable example of a groundwork application and plugins.
    * usable tests, supported by py.test and tox.
* expandable documentation, supported by sphinx and the groundwork sphinx template.
* .gitignore
    This code is heavily based on Cookiecutter's main.py file:
https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/main.py
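
    A hedged usage sketch from the command line (the executable name
    ``my_app`` is an assumption for a groundwork application that has this
    plugin activated; the command names are the ones registered in
    activate())::

        my_app recipe_list
        my_app recipe_build gw_package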
"""
def __init__(self, *args, **kwargs):
self.name = kwargs.get("name", self.__class__.__name__)
super(GwRecipesBuilder, self).__init__(*args, **kwargs)
def activate(self):
self.commands.register("recipe_list", "Lists all recipes", self._recipe_list)
self.commands.register("recipe_build", "Builds a given recipe", self._recipe_build,
params=[Argument(("recipe",), required=True)])
self.recipes.register("gw_package",
os.path.abspath(os.path.join(os.path.dirname(__file__), "../recipes/gw_package")),
description="Groundwork basic package. Includes places for "
"apps, plugins, patterns and recipes.",
final_words="Recipe Installation is done.\n\n"
"During development use buildout:\n"
"Run: python bootstrap.py\n"
"Then: bin/buildout\n"
"Start the app: bin/app\n\n"
"For installation run: 'python setup.py install' \n"
"For documentation run: 'make html' inside doc folder "
"(after installation!)\n\n"
"For more information, please take a look into the README file "
"to know how to go on.\n"
"For help visit: https://groundwork.readthedocs.io\n\n"
"Have fun with your groundwork package.")
def deactivate(self):
pass
def _recipe_list(self):
print("Recipes:")
for key, recipe in self.app.recipes.get().items():
print(" %s by plugin '%s' - %s" % (recipe.name, recipe.plugin.name, recipe.description))
def _recipe_build(self, recipe):
recipe_obj = self.app.recipes.get(recipe)
if recipe_obj is None:
print("Recipe %s not found." % recipe)
else:
recipe_obj.build(no_input=False, extra_context=None)
| mit | -8,096,438,655,980,942,000 | 45.885714 | 112 | 0.555454 | false |
rpavlik/jhbuild-vrjuggler | jhbuild/frontends/buildscript.py | 1 | 11073 | # jhbuild - a build script for GNOME 1.x and 2.x
# Copyright (C) 2001-2006 James Henstridge
# Copyright (C) 2003-2004 Seth Nickell
#
# buildscript.py: base class of the various interface types
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
from jhbuild.utils import packagedb
from jhbuild.errors import FatalError, CommandError, SkipToPhase, SkipToEnd
class BuildScript:
def __init__(self, config, module_list=None):
if self.__class__ is BuildScript:
raise NotImplementedError('BuildScript is an abstract base class')
self.modulelist = module_list
self.module_num = 0
self.config = config
# the existence of self.config.prefix is checked in config.py
if not os.access(self.config.prefix, os.R_OK|os.W_OK|os.X_OK):
raise FatalError(_('install prefix (%s) must be writable') % self.config.prefix)
if not os.path.exists(self.config.checkoutroot):
try:
os.makedirs(self.config.checkoutroot)
except OSError:
raise FatalError(
_('checkout root (%s) can not be created') % self.config.checkoutroot)
if not os.access(self.config.checkoutroot, os.R_OK|os.W_OK|os.X_OK):
raise FatalError(_('checkout root (%s) must be writable') % self.config.checkoutroot)
if self.config.copy_dir and not os.path.exists(self.config.copy_dir):
try:
os.makedirs(self.config.copy_dir)
except OSError:
raise FatalError(
_('checkout copy dir (%s) can not be created') % self.config.copy_dir)
if not os.access(self.config.copy_dir, os.R_OK|os.W_OK|os.X_OK):
raise FatalError(_('checkout copy dir (%s) must be writable') % self.config.copy_dir)
packagedbdir = os.path.join(self.config.prefix, 'share', 'jhbuild')
try:
if not os.path.isdir(packagedbdir):
os.makedirs(packagedbdir)
except OSError:
raise FatalError(_('could not create directory %s') % packagedbdir)
self.packagedb = packagedb.PackageDB(os.path.join(packagedbdir,
'packagedb.xml'))
def execute(self, command, hint=None, cwd=None, extra_env=None):
'''Executes the given command.
If an error occurs, CommandError is raised. The hint argument
gives a hint about the type of output to expect.
'''
raise NotImplementedError
def build(self, phases=None):
'''start the build of the current configuration'''
self.start_build()
failures = [] # list of modules that couldn't be built
self.module_num = 0
for module in self.modulelist:
self.module_num = self.module_num + 1
if self.config.min_age is not None:
installdate = self.packagedb.installdate(module.name)
if installdate > self.config.min_age:
self.message(_('Skipping %s (installed recently)') % module.name)
continue
self.start_module(module.name)
failed = False
for dep in module.dependencies:
if dep in failures:
if self.config.module_nopoison.get(dep,
self.config.nopoison):
self.message(_('module %(mod)s will be built even though %(dep)s failed')
% { 'mod':module.name, 'dep':dep })
else:
self.message(_('module %(mod)s not built due to non buildable %(dep)s')
% { 'mod':module.name, 'dep':dep })
failed = True
if failed:
failures.append(module.name)
self.end_module(module.name, failed)
continue
if not phases:
build_phases = self.get_build_phases(module)
else:
build_phases = phases
phase = None
num_phase = 0
# if there is an error and a new phase is selected (be it by the
# user or an automatic system), the chosen phase must absolutely
# be executed; it should under no condition be skipped automatically.
# The force_phase variable flags that condition.
force_phase = False
while num_phase < len(build_phases):
last_phase, phase = phase, build_phases[num_phase]
try:
if not force_phase and module.skip_phase(self, phase, last_phase):
num_phase += 1
continue
except SkipToEnd:
break
if not module.has_phase(phase):
# skip phases that do not exist; this can happen when
# phases were explicitly passed to this method.
num_phase += 1
continue
self.start_phase(module.name, phase)
error = None
try:
try:
error, altphases = module.run_phase(self, phase)
except SkipToPhase, e:
try:
num_phase = build_phases.index(e.phase)
except ValueError:
break
continue
except SkipToEnd:
break
finally:
self.end_phase(module.name, phase, error)
if error:
try:
nextphase = build_phases[num_phase+1]
except IndexError:
nextphase = None
newphase = self.handle_error(module, phase,
nextphase, error,
altphases)
force_phase = True
if newphase == 'fail':
failures.append(module.name)
failed = True
break
if newphase is None:
break
if newphase in build_phases:
num_phase = build_phases.index(newphase)
else:
# requested phase is not part of the plan, we insert
# it, then fill with necessary phases to get back to
# the current one.
filling_phases = self.get_build_phases(module, targets=[phase])
canonical_new_phase = newphase
if canonical_new_phase.startswith('force_'):
# the force_ phases won't appear in normal build
# phases, so get the non-forced phase
canonical_new_phase = canonical_new_phase[6:]
if canonical_new_phase in filling_phases:
filling_phases = filling_phases[
filling_phases.index(canonical_new_phase)+1:-1]
build_phases[num_phase:num_phase] = [newphase] + filling_phases
if build_phases[num_phase+1] == canonical_new_phase:
# remove next phase if it would just be a repeat of
# the inserted one
del build_phases[num_phase+1]
else:
force_phase = False
num_phase += 1
self.end_module(module.name, failed)
self.end_build(failures)
if failures:
return 1
return 0
def get_build_phases(self, module, targets=None):
'''returns the list of required phases'''
if targets:
tmp_phases = targets[:]
else:
tmp_phases = self.config.build_targets[:]
i = 0
while i < len(tmp_phases):
phase = tmp_phases[i]
depadd = []
try:
phase_method = getattr(module, 'do_' + phase)
except AttributeError:
# unknown phase for this module type, simply skip
del tmp_phases[i]
continue
if hasattr(phase_method, 'depends'):
for subphase in phase_method.depends:
if subphase not in tmp_phases[:i+1]:
depadd.append(subphase)
if depadd:
tmp_phases[i:i] = depadd
else:
i += 1
# remove duplicates
phases = []
for phase in tmp_phases:
if not phase in phases:
phases.append(phase)
return phases
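    # Illustrative note (hypothetical module, not taken from jhbuild itself):
    # if build_targets is ['install'], the module's do_install declares
    # depends = ('build',) and do_build declares depends = ('configure',),
    # then the loop above expands the list in place to
    # ['configure', 'build', 'install'] before the final deduplication pass.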
def start_build(self):
'''Hook to perform actions at start of build.'''
pass
def end_build(self, failures):
'''Hook to perform actions at end of build.
The argument is a list of modules that were not buildable.'''
pass
def start_module(self, module):
'''Hook to perform actions before starting a build of a module.'''
pass
def end_module(self, module, failed):
'''Hook to perform actions after finishing a build of a module.
The argument is true if the module failed to build.'''
pass
def start_phase(self, module, phase):
'''Hook to perform actions before starting a particular build phase.'''
pass
def end_phase(self, module, phase, error):
'''Hook to perform actions after finishing a particular build phase.
The argument is a string containing the error text if something
went wrong.'''
pass
def message(self, msg, module_num=-1):
'''Display a message to the user'''
raise NotImplementedError
def set_action(self, action, module, module_num=-1, action_target=None):
'''inform the buildscript of a new stage of the build'''
raise NotImplementedError
def handle_error(self, module, phase, nextphase, error, altphases):
'''handle error during build'''
raise NotImplementedError
| gpl-2.0 | 3,736,484,522,379,380,700 | 40.943182 | 101 | 0.530118 | false |
MC911-MV-1s2016/lya-compiler-python | lyacompiler/lya_debug_source.py | 1 | 11212 | lya_source_dcl = """
dcl dcl1 int;
dcl dcl2, dcl3, dcl4, dcl5 char;
dcl dcl6, dcl7 int, dcl8 bool;
dcl dcl9 int = 5;
dcl dcl10, dcl11 int = 6;
dcl dcl12 int, dcl13, dcl14 int = 10;
dcl dcl15 int (2:5);
dcl dcl16 char (0:10);
dcl dcl17 bool(10:11);
dcl dcl18 dcl17 (1:2);
dcl dcl19 int (0:1) (1:2);
"""
lya_source_syn = """
syn syn1 = 1;
syn syn2, syn3, syn4 = 3;
syn syn5 int = 2;
syn syn6, syn7 int = 3;
syn syn8 = 10, syn9 = 12;
syn syn10, syn11 int = 13, syn12 = 20;
"""
lya_source_type = """
type type1 = int;
type type2 = char;
type type3 = bool;
type type4 = type3;
type type7, type8 = int;
type type9, type10, type11 = char;
type type12 = bool, type13 = type9;
type type14 = int, type15, type16 = char, type17, type18, type19 = char;
type type20 = ref int;
type type21 = ref ref type20;
type type22 = chars[20];
type type23 = array [int] char;
type type24 = array[1:2] bool;
type type25 = array[int, bool, char, mode1(1:4), int(3:5), 1:5] bool;
"""
lya_source_composite_mode = """
dcl cms1 chars [10];
dcl cma1 array [int] bool;
dcl cma2 array [bool, int] char;
"""
lya_source_procedure1 = """
power: proc (n int, r int) returns (int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure2 = """
power: proc (n int, r int) returns (int);
end;
"""
lya_source_procedure3 = """
power: proc (n int, r int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure4 = """
power: proc () returns (int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure5 = """
power: proc (n int, r int);
end;
"""
lya_source_procedure6 = """
power: proc () returns (int);
end;
"""
lya_source_procedure7 = """
power: proc ();
dcl c int;
end;
"""
lya_source_procedure8 = """
power: proc ();
end;
"""
lya_source_procedure9 = """
power: proc (n int loc, r, z int) returns (int loc);
dcl c, d int = 1;
type t = bool;
end;
"""
lya_source_if1 = """
label: if 1+2 then
exit label1;
else
exit label2;
fi;
"""
lya_source_if2 = """
if 1+2 then
exit label1;
exit label2;
fi;
"""
lya_source_if3 = """
if 1+2 then
else
exit label2;
exit label3;
fi;
"""
lya_source_if4 = """
if 1+2 then
else
fi;
"""
lya_source_if5 = """
if 1+2 then
exit label1;
elsif 1+2 then
exit label2;
exit label22;
else
exit lable3;
fi;
"""
lya_source_if6 = """
if 1+2 then
exit label1;
elsif 1+2 then
exit label2;
exit label22;
fi;
"""
lya_source_if7 = """
if 1+2 then
if 1+3 then
exit label1;
fi;
elsif 1+2 then
exit label2;
if 2+5 then
else
exit label22;
fi;
else
if 2+5 then
exit a1;
elsif 1+2 then
exit label22;
fi;
fi;
"""
lya_source_action1 = """
label1: ac1 = 10 + 10;
ac2 += 2;
ac3 -= 10;
ac4 *= 55;
ac5 /= 1;
ac5 %= 20;
ac6 &= 2;
"""
lya_source_expression = """
dcl var1 int=3+5-7*7/9%3;
dcl var2 int = 2 in 3;
dcl var3 bool = 5 && 3 || 1 == 2 & 2;
dcl var4 bool = if 2 then 3 else 5 fi;
dcl var2 int = var1 + 3;
"""
lya_source_action2 = """
exit label1;
result 1 + 2;
return;
return 2 + 1;
"""
lya_source_call1 = """
function();
function(1);
function(1, 2);
function(1+2, 2);
function(1,2,3/2);
"""
lya_source_call2 = """
num(1);
pred();
succ(1,2);
upper(1/2);
lower(2/3);
length();
read(100);
print(var2+2);
"""
lya_source_do1 = """
dcl var int = 3;
do od;
do var = 2; od;
do while 1; od;
do while 3; var = 32; od;
"""
lya_source_do2 = """
do for counter in int; od;
do for counter in bool; var3 = 12; od;
do for counter down in char; od;
do for counter in int while 3; var = 32; od;
do for counter = 3 to 8; od;
do for counter = 3 down to 8; od;
do for counter = 3 by 5 to 8; od;
do for counter = 3 by 5 down to 8; od;
"""
lya_source_do3 = """
dcl var int = 3;
do od;
do var = 2; od;
do while var; od;
do while 3; var = 32; od;
"""
test2_source = """dcl m int = 2, n int = 3;
p: proc (x int);
dcl s int;
s = m * x;
print("s = ", s);
end;
p(n);
print(m);"""
test3_source = """dcl m int = 2, n int = 3;
p: proc (x, y int, b bool) returns (int);
dcl s int = x;
if b then
s += y;
result s;
else
result y;
fi;
end;
dcl b bool;
read (b);
print (p(m, n, b));"""
test4_source = """dcl i int, b bool = true;
x:
do while b;
read (i);
if i <= 0 then
exit x;
fi;
print (i*i);
od;
print (0);"""
test5_source = """dcl i, soma int;
soma = 0;
do for i=1 to 10;
soma += i;
od;
print (soma);
"""
test6_source = """dcl i int;
dcl soma int = 0, b bool = true;
do for i=1 to 10 while b;
soma += i;
if soma > 100 then
b = false;
fi;
od;
print (soma);"""
test7_source = """dcl i,j int, r ref int;
p: proc(x int, y ref int) returns (int);
dcl b bool;
read(b);
if b then
y = -> i;
result y->;
else
y = r;
result r->;
fi;
end;
read(i);
r = -> i;
print(p(i,->j));"""
test8_source = """dcl i int, j,k int = 2;
p: proc(x int, y int loc) returns (int loc);
dcl z int = y;
y = x;
result k;
print(z); /* print 2 */
end;
i = p(3,j);
print(i, j); /* print 2,3 */"""
test9_source = """dcl a array[3:10] int;
dcl i,j int;
read(j);
a[3]=2*j;
do
for i = 4 to 10;
a[i] = 5+i;
od;
print(a[j]);"""
test10_source = """dcl x, y int;
p: proc (b bool) returns (int loc);
if b then
result x;
else
result y;
fi;
end;
dcl b bool = false;
p(b) = 20;
p(true) = 10;
print(x, y); // display 10, 20
"""
test11_source = """type vector = array[1:10] int;
dcl v vector, i int;
sum: proc (v vector) returns (int);
dcl s, i int;
i = 1;
s = 0;
do
while i<=10;
s = s + v[i];
i += 1;
od;
return s;
end;
do
for i = 1 to 10;
read(v[i]);
od;
print(sum(v));"""
syn_test_source = """syn sy1 = 20;
syn sy6 = sy1;
syn sy2 char = 'c';
syn sy3 bool = true;
syn sy4 int = 1 + sy1;"""
dcl_op_source = """dcl var1 int=3+5-7*7/9%3; dcl var2 int = 2 in 3;"""
dcl_op_source2 = """dcl var2, varx char;\ndcl var3, var4 int = 10;\ndcl var5 = 10 + 5 * (10 - 20);"""
test_rel_exp_source = """dcl m bool = false, n bool = false;
p: proc (x bool);
dcl s bool;
s = m >= x;
end;
p(n);"""
test_unary_op_source = """dcl m int = 2, n int = 3;
p: proc (x int);
dcl s bool;
s = !true;
end;
p(n);"""
test_elsif_source = """dcl m int = 2, n int = 3, y, s int, b bool = true;
if b then
s += y;
elsif b then
s = y;
else
s = 3;
fi;
print (s);"""
testret_source = """dcl m int = 2, n int = 3;
p: proc (x, y int, b bool) returns (int);
dcl s int = x;
if b then
s += y;
return s;
else
result y;
fi;
end;
dcl b bool = true;
read (b);
print (p(m, n, b));"""
typedef_source = """type my_int = int;
dcl x my_int = 2;
type vector = array[1:10] int;
dcl v vector;
type p_int = ref int;
dcl pi p_int;
print(x);
print(v);
print(pi);
type r_my_int = ref my_int;
dcl uou r_my_int;
print(uou);"""
printtest_source = """
dcl c chars[10] = "BANANA";
print("Oi", "tudo bem?");
print(c);"""
# The only variable exported from this module.
__all__ = ['lya_debug_source']
lya_gcd = """
gcd: proc (x int, y int) returns (int);
dcl g int;
g = y;
do
while x > 0;
g = x;
x = y - (y/x) * x;
y = g;
od;
return g;
end;
dcl a, b int;
print("give-me two integers separated by space:");
read(a);
read(b);
print ("GCD of ", a, b, " is ", gcd(a,b));"""
lya_gen_primes = """dcl n1, n2, i, j int, flag bool;
print("Enter 2 numbers (intervals) separated by space: ");
read(n1);
read(n2);
print("Prime numbers between ", n1, " and ", n2, " are:\n");
do
for i = n1 to n2;
flag = true;
loop: do
for j = 2 to i/2;
if i % j == 0 then
flag = false;
exit loop;
fi;
od;
if flag then
print(i, " ");
fi;
od;
"""
lya_bubble_sort = """dcl v array[0:100] int;
dcl n, c, d, swap int;
print("Enter number of elements: ");
read(n);
print("Enter ", n, " integers\n");
do
for c = 0 to n-1;
read(v[c]);
od;
do
for c = 0 to n-2;
do
for d = 0 to n-c-2;
// For decreasing order use "<"
if v[d] > v[d+1] then
swap = v[d];
v[d] = v[d+1];
v[d+1] = swap;
fi;
od;
od;
print("Sorted list in ascending order:\n");
do
for c = 0 to n-1;
print(v[c], " ");
od;
"""
lya_palindrome = """dcl n,t int, reverse int = 0;
print("Enter a number: ");
read(n);
t = n;
do
while t != 0;
reverse = reverse * 10;
reverse = reverse + t % 10;
t = t / 10;
od;
if n == reverse then
print(n, " is a palindrome number.\n");
else
print(n, " is not a palindrome number.\n");
fi;"""
lya_ref_example = """swapByRef: proc(x ref int, y ref int);
dcl t int = x->;
x-> = y->;
y-> = t;
end;
dcl i int = 10, j int = 20;
// declaring reference to int
dcl r ref int = ->i;
swapByRef( r, ->j );
print(i, j);"""
lya_fibo = """fibo: proc (n int, g int loc);
dcl h int;
if n < 0 then
print(g);
return;
else
h = g; fibo(n-1, h);
g = h; fibo(n-2, g);
fi;
print(n,g);
end;
dcl k int = 0;
fibo(3,k);
//fibo(-1,k);
"""
lya_armstrong = """power: proc (n int, r int) returns (int);
dcl c int, p int = 1;
do
for c = 1 to r;
p = p*n;
od;
return p;
end;
dcl n int, sum int = 0;
dcl temp, remainder int, digits int = 0;
print("Input an integer: ");
read(n);
temp = n;
do
while temp != 0;
digits += 1;
temp = temp / 10;
od;
temp = n;
do
while temp != 0;
remainder = temp % 10;
sum = sum + power(remainder, digits);
temp = temp / 10;
od;
if n == sum then
print(n, " is an Armstrong number.\n");
else
print(n, " is not an Armstrong number.\n");
fi;"""
lya_fat = """
fat: proc (n int) returns (int);
if n==0 then
return 1;
else
return n * fat (n-1);
fi;
end;
dcl x int;
print("give-me a positive integer:");
read(x);
print("fatorial of ", x, " = ", fat(x));"""
lya_int_stack = """syn top int = 10;
type stack = array [1:top+1] int;
push: proc (s stack loc, elem int);
if s[top+1] == top then
print("stack is full");
else
s[top+1] += 1;
s[s[top+1]] = elem;
fi;
end;
pop: proc (s stack loc) returns (int);
if s[top+1] == 0 then
print("empty stack");
result 0;
else
result s[s[top+1]];
s[top+1] -= 1;
fi;
end;
init: proc (s stack loc);
s[top+1] = 0;
end;
dcl q stack, v1, v2 int;
init(q);
read(v1, v2);
push(q,v1);
push(q,v2);
print(pop(q) + pop(q));"""
lya_debug_source = lya_bubble_sort
| bsd-3-clause | -97,219,455,865,958,500 | 16.382946 | 101 | 0.505619 | false |
Harmon758/Harmonbot | Twitch/Harmonbot.py | 1 | 8336 |
from twitchio.ext import commands
import asyncio
import datetime
import os
import sys
import aiohttp
import asyncpg
import dotenv
import pyowm
from utilities import context
from utilities import logging
sys.path.insert(0, "..")
from units.games import eightball
sys.path.pop(0)
class Bot(commands.Bot):
def __init__(self, loop = None, initial_channels = None, **kwargs):
self.version = "3.0.0-b.138"
loop = loop or asyncio.get_event_loop()
if initial_channels is None:
initial_channels = []
initial_channels = list(initial_channels)
# Constants
self.char_limit = self.character_limit = 500
# aiohttp Client Session - initialized on ready
self.aiohttp_session = None
# Credentials
for credential in ("DATABASE_PASSWORD", "OWM_API_KEY", "POSTGRES_HOST", "WORDNIK_API_KEY",
"YANDEX_TRANSLATE_API_KEY"):
setattr(self, credential, os.getenv(credential))
if not self.POSTGRES_HOST:
self.POSTGRES_HOST = "localhost"
self.DATABASE_HOST = self.POSTGRES_HOST
## OpenWeatherMap
self.owm_client = pyowm.OWM(self.OWM_API_KEY)
self.weather_manager = self.owm_client.weather_manager()
# TODO: Async OWM calls
# TODO: Weather functions in location unit
# PostgreSQL database connection
self.db = self.database = self.database_connection_pool = None
self.connected_to_database = asyncio.Event()
self.connected_to_database.set()
loop.run_until_complete(self.initialize_database())
records = loop.run_until_complete(self.db.fetch("SELECT channel FROM twitch.channels"))
initial_channels.extend(record["channel"] for record in records)
super().__init__(loop = loop, initial_channels = initial_channels, **kwargs)
# TODO: Handle channel name changes, store channel ID
# Add commands with set responses
loop.run_until_complete(self.add_set_response_commands())
# Load cogs
for file in sorted(os.listdir("cogs")):
if file.endswith(".py"):
self.load_module("cogs." + file[:-3])
async def connect_to_database(self):
if self.database_connection_pool:
return
if self.connected_to_database.is_set():
self.connected_to_database.clear()
self.database_connection_pool = await asyncpg.create_pool(user = "harmonbot",
password = self.DATABASE_PASSWORD,
database = "harmonbot", host = self.DATABASE_HOST)
self.db = self.database = self.database_connection_pool
self.connected_to_database.set()
else:
await self.connected_to_database.wait()
async def initialize_database(self):
await self.connect_to_database()
await self.db.execute("CREATE SCHEMA IF NOT EXISTS twitch")
await self.db.execute(
"""
CREATE TABLE IF NOT EXISTS twitch.birthdays (
channel TEXT PRIMARY KEY,
month INT,
day INT
)
"""
)
await self.db.execute(
"""
CREATE TABLE IF NOT EXISTS twitch.channels (
channel TEXT PRIMARY KEY
)
"""
)
await self.db.execute(
"""
CREATE TABLE IF NOT EXISTS twitch.commands (
channel TEXT,
name TEXT,
response TEXT,
PRIMARY KEY (channel, name)
)
"""
)
await self.db.execute(
"""
CREATE TABLE IF NOT EXISTS twitch.aliases (
channel TEXT,
name TEXT,
alias TEXT,
PRIMARY KEY (channel, alias),
FOREIGN KEY (channel, name) REFERENCES twitch.commands (channel, name) ON DELETE CASCADE
)
"""
)
await self.db.execute(
"""
CREATE TABLE IF NOT EXISTS twitch.counters (
channel TEXT,
name TEXT,
value INT,
PRIMARY KEY (channel, name)
)
"""
)
await self.db.execute(
"""
CREATE TABLE IF NOT EXISTS twitch.messages (
timestamp TIMESTAMPTZ PRIMARY KEY DEFAULT NOW(),
channel TEXT,
author TEXT,
message TEXT,
message_timestamp TIMESTAMPTZ
)
"""
)
await self.db.execute(
"""
CREATE TABLE IF NOT EXISTS twitch.locations (
channel TEXT PRIMARY KEY,
location TEXT
)
"""
)
await self.db.execute(
"""
CREATE TABLE IF NOT EXISTS twitch.toggles (
channel TEXT,
name TEXT,
status BOOLEAN,
PRIMARY KEY (channel, name)
)
"""
)
# Migrate variables
import json
for file in os.listdir("data/variables"):
channel = file[:-5] # - .json
with open(f"data/variables/{channel}.json", 'r') as variables_file:
variables = json.load(variables_file)
for name, value in variables.items():
if isinstance(value, bool) or value is None:
if name.endswith(".status"):
name = name[:-7]
await self.db.execute(
"""
INSERT INTO twitch.toggles (channel, name, status)
VALUES ($1, $2, $3)
ON CONFLICT (channel, name) DO
UPDATE SET status = $3
""",
channel, name, value
)
elif isinstance(value, int) and not name.startswith("birthday"):
await self.db.execute(
"""
INSERT INTO twitch.counters (channel, name, value)
VALUES ($1, $2, $3)
ON CONFLICT (channel, name) DO
UPDATE SET value = $3
""",
channel, name, value
)
async def add_set_response_commands(self):
"""Add commands with set responses"""
records = await self.db.fetch("SELECT name, response FROM twitch.commands WHERE channel = 'harmonbot'")
def set_response_command_wrapper(response):
async def set_response_command(ctx):
await ctx.send(response)
return set_response_command
for record in records:
self.add_command(commands.Command(name = record["name"],
func = set_response_command_wrapper(record["response"])))
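        # Note on the closure above (explanatory comment, not original code):
        # the wrapper freezes each response in its own scope. Binding
        # record["response"] directly inside the loop would leave every
        # generated command replying with the last row fetched, because of
        # Python's late binding of loop variables.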
async def event_ready(self):
print(f"Ready | {self.nick}")
# Initialize aiohttp Client Session
if not self.aiohttp_session:
self.aiohttp_session = aiohttp.ClientSession(loop = self.loop)
async def event_message(self, message):
# Log messages
await self.db.execute(
"""
INSERT INTO twitch.messages (timestamp, channel, author, message, message_timestamp)
VALUES ($1, $2, $3, $4, $5)
""",
datetime.datetime.now(), message.channel.name, message.author.name, message.content,
None if message.echo else message.timestamp.replace(tzinfo = datetime.timezone.utc)
)
# Ignore own messages
if message.author.name == "harmonbot":
return
# Get Context
ctx = await self.get_context(message, cls = context.Context)
# Handle channel-specific commands with set responses
if ctx.prefix and ctx.channel.name != "harmonbot":
arguments = message.content[len(ctx.prefix):].lstrip().lower().split()
if arguments:
command = arguments[0]
aliased = await self.db.fetchval(
"""
SELECT name
from twitch.aliases
WHERE channel = $1 AND alias = $2
""",
ctx.channel.name, command
)
if aliased:
command = aliased
response = await self.db.fetchval(
"""
SELECT response
FROM twitch.commands
WHERE channel = $1 AND name = $2
""",
ctx.channel.name, command
)
if response:
await ctx.send(response)
ctx.channel_command = command
# Return? Override main commands?
# Handle commands
await self.handle_commands(message, ctx = ctx)
# TODO: command on/off settings
# TODO: help command, command help?
if message.content.startswith('\N{BILLIARDS}'):
await ctx.send(f"\N{BILLIARDS} {eightball()}")
async def event_command_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send(str(error).replace('`', "'").replace("<class ", "").replace('>', ""))
elif isinstance(error, commands.CommandNotFound):
# TODO: Handle command not found
if ctx.channel_command:
return
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(str(error).rstrip('.').replace("argument", "input"))
else:
# TODO: Sentry
await super().event_command_error(ctx, error)
async def event_raw_data(self, data):
logging.raw_data_logger.info(data)
@commands.command()
async def test(self, ctx):
await ctx.send("Hello, World!")
@commands.command(aliases = ("8ball", '\N{BILLIARDS}'))
async def eightball(self, ctx):
await ctx.send(f"\N{BILLIARDS} {eightball()}")
dotenv.load_dotenv()
bot = Bot(irc_token = os.getenv("TWITCH_BOT_ACCOUNT_OAUTH_TOKEN"),
client_id = os.getenv("TWITCH_CLIENT_ID"),
nick = "harmonbot", prefix = '!')
bot.run()
| mit | -940,917,193,460,998,400 | 27.944444 | 105 | 0.658709 | false |
argriffing/arb | doc/source/conf.py | 1 | 8167 | # -*- coding: utf-8 -*-
#
# Arb documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 11 09:33:44 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Arb'
copyright = u'2012-2016, Fredrik Johansson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
for _line in open("../../arb/version.c").readlines():
if _line.startswith("const char * arb_version"):
_i1 = _line.find('"')
_i2 = _line.find('"', _i1 + 1)
version = _line[_i1+1:_i2]
release = version
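# e.g. a line of the form: const char * arb_version = "2.8.1";
# in ../../arb/version.c yields version = release = "2.8.1"
# (the version string shown here is only an example).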
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'math'
latex_preamble = r"""
\usepackage{amsmath,amssymb}
\usepackage{breakurl}
\setcounter{tocdepth}{2}
"""
primary_domain = 'c'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'sidebarwidth' : 300}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/arbwhite.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/arb.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Arbdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
'fontpkg': '',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '\\usepackage{lmodern}\n\\setcounter{tocdepth}{2}\n\\urlstyle{tt}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Arb.tex', u'Arb Documentation',
u'Fredrik Johansson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/arbtext.pdf"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'arb', u'Arb Documentation',
[u'Fredrik Johansson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Arb', u'Arb Documentation',
u'Fredrik Johansson', 'Arb', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| lgpl-2.1 | 4,570,452,874,068,180,000 | 31.027451 | 81 | 0.697563 | false |
pmuller/ipkg | ipkg/versions.py | 1 | 1177 | import __builtin__ # because we override sorted in this module
import pkg_resources
def compare(a, b):
if a < b:
return -1
elif a == b:
return 0
else: # a > b
return 1
def extract(item):
if isinstance(item, dict):
version = item['version']
revision = item['revision']
else:
version = item.version
revision = item.revision
return parse(version), parse(str(revision))
def parse(version):
"""Parses a ``version`` string.
Currently a simple wrapper around ``pkg_resources.parse_version()``,
    for API purposes. Parsing could change later.
"""
return pkg_resources.parse_version(version)
def sorted(versions, parser=parse, reverse=False):
"""Returned sorted ``versions``.
"""
return __builtin__.sorted(versions, key=parser, cmp=compare,
reverse=reverse)
def most_recent(versions, parser=parse):
"""Returns the most recent version among ``versions``.
* ``versions`` must be an iterable of versions.
* ``parser`` defaults to ``parse`` which parses version strings.
"""
return sorted(versions, reverse=True)[0]
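# Hedged usage sketch (doctest-style, not part of the original module); note
# that the module-level `sorted` defined above shadows the builtin:
# >>> vs = ['1.0.2', '1.0.10', '0.9']
# >>> sorted(vs)
# ['0.9', '1.0.2', '1.0.10']
# >>> most_recent(vs)
# '1.0.10'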
| mit | -3,061,816,019,314,012,000 | 24.042553 | 72 | 0.620221 | false |
MRtrix3/mrtrix3 | lib/mrtrix3/fsl.py | 1 | 6782 | # Copyright (c) 2008-2021 the MRtrix3 contributors.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Covered Software is provided under this License on an "as is"
# basis, without warranty of any kind, either expressed, implied, or
# statutory, including, without limitation, warranties that the
# Covered Software is free of defects, merchantable, fit for a
# particular purpose or non-infringing.
# See the Mozilla Public License v. 2.0 for more details.
#
# For more details, see http://www.mrtrix.org/.
import os
from distutils.spawn import find_executable
from mrtrix3 import MRtrixError
_SUFFIX = ''
# Functions that may be useful for scripts that interface with FMRIB FSL tools
# FSL's run_first_all script can be difficult to wrap, since it does not provide
# a meaningful return code, and may run via SGE, which then requires waiting for
# the output files to appear.
def check_first(prefix, structures): #pylint: disable=unused-variable
from mrtrix3 import app, path #pylint: disable=import-outside-toplevel
vtk_files = [ prefix + '-' + struct + '_first.vtk' for struct in structures ]
existing_file_count = sum([ os.path.exists(filename) for filename in vtk_files ])
if existing_file_count != len(vtk_files):
if 'SGE_ROOT' in os.environ and os.environ['SGE_ROOT']:
app.console('FSL FIRST job may have been run via SGE; awaiting completion')
app.console('(note however that FIRST may fail silently, and hence this script may hang indefinitely)')
path.wait_for(vtk_files)
else:
app.DO_CLEANUP = False
raise MRtrixError('FSL FIRST has failed; ' + ('only ' if existing_file_count else '') + str(existing_file_count) + ' of ' + str(len(vtk_files)) + ' structures were segmented successfully (check ' + path.to_scratch('first.logs', False) + ')')
# Get the name of the binary file that should be invoked to run eddy;
# this depends on both whether or not the user has requested that the CUDA
# version of eddy be used, and the various names that this command could
# conceivably be installed as.
def eddy_binary(cuda): #pylint: disable=unused-variable
from mrtrix3 import app #pylint: disable=import-outside-toplevel
if cuda:
if find_executable('eddy_cuda'):
app.debug('Selected soft-linked CUDA version (\'eddy_cuda\')')
return 'eddy_cuda'
# Cuda versions are now provided with a CUDA trailing version number
# Users may not necessarily create a softlink to one of these and
# call it "eddy_cuda"
# Therefore, hunt through PATH looking for them; if more than one,
# select the one with the highest version number
binaries = [ ]
for directory in os.environ['PATH'].split(os.pathsep):
if os.path.isdir(directory):
for entry in os.listdir(directory):
if entry.startswith('eddy_cuda'):
binaries.append(entry)
max_version = 0.0
exe_path = ''
for entry in binaries:
try:
version = float(entry.lstrip('eddy_cuda'))
if version > max_version:
max_version = version
exe_path = entry
except:
pass
if exe_path:
app.debug('CUDA version ' + str(max_version) + ': ' + exe_path)
return exe_path
app.debug('No CUDA version of eddy found')
return ''
for candidate in [ 'eddy_openmp', 'eddy_cpu', 'eddy', 'fsl5.0-eddy' ]:
if find_executable(candidate):
app.debug(candidate)
return candidate
app.debug('No CPU version of eddy found')
return ''
# In some FSL installations, all binaries get prepended with "fsl5.0-". This function
# makes it more convenient to locate these commands.
# Note that if FSL 4 and 5 are installed side-by-side, the approach taken in this
# function will select the version 5 executable.
def exe_name(name): #pylint: disable=unused-variable
from mrtrix3 import app #pylint: disable=import-outside-toplevel
if find_executable(name):
output = name
elif find_executable('fsl5.0-' + name):
output = 'fsl5.0-' + name
app.warn('Using FSL binary \"' + output + '\" rather than \"' + name + '\"; suggest checking FSL installation')
else:
raise MRtrixError('Could not find FSL program \"' + name + '\"; please verify FSL install')
app.debug(output)
return output
# In some versions of FSL, even though we try to predict the names of image files that
# FSL commands will generate based on the suffix() function, the FSL binaries themselves
# ignore the FSLOUTPUTTYPE environment variable. Therefore, the safest approach is:
# whenever receiving an output image from an FSL command, explicitly search for the path.
def find_image(name): #pylint: disable=unused-variable
from mrtrix3 import app #pylint: disable=import-outside-toplevel
prefix = os.path.join(os.path.dirname(name), os.path.basename(name).split('.')[0])
if os.path.isfile(prefix + suffix()):
app.debug('Image at expected location: \"' + prefix + suffix() + '\"')
return prefix + suffix()
for suf in ['.nii', '.nii.gz', '.img']:
if os.path.isfile(prefix + suf):
app.debug('Expected image at \"' + prefix + suffix() + '\", but found at \"' + prefix + suf + '\"')
return prefix + suf
raise MRtrixError('Unable to find FSL output file for path \"' + name + '\"')
# For many FSL commands, the format of any output images will depend on the string
# stored in 'FSLOUTPUTTYPE'. This may even override a filename extension provided
# to the relevant command. Therefore use this function to 'guess' what the names
# of images provided by FSL commands will be.
def suffix(): #pylint: disable=unused-variable
from mrtrix3 import app #pylint: disable=import-outside-toplevel
global _SUFFIX
if _SUFFIX:
return _SUFFIX
fsl_output_type = os.environ.get('FSLOUTPUTTYPE', '')
if fsl_output_type == 'NIFTI':
app.debug('NIFTI -> .nii')
_SUFFIX = '.nii'
elif fsl_output_type == 'NIFTI_GZ':
app.debug('NIFTI_GZ -> .nii.gz')
_SUFFIX = '.nii.gz'
elif fsl_output_type == 'NIFTI_PAIR':
app.debug('NIFTI_PAIR -> .img')
_SUFFIX = '.img'
elif fsl_output_type == 'NIFTI_PAIR_GZ':
raise MRtrixError('MRtrix3 does not support compressed NIFTI pairs; please change FSLOUTPUTTYPE environment variable')
elif fsl_output_type:
app.warn('Unrecognised value for environment variable FSLOUTPUTTYPE (\"' + fsl_output_type + '\"): Expecting compressed NIfTIs, but FSL commands may fail')
_SUFFIX = '.nii.gz'
else:
app.warn('Environment variable FSLOUTPUTTYPE not set; FSL commands may fail, or script may fail to locate FSL command outputs')
_SUFFIX = '.nii.gz'
return _SUFFIX
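# Hedged usage sketch (not part of MRtrix3; 'bet' and the file paths are
# assumed purely for illustration). A script would typically combine these
# helpers along these lines:
#   bet_cmd = exe_name('bet')
#   run.command(bet_cmd + ' input.nii output' + suffix())
#   brain = find_image('output')
# where run.command is the MRtrix3 helper for invoking external commands.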
| mpl-2.0 | 6,801,620,265,861,604,000 | 42.754839 | 247 | 0.692716 | false |
jithinbp/SEELablet-apps | seel_res/GUI/E_MISCELLANEOUS/A_Add-ons/DUST_SENSOR.py | 1 | 4615 | #!/usr/bin/python
from __future__ import print_function
from SEEL_Apps.utilitiesClass import utilitiesClass
from templates import ui_dsm501 as dsm501
import numpy as np
from PyQt4 import QtGui,QtCore
import sys,time
params = {
'image' : 'DSM501.png',
'helpfile': 'http://www.takingspace.org/make-your-own-aircasting-particle-monitor/',
'name':'Dust Sensor\nDSM501',
'hint':'''
Study the concentration of PM2.5 particles over time using a DSM501/PPD42NS sensor. Connect PIN2 of the sensor to ID1, PIN3 to 5V, PIN5 to GND
'''
}
class AppWindow(QtGui.QMainWindow, dsm501.Ui_MainWindow,utilitiesClass):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
self.I=kwargs.get('I',None)
self.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\n',' ') )
self.plot1=self.add2DPlot(self.plot_area)
labelStyle = {'color': 'rgb(255,255,255)', 'font-size': '11pt'}
self.plot1.setLabel('bottom','Time -->', units='S',**labelStyle)
self.plot1.getAxis('left').setLabel('Concentration -->>', color='#ffffff')
self.plot1.setLimits(xMin=0,yMin=0)
self.total_samples = 100
self.acquired_samples = 0
self.timegap = 10 #mS
self.sampling_time = 2000 #mS
self.timer2 = QtCore.QTimer()
self.timer2.timeout.connect(self.updateProgress)
self.timer2.start(500)
self.I.set_state(SQR1=True)
self.curve = self.addCurve(self.plot1,'Concentration')
self.resultsTable.setRowCount(self.total_samples)
self.resultsTable.setColumnCount(3)
self.resultsTable.setHorizontalHeaderLabels(['time','Occupancy %','Concentration mg/m^3'])
self.running=False
self.start_time = time.time()
self.samplingStartTime=time.time()
self.timer = self.newTimer()
#self.running=True
#self.timer.singleShot(0,self.run)
self.X=[]
self.Y=[]
def start(self):
self.X=[]
self.Y=[]
self.running = True
self.timer.singleShot(0,self.run)
def stop(self):
self.running=False
def updateProgress(self):
if not self.running:return
val = 1e5*(time.time()-self.samplingStartTime)/(self.sampling_time)
self.timeProgressBar.setValue(val)
def run(self):
if not self.running:return
self.samplingStartTime = time.time()
self.sampling_time = self.integrationBox.value()*1e3 #convert to mS
self.I.start_one_channel_LA(channel='ID1',channel_mode=1,trigger_mode=0) #every edge
if self.running: self.timer.singleShot(self.sampling_time,self.plotData)
def plotData(self):
if not self.running:return
a,b,c,d,e = self.I.get_LA_initial_states()
if a==self.I.MAX_SAMPLES/4: a = 0
tmp = self.I.fetch_long_data_from_LA(a,1)
print (a,b,c,d,e,tmp)
self.I.dchans[0].load_data(e,tmp)
#print (self.I.dchans[0].timestamps,self.I.dchans[0].initial_state)
stamps = self.I.dchans[0].timestamps
if len(stamps)>2:
if not self.I.dchans[0].initial_state:
stamps = stamps[1:] - stamps[0]
diff = np.diff(stamps)
lows = diff[::2]
highs = diff[1::2]
#print(stamps,sum(lows),sum(highs))
low_occupancy = 100*sum(lows)/stamps[-1] #Occupancy ratio
self.progressBar.setValue(low_occupancy)
concentration = 1.1*pow(low_occupancy,3)-3.8*pow(low_occupancy,2)+520*low_occupancy+0.62; #From the spec sheet curve
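# The cubic above approximates the DSM501 spec-sheet curve relating low-pulse
# occupancy (%) to particle concentration; e.g. an occupancy of 10% gives
# roughly 1.1*1000 - 3.8*100 + 520*10 + 0.62 = 5920.62 (units as labelled in
# the results table; the exact curve fit is taken from the datasheet).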
self.X.append(time.time()-self.start_time)
self.Y.append(concentration)
self.curve.setData(self.X,self.Y)
item = QtGui.QTableWidgetItem();item.setText('%s'%(time.strftime("%H:%M:%S %d-%h")));self.resultsTable.setItem(self.acquired_samples, 0, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
item = QtGui.QTableWidgetItem();item.setText('%.3f'%(low_occupancy));self.resultsTable.setItem(self.acquired_samples, 1, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
item = QtGui.QTableWidgetItem();item.setText('%.3f'%(concentration));self.resultsTable.setItem(self.acquired_samples, 2, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
self.acquired_samples +=1
if self.acquired_samples==self.total_samples:
self.total_samples = self.acquired_samples+10
self.resultsTable.setRowCount(self.total_samples)
if self.running: self.timer.singleShot(self.timegap,self.run)
def saveData(self):
self.saveDataWindow([self.curve],self.plot1)
def closeEvent(self, event):
self.timer.stop()
self.finished=True
self.running = False
def __del__(self):
self.timer.stop()
print ('bye')
if __name__ == "__main__":
from SEEL import interface
app = QtGui.QApplication(sys.argv)
myapp = AppWindow(I=interface.connect())
myapp.show()
sys.exit(app.exec_())
| gpl-3.0 | -4,322,714,388,244,980,700 | 34.5 | 212 | 0.711376 | false |
kgullikson88/TS23-Scripts | CheckSyntheticTemperature.py | 1 | 14868 | import os
import re
from collections import defaultdict
from operator import itemgetter
import logging
import pandas
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from george import kernels
import matplotlib.pyplot as plt
import numpy as np
import george
import emcee
import StarData
import SpectralTypeRelations
def classify_filename(fname, type='bright'):
"""
    Given a CCF filename, extract the star combination, vsini, temperature, logg, and metallicity
    :param fname: the CCF filename to parse
    :return: tuple of (star1, star2, vsini, temp, logg, metal)
"""
# First, remove any leading directories
fname = fname.split('/')[-1]
# Star combination
m1 = re.search('\.[0-9]+kps', fname)
stars = fname[:m1.start()]
star1 = stars.split('+')[0].replace('_', ' ')
star2 = stars.split('+')[1].split('_{}'.format(type))[0].replace('_', ' ')
# secondary star vsini
vsini = float(fname[m1.start() + 1:].split('kps')[0])
# Temperature
m2 = re.search('[0-9]+\.0K', fname)
temp = float(m2.group()[:-1])
# logg
m3 = re.search('K\+[0-9]\.[0-9]', fname)
logg = float(m3.group()[1:])
# metallicity
metal = float(fname.split(str(logg))[-1])
return star1, star2, vsini, temp, logg, metal
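# Hedged example (hypothetical filename, for illustration only):
#   classify_filename('HIP_1234+GJ_567_bright.10kps_5000.0K+4.5-0.5')
# returns ('HIP 1234', 'GJ 567', 10.0, 5000.0, 4.5, -0.5).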
def get_ccf_data(basedir, primary_name=None, secondary_name=None, vel_arr=np.arange(-900.0, 900.0, 0.1), type='bright'):
"""
Searches the given directory for CCF files, and classifies
by star, temperature, metallicity, and vsini
:param basedir: The directory to search for CCF files
:keyword primary_name: Optional keyword. If given, it will only get the requested primary star data
:keyword secondary_name: Same as primary_name, but only reads ccfs for the given secondary
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
primary = []
secondary = []
vsini_values = []
temperature = []
gravity = []
metallicity = []
ccf = []
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
if primary_name is not None and star1.lower() != primary_name.lower():
continue
if secondary_name is not None and star2.lower() != secondary_name.lower():
continue
vel, corr = np.loadtxt(fname, unpack=True)
fcn = spline(vel, corr)
ccf.append(fcn(vel_arr))
primary.append(star1)
secondary.append(star2)
vsini_values.append(vsini)
temperature.append(temp)
gravity.append(logg)
metallicity.append(metal)
# Make a pandas dataframe with all this data
df = pandas.DataFrame(data={'Primary': primary, 'Secondary': secondary, 'Temperature': temperature,
'vsini': vsini_values, 'logg': gravity, '[Fe/H]': metallicity, 'CCF': ccf})
return df
def get_ccf_summary(basedir, vel_arr=np.arange(-900.0, 900.0, 0.1), velocity='highest', type='bright'):
"""
Very similar to get_ccf_data, but does it in a way that is more memory efficient
:param basedir: The directory to search for CCF files
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
file_dict = defaultdict(lambda: defaultdict(list))
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
file_dict[star1][star2].append(fname)
# Now, read the ccfs for each primary/secondary combo, and find the best combination
summary_dfs = []
for primary in file_dict.keys():
for secondary in file_dict[primary].keys():
data = get_ccf_data(basedir, primary_name=primary, secondary_name=secondary,
vel_arr=vel_arr, type=type)
summary_dfs.append(find_best_pars(data, velocity=velocity, vel_arr=vel_arr))
return pandas.concat(summary_dfs, ignore_index=True)
def find_best_pars(df, velocity='highest', vel_arr=np.arange(-900.0, 900.0, 0.1)):
"""
Find the 'best-fit' parameters for each combination of primary and secondary star
:param df: the dataframe to search in
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:return: a dataframe with keys of primary, secondary, and the parameters
"""
# Get the names of the primary and secondary stars
primary_names = pandas.unique(df.Primary)
secondary_names = pandas.unique(df.Secondary)
# Find the ccf value at the given velocity
if velocity == 'highest':
fcn = lambda row: (np.max(row), vel_arr[np.argmax(row)])
vals = df['CCF'].map(fcn)
df['ccf_max'] = vals.map(lambda l: l[0])
df['rv'] = vals.map(lambda l: l[1])
# df['ccf_max'] = df['CCF'].map(np.max)
else:
df['ccf_max'] = df['CCF'].map(lambda arr: arr[np.argmin(np.abs(vel_arr - velocity))])
# Find the best parameter for each combination
d = defaultdict(list)
for primary in primary_names:
for secondary in secondary_names:
good = df.loc[(df.Primary == primary) & (df.Secondary == secondary)]
best = good.loc[good.ccf_max == good.ccf_max.max()]
d['Primary'].append(primary)
d['Secondary'].append(secondary)
d['Temperature'].append(best['Temperature'].item())
d['vsini'].append(best['vsini'].item())
d['logg'].append(best['logg'].item())
d['[Fe/H]'].append(best['[Fe/H]'].item())
d['rv'].append(best['rv'].item())
return pandas.DataFrame(data=d)
def get_detected_objects(df, tol=1.0):
"""
Takes a summary dataframe with RV information. Finds the median rv for each star,
and removes objects that are 'tol' km/s from the median value
:param df: A summary dataframe, such as created by find_best_pars
:param tol: The tolerance, in km/s, to accept an observation as detected
:return: a dataframe containing only detected companions
"""
secondary_names = pandas.unique(df.Secondary)
secondary_to_rv = defaultdict(float)
for secondary in secondary_names:
rv = df.loc[df.Secondary == secondary]['rv'].median()
secondary_to_rv[secondary] = rv
print secondary, rv
keys = df.Secondary.values
good = df.loc[abs(df.rv.values - np.array(itemgetter(*keys)(secondary_to_rv))) < tol]
return good
def add_actual_temperature(df, method='spt'):
"""
Add the actual temperature to a given summary dataframe
:param df: The dataframe to which we will add the actual secondary star temperature
:param method: How to get the actual temperature. Options are:
- 'spt': Use main-sequence relationships to go from spectral type --> temperature
- 'excel': Use tabulated data, available in the file 'SecondaryStar_Temperatures.xls'
:return: copy of the original dataframe, with an extra column for the secondary star temperature
"""
# First, get a list of the secondary stars in the data
secondary_names = pandas.unique(df.Secondary)
secondary_to_temperature = defaultdict(float)
secondary_to_error = defaultdict(float)
if method.lower() == 'spt':
MS = SpectralTypeRelations.MainSequence()
for secondary in secondary_names:
star_data = StarData.GetData(secondary)
spt = star_data.spectype[0] + re.search('[0-9]\.*[0-9]*', star_data.spectype).group()
T_sec = MS.Interpolate(MS.Temperature, spt)
secondary_to_temperature[secondary] = T_sec
elif method.lower() == 'excel':
table = pandas.read_excel('SecondaryStar_Temperatures.xls', 0)
for secondary in secondary_names:
T_sec = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())]['Literature_Temp'].item()
T_error = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())][
'Literature_error'].item()
secondary_to_temperature[secondary] = T_sec
secondary_to_error[secondary] = T_error
df['Tactual'] = df['Secondary'].map(lambda s: secondary_to_temperature[s])
df['Tact_err'] = df['Secondary'].map(lambda s: secondary_to_error[s])
return
def make_gaussian_process_samples(df):
"""
Make a gaussian process fitting the Tactual-Tmeasured relationship
:param df: pandas DataFrame with columns 'Temperature' (with the measured temperature)
and 'Tactual' (for the actual temperature)
:return: emcee sampler instance
"""
# First, find the uncertainties at each actual temperature
# Tactual = df['Tactual'].values
#Tmeasured = df['Temperature'].values
#error = df['Tact_err'].values
temp = df.groupby('Temperature').mean()['Tactual']
Tmeasured = temp.keys().values
Tactual = temp.values
error = np.nan_to_num(df.groupby('Temperature').std(ddof=1)['Tactual'].values)
default = np.median(error[error > 1])
error = np.maximum(error, np.ones(error.size) * default)
for Tm, Ta, e in zip(Tmeasured, Tactual, error):
print Tm, Ta, e
plt.figure(1)
plt.errorbar(Tmeasured, Tactual, yerr=error, fmt='.k', capsize=0)
plt.plot(Tmeasured, Tmeasured, 'r--')
plt.xlim((min(Tmeasured) - 100, max(Tmeasured) + 100))
plt.xlabel('Measured Temperature')
plt.ylabel('Actual Temperature')
plt.show(block=False)
# Define some functions to use in the GP fit
def model(pars, T):
#polypars = pars[2:]
#return np.poly1d(polypars)(T)
return T
def lnlike(pars, Tact, Tmeas, Terr):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeas, Terr)
return gp.lnlikelihood(Tact - model(pars, Tmeas))
def lnprior(pars):
lna, lntau = pars[:2]
polypars = pars[2:]
if -20 < lna < 20 and 4 < lntau < 20:
return 0.0
return -np.inf
def lnprob(pars, x, y, yerr):
lp = lnprior(pars)
return lp + lnlike(pars, x, y, yerr) if np.isfinite(lp) else -np.inf
# Set up the emcee fitter
initial = np.array([0, 6])#, 1.0, 0.0])
ndim = len(initial)
nwalkers = 100
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(Tactual, Tmeasured, error))
print 'Running first burn-in'
p1, lnp, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print "Running second burn-in..."
p_best = p1[np.argmax(lnp)]
p2 = [p_best + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
p3, _, _ = sampler.run_mcmc(p2, 250)
sampler.reset()
print "Running production..."
sampler.run_mcmc(p3, 1000)
# Plot a bunch of the fits
print "Plotting..."
N = 100
Tvalues = np.arange(3300, 7000, 20)
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
for i, pars in enumerate(par_vals):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
plt.plot(Tvalues, s, 'b-', alpha=0.1)
plt.draw()
# Finally, get posterior samples at all the possibly measured temperatures
print 'Generating posterior samples at all temperatures...'
N = 10000 # This is 1/10th of the total number of samples!
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
Tvalues = np.arange(3000, 6900, 100)
gp_posterior = []
for pars in par_vals:
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
gp_posterior.append(s)
# Finally, make confidence intervals for the actual temperatures
gp_posterior = np.array(gp_posterior)
l, m, h = np.percentile(gp_posterior, [16.0, 50.0, 84.0], axis=0)
conf = pandas.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
'Lower Bound': l, 'Upper bound': h})
conf.to_csv('Confidence_Intervals.csv', index=False)
return sampler, np.array(gp_posterior)
def check_posterior(df, posterior, Tvalues):
"""
Checks the posterior samples: Are 95% of the measurements within 2-sigma of the prediction?
:param df: The summary dataframe
:param posterior: The MCMC predicted values
:param Tvalues: The measured temperatures the posterior was made with
:return: boolean, as well as some warning messages if applicable
"""
# First, make 2-sigma confidence intervals
l, m, h = np.percentile(posterior, [5.0, 50.0, 95.0], axis=0)
# Save the confidence intervals
# conf = pandas.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
# 'Lower Bound': l, 'Upper bound': h})
#conf.to_csv('Confidence_Intervals.csv', index=False)
Ntot = [] # The total number of observations with the given measured temperature
Nacc = [] # The number that have actual temperatures within the confidence interval
g = df.groupby('Temperature')
for i, T in enumerate(Tvalues):
if T in g.groups.keys():
Ta = g.get_group(T)['Tactual']
low, high = l[i], h[i]
Ntot.append(len(Ta))
Nacc.append(len(Ta.loc[(Ta >= low) & (Ta <= high)]))
p = float(Nacc[-1]) / float(Ntot[-1])
if p < 0.95:
logging.warn(
'Only {}/{} of the samples ({:.2f}%) were accepted for T = {} K'.format(Nacc[-1], Ntot[-1], p * 100,
T))
print low, high
print sorted(Ta)
else:
Ntot.append(0)
Nacc.append(0)
p = float(sum(Nacc)) / float(sum(Ntot))
if p < 0.95:
logging.warn('Only {:.2f}% of the total samples were accepted!'.format(p * 100))
return False
return True
if __name__ == '__main__':
pass
| gpl-3.0 | -920,756,524,181,589,000 | 39.402174 | 120 | 0.625572 | false |
txomon/SpockBot | spock/plugins/helpers/physics.py | 1 | 6091 | """
PhysicsPlugin is planned to provide vectors and tracking necessary to implement
SMP-compliant client-side physics for entities. Primarirly this will be used to
keep update client position for gravity/knockback/water-flow etc. But it should
also eventually provide functions to track other entities affected by SMP
physics
Minecraft client/player physics is unfortunately very poorly documented.
Most of
these values are based on experimental results and the contributions of a
handful of people (Thank you 0pteron!) to the Minecraft wiki talk page on
Entities and Transportation. Ideally someone will decompile the client with MCP
and document the totally correct values and behaviors.
"""
# Gravitational constants defined in blocks/(client tick)^2
PLAYER_ENTITY_GAV = 0.08
THROWN_ENTITY_GAV = 0.03
RIDING_ENTITY_GAV = 0.04
BLOCK_ENTITY_GAV = 0.04
ARROW_ENTITY_GAV = 0.05
# Air drag constants defined in 1/tick
PLAYER_ENTITY_DRG = 0.02
THROWN_ENTITY_DRG = 0.01
RIDING_ENTITY_DRG = 0.05
BLOCK_ENTITY_DRG = 0.02
ARROW_ENTITY_DRG = 0.01
# Player ground acceleration isn't actually linear, but we're going to pretend
# that it is. Max ground velocity for a walking client is 0.215 blocks/tick; it
# takes a dozen or so ticks to get close to max velocity. Sprint is 0.28; just
# apply more acceleration to reach a higher max ground velocity
PLAYER_WLK_ACC = 0.15
PLAYER_SPR_ACC = 0.20
PLAYER_GND_DRG = 0.41
# Seems about right, not based on anything
PLAYER_JMP_ACC = 0.45
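# Worked example (illustrative, not used by the plugin): the player constants
# above imply a terminal velocity. Each tick the plugin applies
#     vec.y -= PLAYER_ENTITY_GAV
#     vec.y -= vec.y * PLAYER_ENTITY_DRG
# so a steady-state fall speed v satisfies (v - 0.08) * (1 - 0.02) = v,
# giving v = -0.08 * 0.98 / 0.02 = -3.92 blocks/tick, which matches the
# commonly quoted Minecraft free-fall terminal velocity.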
import logging
import math
from spock.mcmap import mapdata
from spock.plugins.base import PluginBase
from spock.utils import BoundingBox, Position, pl_announce
from spock.vector import Vector3
logger = logging.getLogger('spock')
class PhysicsCore(object):
def __init__(self, vec, pos):
self.vec = vec
self.pos = pos
def jump(self):
if self.pos.on_ground:
self.pos.on_ground = False
self.vec += Vector3(0, PLAYER_JMP_ACC, 0)
def walk(self, angle, radians=False):
if not radians:
angle = math.radians(angle)
z = math.cos(angle) * PLAYER_WLK_ACC
x = math.sin(angle) * PLAYER_WLK_ACC
self.vec += Vector3(x, 0, z)
def sprint(self, angle, radians=False):
if not radians:
angle = math.radians(angle)
z = math.cos(angle) * PLAYER_SPR_ACC
x = math.sin(angle) * PLAYER_SPR_ACC
self.vec += Vector3(x, 0, z)
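# Minimal usage sketch (illustrative): a plugin that declares
# requires = ('Physics',) gets this object injected as self.physics by
# PluginBase (mirroring self.event/self.clientinfo/self.world below), so it
# could call, e.g.:
#
#     self.physics.walk(90)   # accelerate along the +x axis at walking speed
#     self.physics.jump()     # only has an effect while on_ground is True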
@pl_announce('Physics')
class PhysicsPlugin(PluginBase):
requires = ('Event', 'ClientInfo', 'World')
events = {
'physics_tick': 'tick',
}
def __init__(self, ploader, settings):
super(PhysicsPlugin, self).__init__(ploader, settings)
self.vec = Vector3(0.0, 0.0, 0.0)
# wiki says 0.6 but I made it 0.8 to give a little wiggle room
self.playerbb = BoundingBox(0.8, 1.8)
self.pos = self.clientinfo.position
ploader.provides('Physics', PhysicsCore(self.vec, self.pos))
def tick(self, _, __):
self.check_collision()
self.apply_horizontal_drag()
self.apply_vector()
def check_collision(self):
cb = Position(math.floor(self.pos.x), math.floor(self.pos.y),
math.floor(self.pos.z))
        if self.block_collision(cb, y=2):  # y+2 is the block above the player's head
self.vec.y = 0
if self.block_collision(cb, y=-1): # we check below feet
self.pos.on_ground = True
self.vec.y = 0
self.pos.y = cb.y
else:
self.pos.on_ground = False
self.vec -= Vector3(0, PLAYER_ENTITY_GAV, 0)
self.apply_vertical_drag()
# feet or head collide with x
if self.block_collision(cb, x=1) or \
self.block_collision(cb, x=-1) or \
self.block_collision(cb, y=1, x=1) or \
self.block_collision(cb, y=1, x=-1):
self.vec.x = 0
# replace with real info in event
self.event.emit("phy_collision", "x")
# feet or head collide with z
if self.block_collision(cb, z=1) or \
self.block_collision(cb, z=-1) or \
self.block_collision(cb, y=1, z=1) or \
self.block_collision(cb, y=1, z=-1):
self.vec.z = 0
# replace with real info in event
self.event.emit("phy_collision", "z")
def block_collision(self, cb, x=0, y=0, z=0):
block_id, meta = self.world.get_block(cb.x + x, cb.y + y, cb.z + z)
block = mapdata.get_block(block_id, meta)
if block is None:
return False
        # Possibly we want to use the centers of blocks as the starting
        # points for bounding boxes instead of (0, 0, 0). That might make
        # things easier once we get to more complex shapes that sit in the
        # center of a block (e.g. fences), but it is more complicated for
        # the player. Un-center the player position and nudge it down a
        # little so collisions with the floor are detected.
pos1 = Position(self.pos.x - self.playerbb.w / 2, self.pos.y - 0.2,
self.pos.z - self.playerbb.d / 2)
bb1 = self.playerbb
bb2 = block.bounding_box
if bb2 is not None:
pos2 = Position(cb.x + x + bb2.x, cb.y + y + bb2.y,
cb.z + z + bb2.z)
if ((pos1.x + bb1.w) >= (pos2.x) and (pos1.x) <= (
pos2.x + bb2.w)) and (
(pos1.y + bb1.h) >= (pos2.y) and (pos1.y) <= (
pos2.y + bb2.h)) and (
(pos1.z + bb1.d) >= (pos2.z) and (pos1.z) <= (
pos2.z + bb2.d)):
return True
return False
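    # Note: the intersection test above is a standard axis-aligned bounding
    # box (AABB) check: two boxes overlap iff their extents overlap on every
    # axis, e.g. on x: pos1.x <= pos2.x + bb2.w and pos2.x <= pos1.x + bb1.w.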
def apply_vertical_drag(self):
self.vec.y -= self.vec.y * PLAYER_ENTITY_DRG
def apply_horizontal_drag(self):
self.vec.x -= self.vec.x * PLAYER_GND_DRG
self.vec.z -= self.vec.z * PLAYER_GND_DRG
def apply_vector(self):
p = self.pos
p.x = p.x + self.vec.x
p.y = p.y + self.vec.y
p.z = p.z + self.vec.z
| mit | 4,147,684,726,026,790,000 | 35.915152 | 79 | 0.602036 | false |
bunnyinc/django-oidc-provider | oidc_provider/models.py | 1 | 6596 | # -*- coding: utf-8 -*-
import base64
import binascii
from hashlib import md5, sha256
import json
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
CLIENT_TYPE_CHOICES = [
('confidential', 'Confidential'),
('public', 'Public'),
]
RESPONSE_TYPE_CHOICES = [
('code', 'code (Authorization Code Flow)'),
('id_token', 'id_token (Implicit Flow)'),
('id_token token', 'id_token token (Implicit Flow)'),
('code token', 'code token (Hybrid Flow)'),
('code id_token', 'code id_token (Hybrid Flow)'),
('code id_token token', 'code id_token token (Hybrid Flow)'),
]
JWT_ALGS = [
('HS256', 'HS256'),
('RS256', 'RS256'),
]
class Client(models.Model):
name = models.CharField(max_length=100, default='', verbose_name=_(u'Name'))
client_type = models.CharField(max_length=30, choices=CLIENT_TYPE_CHOICES, default='confidential', verbose_name=_(u'Client Type'), help_text=_(u'<b>Confidential</b> clients are capable of maintaining the confidentiality of their credentials. <b>Public</b> clients are incapable.'))
client_id = models.CharField(max_length=255, unique=True, verbose_name=_(u'Client ID'))
client_secret = models.CharField(max_length=255, blank=True, verbose_name=_(u'Client SECRET'))
response_type = models.CharField(max_length=30, choices=RESPONSE_TYPE_CHOICES, verbose_name=_(u'Response Type'))
jwt_alg = models.CharField(max_length=10, choices=JWT_ALGS, default='RS256', verbose_name=_(u'JWT Algorithm'), help_text=_(u'Algorithm used to encode ID Tokens.'))
date_created = models.DateField(auto_now_add=True, verbose_name=_(u'Date Created'))
website_url = models.CharField(max_length=255, blank=True, default='', verbose_name=_(u'Website URL'))
terms_url = models.CharField(max_length=255, blank=True, default='', verbose_name=_(u'Terms URL'), help_text=_(u'External reference to the privacy policy of the client.'))
contact_email = models.CharField(max_length=255, blank=True, default='', verbose_name=_(u'Contact Email'))
logo = models.FileField(blank=True, default='', upload_to='oidc_provider/clients', verbose_name=_(u'Logo Image'))
_redirect_uris = models.TextField(default='', verbose_name=_(u'Redirect URIs'), help_text=_(u'Enter each URI on a new line.'))
def redirect_uris():
def fget(self):
return self._redirect_uris.splitlines()
def fset(self, value):
self._redirect_uris = '\n'.join(value)
return locals()
redirect_uris = property(**redirect_uris())
_post_logout_redirect_uris = models.TextField(blank=True, default='', verbose_name=_(u'Post Logout Redirect URIs'), help_text=_(u'Enter each URI on a new line.'))
def post_logout_redirect_uris():
def fget(self):
return self._post_logout_redirect_uris.splitlines()
def fset(self, value):
self._post_logout_redirect_uris = '\n'.join(value)
return locals()
post_logout_redirect_uris = property(**post_logout_redirect_uris())
class Meta:
verbose_name = _(u'Client')
verbose_name_plural = _(u'Clients')
def __str__(self):
return u'{0}'.format(self.name)
def __unicode__(self):
return self.__str__()
@property
def default_redirect_uri(self):
return self.redirect_uris[0] if self.redirect_uris else ''
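# Usage sketch (illustrative values, assuming the usual Django ORM setup):
#
#     client = Client.objects.create(
#         name='demo', client_id='abc123', response_type='code')
#     client.redirect_uris = ['http://example.com/callback']
#     client.save()
#     client.default_redirect_uri  # -> 'http://example.com/callback'
#
# The redirect_uris property round-trips the list through the
# newline-separated _redirect_uris text field defined above.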
class BaseCodeTokenModel(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_(u'User'))
client = models.ForeignKey(Client, verbose_name=_(u'Client'))
expires_at = models.DateTimeField(verbose_name=_(u'Expiration Date'))
_scope = models.TextField(default='', verbose_name=_(u'Scopes'))
def scope():
def fget(self):
return self._scope.split()
def fset(self, value):
self._scope = ' '.join(value)
return locals()
scope = property(**scope())
def has_expired(self):
return timezone.now() >= self.expires_at
def __str__(self):
return u'{0} - {1}'.format(self.client, self.user.email)
def __unicode__(self):
return self.__str__()
class Meta:
abstract = True
class Code(BaseCodeTokenModel):
code = models.CharField(max_length=255, unique=True, verbose_name=_(u'Code'))
nonce = models.CharField(max_length=255, blank=True, default='', verbose_name=_(u'Nonce'))
is_authentication = models.BooleanField(default=False, verbose_name=_(u'Is Authentication?'))
code_challenge = models.CharField(max_length=255, null=True, verbose_name=_(u'Code Challenge'))
code_challenge_method = models.CharField(max_length=255, null=True, verbose_name=_(u'Code Challenge Method'))
class Meta:
verbose_name = _(u'Authorization Code')
verbose_name_plural = _(u'Authorization Codes')
class Token(BaseCodeTokenModel):
access_token = models.CharField(max_length=255, unique=True, verbose_name=_(u'Access Token'))
refresh_token = models.CharField(max_length=255, unique=True, verbose_name=_(u'Refresh Token'))
_id_token = models.TextField(verbose_name=_(u'ID Token'))
def id_token():
def fget(self):
return json.loads(self._id_token)
def fset(self, value):
self._id_token = json.dumps(value)
return locals()
id_token = property(**id_token())
class Meta:
verbose_name = _(u'Token')
verbose_name_plural = _(u'Tokens')
@property
def at_hash(self):
# @@@ d-o-p only supports 256 bits (change this if that changes)
hashed_access_token = sha256(
self.access_token.encode('ascii')
).hexdigest().encode('ascii')
return base64.urlsafe_b64encode(
binascii.unhexlify(
hashed_access_token[:len(hashed_access_token) // 2]
)
).rstrip(b'=').decode('ascii')
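    # Illustrative note: at_hash follows OpenID Connect Core section
    # 3.1.3.6 -- hash the ASCII access token with SHA-256 (the only hash
    # size supported here, per the comment above), keep the left-most half
    # of the digest, and base64url-encode it without '=' padding.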
class UserConsent(BaseCodeTokenModel):
date_given = models.DateTimeField(verbose_name=_(u'Date Given'))
class Meta:
unique_together = ('user', 'client')
class RSAKey(models.Model):
key = models.TextField(verbose_name=_(u'Key'), help_text=_(u'Paste your private RSA Key here.'))
class Meta:
verbose_name = _(u'RSA Key')
verbose_name_plural = _(u'RSA Keys')
def __str__(self):
return u'{0}'.format(self.kid)
def __unicode__(self):
return self.__str__()
@property
def kid(self):
return u'{0}'.format(md5(self.key.encode('utf-8')).hexdigest() if self.key else '')
| mit | -7,244,896,210,937,017,000 | 35.043716 | 285 | 0.642359 | false |
anbangleo/NlsdeWeb | Python-3.6.0/Lib/test/test_unicodedata.py | 1 | 12371 | """ Test script for the unicodedata module.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import sys
import unittest
import hashlib
from test.support import script_helper
encoding = 'utf-8'
errors = 'surrogatepass'
### Run tests
class UnicodeMethodsTest(unittest.TestCase):
# update this, if the database changes
expectedchecksum = 'c1fa98674a683aa8a8d8dee0c84494f8d36346e6'
def test_method_checksum(self):
h = hashlib.sha1()
for i in range(0x10000):
char = chr(i)
data = [
# Predicates (single char)
"01"[char.isalnum()],
"01"[char.isalpha()],
"01"[char.isdecimal()],
"01"[char.isdigit()],
"01"[char.islower()],
"01"[char.isnumeric()],
"01"[char.isspace()],
"01"[char.istitle()],
"01"[char.isupper()],
# Predicates (multiple chars)
"01"[(char + 'abc').isalnum()],
"01"[(char + 'abc').isalpha()],
"01"[(char + '123').isdecimal()],
"01"[(char + '123').isdigit()],
"01"[(char + 'abc').islower()],
"01"[(char + '123').isnumeric()],
"01"[(char + ' \t').isspace()],
"01"[(char + 'abc').istitle()],
"01"[(char + 'ABC').isupper()],
# Mappings (single char)
char.lower(),
char.upper(),
char.title(),
# Mappings (multiple chars)
(char + 'abc').lower(),
(char + 'ABC').upper(),
(char + 'abc').title(),
(char + 'ABC').title(),
]
h.update(''.join(data).encode(encoding, errors))
result = h.hexdigest()
self.assertEqual(result, self.expectedchecksum)
class UnicodeDatabaseTest(unittest.TestCase):
def setUp(self):
# In case unicodedata is not available, this will raise an ImportError,
# but the other test cases will still be run
import unicodedata
self.db = unicodedata
def tearDown(self):
del self.db
class UnicodeFunctionsTest(UnicodeDatabaseTest):
# Update this if the database changes. Make sure to do a full rebuild
# (e.g. 'make distclean && make') to get the correct checksum.
expectedchecksum = 'f891b1e6430c712531b9bc935a38e22d78ba1bf3'
def test_function_checksum(self):
data = []
h = hashlib.sha1()
for i in range(0x10000):
char = chr(i)
data = [
# Properties
format(self.db.digit(char, -1), '.12g'),
format(self.db.numeric(char, -1), '.12g'),
format(self.db.decimal(char, -1), '.12g'),
self.db.category(char),
self.db.bidirectional(char),
self.db.decomposition(char),
str(self.db.mirrored(char)),
str(self.db.combining(char)),
]
h.update(''.join(data).encode("ascii"))
result = h.hexdigest()
self.assertEqual(result, self.expectedchecksum)
def test_digit(self):
self.assertEqual(self.db.digit('A', None), None)
self.assertEqual(self.db.digit('9'), 9)
self.assertEqual(self.db.digit('\u215b', None), None)
self.assertEqual(self.db.digit('\u2468'), 9)
self.assertEqual(self.db.digit('\U00020000', None), None)
self.assertEqual(self.db.digit('\U0001D7FD'), 7)
self.assertRaises(TypeError, self.db.digit)
self.assertRaises(TypeError, self.db.digit, 'xx')
self.assertRaises(ValueError, self.db.digit, 'x')
def test_numeric(self):
self.assertEqual(self.db.numeric('A',None), None)
self.assertEqual(self.db.numeric('9'), 9)
self.assertEqual(self.db.numeric('\u215b'), 0.125)
self.assertEqual(self.db.numeric('\u2468'), 9.0)
self.assertEqual(self.db.numeric('\ua627'), 7.0)
self.assertEqual(self.db.numeric('\U00020000', None), None)
self.assertEqual(self.db.numeric('\U0001012A'), 9000)
self.assertRaises(TypeError, self.db.numeric)
self.assertRaises(TypeError, self.db.numeric, 'xx')
self.assertRaises(ValueError, self.db.numeric, 'x')
def test_decimal(self):
self.assertEqual(self.db.decimal('A',None), None)
self.assertEqual(self.db.decimal('9'), 9)
self.assertEqual(self.db.decimal('\u215b', None), None)
self.assertEqual(self.db.decimal('\u2468', None), None)
self.assertEqual(self.db.decimal('\U00020000', None), None)
self.assertEqual(self.db.decimal('\U0001D7FD'), 7)
self.assertRaises(TypeError, self.db.decimal)
self.assertRaises(TypeError, self.db.decimal, 'xx')
self.assertRaises(ValueError, self.db.decimal, 'x')
def test_category(self):
self.assertEqual(self.db.category('\uFFFE'), 'Cn')
self.assertEqual(self.db.category('a'), 'Ll')
self.assertEqual(self.db.category('A'), 'Lu')
self.assertEqual(self.db.category('\U00020000'), 'Lo')
self.assertEqual(self.db.category('\U0001012A'), 'No')
self.assertRaises(TypeError, self.db.category)
self.assertRaises(TypeError, self.db.category, 'xx')
def test_bidirectional(self):
self.assertEqual(self.db.bidirectional('\uFFFE'), '')
self.assertEqual(self.db.bidirectional(' '), 'WS')
self.assertEqual(self.db.bidirectional('A'), 'L')
self.assertEqual(self.db.bidirectional('\U00020000'), 'L')
self.assertRaises(TypeError, self.db.bidirectional)
self.assertRaises(TypeError, self.db.bidirectional, 'xx')
def test_decomposition(self):
self.assertEqual(self.db.decomposition('\uFFFE'),'')
self.assertEqual(self.db.decomposition('\u00bc'), '<fraction> 0031 2044 0034')
self.assertRaises(TypeError, self.db.decomposition)
self.assertRaises(TypeError, self.db.decomposition, 'xx')
def test_mirrored(self):
self.assertEqual(self.db.mirrored('\uFFFE'), 0)
self.assertEqual(self.db.mirrored('a'), 0)
self.assertEqual(self.db.mirrored('\u2201'), 1)
self.assertEqual(self.db.mirrored('\U00020000'), 0)
self.assertRaises(TypeError, self.db.mirrored)
self.assertRaises(TypeError, self.db.mirrored, 'xx')
def test_combining(self):
self.assertEqual(self.db.combining('\uFFFE'), 0)
self.assertEqual(self.db.combining('a'), 0)
self.assertEqual(self.db.combining('\u20e1'), 230)
self.assertEqual(self.db.combining('\U00020000'), 0)
self.assertRaises(TypeError, self.db.combining)
self.assertRaises(TypeError, self.db.combining, 'xx')
def test_normalize(self):
self.assertRaises(TypeError, self.db.normalize)
self.assertRaises(ValueError, self.db.normalize, 'unknown', 'xx')
self.assertEqual(self.db.normalize('NFKC', ''), '')
# The rest can be found in test_normalization.py
# which requires an external file.
def test_pr29(self):
# http://www.unicode.org/review/pr-29.html
# See issues #1054943 and #10254.
composed = ("\u0b47\u0300\u0b3e", "\u1100\u0300\u1161",
'Li\u030dt-s\u1e73\u0301',
'\u092e\u093e\u0930\u094d\u0915 \u091c\u093c'
+ '\u0941\u0915\u0947\u0930\u092c\u0930\u094d\u0917',
'\u0915\u093f\u0930\u094d\u0917\u093f\u091c\u093c'
+ '\u0938\u094d\u0924\u093e\u0928')
for text in composed:
self.assertEqual(self.db.normalize('NFC', text), text)
def test_issue10254(self):
# Crash reported in #10254
a = 'C\u0338' * 20 + 'C\u0327'
b = 'C\u0338' * 20 + '\xC7'
self.assertEqual(self.db.normalize('NFC', a), b)
def test_east_asian_width(self):
eaw = self.db.east_asian_width
self.assertRaises(TypeError, eaw, b'a')
self.assertRaises(TypeError, eaw, bytearray())
self.assertRaises(TypeError, eaw, '')
self.assertRaises(TypeError, eaw, 'ra')
self.assertEqual(eaw('\x1e'), 'N')
self.assertEqual(eaw('\x20'), 'Na')
self.assertEqual(eaw('\uC894'), 'W')
self.assertEqual(eaw('\uFF66'), 'H')
self.assertEqual(eaw('\uFF1F'), 'F')
self.assertEqual(eaw('\u2010'), 'A')
self.assertEqual(eaw('\U00020000'), 'W')
def test_east_asian_width_9_0_changes(self):
self.assertEqual(self.db.ucd_3_2_0.east_asian_width('\u231a'), 'N')
self.assertEqual(self.db.east_asian_width('\u231a'), 'W')
class UnicodeMiscTest(UnicodeDatabaseTest):
def test_failed_import_during_compiling(self):
# Issue 4367
# Decoding \N escapes requires the unicodedata module. If it can't be
# imported, we shouldn't segfault.
# This program should raise a SyntaxError in the eval.
code = "import sys;" \
"sys.modules['unicodedata'] = None;" \
"""eval("'\\\\N{SOFT HYPHEN}'")"""
# We use a separate process because the unicodedata module may already
# have been loaded in this process.
result = script_helper.assert_python_failure("-c", code)
error = "SyntaxError: (unicode error) \\N escapes not supported " \
"(can't load unicodedata module)"
self.assertIn(error, result.err.decode("ascii"))
def test_decimal_numeric_consistent(self):
# Test that decimal and numeric are consistent,
# i.e. if a character has a decimal value,
# its numeric value should be the same.
count = 0
for i in range(0x10000):
c = chr(i)
dec = self.db.decimal(c, -1)
if dec != -1:
self.assertEqual(dec, self.db.numeric(c))
count += 1
self.assertTrue(count >= 10) # should have tested at least the ASCII digits
def test_digit_numeric_consistent(self):
# Test that digit and numeric are consistent,
# i.e. if a character has a digit value,
# its numeric value should be the same.
count = 0
for i in range(0x10000):
c = chr(i)
dec = self.db.digit(c, -1)
if dec != -1:
self.assertEqual(dec, self.db.numeric(c))
count += 1
self.assertTrue(count >= 10) # should have tested at least the ASCII digits
def test_bug_1704793(self):
self.assertEqual(self.db.lookup("GOTHIC LETTER FAIHU"), '\U00010346')
def test_ucd_510(self):
import unicodedata
# In UCD 5.1.0, a mirrored property changed wrt. UCD 3.2.0
self.assertTrue(unicodedata.mirrored("\u0f3a"))
self.assertTrue(not unicodedata.ucd_3_2_0.mirrored("\u0f3a"))
# Also, we now have two ways of representing
# the upper-case mapping: as delta, or as absolute value
self.assertTrue("a".upper()=='A')
self.assertTrue("\u1d79".upper()=='\ua77d')
self.assertTrue(".".upper()=='.')
def test_bug_5828(self):
self.assertEqual("\u1d79".lower(), "\u1d79")
# Only U+0000 should have U+0000 as its upper/lower/titlecase variant
self.assertEqual(
[
c for c in range(sys.maxunicode+1)
if "\x00" in chr(c).lower()+chr(c).upper()+chr(c).title()
],
[0]
)
def test_bug_4971(self):
# LETTER DZ WITH CARON: DZ, Dz, dz
self.assertEqual("\u01c4".title(), "\u01c5")
self.assertEqual("\u01c5".title(), "\u01c5")
self.assertEqual("\u01c6".title(), "\u01c5")
def test_linebreak_7643(self):
for i in range(0x10000):
lines = (chr(i) + 'A').splitlines()
if i in (0x0a, 0x0b, 0x0c, 0x0d, 0x85,
0x1c, 0x1d, 0x1e, 0x2028, 0x2029):
self.assertEqual(len(lines), 2,
r"\u%.4x should be a linebreak" % i)
else:
self.assertEqual(len(lines), 1,
r"\u%.4x should not be a linebreak" % i)
if __name__ == "__main__":
unittest.main()
| mit | -6,953,591,835,689,975,000 | 38.148734 | 86 | 0.57465 | false |
rolandgeider/wger | wger/mailer/management/commands/send-mass-emails.py | 1 | 1409 | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Django
from django.conf import settings
from django.core import mail
from django.core.management.base import BaseCommand
# wger
from wger.mailer.models import CronEntry
class Command(BaseCommand):
"""
Sends the prepared mass emails
"""
def handle(self, **options):
"""
Send some mails and remove them from the list
"""
if CronEntry.objects.count():
for email in CronEntry.objects.all()[:100]:
mail.send_mail(email.log.subject,
email.log.body,
settings.DEFAULT_FROM_EMAIL,
[email.email],
fail_silently=True)
email.delete()
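# Usage sketch (standard Django management-command convention; the command
# name comes from this file's name):
#
#     python manage.py send-mass-emails
#
# Each run sends at most 100 queued CronEntry mails and deletes them, so the
# command is intended to be invoked periodically, e.g. from cron.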
| agpl-3.0 | -9,051,630,063,604,642,000 | 32.547619 | 78 | 0.645848 | false |
arsenalstriker14/imagetraccloud | imagetrac_docker/taskmanager/migrations/0002_auto_20170122_1808.py | 1 | 3274 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-22 18:08
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('taskmanager', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='inboxentry',
name='attachment',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment10',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment2',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment3',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment4',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment5',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment6',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment7',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment8',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment9',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
]
| mit | 2,795,914,541,022,039,600 | 48.606061 | 183 | 0.63989 | false |
rwl/PyCIM | CIM14/ENTSOE/Dynamics/IEC61970/Meas/MeasMeasurement.py | 1 | 2367 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Core.CoreIdentifiedObject import CoreIdentifiedObject
class MeasMeasurement(CoreIdentifiedObject):
def __init__(self, PowerSystemResource=None, *args, **kw_args):
"""Initialises a new 'MeasMeasurement' instance.
@param PowerSystemResource:
"""
self._PowerSystemResource = None
self.PowerSystemResource = PowerSystemResource
super(MeasMeasurement, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["PowerSystemResource"]
_many_refs = []
def getPowerSystemResource(self):
"""
"""
return self._PowerSystemResource
def setPowerSystemResource(self, value):
if self._PowerSystemResource is not None:
filtered = [x for x in self.PowerSystemResource.Measurements if x != self]
self._PowerSystemResource._Measurements = filtered
self._PowerSystemResource = value
if self._PowerSystemResource is not None:
if self not in self._PowerSystemResource._Measurements:
self._PowerSystemResource._Measurements.append(self)
PowerSystemResource = property(getPowerSystemResource, setPowerSystemResource)
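# Minimal usage sketch (illustrative; assumes a PowerSystemResource-like
# object 'psr' from the companion Core package with a Measurements list):
#
#     m = MeasMeasurement(PowerSystemResource=psr)
#     assert m in psr.Measurements    # inverse reference kept in sync
#     m.PowerSystemResource = None    # detaches and updates psr.Measurements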
| mit | 2,298,372,686,646,743,600 | 39.810345 | 89 | 0.718209 | false |
panchr/Cellular-Automata | src/tk/graphics.py | 1 | 30211 | # tk/graphics.py
# by John M. Zelle, Ph.D
# edited by Rushy Panchal
"""Simple object oriented graphics library
The library is designed to make it very easy for novice programmers to
experiment with computer graphics in an object oriented fashion. It is
written by John Zelle for use with the book "Python Programming: An
Introduction to Computer Science" (Franklin, Beedle & Associates).
LICENSE: This is open-source software released under the terms of the
GPL (http://www.gnu.org/licenses/gpl.html).
PLATFORMS: The package is a wrapper around Tkinter and should run on
any platform where Tkinter is available.
INSTALLATION: Put this file somewhere where Python can see it.
OVERVIEW: There are two kinds of objects in the library. The GraphWin
class implements a window where drawing can be done and various
GraphicsObjects are provided that can be drawn into a GraphWin. As a
simple example, here is a complete program to draw a circle of radius
10 centered in a 100x100 window:
--------------------------------------------------------------------
from graphics import *
try:
from Tkinter import *
except ImportError:
from tkinter import *
def main():
root = Tk()
win = GraphWin(root, 100, 100)
c = Circle(Point(50,50), 10)
c.draw(win)
win.getMouse() # Pause window
main()
--------------------------------------------------------------------
GraphWin objects support coordinate transformation through the
setCoords method and pointer-based input through getMouse.
The library provides the following graphical objects:
Point
Line
Circle
Oval
Rectangle
Polygon
Text
Entry (for text-based input)
Image
Various attributes of graphical objects can be set such as
outline-color, fill-color and line-width. Graphical objects also
support moving and hiding for animation effects.
The library also provides simple pixel-based image manipulation through the
Image class (the former Pixmap class was merged into Image in version 4.1).
An image can be loaded from a file and displayed, and both getPixel and
setPixel methods are provided for manipulating it.
DOCUMENTATION: For complete documentation, see Chapter 4 of "Python
Programming: An Introduction to Computer Science" by John Zelle,
published by Franklin, Beedle & Associates. Also see
http://mcsp.wartburg.edu/zelle/python for a quick reference"""
'''
Changes made to create a Tkinter-friendly 'graphics.py' Version 5.0:
- In the main body frame, get rid of the '_root = Tk(); _root.withdraw()'
- create a 'master' parameter in GraphWin() AND get rid of the 'title' parameter --- the Tk() is already titled in the application ('Tk(className = _title_)')
- create a global variable _root (it is used elsewhere but was originally initialized in the main body of graphics.py)
- set _root to master
- get rid of the master.title(title) command
- delete the master.resizable(0, 0) command
- create global _root instances in EVERY function it is mentioned in:
* main code structure: update
* class GraphWin: __init__, __autoflush
* class GraphicsObject: draw, undraw, move, _reconfig
* class Entry:
__init__
* class Image: __init__
- in class GraphWin, change __autoflush to:
def __autoflush(self):
global _root
if self.autoflush:
try:
_root.update()
except tkinter.TclError:
pass
* The function must be changed because if the window is closed using _root.destroy(), you get an error if you try to update it
Notes:
- I noticed that graphics.py creates a Tkinter window ('_root = Tk()') as _root in the beginning, so I figured if I deleted that and then replaced it with my own window,
I could add whatever I wanted to the window since it becomes a Tkinter instance, not solely a 'graphics.py' instance
- The geometry manager is 'grid':
the Canvas itself is gridded at (row = 0, column = 0) by default
'''
# Version 5.1 12/23/2013
# * Allows saving of window using the Python Imaging Library (PIL) to an image file
# - This is set within the constructor: GraphWin(..., save_image = True). By default, it is False.
# * Currently, does not support undrawing of objects from the image
# Version 5.0 4/6/2013
# * Allows mixing with Tkinter
#
# Version 4.2 5/26/2011
# * Modified Image to allow multiple undraws like other GraphicsObjects
# Version 4.1 12/29/2009
# * Merged Pixmap and Image class. Old Pixmap removed, use Image.
# Version 4.0.1 10/08/2009
# * Modified the autoflush on GraphWin to default to True
# * Autoflush check on close, setBackground
# * Fixed getMouse to flush pending clicks at entry
# Version 4.0 08/2009
# * Reverted to non-threaded version. The advantages (robustness,
# efficiency, ability to use with other Tk code, etc.) outweigh
# the disadvantage that interactive use with IDLE is slightly more
# cumbersome.
# * Modified to run in either Python 2.x or 3.x (same file).
# * Added Image.getPixmap()
# * Added update() -- stand alone function to cause any pending
# graphics changes to display.
#
# Version 3.4 10/16/07
# Fixed GraphicsError to avoid "exploded" error messages.
# Version 3.3 8/8/06
# Added checkMouse method to GraphWin
# Version 3.2.3
# Fixed error in Polygon init spotted by Andrew Harrington
# Fixed improper threading in Image constructor
# Version 3.2.2 5/30/05
# Cleaned up handling of exceptions in Tk thread. The graphics package
# now raises an exception if attempt is made to communicate with
# a dead Tk thread.
# Version 3.2.1 5/22/05
# Added shutdown function for tkinter thread to eliminate race-condition
# error "chatter" when main thread terminates
# Renamed various private globals with _
# Version 3.2 5/4/05
# Added Pixmap object for simple image manipulation.
# Version 3.1 4/13/05
# Improved the Tk thread communication so that most Tk calls
# do not have to wait for synchonization with the Tk thread.
# (see _tkCall and _tkExec)
# Version 3.0 12/30/04
# Implemented Tk event loop in separate thread. Should now work
# interactively with IDLE. Undocumented autoflush feature is
# no longer necessary. Its default is now False (off). It may
# be removed in a future version.
# Better handling of errors regarding operations on windows that
# have been closed.
# Addition of an isClosed method to GraphWindow class.
# Version 2.2 8/26/04
# Fixed cloning bug reported by Joseph Oldham.
# Now implements deep copy of config info.
# Version 2.1 1/15/04
# Added autoflush option to GraphWin. When True (default) updates on
# the window are done after each action. This makes some graphics
# intensive programs sluggish. Turning off autoflush causes updates
# to happen during idle periods or when flush is called.
# Version 2.0
# Updated Documentation
# Made Polygon accept a list of Points in constructor
# Made all drawing functions call TK update for easier animations
# and to make the overall package work better with
# Python 2.3 and IDLE 1.0 under Windows (still some issues).
# Removed vestigial turtle graphics.
# Added ability to configure font for Entry objects (analogous to Text)
# Added setTextColor for Text as an alias of setFill
# Changed to class-style exceptions
# Fixed cloning of Text objects
# Version 1.6
# Fixed Entry so StringVar uses _root as master, solves weird
# interaction with shell in Idle
# Fixed bug in setCoords. X and Y coordinates can increase in
# "non-intuitive" direction.
# Tweaked wm_protocol so window is not resizable and kill box closes.
# Version 1.5
# Fixed bug in Entry. Can now define entry before creating a
# GraphWin. All GraphWins are now toplevel windows and share
# a fixed root (called _root).
# Version 1.4
# Fixed Garbage collection of Tkinter images bug.
# Added ability to set text atttributes.
# Added Entry boxes.
import time, os, sys
try:
# import as appropriate for 2.x vs. 3.x
import tkinter
except ImportError:
import Tkinter as tkinter
try:
import Image as PILImage
import ImageDraw
HAS_PIL = True
except ImportError:
HAS_PIL = False
##########################################################################
# Module Exceptions
class GraphicsError(Exception):
"""Generic error class for graphics module exceptions."""
pass
OBJ_ALREADY_DRAWN = "Object currently drawn"
UNSUPPORTED_METHOD = "Object doesn't support operation"
BAD_OPTION = "Illegal option value"
DEAD_THREAD = "Graphics thread quit unexpectedly"
### Other Constants
LOCAL = "local"
GLOBAL = "global"
def update():
global _root
_root.update()
############################################################################
# Graphics classes start here
class GraphWin(tkinter.Canvas):
"""A GraphWin is a toplevel window for displaying graphics."""
def __init__(self, master = None, width = 200, height = 200, autoflush = True, row = None, column = None, padx = None, pady = None, title = "Graphics Window", save_image = False):
global _root
if master is None:
master = tkinter.Tk(className = ' ' + title)
_root = master
self.width, self.height = width, height
tkinter.Canvas.__init__(self, master, width=width, height=height)
        # use 'is not None' so an explicit 0 still triggers gridding
        if any(v is not None for v in (row, column, padx, pady)):
            if row is None:
                row = 0
            if column is None:
                column = 0
            if padx is None:
                padx = 0
            if pady is None:
                pady = 5
            self.grid(row = row, column = column, padx = padx, pady = pady)
if not HAS_PIL:
self.save_image = False
else:
self.save_image = save_image
if self.save_image:
self.image = PILImage.new('RGBA', (self.width, self.height))
self.drawing_image = ImageDraw.Draw(self.image)
self.image_path = 'tk_graphTools_Graph_image.jpg'
self.foreground = "black"
self.items = []
self.mouseX = None
self.mouseY = None
self.bind("<Button-1>", self._onClick)
self.height = height
self.width = width
self.autoflush = autoflush
self._mouseCallback = None
self.trans = None
self.closed = False
self.all_objects = {}
master.lift()
if autoflush:
_root.update()
def __checkOpen(self):
if self.closed:
raise GraphicsError("window is closed")
def setBackground(self, color):
"""Set background color of the window"""
self.__checkOpen()
self.config(bg=color)
self.__autoflush()
def title(self, name):
        '''Sets the title of the main window'''
self.master.title(name)
def setCoords(self, x1, y1, x2, y2):
"""Set coordinates of window to run from (x1,y1) in the
lower-left corner to (x2,y2) in the upper-right corner."""
        self.center = Point((x1 + x2) / 2.0, (y1 + y2) / 2.0)  # midpoint of the window
self.xMin, self.yMin, self.xMax, self.yMax = x1, y1, x2, y2
self.trans = Transform(self.width, self.height, x1, y1, x2, y2)
def clear(self, *items):
self.delete(tkinter.ALL)
items = list(items) + list(self.all_objects.values())
undrawAll(*items)
if self.save_image:
self.image = PILImage.new('RGBA', (self.width, self.height))
self.drawing_image = ImageDraw.Draw(self.image)
def close(self):
"""Close the window"""
if self.closed:
return
self.closed = True
self.master.destroy()
self.__autoflush()
def isClosed(self):
return self.closed
def isOpen(self):
return not self.closed
def __autoflush(self):
global _root
if self.autoflush:
try:
_root.update()
except tkinter.TclError:
pass
def plot(self, x, y, color="black"):
"""Set pixel (x,y) to the given color"""
self.__checkOpen()
xs,ys = self.toScreen(x,y)
self.create_line(xs,ys,xs+1,ys+1, fill=color)
self.__autoflush()
if self.save_image:
self.drawing_image.point((xs, ys), color)
def plotPixel(self, x, y, color="black"):
"""Set pixel raw (independent of window coordinates) pixel
(x,y) to color"""
self.__checkOpen()
self.create_line(x,y,x+1,y+1, fill=color)
self.__autoflush()
if self.save_image:
self.drawing_image.point((x, y), color)
def flush(self):
"""Update drawing to the window"""
self.__checkOpen()
self.update_idletasks()
def getMouse(self):
"""Wait for mouse click and return Point object representing
the click"""
self.update() # flush any prior clicks
self.mouseX = None
self.mouseY = None
while self.mouseX == None or self.mouseY == None:
self.update()
if self.isClosed():
raise GraphicsError("getMouse in closed window")
time.sleep(.1) # give up thread
x,y = self.toWorld(self.mouseX, self.mouseY)
self.mouseX = None
self.mouseY = None
return Point(x,y)
def checkMouse(self):
"""Return last mouse click or None if mouse has
not been clicked since last call"""
if self.isClosed():
raise GraphicsError("checkMouse in closed window")
self.update()
if self.mouseX != None and self.mouseY != None:
x,y = self.toWorld(self.mouseX, self.mouseY)
self.mouseX = None
self.mouseY = None
return Point(x,y)
else:
return None
def getHeight(self):
"""Return the height of the window"""
return self.height
def getWidth(self):
"""Return the width of the window"""
return self.width
    def save(self, filepath = "graphwin.jpg"):
        '''Saves the drawn image under the given filepath'''
        if HAS_PIL and self.save_image:
            image = self.image
            if filepath.lower().endswith(('.jpg', '.jpeg')):
                # JPEG has no alpha channel, so flatten the RGBA image first
                image = image.convert('RGB')
            image.save(filepath)
    def saveImage(self, filepath = "graphwin.jpg"):
        '''Deprecated (but maintained for backwards compatibility), please use GraphWin.save'''
        self.save(filepath)
def translate(self, x, y, mode = LOCAL):
'''Translates the (x, y) pixel coordinate to the custom coordinates'''
if mode == GLOBAL:
x -= self.winfo_rootx()
y -= self.winfo_rooty()
return self.toWorld(x, y)
def translateCustom(self, x, y, mode = LOCAL):
'''Translates custom coordinates to pixel coordinates'''
x, y = self.toScreen(x, y)
if mode == GLOBAL:
x += self.winfo_rootx()
y += self.winfo_rooty()
return (x, y)
def toScreen(self, x, y):
'''Returns pixel coordinates'''
trans = self.trans
if trans:
return self.trans.screen(x,y)
else:
return x,y
def toWorld(self, x, y):
'''Returns custom coordinates'''
trans = self.trans
if trans:
return self.trans.world(x,y)
else:
return x,y
def setMouseHandler(self, func):
self._mouseCallback = func
def _onClick(self, e):
self.mouseX = e.x
self.mouseY = e.y
if self._mouseCallback:
self._mouseCallback(Point(e.x, e.y))
class Transform:
"""Internal class for 2-D coordinate transformations"""
def __init__(self, w, h, xlow, ylow, xhigh, yhigh):
# w, h are width and height of window
# (xlow,ylow) coordinates of lower-left [raw (0,h-1)]
# (xhigh,yhigh) coordinates of upper-right [raw (w-1,0)]
xspan = (xhigh-xlow)
yspan = (yhigh-ylow)
self.xbase = xlow
self.ybase = yhigh
self.xscale = xspan/float(w)
self.yscale = yspan/float(h)
def screen(self,x,y):
# Returns x,y in screen (actually window) coordinates
xs = (x-self.xbase) / self.xscale
ys = (self.ybase-y) / self.yscale
return int(xs+0.5),int(ys+0.5)
def world(self,xs,ys):
# Returns xs,ys in world coordinates
x = xs*self.xscale + self.xbase
y = self.ybase - ys*self.yscale
return x,y
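    # Worked example (illustrative): Transform(200, 200, 0, 0, 10, 10) maps
    # the world point (5, 5) to the screen pixel (100, 100) via screen(),
    # and world(0, 0) returns (0.0, 10.0) -- world y increases upward while
    # screen y increases downward.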
# Default values for various item configuration options. Only a subset of
# keys may be present in the configuration dictionary for a given item
DEFAULT_CONFIG = {"fill":"",
"outline":"black",
"width":"1",
"arrow":"none",
"text":"",
"justify":"center",
"font": ("helvetica", 12, "normal")}
class GraphicsObject:
"""Generic base class for all of the drawable objects"""
# A subclass of GraphicsObject should override _draw and
# and _move methods.
def __init__(self, options):
# options is a list of strings indicating which options are
# legal for this object.
# When an object is drawn, canvas is set to the GraphWin(canvas)
# object where it is drawn and id is the TK identifier of the
# drawn shape.
self.canvas = None
self.id = None
# config is the dictionary of configuration options for the widget.
config = {}
for option in options:
config[option] = DEFAULT_CONFIG[option]
self.config = config
def setFill(self, color):
"""Set interior color to color"""
self._reconfig("fill", color)
def setOutline(self, color):
"""Set outline color to color"""
self._reconfig("outline", color)
def setWidth(self, width):
"""Set line weight to width"""
self._reconfig("width", width)
def draw(self, graphwin):
global _root
"""Draw the object in graphwin, which should be a GraphWin
object. A GraphicsObject may only be drawn into one
window. Raises an error if attempt made to draw an object that
is already visible."""
if self.canvas and not self.canvas.isClosed():
raise GraphicsError(OBJ_ALREADY_DRAWN)
if graphwin.isClosed():
raise GraphicsError("Can't draw to closed window")
self.canvas = graphwin
self.id = self._draw(graphwin, self.config)
if graphwin.autoflush:
_root.update()
graphwin.all_objects[self.id] = self
def undraw(self):
global _root
"""Undraw the object (i.e. hide it). Returns silently if the
object is not currently drawn."""
if not self.canvas:
return
if not self.canvas.isClosed():
self.canvas.delete(self.id)
if self.canvas.autoflush:
_root.update()
try:
del self.canvas.all_objects[self.id]
except (AttributeError, KeyError):
pass
self.canvas = None
self.id = None
def move(self, dx, dy):
global _root
"""move object dx units in x direction and dy units in y
direction"""
self._move(dx,dy)
canvas = self.canvas
if canvas and not canvas.isClosed():
trans = canvas.trans
if trans:
x = dx/ trans.xscale
y = -dy / trans.yscale
else:
x = dx
y = dy
self.canvas.move(self.id, x, y)
if canvas.autoflush:
_root.update()
def _reconfig(self, option, setting):
global _root
# Internal method for changing configuration of the object
# Raises an error if the option does not exist in the config
# dictionary for this object
if option not in self.config:
raise GraphicsError(UNSUPPORTED_METHOD)
options = self.config
options[option] = setting
if self.canvas and not self.canvas.isClosed():
self.canvas.itemconfig(self.id, options)
if self.canvas.autoflush:
_root.update()
def getColor(self, attribute):
'''Gets the color'''
color = self.config[attribute]
if not color:
if isinstance(self, (Line, Point)):
return 'black'
return 'black' if attribute == 'outline' else (0, 0, 0, 0)
return color
def _draw(self, canvas, options):
"""draws appropriate figure on canvas with options provided
Returns Tk id of item drawn"""
pass # must override in subclass
def _move(self, dx, dy):
"""updates internal state of object to move it dx,dy units"""
pass # must override in subclass
class Point(GraphicsObject):
def __init__(self, x, y):
GraphicsObject.__init__(self, ["outline", "fill"])
self.setFill = self.setOutline
self.x = x
self.y = y
def _draw(self, canvas, options):
x,y = canvas.toScreen(self.x,self.y)
        if canvas.save_image:
            canvas.drawing_image.point((x, y), self.getColor('outline'))
return canvas.create_rectangle(x,y,x+1,y+1,options)
def _move(self, dx, dy):
self.x = self.x + dx
self.y = self.y + dy
def clone(self):
other = Point(self.x,self.y)
other.config = self.config.copy()
return other
def getX(self): return self.x
def getY(self): return self.y
class _BBox(GraphicsObject):
# Internal base class for objects represented by bounding box
# (opposite corners) Line segment is a degenerate case.
def __init__(self, p1, p2, options=["outline","width","fill"]):
GraphicsObject.__init__(self, options)
self.p1 = p1.clone()
self.p2 = p2.clone()
def _move(self, dx, dy):
self.p1.x = self.p1.x + dx
self.p1.y = self.p1.y + dy
self.p2.x = self.p2.x + dx
self.p2.y = self.p2.y + dy
def getP1(self): return self.p1.clone()
def getP2(self): return self.p2.clone()
def getCenter(self):
p1 = self.p1
p2 = self.p2
return Point((p1.x+p2.x)/2.0, (p1.y+p2.y)/2.0)
class Rectangle(_BBox):
def __init__(self, p1, p2):
_BBox.__init__(self, p1, p2)
def _draw(self, canvas, options):
p1 = self.p1
p2 = self.p2
x1,y1 = canvas.toScreen(p1.x,p1.y)
x2,y2 = canvas.toScreen(p2.x,p2.y)
if canvas.save_image:
canvas.drawing_image.rectangle((x1, y2, x2 + 1, y1 + 1), self.getColor('fill'), self.getColor('outline'))
return canvas.create_rectangle(x1,y1,x2,y2,options)
def clone(self):
other = Rectangle(self.p1, self.p2)
other.config = self.config.copy()
return other
class Oval(_BBox):
def __init__(self, p1, p2):
_BBox.__init__(self, p1, p2)
def clone(self):
other = Oval(self.p1, self.p2)
other.config = self.config.copy()
return other
def _draw(self, canvas, options):
p1 = self.p1
p2 = self.p2
x1,y1 = canvas.toScreen(p1.x,p1.y)
x2,y2 = canvas.toScreen(p2.x,p2.y)
if canvas.save_image:
canvas.drawing_image.ellipse((x1, y2, x2, y1), self.getColor('fill'), self.getColor('outline'))
return canvas.create_oval(x1,y1,x2,y2,options)
class Circle(Oval):
def __init__(self, center, radius):
p1 = Point(center.x-radius, center.y-radius)
p2 = Point(center.x+radius, center.y+radius)
Oval.__init__(self, p1, p2)
self.radius = radius
def clone(self):
other = Circle(self.getCenter(), self.radius)
other.config = self.config.copy()
return other
def getRadius(self):
return self.radius
class Line(_BBox):
def __init__(self, p1, p2):
_BBox.__init__(self, p1, p2, ["arrow","fill","width"])
self.setFill(DEFAULT_CONFIG['outline'])
self.setOutline = self.setFill
def clone(self):
other = Line(self.p1, self.p2)
other.config = self.config.copy()
return other
def _draw(self, canvas, options):
p1 = self.p1
p2 = self.p2
x1,y1 = canvas.toScreen(p1.x,p1.y)
x2,y2 = canvas.toScreen(p2.x,p2.y)
if canvas.save_image:
canvas.drawing_image.line((x1, y1, x2, y2), self.getColor('fill'))
return canvas.create_line(x1,y1,x2,y2,options)
def setArrow(self, option):
if not option in ["first","last","both","none"]:
raise GraphicsError(BAD_OPTION)
self._reconfig("arrow", option)
class Polygon(GraphicsObject):
def __init__(self, *points):
# if points passed as a list, extract it
if len(points) == 1 and type(points[0]) == type([]):
points = points[0]
self.points = list(map(Point.clone, points))
GraphicsObject.__init__(self, ["outline", "width", "fill"])
def clone(self):
other = Polygon(*self.points)
other.config = self.config.copy()
return other
def getPoints(self):
return list(map(Point.clone, self.points))
def _move(self, dx, dy):
for p in self.points:
p.move(dx,dy)
def _draw(self, canvas, options):
args = [canvas]
image_args = []
for p in self.points:
x,y = canvas.toScreen(p.x,p.y)
args.append(x)
args.append(y)
image_args.append((x, y))
args.append(options)
if canvas.save_image:
if len(image_args) > 2:
canvas.drawing_image.polygon(image_args, self.getColor('fill'), self.getColor('outline'))
        return canvas.create_polygon(*args[1:])
class Text(GraphicsObject):
def __init__(self, p, text):
GraphicsObject.__init__(self, ["justify","fill","text","font"])
self.setText(text)
self.anchor = p.clone()
self.setFill(DEFAULT_CONFIG['outline'])
self.setOutline = self.setFill
def _draw(self, canvas, options):
p = self.anchor
x,y = canvas.toScreen(p.x,p.y)
if canvas.save_image:
            canvas.drawing_image.text((x, y), self.text, fill = self.getColor('fill'))
return canvas.create_text(x,y,options)
def _move(self, dx, dy):
self.anchor.move(dx,dy)
def clone(self):
other = Text(self.anchor, self.config['text'])
other.config = self.config.copy()
return other
def setText(self,text):
self.text = text
self._reconfig("text", text)
def getText(self):
return self.config["text"]
def getAnchor(self):
return self.anchor.clone()
def setFace(self, face):
if face in ['helvetica','arial','courier','times roman', 'comic sans']:
f,s,b = self.config['font']
self._reconfig("font",(face,s,b))
else:
raise GraphicsError(BAD_OPTION)
def setSize(self, size):
if 5 <= size <= 36:
f,s,b = self.config['font']
self._reconfig("font", (f,size,b))
else:
raise GraphicsError(BAD_OPTION)
def setStyle(self, style):
if style in ['bold','normal','italic', 'bold italic']:
f,s,b = self.config['font']
self._reconfig("font", (f,s,style))
else:
raise GraphicsError(BAD_OPTION)
def setTextColor(self, color):
self.setFill(color)
class Entry(GraphicsObject):
def __init__(self, p, width):
global _root
GraphicsObject.__init__(self, [])
self.anchor = p.clone()
#print self.anchor
self.width = width
self.text = tkinter.StringVar(_root)
self.text.set("")
self.fill = "gray"
self.color = "black"
self.font = DEFAULT_CONFIG['font']
self.entry = None
def _draw(self, canvas, options):
p = self.anchor
x,y = canvas.toScreen(p.x,p.y)
frm = tkinter.Frame(canvas.master)
self.entry = tkinter.Entry(frm,
width=self.width,
textvariable=self.text,
bg = self.fill,
fg = self.color,
font=self.font)
self.entry.pack()
#self.setFill(self.fill)
return canvas.create_window(x,y,window=frm)
def getText(self):
return self.text.get()
def _move(self, dx, dy):
self.anchor.move(dx,dy)
def getAnchor(self):
return self.anchor.clone()
def clone(self):
other = Entry(self.anchor, self.width)
other.config = self.config.copy()
other.text = tkinter.StringVar()
other.text.set(self.text.get())
other.fill = self.fill
return other
def setText(self, t):
self.text.set(t)
def setFill(self, color):
self.fill = color
if self.entry:
self.entry.config(bg=color)
def _setFontComponent(self, which, value):
font = list(self.font)
font[which] = value
self.font = tuple(font)
if self.entry:
self.entry.config(font=self.font)
def setFace(self, face):
if face in ['helvetica','arial','courier','times roman']:
self._setFontComponent(0, face)
else:
raise GraphicsError(BAD_OPTION)
def setSize(self, size):
if 5 <= size <= 36:
self._setFontComponent(1,size)
else:
raise GraphicsError(BAD_OPTION)
def setStyle(self, style):
if style in ['bold','normal','italic', 'bold italic']:
self._setFontComponent(2,style)
else:
raise GraphicsError(BAD_OPTION)
def setTextColor(self, color):
self.color=color
if self.entry:
self.entry.config(fg=color)
class Image(GraphicsObject):
idCount = 0
imageCache = {} # tkinter photoimages go here to avoid GC while drawn
def __init__(self, p, *pixmap):
        global _root
GraphicsObject.__init__(self, [])
self.anchor = p.clone()
self.imageId = Image.idCount
Image.idCount = Image.idCount + 1
if len(pixmap) == 1:
# file name provided
self.img = tkinter.PhotoImage(file=pixmap[0], master=_root)
else:
# width and height provided
width, height = pixmap
self.img = tkinter.PhotoImage(master=_root, width=width, height=height)
def _draw(self, canvas, options):
p = self.anchor
x,y = canvas.toScreen(p.x,p.y)
self.imageCache[self.imageId] = self.img # save a reference
if canvas.save_image:
canvas.drawing_image.bitmap((x, y), self.img)
return canvas.create_image(x,y,image=self.img)
def _move(self, dx, dy):
self.anchor.move(dx,dy)
def undraw(self):
try:
del self.imageCache[self.imageId] # allow gc of tkinter photoimage
except KeyError:
pass
GraphicsObject.undraw(self)
def getAnchor(self):
return self.anchor.clone()
def clone(self):
other = Image(Point(0,0), 0, 0)
other.img = self.img.copy()
other.anchor = self.anchor.clone()
other.config = self.config.copy()
return other
def getWidth(self):
"""Returns the width of the image in pixels"""
return self.img.width()
def getHeight(self):
"""Returns the height of the image in pixels"""
return self.img.height()
def getPixel(self, x, y):
"""Returns a list [r,g,b] with the RGB color values for pixel (x,y)
r,g,b are in range(256)
"""
value = self.img.get(x,y)
if type(value) == type(0):
return [value, value, value]
else:
return list(map(int, value.split()))
def setPixel(self, x, y, color):
"""Sets pixel (x,y) to the given color"""
self.img.put("{" + color +"}", (x, y))
def save(self, filename):
"""Saves the pixmap image to filename.
The format for the save image is determined from the filname extension."""
path, name = os.path.split(filename)
ext = name.split(".")[-1]
self.img.write( filename, format=ext)
def color_rgb(r,g,b):
"""r,g,b are intensities of red, green, and blue in range(256)
Returns color specifier string for the resulting color"""
return "#%02x%02x%02x" % (r,g,b)
def drawAll(window, *itemsToDraw):
"""Draw all items to a window"""
if not isinstance(window, GraphWin):
raise TypeError('Window must be a GraphWin object')
for item in itemsToDraw: item.draw(window)
def undrawAll(*itemsToUndraw):
"""Undraws all items from a window"""
for item in itemsToUndraw: item.undraw()
def redrawAll(window, *itemsToRedraw):
"""Redraws all items to a window"""
if not isinstance(window, GraphWin):
raise TypeError('Window must be a GraphWin object')
undrawAll(*itemsToRedraw)
drawAll(window, *itemsToRedraw)
def test():
win = GraphWin(tkinter.Tk())
win.setCoords(0,0,10,10)
win.grid()
t = Text(Point(5,5), "Centered Text")
t.draw(win)
p = Polygon(Point(1,1), Point(5,3), Point(2,7))
p.draw(win)
e = Entry(Point(5,6), 10)
e.draw(win)
win.getMouse()
p.setFill("red")
p.setOutline("blue")
p.setWidth(2)
s = ""
for pt in p.getPoints():
s = s + "(%0.1f,%0.1f) " % (pt.getX(), pt.getY())
t.setText(e.getText())
e.setFill("green")
e.setText("Spam!")
e.move(2,0)
win.getMouse()
p.move(2,3)
s = ""
for pt in p.getPoints():
s = s + "(%0.1f,%0.1f) " % (pt.getX(), pt.getY())
t.setText(s)
win.getMouse()
p.undraw()
e.undraw()
t.setStyle("bold")
win.getMouse()
t.setStyle("normal")
win.getMouse()
t.setStyle("italic")
win.getMouse()
t.setStyle("bold italic")
win.getMouse()
t.setSize(14)
win.getMouse()
t.setFace("arial")
t.setSize(20)
win.getMouse()
win.close()
if __name__ == "__main__":
test()
| gpl-3.0 | 969,176,999,425,045,900 | 28.021134 | 180 | 0.674026 | false |
matplotlib/freetypy | docstrings/truetype.py | 1 | 5852 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
TT_PLATFORM = """
Platform identifier codes.
- `APPLE_UNICODE`: Used by Apple to indicate a Unicode character map
and/or name entry. See `TT_APPLE_ID` for corresponding ‘encoding_id’
values. Note that name entries in this format are coded as
big-endian UCS-2 character codes only.
- `MACINTOSH`: Used by Apple to indicate a MacOS-specific charmap
and/or name entry. See `TT_MAC_ID` for corresponding ‘encoding_id’
values. Note that most TrueType fonts contain an Apple roman charmap
to be usable on MacOS systems (even if they contain a Microsoft
charmap as well).
- `ISO`: This value was used to specify ISO/IEC 10646 charmaps. It is
however now deprecated. See `TT_ISO_ID` for a list of
corresponding ‘encoding_id’ values.
- `MICROSOFT`: Used by Microsoft to indicate Windows-specific
charmaps. See `TT_MS_ID` for a list of corresponding ‘encoding_id’
values. Note that most fonts contain a Unicode charmap using
(`TT_PLATFORM.MICROSOFT`, `TT_MS_ID.UNICODE_CS`).
- `CUSTOM`: Used to indicate application-specific charmaps.
- `ADOBE`: This value isn't part of any font format specification, but
is used by FreeType to report Adobe-specific charmaps in an
`CharMap` object. See `TT_ADOBE_ID`.
"""
TT_APPLE_ID = """
Apple-specific encoding values.
- `DEFAULT`: Unicode version 1.0.
- `UNICODE_1_1`: Unicode 1.1; specifies Hangul characters starting at
U+34xx.
- `ISO_10646`: Deprecated (identical to preceding).
- `UNICODE_2_0`: Unicode 2.0 and beyond (UTF-16 BMP only).
- `UNICODE_32`: Unicode 3.1 and beyond, using UTF-32.
- `VARIANT_SELECTOR`: From Adobe, not Apple. Not a normal
cmap. Specifies variations on a real cmap.
"""
TT_ADOBE_ID = """
Adobe-specific encoding values.
- `STANDARD`: Adobe standard encoding.
- `EXPERT`: Adobe expert encoding.
- `CUSTOM`: Adobe custom encoding.
- `LATIN_1`: Adobe Latin 1 encoding.
"""
TT_ISO_ID = """
Standard ISO encodings.
- `ISO_7BIT_ASCII`: ASCII.
- `ISO_10646`: ISO/10646.
- `ISO_8859_1`: Also known as Latin-1.
"""
TT_MAC_ID = """
Macintosh-specific encoding values.
- `ROMAN`
- `JAPANESE`
- `TRADITIONAL_CHINESE`
- `KOREAN`
- `ARABIC`
- `HEBREW`
- `GREEK`
- `RUSSIAN`
- `RSYMBOL`
- `DEVANAGARI`
- `GURMUKHI`
- `GUJARATI`
- `ORIYA`
- `BENGALI`
- `TAMIL`
- `TELUGU`
- `KANNADA`
- `MALAYALAM`
- `SINHALESE`
- `BURMESE`
- `KHMER`
- `THAI`
- `LAOTIAN`
- `GEORGIAN`
- `ARMENIAN`
- `MALDIVIAN`
- `SIMPLIFIED_CHINESE`
- `TIBETAN`
- `MONGOLIAN`
- `GEEZ`
- `SLAVIC`
- `VIETNAMESE`
- `SINDHI`
- `UNINTERP`
"""
TT_MAC_LANGID = """
Language identifier.
Used in the name records of the TTF "name" table if the "platform"
identifier code is `TT_PLATFORM.MACINTOSH`.
"""
TT_MS_ID = """
Microsoft-specific encoding values.
- `SYMBOL_CS`: Corresponds to Microsoft symbol encoding. See
`ENCODING.MS_SYMBOL`.
- `UNICODE_CS`: Corresponds to a Microsoft WGL4 charmap, matching
Unicode. See `ENCODING.UNICODE`.
- `SJIS`: Corresponds to SJIS Japanese encoding. See `ENCODING.SJIS`.
- `GB2312`: Corresponds to Simplified Chinese as used in Mainland
China. See `ENCODING.GB2312`.
- `BIG_5`: Corresponds to Traditional Chinese as used in Taiwan and
Hong Kong. See `ENCODING.BIG5`.
- `WANSUNG`: Corresponds to Korean Wansung encoding. See
`ENCODING.WANSUNG`.
- `JOHAB`: Corresponds to Johab encoding. See `ENCODING.JOHAB`.
- `UCS_4`: Corresponds to UCS-4 or UTF-32 charmaps. This has been
added to the OpenType specification version 1.4 (mid-2001).
"""
TT_MS_LANGID = """
Language identifier.
Used in the name records of the TTF "name" table if the "platform"
identifier code is `TT_PLATFORM.MICROSOFT`.
"""
TT_NAME_ID = """
The type of value stored in a `SfntName` record.
- `COPYRIGHT`
- `FONT_FAMILY`
- `FONT_SUBFAMILY`
- `UNIQUE_ID`
- `FULL_NAME`
- `VERSION_STRING`
- `PS_NAME`
- `TRADEMARK`
The following values are from the OpenType spec:
- `MANUFACTURER`
- `DESIGNER`
- `DESCRIPTION`
- `VENDOR_URL`
- `DESIGNER_URL`
- `LICENSE`
- `LICENSE_URL`
- `PREFERRED_FAMILY`
- `PREFERRED_SUBFAMILY`
- `MAC_FULL_NAME`
- `SAMPLE_TEXT`
This is new in OpenType 1.3:
- `CID_FINDFONT_NAME`
This is new in OpenType 1.5:
- `WWS_FAMILY`
- `WWS_SUBFAMILY`
"""
| bsd-2-clause | -7,203,823,751,697,514,000 | 25.527273 | 72 | 0.72207 | false |
calispac/digicampipe | digicampipe/scripts/nsb_rate_camera.py | 1 | 5147 | """
Display the NSB rate for each pixel
Usage:
nsb_rate_camera.py [options] [--] <INPUT>
Options:
--help Show this
<INPUT> File of histogram of baselines during a run.
Output of raw.py with --baseline_filename on
science data.
--dark_hist=LIST Histogram of ADC samples during dark run.
Output of raw.py on dark data.
--plot=FILE path to the output plot. Will show the average
over all events of the NSB.
If set to "show", the plot is displayed and not
saved.
If set to "none", no plot is done.
[Default: show]
--parameters=FILE Calibration parameters file path
--template=FILE Pulse template file path
--bias_resistance=FLOAT Bias resistance in Ohm. [Default: 1e4]
--cell_capacitance=FLOAT Cell capacitance in Farad. [Default: 5e-14]
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from ctapipe.visualization import CameraDisplay
from docopt import docopt
import yaml
from astropy import units as u
from histogram.histogram import Histogram1D
from digicampipe.calib.baseline import _compute_nsb_rate
from digicampipe.instrument.camera import DigiCam
from digicampipe.utils.docopt import convert_text, convert_float
from digicampipe.utils.pulse_template import NormalizedPulseTemplate
from digicampipe.scripts.bad_pixels import get_bad_pixels
from digicampipe.calib.charge import _get_average_matrix_bad_pixels
def nsb_rate(
baseline_histo_file, dark_histo_file, param_file, template_filename,
plot="show", plot_nsb_range=None, norm="log",
bias_resistance=1e4 * u.Ohm, cell_capacitance=5e-14 * u.Farad
):
baseline_histo = Histogram1D.load(baseline_histo_file)
dark_histo = Histogram1D.load(dark_histo_file)
baseline_shift = baseline_histo.mean()-dark_histo.mean()
n_pixel = len(DigiCam.geometry.neighbors)
pixels = np.arange(n_pixel, dtype=int)
with open(param_file) as file:
pulse_template = NormalizedPulseTemplate.load(template_filename)
pulse_area = pulse_template.integral() * u.ns
charge_to_amplitude = pulse_template.compute_charge_amplitude_ratio(7, 4)
calibration_parameters = yaml.safe_load(file)
gain_integral = np.array(calibration_parameters['gain'])
gain_amplitude = gain_integral * charge_to_amplitude
crosstalk = np.array(calibration_parameters['mu_xt'])
rate = _compute_nsb_rate(
baseline_shift=baseline_shift, gain=gain_amplitude,
pulse_area=pulse_area, crosstalk=crosstalk,
bias_resistance=bias_resistance, cell_capacitance=cell_capacitance
)
bad_pixels = get_bad_pixels(
calib_file=param_file, nsigma_gain=5, nsigma_elecnoise=5,
dark_histo=dark_histo_file, nsigma_dark=8, plot=None, output=None
)
bad_pixels = np.unique(np.hstack(
(
bad_pixels,
pixels[rate < 0],
pixels[rate > 5 * u.GHz]
)
))
avg_matrix = _get_average_matrix_bad_pixels(DigiCam.geometry, bad_pixels)
good_pixels_mask = np.ones(n_pixel, dtype=bool)
good_pixels_mask[bad_pixels] = False
good_pixels = pixels[good_pixels_mask]
rate[bad_pixels] = avg_matrix[bad_pixels, :].dot(rate[good_pixels])
if plot is None:
return rate
fig1, ax = plt.subplots(1, 1)
display = CameraDisplay(DigiCam.geometry, ax=ax, norm=norm,
title='NSB rate [GHz]')
rate_ghz = rate.to(u.GHz).value
display.image = rate_ghz
if plot_nsb_range is None:
plot_nsb_range = (np.min(rate_ghz), np.max(rate_ghz))
display.set_limits_minmax(*plot_nsb_range)
display.add_colorbar(ax=ax)
display.highlight_pixels(bad_pixels, color='r', linewidth=2)
plt.tight_layout()
output_path = os.path.dirname(plot)
if plot == "show" or \
(output_path != "" and not os.path.isdir(output_path)):
if not plot == "show":
print('WARNING: Path ' + output_path + ' for the output plot ' +
'does not exist, displaying the plot instead.\n')
plt.show()
else:
plt.savefig(plot)
print(plot, 'created')
plt.close(fig1)
return rate
def entry():
args = docopt(__doc__)
baseline_histo_file = args['<INPUT>']
dark_histo_file = convert_text(args['--dark_hist'])
param_file = convert_text(args['--parameters'])
template_filename = convert_text(args['--template'])
plot = convert_text(args['--plot'])
bias_resistance = convert_float(args['--bias_resistance']) * u.Ohm
cell_capacitance = convert_float(args['--cell_capacitance']) * u.Farad
nsb_rate(
baseline_histo_file, dark_histo_file, param_file, template_filename,
plot=plot, bias_resistance=bias_resistance,
cell_capacitance=cell_capacitance
)
if __name__ == '__main__':
entry() | gpl-3.0 | 1,548,803,328,053,787,000 | 40.184 | 81 | 0.626967 | false |
DataONEorg/d1_python | lib_common/src/d1_common/system_metadata.py | 1 | 14840 | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for handling the DataONE SystemMetadata type.
DataONE API methods such as `MNStorage.create()` require a Science Object and System
Metadata pair.
Examples:
Example v2 SystemMetadata XML document with all optional values included:
::
<v2:systemMetadata xmlns:v2="http://ns.dataone.org/service/types/v2.0">
<!--Optional:-->
<serialVersion>11</serialVersion>
<identifier>string</identifier>
<formatId>string</formatId>
<size>11</size>
<checksum algorithm="string">string</checksum>
<!--Optional:-->
<submitter>string</submitter>
<rightsHolder>string</rightsHolder>
<!--Optional:-->
<accessPolicy>
<!--1 or more repetitions:-->
<allow>
<!--1 or more repetitions:-->
<subject>string</subject>
<!--1 or more repetitions:-->
<permission>read</permission>
</allow>
</accessPolicy>
<!--Optional:-->
<replicationPolicy replicationAllowed="true" numberReplicas="3">
<!--Zero or more repetitions:-->
<preferredMemberNode>string</preferredMemberNode>
<!--Zero or more repetitions:-->
<blockedMemberNode>string</blockedMemberNode>
</replicationPolicy>
<!--Optional:-->
<obsoletes>string</obsoletes>
<obsoletedBy>string</obsoletedBy>
<archived>true</archived>
<dateUploaded>2014-09-18T17:18:33</dateUploaded>
<dateSysMetadataModified>2006-08-19T11:27:14-06:00</dateSysMetadataModified>
<originMemberNode>string</originMemberNode>
<authoritativeMemberNode>string</authoritativeMemberNode>
<!--Zero or more repetitions:-->
<replica>
<replicaMemberNode>string</replicaMemberNode>
<replicationStatus>failed</replicationStatus>
<replicaVerified>2013-05-21T19:02:49-06:00</replicaVerified>
</replica>
<!--Optional:-->
<seriesId>string</seriesId>
<!--Optional:-->
<mediaType name="string">
<!--Zero or more repetitions:-->
<property name="string">string</property>
</mediaType>
<!--Optional:-->
<fileName>string</fileName>
</v2:systemMetadata>
"""
import datetime
import logging
import os
import d1_common.checksum
import d1_common.date_time
import d1_common.type_conversions
import d1_common.types.dataoneTypes
import d1_common.wrap.access_policy
import d1_common.xml
logger = logging.getLogger(__name__)
SYSMETA_ROOT_CHILD_LIST = [
"serialVersion",
"identifier",
"formatId",
"size",
"checksum",
"submitter",
"rightsHolder",
"accessPolicy",
"replicationPolicy",
"obsoletes",
"obsoletedBy",
"archived",
"dateUploaded",
"dateSysMetadataModified",
"originMemberNode",
"authoritativeMemberNode",
"replica",
"seriesId",
"mediaType",
"fileName",
]
def is_sysmeta_pyxb(sysmeta_pyxb):
"""Args: sysmeta_pyxb: Object that may or may not be a SystemMetadata PyXB object.
Returns:
bool:
- ``True`` if ``sysmeta_pyxb`` is a SystemMetadata PyXB object.
- ``False`` if ``sysmeta_pyxb`` is not a PyXB object or is a PyXB object of a
type other than SystemMetadata.
"""
return (
d1_common.type_conversions.is_pyxb_d1_type(sysmeta_pyxb)
and d1_common.type_conversions.pyxb_get_type_name(sysmeta_pyxb)
== "SystemMetadata"
)
def normalize_in_place(sysmeta_pyxb, reset_timestamps=False, reset_filename=False):
"""Normalize SystemMetadata PyXB object in-place.
Args:
sysmeta_pyxb:
SystemMetadata PyXB object to normalize.
reset_timestamps: bool
``True``: Timestamps in the SystemMetadata are set to a standard value so that
objects that are compared after normalization register as equivalent if only
their timestamps differ.
Notes:
The SystemMetadata is normalized by removing any redundant information and
ordering all sections where there are no semantics associated with the order. The
normalized SystemMetadata is intended to be semantically equivalent to the
un-normalized one.
"""
if sysmeta_pyxb.accessPolicy is not None:
sysmeta_pyxb.accessPolicy = d1_common.wrap.access_policy.get_normalized_pyxb(
sysmeta_pyxb.accessPolicy
)
if getattr(sysmeta_pyxb, "mediaType", False):
d1_common.xml.sort_value_list_pyxb(sysmeta_pyxb.mediaType.property_)
if getattr(sysmeta_pyxb, "replicationPolicy", False):
d1_common.xml.sort_value_list_pyxb(
sysmeta_pyxb.replicationPolicy.preferredMemberNode
)
d1_common.xml.sort_value_list_pyxb(
sysmeta_pyxb.replicationPolicy.blockedMemberNode
)
d1_common.xml.sort_elements_by_child_values(
sysmeta_pyxb.replica,
["replicaVerified", "replicaMemberNode", "replicationStatus"],
)
sysmeta_pyxb.archived = bool(sysmeta_pyxb.archived)
if reset_timestamps:
epoch_dt = datetime.datetime(1970, 1, 1, tzinfo=d1_common.date_time.UTC())
sysmeta_pyxb.dateUploaded = epoch_dt
sysmeta_pyxb.dateSysMetadataModified = epoch_dt
for replica_pyxb in getattr(sysmeta_pyxb, "replica", []):
replica_pyxb.replicaVerified = epoch_dt
else:
sysmeta_pyxb.dateUploaded = d1_common.date_time.round_to_nearest(
sysmeta_pyxb.dateUploaded
)
sysmeta_pyxb.dateSysMetadataModified = d1_common.date_time.round_to_nearest(
sysmeta_pyxb.dateSysMetadataModified
)
for replica_pyxb in getattr(sysmeta_pyxb, "replica", []):
replica_pyxb.replicaVerified = d1_common.date_time.round_to_nearest(
replica_pyxb.replicaVerified
)
if reset_filename:
sysmeta_pyxb.fileName = None
def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False, ignore_filename=False):
"""Determine if SystemMetadata PyXB objects are semantically equivalent.
Normalize then compare SystemMetadata PyXB objects for equivalency.
Args:
a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare
ignore_timestamps: bool
``True``: Timestamps are ignored during the comparison.
ignore_filename: bool
``True``: FileName elements are ignored during the comparison.
This is necessary in cases where GMN returns a generated filename because one
was not provided in the SysMeta.
Returns: bool:
``True`` if SystemMetadata PyXB objects are semantically equivalent.
Notes:
The SystemMetadata is normalized by removing any redundant information and
ordering all sections where there are no semantics associated with the order. The
normalized SystemMetadata is intended to be semantically equivalent to the
un-normalized one.
"""
normalize_in_place(a_pyxb, ignore_timestamps, ignore_filename)
normalize_in_place(b_pyxb, ignore_timestamps, ignore_filename)
a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb)
b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb)
are_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml)
if not are_equivalent:
logger.debug("XML documents not equivalent:")
logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml))
return are_equivalent
def are_equivalent_xml(a_xml, b_xml, ignore_timestamps=False):
"""Determine if two SystemMetadata XML docs are semantically equivalent.
Normalize then compare SystemMetadata XML docs for equivalency.
Args:
a_xml, b_xml: bytes
UTF-8 encoded SystemMetadata XML docs to compare
ignore_timestamps: bool
``True``: Timestamps in the SystemMetadata are ignored so that objects that are
compared register as equivalent if only their timestamps differ.
Returns: bool:
``True`` if SystemMetadata XML docs are semantically equivalent.
Notes:
The SystemMetadata is normalized by removing any redundant information and
ordering all sections where there are no semantics associated with the order. The
normalized SystemMetadata is intended to be semantically equivalent to the
un-normalized one.
"""
"""Normalizes then compares SystemMetadata XML docs for equivalency.
``a_xml`` and ``b_xml`` should be utf-8 encoded DataONE System Metadata XML
documents.
"""
return are_equivalent_pyxb(
d1_common.xml.deserialize(a_xml),
d1_common.xml.deserialize(b_xml),
ignore_timestamps,
)
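# Usage sketch (illustrative; file paths are assumptions):
#
#   with open('a.xml', 'rb') as a, open('b.xml', 'rb') as b:
#       is_same = are_equivalent_xml(a.read(), b.read(), ignore_timestamps=True)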
def clear_elements(sysmeta_pyxb, clear_replica=True, clear_serial_version=True):
"""{clear_replica} causes any replica information to be removed from the object.
{clear_replica} ignores any differences in replica information, as this information
is often different between MN and CN.
"""
if clear_replica:
sysmeta_pyxb.replica = None
if clear_serial_version:
sysmeta_pyxb.serialVersion = None
sysmeta_pyxb.replicationPolicy = None
def update_elements(dst_pyxb, src_pyxb, el_list):
"""Copy elements specified in ``el_list`` from ``src_pyxb`` to ``dst_pyxb``
Only elements that are children of root are supported. See
SYSMETA_ROOT_CHILD_LIST.
If an element in ``el_list`` does not exist in ``src_pyxb``, it is removed from
``dst_pyxb``.
"""
invalid_element_set = set(el_list) - set(SYSMETA_ROOT_CHILD_LIST)
if invalid_element_set:
raise ValueError(
'Passed one or more invalid elements. invalid="{}"'.format(
", ".join(sorted(list(invalid_element_set)))
)
)
for el_str in el_list:
setattr(dst_pyxb, el_str, getattr(src_pyxb, el_str, None))
def generate_system_metadata_pyxb(
pid,
format_id,
sciobj_stream,
submitter_str,
rights_holder_str,
authoritative_mn_urn,
# SeriesID and obsolescence
sid=None,
obsoletes_pid=None,
obsoleted_by_pid=None,
is_archived=False,
#
serial_version=1,
uploaded_datetime=None,
modified_datetime=None,
file_name=None,
origin_mn_urn=None,
# Access Policy
is_private=False,
access_list=None,
# Media Type
media_name=None,
media_property_list=None,
# Replication Policy
is_replication_allowed=False,
preferred_mn_list=None,
blocked_mn_list=None,
#
pyxb_binding=None,
):
"""Generate a System Metadata PyXB object
Args:
pid:
format_id:
sciobj_stream:
submitter_str:
rights_holder_str:
authoritative_mn_urn:
pyxb_binding:
sid:
obsoletes_pid:
obsoleted_by_pid:
is_archived:
serial_version:
uploaded_datetime:
modified_datetime:
file_name:
origin_mn_urn:
access_list:
is_private:
media_name:
media_property_list:
is_replication_allowed:
preferred_mn_list:
blocked_mn_list:
Returns:
systemMetadata PyXB object
"""
pyxb_binding = pyxb_binding or d1_common.types.dataoneTypes
sysmeta_pyxb = pyxb_binding.systemMetadata()
sysmeta_pyxb.identifier = pid
sysmeta_pyxb.seriesId = sid
sysmeta_pyxb.formatId = format_id
sysmeta_pyxb.checksum, sysmeta_pyxb.size = gen_checksum_and_size(sciobj_stream)
sysmeta_pyxb.submitter = submitter_str
sysmeta_pyxb.rightsHolder = rights_holder_str
sysmeta_pyxb.authoritativeMemberNode = authoritative_mn_urn
sysmeta_pyxb.originMemberNode = origin_mn_urn or authoritative_mn_urn
sysmeta_pyxb.obsoletes = obsoletes_pid
sysmeta_pyxb.obsoletedBy = obsoleted_by_pid
sysmeta_pyxb.archived = is_archived
sysmeta_pyxb.serialVersion = serial_version
sysmeta_pyxb.dateUploaded = uploaded_datetime or d1_common.date_time.utc_now()
sysmeta_pyxb.dateSysMetadataModified = (
modified_datetime or sysmeta_pyxb.dateUploaded
)
sysmeta_pyxb.fileName = file_name
sysmeta_pyxb.replica = None
gen_access_policy(pyxb_binding, sysmeta_pyxb, is_private, access_list)
sysmeta_pyxb.replicationPolicy = gen_replication_policy(
pyxb_binding, preferred_mn_list, blocked_mn_list, is_replication_allowed
)
if media_name or media_property_list:
sysmeta_pyxb.mediaType = gen_media_type(
pyxb_binding, media_name, media_property_list
)
return sysmeta_pyxb
def gen_checksum_and_size(sciobj_stream):
sciobj_stream.seek(0)
checksum_pyxb = d1_common.checksum.create_checksum_object_from_stream(sciobj_stream)
sciobj_stream.seek(0, os.SEEK_END)
sciobj_size = sciobj_stream.tell()
sciobj_stream.seek(0)
return checksum_pyxb, sciobj_size
def gen_access_policy(pyxb_binding, sysmeta_pyxb, is_private, access_list):
with d1_common.wrap.access_policy.wrap_sysmeta_pyxb(
sysmeta_pyxb, pyxb_binding
) as ap:
if not is_private:
ap.add_public_read()
if access_list is not None:
for subj_str, perm_str in access_list:
ap.add_perm(subj_str, perm_str)
ap.update()
def gen_replication_policy(
pyxb_binding,
preferred_mn_list=None,
blocked_mn_list=None,
is_replication_allowed=False,
):
rp_pyxb = pyxb_binding.replicationPolicy()
rp_pyxb.preferredMemberNode = preferred_mn_list
rp_pyxb.blockedMemberNode = blocked_mn_list
rp_pyxb.replicationAllowed = is_replication_allowed
rp_pyxb.numberReplicas = 3 if is_replication_allowed else 0
return rp_pyxb
def gen_media_type(pyxb_binding, media_name, media_property_list=None):
assert (
media_name is not None
), "When a media_property_list is set, the media_name must also be set"
media_type_pyxb = pyxb_binding.MediaType(name=media_name)
for name_str, value_str in media_property_list or []:
media_type_pyxb.property_.append(
pyxb_binding.MediaTypeProperty(value_str, name=name_str)
)
return media_type_pyxb
| apache-2.0 | 5,772,235,243,419,589,000 | 31.121212 | 88 | 0.674124 | false |
paramecio/pastafari | scripts/monit/debian_wheezy/alive.py | 1 | 4720 | #!/usr/bin/python3 -u
# A script to install the alive script and its cron job
import subprocess
import argparse
import re
import os
import shutil
import pwd
from subprocess import call
parser = argparse.ArgumentParser(description='A script to install the alive script and cron job')
parser.add_argument('--url', help='The url where notify that this server is alive', required=True)
parser.add_argument('--user', help='The user for pastafari', required=True)
parser.add_argument('--pub_key', help='The pub key used in pastafari user', required=True)
args = parser.parse_args()
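# Example invocation (hypothetical values; the url embeds the panel token and ip):
#   ./alive.py --url http://panel.example.com/getinfo/<token>/<ip> \
#       --user pastafari --pub_key /root/.ssh/id_rsa.pub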
url=args.url
check_url = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if check_url.match(args.url):
# Create users
if call("sudo useradd -m -s /bin/sh %s" % args.user, shell=True) > 0:
print('Error, cannot add a new user')
exit(1)
else:
print('Added user')
if call("sudo mkdir -p /home/"+args.user+"/.ssh && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh && sudo chmod 700 /home/"+args.user+"/.ssh", shell=True) > 0:
print('Error, cannot add ssh directory')
exit(1)
else:
print('Added ssh directory')
if call("sudo cp "+args.pub_key+" /home/"+args.user+"/.ssh/authorized_keys && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh/authorized_keys && sudo chmod 600 /home/"+args.user+"/.ssh/authorized_keys", shell=True) > 0:
print('Error, cannot pub key to user')
exit(1)
else:
print('Added pub key to user')
# Edit alive cron
with open('modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive') as f:
alive_cron=f.read()
with open('modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive', 'w') as f:
alive_cron=alive_cron.replace('/home/spanel/modules/pastafari/scripts/monit/debian_wheezy/files/get_info.py', '/usr/local/bin/get_info.py')
f.write(alive_cron)
# Edit get_info.py
with open('modules/pastafari/scripts/monit/debian_wheezy/files/get_info.py') as f:
get_info=f.read()
with open('/usr/local/bin/get_info.py', 'w') as f:
get_info=get_info.replace("http://url/to/server/token/ip", args.url)
f.write(get_info)
os.chmod('/usr/local/bin/get_info.py', 0o700)
user_passwd=pwd.getpwnam(args.user)
os.chown('/usr/local/bin/get_info.py', user_passwd[2], user_passwd[3])
#shutil.chown('/usr/local/bin/get_info.py', args.user, args.user)
# Edit get_updates.py
with open('modules/pastafari/scripts/monit/debian_wheezy/files/get_updates.py') as f:
get_updates=f.read()
with open('/etc/cron.daily/get_updates.py', 'w') as f:
url_updates=args.url.replace('/getinfo/', '/getupdates/')
get_updates=get_updates.replace("http://url/to/server/token/ip", url_updates)
f.write(get_updates)
os.chmod('/etc/cron.daily/get_updates.py', 0o700)
# Edit sudo file
with open('modules/pastafari/scripts/monit/debian_wheezy/files/sudoers.d/spanel') as f:
sudoers=f.read()
with open('/etc/sudoers.d/spanel', 'w') as f:
sudoers=sudoers.replace("spanel", args.user)
f.write(sudoers)
# Copy cron alive to /etc/cron.d/
if call("sudo cp modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive /etc/cron.d/alive", shell=True) > 0:
print('Error, cannot install crontab alive file in cron.d')
exit(1)
else:
print('Added contrab alive file in cron.d')
print('Script installed successfully')
# Copy script for upgrades in /usr/local/bin
if call("mkdir /home/"+args.user+"/bin/ && cp modules/pastafari/scripts/standard/debian_wheezy/upgrade.sh /home/"+args.user+"/bin/ && chown -R "+args.user+":"+args.user+" /home/"+args.user+"/bin/", shell=True) > 0:
print('Error, cannot install upgrade.sh in /home/'+args.user+'/bin/')
exit(1)
else:
print('Added /home/'+args.user+'/bin/upgrade.sh')
print('Script installed successfully')
# Making first call to site
if subprocess.call('/usr/local/bin/get_info.py', shell=True) > 0:
print('Error')
exit(1)
else:
print('Your server should be up in your panel...')
exit(0)
else:
print('Error installing the module, not valid url')
exit(1)
| gpl-2.0 | 6,790,356,266,525,873,000 | 33.962963 | 239 | 0.60911 | false |
sashs/Ropper | ropper/ropchain/arch/ropchainx86.py | 1 | 45777 | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ropper.gadget import Category
from ropper.common.error import *
from ropper.common.utils import *
from ropper.rop import Ropper
from ropper.arch import x86
from ropper.ropchain.ropchain import *
from ropper.loaders.loader import Type
from ropper.loaders.pe import PE
from ropper.loaders.elf import ELF
from ropper.loaders.mach_o import MachO
from ropper.loaders.raw import Raw
from ropper.gadget import Gadget
from re import match
from filebytes.pe import ImageDirectoryEntry
import itertools
import math
import sys
if sys.version_info.major == 2:
range = xrange
class RopChainX86(RopChain):
MAX_QUALI = 7
def _printHeader(self):
toReturn = ''
toReturn += ('#!/usr/bin/env python\n')
toReturn += ('# Generated by ropper ropchain generator #\n')
toReturn += ('from struct import pack\n')
toReturn += ('\n')
toReturn += ('p = lambda x : pack(\'I\', x)\n')
toReturn += ('\n')
return toReturn
def _printRebase(self):
toReturn = ''
for binary,section in self._usedBinaries:
imageBase = Gadget.IMAGE_BASES[binary]
toReturn += ('IMAGE_BASE_%d = %s # %s\n' % (self._usedBinaries.index((binary, section)),toHex(imageBase , 4), binary))
toReturn += ('rebase_%d = lambda x : p(x + IMAGE_BASE_%d)\n\n'% (self._usedBinaries.index((binary, section)),self._usedBinaries.index((binary, section))))
return toReturn
@classmethod
def name(cls):
return ''
@classmethod
def availableGenerators(cls):
return [RopChainX86System, RopChainX86Mprotect, RopChainX86VirtualProtect]
@classmethod
def archs(self):
return [x86]
def _createDependenceChain(self, gadgets):
"""
gadgets - list with tuples
tuple contains:
- method to create chaingadget
- list with arguments
- dict with named arguments
- list with registers which are not allowed to override in the gadget
"""
failed = []
cur_len = 0
cur_chain = ''
counter = 0
failed_chains={}
max_perm = math.factorial(len(gadgets))
for x in itertools.permutations(gadgets):
counter += 1
self._printMessage('[*] Try permuation %d / %d' % (counter, max_perm))
found = False
for y in failed:
if x[:len(y)] == y:
found = True
break
if found:
continue
try:
fail = []
chain2 = ''
dontModify = []
badRegs = []
c = 0
for idx in range(len(x)):
g = x[idx]
if idx != 0:
badRegs.extend(x[idx-1][3])
dontModify.extend(g[3])
fail.append(g)
chain2 += g[0](*g[1], badRegs=badRegs, dontModify=dontModify,**g[2])[0]
cur_chain += chain2
break
except RopChainError as e:
failed_chains[chain2] = fail
failed.append(tuple(fail))
else:
self._printMessage('Cannot create chain which fills all registers')
fail_tmp = None
fail_max = []
chain_tmp = None
for chain,fail in failed_chains.items():
if len(fail) > len(fail_max):
fail_max = fail
chain_tmp = chain
cur_chain = '# Filled registers: '
for fa in fail_max[:-1]:
cur_chain += (fa[2]['reg']) + ', '
cur_chain += '\n'
cur_chain += chain_tmp
# print('Impossible to create complete chain')
return cur_chain
def _isModifiedOrDereferencedAccess(self, gadget, dontModify):
regs = []
for line in gadget.lines[1:]:
line = line[1]
if '[' in line:
return True
if dontModify:
m = match('[a-z]+ (e?[abcds][ixlh]),?.*', line)
if m and m.group(1) in dontModify:
return True
return False
def _paddingNeededFor(self, gadget):
regs = []
for idx in range(1,len(gadget.lines)):
line = gadget.lines[idx][1]
matched = match('^pop (...)$', line)
if matched:
regs.append(matched.group(1))
return regs
def _printRopInstruction(self, gadget, padding=True, number=None, value=None):
value_first = False
toReturn = ('rop += rebase_%d(%s) # %s\n' % (self._usedBinaries.index((gadget.fileName, gadget.section)),toHex(gadget.lines[0][0],4), gadget.simpleString()))
if number is not None:
toReturn +=self._printPaddingInstruction(number)
if padding:
regs = self._paddingNeededFor(gadget)
if len(regs) > 0:
dst = gadget.category[2]['dst']
search = '^pop (%s)$' % dst
first_line = gadget.lines[0][1]
if match(search, first_line):
value_first = True
padding_str = ''
for i in range(len(regs)):
padding_str +=self._printPaddingInstruction()
if value_first:
toReturn += value
toReturn += padding_str
else:
toReturn += padding_str
if value:
toReturn += value
return toReturn
def _printAddString(self, string):
return ('rop += \'%s\'\n' % string)
def _printRebasedAddress(self, addr, comment='', idx=0):
return ('rop += rebase_%d(%s)\n' % (idx,addr))
def _printPaddingInstruction(self, addr='0xdeadbeef'):
return ('rop += p(%s)\n' % addr)
def _containsZeroByte(self, addr):
return self.containsBadbytes(addr)
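# The two filler helpers below search for a constant i such that both i and
# i + number (for sub) or number - i (for add) are free of bad bytes; the
# generated chain then pops the two safe constants and lets a sub/add gadget
# compute `number`, so the raw value never appears in the payload.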
def _createZeroByteFillerForSub(self, number):
start = 0x01010101
for i in range(start, 0x0f0f0f0f):
if not self._containsZeroByte(i) and not self._containsZeroByte(i+number):
return i
raise RopChainError("Could not create Number for substract gadget")
def _createZeroByteFillerForAdd(self, number):
start = 0x01010101
for i in range(start, 0x0f0f0f0f):
if not self._containsZeroByte(i) and not self._containsZeroByte(number-i):
return i
raise RopChainError("Could not create Number for addition gadget")
def _find(self, category, reg=None, srcdst='dst', badDst=[], badSrc=None, dontModify=None, srcEqDst=False, switchRegs=False ):
quali = 1
while quali < RopChainX86System.MAX_QUALI:
for binary in self._binaries:
for gadget in self._gadgets[binary]:
if gadget.category[0] == category and gadget.category[1] == quali:
if badSrc and (gadget.category[2]['src'] in badSrc \
or gadget.affected_regs.intersection(badSrc)):
continue
if badDst and (gadget.category[2]['dst'] in badDst \
or gadget.affected_regs.intersection(badDst)):
continue
if not gadget.lines[len(gadget.lines)-1][1].strip().endswith('ret') or 'esp' in gadget.simpleString():
continue
if srcEqDst and (not (gadget.category[2]['dst'] == gadget.category[2]['src'])):
continue
elif not srcEqDst and 'src' in gadget.category[2] and (gadget.category[2]['dst'] == gadget.category[2]['src']):
continue
if self._isModifiedOrDereferencedAccess(gadget, dontModify):
continue
if reg:
if gadget.category[2][srcdst] == reg:
self._updateUsedBinaries(gadget)
return gadget
elif switchRegs:
other = 'src' if srcdst == 'dst' else 'dst'
if gadget.category[2][other] == reg:
self._updateUsedBinaries(gadget)
return gadget
else:
self._updateUsedBinaries(gadget)
return gadget
quali += 1
def _createWriteStringWhere(self, what, where, reg=None, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build writewhatwhere gadget!')
write4 = self._find(Category.WRITE_MEM, reg=popReg.category[2]['dst'], badDst=
badDst, srcdst='src')
if not write4:
badRegs.append(popReg.category[2]['dst'])
continue
else:
popReg2 = self._find(Category.LOAD_REG, reg=write4.category[2]['dst'], dontModify=[popReg.category[2]['dst']]+dontModify)
if not popReg2:
badDst.append(write4.category[2]['dst'])
continue
else:
break;
if len(what) % 4 > 0:
what += ' ' * (4 - len(what) % 4)
toReturn = ''
for index in range(0,len(what),4):
part = what[index:index+4]
toReturn += self._printRopInstruction(popReg,False)
toReturn += self._printAddString(part)
regs = self._paddingNeededFor(popReg)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(where+index,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(write4)
return (toReturn,popReg.category[2]['dst'], popReg2.category[2]['dst'])
def _createWriteRegValueWhere(self, what, where, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
write4 = self._find(Category.WRITE_MEM, reg=what, badDst=badDst, dontModify=dontModify, srcdst='src')
if not write4:
raise RopChainError('Cannot build writewhatwhere gadget!')
else:
popReg2 = self._find(Category.LOAD_REG, reg=write4.category[2]['dst'], dontModify=[what]+dontModify)
if not popReg2:
badDst.append(write4.category[2]['dst'])
continue
else:
break;
toReturn = self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(where,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(write4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createLoadRegValueFrom(self, what, from_reg, dontModify=[], idx=0):
try:
return self._createLoadRegValueFromMov(what, from_reg, dontModify, idx)
except RopChainError:
return self._createLoadRegValueFromXchg(what, from_reg, dontModify, idx)
def _createLoadRegValueFromMov(self, what, from_reg, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
load4 = self._find(Category.LOAD_MEM, reg=what, badDst=badDst, dontModify=dontModify, srcdst='dst')
if not load4:
raise RopChainError('Cannot build loadwhere gadget!')
else:
popReg2 = self._find(Category.LOAD_REG, reg=load4.category[2]['src'], dontModify=[what,load4.category[2]['src']]+dontModify)
if not popReg2:
badDst.append(load4.category[2]['src'])
continue
else:
break;
value = self._printPaddingInstruction(toHex(from_reg,4))
toReturn = self._printRopInstruction(popReg2, False, value=value)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(load4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createLoadRegValueFromXchg(self, what, from_reg, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
load4 = self._find(Category.XCHG_REG, reg=what, badDst=badDst, dontModify=dontModify, srcdst='src')
if not load4:
raise RopChainError('Cannot build loadwhere gadget!')
else:
mov = self._find(Category.LOAD_MEM, reg=load4.category[2]['dst'], badDst=badDst, dontModify=[load4.category[2]['dst']]+dontModify, srcdst='dst')
if not mov:
badDst.append(load4.category[2]['dst'])
continue
popReg2 = self._find(Category.LOAD_REG, reg=mov.category[2]['src'], dontModify=[what,load4.category[2]['src']]+dontModify)
if not popReg2:
badDst.append(load4.category[2]['src'])
continue
else:
break;
toReturn = self._printRopInstruction(popReg2, False)
toReturn += self._printPaddingInstruction(toHex(from_reg,4))
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(mov)
toReturn += self._printRopInstruction(load4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createNumberSubtract(self, number, reg=None, badRegs=None, dontModify=None):
if not badRegs:
badRegs=[]
while True:
sub = self._find(Category.SUB_REG, reg=reg, badDst=badRegs, badSrc=badRegs, dontModify=dontModify)
if not sub:
raise RopChainError('Cannot build number with subtract gadget for reg %s!' % reg)
popSrc = self._find(Category.LOAD_REG, reg=sub.category[2]['src'], dontModify=dontModify)
if not popSrc:
badRegs.append(sub.category[2]['src'])
continue
popDst = self._find(Category.LOAD_REG, reg=sub.category[2]['dst'], dontModify=[sub.category[2]['src']]+dontModify)
if not popDst:
badRegs.append(sub.category[2]['dst'])
continue
else:
break;
filler = self._createZeroByteFillerForSub(number)
toReturn = self._printRopInstruction(popSrc, False)
toReturn += self._printPaddingInstruction(toHex(filler,4))
regs = self._paddingNeededFor(popSrc)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(popDst, False)
toReturn += self._printPaddingInstruction(toHex(filler+number,4))
regs = self._paddingNeededFor(popDst)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(sub)
return (toReturn, popDst.category[2]['dst'],popSrc.category[2]['dst'])
def _createNumberAddition(self, number, reg=None, badRegs=None, dontModify=None):
if not badRegs:
badRegs=[]
while True:
sub = self._find(Category.ADD_REG, reg=reg, badDst=badRegs, badSrc=badRegs, dontModify=dontModify)
if not sub:
raise RopChainError('Cannot build number with addition gadget for reg %s!' % reg)
popSrc = self._find(Category.LOAD_REG, reg=sub.category[2]['src'], dontModify=dontModify)
if not popSrc:
badRegs.append(sub.category[2]['src'])
continue
popDst = self._find(Category.LOAD_REG, reg=sub.category[2]['dst'], dontModify=[sub.category[2]['src']]+dontModify)
if not popDst:
badRegs.append(sub.category[2]['dst'])
continue
else:
break;
filler = self._createZeroByteFillerForAdd(number)
toReturn = self._printRopInstruction(popSrc, False)
toReturn += self._printPaddingInstruction(toHex(filler,4))
regs = self._paddingNeededFor(popSrc)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(popDst, False)
toReturn += self._printPaddingInstruction(toHex(number - filler,4))
regs = self._paddingNeededFor(popDst)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(sub)
return (toReturn, popDst.category[2]['dst'],popSrc.category[2]['dst'])
def _createNumberPop(self, number, reg=None, badRegs=None, dontModify=None):
if self._containsZeroByte(0xffffffff):
raise RopChainError("Cannot write value with pop -1 and inc gadgets, because there are badbytes in the negated number")
while True:
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build number with xor gadget!')
incReg = self._find(Category.INC_REG, reg=popReg.category[2]['dst'], dontModify=dontModify)
if not incReg:
if not badRegs:
badRegs = []
badRegs.append(popReg.category[2]['dst'])
else:
break
value = self._printPaddingInstruction(toHex(0xffffffff,4))
toReturn = self._printRopInstruction(popReg, value=value)
for i in range(number+1):
toReturn += self._printRopInstruction(incReg)
return (toReturn ,popReg.category[2]['dst'],)
def _createNumberXOR(self, number, reg=None, badRegs=None, dontModify=None):
while True:
clearReg = self._find(Category.CLEAR_REG, reg=reg, badDst=badRegs, badSrc=badRegs,dontModify=dontModify, srcEqDst=True)
if not clearReg:
raise RopChainError('Cannot build number with xor gadget!')
if number > 0:
incReg = self._find(Category.INC_REG, reg=clearReg.category[2]['src'], dontModify=dontModify)
if not incReg:
if not badRegs:
badRegs = []
badRegs.append(clearReg.category[2]['src'])
else:
break
else:
break
toReturn = self._printRopInstruction(clearReg)
for i in range(number):
toReturn += self._printRopInstruction(incReg)
return (toReturn, clearReg.category[2]['dst'],)
def _createNumberXchg(self, number, reg=None, badRegs=None, dontModify=None):
xchg = self._find(Category.XCHG_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not xchg:
raise RopChainError('Cannot build number gadget with xchg!')
other = xchg.category[2]['src'] # _find matched `reg` as dst, so the counterpart is src
toReturn = self._createNumber(number, other, badRegs, dontModify)[0]
toReturn += self._printRopInstruction(xchg)
return (toReturn, reg, other)
def _createNumberNeg(self, number, reg=None, badRegs=None, dontModify=None):
if number == 0:
raise RopChainError('Cannot build number gadget with neg if number is 0!')
if self._containsZeroByte((~number)+1):
raise RopChainError("Cannot use neg gadget, because there are badbytes in the negated number")
neg = self._find(Category.NEG_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not neg:
raise RopChainError('Cannot build number gadget with neg!')
pop = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not pop:
raise RopChainError('Cannot build number gadget with neg!')
value = self._printPaddingInstruction(toHex((~number)+1)) # two's complement
toReturn = self._printRopInstruction(pop, value=value)
toReturn += self._printRopInstruction(neg)
return (toReturn, reg,)
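# Strategy note (added for clarity): _createNumber uses a plain "pop reg"
# when the literal is free of bad bytes. Otherwise it falls back, in order,
# to neg (pop the two's complement, then negate), xor+inc for small values,
# pop 0xffffffff followed by repeated inc, and sub/add with bad-byte-free
# fillers; as a last resort the value is built in another register and moved
# over with an xchg gadget.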
def _createNumber(self, number, reg=None, badRegs=None, dontModify=None, xchg=True):
try:
if self._containsZeroByte(number):
try:
return self._createNumberNeg(number, reg, badRegs,dontModify)
except RopChainError as e:
if number < 0x50:
try:
return self._createNumberXOR(number, reg, badRegs,dontModify)
except RopChainError:
try:
return self._createNumberPop(number, reg, badRegs,dontModify)
except RopChainError:
try:
return self._createNumberSubtract(number, reg, badRegs,dontModify)
except RopChainError:
return self._createNumberAddition(number, reg, badRegs,dontModify)
else :
try:
return self._createNumberSubtract(number, reg, badRegs,dontModify)
except RopChainError:
return self._createNumberAddition(number, reg, badRegs,dontModify)
else:
popReg =self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build number gadget!')
toReturn = self._printRopInstruction(popReg, padding=True, number=toHex(number,4))
return (toReturn , popReg.category[2]['dst'])
except RopChainError as e:
return self._createNumberXchg(number, reg, badRegs, dontModify)
def _createAddress(self, address, reg=None, badRegs=None, dontModify=None):
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build address gadget!')
toReturn = ''
toReturn += self._printRopInstruction(popReg,False)
toReturn += self._printRebasedAddress(toHex(address, 4), idx=self._usedBinaries.index((popReg.fileName, popReg.section)))
regs = self._paddingNeededFor(popReg)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
return (toReturn,popReg.category[2]['dst'])
def _createSyscall(self, reg=None, badRegs=None, dontModify=None):
syscall = self._find(Category.SYSCALL, reg=None, badDst=None, dontModify=dontModify)
if not syscall:
raise RopChainError('Cannot build syscall gadget!')
toReturn = ''
toReturn += self._printRopInstruction(syscall)
return (toReturn,)
def _createOpcode(self, opcode):
return self._printRopInstruction(self._searchOpcode(opcode))
def _searchOpcode(self, opcode):
r = Ropper()
gadgets = []
for binary in self._binaries:
for section in binary.executableSections:
vaddr = section.virtualAddress
gadgets.extend(r.searchOpcode(binary,opcode=opcode,disass=True))
if len(gadgets) > 0:
for gadget in gadgets:
if not gadget:
continue
if not self.containsBadbytes(gadget.IMAGE_BASES.get(gadget.fileName,0) + gadget.lines[0][0]):
self._updateUsedBinaries(gadget)
return gadget
else:
raise RopChainError('Cannot create gadget for opcode: %s' % opcode)
def create(self):
pass
class RopChainX86System(RopChainX86):
@classmethod
def usableTypes(self):
return (ELF, Raw)
@classmethod
def name(cls):
return 'execve'
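# Invocation sketch (illustrative; ropper's chain parameter syntax is assumed here):
#   ropper --file <elf-binary> --chain "execve cmd=/bin/sh"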
def _createCommand(self, what, where, reg=None, dontModify=[], idx=0):
if len(what) % 4 > 0:
what = '/' * (4 - len(what) % 4) + what
return self._createWriteStringWhere(what,where, idx=idx)
def create(self, options={}):
cmd = options.get('cmd')
address = options.get('address')
if not cmd:
cmd = '/bin/sh'
if len(cmd.split(' ')) > 1:
raise RopChainError('No argument support for execve commands')
self._printMessage('ROPchain Generator for syscall execve:\n')
self._printMessage('\nwrite command into data section\neax 0xb\nebx address to cmd\necx address to null\nedx address to null\n')
chain = self._printHeader()
gadgets = []
can_create_command = False
chain_tmp = '\n'
if address is None:
section = self._binaries[0].getSection('.data')
length = math.ceil(float(len(cmd))/4) * 4
nulladdress = section.offset+length
try:
cmdaddress = section.offset
chain_tmp += self._createCommand(cmd,cmdaddress)[0]
can_create_command = True
except RopChainError as e:
self._printMessage('Cannot create gadget: writewhatwhere')
self._printMessage('Use 0x41414141 as command address. Please replace that value.')
cmdaddress = 0x41414141
if can_create_command:
badregs = []
while True:
c = ''
ret = self._createNumber(0x0, badRegs=badregs)
c += ret[0]
try:
c += self._createWriteRegValueWhere(ret[1], nulladdress)[0]
chain_tmp += c
break
except BaseException as e:
#raise e
badregs.append(ret[1])
gadgets.append((self._createAddress, [cmdaddress],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createAddress, [nulladdress],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createAddress, [nulladdress],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0xb],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
if address is not None and not can_create_command:
if type(address) is str:
cmdaddress = int(address, 16)
nulladdress = options.get('nulladdress')
if nulladdress is None:
self._printMessage('No address to a null bytes was given, 0x42424242 is used instead.')
self._printMessage('Please replace that value.')
nulladdress = 0x42424242
elif type(nulladdress) is str:
nulladdress = int(nulladdress,16)
gadgets.append((self._createNumber, [cmdaddress],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createNumber, [nulladdress],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createNumber, [nulladdress],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0xb],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
self._printMessage('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp += self._createDependenceChain(gadgets)
try:
self._printMessage('Look for syscall gadget')
chain_tmp += self._createSyscall()[0]
self._printMessage('syscall gadget found')
except RopChainError:
try:
self._printMessage('No syscall gadget found!')
self._printMessage('Look for int 0x80 opcode')
chain_tmp += self._createOpcode('cd80')
self._printMessage('int 0x80 opcode found')
except RopChainError:
try:
self._printMessage('No int 0x80 opcode found')
self._printMessage('Look for call gs:[0x10] opcode')
chain_tmp += self._createOpcode('65ff1510000000')
self._printMessage('call gs:[0x10] found')
except RopChainError:
chain_tmp += '# INSERT SYSCALL GADGET HERE\n'
self._printMessage('No call gs:[0x10] opcode found')
chain += self._printRebase()
chain += 'rop = \'\'\n'
chain += chain_tmp
chain += 'print rop'
return chain
class RopChainX86Mprotect(RopChainX86):
"""
Builds a ropchain for mprotect syscall
eax 0x7d
ebx address
ecx size
edx 0x7 -> RWE
"""
@classmethod
def usableTypes(self):
return (ELF, Raw)
@classmethod
def name(cls):
return 'mprotect'
def _createJmp(self, reg=['esp']):
r = Ropper()
gadgets = []
for binary in self._binaries:
for section in binary.executableSections:
vaddr = section.virtualAddress
gadgets.extend(
r.searchJmpReg(binary,reg))
if len(gadgets) > 0:
self._updateUsedBinaries(gadgets[0])
return self._printRopInstruction(gadgets[0])
else:
return None
def create(self, options={}):
address = options.get('address')
size = options.get('size')
if not address:
raise RopChainError('Missing parameter: address')
if not size:
raise RopChainError('Missing parameter: size')
if not match('0x[0-9a-fA-F]{1,8}', address):
raise RopChainError('Parameter address have to have the following format: <hexnumber>')
if not match('0x[0-9a-fA-F]+', size):
raise RopChainError('Parameter size have to have the following format: <hexnumber>')
address = int(address, 16)
size = int(size, 16)
self._printMessage('ROPchain Generator for syscall mprotect:\n')
self._printMessage('eax 0x7d\nebx address\necx size\nedx 0x7 -> RWE\n')
chain = self._printHeader()
chain += 'shellcode = \'\\xcc\'*100\n\n'
gadgets = []
gadgets.append((self._createNumber, [address],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createNumber, [size],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createNumber, [0x7],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0x7d],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
self._printMessage('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp = ''
chain_tmp += self._createDependenceChain(gadgets)
try:
self._printMessage('Look for syscall gadget')
chain_tmp += self._createSyscall()[0]
self._printMessage('syscall gadget found')
except RopChainError:
chain_tmp += '\n# ADD HERE SYSCALL GADGET\n\n'
self._printMessage('No syscall gadget found!')
self._printMessage('Look for jmp esp')
jmp_esp = self._createJmp()
if jmp_esp:
self._printMessage('jmp esp found')
chain_tmp += jmp_esp
else:
self._printMessage('no jmp esp found')
chain_tmp += '\n# ADD HERE JMP ESP\n\n'
chain += self._printRebase()
chain += '\nrop = \'\'\n'
chain += chain_tmp
chain += 'rop += shellcode\n\n'
chain += 'print(rop)\n'
return chain
class RopChainX86VirtualProtect(RopChainX86):
"""
Builds a ropchain for a VirtualProtect call using pushad
eax 0x90909090
ecx old protection (writable addr)
edx 0x40 (RWE)
ebx size
esp address
ebp return address (jmp esp)
esi pointer to VirtualProtect
edi ret (rop nop)
"""
@classmethod
def usableTypes(self):
return (PE, Raw)
@classmethod
def name(cls):
return 'virtualprotect'
def _createPushad(self):
pushad = self._find(Category.PUSHAD)
if pushad:
return self._printRopInstruction(pushad)
else:
self._printMessage('No pushad found!')
return '# Add here PUSHAD gadget!'
def _createJmp(self, reg=['esp']):
r = Ropper()
gadgets = []
for binary in self._binaries:
for section in binary.executableSections:
vaddr = section.offset
gadgets.extend(
r.searchJmpReg(binary,reg))
if len(gadgets) > 0:
self._updateUsedBinaries(gadgets[0])
return gadgets[0]
else:
return None
def __extract(self, param):
if (not match('0x[0-9a-fA-F]{1,8},0x[0-9a-fA-F]+', param)) and (not match('0x[0-9a-fA-F]+', param)):
raise RopChainError('Parameter have to have the following format: <hexnumber>')
return (None, int(param, 16))
def __getVirtualProtectEntry(self):
for binary in self._binaries:
if binary.type == Type.PE:
imports = binary._binary.dataDirectory[ImageDirectoryEntry.IMPORT]
if not imports:
return None
for descriptorData in imports:
for thunk in descriptorData.importAddressTable:
if thunk.importByName and thunk.importByName.name == 'VirtualProtect':
return thunk.rva, binary.imageBase
else:
self._printMessage('File is not a PE file.')
return None
def create(self, options={}):
self._printMessage('Ropchain Generator for VirtualProtect:\n')
self._printMessage('eax 0x90909090\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to VirtualProtect\nedi ret (rop nop)\n')
image_base = 0
address = options.get('address')
given = False
if not address:
virtual_protect = self.__getVirtualProtectEntry()
if virtual_protect:
address, image_base = virtual_protect
if not address:
self._printMessage('No IAT-Entry for VirtualProtect found!')
raise RopChainError('No IAT-Entry for VirtualProtect found and no address is given')
else:
if address:
if not match('0x[0-9a-fA-F]{1,8}', address):
raise RopChainError('Parameter address have to have the following format: <hexnumber>')
address = int(address, 16)
given = True
writeable_ptr = self._binaries[0].getWriteableSection().offset
for i in range(0,0x10000,4):
if not self.containsBadbytes((writeable_ptr + i) & 0xffff,2):
writeable_ptr += i
break
jmp_esp = self._createJmp()
ret_addr = self._searchOpcode('c3')
chain = self._printHeader()
chain += '\n\nshellcode = \'\\xcc\'*100\n\n'
gadgets = []
to_extend = []
chain_tmp = ''
got_jmp_esp = False
try:
self._printMessage('Try to create gadget to fill esi with content of IAT address: 0x%x' % (address + image_base))
chain_tmp += self._createLoadRegValueFrom('esi', address+image_base)[0]
gadgets.append((self._createNumber, [0x90909090],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
to_extend = ['esi','si']
if jmp_esp:
gadgets.append((self._createAddress, [jmp_esp.lines[0][0]],{'reg':'ebp'},['ebp', 'bp']+to_extend))
got_jmp_esp = True
except RopChainError:
self._printMessage('Cannot create fill esi gadget!')
self._printMessage('Try to create this chain:\n')
self._printMessage('eax Pointer to VirtualProtect\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (pop ebp;ret)\nesi pointer to jmp [eax]\nedi ret (rop nop)\n')
jmp_eax = self._searchOpcode('ff20') # jmp [eax]
gadgets.append((self._createAddress, [jmp_eax.lines[0][0]],{'reg':'esi'},['esi','si']))
gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
pop_ebp = self._searchOpcode('5dc3')
if pop_ebp:
gadgets.append((self._createAddress, [pop_ebp.lines[0][0]],{'reg':'ebp'},['ebp', 'bp']+to_extend))
gadgets.append((self._createNumber, [0x1],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']+to_extend))
gadgets.append((self._createAddress, [writeable_ptr],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']+to_extend))
gadgets.append((self._createNumber, [0x40],{'reg':'edx'},['edx', 'dx', 'dh', 'dl']+to_extend))
gadgets.append((self._createAddress, [ret_addr.lines[0][0]],{'reg':'edi'},['edi', 'di']+to_extend))
self._printMessage('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp += self._createDependenceChain(gadgets)
self._printMessage('Look for pushad gadget')
chain_tmp += self._createPushad()
if not got_jmp_esp and jmp_esp:
chain_tmp += self._printRopInstruction(jmp_esp)
chain += self._printRebase()
chain += 'rop = \'\'\n'
chain += chain_tmp
chain += 'rop += shellcode\n\n'
chain += 'print(rop)\n'
return chain
# class RopChainX86VirtualAlloc(RopChainX86):
# """
# Builds a ropchain for a VirtualProtect call using pushad
# eax 0x90909090
# ecx old protection (writable addr)
# edx 0x40 (RWE)
# ebx size
# esp address
# ebp return address (jmp esp)
# esi pointer to VirtualProtect
# edi ret (rop nop)
# """
# @classmethod
# def name(cls):
# return 'virtualalloc'
# def _createPushad(self):
# pushad = self._find(Category.PUSHAD)
# if pushad:
# return self._printRopInstruction(pushad)
# else:
# self._printer.printInfo('No pushad found!')
# return '# Add here PUSHAD gadget!'
# def _createJmp(self, reg=['esp']):
# r = Ropper()
# gadgets = []
# for binary in self._binaries:
# for section in binary.executableSections:
# vaddr = section.offset
# gadgets.extend(
# r.searchJmpReg(self._binaries[0],reg))
# if len(gadgets) > 0:
# if (gadgets[0]._binary, gadgets[0]._section) not in self._usedBinaries:
# self._usedBinaries.append((gadgets[0]._binary, gadgets[0]._section))
# return gadgets[0]
# else:
# return None
# def __extract(self, param):
# if (not match('0x[0-9a-fA-F]{1,8},0x[0-9a-fA-F]+', param)) and (not match('0x[0-9a-fA-F]+', param)):
# raise RopChainError('Parameter have to have the following format: <hexnumber>,<hexnumber> or <hexnumber>')
# split = param.split(',')
# if len(split) == 2:
# if isHex(split[1]):
# return (int(split[0], 16), int(split[1], 16))
# else:
# return (None, int(split[0], 16))
# def __getVirtualProtectEntry(self):
# for binary in self._binaries:
# if binary.type == Type.PE:
# s = binary._binary.dataDirectory[ImageDirectoryEntry.IMPORT]
# for thunk in s.importNameTable:
# if thunk.importByName.name == 'VirtualAlloc':
# return thunk.rva + binary.imageBase
# else:
# self._printer.printError('File is not a PE file.')
# return None
# def create(self, param=None):
# if not param:
# raise RopChainError('Missing parameter: address,size or size')
# self._printer.printInfo('Ropchain Generator for VirtualProtect:\n')
# self._printer.println('eax 0x90909090\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to VirtualProtect\nedi ret (rop nop)\n')
# address, size = self.__extract(param)
# given = False
# if not address:
# address = self.__getVirtualProtectEntry()
# if not address:
# self._printer.printError('No IAT-Entry for VirtualProtect found!')
# raise RopChainError('No IAT-Entry for VirtualProtect found and no address is given')
# else:
# given = True
# jmp_esp = self._createJmp()
# ret_addr = self._searchOpcode('c3')
# chain = self._printHeader()
# chain += '\n\nshellcode = \'\\xcc\'*100\n\n'
# gadgets = []
# to_extend = []
# chain_tmp = ''
# try:
# self._printer.printInfo('Try to create gadget to fill esi with content of IAT address: %s' % address)
# chain_tmp += self._createLoadRegValueFrom('esi', address)[0]
# if given:
# gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
# else:
# gadgets.append((self._createAddress, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
# to_extend = ['esi','si']
# except:
# self._printer.printInfo('Cannot create fill esi gadget!')
# self._printer.printInfo('Try to create this chain:\n')
# self._printer.println('eax Pointer to VirtualProtect\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to jmp [eax]\nedi ret (rop nop)\n')
# jmp_eax = self._searchOpcode('ff20') # jmp [eax]
# gadgets.append((self._createAddress, [jmp_eax.lines[0][0]],{'reg':'esi'},['esi','si']))
# if given:
# gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
# else:
# gadgets.append((self._createAddress, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
# gadgets.append((self._createNumber, [size],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']+to_extend))
# gadgets.append((self._createNumber, [0x40],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']+to_extend))
# if jmp_esp:
# gadgets.append((self._createAddress, [jmp_esp.lines[0][0]],{'reg':'ebp'},['ebp', 'bp']+to_extend))
# gadgets.append((self._createNumber, [0x1000],{'reg':'edx'},['edx', 'dx', 'dh', 'dl']+to_extend))
# gadgets.append((self._createAddress, [ret_addr.lines[0][0]],{'reg':'edi'},['edi', 'di']+to_extend))
# self._printer.printInfo('Try to create chain which fills registers without delete content of previous filled registers')
# chain_tmp += self._createDependenceChain(gadgets)
# self._printer.printInfo('Look for pushad gadget')
# chain_tmp += self._createPushad()
# chain += self._printRebase()
# chain += 'rop = \'\'\n'
# chain += chain_tmp
# chain += 'rop += shellcode\n\n'
# chain += 'print(rop)\n'
# return chain
| bsd-3-clause | -8,215,692,454,169,640,000 | 39.049869 | 220 | 0.565306 | false |
PhenixI/machine-learning | 2_supervised_regression/1-Linear Regression/ransacregression/ransacregression.py | 1 | 1734 | #load data Housing Dataset: https://archive.ics.uci.edu/ml/datasets/Housing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#df = pd.read_csv('https://archive.ics.uci.edu/ml/datasets/housing/Housing.data',header = None,sep = '\s+')
df = pd.read_csv('F:/developSamples/ml/housing.data',header = None,sep = '\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS','NOX', 'RM', 'AGE', 'DIS', 'RAD','TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
df.head()
X = df['RM'].values.reshape(-1,1)
y = df['MEDV'].values.reshape(-1,1)
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RANSACRegressor
#By setting the residual_threshold parameter to 5.0, we
#only allowed samples to be included in the inlier set if their vertical distance to the
#fitted line is within 5 distance units, which works well on this particular dataset.
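# Note (added): residual_metric was deprecated in scikit-learn 0.18 and removed
# in 0.20; on newer versions drop it (or use the `loss` parameter instead).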
ransac = RANSACRegressor(LinearRegression(),
max_trials=100,
min_samples=50,
residual_metric=lambda x: np.sum(np.abs(x), axis=1),
residual_threshold=5.0,
random_state=0)
ransac.fit(X,y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
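# Illustrative sanity check (added): report how many samples RANSAC classified
# as inliers versus outliers before plotting them below.
print('Inliers: %d, outliers: %d' % (inlier_mask.sum(), outlier_mask.sum()))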
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask],c='blue', marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask],c='lightgreen', marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='red')
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper left')
plt.show()
print('Slope: %.3f' % ransac.estimator_.coef_[0])
print('Intercept: %.3f' % ransac.estimator_.intercept_) | gpl-2.0 | -2,341,679,451,319,219,000 | 43.487179 | 116 | 0.65917 | false |
mit-ll/LO-PHI | lophi-automation/lophi_automation/configs/controllerconfig.py | 1 | 5240 | """
Class for handling configuration files for controller nodes
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import socket
import logging
logger = logging.getLogger(__name__)
from time import sleep
# LO-PHI
import lophi.globals as G
# LO-PHI Automation
import lophi_automation.protobuf.helper as ProtoBuf
from lophi_automation.configs import LophiConfig
from lophi_automation.network.command import LophiCommand
SOCKET_RETRY = 5
class ControllerConfig(LophiConfig):
"""
Very simple class to hand around and leave room for improvement in the
future
"""
def __init__(self, name, Config):
"""
Initialize all of our variables and set any new settings that were
specified in the config file.
"""
# Some storage
self.SOCK = None
self.lophi_configs = None
self.machines = None
# Set our name
self.name = name
# Set our host
if not self._get_option(Config, name, "host"):
logger.error("No host ip provided for %s."%name)
# Set our port
if not self._get_option(Config, name, "port"):
logger.error("No host port provided for %s."%name)
# Make it easier when opening sockets
self.address = (self.host, int(self.port))
def __str__(self):
""" Print out information of this controller """
o = "[%s] IP: %s, Port: %s"%(self.name,self.host,self.port)
return o
def get_machines(self):
""" Get protocol buffer version of remote machines """
while 1:
try:
logger.debug("Getting machine list for Controller/%s" % self.name)
# Get response
cmd = LophiCommand(G.CTRL_CMD_PICKLE, args=["machines"])
data = self.send_cmd(cmd)
status = G.read_socket_data(self.SOCK)
# Unpack our machine list
# (WARNING: This a subset of the objects at the server
if data is not None:
self.machines = ProtoBuf.unpack_machine_list(data)
else:
self.machines = []
return status
except:
G.print_traceback()
self.connect()
def get_analysis(self):
""" Get protocol buffer version of remote analysis """
while 1:
try:
logger.debug("Getting analysis list for Controller/%s" % self.name)
# Get reply
cmd = LophiCommand(G.CTRL_CMD_PICKLE, args=["analysis"])
analysis_buf = self.send_cmd(cmd)
status = G.read_socket_data(self.SOCK)
# unpack protocol buffer
self.analysis = ProtoBuf.unpack_analysis_list(analysis_buf)
return status
except:
self.connect()
def connect(self):
"""
Connect to our controller, retrieve all of the relevant information
and add it to our list.
"""
while 1:
# Try forever to connect
try:
print G.bcolors.WARNING + "Connecting to %s(%s:%s)..." % (self.name, self.host, self.port) + G.bcolors.ENDC,
# Open our socket
self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.SOCK.connect(self.address)
print G.bcolors.OKGREEN + "Connected." + G.bcolors.ENDC
break
except socket.error:
print G.bcolors.FAIL \
+ "Retrying in %d seconds..." % (SOCKET_RETRY) \
+ G.bcolors.ENDC
sleep(SOCKET_RETRY)
continue
def disconnect(self):
"""
Stop all analysis at controllers and close our socket nicely
"""
# Close socket
self.SOCK.close()
def send_analysis(self, filename, cmd):
""" Sends start message and our JSON config """
while 1:
try:
# Send our command to start the analysis
G.send_socket_data(self.SOCK, str(cmd))
# read our analysis file
f = open(filename)
script = f.read()
f.close()
# Send the json config
G.send_socket_data(self.SOCK, script)
status = G.read_socket_data(self.SOCK)
return status
except:
self.connect()
def send_cmd(self, command):
""" Send arbitrary message """
while 1:
try:
# Send our command to start the analysis
G.send_socket_data(self.SOCK, str(command))
# Get our return status
self.status = G.read_socket_data(self.SOCK)
if self.status is None:
raise Exception("Controller Disconnected.")
return self.status
except:
self.connect()
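# Illustrative usage sketch (names and file paths below are assumptions, not
# part of the LO-PHI API):
#
#   import ConfigParser
#   Config = ConfigParser.ConfigParser()
#   Config.read("controllers.conf")        # one section per controller node
#   ctrl = ControllerConfig("controller1", Config)
#   ctrl.connect()
#   status = ctrl.get_machines()           # populates ctrl.machines
#   ctrl.disconnect()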
| bsd-3-clause | -6,303,333,739,526,766,000 | 28.273743 | 124 | 0.515649 | false |
pyfa-org/eos | tests/integration/container/unordered/test_type_unique_set.py | 1 | 8045 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import Fit
from eos import Implant
from eos import Skill
from tests.integration.container.testcase import ContainerTestCase
class TestContainerTypeUniqueSet(ContainerTestCase):
def test_add_none(self):
fit = Fit()
# Action
with self.assertRaises(TypeError):
fit.skills.add(None)
# Verification
self.assertEqual(len(fit.skills), 0)
# Cleanup
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_add_item(self):
fit = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
# Action
fit.skills.add(item)
# Verification
self.assertEqual(len(fit.skills), 1)
self.assertIs(fit.skills[item_type.id], item)
self.assertIn(item, fit.skills)
self.assertIn(item_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_add_item_type_failure(self):
fit = Fit()
item_type = self.mktype()
item = Implant(item_type.id)
# Action
with self.assertRaises(TypeError):
fit.skills.add(item)
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item, fit.skills)
self.assertNotIn(item_type.id, fit.skills)
fit.implants.add(item)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_add_item_value_failure_has_fit(self):
fit = Fit()
fit_other = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
fit_other.skills.add(item)
# Action
with self.assertRaises(ValueError):
fit.skills.add(item)
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertEqual(len(fit_other.skills), 1)
self.assertIs(fit_other.skills[item_type.id], item)
self.assertIn(item, fit_other.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_solsys_buffers_empty(fit_other.solar_system)
self.assert_log_entries(0)
def test_add_item_value_failure_existing_type_id(self):
fit = Fit()
item_type = self.mktype()
item1 = Skill(item_type.id)
item2 = Skill(item_type.id)
fit.skills.add(item1)
# Action
with self.assertRaises(ValueError):
fit.skills.add(item2)
# Verification
self.assertEqual(len(fit.skills), 1)
self.assertIs(fit.skills[item_type.id], item1)
self.assertIn(item1, fit.skills)
self.assertIn(item_type.id, fit.skills)
fit.skills.remove(item1)
fit.skills.add(item2)
# Cleanup
self.assert_item_buffers_empty(item1)
self.assert_item_buffers_empty(item2)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_remove_item(self):
fit = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
fit.skills.add(item)
# Action
fit.skills.remove(item)
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item, fit.skills)
self.assertNotIn(item_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_remove_item_failure(self):
fit = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
# Action
with self.assertRaises(KeyError):
fit.skills.remove(item)
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item, fit.skills)
self.assertNotIn(item_type.id, fit.skills)
fit.skills.add(item)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_delitem_item(self):
fit = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
fit.skills.add(item)
# Action
del fit.skills[item_type.id]
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item, fit.skills)
self.assertNotIn(item_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_delitem_item_failure(self):
fit = Fit()
item_type = self.mktype()
empty_type_id = self.allocate_type_id()
item = Skill(item_type.id)
fit.skills.add(item)
# Action
with self.assertRaises(KeyError):
del fit.skills[empty_type_id]
# Verification
self.assertEqual(len(fit.skills), 1)
self.assertIn(item, fit.skills)
self.assertIn(item_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_key_integrity(self):
fit = Fit()
item_type = self.mktype()
item1 = Skill(item_type.id)
item2 = Skill(item_type.id)
fit.skills.add(item1)
with self.assertRaises(KeyError):
fit.skills.remove(item2)
# Verification
self.assertIs(fit.skills[item_type.id], item1)
# Cleanup
self.assert_item_buffers_empty(item1)
self.assert_item_buffers_empty(item2)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_clear(self):
fit = Fit()
item1_type = self.mktype()
item1 = Skill(item1_type.id)
item2_type = self.mktype()
item2 = Skill(item2_type.id)
fit.skills.add(item1)
fit.skills.add(item2)
# Action
fit.skills.clear()
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item1, fit.skills)
self.assertNotIn(item1_type.id, fit.skills)
self.assertNotIn(item2, fit.skills)
self.assertNotIn(item2_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item1)
self.assert_item_buffers_empty(item2)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_bool(self):
fit = Fit()
item = Skill(self.mktype().id)
self.assertIs(bool(fit.skills), False)
fit.skills.add(item)
self.assertIs(bool(fit.skills), True)
fit.skills.remove(item)
self.assertIs(bool(fit.skills), False)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
| lgpl-3.0 | 6,906,514,192,450,944,000 | 33.676724 | 80 | 0.606339 | false |
dabrahams/0install | zeroinstall/0launch-gui/main.py | 1 | 6132 | # Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, sys
import logging
import warnings
from optparse import OptionParser
from zeroinstall import _, SafeException
from zeroinstall.injector import requirements
from zeroinstall.injector.driver import Driver
from zeroinstall.injector.config import load_config
from zeroinstall.support import tasks
_recalculate = tasks.Blocker('recalculate')
def recalculate():
"""Ask the mainloop to recalculate. If we're already recalculating, wait for that to finish
and then do it again."""
global _recalculate
_recalculate.trigger()
_recalculate = tasks.Blocker('recalculate')
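# Illustrative note (added): UI handlers call recalculate() after any
# user-driven change (e.g. toggling a version restriction); the main() loop
# below yields on the _recalculate blocker and re-runs the solver when it fires.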
def run_gui(args):
parser = OptionParser(usage=_("usage: %prog [options] interface"))
parser.add_option("", "--before", help=_("choose a version before this"), metavar='VERSION')
parser.add_option("", "--cpu", help=_("target CPU type"), metavar='CPU')
parser.add_option("", "--command", help=_("command to select"), metavar='COMMAND')
parser.add_option("-d", "--download-only", help=_("fetch but don't run"), action='store_true')
parser.add_option("-g", "--force-gui", help=_("display an error if there's no GUI"), action='store_true')
parser.add_option("", "--message", help=_("message to display when interacting with user"))
parser.add_option("", "--not-before", help=_("minimum version to choose"), metavar='VERSION')
parser.add_option("", "--os", help=_("target operation system type"), metavar='OS')
parser.add_option("-r", "--refresh", help=_("check for updates of all interfaces"), action='store_true')
parser.add_option("", "--select-only", help=_("only download the feeds"), action='store_true')
parser.add_option("-s", "--source", help=_("select source code"), action='store_true')
parser.add_option("", "--systray", help=_("download in the background"), action='store_true')
parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count')
parser.add_option("-V", "--version", help=_("display version information"), action='store_true')
parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR')
parser.disable_interspersed_args()
(options, args) = parser.parse_args(args)
if options.verbose:
logger = logging.getLogger()
if options.verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
if options.version:
import gui
print("0launch-gui (zero-install) " + gui.version)
print("Copyright (C) 2010 Thomas Leonard")
print(_("This program comes with ABSOLUTELY NO WARRANTY,"
"\nto the extent permitted by law."
"\nYou may redistribute copies of this program"
"\nunder the terms of the GNU Lesser General Public License."
"\nFor more information about these matters, see the file named COPYING."))
sys.exit(0)
def nogui(ex):
if options.force_gui:
fn = logging.warn
else:
fn = logging.info
fn("No GUI available", exc_info = ex)
sys.exit(100)
with warnings.catch_warnings():
if not options.force_gui:
warnings.filterwarnings("ignore")
if sys.version_info[0] < 3:
try:
import pygtk; pygtk.require('2.0')
except ImportError as ex:
nogui(ex)
import gui
try:
if sys.version_info[0] > 2:
from zeroinstall.gtkui import pygtkcompat
pygtkcompat.enable()
pygtkcompat.enable_gtk(version = '3.0')
import gtk
except (ImportError, ValueError) as ex:
nogui(ex)
if gtk.gdk.get_display() is None:
try:
raise SafeException("Failed to connect to display.")
except SafeException as ex:
nogui(ex) # logging needs this as a raised exception
handler = gui.GUIHandler()
config = load_config(handler)
if options.with_store:
from zeroinstall import zerostore
for x in options.with_store:
config.stores.stores.append(zerostore.Store(os.path.abspath(x)))
if len(args) < 1:
@tasks.async
def prefs_main():
import preferences
box = preferences.show_preferences(config)
done = tasks.Blocker('close preferences')
box.connect('destroy', lambda w: done.trigger())
yield done
tasks.wait_for_blocker(prefs_main())
sys.exit(0)
interface_uri = args[0]
if len(args) > 1:
parser.print_help()
sys.exit(1)
import mainwindow, dialog
r = requirements.Requirements(interface_uri)
r.parse_options(options)
widgets = dialog.Template('main')
driver = Driver(config = config, requirements = r)
root_iface = config.iface_cache.get_interface(interface_uri)
driver.solver.record_details = True
window = mainwindow.MainWindow(driver, widgets, download_only = bool(options.download_only), select_only = bool(options.select_only))
handler.mainwindow = window
if options.message:
window.set_message(options.message)
root = config.iface_cache.get_interface(r.interface_uri)
window.browser.set_root(root)
window.window.connect('destroy', lambda w: handler.abort_all_downloads())
if options.systray:
window.use_systray_icon()
@tasks.async
def main():
force_refresh = bool(options.refresh)
while True:
window.refresh_button.set_sensitive(False)
window.browser.set_update_icons(force_refresh)
solved = driver.solve_with_downloads(force = force_refresh, update_local = True)
if not window.systray_icon:
window.show()
yield solved
try:
window.refresh_button.set_sensitive(True)
window.browser.highlight_problems()
tasks.check(solved)
except Exception as ex:
window.report_exception(ex)
if window.systray_icon and window.systray_icon.get_visible() and \
window.systray_icon.is_embedded():
if driver.solver.ready:
window.systray_icon.set_tooltip(_('Downloading updates for %s') % root_iface.get_name())
window.run_button.set_active(True)
else:
# Should already be reporting an error, but
# blink it again just in case
window.systray_icon.set_blinking(True)
refresh_clicked = dialog.ButtonClickedBlocker(window.refresh_button)
yield refresh_clicked, _recalculate
if refresh_clicked.happened:
force_refresh = True
tasks.wait_for_blocker(main())
| lgpl-2.1 | 4,441,060,670,262,614,500 | 31.791444 | 134 | 0.708415 | false |
davy39/eric | Project/NewPythonPackageDialog.py | 1 | 1662 | # -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to add a new Python package.
"""
from __future__ import unicode_literals
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from PyQt5.QtCore import pyqtSlot
from .Ui_NewPythonPackageDialog import Ui_NewPythonPackageDialog
class NewPythonPackageDialog(QDialog, Ui_NewPythonPackageDialog):
"""
Class implementing a dialog to add a new Python package.
"""
def __init__(self, relPath, parent=None):
"""
Constructor
@param relPath initial package path relative to the project root
(string)
@param parent reference to the parent widget (QWidget)
"""
super(NewPythonPackageDialog, self).__init__(parent)
self.setupUi(self)
self.okButton = self.buttonBox.button(QDialogButtonBox.Ok)
self.okButton.setEnabled(False)
rp = relPath.replace("/", ".").replace("\\", ".")
self.packageEdit.setText(rp)
msh = self.minimumSizeHint()
self.resize(max(self.width(), msh.width()), msh.height())
@pyqtSlot(str)
def on_packageEdit_textChanged(self, txt):
"""
Private slot called, when the package name is changed.
@param txt new text of the package name edit (string)
"""
self.okButton.setEnabled(txt != "")
def getData(self):
"""
Public method to retrieve the data entered into the dialog.
@return package name (string)
"""
return self.packageEdit.text()
| gpl-3.0 | -4,034,412,619,558,682,000 | 28.157895 | 72 | 0.619134 | false |
hongzhouye/frankenstein | tools/mol_utils.py | 1 | 8589 | """Utils functions for module MOL
"""
import os
import numpy as np
from frankenstein.tools.io_utils import zmat2xyz
from frankenstein.data.atom_data import get_atomic_number
def get_enuc(Zs, xyzs):
"""Compute nuclear repulsion for a give molecule
Note:
The coordinates must be in unit of Bohr. (1 Bohr = 1.88972612457 Ang)
"""
natom = len(Zs)
assert(len(xyzs) == 3*natom)
rs = np.asarray(xyzs).reshape(natom, 3)
enuc = 0
for i in range(natom):
for j in range(i+1, natom):
enuc += Zs[i]*Zs[j] / np.sum((rs[i]-rs[j])**2.)**0.5
return enuc
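# Illustrative check (values are an assumption for demonstration): two protons
# separated by 1.4 Bohr along z give E_nuc = 1*1/1.4.
# >>> get_enuc([1, 1], [0., 0., 0., 0., 0., 1.4])
# 0.7142857142857143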
# utils for geometry
def parse_gfile(gfile):
"""Parse input geometry file into standard geometry string
"""
if gfile[-4:] == ".xyz":
fname = gfile
elif gfile[-5:] == ".zmat":
fname = ".tmp.xyz"
zmat2xyz(gfile, fname)
else:
raise ValueError("Unknown format of input geometry file {:s}".format(gfile))
with open(fname, "r") as f:
natom = int(f.readline())
f.readline()
gstr = ";".join([" ".join(f.readline().split()[:4]) for i in range(natom)])
if fname == ".tmp.xyz":
os.system("rm .tmp.xyz")
return gstr
def standardize_gstr(gstr):
"""Put input geometry string into standard format
[NOTE] If input string is in Z-mat format, transformation to xyz will be performed first.
"""
atoms = [spg.strip() for spg in gstr.split(";")]
atom0 = atoms[0].split()
if len(atom0) == 1:
fzmat = ".tmp.zmat"
with open(fzmat, "w") as f:
f.write("{:d}\n\n".format(len(atoms)))
f.write("\n".join(atoms))
gstr = parse_gfile(fzmat)
os.system("rm {:s}".format(fzmat))
elif len(atom0) == 4:
gstr = ";".join([" ".join(atom.split()) for atom in atoms])
else:
raise ValueError("Unknown format of input geometry string\n{:s}".format(gstr))
return gstr
def parse_gstr(gstr, scale=1.88972612457):
"""Get atomic numbers and (scaled) atomic coordinates
Inp:
scale (float, optional, default: 1.88972612457):
Scaling factor for coordinates. The default assumes input coordinates are in angstrom and transform them into bohr.
"""
axyzs = [atom.split() for atom in gstr.split(";")]
natom = len(axyzs)
atoms = [None] * natom
xyzs = np.zeros(3*natom)
for ia in range(natom):
atoms[ia] = axyzs[ia][0]
xyzs[ia*3:(ia+1)*3] = list(map(float, axyzs[ia][1:]))
xyzs *= scale
xyzs = xyzs.tolist()
Zs = [get_atomic_number(atoms[ia]) for ia in range(natom)]
return atoms, Zs, xyzs
class GEOM:
"""Parse user-inpute geometry
"""
def __init__(self, gfs):
"""Constructor
Inp:
gfs (str):
Geometry file or string.
Geometry file must end with either ".xyz" or ".zmat" and follow format:
```
Natom
comment
Atom1 x y z
Atom2 x y z
...
```
for ".xyz", or
```
Natom
comment
Atom1
Atom2 1 dist(1,2)
...
```
for ".zmat". Geometry string follows the same format as either file format, but (1) without heading lines (Natom + comment), and (2) line separation is replaced by semicolon. For example, for xyz format,
gstr = "Atom1 x y z; Atom2 x y z; ..."
[NOTE] Input Z-mat format will be transformed into xyz format automatically! And only the latter will be stored.
Properties:
gtype : "file" or "str"
gstr : "Atom1 x y z;Atom2 x y z;..."
"""
self.parse_gfs(gfs)
self.lspace = 8 # number of spaces on the left (for printing)
def parse_gfs(self, gfs):
"""Parsing geometry string or file into standard form.
"""
if ".xyz" in gfs or ".zmat" in gfs:
self.gtype = "file"
self.gstr = parse_gfile(gfs)
else:
self.gtype = "str"
self.gstr = standardize_gstr(gfs)
def parse_gstr(self, scale=1.88972612457):
return parse_gstr(self.gstr, scale=scale)
def __str__(self):
gstr_out = " "*(self.lspace//2) + "Nuclear Coordinates:\n"
atoms = self.gstr.split(";")
for ia in range(len(atoms)):
axyz = atoms[ia].split()
axyz[0] = " " * self.lspace + axyz[0]
atoms[ia] = " ".join(axyz)
gstr_out += "\n".join(atoms)
return gstr_out
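# Illustrative usage (the geometry string is an assumption for demonstration):
#   g = GEOM("H 0.0 0.0 0.0; H 0.0 0.0 0.74")
#   atoms, Zs, xyzs = g.parse_gstr()   # xyzs are scaled Angstrom -> Bohr
#   print(g)                           # pretty-prints the nuclear coordinates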
def get_Zxyz(geom, scale=1.88972612457, retSymb=False):
"""Get atom symbols and coordinates
Note:
The default of "scale" assumes input geometry uses unit "Angstrom" and
tranformas it into "Bohr". Use "scale = 1." to stay with "Angstrom".
"""
gstr_raw = parse_gfile(geom)
gstr = standardize_gstr(gstr_raw)
atoms, Zs, xyzs = parse_gstr(gstr, scale)
if retSymb:
return atoms, xyzs
else:
return Zs, xyzs
def get_noccs(Zs, charge, spin):
"""Determine # of alpha and beta electrons
Inp:
Zs (list of int):
A list of atomic numbers (i.e., nuclear charges) for each atom
charge (int):
Net charge (nelectron - sum(Zs))
spin (int):
Spin multiplicity (2S + 1)
Out:
noccs (list):
[nocca, noccb]
"""
nuc_charge = int(sum(Zs))
nocca = (nuc_charge - charge + spin - 1) // 2
noccb = nocca + 1 - spin
if nuc_charge - (nocca + noccb) != charge:
raise RuntimeError(("Bad combination of spin (={:d}) and "
"charge (={:d})").format(spin, charge))
return [nocca, noccb]
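# Illustrative check: neutral singlet water, Zs = [8, 1, 1], charge = 0,
# spin = 1 (2S+1), gives 10 electrons split evenly between alpha and beta.
# >>> get_noccs([8, 1, 1], 0, 1)
# [5, 5]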
def get_orth_mat(S, orth_method, ao_lindep_thresh):
"""Compute matrix X that is used for orthogonalizing basis functions
Inp:
S (np.ndarray, nao*nao):
AO overlap matrix
orth_method (str):
Either "symm" or "cano"
ao_lindep_thresh (int):
10**-ao_lindep_thresh is the threshold for "basically zero"
eigenvalues (only used and must be given in "cano"
orthogonalization)
Out:
X (np.ndarray, nao*nmo):
Meaning clear from eqn, h_orth = X.T @ h @ X.
nmo = nao for orth_method = "symm"
nmo = # of linearly dependent AOs for orth_method = "cano"
smin (float):
smallest eigenvalue of S
"""
e, u = np.linalg.eigh(S)
n_lindep = int(np.sum(e < 10**-ao_lindep_thresh))
smin = e[0]
if orth_method.upper() == "SYMM":
if n_lindep > 0:
raise RuntimeError("""orth_method = "symm" cannot handle linear dependency in AO basis. Please use a more tolerant ao_lindep_thresh (default: 6) or use orth_method = "cano".""")
X = u @ np.diag(e**-0.5) @ u.T
Xinv = u @ np.diag(e**0.5) @ u.T
elif orth_method.upper() == "CANO":
X = u[:,n_lindep:] @ np.diag(e[n_lindep:]**-0.5)
Xinv = np.diag(e[n_lindep:]**0.5) @ u[:,n_lindep:].T
else:
raise RuntimeError("Unknown orth_method {:s}.".format(orth_method))
return X, Xinv, smin
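# Illustrative check (assumed 2x2 overlap matrix): symmetric orthogonalization
# must satisfy X.T @ S @ X = identity when there is no linear dependency.
#   S = np.array([[1.0, 0.2], [0.2, 1.0]])
#   X, Xinv, smin = get_orth_mat(S, "symm", 6)
#   assert np.allclose(X.T @ S @ X, np.eye(2))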
# utils for basis (TODO: move these functions to basis_utils.py)
def get_pure_by_l(ls, pures):
"""1. Check if same l has same purity; 2. return pures by l
"""
max_l = max(ls)
pure_by_l = [None] * (max_l+1)
for l, pure in zip(ls, pures):
if pure_by_l[l] is None:
pure_by_l[l] = pure
else:
if pure_by_l[l] != pure:
raise ValueError("Two shells with same angular momentum have different purity.")
return pure_by_l
def get_norb_l(l, pure):
"""Get number of orbitals for a given angular momentum
"""
if pure:
return 2 * l + 1
else:
return (l + 1) * (l + 2) // 2
def get_idao_by_l(ls, pures):
"""Get starting index of each group of AO (grouped by angular momentum)
Inp:
ls ([int] * nbas):
A list of angular momentum
pures ([bool] * nbas):
Indicate each l in ls is spheric (pure=True) or cartesian.
Output:
idao_by_l ([ [int] * nbas_this_l ] * max_l)
"""
max_l = max(ls)
idao_by_l = [[] for i in range(max_l+1)]
p0 = 0
for i in range(len(ls)):
l, pure = ls[i], pures[i]
p1 = p0 + get_norb_l(l, pure)
idao_by_l[l].append(p0)
p0 = p1
return idao_by_l
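# Illustrative check: one s shell followed by one spherical p shell,
# ls = [0, 1], pures = [True, True], gives idao_by_l = [[0], [1]] because the
# single s AO occupies index 0 and the p block starts right after it.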
| bsd-3-clause | -3,732,571,148,976,621,600 | 29.031469 | 219 | 0.544301 | false |
lipixun/pytest | rabbitmq/deadchannel/going2dead.py | 1 | 2112 | #!/usr/bin/env python
# encoding=utf8
# The dead channel applicationn
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from uuid import uuid4
from time import time, sleep
from haigha.connections.rabbit_connection import RabbitConnection
from haigha.message import Message
class Client(object):
"""The RPC Client
"""
def __init__(self, host, port, vhost, user, password):
"""Create a new Server
"""
self._conn = RabbitConnection(host = host, port = port, vhost = vhost, user = user, password = password)
self._channel = self._conn.channel()
result = self._channel.queue.declare(arguments = { 'x-dead-letter-exchange': 'amq.topic', 'x-dead-letter-routing-key': 'test.dead_channel' })
self._deadQueue = result[0]
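        # The x-dead-letter-* arguments above tell RabbitMQ to republish any
        # message dead-lettered from this queue to the amq.topic exchange with
        # routing key 'test.dead_channel'.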
# Send a message
self._channel.basic.publish(Message('OMG! I\'m dead!'), '', self._deadQueue)
def dead(self):
"""Normal dead
"""
self._channel.close()
if __name__ == '__main__':
from argparse import ArgumentParser
def getArguments():
"""Get arguments
"""
parser = ArgumentParser(description = 'RabbitMQ dead channel client')
parser.add_argument('--host', dest = 'host', required = True, help = 'The host')
parser.add_argument('--port', dest = 'port', default = 5672, type = int, help = 'The port')
parser.add_argument('--vhost', dest = 'vhost', default = '/test', help = 'The virtual host')
parser.add_argument('--user', dest = 'user', default = 'test', help = 'The user name')
parser.add_argument('--password', dest = 'password', default = 'test', help = 'The password')
# Done
return parser.parse_args()
def main():
"""The main entry
"""
args = getArguments()
# Create the server
client = Client(args.host, args.port, args.vhost, args.user, args.password)
# Go to dead
print 'Will go to dead in 10s, or you can use ctrl + c to cause a unexpected death'
sleep(10)
client.dead()
print 'Normal dead'
main()
| gpl-2.0 | -8,202,055,047,594,408,000 | 33.064516 | 149 | 0.606061 | false |
shouya/thinking-dumps | automata/homework/project2/CYK.py | 1 | 4714 | '''
CYK algorithm for Context Free Language
Author: Chenguang Zhu
CS154, Stanford University
'''
import sys,traceback
import os
import string
maxProductionNum = 100 #max number of productions
VarNum = 4
production = [[0] * 3 for i in range(maxProductionNum+1)]
'''Productions in Chomsky Normal Form (CNF)
production[i][0] is the number for the variable (0~3, 0: S 1: A, 2: B, 3: C)
If this production is A->BC (two variables), then production[i][1] and production[i][2] will contain the numbers for these two variables
If this production is A->a (a single terminal), then production[i][1] will contain the number for the terminal (0 or 1, 0: a, 1: b), production[i][2]=-1'''
X = [[[False]*3 for i in range(10)] for j in range(10)]
'''X[i][j][s]=true if and only if variable s (0~3, 0: S 1: A, 2: B, 3: C) is in X_ij defined in CYK
Suppose the length of string to be processed is L, then 0<=i<=j<L '''
#check whether (a,b,c) exists in production
def existProd(a, b, c):
global production
for i in range(len(production)):
if ((production[i][0]==a) and
(production[i][1]==b) and
(production[i][2]==c)):
return True
return False
'''CYK algorithm
Calculate the array X
w is the string to be processed'''
def calcCYK(w):
global X
global VarNum
L=len(w)
X=[[[False]*VarNum for i in range(L)] for j in range(L)]
# X=[[[] for i in range(L)] for j in range(L)]
for x in range(L):
calc_cell_basic(x, w)
for dist in range(1,L):
calc_row(dist, L)
tmp = [[lengthify(i) for i in j] for j in X]
X = tmp
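# Membership test (illustrative): for an input w of length L, after calcCYK(w)
# the string is derivable from the start variable S exactly when X[0][L-1][0]
# is True -- the standard CYK acceptance condition.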
def calc_row(dist, l):
global X
for i in range(l - dist):
head = i
tail = i + dist
calc_cell(head, tail)
def lengthify(xs):
global VarNum
result = [False] * VarNum
i = 0
for x in xs:
result[i] = x
i += 1
return result
def calc_cell_basic(col, w):
global X
ww = w[col]
poss = [False] * VarNum
    for i in range(VarNum):
        if existProd(i, ww, -1):
poss[i] = True
X[col][col] = poss
def prod(xs, ys):
result = []
for x in range(len(xs)):
for y in range(len(ys)):
if xs[x] and ys[y]:
                for i in range(VarNum):
if existProd(i, x, y):
result.append(i)
return result
def calc_cell(head, tail):
global X
poss = [False] * VarNum
for i in range(tail - head):
xs = X[head][head + i]
ys = X[head + i + 1][tail]
for i in prod(xs, ys):
poss[i] = True
X[head][tail] = poss
def Start(filename):
global X
global VarNum
global production
result=''
#read data case line by line from file
try:
br=open(filename,'r')
#example on Page 8 of lecture 15_CFL5
production=[[0]*3 for i in range(7)]
production[0][0]=0; production[0][1]=1; production[0][2]=2 #S->AB
production[1][0]=1; production[1][1]=2; production[1][2]=3 #A->BC
production[2][0]=1; production[2][1]=0; production[2][2]=-1 #A->a
production[3][0]=2; production[3][1]=1; production[3][2]=3 #B->AC
production[4][0]=2; production[4][1]=1; production[4][2]=-1 #B->b
production[5][0]=3; production[5][1]=0; production[5][2]=-1 #C->a
production[6][0]=3; production[6][1]=1; production[6][2]=-1 #C->b
result=''
#Read File Line By Line
for string in br:
string=string.strip()
print 'Processing '+string+'...'
length=len(string)
w=[0]*length
for i in range(length):
w[i]=ord(string[i])-ord('a') #convert 'a' to 0 and 'b' to 1
#Use CYK algorithm to calculate X
calcCYK(w)
#Get/print the full table X
for step in range(length-1,-1,-1):
for i in range(length-step):
j=i+step
for k in range(VarNum):
if (X[i][j][k]):
result=result+str(k)
result=result+' '
result=result+'\n'
#Close the input stream
br.close()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,limit=2, file=sys.stdout)
result=result+'error'
return result
def main(filepath):
return Start(filepath)
if __name__ == '__main__':
main(sys.argv[1])
| mit | 6,106,430,936,488,291,000 | 27.098765 | 157 | 0.530972 | false |
twitter/heron | heron/tools/tracker/src/python/handlers/exceptionhandler.py | 2 | 5186 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" exceptionhandler.py """
import traceback
import tornado.gen
import tornado.web
from heron.common.src.python.utils.log import Log
from heron.proto import common_pb2
from heron.proto import tmanager_pb2
from heron.tools.tracker.src.python import constants
from heron.tools.tracker.src.python.handlers import BaseHandler
# pylint: disable=attribute-defined-outside-init
class ExceptionHandler(BaseHandler):
"""
URL - /topologies/exceptions?cluster=<cluster>&topology=<topology> \
&environ=<environment>&component=<component>
Parameters:
- cluster - Name of cluster.
- environ - Running environment.
- role - (optional) Role used to submit the topology.
- topology - Name of topology (Note: Case sensitive. Can only
include [a-zA-Z0-9-_]+)
- component - Component name
- instance - (optional, repeated)
Returns all exceptions for the component of the topology.
"""
def initialize(self, tracker):
""" initialize """
self.tracker = tracker
@tornado.gen.coroutine
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
environ = self.get_argument_environ()
role = self.get_argument_role()
topName = self.get_argument_topology()
component = self.get_argument_component()
topology = self.tracker.get_topology(
cluster, role, environ, topName)
instances = self.get_arguments(constants.PARAM_INSTANCE)
exceptions_logs = yield tornado.gen.Task(self.getComponentException,
topology.tmanager, component, instances)
self.write_success_response(exceptions_logs)
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
# pylint: disable=bad-option-value, dangerous-default-value, no-self-use,
# pylint: disable=unused-argument
@tornado.gen.coroutine
def getComponentException(self, tmanager, component_name, instances=[], callback=None):
"""
Get all (last 1000) exceptions for 'component_name' of the topology.
Returns an Array of exception logs on success.
Returns json with message on failure.
"""
if not tmanager or not tmanager.host or not tmanager.stats_port:
return
exception_request = tmanager_pb2.ExceptionLogRequest()
exception_request.component_name = component_name
if len(instances) > 0:
exception_request.instances.extend(instances)
request_str = exception_request.SerializeToString()
port = str(tmanager.stats_port)
host = tmanager.host
url = "http://{0}:{1}/exceptions".format(host, port)
request = tornado.httpclient.HTTPRequest(url,
body=request_str,
method='POST',
request_timeout=5)
Log.debug('Making HTTP call to fetch exceptions url: %s', url)
try:
client = tornado.httpclient.AsyncHTTPClient()
result = yield client.fetch(request)
Log.debug("HTTP call complete.")
except tornado.httpclient.HTTPError as e:
raise Exception(str(e))
# Check the response code - error if it is in 400s or 500s
responseCode = result.code
if responseCode >= 400:
message = "Error in getting exceptions from Tmanager, code: " + responseCode
Log.error(message)
raise tornado.gen.Return({
"message": message
})
# Parse the response from tmanager.
exception_response = tmanager_pb2.ExceptionLogResponse()
exception_response.ParseFromString(result.body)
if exception_response.status.status == common_pb2.NOTOK:
if exception_response.status.HasField("message"):
raise tornado.gen.Return({
"message": exception_response.status.message
})
# Send response
ret = []
for exception_log in exception_response.exceptions:
ret.append({'hostname': exception_log.hostname,
'instance_id': exception_log.instance_id,
'stack_trace': exception_log.stacktrace,
'lasttime': exception_log.lasttime,
'firsttime': exception_log.firsttime,
'count': str(exception_log.count),
'logging': exception_log.logging})
raise tornado.gen.Return(ret)
| apache-2.0 | -599,097,691,548,044,800 | 37.992481 | 89 | 0.669495 | false |
macosforge/ccs-calendarserver | txdav/caldav/datastore/test/test_index_file.py | 1 | 36284 | ##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import deferLater
from txdav.caldav.datastore.index_file import Index, MemcachedUIDReserver
from txdav.caldav.datastore.query.filter import Filter
from txdav.common.icommondatastore import ReservationError, \
InternalDataStoreError
from twistedcaldav import caldavxml
from twistedcaldav.caldavxml import TimeRange
from twistedcaldav.ical import Component, InvalidICalendarDataError
from twistedcaldav.instance import InvalidOverriddenInstanceError
from twistedcaldav.test.util import InMemoryMemcacheProtocol
import twistedcaldav.test.util
from pycalendar.datetime import DateTime
import os
class MinimalCalendarObjectReplacement(object):
"""
Provide the minimal set of attributes and methods from CalDAVFile required
by L{Index}.
"""
def __init__(self, filePath):
self.fp = filePath
def iCalendar(self):
with self.fp.open() as f:
text = f.read()
try:
component = Component.fromString(text)
# Fix any bogus data we can
component.validCalendarData()
component.validCalendarForCalDAV(methodAllowed=False)
except InvalidICalendarDataError, e:
raise InternalDataStoreError(
"File corruption detected (%s) in file: %s"
                % (e, self.fp.path)
)
return component
class MinimalResourceReplacement(object):
"""
Provide the minimal set of attributes and methods from CalDAVFile required
by L{Index}.
"""
class MinimalTxn(object):
def postCommit(self, _ignore):
pass
def postAbort(self, _ignore):
pass
def __init__(self, filePath):
self.fp = filePath
self._txn = MinimalResourceReplacement.MinimalTxn()
def isCalendarCollection(self):
return True
def getChild(self, name):
# FIXME: this should really return something with a child method
return MinimalCalendarObjectReplacement(self.fp.child(name))
def initSyncToken(self):
pass
class SQLIndexTests (twistedcaldav.test.util.TestCase):
"""
Test abstract SQL DB class
"""
def setUp(self):
super(SQLIndexTests, self).setUp()
self.site.resource.isCalendarCollection = lambda: True
self.indexDirPath = self.site.resource.fp
# FIXME: since this resource lies about isCalendarCollection, it doesn't
# have all the associated backend machinery to actually get children.
self.db = Index(MinimalResourceReplacement(self.indexDirPath))
def tearDown(self):
self.db._db_close()
def test_reserve_uid_ok(self):
uid = "test-test-test"
d = self.db.isReservedUID(uid)
d.addCallback(self.assertFalse)
d.addCallback(lambda _: self.db.reserveUID(uid))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertTrue)
d.addCallback(lambda _: self.db.unreserveUID(uid))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertFalse)
return d
def test_reserve_uid_twice(self):
uid = "test-test-test"
d = self.db.reserveUID(uid)
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertTrue)
d.addCallback(lambda _:
self.assertFailure(self.db.reserveUID(uid),
ReservationError))
return d
def test_unreserve_unreserved(self):
uid = "test-test-test"
return self.assertFailure(self.db.unreserveUID(uid),
ReservationError)
def test_reserve_uid_timeout(self):
# WARNING: This test is fundamentally flawed and will fail
# intermittently because it uses the real clock.
uid = "test-test-test"
from twistedcaldav.config import config
old_timeout = config.UIDReservationTimeOut
config.UIDReservationTimeOut = 1
def _finally():
config.UIDReservationTimeOut = old_timeout
d = self.db.isReservedUID(uid)
d.addCallback(self.assertFalse)
d.addCallback(lambda _: self.db.reserveUID(uid))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertTrue)
d.addCallback(lambda _: deferLater(reactor, 2, lambda: None))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertFalse)
self.addCleanup(_finally)
return d
def test_index(self):
data = (
(
"#1.1 Simple component",
"1.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
False,
True,
),
(
"#2.1 Recurring component",
"2.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
END:VCALENDAR
""",
False,
True,
),
(
"#2.2 Recurring component with override",
"2.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.2
RECURRENCE-ID:20080608T120000Z
DTSTART:20080608T120000Z
DTEND:20080608T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
False,
True,
),
(
"#2.3 Recurring component with broken override - new",
"2.3",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.3
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.3
RECURRENCE-ID:20080609T120000Z
DTSTART:20080608T120000Z
DTEND:20080608T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
False,
False,
),
(
"#2.4 Recurring component with broken override - existing",
"2.4",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.4
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.4
RECURRENCE-ID:20080609T120000Z
DTSTART:20080608T120000Z
DTEND:20080608T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
True,
True,
),
)
for description, name, calendar_txt, reCreate, ok in data:
calendar = Component.fromString(calendar_txt)
if ok:
with open(os.path.join(self.indexDirPath.path, name), "w") as f:
f.write(calendar_txt)
self.db.addResource(name, calendar, reCreate=reCreate)
self.assertTrue(self.db.resourceExists(name), msg=description)
else:
self.assertRaises(InvalidOverriddenInstanceError, self.db.addResource, name, calendar)
self.assertFalse(self.db.resourceExists(name), msg=description)
self.db._db_recreate()
for description, name, calendar_txt, reCreate, ok in data:
if ok:
self.assertTrue(self.db.resourceExists(name), msg=description)
else:
self.assertFalse(self.db.resourceExists(name), msg=description)
self.db.testAndUpdateIndex(DateTime(2020, 1, 1))
for description, name, calendar_txt, reCreate, ok in data:
if ok:
self.assertTrue(self.db.resourceExists(name), msg=description)
else:
self.assertFalse(self.db.resourceExists(name), msg=description)
@inlineCallbacks
def test_index_timerange(self):
"""
A plain (not freebusy) time range test.
"""
data = (
(
"#1.1 Simple component - busy",
"1.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
"20080601T000000Z", "20080602T000000Z",
),
(
"#1.2 Simple component - transparent",
"1.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080602T120000Z
DTEND:20080602T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""",
"20080602T000000Z", "20080603T000000Z",
),
(
"#1.3 Simple component - canceled",
"1.3",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.3
DTSTART:20080603T120000Z
DTEND:20080603T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
STATUS:CANCELLED
END:VEVENT
END:VCALENDAR
""",
"20080603T000000Z", "20080604T000000Z",
),
(
"#1.4 Simple component - tentative",
"1.4",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.4
DTSTART:20080604T120000Z
DTEND:20080604T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
STATUS:TENTATIVE
END:VEVENT
END:VCALENDAR
""",
"20080604T000000Z", "20080605T000000Z",
),
(
"#2.1 Recurring component - busy",
"2.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.1
DTSTART:20080605T120000Z
DTEND:20080605T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=2
END:VEVENT
END:VCALENDAR
""",
"20080605T000000Z", "20080607T000000Z",
),
(
"#2.2 Recurring component - busy",
"2.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.2
DTSTART:20080607T120000Z
DTEND:20080607T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.2
RECURRENCE-ID:20080608T120000Z
DTSTART:20080608T140000Z
DTEND:20080608T150000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""",
"20080607T000000Z", "20080609T000000Z",
),
)
for description, name, calendar_txt, trstart, trend in data:
calendar = Component.fromString(calendar_txt)
with open(os.path.join(self.indexDirPath.path, name), "w") as f:
f.write(calendar_txt)
self.db.addResource(name, calendar)
self.assertTrue(self.db.resourceExists(name), msg=description)
# Create fake filter element to match time-range
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
caldavxml.ComponentFilter(
TimeRange(
start=trstart,
end=trend,
),
name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
),
name="VCALENDAR",
)
)
filter = Filter(filter)
resources = yield self.db.indexedSearch(filter)
index_results = set()
for found_name, _ignore_uid, _ignore_type in resources:
index_results.add(found_name)
self.assertEqual(set((name,)), index_results, msg=description)
@inlineCallbacks
def test_index_timespan(self):
data = (
(
"#1.1 Simple component - busy",
"1.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
"20080601T000000Z", "20080602T000000Z",
"mailto:user1@example.com",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
),
(
"#1.2 Simple component - transparent",
"1.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080602T120000Z
DTEND:20080602T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""",
"20080602T000000Z", "20080603T000000Z",
"mailto:user1@example.com",
(('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'T'),),
),
(
"#1.3 Simple component - canceled",
"1.3",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.3
DTSTART:20080603T120000Z
DTEND:20080603T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
STATUS:CANCELLED
END:VEVENT
END:VCALENDAR
""",
"20080603T000000Z", "20080604T000000Z",
"mailto:user1@example.com",
(('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'F', 'F'),),
),
(
"#1.4 Simple component - tentative",
"1.4",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.4
DTSTART:20080604T120000Z
DTEND:20080604T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
STATUS:TENTATIVE
END:VEVENT
END:VCALENDAR
""",
"20080604T000000Z", "20080605T000000Z",
"mailto:user1@example.com",
(('N', "2008-06-04 12:00:00", "2008-06-04 13:00:00", 'T', 'F'),),
),
(
"#2.1 Recurring component - busy",
"2.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.1
DTSTART:20080605T120000Z
DTEND:20080605T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=2
END:VEVENT
END:VCALENDAR
""",
"20080605T000000Z", "20080607T000000Z",
"mailto:user1@example.com",
(
('N', "2008-06-05 12:00:00", "2008-06-05 13:00:00", 'B', 'F'),
('N', "2008-06-06 12:00:00", "2008-06-06 13:00:00", 'B', 'F'),
),
),
(
"#2.2 Recurring component - busy",
"2.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.2
DTSTART:20080607T120000Z
DTEND:20080607T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.2
RECURRENCE-ID:20080608T120000Z
DTSTART:20080608T140000Z
DTEND:20080608T150000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""",
"20080607T000000Z", "20080609T000000Z",
"mailto:user1@example.com",
(
('N', "2008-06-07 12:00:00", "2008-06-07 13:00:00", 'B', 'F'),
('N', "2008-06-08 14:00:00", "2008-06-08 15:00:00", 'B', 'T'),
),
),
)
for description, name, calendar_txt, trstart, trend, organizer, instances in data:
calendar = Component.fromString(calendar_txt)
with open(os.path.join(self.indexDirPath.path, name), "w") as f:
f.write(calendar_txt)
self.db.addResource(name, calendar)
self.assertTrue(self.db.resourceExists(name), msg=description)
# Create fake filter element to match time-range
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
caldavxml.ComponentFilter(
TimeRange(
start=trstart,
end=trend,
),
name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
),
name="VCALENDAR",
)
)
filter = Filter(filter)
resources = yield self.db.indexedSearch(filter, fbtype=True)
index_results = set()
            for _ignore_name, _ignore_uid, _ignore_type, test_organizer, float_flag, start, end, fbtype, transp in resources:
                self.assertEqual(test_organizer, organizer, msg=description)
                index_results.add((float_flag, start, end, fbtype, transp,))
self.assertEqual(set(instances), index_results, msg=description)
@inlineCallbacks
def test_index_timespan_per_user(self):
data = (
(
"#1.1 Single per-user non-recurring component",
"1.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080602T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),),
),
(
"user02",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
),
),
),
(
"#1.2 Two per-user non-recurring component",
"1.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080602T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),),
),
(
"user02",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
),
(
"user03",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
),
),
),
(
"#2.1 Single per-user simple recurring component",
"2.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080603T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'T'),
),
),
(
"user02",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
),
),
),
),
(
"#2.2 Two per-user simple recurring component",
"2.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080603T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'T'),
),
),
(
"user02",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
),
),
(
"user03",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
),
),
),
),
(
"#3.1 Single per-user complex recurring component",
"3.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-1.1
RECURRENCE-ID:20080602T120000Z
DTSTART:20080602T130000Z
DTEND:20080602T140000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080602T120000Z
TRANSP:OPAQUE
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080604T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
),
),
(
"user02",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'F'),
),
),
),
),
(
"#3.2 Two per-user complex recurring component",
"3.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-1.2
RECURRENCE-ID:20080602T120000Z
DTSTART:20080602T130000Z
DTEND:20080602T140000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080602T120000Z
TRANSP:OPAQUE
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080603T120000Z
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080604T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
),
),
(
"user02",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
),
),
(
"user03",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'F'),
),
),
),
),
)
for description, name, calendar_txt, trstart, trend, organizer, peruserinstances in data:
calendar = Component.fromString(calendar_txt)
with open(os.path.join(self.indexDirPath.path, name), "w") as f:
f.write(calendar_txt)
self.db.addResource(name, calendar)
self.assertTrue(self.db.resourceExists(name), msg=description)
# Create fake filter element to match time-range
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
caldavxml.ComponentFilter(
TimeRange(
start=trstart,
end=trend,
),
name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
),
name="VCALENDAR",
)
)
filter = Filter(filter)
for useruid, instances in peruserinstances:
resources = yield self.db.indexedSearch(filter, useruid=useruid, fbtype=True)
index_results = set()
                for _ignore_name, _ignore_uid, _ignore_type, test_organizer, float_flag, start, end, fbtype, transp in resources:
                    self.assertEqual(test_organizer, organizer, msg=description)
                    index_results.add((str(float_flag), str(start), str(end), str(fbtype), str(transp),))
self.assertEqual(set(instances), index_results, msg="%s, user:%s" % (description, useruid,))
self.db.deleteResource(name)
def test_index_revisions(self):
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
"""
data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
END:VCALENDAR
"""
data3 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.3
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
END:VCALENDAR
"""
calendar = Component.fromString(data1)
self.db.addResource("data1.ics", calendar)
calendar = Component.fromString(data2)
self.db.addResource("data2.ics", calendar)
calendar = Component.fromString(data3)
self.db.addResource("data3.ics", calendar)
self.db.deleteResource("data3.ics")
tests = (
(0, (["data1.ics", "data2.ics", ], [], [],)),
(1, (["data2.ics", ], ["data3.ics", ], [],)),
(2, ([], ["data3.ics", ], [],)),
(3, ([], ["data3.ics", ], [],)),
(4, ([], [], [],)),
(5, ([], [], [],)),
)
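        # Reading the expectations above: each result tuple pairs a revision
        # with (changed, deleted, <third list, empty in all these cases>)
        # resource names -- inferred from the data, not from any documented
        # contract of whatchanged().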
for revision, results in tests:
self.assertEquals(self.db.whatchanged(revision), results, "Mismatched results for whatchanged with revision %d" % (revision,))
class MemcacheTests(SQLIndexTests):
def setUp(self):
super(MemcacheTests, self).setUp()
self.memcache = InMemoryMemcacheProtocol()
self.db.reserver = MemcachedUIDReserver(self.db, self.memcache)
def tearDown(self):
super(MemcacheTests, self).tearDown()
for _ignore_k, v in self.memcache._timeouts.iteritems():
if v.active():
v.cancel()
| apache-2.0 | -2,447,090,324,838,265,300 | 30.198624 | 138 | 0.600182 | false |
SNeuhausen/training_management | models/resource_analysis/trainer_workload_analyzer.py | 1 | 4295 | # -*- coding: utf-8 -*-
from openerp import api, models
from openerp.addons.training_management.models.model_names import ModelNames
from openerp.addons.training_management.utils.date_utils import DateUtils
class TrainerWorkloadAnalyzer(models.AbstractModel):
_name = ModelNames.TRAINER_WORKLOAD_ANALYZER
@api.model
def compute_trainer_workload_data(self, start_date, end_date):
start_date, end_date = DateUtils.convert_to_dates(start_date, end_date)
first_week = DateUtils.get_monday_of_week(start_date)
last_week = DateUtils.get_friday_of_week(end_date)
trainer_workload_data = {
"weeks_to_display": [],
"trainer_info": {},
"workloads": {},
"workload_totals": {},
}
current_week = first_week
while current_week <= last_week:
year_week = DateUtils.build_year_week_string_from_date(current_week)
trainer_workload_data["weeks_to_display"].append(year_week)
current_week += DateUtils.ONE_WEEK_TIME_DELTA
partner_model = self.env[ModelNames.PARTNER]
trainers = partner_model.search([("is_trainer", "=", True)])
for trainer in trainers:
trainer_id = str(trainer.id)
trainer_workload_data["workloads"][trainer_id] = {}
self._add_trainer_info(trainer_workload_data, trainer)
resources = self._find_resources_in_range_having_trainer(first_week, last_week, trainers)
self._update_trainer_workload_data_from_resources(resources, trainer_workload_data)
workloads = trainer_workload_data["workloads"]
for trainer_id, trainer_workload in workloads.iteritems():
lesson_total = sum(trainer_workload.values())
trainer_workload_data["workload_totals"][trainer_id] = lesson_total;
return trainer_workload_data
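    # Shape of the returned structure (a sketch; values are illustrative):
    # {
    #     "weeks_to_display": ["2014-01", "2014-02", ...],
    #     "trainer_info": {"7": {"color_name": "red", "name": u"Doe, John"}},
    #     "workloads": {"7": {"2014-01": 12, "2014-02": 8}},
    #     "workload_totals": {"7": 20},
    # }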
@staticmethod
def _add_trainer_info(trainer_workload_data, trainer):
trainer_info = trainer_workload_data["trainer_info"]
trainer_id = str(trainer.id)
if trainer_id not in trainer_info:
trainer_info[trainer_id] = {}
trainer_info[trainer_id].update({
"color_name": trainer.color_name,
"name": u"{surname}, {forename}".format(surname=trainer.surname, forename=trainer.forename),
})
def _update_trainer_workload_data_from_resources(self, resources, trainer_workload_data):
for resource in resources:
if not resource.trainer_id:
continue
trainer_id = str(resource.trainer_id.id)
year_week = resource.year_week_string
workloads = trainer_workload_data["workloads"]
if trainer_id not in workloads:
workloads[trainer_id] = {}
self._add_trainer_info(trainer_workload_data, resource.trainer_id)
trainer_workload = workloads[trainer_id]
if year_week not in trainer_workload:
trainer_workload[year_week] = 0
trainer_workload[year_week] += resource.get_lesson_count()
def _find_resources_in_range_having_trainer(self, start_date, end_date, trainers):
resource_model = self.env[ModelNames.RESOURCE]
domain = [
("date", ">=", DateUtils.convert_to_string(start_date)),
("date", "<=", DateUtils.convert_to_string(end_date)),
("trainer_id", "in", trainers.ids),
]
return resource_model.search(domain)
@api.model
@api.returns("self")
def find_trainers_with_main_location(self, main_location_id):
trainer_model = self.env[ModelNames.TRAINER]
domain = [
("is_trainer", "=", True),
("main_location_id", "=", main_location_id)
]
trainers = trainer_model.search(domain)
return trainers
def _find_trainers_for_user_locations(self):
location_model = self.env[ModelNames.LOCATION]
trainer_model = self.env[ModelNames.TRAINER]
user_locations = location_model.search([("user_ids", "in", [self.env.user.id])])
domain = [
("is_trainer", "=", True),
("main_location_id", "in", user_locations.ids)
]
trainers = trainer_model.search(domain)
return trainers
| gpl-3.0 | -8,989,901,224,261,650,000 | 41.524752 | 104 | 0.618859 | false |
dask-image/dask-ndfourier | dask_ndfourier/_compat.py | 1 | 1574 | # -*- coding: utf-8 -*-
"""
Content here is borrowed from our contributions to Dask.
"""
import numpy
import dask.array
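# Blockwise kernel used by _fftfreq below: indices at or above (n + 1) // 2
# wrap around to negative frequencies, and the whole block is scaled by
# 1 / (n * d).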
def _fftfreq_block(i, n, d):
r = i.copy()
r[i >= (n + 1) // 2] -= n
r /= n * d
return r
def _fftfreq(n, d=1.0, chunks=None):
"""
Return the Discrete Fourier Transform sample frequencies.
The returned float array `f` contains the frequency bin centers in cycles
per unit of the sample spacing (with zero at the start). For instance, if
the sample spacing is in seconds, then the frequency unit is cycles/second.
Given a window length `n` and a sample spacing `d`::
f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
Returns
-------
grid : dask array
Examples
--------
>>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
>>> fourier = np.fft.fft(signal)
>>> n = signal.size
>>> timestep = 0.1
>>> freq = np.fft.fftfreq(n, d=timestep)
>>> freq
array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])
Notes
-----
Borrowed from my Dask Array contribution.
"""
n = int(n)
d = float(d)
r = dask.array.arange(n, dtype=float, chunks=chunks)
return r.map_blocks(_fftfreq_block, dtype=float, n=n, d=d)
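# Example (a sketch): _fftfreq(8, d=0.1, chunks=4) lazily builds the same
# values as numpy.fft.fftfreq(8, d=0.1):
# >>> _fftfreq(8, d=0.1, chunks=4).compute()
# array([ 0.  ,  1.25,  2.5 ,  3.75, -5.  , -3.75, -2.5 , -1.25])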
_sinc = dask.array.ufunc.wrap_elemwise(numpy.sinc)
| bsd-3-clause | 138,433,240,379,861,860 | 22.848485 | 79 | 0.550191 | false |
Geode/Geocoding | geode_geocoding/Google/google.py | 1 | 4126 | # -*- coding: utf-8 -*-
from geode_geocoding.base import Base
from geode_geocoding import IGeocode
from geode_geocoding import IGeocodeReverse
from geode_geocoding.keys import google_key, google_client, google_client_secret
class Google(Base, IGeocode, IGeocodeReverse):
def coder(self, location, **kwargs):
self.provider = 'google'
self.method = 'geocode'
self.url = 'https://maps.googleapis.com/maps/api/geocode/json'
self.location = location
self.client = kwargs.get('client', google_client)
self.client_secret = kwargs.get('client_secret', google_client_secret)
self.params = {
'address': location,
'key': None if self.client and self.client_secret else kwargs.get('key', google_key),
'client': self.client,
'bounds': kwargs.get('bounds', ''),
            'language': kwargs.get('language', ''),
'region': kwargs.get('region', ''),
'components': kwargs.get('components', ''),
}
self._initialize(**kwargs)
def coder_reverse(self, location, **kwargs):
self.provider = 'google'
self.method = 'reverse'
#https://maps.googleapis.com/maps/api/geocode/json?latlng=40.714224,-73.961452
self.url = 'https://maps.googleapis.com/maps/api/geocode/json'
self.location = location
self.short_name = kwargs.get('short_name', True)
self.params = {
'sensor': 'false',
'latlng': '{0}, {1}'.format(location[0], location[1]),
'key': kwargs.get('key', google_key),
'language': kwargs.get('language', ''),
'client': kwargs.get('client', google_client)
}
self._initialize(**kwargs)
def _catch_errors(self):
status = self.parse.get('status')
if not status == 'OK':
self.error = status
def _exceptions(self):
# Build intial Tree with results
if self.parse['results']:
self._build_tree(self.parse.get('results')[0])
# Build Geometry
self._build_tree(self.parse.get('geometry'))
# Parse address components with short & long names
for item in self.parse['address_components']:
for category in item['types']:
self.parse[category]['long_name'] = item['long_name']
self.parse[category]['short_name'] = item['short_name']
@property
def ok(self):
if self.method == 'geocode':
return bool(self.lng and self.lat)
elif self.method == 'reverse':
return bool(self.address)
else:
return False
@property
def lat(self):
return self.parse['location'].get('lat')
@property
def lng(self):
return self.parse['location'].get('lng')
@property
def bbox(self):
south = self.parse['southwest'].get('lat')
west = self.parse['southwest'].get('lng')
north = self.parse['northeast'].get('lat')
east = self.parse['northeast'].get('lng')
return self._get_bbox(south, west, north, east)
@property
def address(self):
return self.parse.get('formatted_address')
@property
def housenumber(self):
return self.parse['street_number'].get('short_name')
@property
def street(self):
return self.parse['route'].get('long_name')
@property
def neighbourhood(self):
return self.parse['neighborhood'].get('short_name')
@property
def city(self):
return self.parse['locality'].get('long_name')
@property
def postal(self):
return self.parse['postal_code'].get('long_name')
@property
def county(self):
return self.parse['administrative_area_level_2'].get('long_name')
@property
def state(self):
return self.parse['administrative_area_level_1'].get('long_name')
@property
def country(self):
return self.parse['country'].get('long_name')
@property
def country_code(self):
return self.parse['country'].get('short_name')
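# Example usage (a sketch; assumes Base._initialize issues the HTTP request
# and fills self.parse, and that a valid key is configured in
# geode_geocoding.keys):
# g = Google()
# g.coder('1600 Amphitheatre Parkway, Mountain View, CA')
# if g.ok:
#     print(g.lat, g.lng, g.address)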
| agpl-3.0 | 7,066,188,980,363,857,000 | 31.488189 | 97 | 0.58507 | false |
beni55/flocker | flocker/node/_deploy.py | 1 | 18591 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.node.test.test_deploy -*-
"""
Deploy applications on nodes.
"""
from zope.interface import Interface, implementer
from characteristic import attributes
from twisted.internet.defer import gatherResults, fail, DeferredList, succeed
from twisted.python.filepath import FilePath
from .gear import GearClient, PortMap, GearEnvironment
from ._model import (
Application, VolumeChanges, AttachedVolume, VolumeHandoff,
)
from ..route import make_host_network, Proxy
from ..volume._ipc import RemoteVolumeManager
from ..common._ipc import ProcessNode
# Path to SSH private key available on nodes and used to communicate
# across nodes.
# XXX duplicate of same information in flocker.cli:
# https://github.com/ClusterHQ/flocker/issues/390
SSH_PRIVATE_KEY_PATH = FilePath(b"/etc/flocker/id_rsa_flocker")
@attributes(["running", "not_running"])
class NodeState(object):
"""
The current state of a node.
:ivar running: A ``list`` of ``Application`` instances on this node
that are currently running or starting up.
:ivar not_running: A ``list`` of ``Application`` instances on this
node that are currently shutting down or stopped.
"""
class IStateChange(Interface):
"""
An operation that changes the state of the local node.
"""
def run(deployer):
"""
Run the change.
:param Deployer deployer: The ``Deployer`` to use.
:return: ``Deferred`` firing when the change is done.
"""
def __eq__(other):
"""
Return whether this change is equivalent to another.
"""
def __ne__(other):
"""
Return whether this change is not equivalent to another.
"""
@implementer(IStateChange)
@attributes(["changes"])
class Sequentially(object):
"""
Run a series of changes in sequence, one after the other.
Failures in earlier changes stop later changes.
"""
def run(self, deployer):
d = succeed(None)
for change in self.changes:
d.addCallback(lambda _, change=change: change.run(deployer))
return d
@implementer(IStateChange)
@attributes(["changes"])
class InParallel(object):
"""
Run a series of changes in parallel.
Failures in one change do not prevent other changes from continuing.
"""
def run(self, deployer):
return gatherResults((change.run(deployer) for change in self.changes),
consumeErrors=True)
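# Example composition (a sketch; old_app and new_app are hypothetical
# Application instances):
# change = Sequentially(changes=[
#     InParallel(changes=[StopApplication(application=old_app)]),
#     InParallel(changes=[StartApplication(application=new_app)]),
# ])
# d = change.run(deployer)  # Deferred that fires once every change is done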
@implementer(IStateChange)
@attributes(["application"])
class StartApplication(object):
"""
Launch the supplied application as a gear unit.
:ivar Application application: The ``Application`` to create and
start.
"""
def run(self, deployer):
application = self.application
if application.volume is not None:
volume = deployer.volume_service.get(application.volume.name)
d = volume.expose_to_docker(application.volume.mountpoint)
else:
d = succeed(None)
if application.ports is not None:
port_maps = map(lambda p: PortMap(internal_port=p.internal_port,
external_port=p.external_port),
application.ports)
else:
port_maps = []
if application.environment is not None:
environment = GearEnvironment(
id=application.name,
variables=application.environment)
else:
environment = None
d.addCallback(lambda _: deployer.gear_client.add(
application.name,
application.image.full_name,
ports=port_maps,
environment=environment
))
return d
@implementer(IStateChange)
@attributes(["application"])
class StopApplication(object):
"""
Stop and disable the given application.
:ivar Application application: The ``Application`` to stop.
"""
def run(self, deployer):
application = self.application
unit_name = application.name
result = deployer.gear_client.remove(unit_name)
def unit_removed(_):
if application.volume is not None:
volume = deployer.volume_service.get(application.volume.name)
return volume.remove_from_docker()
result.addCallback(unit_removed)
return result
@implementer(IStateChange)
@attributes(["volume"])
class CreateVolume(object):
"""
Create a new locally-owned volume.
:ivar AttachedVolume volume: Volume to create.
"""
def run(self, deployer):
return deployer.volume_service.create(self.volume.name)
@implementer(IStateChange)
@attributes(["volume"])
class WaitForVolume(object):
"""
Wait for a volume to exist and be owned locally.
:ivar AttachedVolume volume: Volume to wait for.
"""
def run(self, deployer):
return deployer.volume_service.wait_for_volume(self.volume.name)
@implementer(IStateChange)
@attributes(["volume", "hostname"])
class HandoffVolume(object):
"""
A volume handoff that needs to be performed from this node to another
node.
See :cls:`flocker.volume.VolumeService.handoff` for more details.
:ivar AttachedVolume volume: The volume to hand off.
:ivar bytes hostname: The hostname of the node to which the volume is
meant to be handed off.
"""
def run(self, deployer):
service = deployer.volume_service
destination = ProcessNode.using_ssh(
self.hostname, 22, b"root",
SSH_PRIVATE_KEY_PATH)
return service.handoff(service.get(self.volume.name),
RemoteVolumeManager(destination))
@implementer(IStateChange)
@attributes(["ports"])
class SetProxies(object):
"""
Set the ports which will be forwarded to other nodes.
:ivar ports: A collection of ``Port`` objects.
"""
def run(self, deployer):
results = []
# XXX: Errors in these operations should be logged. See
# https://github.com/ClusterHQ/flocker/issues/296
# XXX: The proxy manipulation operations are blocking. Convert to a
# non-blocking API. See https://github.com/ClusterHQ/flocker/issues/320
for proxy in deployer.network.enumerate_proxies():
try:
deployer.network.delete_proxy(proxy)
except:
results.append(fail())
for proxy in self.ports:
try:
deployer.network.create_proxy_to(proxy.ip, proxy.port)
except:
results.append(fail())
return DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
class Deployer(object):
"""
Start and stop applications.
:ivar VolumeService volume_service: The volume manager for this node.
:ivar IGearClient gear_client: The gear client API to use in
deployment operations. Default ``GearClient``.
:ivar INetwork network: The network routing API to use in
deployment operations. Default is iptables-based implementation.
"""
def __init__(self, volume_service, gear_client=None, network=None):
if gear_client is None:
gear_client = GearClient(hostname=u'127.0.0.1')
self.gear_client = gear_client
if network is None:
network = make_host_network()
self.network = network
self.volume_service = volume_service
def discover_node_configuration(self):
"""
List all the ``Application``\ s running on this node.
:returns: A ``Deferred`` which fires with a ``NodeState``
instance.
"""
volumes = self.volume_service.enumerate()
volumes.addCallback(lambda volumes: set(
volume.name for volume in volumes
if volume.uuid == self.volume_service.uuid))
d = gatherResults([self.gear_client.list(), volumes])
def applications_from_units(result):
units, available_volumes = result
running = []
not_running = []
for unit in units:
# XXX: The container_image will be available on the
# Unit when
# https://github.com/ClusterHQ/flocker/issues/207 is
# resolved.
if unit.name in available_volumes:
# XXX Mountpoint is not available, see
# https://github.com/ClusterHQ/flocker/issues/289
volume = AttachedVolume(name=unit.name, mountpoint=None)
else:
volume = None
application = Application(name=unit.name,
volume=volume)
if unit.activation_state in (u"active", u"activating"):
running.append(application)
else:
not_running.append(application)
return NodeState(running=running, not_running=not_running)
d.addCallback(applications_from_units)
return d
def calculate_necessary_state_changes(self, desired_state,
current_cluster_state, hostname):
"""
Work out which changes need to happen to the local state to match
the given desired state.
Currently this involves the following phases:
1. Change proxies to point to new addresses (should really be
last, see https://github.com/ClusterHQ/flocker/issues/380)
2. Stop all relevant containers.
3. Handoff volumes.
4. Wait for volumes.
5. Create volumes.
6. Start and restart any relevant containers.
:param Deployment desired_state: The intended configuration of all
nodes.
:param Deployment current_cluster_state: The current configuration
of all nodes. While technically this also includes the current
node's state, this information may be out of date so we check
again to ensure we have absolute latest information.
:param unicode hostname: The hostname of the node that this is running
on.
:return: A ``Deferred`` which fires with a ``IStateChange``
provider.
"""
phases = []
desired_proxies = set()
desired_node_applications = []
for node in desired_state.nodes:
if node.hostname == hostname:
desired_node_applications = node.applications
else:
for application in node.applications:
for port in application.ports:
# XXX: also need to do DNS resolution. See
# https://github.com/ClusterHQ/flocker/issues/322
desired_proxies.add(Proxy(ip=node.hostname,
port=port.external_port))
if desired_proxies != set(self.network.enumerate_proxies()):
phases.append(SetProxies(ports=desired_proxies))
d = self.discover_node_configuration()
def find_differences(current_node_state):
current_node_applications = current_node_state.running
all_applications = (current_node_state.running +
current_node_state.not_running)
# Compare the applications being changed by name only. Other
# configuration changes aren't important at this point.
current_state = {app.name for app in current_node_applications}
desired_local_state = {app.name for app in
desired_node_applications}
not_running = {app.name for app in current_node_state.not_running}
# Don't start applications that exist on this node but aren't
# running; instead they should be restarted:
start_names = desired_local_state.difference(
current_state | not_running)
stop_names = {app.name for app in all_applications}.difference(
desired_local_state)
start_containers = [
StartApplication(application=app)
for app in desired_node_applications
if app.name in start_names
]
stop_containers = [
StopApplication(application=app) for app in all_applications
if app.name in stop_names
]
restart_containers = [
Sequentially(changes=[StopApplication(application=app),
StartApplication(application=app)])
for app in desired_node_applications
if app.name in not_running
]
# Find any applications with volumes that are moving to or from
# this node - or that are being newly created by this new
# configuration.
volumes = find_volume_changes(hostname, current_cluster_state,
desired_state)
if stop_containers:
phases.append(InParallel(changes=stop_containers))
if volumes.going:
phases.append(InParallel(changes=[
HandoffVolume(volume=handoff.volume,
hostname=handoff.hostname)
for handoff in volumes.going]))
if volumes.coming:
phases.append(InParallel(changes=[
WaitForVolume(volume=volume)
for volume in volumes.coming]))
if volumes.creating:
phases.append(InParallel(changes=[
CreateVolume(volume=volume)
for volume in volumes.creating]))
start_restart = start_containers + restart_containers
if start_restart:
phases.append(InParallel(changes=start_restart))
d.addCallback(find_differences)
d.addCallback(lambda _: Sequentially(changes=phases))
return d
def change_node_state(self, desired_state,
current_cluster_state,
hostname):
"""
Change the local state to match the given desired state.
:param Deployment desired_state: The intended configuration of all
nodes.
:param Deployment current_cluster_state: The current configuration
of all nodes.
:param unicode hostname: The hostname of the node that this is running
on.
:return: ``Deferred`` that fires when the necessary changes are done.
"""
d = self.calculate_necessary_state_changes(
desired_state=desired_state,
current_cluster_state=current_cluster_state,
hostname=hostname)
d.addCallback(lambda change: change.run(self))
return d
def find_volume_changes(hostname, current_state, desired_state):
"""
Find what actions need to be taken to deal with changes in volume
location between current state and desired state of the cluster.
XXX The logic here assumes the mountpoints have not changed,
and will act unexpectedly if that is the case. See
https://github.com/ClusterHQ/flocker/issues/351 for more details.
XXX The logic here assumes volumes are never added or removed to
existing applications, merely moved across nodes. As a result test
coverage for those situations is not implemented. See
https://github.com/ClusterHQ/flocker/issues/352 for more details.
XXX Comparison is done via volume name, rather than AttachedVolume
objects, until https://github.com/ClusterHQ/flocker/issues/289 is fixed.
:param unicode hostname: The name of the node for which to find changes.
:param Deployment current_state: The old state of the cluster on which the
changes are based.
:param Deployment desired_state: The new state of the cluster towards which
the changes are working.
"""
desired_volumes = {node.hostname: set(application.volume for application
in node.applications
if application.volume)
for node in desired_state.nodes}
current_volumes = {node.hostname: set(application.volume for application
in node.applications
if application.volume)
for node in current_state.nodes}
local_desired_volumes = desired_volumes.get(hostname, set())
local_desired_volume_names = set(volume.name for volume in
local_desired_volumes)
local_current_volume_names = set(volume.name for volume in
current_volumes.get(hostname, set()))
remote_current_volume_names = set()
for volume_hostname, current in current_volumes.items():
if volume_hostname != hostname:
remote_current_volume_names |= set(
volume.name for volume in current)
# Look at each application volume that is going to be running
# elsewhere and is currently running here, and add a VolumeHandoff for
# it to `going`.
going = set()
for volume_hostname, desired in desired_volumes.items():
if volume_hostname != hostname:
for volume in desired:
if volume.name in local_current_volume_names:
going.add(VolumeHandoff(volume=volume,
hostname=volume_hostname))
# Look at each application volume that is going to be started on this
# node. If it was running somewhere else, we want that Volume to be
# in `coming`.
coming_names = local_desired_volume_names.intersection(
remote_current_volume_names)
coming = set(volume for volume in local_desired_volumes
if volume.name in coming_names)
# For each application volume that is going to be started on this node
# that was not running anywhere previously, make sure that Volume is
# in `creating`.
creating_names = local_desired_volume_names.difference(
local_current_volume_names | remote_current_volume_names)
creating = set(volume for volume in local_desired_volumes
if volume.name in creating_names)
return VolumeChanges(going=going, coming=coming, creating=creating)
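# Example (a sketch): if the desired configuration moves the application
# owning volume "db" from this node to "node2.example.com" and newly
# configures an application with volume "web" here, the result is roughly:
# VolumeChanges(going={VolumeHandoff(volume=<db volume>,
#                                    hostname=u"node2.example.com")},
#               coming=set(), creating={<web volume>})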
| apache-2.0 | 2,322,558,771,706,020,000 | 36.709939 | 79 | 0.612232 | false |
redsolution/django-menu-proxy | menuproxy/utils.py | 1 | 10086 | # -*- coding: utf-8 -*-
from django import conf
from django.core.cache import cache
from importpath import importpath
METHODS = (
    'replace',  # Indicates that the point object should be replaced with the object
    'insert',  # Indicates that the object should be added to the list of children of the inside rule
    'children',  # Indicates that the children of the object should be added to the list of children of the inside rule
)
def get_title(menu_proxy, object):
"""Correct value returned by menu_proxy.title function"""
result = menu_proxy.title(object)
if result is None:
return u''
return unicode(result)
def get_url(menu_proxy, object):
"""Correct value returned by menu_proxy.url function"""
result = menu_proxy.url(object)
if result is None:
return u''
return unicode(result)
def get_ancestors(menu_proxy, object):
"""Correct value returned by menu_proxy.ancestors function"""
result = menu_proxy.ancestors(object)
if result is None:
return []
return [value for value in result]
def get_children(menu_proxy, object, lasy):
"""
Call ``children`` or ``lasy_children`` function for ``menu_proxy``.
Pass to it ``object``.
Correct result.
"""
if lasy:
result = menu_proxy.lasy_children(object)
else:
result = menu_proxy.children(object)
if result is None:
return []
return [value for value in result]
class DoesNotDefined(object):
"""
    Class to indicate that value was not present in rule.
"""
pass
def try_to_import(value, exception_text):
"""
If ``value`` is not None and is not DoesNotDefined
then try to import specified by ``value`` path.
"""
if value is not DoesNotDefined and value is not None:
return importpath(value, exception_text)
return value
def get_rules():
"""Return dictionary of rules with settings"""
rules = cache.get('menuproxy.rules', None)
if rules is not None:
return rules
rules = {}
sequence = {None: []}
def add_to_sequence(rule, value):
if rule not in sequence:
sequence[rule] = []
sequence[rule].append(value)
rules[None] = MenuRule(name=None, method='replace', proxy=None, rules=rules)
for kwargs in getattr(conf.settings, 'MENU_PROXY_RULES', []):
rule = MenuRule(rules=rules, **kwargs)
rules[rule.name] = rule
add_to_sequence(rule.name, rule.name)
add_to_sequence(rule.inside, rule.name)
for name, rule in rules.iteritems():
rule.sequence = [rules[item] for item in sequence[name]]
cache.set('menuproxy.rules', rules)
return rules
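# Hypothetical settings example (a sketch; the keys mirror the keyword
# arguments of MenuRule.__init__ below):
# MENU_PROXY_RULES = [
#     {'name': 'pages', 'method': 'replace',
#      'proxy': 'myapp.menuproxies.PageProxy'},
#     {'name': 'news', 'method': 'insert', 'inside': 'pages',
#      'proxy': 'myapp.menuproxies.NewsProxy',
#      'point': 'myapp.models.get_news_root'},
# ]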
def get_front_page(rules):
"""If MENU_PROXY_FRONT_PAGED is True and there is front page return MenuItem for it"""
front_page = cache.get('menuproxy.front_page', DoesNotDefined)
if front_page is not DoesNotDefined:
return front_page
front_page = None
if getattr(conf.settings, 'MENU_PROXY_FRONT_PAGED', True):
root = MenuItem(None, DoesNotDefined)
children = root.children(False)
if children:
front_page = children[0]
cache.set('menuproxy.front_page', front_page)
return front_page
class MenuRule(object):
"""Rule"""
def __init__(self, name, method, proxy, rules, inside=None,
model=DoesNotDefined, point=DoesNotDefined, object=DoesNotDefined,
point_function=DoesNotDefined, object_function=DoesNotDefined, **other):
self.name = name
self.method = method
assert self.method in METHODS, 'menuproxy does`t support method: %s' % self.method
self.inside = inside
self.model = try_to_import(model, 'model class')
self.point = try_to_import(point, 'mount point')
if callable(self.point) and self.point is not DoesNotDefined:
self.point = self.point()
if self.point is DoesNotDefined:
self.point_function = try_to_import(point_function, 'mount point function')
else:
self.point_function = DoesNotDefined
self.object = try_to_import(object, 'mount object')
if callable(self.object) and self.object is not DoesNotDefined:
self.object = self.object()
if self.object is DoesNotDefined:
self.object_function = try_to_import(object_function, 'mount object function')
else:
self.object_function = DoesNotDefined
self.proxy = try_to_import(proxy, 'MenuProxy class')
other.update(self.__dict__)
if callable(self.proxy) and self.proxy is not DoesNotDefined:
self.proxy = self.proxy(**other)
self.rules = rules
self.sequence = []
def _get_point(self, object, forward):
if self.point is not DoesNotDefined:
return self.point
elif self.point_function is not DoesNotDefined:
return self.point_function(object, forward)
else:
return DoesNotDefined
def _get_object(self, object, forward):
if self.object is not DoesNotDefined:
return self.object
elif self.object_function is not DoesNotDefined:
return self.object_function(object, forward)
else:
return DoesNotDefined
def forward_point(self, object):
return self._get_point(object, True)
def backward_point(self, object):
return self._get_point(object, False)
def forward_object(self, object):
return self._get_object(object, True)
def backward_object(self, object):
return self._get_object(object, False)
class MenuItem(object):
"""Objects of this class will be send to templates. Class provide to walk through nested rules"""
active = False
current = False
def __init__(self, name=None, object=None):
if isinstance(object, MenuItem):
self.rules = object.rules
self.name, self.object = object.name, object.object
else:
self.rules = get_rules()
for rule in self.rules[name].sequence:
if rule.name != name and rule.method == 'replace':
point = rule.forward_point(object)
if point is DoesNotDefined or point == object:
self.name, self.object = rule.name, rule.forward_object(object)
break
else:
self.name, self.object = name, object
self.front_paged_ancestors = False
def title(self):
"""Returns title for object"""
if hasattr(self, '_title'):
return getattr(self, '_title')
title = get_title(self.rules[self.name].proxy, self.object)
setattr(self, '_title', title)
return title
def url(self):
"""Returns url for object"""
if hasattr(self, '_url'):
return getattr(self, '_url')
url = get_url(self.rules[self.name].proxy, self.object)
setattr(self, '_url', url)
return url
def ancestors(self):
"""Returns ancestors for object, started from top level"""
if hasattr(self, '_ancestors'):
return getattr(self, '_ancestors')
ancestors = []
name = self.name
object = self.object
while True:
items = get_ancestors(self.rules[name].proxy, object)
until = self.rules[name].backward_object(object)
items.reverse()
for item in items:
ancestors.insert(0, MenuItem(name, item))
if item == until:
break
method, object, name = self.rules[name].method, self.rules[name].backward_point(object), self.rules[name].inside
if name is None:
break
if method != 'replace':
ancestors.insert(0, MenuItem(name, object))
front_page = get_front_page(self.rules)
if front_page is not None:
if not ancestors or ancestors[0].object != front_page.object:
if (front_page.name, front_page.object) != (self.name, self.object):
self.front_paged_ancestors = True
ancestors.insert(0, front_page)
setattr(self, '_ancestors', ancestors)
return ancestors
def ancestors_for_menu(self):
"""
Returns ancestors for show_menu tags.
Ancestors will not contain front page and will contain object itself.
"""
ancestors = self.ancestors()
if self.front_paged_ancestors:
ancestors = ancestors[1:]
else:
ancestors = ancestors[:]
ancestors.append(self)
return ancestors
def children(self, lasy=False):
"""Returns children for object"""
if lasy:
field_name = '_children_lasy'
else:
field_name = '_children'
if hasattr(self, field_name):
return getattr(self, field_name)
children = []
for rule in self.rules[self.name].sequence:
point = rule.forward_point(self.object)
if rule.name == self.name:
children += [MenuItem(self.name, item) for item in get_children(
self.rules[self.name].proxy, self.object, lasy)
]
elif point is DoesNotDefined or point == self.object:
object = rule.forward_object(self.object)
if rule.method == 'insert' and not lasy:
children += [MenuItem(rule.name, object)]
elif rule.method == 'children':
children += [MenuItem(rule.name, item) for item in get_children(
rule.proxy, object, lasy)
]
setattr(self, field_name, children)
return children
| gpl-3.0 | 7,323,693,844,569,859,000 | 35.032727 | 124 | 0.598345 | false |
nipe0324/flask-todo-api | app.py | 1 | 2962 | from flask import Flask, jsonify, abort, url_for, request, make_response
from flask.ext.httpauth import HTTPBasicAuth
app = Flask(__name__)
auth = HTTPBasicAuth()
tasks = [
{
'id': 1,
'title': u'Buy groceries',
'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
'done': False
},
{
'id': 2,
'title': u'Learn Python',
'description': u'Need to find a good Python tutorial on the web',
'done': False
}
]
def make_public_task(task):
new_task = {}
for field in task:
if field == 'id':
new_task['uri'] = url_for('get_task', task_id=task['id'], _external=True)
else:
new_task[field] = task[field]
return new_task
@auth.get_password
def get_password(username):
if username == 'root':
return 'pass'
return None
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 401)
@app.route('/')
def index():
return "Hello, world!"
@app.route('/todo/api/v1.0/tasks', methods=['GET'])
@auth.login_required
def get_tasks():
return jsonify({'tasks': [make_public_task(task) for task in tasks]})
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
@auth.login_required
def get_task(task_id):
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
    return jsonify({'task': task[0]})
@app.route('/todo/api/v1.0/tasks', methods=['POST'])
@auth.login_required
def create_task():
if not request.json or not 'title' in request.json:
abort(400)
task = {
'id': tasks[-1]['id'] + 1,
'title': request.json['title'],
'description': request.json.get('description', ""),
'done': False
}
tasks.append(task)
return jsonify({'task': task}), 201
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['PUT'])
@auth.login_required
def update_task(task_id):
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
if not request.json:
abort(400)
if 'title' in request.json and type(request.json['title']) != unicode:
abort(400)
if 'description' in request.json and type(request.json['description']) is not unicode:
abort(400)
if 'done' in request.json and type(request.json['done']) is not bool:
abort(400)
    task[0]['title'] = request.json.get('title', task[0]['title'])
    task[0]['description'] = request.json.get('description', task[0]['description'])
    task[0]['done'] = request.json.get('done', task[0]['done'])
return jsonify({'task': task[0]})
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['DELETE'])
@auth.login_required
def delete_task(task_id):
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
tasks.remove(task[0])
return jsonify({'result': True})
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
app.run(debug=True)
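# Example requests (a sketch; the credentials match get_password above):
#   curl -u root:pass http://localhost:5000/todo/api/v1.0/tasks
#   curl -u root:pass -H "Content-Type: application/json" \
#        -X POST -d '{"title": "Read a book"}' \
#        http://localhost:5000/todo/api/v1.0/tasks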
| apache-2.0 | -1,418,981,903,589,351,700 | 27.757282 | 90 | 0.599257 | false |
trevor/calendarserver | txdav/caldav/datastore/scheduling/ischedule/test/test_delivery.py | 1 | 2853 | ##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks
from twisted.names import client
from twisted.python.modules import getModule
from twisted.trial import unittest
from twistedcaldav.stdconfig import config
from txdav.caldav.datastore.scheduling.ischedule import utils
from txdav.caldav.datastore.scheduling.ischedule.delivery import ScheduleViaISchedule
class CalDAV (unittest.TestCase):
"""
txdav.caldav.datastore.scheduling.caldav tests
"""
def tearDown(self):
"""
By setting the resolver to None, it will be recreated next time a name
lookup is done.
"""
client.theResolver = None
utils.DebugResolver = None
@inlineCallbacks
def test_matchCalendarUserAddress(self):
"""
Make sure we do an exact comparison on EmailDomain
"""
self.patch(config.Scheduling.iSchedule, "Enabled", True)
self.patch(config.Scheduling.iSchedule, "RemoteServers", "")
# Only mailtos:
result = yield ScheduleViaISchedule.matchCalendarUserAddress("http://example.com/principal/user")
self.assertFalse(result)
# Need to setup a fake resolver
module = getModule(__name__)
dataPath = module.filePath.sibling("data")
bindPath = dataPath.child("db.example.com")
self.patch(config.Scheduling.iSchedule, "DNSDebug", bindPath.path)
utils.DebugResolver = None
utils._initResolver()
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user@example.com")
self.assertTrue(result)
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user@example.org")
self.assertFalse(result)
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user@example.org?subject=foobar")
self.assertFalse(result)
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user")
self.assertFalse(result)
# Test when not enabled
ScheduleViaISchedule.domainServerMap = {}
self.patch(config.Scheduling.iSchedule, "Enabled", False)
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user@example.com")
self.assertFalse(result)
| apache-2.0 | 8,254,567,666,733,260,000 | 38.082192 | 110 | 0.716789 | false |
sam-m888/gprime | gprime/plugins/textreport/indivcomplete.py | 1 | 50048 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2009 Nick Hall
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2012 Mathieu MD
# Copyright (C) 2013-2016 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" Complete Individual Report """
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import os
from collections import defaultdict
#------------------------------------------------------------------------
#
# Gprime modules
#
#------------------------------------------------------------------------
from gprime.const import LOCALE as glocale
_ = glocale.translation.gettext
from gprime.lib import EventRoleType, EventType, Person, NoteType
from gprime.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
TableStyle, TableCellStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC,
PARA_ALIGN_CENTER, PARA_ALIGN_RIGHT)
from gprime.display.place import displayer as _pd
from gprime.plug.menu import (BooleanOption, FilterOption, PersonOption,
BooleanListOption)
from gprime.plug.report import Report
from gprime.plug.report import utils
from gprime.plug.report import MenuReportOptions
from gprime.plug.report import Bibliography
from gprime.plug.report import endnotes as Endnotes
from gprime.plug.report import stdoptions
from gprime.utils.file import media_path_full
from gprime.utils.lds import TEMPLES
from gprime.proxy import CacheProxyDb
#------------------------------------------------------------------------
#
# Global variables (ones used in both classes here, that is)
#
#------------------------------------------------------------------------
# _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh
def _T_(value): # enable deferred translations (see Python docs 22.1.3.4)
return value
CUSTOM = _T_("Custom")
# Construct section list and type to group map
SECTION_LIST = []
TYPE2GROUP = {}
for event_group, type_list in EventType().get_menu_standard_xml():
SECTION_LIST.append(event_group)
for event_type in type_list:
TYPE2GROUP[event_type] = event_group
SECTION_LIST.append(CUSTOM)
TYPE2GROUP[EventType.CUSTOM] = CUSTOM
TYPE2GROUP[EventType.UNKNOWN] = _T_("Unknown")
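# e.g. a birth event maps through TYPE2GROUP to the standard XML group that
# contains it, so write-time code can bucket each event into one of the
# SECTION_LIST groups built above, with custom and unknown types getting
# their own sections.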
#------------------------------------------------------------------------
#
# IndivCompleteReport
#
#------------------------------------------------------------------------
class IndivCompleteReport(Report):
""" the Complete Individual Report """
def __init__(self, database, options, user):
"""
Create the IndivCompleteReport object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
filter - Filter to be applied to the people of the database.
The option class carries its number, and the function
returning the list of filters.
cites - Whether or not to include source information.
sort - Whether or not to sort events into chronological order.
grampsid - Whether or not to include any GIDs
images - Whether or not to include images.
sections - Which event groups should be given separate sections.
name_format - Preferred format to display names
incl_private - Whether to include private data
incl_attrs - Whether to include attributes
incl_census - Whether to include census events
incl_notes - Whether to include person and family notes
incl_tags - Whether to include tags
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
self._user = user
menu = options.menu
lang = menu.get_option_by_name('trans').get_value()
self._locale = self.set_locale(lang)
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu, self._locale)
self.database = CacheProxyDb(self.database)
self._db = self.database
self.use_pagebreak = menu.get_option_by_name('pageben').get_value()
self.sort = menu.get_option_by_name('sort').get_value()
self.use_attrs = menu.get_option_by_name('incl_attrs').get_value()
self.use_census = menu.get_option_by_name('incl_census').get_value()
self.use_gid = menu.get_option_by_name('grampsid').get_value()
self.use_images = menu.get_option_by_name('images').get_value()
self.use_notes = menu.get_option_by_name('incl_notes').get_value()
self.use_srcs = menu.get_option_by_name('cites').get_value()
self.use_src_notes = menu.get_option_by_name('incsrcnotes').get_value()
self.use_tags = menu.get_option_by_name('incl_tags').get_value()
filter_option = options.menu.get_option_by_name('filter')
self.filter = filter_option.get_filter()
self.section_list = menu.get_option_by_name('sections').get_selected()
stdoptions.run_name_format_option(self, menu)
self.bibli = None
self.family_notes_list = []
self.mime0 = None
self.person = None
def write_fact(self, event_ref, event, show_type=True):
"""
Writes a single event.
"""
role = event_ref.get_role()
description = event.get_description()
date = self._get_date(event.get_date_object())
place_name = ''
place_endnote = ''
place_handle = event.get_place_handle()
if place_handle:
place = self._db.get_place_from_handle(place_handle)
place_name = _pd.display_event(self._db, event)
place_endnote = self._cite_endnote(place)
# make sure it's translated, so it can be used below, in "combine"
ignore = _('%(str1)s in %(str2)s. ') % {'str1':'', 'str2':''}
date_place = self.combine('%(str1)s in %(str2)s. ', '%s. ',
date, place_name)
if show_type:
# Groups with more than one type
column_1 = self._(self._get_type(event.get_type()))
if role not in (EventRoleType.PRIMARY, EventRoleType.FAMILY):
column_1 = column_1 + ' (' + self._(role.xml_str()) + ')'
# translators: needed for Arabic, ignore otherwise
# make sure it's translated, so it can be used below, in "combine"
ignore = _('%(str1)s, %(str2)s') % {'str1':'', 'str2':''}
column_2 = self.combine('%(str1)s, %(str2)s', '%s',
description, date_place)
else:
# Groups with a single type (remove event type from first column)
column_1 = date
# translators: needed for Arabic, ignore otherwise
# make sure it's translated, so it can be used below, in "combine"
ignore = _('%(str1)s, %(str2)s') % {'str1':'', 'str2':''}
column_2 = self.combine('%(str1)s, %(str2)s', '%s',
description, place_name)
endnotes = self._cite_endnote(event, prior=place_endnote)
self.doc.start_row()
self.write_cell(column_1)
self.doc.start_cell('IDS-NormalCell')
self.doc.start_paragraph('IDS-Normal')
self.doc.write_text(column_2)
if endnotes:
self.doc.start_superscript()
self.doc.write_text(endnotes)
self.doc.end_superscript()
self.doc.end_paragraph()
self.do_attributes(event.get_attribute_list() +
event_ref.get_attribute_list())
for notehandle in event.get_note_list():
note = self._db.get_note_from_handle(notehandle)
text = note.get_styledtext()
note_format = note.get_format()
self.doc.write_styled_note(
text, note_format, 'IDS-Normal',
contains_html=(note.get_type() == NoteType.HTML_CODE))
self.doc.end_cell()
self.doc.end_row()
def write_p_entry(self, label, parent_name, rel_type, pmark=None):
""" write parent entry """
self.doc.start_row()
self.write_cell(label)
if parent_name:
# for example (a stepfather): John Smith, relationship: Step
text = self._('%(parent-name)s, relationship: %(rel-type)s'
) % {'parent-name' : parent_name,
'rel-type' : self._(rel_type)}
self.write_cell(text, mark=pmark)
else:
self.write_cell('')
self.doc.end_row()
def write_note(self):
""" write a note """
notelist = self.person.get_note_list()
notelist += self.family_notes_list
if not notelist or not self.use_notes:
return
self.doc.start_table('note', 'IDS-IndTable')
self.doc.start_row()
self.doc.start_cell('IDS-TableHead', 2)
self.write_paragraph(self._('Notes'), style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
for notehandle in notelist:
note = self._db.get_note_from_handle(notehandle)
text = note.get_styledtext()
note_format = note.get_format()
self.doc.start_row()
self.doc.start_cell('IDS-NormalCell', 2)
self.doc.write_styled_note(
text, note_format, 'IDS-Normal',
contains_html=(note.get_type() == NoteType.HTML_CODE))
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph("IDS-Normal")
self.doc.end_paragraph()
def write_alt_parents(self):
""" write any alternate parents """
family_handle_list = self.person.get_parent_family_handle_list()
if len(family_handle_list) < 2:
return
self.doc.start_table("altparents", "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._('Alternate Parents'),
style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
for family_handle in family_handle_list:
if family_handle == self.person.get_main_parents_family_handle():
continue
family = self._db.get_family_from_handle(family_handle)
# Get the mother and father relationships
frel = ""
mrel = ""
child_handle = self.person.get_handle()
child_ref_list = family.get_child_ref_list()
for child_ref in child_ref_list:
if child_ref.ref == child_handle:
frel = str(child_ref.get_father_relation())
mrel = str(child_ref.get_mother_relation())
father_handle = family.get_father_handle()
if father_handle:
father = self._db.get_person_from_handle(father_handle)
fname = self._name_display.display(father)
mark = utils.get_person_mark(self._db, father)
self.write_p_entry(self._('Father'), fname, frel, mark)
else:
self.write_p_entry(self._('Father'), '', '')
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self._db.get_person_from_handle(mother_handle)
mname = self._name_display.display(mother)
mark = utils.get_person_mark(self._db, mother)
self.write_p_entry(self._('Mother'), mname, mrel, mark)
else:
self.write_p_entry(self._('Mother'), '', '')
self.doc.end_table()
self.doc.start_paragraph("IDS-Normal")
self.doc.end_paragraph()
def get_name(self, person):
""" prepare the name to display """
name = self._name_display.display(person)
if self.use_gid:
return '%(name)s [%(gid)s]' % {'name': name,
'gid': person.get_gid()}
else:
return name
def write_alt_names(self):
""" write any alternate names of the person """
if len(self.person.get_alternate_names()) < 1:
return
self.doc.start_table("altnames", "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._('Alternate Names'),
style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
for name in self.person.get_alternate_names():
name_type = self._(self._get_type(name.get_type()))
self.doc.start_row()
self.write_cell(name_type)
text = self._name_display.display_name(name)
endnotes = self._cite_endnote(name)
self.write_cell(text, endnotes)
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
def write_addresses(self):
""" write any addresses of the person """
alist = self.person.get_address_list()
if len(alist) == 0:
return
self.doc.start_table("addresses", "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._('Addresses'), style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
for addr in alist:
text = utils.get_address_str(addr)
date = self._get_date(addr.get_date_object())
endnotes = self._cite_endnote(addr)
self.doc.start_row()
self.write_cell(date)
self.write_cell(text, endnotes)
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
def write_associations(self):
""" write any associations of the person """
if len(self.person.get_person_ref_list()) < 1:
return
self.doc.start_table("associations", "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._('Associations'), style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
for person_ref in self.person.get_person_ref_list():
endnotes = self._cite_endnote(person_ref)
relationship = person_ref.get_relation()
associate = self._db.get_person_from_handle(person_ref.ref)
associate_name = self._name_display.display(associate)
self.doc.start_row()
self.write_cell(self._(relationship))
self.write_cell(associate_name, endnotes)
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
def write_attributes(self):
""" write any attributes of the person """
attr_list = self.person.get_attribute_list()
if len(attr_list) == 0 or not self.use_attrs:
return
self.doc.start_table("attributes", "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._('Attributes'), style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
for attr in attr_list:
attr_type = attr.get_type().type2base()
self.doc.start_row()
self.write_cell(self._(attr_type))
text = attr.get_value()
endnotes = self._cite_endnote(attr)
self.write_cell(text, endnotes)
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
def write_LDS_ordinances(self):
""" write any LDS ordinances of the person """
ord_list = self.person.get_lds_ord_list()
if len(ord_list) == 0:
return
self.doc.start_table("ordinances", "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._('LDS Ordinance'), style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
self.doc.start_table("ordinances3", "IDS-OrdinanceTable")
self.doc.start_row()
self.write_cell(self._('Type'), style='IDS-Section')
self.write_cell(self._('Date'), style='IDS-Section')
self.write_cell(self._('Status'), style='IDS-Section')
self.write_cell(self._('Temple'), style='IDS-Section')
self.write_cell(self._('Place'), style='IDS-Section')
self.doc.end_row()
for lds_ord in ord_list:
otype = self._(lds_ord.type2str())
date = self._get_date(lds_ord.get_date_object())
status = self._(lds_ord.status2str())
temple = TEMPLES.name(lds_ord.get_temple())
place_name = ''
place_endnote = ''
place_handle = lds_ord.get_place_handle()
if place_handle:
place = self._db.get_place_from_handle(place_handle)
place_name = _pd.display_event(self._db, lds_ord)
place_endnote = self._cite_endnote(place)
endnotes = self._cite_endnote(lds_ord, prior=place_endnote)
self.doc.start_row()
self.write_cell(otype, endnotes)
self.write_cell(date)
self.write_cell(status)
self.write_cell(temple)
self.write_cell(place_name)
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
def write_tags(self):
""" write any tags the person has """
thlist = self.person.get_tag_list()
if len(thlist) == 0 or not self.use_tags:
return
tags = []
self.doc.start_table("tags", "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._('Tags'), style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
for tag_handle in thlist:
tag = self._db.get_tag_from_handle(tag_handle)
tags.append(tag.get_name())
for text in sorted(tags):
self.doc.start_row()
self.write_cell(text, span=2)
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
def write_images(self):
""" write any images the person has """
media_list = self.person.get_media_list()
if (not self.use_images) or (not media_list):
return
i_total = 0
for media_ref in media_list:
media_handle = media_ref.get_reference_handle()
if media_handle:
media = self._db.get_media_from_handle(media_handle)
if media and media.get_mime_type():
if media.get_mime_type().startswith("image"):
i_total += 1
if i_total == 0:
return
# if there is only one image, and it is the first Gallery item, it
        # will be shown at the top, so there's no reason to show it here;
# but if there's only one image and it is not the first Gallery
# item (maybe the first is a PDF, say), then we need to show it
if (i_total == 1) and self.mime0 and self.mime0.startswith("image"):
return
self.doc.start_table("images", "IDS-GalleryTable")
cells = 3 # the GalleryTable has 3 cells
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", cells)
self.write_paragraph(self._('Images'), style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
media_count = 0
image_count = 0
while media_count < len(media_list):
media_ref = media_list[media_count]
media_handle = media_ref.get_reference_handle()
media = self._db.get_media_from_handle(media_handle)
if media is None:
self._user.notify_db_repair(
_('Non existing media found in the Gallery'))
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
return
mime_type = media.get_mime_type()
if not mime_type or not mime_type.startswith("image"):
media_count += 1
continue
description = media.get_description()
if image_count % cells == 0:
self.doc.start_row()
self.doc.start_cell('IDS-NormalCell')
self.write_paragraph(description, style='IDS-ImageCaptionCenter')
utils.insert_image(self._db, self.doc, media_ref, self._user,
align='center', w_cm=5.0, h_cm=5.0)
self.do_attributes(media.get_attribute_list() +
media_ref.get_attribute_list())
self.doc.end_cell()
if image_count % cells == cells - 1:
self.doc.end_row()
media_count += 1
image_count += 1
if image_count % cells != 0:
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
def write_families(self):
""" write any families the person has """
family_handle_list = self.person.get_family_handle_list()
if not len(family_handle_list):
return
self.doc.start_table("three", "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._('Families'),
style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
for family_handle in family_handle_list:
self.doc.start_table("three", "IDS-IndTable")
family = self._db.get_family_from_handle(family_handle)
self.family_notes_list += family.get_note_list()
if self.person.get_handle() == family.get_father_handle():
spouse_id = family.get_mother_handle()
else:
spouse_id = family.get_father_handle()
self.doc.start_row()
self.doc.start_cell("IDS-NormalCell", 2)
if spouse_id:
spouse = self._db.get_person_from_handle(spouse_id)
text = self.get_name(spouse)
mark = utils.get_person_mark(self._db, spouse)
else:
spouse = None
text = self._("unknown")
mark = None
endnotes = self._cite_endnote(family)
self.write_paragraph(text, endnotes=endnotes, mark=mark,
style='IDS-Spouse')
self.doc.end_cell()
self.doc.end_row()
event_ref_list = family.get_event_ref_list()
for event_ref, event in self.get_event_list(event_ref_list):
self.write_fact(event_ref, event)
child_ref_list = family.get_child_ref_list()
if len(child_ref_list):
self.doc.start_row()
self.write_cell(self._("Children"))
self.doc.start_cell("IDS-ListCell")
for child_ref in child_ref_list:
child = self._db.get_person_from_handle(child_ref.ref)
name = self.get_name(child)
mark = utils.get_person_mark(self._db, child)
endnotes = self._cite_endnote(child_ref)
self.write_paragraph(name, endnotes=endnotes, mark=mark)
self.doc.end_cell()
self.doc.end_row()
attr_list = family.get_attribute_list()
if len(attr_list) and self.use_attrs:
self.doc.start_row()
self.write_cell(self._("Attributes"))
self.doc.start_cell("IDS-ListCell")
self.do_attributes(attr_list)
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
ord_list = family.get_lds_ord_list()
if len(ord_list):
self.doc.start_table("ordinances2", "IDS-OrdinanceTable2")
self.doc.start_row()
self.write_cell(self._('LDS Ordinance'))
self.write_cell(self._('Type'), style='IDS-Section')
self.write_cell(self._('Date'), style='IDS-Section')
self.write_cell(self._('Status'), style='IDS-Section')
self.write_cell(self._('Temple'), style='IDS-Section')
self.write_cell(self._('Place'), style='IDS-Section')
self.doc.end_row()
for lds_ord in ord_list:
otype = self._(lds_ord.type2str())
date = self._get_date(lds_ord.get_date_object())
status = self._(lds_ord.status2str())
temple = TEMPLES.name(lds_ord.get_temple())
place_name = ''
place_endnote = ''
place_handle = lds_ord.get_place_handle()
if place_handle:
place = self._db.get_place_from_handle(place_handle)
place_name = _pd.display_event(self._db, lds_ord)
place_endnote = self._cite_endnote(place)
endnotes = self._cite_endnote(lds_ord, prior=place_endnote)
self.doc.start_row()
self.write_cell('')
self.write_cell(otype, endnotes)
self.write_cell(date)
self.write_cell(status)
self.write_cell(temple)
self.write_cell(place_name)
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph('IDS-Normal')
self.doc.end_paragraph()
def get_event_list(self, event_ref_list):
"""
Return a list of (EventRef, Event) pairs. Order by event date
if the user option is set.
"""
event_list = []
for ind, event_ref in enumerate(event_ref_list):
if event_ref:
event = self._db.get_event_from_handle(event_ref.ref)
if event:
if (event.get_type() == EventType.CENSUS
and not self.use_census):
continue
sort_value = event.get_date_object().get_sort_value()
                    # first sort on date; for equal dates, keep the GUI order.
event_list.append((str(sort_value) + "%04i" % ind,
event_ref, event))
if self.sort:
event_list.sort()
return [(item[1], item[2]) for item in event_list]
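    # Illustrative sort key (values are hypothetical): an event whose date
    # sort value is 2415021 and which sits at index 3 in the reference list
    # is keyed as '24150210003', so events with equal dates keep their GUI
    # order when self.sort is enabled.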
def write_section(self, event_ref_list, event_group_sect):
"""
Writes events in a single event group.
"""
self.doc.start_table(event_group_sect, "IDS-IndTable")
self.doc.start_row()
self.doc.start_cell("IDS-TableHead", 2)
self.write_paragraph(self._(event_group_sect), style='IDS-TableTitle')
self.doc.end_cell()
self.doc.end_row()
for event_ref, event in self.get_event_list(event_ref_list):
self.write_fact(event_ref, event)
self.doc.end_table()
self.doc.start_paragraph("IDS-Normal")
self.doc.end_paragraph()
def write_events(self):
"""
Write events. The user can create separate sections for a
pre-defined set of event groups. When an event has a type
contained within a group it is moved from the Individual Facts
section into its own section.
"""
event_dict = defaultdict(list)
event_ref_list = self.person.get_event_ref_list()
for event_ref in event_ref_list:
if event_ref:
event = self._db.get_event_from_handle(event_ref.ref)
group = TYPE2GROUP[event.get_type().value]
if _(group) not in self.section_list:
group = SECTION_LIST[0]
event_dict[group].append(event_ref)
# Write separate event group sections
for group in SECTION_LIST:
if group in event_dict:
self.write_section(event_dict[group], group)
def write_cell(self, text,
endnotes=None, mark=None, style='IDS-Normal', span=1):
""" write a cell """
self.doc.start_cell('IDS-NormalCell', span)
self.write_paragraph(text, endnotes=endnotes, mark=mark, style=style)
self.doc.end_cell()
def write_paragraph(self, text,
endnotes=None, mark=None, style='IDS-Normal'):
""" write a paragraph """
self.doc.start_paragraph(style)
self.doc.write_text(text, mark)
if endnotes:
self.doc.start_superscript()
self.doc.write_text(endnotes)
self.doc.end_superscript()
self.doc.end_paragraph()
def write_report(self):
""" write the report """
plist = self._db.get_person_handles(sort_handles=True)
if self.filter:
ind_list = self.filter.apply(self._db, plist)
else:
ind_list = plist
for count, person_handle in enumerate(ind_list):
self.person = self._db.get_person_from_handle(person_handle)
if self.person is None:
continue
self.family_notes_list = []
self.write_person(count)
def write_person(self, count):
""" write a person """
if count != 0:
self.doc.page_break()
self.bibli = Bibliography(
Bibliography.MODE_DATE|Bibliography.MODE_PAGE)
title1 = self._("Complete Individual Report")
text2 = self._name_display.display(self.person)
mark1 = IndexMark(title1, INDEX_TYPE_TOC, 1)
mark2 = IndexMark(text2, INDEX_TYPE_TOC, 2)
self.doc.start_paragraph("IDS-Title")
self.doc.write_text(title1, mark1)
self.doc.end_paragraph()
self.doc.start_paragraph("IDS-Title")
self.doc.write_text(text2, mark2)
self.doc.end_paragraph()
self.doc.start_paragraph("IDS-Normal")
self.doc.end_paragraph()
name = self.person.get_primary_name()
text = self.get_name(self.person)
mark = utils.get_person_mark(self._db, self.person)
endnotes = self._cite_endnote(self.person)
endnotes = self._cite_endnote(name, prior=endnotes)
family_handle = self.person.get_main_parents_family_handle()
if family_handle:
family = self._db.get_family_from_handle(family_handle)
father_inst_id = family.get_father_handle()
if father_inst_id:
father_inst = self._db.get_person_from_handle(
father_inst_id)
father = self.get_name(father_inst)
fmark = utils.get_person_mark(self._db, father_inst)
else:
father = ""
fmark = None
mother_inst_id = family.get_mother_handle()
if mother_inst_id:
mother_inst = self._db.get_person_from_handle(mother_inst_id)
mother = self.get_name(mother_inst)
mmark = utils.get_person_mark(self._db, mother_inst)
else:
mother = ""
mmark = None
else:
father = ""
fmark = None
mother = ""
mmark = None
media_list = self.person.get_media_list()
p_style = 'IDS-PersonTable2'
self.mime0 = None
if self.use_images and len(media_list) > 0:
media0 = media_list[0]
media_handle = media0.get_reference_handle()
media = self._db.get_media_from_handle(media_handle)
self.mime0 = media.get_mime_type()
if self.mime0 and self.mime0.startswith("image"):
image_filename = media_path_full(self._db, media.get_path())
if os.path.exists(image_filename):
p_style = 'IDS-PersonTable' # this is tested for, also
else:
self._user.warn(_("Could not add photo to page"),
# translators: for French, else ignore
_("%(str1)s: %(str2)s"
) % {'str1' : image_filename,
'str2' : _('File does not exist')})
self.doc.start_table('person', p_style)
self.doc.start_row()
# translators: needed for French, ignore otherwise
ignore = self._("%s:")
self.doc.start_cell('IDS-NormalCell')
self.write_paragraph(self._("%s:") % self._("Name"))
self.write_paragraph(self._("%s:") % self._("Gender"))
self.write_paragraph(self._("%s:") % self._("Father"))
self.write_paragraph(self._("%s:") % self._("Mother"))
self.doc.end_cell()
self.doc.start_cell('IDS-NormalCell')
self.write_paragraph(text, endnotes, mark)
if self.person.get_gender() == Person.MALE:
self.write_paragraph(self._("Male"))
elif self.person.get_gender() == Person.FEMALE:
self.write_paragraph(self._("Female"))
else:
self.write_paragraph(self._("Unknown"))
self.write_paragraph(father, mark=fmark)
self.write_paragraph(mother, mark=mmark)
self.doc.end_cell()
if p_style == 'IDS-PersonTable':
self.doc.start_cell('IDS-NormalCell')
self.doc.add_media(image_filename, "right", 4.0, 4.0,
crop=media0.get_rectangle())
endnotes = self._cite_endnote(media0)
attr_list = media0.get_attribute_list()
if len(attr_list) == 0 or not self.use_attrs:
text = _('(image)')
else:
for attr in attr_list:
attr_type = attr.get_type().type2base()
# translators: needed for French, ignore otherwise
text = self._("%(str1)s: %(str2)s"
) % {'str1' : self._(attr_type),
'str2' : attr.get_value()}
endnotes = self._cite_endnote(attr, prior=endnotes)
self.write_paragraph("(%s)" % text,
endnotes=endnotes,
style='IDS-ImageNote')
endnotes = ''
if endnotes and len(attr_list) == 0:
self.write_paragraph(text, endnotes=endnotes,
style='IDS-ImageNote')
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
self.doc.start_paragraph("IDS-Normal")
self.doc.end_paragraph()
self.write_alt_names()
self.write_events()
self.write_alt_parents()
self.write_families()
self.write_addresses()
self.write_associations()
self.write_attributes()
self.write_LDS_ordinances()
self.write_tags()
self.write_images()
self.write_note()
if self.use_srcs:
if self.use_pagebreak and self.bibli.get_citation_count():
self.doc.page_break()
Endnotes.write_endnotes(self.bibli, self._db, self.doc,
printnotes=self.use_src_notes,
elocale=self._locale)
def combine(self, format_both, format_single, str1, str2):
""" Combine two strings with a given format. """
text = ""
if str1 and str2:
text = self._(format_both) % {'str1' : str1, 'str2' : str2}
elif str1 and not str2:
text = format_single % str1
elif str2 and not str1:
text = format_single % str2
return text
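    # Illustrative behaviour in an English locale (values are hypothetical):
    #   combine('%(str1)s in %(str2)s. ', '%s. ', '1 JAN 1900', 'London')
    #       -> '1 JAN 1900 in London. '
    #   combine('%(str1)s in %(str2)s. ', '%s. ', '', 'London')
    #       -> 'London. '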
def _cite_endnote(self, obj, prior=''):
""" cite any endnotes the person has """
if not self.use_srcs:
return ""
if not obj:
return prior
txt = Endnotes.cite_source(self.bibli, self._db, obj, self._locale)
if not txt:
return prior
if prior:
# translators: needed for Arabic, ignore otherwise
txt = self._('%(str1)s, %(str2)s') % {'str1':prior, 'str2':txt}
return txt
def do_attributes(self, attr_list):
""" a convenience method """
if not self.use_attrs:
return
for attr in attr_list:
attr_type = attr.get_type().type2base()
# translators: needed for French, ignore otherwise
text = self._("%(str1)s: %(str2)s"
) % {'str1' : self._(attr_type),
'str2' : attr.get_value()}
endnotes = self._cite_endnote(attr)
self.write_paragraph(text, endnotes)
#------------------------------------------------------------------------
#
# IndivCompleteOptions
#
#------------------------------------------------------------------------
class IndivCompleteOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__db = dbase
self.__pid = None
self.__filter = None
self.__cites = None
self.__incsrcnotes = None
self._nf = None
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
return self.__filter.get_filter().get_name()
def add_menu_options(self, menu):
################################
category_name = _("Report Options")
################################
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Select the filter to be applied to the report."))
menu.add_option(category_name, "filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Filter Person"))
self.__pid.set_help(_("The center person for the filter."))
menu.add_option(category_name, "pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
self._nf = stdoptions.add_name_format_option(menu, category_name)
self._nf.connect('value-changed', self.__update_filters)
self.__update_filters()
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
sort = BooleanOption(_("List events chronologically"), True)
sort.set_help(_("Whether to sort events into chronological order."))
menu.add_option(category_name, "sort", sort)
pageben = BooleanOption(_("Page break before end notes"), False)
pageben.set_help(
_("Whether to start a new page before the end notes."))
menu.add_option(category_name, "pageben", pageben)
stdoptions.add_localization_option(menu, category_name)
################################
category_name = _("Include")
################################
self.__cites = BooleanOption(_("Include Source Information"), True)
self.__cites.set_help(_("Whether to cite sources."))
menu.add_option(category_name, "cites", self.__cites)
self.__cites.connect('value-changed', self.__sources_changed)
self.__incsrcnotes = BooleanOption(_("Include sources notes"), False)
self.__incsrcnotes.set_help(
_("Whether to include source notes in the Endnotes section. "
"Only works if Include sources is selected."))
menu.add_option(category_name, "incsrcnotes", self.__incsrcnotes)
self.__incsrcnotes.connect('value-changed', self.__sources_changed)
self.__sources_changed()
images = BooleanOption(_("Include Photo/Images from Gallery"), True)
images.set_help(_("Whether to include images."))
menu.add_option(category_name, "images", images)
attributes = BooleanOption(_("Include Attributes"), True)
attributes.set_help(_("Whether to include attributes."))
menu.add_option(category_name, "incl_attrs", attributes)
census = BooleanOption(_("Include Census Events"), True)
census.set_help(_("Whether to include Census Events."))
menu.add_option(category_name, "incl_census", census)
grampsid = BooleanOption(_("Include GID"), False)
grampsid.set_help(_("Whether to include GID next to names."))
menu.add_option(category_name, "grampsid", grampsid)
incl_notes = BooleanOption(_("Include Notes"), True)
incl_notes.set_help(_("Whether to include Person and Family Notes."))
menu.add_option(category_name, "incl_notes", incl_notes)
tags = BooleanOption(_("Include Tags"), True)
tags.set_help(_("Whether to include tags."))
menu.add_option(category_name, "incl_tags", tags)
################################
category_name = _("Sections")
################################
opt = BooleanListOption(_("Event groups"))
opt.set_help(_("Check if a separate section is required."))
for section in SECTION_LIST:
if section != SECTION_LIST[0]:
opt.add_button(_(section), True)
menu.add_option(category_name, "sections", opt)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gid(gid)
nfv = self._nf.get_value()
filter_list = utils.get_person_filters(person,
include_single=True,
name_format=nfv)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
filter_value = self.__filter.get_value()
if filter_value in [0, 2, 3, 4, 5]:
# Filters 0, 2, 3, 4 and 5 rely on the center person
self.__pid.set_available(True)
else:
# The rest don't
self.__pid.set_available(False)
def __sources_changed(self):
"""
If Endnotes are not enabled, disable sources in the Endnotes.
"""
cites_value = self.__cites.get_value()
if cites_value:
self.__incsrcnotes.set_available(True)
else:
self.__incsrcnotes.set_available(False)
def make_default_style(self, default_style):
"""Make the default output style for the Individual Complete Report."""
# Paragraph Styles
font = FontStyle()
font.set_bold(1)
font.set_type_face(FONT_SANS_SERIF)
font.set_size(16)
para = ParagraphStyle()
para.set_alignment(PARA_ALIGN_CENTER)
para.set_top_margin(utils.pt2cm(8))
para.set_bottom_margin(utils.pt2cm(8))
para.set_font(font)
para.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style("IDS-Title", para)
font = FontStyle()
font.set_bold(1)
font.set_type_face(FONT_SANS_SERIF)
font.set_size(12)
font.set_italic(1)
para = ParagraphStyle()
para.set_font(font)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_("The style used for category labels."))
default_style.add_paragraph_style("IDS-TableTitle", para)
font = FontStyle()
font.set_bold(1)
font.set_type_face(FONT_SANS_SERIF)
font.set_size(12)
para = ParagraphStyle()
para.set_font(font)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_("The style used for the spouse's name."))
default_style.add_paragraph_style("IDS-Spouse", para)
font = FontStyle()
font.set_size(12)
para = ParagraphStyle()
para.set_font(font)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style("IDS-Normal", para)
font = FontStyle()
font.set_size(12)
font.set_italic(1)
para = ParagraphStyle()
para.set_font(font)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_('The style used for the section headers.'))
default_style.add_paragraph_style("IDS-Section", para)
font = FontStyle()
font.set_size(8)
para = ParagraphStyle()
para.set_alignment(PARA_ALIGN_RIGHT)
para.set_font(font)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_('A style used for image facts.'))
default_style.add_paragraph_style("IDS-ImageNote", para)
font = FontStyle()
font.set_size(8)
para = ParagraphStyle()
para.set_alignment(PARA_ALIGN_CENTER)
para.set_font(font)
para.set_top_margin(utils.pt2cm(3))
para.set_bottom_margin(utils.pt2cm(3))
para.set_description(_('A style used for image captions.'))
default_style.add_paragraph_style("IDS-ImageCaptionCenter", para)
# Table Styles
tbl = TableStyle()
tbl.set_width(100)
tbl.set_columns(2)
tbl.set_column_width(0, 20)
tbl.set_column_width(1, 80)
default_style.add_table_style("IDS-IndTable", tbl)
tbl = TableStyle()
tbl.set_width(100)
tbl.set_columns(2)
tbl.set_column_width(0, 50)
tbl.set_column_width(1, 50)
default_style.add_table_style("IDS-ParentsTable", tbl)
cell = TableCellStyle()
cell.set_top_border(1)
cell.set_bottom_border(1)
default_style.add_cell_style("IDS-TableHead", cell)
cell = TableCellStyle()
default_style.add_cell_style("IDS-NormalCell", cell)
cell = TableCellStyle()
cell.set_longlist(1)
default_style.add_cell_style("IDS-ListCell", cell)
tbl = TableStyle()
tbl.set_width(100)
tbl.set_columns(3)
tbl.set_column_width(0, 20)
tbl.set_column_width(1, 50)
tbl.set_column_width(2, 30)
default_style.add_table_style('IDS-PersonTable', tbl)
tbl = TableStyle()
tbl.set_width(100)
tbl.set_columns(2)
tbl.set_column_width(0, 20)
tbl.set_column_width(1, 80)
default_style.add_table_style('IDS-PersonTable2', tbl)
tbl = TableStyle()
tbl.set_width(100)
tbl.set_columns(5)
tbl.set_column_width(0, 22) # Type
tbl.set_column_width(1, 22) # Date
tbl.set_column_width(2, 16) # Status
tbl.set_column_width(3, 22) # Temple
tbl.set_column_width(4, 18) # Place
default_style.add_table_style('IDS-OrdinanceTable', tbl)
tbl = TableStyle()
tbl.set_width(100)
tbl.set_columns(6)
tbl.set_column_width(0, 20) # empty
tbl.set_column_width(1, 18) # Type
tbl.set_column_width(2, 18) # Date
tbl.set_column_width(3, 14) # Status
tbl.set_column_width(4, 18) # Temple
tbl.set_column_width(5, 12) # Place
default_style.add_table_style('IDS-OrdinanceTable2', tbl)
tbl = TableStyle()
tbl.set_width(100)
tbl.set_columns(3)
tbl.set_column_width(0, 33)
tbl.set_column_width(1, 33)
tbl.set_column_width(2, 34)
default_style.add_table_style("IDS-GalleryTable", tbl)
Endnotes.add_endnote_styles(default_style)
| gpl-2.0 | -6,826,233,834,222,132,000 | 38.657686 | 79 | 0.551191 | false |
froyobin/horizon | openstack_dashboard/api/swift.py | 1 | 11720 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo.utils import timeutils
import six.moves.urllib.parse as urlparse
import swiftclient
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
FOLDER_DELIMITER = "/"
# Swift ACL
GLOBAL_READ_ACL = ".r:*"
LIST_CONTENTS_ACL = ".rlistings"
class Container(base.APIDictWrapper):
pass
class StorageObject(base.APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
@property
def id(self):
return self.name
class PseudoFolder(base.APIDictWrapper):
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
@property
def id(self):
return '%s/%s' % (self.container_name, self.name)
@property
def name(self):
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
return None
@property
def content_type(self):
return "application/pseudo-folder"
def _objectify(items, container_name):
"""Splits a listing of objects into their appropriate wrapper classes."""
objects = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("subdir", None) is not None:
object_cls = PseudoFolder
else:
object_cls = StorageObject
objects.append(object_cls(item, container_name))
return objects
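# For example (illustrative): an item {'subdir': 'photos/'} is wrapped as a
# PseudoFolder, while {'name': 'photos/cat.jpg', 'bytes': 1024} is wrapped as
# a StorageObject.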
def _metadata_to_header(metadata):
headers = {}
public = metadata.get('is_public')
if public is True:
public_container_acls = [GLOBAL_READ_ACL, LIST_CONTENTS_ACL]
headers['x-container-read'] = ",".join(public_container_acls)
elif public is False:
headers['x-container-read'] = ""
return headers
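# For example (illustrative): _metadata_to_header({'is_public': True}) returns
# {'x-container-read': '.r:*,.rlistings'}, while
# _metadata_to_header({'is_public': False}) returns {'x-container-read': ''}.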
@memoized
def swift_api(request):
endpoint = base.url_for(request, 'object-store')
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
LOG.debug('Swift connection created using token "%s" and url "%s"'
% (request.user.token.id, endpoint))
return swiftclient.client.Connection(None,
request.user.username,
None,
preauthtoken=request.user.token.id,
preauthurl=endpoint,
cacert=cacert,
insecure=insecure,
auth_version="2.0")
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
def swift_get_containers(request, marker=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
marker=marker,
full_listing=True)
container_objs = [Container(c) for c in containers]
    if len(container_objs) > limit:
return (container_objs[0:-1], True)
else:
return (container_objs, False)
def swift_get_container(request, container_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name, "")
else:
data = None
headers = swift_api(request).head_container(container_name)
timestamp = None
is_public = False
public_url = None
try:
is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '')
if is_public:
swift_endpoint = base.url_for(request,
'object-store',
endpoint_type='publicURL')
parameters = urlparse.quote(container_name.encode('utf8'))
public_url = swift_endpoint + '/' + parameters
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
except Exception:
pass
container_info = {
'name': container_name,
'container_object_count': headers.get('x-container-object-count'),
'container_bytes_used': headers.get('x-container-bytes-used'),
'timestamp': timestamp,
'data': data,
'is_public': is_public,
'public_url': public_url,
}
return Container(container_info)
def swift_create_container(request, name, metadata=({})):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
headers = _metadata_to_header(metadata)
swift_api(request).put_container(name, headers=headers)
return Container({'name': name})
def swift_update_container(request, name, metadata=({})):
headers = _metadata_to_header(metadata)
swift_api(request).post_container(name, headers=headers)
return Container({'name': name})
def swift_delete_container(request, name):
    # The container cannot be deleted if it is not empty. The batch removal
    # of objects should be done in swiftclient instead of Horizon.
objects, more = swift_get_objects(request, name)
if objects:
error_msg = unicode(_("The container cannot be deleted "
"since it's not empty."))
exc = exceptions.Conflict(error_msg)
exc._safe_message = error_msg
raise exc
swift_api(request).delete_container(name)
return True
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
kwargs = dict(prefix=prefix,
marker=marker,
limit=limit + 1,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = swift_api(request).get_container(container_name,
**kwargs)
object_objs = _objectify(objects, container_name)
    if len(object_objs) > limit:
return (object_objs[0:-1], True)
else:
return (object_objs, False)
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
# parameter here won't actually help the pagination. For now I am just
# getting the largest number of objects from a container and filtering
# based on those objects.
limit = 9999
objects = swift_get_objects(request,
container_name,
prefix=prefix,
marker=marker,
limit=limit)
filter_string_list = filter_string.lower().strip().split(' ')
def matches_filter(obj):
for q in filter_string_list:
return wildcard_search(obj.name.lower(), q)
return filter(matches_filter, objects[0])
def wildcard_search(string, q):
q_list = q.split('*')
if all(map(lambda x: x == '', q_list)):
return True
elif q_list[0] not in string:
return False
else:
if q_list[0] == '':
tail = string
else:
head, delimiter, tail = string.partition(q_list[0])
return wildcard_search(tail, '*'.join(q_list[1:]))
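# Illustrative behaviour of the recursive matcher above (doctest-style, the
# values are hypothetical):
#
#   >>> wildcard_search('omop_harvest.log', 'omop*log')
#   True
#   >>> wildcard_search('omop_harvest.log', 'x*')
#   False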
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
if swift_object_exists(request, new_container_name, new_object_name):
raise exceptions.AlreadyExists(new_object_name, 'object')
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
return swift_api(request).put_object(new_container_name,
new_object_name,
None,
headers=headers)
def swift_upload_object(request, container_name, object_name,
object_file=None):
headers = {}
size = 0
if object_file:
headers['X-Object-Meta-Orig-Filename'] = object_file.name
size = object_file.size
etag = swift_api(request).put_object(container_name,
object_name,
object_file,
headers=headers)
obj_info = {'name': object_name, 'bytes': size, 'etag': etag}
return StorageObject(obj_info, container_name)
def swift_create_pseudo_folder(request, container_name, pseudo_folder_name):
headers = {}
etag = swift_api(request).put_object(container_name,
pseudo_folder_name,
None,
headers=headers)
obj_info = {
'name': pseudo_folder_name,
'etag': etag
}
return PseudoFolder(obj_info, container_name)
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
def swift_get_object(request, container_name, object_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name,
object_name)
else:
data = None
headers = swift_api(request).head_object(container_name,
object_name)
orig_name = headers.get("x-object-meta-orig-filename")
timestamp = None
try:
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
except Exception:
pass
obj_info = {
'name': object_name,
'bytes': headers.get('content-length'),
'content_type': headers.get('content-type'),
'etag': headers.get('etag'),
'timestamp': timestamp,
}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data)
| apache-2.0 | 6,600,883,313,530,580,000 | 33.369501 | 79 | 0.588908 | false |
martinhbramwell/evalOfFlask | frmwk/forms/attic/demo_forms.py | 1 | 1202 | from flask.ext.wtf import Form, TextField, BooleanField, TextAreaField
from flask.ext.wtf import Required, Length
from flask.ext.babel import gettext
from frmwk.model.mdUser import User
class EditForm(Form):
nickname = TextField('nickname', validators = [Required()])
about_me = TextAreaField('about_me', validators = [Length(min = 0, max = 140)])
def __init__(self, original_nickname, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.original_nickname = original_nickname
def validate(self):
if not Form.validate(self):
return False
if self.nickname.data == self.original_nickname:
return True
if self.nickname.data != User.make_valid_nickname(self.nickname.data):
self.nickname.errors.append(gettext('This nickname has invalid characters. Please use letters, numbers, dots and underscores only.'))
return False
user = User.query.filter_by(nickname = self.nickname.data).first()
if user != None:
self.nickname.errors.append(gettext('This nickname is already in use. Please choose another one.'))
return False
return True
| bsd-3-clause | 4,632,528,655,272,532,000 | 41.928571 | 145 | 0.65807 | false |
pluyckx/kam | kam/modules/plugins/core/base.py | 1 | 2017 | ##\package base
# \brief The base class for a core plugin.
#
# Core plugins are plugins that do not check parameters to keep the machine alive.
# They just execute some code to get a wanted behaviour.
#
# \author Philip Luyckx
# \copyright GNU Public License
# This file is part of Keep Alive Monitor (kam).
#
# Keep Alive Monitor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Keep Alive Monitor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Keep Alive Monitor. If not, see <http://www.gnu.org/licenses/>.
from kam.modules.exceptions.exceptions import KamFunctionNotImplemented
class CoreBase:
## \brief The constructor
def __init__(self):
self._enabled = False
## \brief Check if the core plugin is enabled, and call the _execute function if so.
#
# \public
def execute(self):
if self._enabled:
self._execute()
## \brief The actual implementation to execute when execute() is called
#
# Subclasses must override this function.
def _execute(self):
raise KamFunctionNotImplemented("_execute not implemented in class {0}".format(\
self.__class__.__name__))
## \brief Enable the plugin
#
# \protected
def _enable(self):
self._enabled = True
## \brief disable the plugin
#
# \protected
def _disable(self):
        self._enabled = False
## \brief Check if the plugin is enabled
#
# \public
def isEnabled(self):
return self._enabled
## \brief Load the configuration
#
# \public
#
# \param config The config file in the form of a \e configparser object.
def loadConfig(self, config):
pass
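# Illustrative subclass (a minimal sketch; the class name and behaviour are
# hypothetical):
#
#   class HeartbeatCore(CoreBase):
#       def _execute(self):
#           print("keep-alive tick")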
| gpl-2.0 | -6,864,702,130,165,024,000 | 27.814286 | 85 | 0.714923 | false |
chop-dbhi/omop_harvest | fabfile.py | 1 | 18855 | from __future__ import print_function, with_statement
import os
import sys
import stat
import json
import time
import socket
import etcd
import psycopg2
from functools import wraps
from fabric.api import *
from fabric.colors import red, yellow, white, green
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
__doc__ = """\
Help Doc
"""
# A few setup steps and environment checks
curdir = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(curdir, '.project_config.json')
try:
project_config = json.loads(open(config_file, 'r').read())
except:
project_config = {
"etcd_host": env.etcd_host,
"docker_registry":env.registry_host
}
hidden_output = []
try:
venv_wrap_path = os.environ['WORKON_HOME']
except KeyError:
venv_wrap_path = None
if venv_wrap_path and os.path.exists(os.path.join(venv_wrap_path, 'omop_harvest')):
full_env_path = os.path.join(venv_wrap_path, 'omop_harvest')
else:
full_env_path = os.path.abspath('..')
venv_wrap_path = None
def get_hosts_settings():
# TODO: Will probably have to retain this to support legacy deploy.
# Load all the host settings
try:
hosts = json.loads(open(config_file).read())['hosts']
except KeyError:
abort(red('Error: No host settings are defined in the project configuration'))
# Pop the default settings
# Pre-populated defaults
# for host in hosts:
# base = base_settings.copy()
# base.update(default_settings)
# print(hosts)
# base.update(hosts[host])
# hosts[host] = base
return hosts
# ** Decorators
def virtualenv(path, venv_wrap):
"Wraps a function and prefixes the call with the virtualenv active."
if path is None:
activate = None
else:
activate = os.path.join(path, 'bin/activate')
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
if venv_wrap:
with prefix('source /usr/local/bin/virtualenvwrapper.sh'):
with prefix('workon {0}'.format('omop_harvest')):
return func(*args, **kwargs)
            elif path is not None:
with prefix('source {0}'.format(activate)):
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return inner
return decorator
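# Illustrative usage of the decorator above (a minimal sketch; the task body
# is hypothetical):
#
#   @virtualenv(full_env_path, venv_wrap_path)
#   def run_migrations():
#       local('./bin/manage.py migrate')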
def host_context(func):
"Sets the context of the setting to the current host"
@wraps(func)
def decorator(*args, **kwargs):
hosts = get_hosts_settings()
with settings(**hosts[env.host]):
return func(*args, **kwargs)
return decorator
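# Illustrative usage (the host name is hypothetical): a task decorated with
# @host_context runs with the settings for env.host taken from the "hosts"
# section of .project_config.json, e.g.
#
#   fab -H staging setup_env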
# ---------------------------------------------------------------
# Configuration Commands
# ---------------------------------------------------------------
def set_configuration(noinput=False):
'''
    Takes the settings in the .project_config.json file and writes them to the
    appropriate etcd endpoint for this application.
    fab set_configuration:noinput=True will not prompt for confirmation.
'''
client = etcd.Client(host=project_config['etcd_host'])
config = json.loads(open('.project_config.json', 'r').read())
if noinput or confirm("Are you sure you want to upload your local settings?"):
client.write('/applications/omop_harvest/configuration', json.dumps(config))
def get_configuration(noinput=False):
'''
    Retrieves the application's settings from etcd and generates a local
    settings file.
    fab get_configuration:noinput=True will not prompt for confirmation.
'''
client = etcd.Client(host=project_config['etcd_host'])
try:
etcd_config = client.read('/applications/omop_harvest/configuration')
except KeyError:
abort(red('Error: No host settings found on etcd'))
configuration = json.loads(etcd_config.value)
if configuration == {}:
print(red('Empty configuration found. Aborting'))
sys.exit(1)
# Establish the configuration locally
if noinput or confirm('Are you sure you want to overwrite your local settings?'):
f = open('.project_config.json', 'w')
f.write(json.dumps(configuration, indent=4, sort_keys=True))
f.close()
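# Both tasks use the etcd key /applications/omop_harvest/configuration; the
# stored document can be inspected directly, e.g. (illustrative):
#
#   etcdctl get /applications/omop_harvest/configuration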
# ---------------------------------------------------------------
# Docker Commands
# ---------------------------------------------------------------
# TODO:
# - Continuous Integration. Automatic provisioning of services
def build_container(noinput=False):
# Check git status to make sure our build hash matches our git commit
index_status = local('git status --porcelain', capture=True)
if index_status != '':
abort('Please commit or stash any changes to git before building your container')
try:
get_configuration(noinput)
except:
if not confirm('Unable to retrieve configuration. Would you like to attempt to build this container with locally available settings?'):
sys.exit(1)
git_hash = local('git rev-parse --short HEAD', capture=True)
git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
local('docker build -t omop_harvest-{0}:{1} .'.format(git_branch, git_hash))
def test_container():
git_hash = local('git rev-parse --short HEAD', capture=True)
git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
#local('docker run -i -t -e APP_ENV=test omop_harvest-{0}:{1} test'.format(git_branch, git_hash))
#Temporary: Anticipating new version of ATI Template
local('docker run --link memcache:mc -d -p :8000 -e CID_ENV={0} -e APP_ENV={1} omop_harvest-{2}:{3} test'.format(
env.cid_env,
env.host,
git_branch,
git_hash)
)
#
def build_and_test():
build_container(noinput=True)
test_container()
# Remote Deployment Commands
def pull_repo():
    git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
    local('docker pull {0}/omop_harvest-{1}'.format(project_config['docker_registry'], git_branch))
def push_to_repo():
git_hash = local('git rev-parse --short HEAD', capture=True)
git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
try:
with hide('output'):
local("docker inspect --format='{{{{.id}}}}' omop_harvest-{0}:{1}".format(git_branch, git_hash))
except:
if confirm('Could not find most most recent container. Would you like to build it?'):
build_container()
local('docker tag omop_harvest-{0}:{1} {2}/omop_harvest-{0}:{1}'.format(git_branch, git_hash, project_config['docker_registry']))
local('docker tag omop_harvest-{0}:{1} {2}/omop_harvest-{0}:latest'.format(git_branch, git_hash, project_config['docker_registry']))
local('docker push {0}/omop_harvest-{1}'.format(project_config['docker_registry'], git_branch))
local('docker rmi -f {0}/omop_harvest-{1}:{2}'.format(project_config['docker_registry'], git_branch, git_hash))
@host_context
def deploy(commit='latest'):
run('docker pull {0}/omop_harvest-{1}:{2}'.format(project_config['docker_registry'], env.git_branch, commit))
#container = run('docker run -d -p :8000 -e APP_ENV={0} {1}/omop_harvest-{2}:{3} start'.format(
# env.host,
# project_config['docker_registry'],
# env.git_branch,
# commit)
#)
#Temporary: Anticipating new version of ATI Template
container = run('docker run --hostname=omop-harvest-{2}-{3} --link memcache:mc -d -p :8000 -e CID_ENV={4} -e APP_ENV={0} {1}/omop_harvest-{2}:{3} start'.format(
env.host,
project_config['docker_registry'],
env.git_branch,
commit,
env.cid_env)
)
#
port = run("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(container))
commit_msg = local('git --no-pager log --oneline -1', capture = True)
auth_token = project_config['hipchat']['auth_token']
deploy_msg = 'omop_harvest-{0}:{1} now deployed at http://{2}:{3} <a href="http://{2}:{3}">Open</a> <a href="http://{4}:4001/v2/keys/applications/omop_harvest/status">Status</a> -- {5}'.format(env.git_branch, commit, env.host_string, port, project_config['etcd_host'], commit_msg)
# Notifications
local('curl -d "room_id=529405&from=deployservice&color=yellow" --data-urlencode message="{deploy_msg}" https://cbmi.hipchat.com/v1/rooms/message?auth_token={auth_token}'.format(
deploy_msg=deploy_msg,
auth_token=auth_token
))
client = etcd.Client(host=project_config['etcd_host'])
client.write('/applications/omop_harvest/status/{0}/latest_commit'.format(env.git_branch), commit)
client.write('/applications/omop_harvest/status/{0}/latest_deploy'.format(env.git_branch), 'http://{0}:{1}'.format(env.host_string, port))
print(green('Now Running at http://{0}:{1}'.format(env.host_string, port)))
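# Illustrative invocation of the task above (the host name is hypothetical):
#
#   fab -H staging deploy:commit=abc1234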
@host_context
def setup_env():
"Sets up the initial environment."
parent, project = os.path.split(env.path)
if not exists(parent):
        run('mkdir -p {}'.format(parent))
with cd(parent):
if not exists(project):
run('git clone {repo_url} {project}'.format(project=project, **env))
with cd(project):
run('git checkout {git_branch}'.format(**env))
run('git pull origin {git_branch}'.format(**env))
else:
with cd(project):
run('git checkout {git_branch}'.format(**env))
run('git pull origin {git_branch}'.format(**env))
# ---------------------------------------------------------------
# Template Bootstrap Hooks
# ---------------------------------------------------------------
@virtualenv(full_env_path, venv_wrap_path)
def harvest_bootstrap():
# Handle Settings Configuration
# TODO:
# Perhaps at this point we go out to etcd and
    # find the relevant DB connection settings if
# they exist then we use those here... otherwise
# we fall back to the default sqlite stuff
print('Setup default configuration file')
with hide(*hidden_output):
local('mv .project_config.json.sample .project_config.json')
print('Make test script executable')
mode = stat.S_IMODE(os.stat('run-tests.sh').st_mode)
executable = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod('run-tests.sh', mode | executable)
# Sync DB
print(green('- Creating SQLiteDB.'))
with hide(*hidden_output):
local('./bin/manage.py syncdb --settings=omop_harvest.conf.local')
# Collect Static
print(green('- Collecting Static Files'))
with hide(*hidden_output):
local('./bin/manage.py collectstatic --noinput --settings=omop_harvest.conf.local')
# Migrations
print(green('- Run Migrations'))
with hide(*hidden_output):
local('./bin/manage.py migrate --noinput --settings=omop_harvest.conf.local')
# ---------------------------------------------------------------
# Testing and Continuous Integration Commands
# ---------------------------------------------------------------
def check_for_config(noinput):
if 'project_settings' not in project_config.keys():
if noinput or confirm(red("No configuration found. Would you like to download this applications configuration?")):
get_configuration(noinput=True)
def check_for_pg(database):
'''
Check the current Docker host for an existing instance of the specified
database. If found returns the container ID.
'''
with hide('output', 'running', 'warnings'), settings(warn_only=True):
res = local("docker ps -a | awk '/{0}/ {{ print $1 }}'".format(database), capture=True)
if res:
return res.split("\n")
else:
return None
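# e.g. (illustrative): check_for_pg('omop_harvest_test_db') returns a list of
# matching container IDs such as ['3f2a1b9c01de'], or None when no container
# matches.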
def check_for_mc():
'''
Check the current Docker host for an existing instance of memcache. If
found returns the container ID.
'''
with hide('output', 'running', 'warnings'), settings(warn_only=True):
res = local("docker ps | awk '/memcache/ { print $1 }'", capture=True)
if res:
print(green('Found Memcache running at {0}'.format(res)))
return res.split("\n")
else:
return None
def test_setup(noinput=False):
'''
Examine the project for a proper configuration file.
Examine the existing environment for Harvest app's service dependencies
    (Memcache and Postgres). If they do not exist, create them as containers,
    build the application container and run the ETL command from the
    application against the Postgres DB.
    After the data load is complete, attach the application to the Postgres
    container and to Memcache. Apply the normal bootstrapping procedures
    (syncdb, migrations, collectstatic) and load a fixture containing the test
    user "cbmi" with the default password "chopchop".
'''
DB_CONTAINER_NAME = 'omop_harvest_test_db'
check_for_config(noinput)
dbs = check_for_pg(DB_CONTAINER_NAME)
if dbs:
if noinput or confirm(yellow('It looks like you might already have an instance running on this machine. Do you want to stop and remove the existing containers?')):
with hide('output', 'running'):
print(red('Stopping and removing associated Harvest application containers.'))
local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker stop")
local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker rm")
mc = check_for_mc()
if not mc:
with hide('output', 'running'):
print(green('Starting Memcached Container...'))
local("docker run -d --name memcache ehazlett/memcached")
with hide('output', 'running', 'warnings'):
# Spin up a fresh Postgres instance:
print(green('Starting Postgres Container...'))
        pg_container = local('docker run -p :5432 -d --name omop_harvest_test_db {registry_host}:5000/postgresql'.format(registry_host=project_config['registry_host']), capture=True)
port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(pg_container), capture=True)
time.sleep(2)
# Create DB and User in fresh DB
print(green('Prepare Postgres DB...'))
local('export PGPASSWORD=docker && createdb -h localhost -p {port} -U docker omop_harvest'.format(port=port))
conn = psycopg2.connect(host='localhost', port=port, user='docker', password='docker', database='postgres')
conn.cursor().execute("create user omop_harvest with password 'docker'; ")
conn.commit()
conn.close()
# Build the Application Container to facilitate ETL:
print(green('Building Application Container...'))
local('docker build -t omop_harvest_test .')
# Run ETL on attached Postgres DB
print(green('Start ETL on attached DB'))
local('docker run --link omop_harvest_test_db:db -e APP_ENV=test --name omop_harvest_etl omop_harvest_test etl')
# Wait for ETL process to finish
local('docker wait omop_harvest_etl')
print(green('ETL Complete.'))
local('docker rm omop_harvest_etl')
# Start the application container
print(green('Start Application Container...'))
omop_harvest = local('docker run -d --link omop_harvest_test_db:db --link memcache:mc -p :8000 -e APP_ENV=test --name omop_harvest_test_app omop_harvest_test debug', capture=True)
omop_harvest_port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(omop_harvest), capture=True)
# Sleep to give syncdb and migrations time to run.
time.sleep(10)
print(red('\n***\nomop_harvest Test Instance now running on: http://{0}:{1}'.format(socket.gethostname(), omop_harvest_port)))
def ci_setup(noinput=False):
"Copy down the production omop_harvest database to a fresh postgres container."
# TODO
# - Make sure configuration file exists.
    DB_CONTAINER_NAME = 'omop_harvest_ci_db'
check_for_config(noinput)
dbs = check_for_pg(DB_CONTAINER_NAME)
if dbs:
if noinput or confirm(yellow('It looks like you might already have an instance running on this machine. Do you want to stop and remove the existing containers?')):
with hide('output', 'running'):
print(red('Stopping and removing associated Harvest application containers.'))
local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker stop")
local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker rm")
# Spin up a fresh postgres instance:
with hide('output', 'running', 'warnings'):
print(green('Starting Postgres Container...'))
        pg_container = local('docker run -p :5432 -d --name omop_harvest_ci_db {registry_host}:5000/postgresql'.format(registry_host=project_config['registry_host']), capture=True)
port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(pg_container), capture=True)
time.sleep(2)
print(green('Dump Production DB...'))
db = parse_db(project_config['project_settings']['production']['databases']['default'])
local('export PGPASSWORD={password} && pg_dump -h {host} -U {user} -Fc {database} > tmp.dump'.format(**db))
time.sleep(2)
print(green('Prepare Postgres DB...'))
local('export PGPASSWORD=docker && createdb -h localhost -p {port} -U docker omop_harvest'.format(port=port))
conn = psycopg2.connect(host='localhost', port=port, user='docker', password='docker', database='postgres')
conn.cursor().execute("create user omop_harvest with password 'docker'; ")
conn.commit()
conn.close()
print(green('Restoring Backup to Container...'))
local('export PGPASSWORD=docker && pg_restore -h localhost -p {port} -U docker -d omop_harvest tmp.dump'.format(port=port))
local('rm tmp.dump')
print(green('Building Application Container...'))
local('docker build -t omop_harvest_test .')
print(green('Start Application Container...'))
    omop_harvest = local('docker run -d --link omop_harvest_ci_db:db -p :8000 -e APP_ENV=ci --name omop_harvest_ci omop_harvest_test start', capture=True)
omop_harvest_port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(omop_harvest), capture=True)
print(red('\n***\nomop_harvest Production Clone now running on: http://localhost:{0}'.format(omop_harvest_port)))
| bsd-2-clause | -3,427,390,257,368,080,400 | 42.146453 | 284 | 0.627208 | false |
cfc603/django-twilio-sms-models | django_twilio_sms/models.py | 1 | 12882 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django_twilio.client import twilio_client
from django_twilio.models import Caller
from twilio.rest.exceptions import TwilioRestException
from .signals import response_message, unsubscribe_signal
from .utils import AbsoluteURI
# Abstract Models
class CreatedUpdated(models.Model):
date_created = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@python_2_unicode_compatible
class Sid(CreatedUpdated):
sid = models.CharField(max_length=34, primary_key=True)
def __str__(self):
return '{}'.format(self.sid)
class Meta:
abstract = True
# Message Model ForeignKeys
class Account(Sid):
# account type choices
TRIAL = 0
FULL = 1
ACCOUNT_TYPE_CHOICES = (
(TRIAL, 'Trial'),
(FULL, 'Full'),
)
# status choices
ACTIVE = 0
SUSPENDED = 1
CLOSED = 2
STATUS_CHOICES = (
(ACTIVE, 'active'),
(SUSPENDED, 'suspended'),
(CLOSED, 'closed'),
)
friendly_name = models.CharField(max_length=64)
account_type = models.PositiveSmallIntegerField(
choices=ACCOUNT_TYPE_CHOICES
)
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES)
owner_account_sid = models.ForeignKey('self', null=True)
@classmethod
def get_account_type_choice(cls, account_type_display):
for choice in cls.ACCOUNT_TYPE_CHOICES:
if account_type_display == choice[1]:
return choice[0]
@classmethod
def get_status_choice(cls, status_display):
for choice in cls.STATUS_CHOICES:
if status_display == choice[1]:
return choice[0]
@classmethod
def get_or_create(cls, account_sid=None, account=None):
if not account_sid:
account_sid = account.sid
try:
return cls.objects.get(sid=account_sid)
except cls.DoesNotExist:
account_obj = cls(sid=account_sid)
account_obj.sync_twilio_account(account)
return account_obj
@property
def twilio_account(self):
return twilio_client.accounts.get(self.sid)
def sync_twilio_account(self, account=None):
if not account:
account = self.twilio_account
self.friendly_name = account.friendly_name
self.account_type = self.get_account_type_choice(account.type)
self.status = self.get_status_choice(account.status)
if account.sid != account.owner_account_sid:
self.owner_account_sid = Account.get_or_create(
account.owner_account_sid
)
self.save()
@python_2_unicode_compatible
class ApiVersion(models.Model):
date = models.DateField(unique=True)
def __str__(self):
return '{}'.format(self.date)
@classmethod
def get_or_create(cls, message_date):
api_version, created = cls.objects.get_or_create(
date=message_date
)
return api_version
@python_2_unicode_compatible
class Currency(models.Model):
code = models.CharField(max_length=3, primary_key=True)
def __str__(self):
return '{}'.format(self.code)
@classmethod
def get_or_create(cls, message_price_unit):
currency, created = cls.objects.get_or_create(code=message_price_unit)
return currency
@python_2_unicode_compatible
class Error(models.Model):
code = models.CharField(max_length=5, primary_key=True)
message = models.CharField(max_length=255)
def __str__(self):
return '{}'.format(self.code)
@classmethod
def get_or_create(cls, message_error_code, message_error_message):
error, created = cls.objects.get_or_create(
code=message_error_code,
defaults={'message': message_error_message}
)
return error
class MessagingService(Sid):
@classmethod
def get_or_create(cls, messaging_service_sid):
messaging_service, created = cls.objects.get_or_create(
sid=messaging_service_sid
)
return messaging_service
@python_2_unicode_compatible
class PhoneNumber(CreatedUpdated):
caller = models.OneToOneField(Caller)
unsubscribed = models.BooleanField(default=False)
def __str__(self):
return '{}'.format(self.caller.phone_number)
@classmethod
def get_or_create(cls, phone_number, unsubscribed=False):
if isinstance(phone_number, cls):
return phone_number
caller, created = Caller.objects.get_or_create(
phone_number=phone_number
)
phone_number_obj, create = cls.objects.get_or_create(
caller=caller, defaults={'unsubscribed': unsubscribed}
)
return phone_number_obj
@property
def as_e164(self):
return self.caller.phone_number.as_e164
def subscribe(self):
self.unsubscribed = False
self.save()
def unsubscribe(self):
self.unsubscribed = True
self.save()
class Message(Sid):
# status choices
ACCEPTED = 0
QUEUED = 1
SENDING = 2
SENT = 3
RECEIVING = 4
RECEIVED = 5
DELIVERED = 6
UNDELIVERED = 7
FAILED = 8
UNKNOWN = 9
STATUS_CHOICES = (
(ACCEPTED, 'accepted'),
(QUEUED, 'queued'),
(SENDING, 'sending'),
(SENT, 'sent'),
(RECEIVING, 'receiving'),
(RECEIVED, 'received'),
(DELIVERED, 'delivered'),
(UNDELIVERED, 'undelivered'),
(FAILED, 'failed'),
)
# direction choices
INBOUND = 0
OUTBOUND_API = 1
OUTBOUND_CALL = 2
OUTBOUND_REPLY = 3
DIRECTION_CHOICES = (
(INBOUND, 'inbound'),
(OUTBOUND_API, 'outbound-api'),
(OUTBOUND_CALL, 'outbound-call'),
(OUTBOUND_REPLY, 'outbound-reply'),
)
UNSUBSCRIBE_MESSAGES = [
'STOP', 'STOPALL', 'UNSUBSCRIBE', 'CANCEL', 'END', 'QUIT'
]
SUBSCRIBE_MESSAGES = ['START', 'YES']
date_sent = models.DateTimeField(null=True)
account = models.ForeignKey(Account)
messaging_service = models.ForeignKey(MessagingService, null=True)
from_phone_number = models.ForeignKey(PhoneNumber, related_name='to_phone')
to_phone_number = models.ForeignKey(PhoneNumber, related_name='from_phone')
body = models.CharField(max_length=160)
num_media = models.PositiveSmallIntegerField()
num_segments = models.PositiveSmallIntegerField()
status = models.PositiveSmallIntegerField(
choices=STATUS_CHOICES, default=QUEUED
)
error = models.ForeignKey(Error, null=True, related_name='error')
direction = models.PositiveSmallIntegerField(choices=DIRECTION_CHOICES)
price = models.DecimalField(max_digits=6, decimal_places=5)
currency = models.ForeignKey(Currency)
api_version = models.ForeignKey(ApiVersion)
@classmethod
def get_direction_choice(cls, direction_display):
for choice in cls.DIRECTION_CHOICES:
if direction_display == choice[1]:
return choice[0]
@classmethod
def get_status_choice(cls, status_display):
for choice in cls.STATUS_CHOICES:
if status_display == choice[1]:
return choice[0]
@classmethod
def get_or_create(cls, message_sid=None, message=None):
if not message_sid:
message_sid = message.sid
try:
return (cls.objects.get(sid=message_sid), False)
except cls.DoesNotExist:
message_obj = cls(sid=message_sid)
message_obj.sync_twilio_message(message)
return (message_obj, True)
@classmethod
def send_message(cls, body, to, from_=settings.TWILIO_DEFAULT_CALLERID):
to_phone_number = PhoneNumber.get_or_create(to)
from_phone_number = PhoneNumber.get_or_create(from_)
twilio_message = twilio_client.messages.create(
body=body,
to=to_phone_number.as_e164,
from_=from_phone_number.as_e164,
status_callback=cls.get_status_callback()
)
return cls.get_or_create(message=twilio_message)
@property
def twilio_message(self):
max_retries = getattr(settings, 'DJANGO_TWILIO_SMS_MAX_RETRIES', 5)
retry_sleep = getattr(settings, 'DJANGO_TWILIO_SMS_RETRY_SLEEP', .5)
retries = 0
while True:
try:
return twilio_client.messages.get(self.sid)
except TwilioRestException:
if retries < max_retries:
time.sleep(retry_sleep)
retries = retries + 1
else:
raise
@staticmethod
def get_status_callback():
absolute_uri = AbsoluteURI('django_twilio_sms', 'callback_view')
return absolute_uri.get_absolute_uri()
def check_for_subscription_message(self):
if self.direction is self.INBOUND:
body = self.body.upper().strip()
if body in self.UNSUBSCRIBE_MESSAGES:
self.from_phone_number.unsubscribe()
unsubscribe_signal.send_robust(
sender=self.__class__, message=self, unsubscribed=True
)
elif body in self.SUBSCRIBE_MESSAGES:
self.from_phone_number.subscribe()
unsubscribe_signal.send_robust(
sender=self.__class__, message=self, unsubscribed=False
)
def send_response_message(self):
if self.direction is self.INBOUND:
if not self.from_phone_number.unsubscribed:
action = Action.get_action(self.body)
Message.send_message(
body=action.get_active_response().body,
to=self.from_phone_number,
from_=self.to_phone_number
)
response_message.send_robust(
sender=self.__class__, action=action, message=self
)
def sync_twilio_message(self, message=None):
if not message:
message = self.twilio_message
self.date_sent = message.date_sent
self.account = Account.get_or_create(message.account_sid)
if message.messaging_service_sid:
self.messaging_service = MessagingService.get_or_create(
message.messaging_service_sid
)
self.num_media = message.num_media
self.num_segments = message.num_segments
if message.status:
self.status = self.get_status_choice(message.status)
else:
self.status = self.UNKNOWN
if message.error_code:
self.error = Error.get_or_create(
message.error_code, message.error_message
)
self.direction = self.get_direction_choice(message.direction)
self.price = message.price or '0.0'
self.currency = Currency.get_or_create(message.price_unit)
self.api_version = ApiVersion.get_or_create(message.api_version)
self.from_phone_number = PhoneNumber.get_or_create(message.from_)
self.to_phone_number = PhoneNumber.get_or_create(message.to)
self.body = message.body
self.check_for_subscription_message()
self.save()
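# A minimal usage sketch for Message.send_message, not part of the original
# module. It assumes a configured Twilio account, the TWILIO_DEFAULT_CALLERID
# setting, and a reachable database; the destination number is a placeholder.
def _example_send_message():
    message, created = Message.send_message(
        body='Hello from django-twilio-sms',
        to='+15005550006',  # placeholder destination, swap in a real number
    )
    # send_message returns the stored Message instance and a created flag
    return message.status, created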
@python_2_unicode_compatible
class Action(CreatedUpdated):
name = models.CharField(max_length=50, unique=True)
active = models.BooleanField(default=True)
def __str__(self):
return '{}'.format(self.name)
@classmethod
def get_action(cls, message_body):
try:
return cls.objects.get(
name=message_body.strip().upper(), active=True
)
except cls.DoesNotExist:
return cls.objects.get(name='UNKNOWN', active=True)
def get_active_response(self):
return self.response_set.filter(active=True)[0]
def save(self, *args, **kwargs):
self.name = self.name.upper()
super(Action, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Response(CreatedUpdated):
body = models.CharField(max_length=160)
action = models.ForeignKey(Action)
active = models.BooleanField(default=True)
def __str__(self):
return 'Response for {}'.format(self.action)
def save(self, *args, **kwargs):
if self.active:
try:
current = Response.objects.get(action=self.action, active=True)
if self != current:
current.active = False
current.save()
except Response.DoesNotExist:
pass
super(Response, self).save(*args, **kwargs)
| bsd-3-clause | 5,583,914,624,737,663,000 | 28.682028 | 79 | 0.613957 | false |
voanna/Deep-Features-or-Not | src/extract_features_no_finetune_temperature.py | 1 | 1123 | #!/usr/bin/env python
from __future__ import print_function
from extractCaffeActivations import features
import argparse
import HONHelpers as hon
import itertools
import os
import glob
layers = [
'pool1',
'pool2',
'pool3',
'pool4',
'pool5',
'fc6',
'fc7',
]
parser = argparse.ArgumentParser()
parser.add_argument("job_id", help="indexes the job of extracting features", type=int)
args = parser.parse_args()
job_config_list = [pair for pair in itertools.product(hon.webcams, ['train', 'test'])]
# grid engine jobs start with 1
job_id = args.job_id - 1
job_config = job_config_list[job_id]
webcam, split = job_config
print(webcam, split)
finetune_root = os.path.join(hon.experiment_root, 'finetune-temperature', 'no-finetune-features')
img_fnames = sorted(glob.glob(os.path.join(hon.hon_data_root, webcam, 'imgs_align', '*' + split + '*.png')))
deploy = hon.VGG16_deploy_path
weights = hon.VGG16_caffemodel_path
layer = 'fc7'
save_directory = os.path.join(finetune_root, webcam)
_ = features(deploy, weights, img_fnames, layer, save_directory, layers, mean_npy = None)
| mit | -2,117,628,950,225,300,000 | 23.413043 | 108 | 0.693678 | false |
wdecoster/nanoget | nanoget/extraction_functions.py | 1 | 18527 | import logging
from functools import reduce
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import re
from Bio import SeqIO
import concurrent.futures as cfutures
from itertools import repeat
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["channel", "start_time", "duration", "sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["channelIDs", "time", "duration", "lengths", "quals", "barcode"]
else:
datadf.columns = ["channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
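# A minimal usage sketch (assumption: a standard albacore summary file named
# 'sequencing_summary.txt' from a 1D run exists in the working directory):
def _example_process_summary():
    df = process_summary('sequencing_summary.txt', readtype='1D',
                         barcoded=False)
    # the returned DataFrame holds channelIDs, time, duration, lengths, quals
    return df["lengths"].median()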
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
    - exist
    - have an index (created if necessary)
    - be sorted by coordinate
    - contain at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, ut.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
if len(chromosomes) > 100 or kwargs["huge"]:
logging.info("Nanoget: lots of contigs (>100) or --huge, not running in separate processes")
datadf = pd.DataFrame(
data=extract_from_bam(bam, None, kwargs["keep_supp"]),
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
else:
unit = chromosomes
with cfutures.ProcessPoolExecutor(max_workers=kwargs["threads"]) as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam,
repeat(bam),
unit,
repeat(kwargs["keep_supp"]))
for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info(f"Nanoget: bam {bam} contains {datadf['lengths'].size} primary alignments.")
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
if len(chromosomes) > 100:
unit = [None]
logging.info("Nanoget: lots of contigs (>100), not running in separate processes")
else:
unit = chromosomes
with cfutures.ProcessPoolExecutor(max_workers=kwargs["threads"]) as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam,
repeat(cram), unit, repeat(kwargs["keep_supp"]))
for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info(f"Nanoget: cram {cram} contains {datadf['lengths'].size} primary alignments.")
return ut.reduce_memory_usage(datadf)
def extract_from_bam(bam, chromosome, keep_supplementary=True):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
samfile = pysam.AlignmentFile(bam, "rb")
if keep_supplementary:
return [
(read.query_name,
ut.ave_qual(read.query_qualities),
ut.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary and not read.is_unmapped]
else:
return [
(read.query_name,
ut.ave_qual(read.query_qualities),
ut.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary and not read.is_unmapped and not read.is_supplementary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
match = reduce(lambda x, y: x + y[1] if y[0] in (0, 7, 8) else x, read.cigartuples, 0)
ins = reduce(lambda x, y: x + y[1] if y[0] == 1 else x, read.cigartuples, 0)
delt = reduce(lambda x, y: x + y[1] if y[0] == 2 else x, read.cigartuples, 0)
alignment_length = match + ins + delt
try:
return (1 - read.get_tag("NM") / alignment_length) * 100
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples)) /
alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
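# Worked example for get_pID (sketch): a read with CIGAR 60M2I3D has
# match=60, ins=2 and delt=3, so alignment_length = 65; with an NM tag of 5
# the percent identity is (1 - 5/65) * 100, i.e. roughly 92.3.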
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
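# A minimal usage sketch (the file names are assumptions for illustration):
def _example_handle_compressed_input():
    fq = handle_compressed_input('reads.fastq.gz')  # gzip text handle
    fa = handle_compressed_input('genome.fa', file_type='fasta')  # plain open
    return fq, fa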
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield ut.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
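# A minimal usage sketch combining the two helpers above (the filename is an
# assumption for illustration):
def _example_extract_from_fastq():
    with handle_compressed_input('reads.fastq') as fq:
        # each item is an (average quality, read length) tuple
        return list(extract_from_fastq(fq))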
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
ut.ave_qual(rec.letter_annotations["phred_quality"]),
None)
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 / RFC 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(ut.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
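# A minimal usage sketch for readfq (the filename is an assumption; qual is
# None when the record comes from a fasta file):
def _example_readfq():
    with open('reads.fastq') as fp:
        return [(name, len(seq)) for name, seq, qual in readfq(fp)]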
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
| gpl-3.0 | -1,263,447,835,598,286,300 | 38.169133 | 100 | 0.616344 | false |
JioCloud/oslo.vmware | oslo/vmware/common/loopingcall.py | 2 | 4594 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from eventlet import event
from eventlet import greenthread
from oslo.vmware.openstack.common.gettextutils import _
from oslo.vmware.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
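# A minimal usage sketch, not part of the original module: the callback below
# is an assumption for illustration. It runs every 0.1 s until it raises
# LoopingCallDone, at which point wait() returns the carried value.
def _example_fixed_interval_call():
    counter = {'runs': 0}

    def _tick():
        counter['runs'] += 1
        if counter['runs'] >= 3:
            raise LoopingCallDone(retvalue=counter['runs'])

    timer = FixedIntervalLoopingCall(_tick)
    timer.start(interval=0.1)
    return timer.wait()  # -> 3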
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
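# A minimal usage sketch, not part of the original module: the polling
# callback is an assumption. It returns how long to sleep before the next
# call and raises LoopingCallDone to end the loop.
def _example_dynamic_call():
    state = {'calls': 0}

    def _poll():
        state['calls'] += 1
        if state['calls'] >= 3:
            raise LoopingCallDone(retvalue=state['calls'])
        return 0.5  # sleep half a second before the next call

    return DynamicLoopingCall(_poll).start(periodic_interval_max=1.0).wait()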
| apache-2.0 | -2,428,121,379,658,182,700 | 30.682759 | 78 | 0.585329 | false |
slabanja/ase | ase/gui/simulation.py | 1 | 5168 | "Base class for simulation windows"
import gtk
from ase.gui.widgets import oops, pack, help
from ase import Atoms
class Simulation(gtk.Window):
def __init__(self, gui):
gtk.Window.__init__(self)
self.gui = gui
def packtext(self, vbox, text, label=None):
"Pack an text frame into the window."
pack(vbox, gtk.Label(""))
txtframe = gtk.Frame(label)
txtlbl = gtk.Label(text)
txtframe.add(txtlbl)
txtlbl.show()
pack(vbox, txtframe)
pack(vbox, gtk.Label(""))
def packimageselection(self, outerbox, txt1=" (rerun simulation)",
txt2=" (continue simulation)"):
"Make the frame for selecting starting config if more than one."
self.startframe = gtk.Frame("Select starting configuration:")
pack(outerbox, [self.startframe])
vbox = gtk.VBox()
self.startframe.add(vbox)
vbox.show()
self.numconfig_format = "There are currently %i configurations loaded."
self.numconfig_label = gtk.Label("")
pack(vbox, [self.numconfig_label])
lbl = gtk.Label("Choose which one to use as the initial configuration")
pack(vbox, [lbl])
self.start_radio_first = gtk.RadioButton(
None, "The first configuration"+txt1+".")
pack(vbox, [self.start_radio_first])
self.start_radio_nth = gtk.RadioButton(self.start_radio_first,
"Configuration number ")
self.start_nth_adj = gtk.Adjustment(0, 0, 1, 1)
self.start_nth_spin = gtk.SpinButton(self.start_nth_adj, 0, 0)
self.start_nth_spin.set_sensitive(False)
pack(vbox, [self.start_radio_nth, self.start_nth_spin])
self.start_radio_last = gtk.RadioButton(self.start_radio_first,
"The last configuration"+txt2+".")
self.start_radio_last.set_active(True)
pack(vbox, self.start_radio_last)
self.start_radio_nth.connect("toggled", self.start_radio_nth_toggled)
self.setupimageselection()
def start_radio_nth_toggled(self, widget):
self.start_nth_spin.set_sensitive(self.start_radio_nth.get_active())
def setupimageselection(self):
"Decide if the start image selection frame should be shown."
n = self.gui.images.nimages
if n <= 1:
self.startframe.hide()
else:
self.startframe.show()
if self.start_nth_adj.value >= n:
self.start_nth_adj.value = n-1
self.start_nth_adj.upper = n-1
self.numconfig_label.set_text(self.numconfig_format % (n,))
def getimagenumber(self):
"Get the image number selected in the start image frame."
nmax = self.gui.images.nimages
if nmax <= 1:
return 0
elif self.start_radio_first.get_active():
return 0
elif self.start_radio_nth.get_active():
return self.start_nth_adj.value
else:
assert self.start_radio_last.get_active()
return nmax-1
def makebutbox(self, vbox, helptext=None):
self.buttons = gtk.HButtonBox()
runbut = gtk.Button("Run")
runbut.connect('clicked', self.run)
closebut = gtk.Button(stock=gtk.STOCK_CLOSE)
closebut.connect('clicked', lambda x: self.destroy())
for w in (runbut, closebut):
self.buttons.pack_start(w, 0, 0)
w.show()
if helptext:
helpbut = [help(helptext)]
else:
helpbut = []
pack(vbox, helpbut+[self.buttons], end=True, bottom=True)
def setup_atoms(self):
self.atoms = self.get_atoms()
if self.atoms is None:
return False
try:
self.calculator = self.gui.simulation['calc']
except KeyError:
oops("No calculator: Use Calculate/Set Calculator on the menu.")
return False
self.atoms.set_calculator(self.calculator())
return True
def get_atoms(self):
"Make an atoms object from the active image"
images = self.gui.images
if images.natoms < 1:
oops("No atoms present")
return None
n = self.getimagenumber()
        natoms = len(images.P[n]) // images.repeat.prod()
return Atoms(positions=images.P[n,:natoms],
symbols=images.Z[:natoms],
cell=images.A[n],
magmoms=images.M[n],
pbc=images.pbc)
def begin(self, **kwargs):
        if 'progress' in self.gui.simulation:
self.gui.simulation['progress'].begin(**kwargs)
def end(self):
        if 'progress' in self.gui.simulation:
self.gui.simulation['progress'].end()
def prepare_store_atoms(self):
"Informs the gui that the next configuration should be the first."
self.gui.prepare_new_atoms()
self.count_steps = 0
def store_atoms(self):
"Observes the minimization and stores the atoms in the gui."
self.gui.append_atoms(self.atoms)
self.count_steps += 1
| gpl-2.0 | -5,685,937,718,882,974,000 | 36.722628 | 79 | 0.584172 | false |
NS2LPS/pyslave | pyslave/magic.py | 1 | 20094 | """This module defines magic IPython functions to run pyslave from the IPython shell."""
import time, os, logging, inspect, logging.handlers, sys, io
from collections import OrderedDict
import configparser
import traceback
import sys
from matplotlib.pyplot import figure
from IPython.core.magic import register_line_magic, needs_local_scope
from pyslave import instruments, __slave__
from pyslave.slave import SlaveWindow
__slave_window__ = __slave__['window']
# Logger
logger = logging.getLogger('pyslave.magic')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())
# List of resources that are handled
__resources__ = ['VISA', 'NIDAQ', 'COM', 'Other']
# Keep trace of all instruments
__instruments__ = OrderedDict()
# Keep track of opened COM ports and VISA devices
__opened_COM__ = []
__opened_VISA__ = []
__opened_NIDAQ__ = []
# Argument parsing functions
def __arg_split__(line):
"""Split line on whitespace but do not split string parameters."""
res = ['']
line = str(line)
s = line.replace("\"\"\"", chr(240))
single_quote = False
double_quote = False
triple_quote = False
for c in s:
if single_quote or double_quote or triple_quote:
res[-1] += c
            single_quote ^= c == chr(39)
            double_quote ^= c == chr(34)
            triple_quote ^= c == chr(240)
        else:
            if c == ' ':
                res.append('')
            else:
                res[-1] += c
                single_quote = c == chr(39)
                double_quote = c == chr(34)
                triple_quote = c == chr(240)
return [x.replace(chr(240), "\"\"\"" ) for x in res if x]
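# Illustrative behaviour (sketch): quoted parameters stay grouped, e.g.
#   __arg_split__('a "b c" d') -> ['a', '"b c"', 'd']
#   __arg_split__("x 'y z'") -> ['x', "'y z'"]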
########################################################
# Instruments loading and listing magic
########################################################
def __read_config_instruments__():
__config__ = configparser.ConfigParser()
__config__.read(os.path.join(os.path.dirname(__file__), 'pyslave.ini'))
config_instruments = dict()
for resource in __resources__:
if __config__.has_section(resource):
section = __config__[resource]
for k,v in section.items():
if not k.startswith('__'):
vsplit = v.split(' ')
if len(vsplit)==1:
config_instruments[k] = {'resource':resource,'address':vsplit[0],'driver':None}
elif len(vsplit)==2:
config_instruments[k] = {'resource':resource,'address':vsplit[0],'driver':vsplit[1]}
else:
print('Badly formatted line in pyslave.ini:')
print('{0} = {1}'.format(k,v))
return config_instruments
# Not used for the moment
def __read_config_special__(section):
__config__ = configparser.ConfigParser()
__config__.read(os.path.join(os.path.dirname(__file__), 'pyslave.ini'))
config_special = {}
if __config__.has_section(section):
section = __config__[section]
for k,v in section.items():
if k.startswith('__'):
config_special[k] = v
return config_special
def __open__(resource, address, name, driver, local_ns, verbose=False):
if resource=='VISA':
info = instruments.__visa_rm__.resource_info(address)
res_name = info.resource_name
if res_name in __opened_VISA__:
print('{0} is already opened'.format(address))
return None
inst = instruments.openVISA(address, driver, verbose)
name = __get_name__(inst,verbose) if name is None else name
__opened_VISA__.append(res_name)
elif resource=='NIDAQ':
        if address in __opened_NIDAQ__:
            print('{0} is already opened'.format(address))
            return None
inst = instruments.openNIDAQ(address, driver, verbose)
name = __get_name__(inst,verbose) if name is None else name
__opened_NIDAQ__.append(address)
elif resource=='COM':
if address in __opened_COM__:
print('{0} is already opened'.format(address))
return None
inst = instruments.openCOM(address, driver, verbose)
name = __get_name__(inst,verbose) if name is None else name
__opened_COM__.append(address)
elif resource=='Other':
inst = instruments.openOther(address, driver, verbose)
name = __get_name__(inst,verbose) if name is None else name
local_ns[name] = inst
__instruments__[name] = inst
logger.info('Opening {0} {1} as {2} with {3} ({4})'.format(resource, address, name, inst.__driver_name__, inst.__driver_module__))
print('{0:10s} : {1} {2}'.format(name, inst.__inst_id__, inst.__address__))
return inst
def __get_name__(inst, verbose=False):
prefix = inst.__inst_type__
prev = [ int(k[len(prefix):]) for k in __instruments__.keys() if k.startswith(prefix) ]
i = 1
while i in prev:
i += 1
name = prefix + str(i)
if verbose:
inp = input('Instrument name [{0}] : '.format(name))
inp = inp.strip()
name = inp or name
return name
@register_line_magic
@needs_local_scope
def openinstr(line, local_ns):
"""Opens an instrument through a name or address.
The function first looks into the pyslave.ini file. If an
entry is found corresponding to the given name, the corresponding
instrument is opened.
If no matches is found in pyslva.ini:
- if the given name contains COM, the function opens the coresponding COM port
- otherwise, the function assumes the passed name is a VISA alias or address and
tries to open it
A driver can be passed as a second argument, it will override the driver
specified in the pyslave.ini file.
Examples :
# Open by name
openinstr dmm1
# Open by address or alias
openinstr TCPIP::192.168.0.81::INSTR
openinstr ZND
openinstr GPIB0::22::INSTR
    openinstr GPIB0::22::INSTR yokogawa.yokogawa7651.yokogawa7651
"""
args = __arg_split__(line)
instr_name = args[0]
driver = args[1] if len(args)>1 else None
# Look up in the pyslave.ini file
config_instruments = __read_config_instruments__()
if instr_name in config_instruments:
name = instr_name
if name in __instruments__ :
print('{0} already exists. Close it before opening it again.'.format(name))
return
resource = config_instruments[instr_name]['resource']
address = config_instruments[instr_name]['address']
if driver is None:
driver = config_instruments[instr_name].get('driver',None)
__open__(resource, address, instr_name, driver, local_ns, True)
elif 'COM' in instr_name:
__open__('COM', instr_name, None, driver, local_ns, True)
else:
__open__('VISA', instr_name, None, driver, local_ns, True)
@register_line_magic
@needs_local_scope
def closeinstr(line, local_ns):
"""Close the specified instrument."""
name = line.strip()
if not name:
return
logger.info('Closing {0}.'.format(name))
if name not in __instruments__:
print('Unknown instrument {0}.'.format(name))
return
inst = __instruments__[name]
list_resources = {'VISA':__opened_VISA__,'NIDAQ':__opened_NIDAQ__,'COM':__opened_COM__}
l = list_resources.get(inst.__resource__,None)
if l:
l.remove(inst.__address__)
try:
inst.close()
except:
pass
if name in local_ns:
del local_ns[name]
del __instruments__[name]
@register_line_magic
@needs_local_scope
def closeall(line, local_ns):
"""Close all instruments."""
while __instruments__:
name,inst = __instruments__.popitem()
logger.info('Closing {0}.'.format(name))
list_resources = {'VISA':__opened_VISA__,'NIDAQ':__opened_NIDAQ__,'COM':__opened_COM__}
l = list_resources.get(inst.__resource__,None)
if l:
l.remove(inst.__address__)
try:
inst.close()
except:
pass
if name in local_ns:
del local_ns[name]
del inst
@register_line_magic
@needs_local_scope
def openall(line, local_ns):
"""Load all instruments listed in the pyslave.ini file."""
config = __read_config_instruments__()
err = ''
for k,v in config.items():
if k in __instruments__:
print('{0} is already loaded.'.format(k))
else:
try:
__open__(v['resource'],v['address'],k,v['driver'],local_ns)
except:
err = err + '{0} cannot be loaded\n'.format(k)
print(err)
@register_line_magic
@needs_local_scope
def openGPIB(line, local_ns):
"""Load all GPIB instruments."""
for address in instruments.__visa_rm__.list_resources('GPIB?*::INSTR'):
try:
__open__('VISA',address,None,None,local_ns)
except:
traceback.print_exc(limit=1,file=sys.stdout)
print('Error while opening {0}.'.format(address))
@register_line_magic
def listall(line):
"""List all loaded instruments."""
for k,v in __instruments__.items():
print('{0:10s} : {1} {2}'.format(k, v.__inst_id__, v.__address__))
@register_line_magic
def listVISA(line):
"""List all available VISA instruments."""
instruments.__visa_rm__.__update__()
for address in instruments.__visa_rm__.__list_resources_cached__:
print(address)
@register_line_magic
def resetVISA(line):
"""Reset VISA connection.
Close instruments before running this function"""
instruments.resetVISA()
del listall, openall, openinstr, openGPIB, closeinstr, closeall, listVISA, resetVISA
########################################################
# Scripts launching, pausing, resuming, aborting magic
########################################################
class SlaveError(Exception):
pass
def __replace__(line, add_pause):
line = line.expandtabs(4)
line = line.replace('#draw','thread.draw()')
line = line.replace('#pause','thread.pause()')
if '#abort' in line:
if add_pause and line.strip().startswith('#abort'):
line = line.replace('#abort','thread.pause()') + '\n ' + line.replace('#abort','if thread.stopflag : break')
else:
line = line.replace('#abort','if thread.stopflag : break')
line = line.replace('#looptime(','thread.looptime(')
line = line.replace('#looptime','thread.looptime()')
line = line.replace('#disp', 'thread.display')
line = line.replace('#live(','thread.live(')
return line
def __convert__(filename):
"""Convert a python script so that it can be called by slave.
    Returns the converted script text as a string."""
with open(filename,'r') as f:
script = f.read()
if '#main' not in script:
raise SlaveError('Could not find #main section in {0}'.format(filename))
header, main = [s.strip() for s in script.split('#main',maxsplit=1)]
with io.StringIO() as f:
print('# Auto generated script file',file=f)
print('',file=f)
# Put back header
print(header, file=f)
print('', file=f)
# Create script function
print('# Main script function', file=f)
print('def __slave_script__(thread):', file=f)
add_pause = '#pause' not in main
for l in main.split('\n'):
print(" ",__replace__(l, add_pause), file=f)
output = f.getvalue()
return output
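# Illustrative conversion (sketch): a script such as
#
#   x = 0
#   #main
#   for i in range(10):
#       x += 1
#       #abort
#
# keeps its header ('x = 0') as-is, while the '#main' section becomes the
# body of __slave_script__(thread); since the script has no '#pause', the
# '#abort' marker expands to 'thread.pause()' followed by
# 'if thread.stopflag : break'.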
def __findline__(target, filename):
target = target.strip()
i = 0
with open(filename,'r') as f:
for line in f:
i += 1
if line.strip().startswith(target):
msg = ["""File "{0}", line {1}\n""".format(filename, i), line]
break
else:
msg = None
return msg
def __start_slave__(script, filename, local_ns):
"""Start Slave thread with the passed code"""
global __slave_window__
try:
code = compile(script, "Converted " + filename, 'exec')
except:
print('Error while compiling {0}:'.format(filename))
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = traceback.format_exception_only(exc_type, exc_value)
if exc_type is SyntaxError:
res = __findline__(msg[1], filename)
if res is not None:
msg[0] = res[0]
else:
msg = msg[1:]
for s in msg:
print(s, end='')
return
glob = globals()
for k,v in local_ns.items():
if not k.startswith('_'):
glob[k]=v
locals = dict()
try:
exec(code, glob, locals)
except:
print('Error while executing {0}:'.format(filename))
exc_type, exc_value, exc_traceback = sys.exc_info()
for f,l in traceback.walk_tb(exc_traceback):
line = l-1
try:
res = __findline__(script.splitlines()[line], filename)
except:
res = None
if res is not None:
for s in res:
print(s, end='')
for s in traceback.format_exception_only(exc_type, exc_value):
print(s, end='')
return
local_ns.update(locals)
glob.update(locals)
if __slave_window__ is None:
__slave_window__ = SlaveWindow()
__slave__['window'] = __slave_window__
__slave_window__.show()
__slave_window__.thread_start(__slave_script__, script.splitlines())
logger.info('Starting script {0}:\n{1}'.format(filename, script))
@register_line_magic
@needs_local_scope
def call(filename, local_ns):
"""Convert and launch a script in slave."""
if not filename.endswith('.py'):
filename = filename + '.py'
try:
script = __convert__(filename)
except :
#traceback.print_exc(file=sys.stdout)
exc_type, exc_value, exc_traceback = sys.exc_info()
print('Error while converting {0}:'.format(filename))
for s in traceback.format_exception_only(exc_type, exc_value):
print(s)
return
__start_slave__(script, filename, local_ns)
@register_line_magic
def convert(filename):
"""Convert a script and show the result in the console."""
if not filename.endswith('.py'):
filename = filename + '.py'
out = __convert__(filename)
print(out)
@register_line_magic
@needs_local_scope
def monitor(line, local_ns):
"""Monitor the output of an instrument and plot it.
The first argument is the function to monitor.
The second optional argument is the time period of the monitoring.
The default value is 1s.
The results are stored in monitor_out.
Examples:
%monitor dmm1
%monitor dmm1 0.2
%monitor instr1.read()
"""
args = __arg_split__(line)
script = """
import time
from pydata import xy
fig = figure()
monitor_out = xy(x=empty(0), y=empty(0))
t0 = time.time()
def __slave_script__(thread):
while True:
val = {0}
thread.display('Monitored value '+str(val))
monitor_out.append(time.time()-t0, val)
monitor_out.plot(fig)
thread.draw()
time.sleep({1})
thread.pause()
if thread.stopflag : break""".format(args[0] if '(' in args[0] else args[0] + '()',
args[1] if len(args)>1 else 1)
__start_slave__(script, 'monitor', local_ns)
print("Results are stored in monitor_out.")
measure_parameters = OrderedDict([
('iterable' , ''),
('set_function' , 'dcpwr1(x)'),
('set_sleep' , '0'),
('read_function' , 'dmm1()'),
('read_sleep' , '0'),
('plot','y'),
('filename','iv.txt'),
])
text_input = OrderedDict([
('iterable' , 'Parameter values to iterate over'),
('set_function' , 'Set parameter (parameter variable is x)'),
('set_sleep' , 'Sleep (in s)'),
('read_function' , 'Read value'),
('read_sleep' , 'Sleep (in s)'),
('plot' , 'Plot (y/n)'),
('filename' , 'Save to (space for not saving)'),
])
@register_line_magic
@needs_local_scope
def measure(line, local_ns):
"""Measure the output of an instrument and plot it while scanning a parameter.
Results are stored in measure_out."""
if line :
args = __arg_split__(line)
args = dict([ (args[i],args[i+1]) for i in range(0 ,len(args),2)])
measure_parameters.update(args)
else :
print("Press enter to keep previous value. Abort with many q's (qqqq...).")
for k,v in text_input.items():
inp = input('{0} [{1}] : '.format(v, measure_parameters[k]))
if inp.endswith('qqqq') : return
if inp : measure_parameters[k] = inp.strip()
if '(' not in measure_parameters['read_function'] : measure_parameters['read_function']+= '()'
if '(' not in measure_parameters['set_function'] and '=' not in measure_parameters['set_function'] :
measure_parameters['set_function']+= '(x)'
script = """
import time
from pydata import xy
if '{plot}'=='y': fig = figure()
measure_out = xy(x=array({iterable}), y=ones_like(array({iterable}))*nan)
def __slave_script__(thread):
for i,x in enumerate(measure_out.x):
{set_function}
time.sleep({set_sleep})
y = {read_function}
thread.display('Step ' + str(i+1) + '/' + str(len(measure_out.x)))
thread.looptime()
measure_out.y[i] = y
if '{plot}'=='y':
measure_out.plot(fig)
thread.draw()
time.sleep({read_sleep})
thread.pause()
if thread.stopflag : break
if "{filename}" :
measure_out.save("{filename}")
""".format(**measure_parameters)
__start_slave__(script, 'measure', local_ns)
if not line:
print('To quickly start the same measurement, copy paste the line below : ')
print('measure {0}'.format(' '.join(["{0}='{1}'".format(k,v) for k,v in measure_parameters.items()])))
print("Results are stored in measure_out.")
@register_line_magic
def pause(line):
"""Pause the running script."""
if __slave_window__ is None : return
__slave_window__.on_pushButton_Pause_clicked(echo=True)
@register_line_magic
def resume(line):
"""Resume the paused script."""
if __slave_window__ is None : return
__slave_window__.on_pushButton_Resume_clicked(echo=True)
@register_line_magic
def abort(line):
"""Abort the running script."""
if __slave_window__ is None : return
__slave_window__.on_pushButton_Abort_clicked(echo=True)
@register_line_magic
def kill(line):
"""Kill the running script."""
if __slave_window__ is None : return
__slave_window__.on_pushButton_Kill_clicked(echo=True)
@register_line_magic
def window(line):
"""Show the slave window."""
global __slave_window__
if __slave_window__ is None :
__slave_window__ = SlaveWindow()
__slave__['window'] = __slave_window__
__slave_window__.show()
@register_line_magic
@needs_local_scope
def capture(line, local_ns):
args = __arg_split__(line)
# First argument
func = args[0] if '(' in args[0] else args[0].strip() + '()'
# Second argument
filename = str(args[1]) if len(args)>1 else None
# Optional extra arguments
param = eval('dict({0})'.format(','.join(args[2:])))
# Fetch data
data = eval(func, globals(), local_ns)
# Plot data
exec("capture_fig = figure()", globals(), local_ns)
data.plot(local_ns['capture_fig'])
exec("capture_fig.show()", globals(), local_ns)
local_ns['capture_out'] = data
# Save data to file
if filename :
msg = data.save(filename, **param)
else:
print("Data are stored in capture_out. Figure is capture_fig.")
del call, convert, window, pause, resume, abort, kill, monitor, measure, capture
| mit | -9,023,431,940,506,280,000 | 34.129371 | 134 | 0.57659 | false |
mick-d/nipype | nipype/interfaces/bids_utils.py | 3 | 5108 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Set of interfaces that allow interaction with BIDS data. Currently
available interfaces are:
BIDSDataGrabber: Query data from BIDS dataset using pybids grabbids.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
from os.path import join, dirname
import json
from .. import logging
from .base import (traits,
DynamicTraitedSpec,
Directory,
BaseInterface,
isdefined,
Str,
Undefined)
have_pybids = True
try:
from bids import grabbids as gb
except ImportError:
have_pybids = False
LOGGER = logging.getLogger('workflows')
class BIDSDataGrabberInputSpec(DynamicTraitedSpec):
base_dir = Directory(exists=True,
desc='Path to BIDS Directory.',
mandatory=True)
output_query = traits.Dict(key_trait=Str,
value_trait=traits.Dict,
desc='Queries for outfield outputs')
raise_on_empty = traits.Bool(True, usedefault=True,
desc='Generate exception if list is empty '
'for a given field')
return_type = traits.Enum('file', 'namedtuple', usedefault=True)
class BIDSDataGrabber(BaseInterface):
""" BIDS datagrabber module that wraps around pybids to allow arbitrary
querying of BIDS datasets.
Examples
--------
By default, the BIDSDataGrabber fetches anatomical and functional images
from a project, and makes BIDS entities (e.g. subject) available for
filtering outputs.
>>> bg = BIDSDataGrabber()
>>> bg.inputs.base_dir = 'ds005/'
>>> bg.inputs.subject = '01'
>>> results = bg.run() # doctest: +SKIP
Dynamically created, user-defined output fields can also be defined to
return different types of outputs from the same project. All outputs
are filtered on common entities, which can be explicitly defined as
infields.
>>> bg = BIDSDataGrabber(infields = ['subject'], outfields = ['dwi'])
>>> bg.inputs.base_dir = 'ds005/'
>>> bg.inputs.subject = '01'
>>> bg.inputs.output_query['dwi'] = dict(modality='dwi')
>>> results = bg.run() # doctest: +SKIP
"""
input_spec = BIDSDataGrabberInputSpec
output_spec = DynamicTraitedSpec
_always_run = True
def __init__(self, infields=None, **kwargs):
"""
Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields: list of str
Indicates output fields to be dynamically created.
If no matching items, returns Undefined.
"""
super(BIDSDataGrabber, self).__init__(**kwargs)
if not isdefined(self.inputs.output_query):
self.inputs.output_query = {"func": {"modality": "func"},
"anat": {"modality": "anat"}}
# If infields is empty, use all BIDS entities
        if infields is None and have_pybids:
bids_config = join(dirname(gb.__file__), 'config', 'bids.json')
bids_config = json.load(open(bids_config, 'r'))
infields = [i['name'] for i in bids_config['entities']]
self._infields = infields or []
# used for mandatory inputs check
undefined_traits = {}
for key in self._infields:
self.inputs.add_trait(key, traits.Any)
undefined_traits[key] = kwargs[key] if key in kwargs else Undefined
self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
def _run_interface(self, runtime):
if not have_pybids:
raise ImportError(
"The BIDSEventsGrabber interface requires pybids."
" Please make sure it is installed.")
return runtime
def _list_outputs(self):
layout = gb.BIDSLayout(self.inputs.base_dir)
        # If an infield is not given an input value, silently ignore it
filters = {}
for key in self._infields:
value = getattr(self.inputs, key)
if isdefined(value):
filters[key] = value
outputs = {}
for key, query in self.inputs.output_query.items():
args = query.copy()
args.update(filters)
filelist = layout.get(return_type=self.inputs.return_type, **args)
if len(filelist) == 0:
msg = 'Output key: %s returned no files' % key
if self.inputs.raise_on_empty:
raise IOError(msg)
else:
LOGGER.warning(msg)
filelist = Undefined
outputs[key] = filelist
return outputs
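# A minimal, hypothetical workflow sketch (not executed here); the dataset
# path 'ds005/' and the iterable values are placeholders:
#
#   import nipype.pipeline.engine as pe
#   bg = pe.Node(BIDSDataGrabber(infields=['subject']), name='bids-grabber')
#   bg.inputs.base_dir = 'ds005/'
#   bg.iterables = ('subject', ['01', '02'])
#   # Downstream nodes may connect to the dynamically created output
#   # fields, e.g. 'func' and 'anat' from the default output_query.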
| bsd-3-clause | -78,829,929,764,361,940 | 33.748299 | 79 | 0.584573 | false |
twhyntie/tasl-data-management | wrappers/test_nod.py | 1 | 1166 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#...the usual suspects.
import os, inspect
#...for the unit testing.
import unittest
#...for the logging.
import logging as lg
# The wrapper class to test.
from nod import NOD
class TestNOD(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_nod(self):
## The annotation CSV file.
nod = NOD("testdata/NOD/000000_00_00_00.csv")
# The tests.
# The headers.
self.assertEqual(nod.get_number_of_headers(), 2)
self.assertEqual(nod.get_header(0), "annotation_id")
self.assertEqual(nod.get_header(1), "n_oddities_identified")
# The annotations.
# Test the number of annotations found.
self.assertEqual(nod.get_number_of_annotations(), 88)
if __name__ == "__main__":
lg.basicConfig(filename='log_test_nod.log', filemode='w', level=lg.DEBUG)
lg.info(" *")
lg.info(" *=========================================")
lg.info(" * Logger output from wrappers/test_nod.py ")
lg.info(" *=========================================")
lg.info(" *")
unittest.main()
| mit | -4,170,661,468,903,382,500 | 21 | 77 | 0.559177 | false |
manasapte/pants | src/python/pants/engine/nodes.py | 1 | 19690 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from abc import abstractmethod, abstractproperty
from os.path import dirname
from twitter.common.collections import OrderedSet
from pants.base.project_tree import Dir, File, Link
from pants.build_graph.address import Address
from pants.engine.addressable import parse_variants
from pants.engine.fs import (DirectoryListing, FileContent, FileDigest, ReadLink, file_content,
file_digest, read_link, scan_directory)
from pants.engine.selectors import (Select, SelectDependencies, SelectLiteral, SelectProjection,
SelectVariant)
from pants.engine.struct import HasProducts, Variants
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
class ConflictingProducersError(Exception):
"""Indicates that there was more than one source of a product for a given subject.
TODO: This will need to be legal in order to support multiple Planners producing a
(mergeable) Classpath for one subject, for example. see:
https://github.com/pantsbuild/pants/issues/2526
"""
@classmethod
def create(cls, subject, product, matches):
"""Factory method to format the error message.
This is provided as a workaround to http://bugs.python.org/issue17296 to make this exception
picklable.
"""
msgs = '\n '.join('{}:\n {}'.format(k, v) for k, v in matches)
return ConflictingProducersError('More than one source of {} for {}:\n {}'
.format(product.__name__, subject, msgs))
def __init__(self, message):
super(ConflictingProducersError, self).__init__(message)
class State(object):
@classmethod
def raise_unrecognized(cls, state):
raise ValueError('Unrecognized Node State: {}'.format(state))
class Noop(datatype('Noop', ['format_string', 'args']), State):
"""Indicates that a Node did not have the inputs which would be needed for it to execute.
Because Noops are very common but rarely displayed, they are formatted lazily.
"""
@staticmethod
def cycle(src, dst):
return Noop('Edge would cause a cycle: {} -> {}.', src, dst)
def __new__(cls, format_string, *args):
return super(Noop, cls).__new__(cls, format_string, args)
@property
def msg(self):
if self.args:
return self.format_string.format(*self.args)
else:
return self.format_string
def __str__(self):
return 'Noop(msg={!r})'.format(self.msg)
class Return(datatype('Return', ['value']), State):
"""Indicates that a Node successfully returned a value."""
class Throw(datatype('Throw', ['exc']), State):
"""Indicates that a Node should have been able to return a value, but failed."""
class Waiting(datatype('Waiting', ['dependencies']), State):
"""Indicates that a Node is waiting for some/all of the dependencies to become available.
Some Nodes will return different dependency Nodes based on where they are in their lifecycle,
but all returned dependencies are recorded for the lifetime of a Node.
"""
class Node(AbstractClass):
@classmethod
def validate_node(cls, node):
if not isinstance(node, Node):
raise ValueError('Value {} is not a Node.'.format(node))
@abstractproperty
def subject(self):
"""The subject for this Node."""
@abstractproperty
def product(self):
"""The output product for this Node."""
@abstractproperty
def variants(self):
"""The variants for this Node."""
@abstractproperty
def is_cacheable(self):
"""Whether this Node type can be cached."""
@abstractproperty
def is_inlineable(self):
"""Whether this Node type can have its execution inlined.
In cases where a Node is inlined, it is executed directly in the step method of a dependent
Node, and is not memoized or cached in any way.
"""
@abstractmethod
def step(self, step_context):
"""Given a StepContext returns the current State of the Node.
The StepContext holds any computed dependencies, provides a way to construct Nodes
that require information about installed tasks, and allows access to the filesystem.
"""
class SelectNode(datatype('SelectNode', ['subject', 'product', 'variants', 'variant_key']), Node):
"""A Node that selects a product for a subject.
A Select can be satisfied by multiple sources, but fails if multiple sources produce a value. The
'variants' field represents variant configuration that is propagated to dependencies. When
a task needs to consume a product as configured by the variants map, it uses the SelectVariant
selector, which introduces the 'variant' value to restrict the names of values selected by a
SelectNode.
"""
is_cacheable = False
is_inlineable = True
def _variants_node(self):
if type(self.subject) is Address and self.product is not Variants:
return SelectNode(self.subject, Variants, self.variants, None)
return None
def _select_literal(self, candidate, variant_value):
"""Looks for has-a or is-a relationships between the given value and the requested product.
Returns the resulting product value, or None if no match was made.
"""
def items():
# Check whether the subject is-a instance of the product.
yield candidate
# Else, check whether it has-a instance of the product.
if isinstance(candidate, HasProducts):
for subject in candidate.products:
yield subject
# TODO: returning only the first literal configuration of a given type/variant. Need to
# define mergeability for products.
for item in items():
if not isinstance(item, self.product):
continue
if variant_value and not getattr(item, 'name', None) == variant_value:
continue
return item
return None
def step(self, step_context):
# Request default Variants for the subject, so that if there are any we can propagate
# them to task nodes.
variants = self.variants
variants_node = self._variants_node()
if variants_node:
dep_state = step_context.get(variants_node)
if type(dep_state) is Waiting:
return dep_state
elif type(dep_state) is Return:
# A subject's variants are overridden by any dependent's requested variants, so
# we merge them left to right here.
variants = Variants.merge(dep_state.value.default.items(), variants)
# If there is a variant_key, see whether it has been configured.
variant_value = None
if self.variant_key:
variant_values = [value for key, value in variants
if key == self.variant_key] if variants else None
if not variant_values:
# Select cannot be satisfied: no variant configured for this key.
return Noop('Variant key {} was not configured in variants {}', self.variant_key, variants)
variant_value = variant_values[0]
# If the Subject "is a" or "has a" Product, then we're done.
literal_value = self._select_literal(self.subject, variant_value)
if literal_value is not None:
return Return(literal_value)
# Else, attempt to use a configured task to compute the value.
dependencies = []
matches = []
for dep in step_context.gen_nodes(self.subject, self.product, variants):
dep_state = step_context.get(dep)
if type(dep_state) is Waiting:
dependencies.extend(dep_state.dependencies)
elif type(dep_state) is Return:
# We computed a value: see whether we can use it.
literal_value = self._select_literal(dep_state.value, variant_value)
if literal_value is not None:
matches.append((dep, literal_value))
elif type(dep_state) is Throw:
return dep_state
elif type(dep_state) is Noop:
continue
else:
State.raise_unrecognized(dep_state)
# If any dependencies were unavailable, wait for them; otherwise, determine whether
# a value was successfully selected.
if dependencies:
return Waiting(dependencies)
elif len(matches) == 0:
return Noop('No source of {}.', self)
elif len(matches) > 1:
# TODO: Multiple successful tasks are not currently supported. We should allow for this
# by adding support for "mergeable" products. see:
# https://github.com/pantsbuild/pants/issues/2526
return Throw(ConflictingProducersError.create(self.subject, self.product, matches))
else:
return Return(matches[0][1])
class DependenciesNode(datatype('DependenciesNode', ['subject', 'product', 'variants', 'dep_product', 'field']), Node):
"""A Node that selects the given Product for each of the items in `field` on `dep_product`.
Begins by selecting the `dep_product` for the subject, and then selects a product for each
member of a collection named `field` on the dep_product.
The value produced by this Node guarantees that the order of the provided values matches the
order of declaration in the list `field` of the `dep_product`.
"""
is_cacheable = False
is_inlineable = True
def _dep_product_node(self):
return SelectNode(self.subject, self.dep_product, self.variants, None)
def _dependency_nodes(self, step_context, dep_product):
for dependency in getattr(dep_product, self.field or 'dependencies'):
variants = self.variants
if isinstance(dependency, Address):
# If a subject has literal variants for particular dependencies, they win over all else.
dependency, literal_variants = parse_variants(dependency)
variants = Variants.merge(variants, literal_variants)
yield SelectNode(dependency, self.product, variants, None)
def step(self, step_context):
# Request the product we need in order to request dependencies.
dep_product_node = self._dep_product_node()
dep_product_state = step_context.get(dep_product_node)
if type(dep_product_state) in (Throw, Waiting):
return dep_product_state
elif type(dep_product_state) is Noop:
return Noop('Could not compute {} to determine dependencies.', dep_product_node)
elif type(dep_product_state) is not Return:
State.raise_unrecognized(dep_product_state)
# The product and its dependency list are available.
dep_values = []
dependencies = []
for dependency in self._dependency_nodes(step_context, dep_product_state.value):
dep_state = step_context.get(dependency)
if type(dep_state) is Waiting:
dependencies.extend(dep_state.dependencies)
elif type(dep_state) is Return:
dep_values.append(dep_state.value)
elif type(dep_state) is Noop:
return Throw(ValueError('No source of explicit dependency {}'.format(dependency)))
elif type(dep_state) is Throw:
return dep_state
else:
        State.raise_unrecognized(dep_state)
if dependencies:
return Waiting(dependencies)
# All dependencies are present!
return Return(dep_values)
class ProjectionNode(datatype('ProjectionNode', ['subject', 'product', 'variants', 'projected_subject', 'fields', 'input_product']), Node):
"""A Node that selects the given input Product for the Subject, and then selects for a new subject.
TODO: This is semantically very similar to DependenciesNode (which might be considered to be a
multi-field projection for the contents of a list). Should be looking for ways to merge them.
"""
is_cacheable = False
is_inlineable = True
def _input_node(self):
return SelectNode(self.subject, self.input_product, self.variants, None)
def _output_node(self, step_context, projected_subject):
return SelectNode(projected_subject, self.product, self.variants, None)
def step(self, step_context):
# Request the product we need to compute the subject.
input_node = self._input_node()
input_state = step_context.get(input_node)
if type(input_state) in (Throw, Waiting):
return input_state
elif type(input_state) is Noop:
return Noop('Could not compute {} in order to project its fields.', input_node)
elif type(input_state) is not Return:
State.raise_unrecognized(input_state)
# The input product is available: use it to construct the new Subject.
input_product = input_state.value
values = []
for field in self.fields:
values.append(getattr(input_product, field))
# If there was only one projected field and it is already of the correct type, project it.
try:
if len(values) == 1 and type(values[0]) is self.projected_subject:
projected_subject = values[0]
else:
projected_subject = self.projected_subject(*values)
except Exception as e:
return Throw(ValueError('Fields {} of {} could not be projected as {}: {}'.format(
self.fields, input_product, self.projected_subject, e)))
output_node = self._output_node(step_context, projected_subject)
# When the output node is available, return its result.
output_state = step_context.get(output_node)
if type(output_state) in (Return, Throw, Waiting):
return output_state
elif type(output_state) is Noop:
return Throw(ValueError('No source of projected dependency {}'.format(output_node)))
else:
      State.raise_unrecognized(output_state)
class TaskNode(datatype('TaskNode', ['subject', 'product', 'variants', 'func', 'clause']), Node):
"""A Node representing execution of a non-blocking python function.
All dependencies of the function are declared ahead of time in the dependency `clause` of the
function, so the TaskNode will determine whether the dependencies are available before
executing the function, and provides a satisfied argument per clause entry to the function.
"""
is_cacheable = False
is_inlineable = False
def step(self, step_context):
# Compute dependencies for the Node, or determine whether it is a Noop.
dependencies = []
dep_values = []
for selector in self.clause:
dep_node = step_context.select_node(selector, self.subject, self.variants)
dep_state = step_context.get(dep_node)
if type(dep_state) is Waiting:
dependencies.extend(dep_state.dependencies)
elif type(dep_state) is Return:
dep_values.append(dep_state.value)
elif type(dep_state) is Noop:
if selector.optional:
dep_values.append(None)
else:
return Noop('Was missing (at least) input {}.', dep_node)
elif type(dep_state) is Throw:
return dep_state
else:
State.raise_unrecognized(dep_state)
# If any clause was still waiting on dependencies, indicate it; else execute.
if dependencies:
return Waiting(dependencies)
try:
return Return(self.func(*dep_values))
except Exception as e:
return Throw(e)
def __repr__(self):
return 'TaskNode(subject={}, product={}, variants={}, func={}, clause={}' \
.format(self.subject, self.product, self.variants, self.func.__name__, self.clause)
def __str__(self):
return repr(self)
class FilesystemNode(datatype('FilesystemNode', ['subject', 'product', 'variants']), Node):
"""A native node type for filesystem operations."""
_FS_PAIRS = {
(DirectoryListing, Dir),
(FileContent, File),
(FileDigest, File),
(ReadLink, Link),
}
is_cacheable = False
is_inlineable = False
@classmethod
def as_intrinsics(cls):
"""Returns a dict of tuple(sbj type, product type) -> functions returning a fs node for that subject product type tuple."""
return {(subject_type, product_type): FilesystemNode.create
for product_type, subject_type in cls._FS_PAIRS}
@classmethod
def create(cls, subject, product_type, variants):
assert (product_type, type(subject)) in cls._FS_PAIRS
return FilesystemNode(subject, product_type, variants)
@classmethod
def generate_subjects(cls, filenames):
"""Given filenames, generate a set of subjects for invalidation predicate matching."""
for f in filenames:
# ReadLink, or FileContent for the literal path.
yield File(f)
yield Link(f)
# DirectoryListing for parent dirs.
yield Dir(dirname(f))
def step(self, step_context):
try:
if self.product is DirectoryListing:
return Return(scan_directory(step_context.project_tree, self.subject))
elif self.product is FileContent:
return Return(file_content(step_context.project_tree, self.subject))
elif self.product is FileDigest:
return Return(file_digest(step_context.project_tree, self.subject))
elif self.product is ReadLink:
return Return(read_link(step_context.project_tree, self.subject))
else:
        # This would be caused by a mismatch between _FS_PAIRS and the above switch.
raise ValueError('Mismatched input value {} for {}'.format(self.subject, self))
except Exception as e:
return Throw(e)
class StepContext(object):
"""Encapsulates external state and the details of creating Nodes.
This avoids giving Nodes direct access to the task list or subject set.
"""
def __init__(self, node_builder, project_tree, node_states, inline_nodes):
self._node_builder = node_builder
self.project_tree = project_tree
self._node_states = dict(node_states)
self._parents = OrderedSet()
self._inline_nodes = inline_nodes
self.snapshot_archive_root = os.path.join(project_tree.build_root, '.snapshots')
def get(self, node):
"""Given a Node and computed node_states, gets the current state for the Node.
Optionally inlines execution of inlineable dependencies if `inline_nodes=True`.
"""
state = self._node_states.get(node, None)
if state is not None:
return state
if self._inline_nodes and node.is_inlineable:
if node in self._parents:
return Noop.cycle(list(self._parents)[-1], node)
self._parents.add(node)
state = self._node_states[node] = node.step(self)
self._parents.remove(node)
return state
else:
return Waiting([node])
def gen_nodes(self, subject, product, variants):
"""Yields Node instances which might be able to provide a value for the given inputs."""
return self._node_builder.gen_nodes(subject, product, variants)
def select_node(self, selector, subject, variants):
"""Constructs a Node for the given Selector and the given Subject/Variants.
This method is decoupled from Selector classes in order to allow the `selector` package to not
need a dependency on the `nodes` package.
"""
selector_type = type(selector)
if selector_type is Select:
return SelectNode(subject, selector.product, variants, None)
elif selector_type is SelectVariant:
return SelectNode(subject, selector.product, variants, selector.variant_key)
elif selector_type is SelectDependencies:
return DependenciesNode(subject, selector.product, variants, selector.deps_product, selector.field)
elif selector_type is SelectProjection:
return ProjectionNode(subject, selector.product, variants, selector.projected_subject, selector.fields, selector.input_product)
elif selector_type is SelectLiteral:
# NB: Intentionally ignores subject parameter to provide a literal subject.
return SelectNode(selector.subject, selector.product, variants, None)
else:
raise ValueError('Unrecognized Selector type "{}" for: {}'.format(selector_type, selector))
| apache-2.0 | 8,189,230,398,252,842,000 | 38.458918 | 139 | 0.694058 | false |
lammps/lammps-packages | mingw-cross/cmake-win-on-linux.py | 1 | 14980 | #!/usr/bin/env python
# Script to build windows installer packages for LAMMPS
# (c) 2017,2018,2019,2020 Axel Kohlmeyer <akohlmey@gmail.com>
from __future__ import print_function
import sys,os,shutil,glob,re,subprocess,tarfile,gzip,time,inspect
try: from urllib.request import urlretrieve as geturl
except: from urllib import urlretrieve as geturl
try:
import multiprocessing
numcpus = multiprocessing.cpu_count()
except:
numcpus = 1
# helper functions
def error(str=None):
if not str: print(helpmsg)
else: print(sys.argv[0],"ERROR:",str)
sys.exit()
def getbool(arg,keyword):
if arg in ['yes','Yes','Y','y','on','1','True','true']:
return True
elif arg in ['no','No','N','n','off','0','False','false']:
return False
else:
error("Unknown %s option: %s" % (keyword,arg))
def fullpath(path):
return os.path.abspath(os.path.expanduser(path))
def getexe(url,name):
gzname = name + ".gz"
geturl(url,gzname)
with gzip.open(gzname,'rb') as gz_in:
with open(name,'wb') as f_out:
shutil.copyfileobj(gz_in,f_out)
gz_in.close()
f_out.close()
os.remove(gzname)
def system(cmd):
try:
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
except subprocess.CalledProcessError as e:
print("Command '%s' returned non-zero exit status" % e.cmd)
error(e.output.decode('UTF-8'))
return txt.decode('UTF-8')
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# record location and name of python script
homedir, exename = os.path.split(os.path.abspath(inspect.getsourcefile(lambda:0)))
# default settings help message and default settings
bitflag = '64'
parflag = 'no'
pythonflag = False
thrflag = 'omp'
revflag = 'stable'
verbose = False
gitdir = os.path.join(homedir,"lammps")
adminflag = True
msixflag = False
helpmsg = """
Usage: python %s -b <bits> -j <cpus> -p <mpi> -t <thread> -y <yes|no> -r <rev> -v <yes|no> -g <folder> -a <yes|no>
Flags (all flags are optional, defaults listed below):
-b : select Windows variant (default value: %s)
-b 32 : build for 32-bit Windows
-b 64 : build for 64-bit Windows
-j : set number of CPUs for parallel make (default value: %d)
-j <num> : set to any reasonable number or 1 for serial make
-p : select message passing parallel build (default value: %s)
-p mpi : build an MPI parallel version with MPICH2 v1.4.1p1
-p no : build a serial version using MPI STUBS library
-t : select thread support (default value: %s)
-t omp : build with threads via OpenMP enabled
-t no : build with thread support disabled
-y : select python support (default value: %s)
-y yes : build with python included
-y no : build without python
-r : select LAMMPS source revision to build (default value: %s)
-r stable : download and build the latest stable LAMMPS version
-r unstable : download and build the latest patch release LAMMPS version
-r master : download and build the latest development snapshot
-r patch_<date> : download and build a specific patch release
-r <sha256> : download and build a specific snapshot version
-v : select output verbosity
-v yes : print progress messages and output of make commands
-v no : print only progress messages
-g : select folder with git checkout of LAMMPS sources
-g <folder> : use LAMMPS checkout in <folder> (default value: %s)
-a : select admin level installation (default value: yes)
 -a yes : the created installer must be run with admin privileges
and LAMMPS is installed to be accessible by all users
-a no : the created installer runs without admin privilege and
LAMMPS is installed into the current user's appdata folder
-a msix : same as "no" but adjust for creating an MSIX package
Example:
python %s -r unstable -t omp -p mpi
""" % (exename,bitflag,numcpus,parflag,thrflag,pythonflag,revflag,gitdir,exename)
# parse arguments
argv = sys.argv
argc = len(argv)
i = 1
while i < argc:
if i+1 >= argc:
print("\nMissing argument to flag:",argv[i])
error()
if argv[i] == '-b':
bitflag = argv[i+1]
elif argv[i] == '-j':
numcpus = int(argv[i+1])
elif argv[i] == '-p':
parflag = argv[i+1]
elif argv[i] == '-t':
thrflag = argv[i+1]
elif argv[i] == '-y':
pythonflag = getbool(argv[i+1],"python")
elif argv[i] == '-r':
revflag = argv[i+1]
elif argv[i] == '-v':
verbose = getbool(argv[i+1],"verbose")
elif argv[i] == '-a':
if argv[i+1] in ['msix','MSIX']:
adminflag = False
msixflag = True
else:
msixflag = False
adminflag = getbool(argv[i+1],"admin")
elif argv[i] == '-g':
gitdir = fullpath(argv[i+1])
else:
print("\nUnknown flag:",argv[i])
error()
i+=2
# checks
if bitflag != '32' and bitflag != '64':
error("Unsupported bitness flag %s" % bitflag)
if parflag != 'no' and parflag != 'mpi':
error("Unsupported parallel flag %s" % parflag)
if thrflag != 'no' and thrflag != 'omp':
error("Unsupported threading flag %s" % thrflag)
# test for valid revision name format: branch names, release tags, or commit hashes
rev1 = re.compile("^(stable|unstable|master)$")
rev2 = re.compile(r"^(patch|stable)_\d+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\d{4}$")
rev3 = re.compile(r"^[a-f0-9]{40}$")
if not rev1.match(revflag) and not rev2.match(revflag) and not rev3.match(revflag):
error("Unsupported revision flag %s" % revflag)
# create working directory
if adminflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s" % (bitflag,parflag,thrflag,revflag))
else:
if pythonflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-python" % (bitflag,parflag,thrflag,revflag))
elif msixflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-msix" % (bitflag,parflag,thrflag,revflag))
else:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-noadmin" % (bitflag,parflag,thrflag,revflag))
shutil.rmtree(builddir,True)
try:
os.mkdir(builddir)
except:
error("Cannot create temporary build folder: %s" % builddir)
# check for prerequisites and set up build environment
if bitflag == '32':
cc_cmd = which('i686-w64-mingw32-gcc')
cxx_cmd = which('i686-w64-mingw32-g++')
fc_cmd = which('i686-w64-mingw32-gfortran')
ar_cmd = which('i686-w64-mingw32-ar')
size_cmd = which('i686-w64-mingw32-size')
nsis_cmd = which('makensis')
lmp_size = 'smallsmall'
else:
cc_cmd = which('x86_64-w64-mingw32-gcc')
cxx_cmd = which('x86_64-w64-mingw32-g++')
fc_cmd = which('x86_64-w64-mingw32-gfortran')
ar_cmd = which('x86_64-w64-mingw32-ar')
size_cmd = which('x86_64-w64-mingw32-size')
nsis_cmd = which('makensis')
lmp_size = 'smallbig'
print("""
Settings: building LAMMPS revision %s for %s-bit Windows
Message passing : %s
Multi-threading : %s
Home folder : %s
Source folder : %s
Build folder : %s
C compiler : %s
C++ compiler : %s
Fortran compiler : %s
Library archiver : %s
""" % (revflag,bitflag,parflag,thrflag,homedir,gitdir,builddir,cc_cmd,cxx_cmd,fc_cmd,ar_cmd))
# create/update git checkout
if not os.path.exists(gitdir):
txt = system("git clone https://github.com/lammps/lammps.git %s" % gitdir)
if verbose: print(txt)
os.chdir(gitdir)
txt = system("git fetch origin")
if verbose: print(txt)
txt = system("git checkout %s" % revflag)
if verbose: print(txt)
if revflag == "master" or revflag == "stable" or revflag == "unstable":
txt = system("git pull")
if verbose: print(txt)
# switch to build folder
os.chdir(builddir)
# download what is not automatically downloaded by CMake
print("Downloading third party tools")
url='http://download.lammps.org/thirdparty'
print("FFMpeg")
getexe("%s/ffmpeg-win%s.exe.gz" % (url,bitflag),"ffmpeg.exe")
print("gzip")
getexe("%s/gzip.exe.gz" % url,"gzip.exe")
if parflag == "mpi":
mpiflag = "on"
else:
mpiflag = "off"
if thrflag == "omp":
ompflag = "on"
else:
ompflag = "off"
print("Configuring build with CMake")
cmd = "mingw%s-cmake -G Ninja -D CMAKE_BUILD_TYPE=Release" % bitflag
cmd += " -D ADD_PKG_CONFIG_PATH=%s/mingw%s-pkgconfig" % (homedir,bitflag)
cmd += " -C %s/mingw%s-pkgconfig/addpkg.cmake" % (homedir,bitflag)
cmd += " -C %s/cmake/presets/mingw-cross.cmake %s/cmake" % (gitdir,gitdir)
cmd += " -DBUILD_SHARED_LIBS=on -DBUILD_MPI=%s -DBUILD_OPENMP=%s" % (mpiflag,ompflag)
cmd += " -DWITH_GZIP=on -DWITH_FFMPEG=on -DLAMMPS_EXCEPTIONS=on"
cmd += " -DINTEL_LRT_MODE=c++11 -DBUILD_LAMMPS_SHELL=on"
cmd += " -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
if pythonflag: cmd += " -DPKG_PYTHON=yes"
print("Running: ",cmd)
txt = system(cmd)
if verbose: print(txt)
print("Compiling")
system("ninja")
print("Done")
print("Building PDF manual")
os.chdir(os.path.join(gitdir,"doc"))
txt = system("make pdf")
if verbose: print(txt)
shutil.move("Manual.pdf",os.path.join(builddir,"LAMMPS-Manual.pdf"))
print("Done")
# switch back to build folder and copy/process files for inclusion in installer
print("Collect and convert files for the Installer package")
os.chdir(builddir)
shutil.copytree(os.path.join(gitdir,"examples"),os.path.join(builddir,"examples"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"bench"),os.path.join(builddir,"bench"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"tools"),os.path.join(builddir,"tools"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"python","lammps"),os.path.join(builddir,"python","lammps"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"potentials"),os.path.join(builddir,"potentials"),symlinks=False)
shutil.copy(os.path.join(gitdir,"README"),os.path.join(builddir,"README.txt"))
shutil.copy(os.path.join(gitdir,"LICENSE"),os.path.join(builddir,"LICENSE.txt"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","colvars-refman-lammps.pdf"),os.path.join(builddir,"Colvars-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"tools","createatoms","Manual.pdf"),os.path.join(builddir,"CreateAtoms-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","kspace.pdf"),os.path.join(builddir,"Kspace-Extra-Info.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_gayberne_extra.pdf"),os.path.join(builddir,"PairGayBerne-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_resquared_extra.pdf"),os.path.join(builddir,"PairReSquared-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_overview.pdf"),os.path.join(builddir,"PDLAMMPS-Overview.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_EPS.pdf"),os.path.join(builddir,"PDLAMMPS-EPS.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_VES.pdf"),os.path.join(builddir,"PDLAMMPS-VES.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SPH_LAMMPS_userguide.pdf"),os.path.join(builddir,"SPH-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SMD_LAMMPS_userguide.pdf"),os.path.join(builddir,"SMD-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","USER-CGDNA.pdf"),os.path.join(builddir,"CGDNA-Manual.pdf"))
# prune outdated inputs, too large files, or examples of packages we don't bundle
for d in ['accelerate','kim','mscg','USER/quip','USER/vtk']:
shutil.rmtree(os.path.join("examples",d),True)
for d in ['FERMI','KEPLER']:
shutil.rmtree(os.path.join("bench",d),True)
shutil.rmtree("tools/msi2lmp/test",True)
os.remove("potentials/C_10_10.mesocnt")
os.remove("potentials/TABTP_10_10.mesont")
os.remove("examples/USER/mesont/C_10_10.mesocnt")
os.remove("examples/USER/mesont/TABTP_10_10.mesont")
# convert text files to CR-LF conventions
txt = system("unix2dos LICENSE.txt README.txt tools/msi2lmp/README")
if verbose: print(txt)
txt = system("find bench examples potentials python tools/msi2lmp/frc_files -type f -print | xargs unix2dos")
if verbose: print(txt)
# mass rename README to README.txt
txt = system('for f in $(find tools bench examples potentials python -name README -print); do mv -v $f $f.txt; done')
if verbose: print(txt)
# mass rename in.<name> to in.<name>.lmp
txt = system('for f in $(find bench examples -name in.\* -print); do mv -v $f $f.lmp; done')
if verbose: print(txt)
print("Done")
print("Configuring and building installer")
os.chdir(builddir)
if pythonflag:
nsisfile = os.path.join(homedir,"installer","lammps-python.nsis")
elif adminflag:
nsisfile = os.path.join(homedir,"installer","lammps-admin.nsis")
else:
if msixflag:
nsisfile = os.path.join(homedir,"installer","lammps-msix.nsis")
else:
nsisfile = os.path.join(homedir,"installer","lammps-noadmin.nsis")
shutil.copy(nsisfile,os.path.join(builddir,"lammps.nsis"))
shutil.copy(os.path.join(homedir,"installer","FileAssociation.nsh"),os.path.join(builddir,"FileAssociation.nsh"))
shutil.copy(os.path.join(homedir,"installer","lammps.ico"),os.path.join(builddir,"lammps.ico"))
shutil.copy(os.path.join(homedir,"installer","lammps-text-logo-wide.bmp"),os.path.join(builddir,"lammps-text-logo-wide.bmp"))
shutil.copytree(os.path.join(homedir,"installer","envvar"),os.path.join(builddir,"envvar"),symlinks=False)
# define version flag of the installer:
# - use current timestamp, when pulling from master (for daily builds)
# - parse version from src/version.h when pulling from stable, unstable, or specific tag
# - otherwise use revflag, i.e. the commit hash
version = revflag
if revflag == 'stable' or revflag == 'unstable' or rev2.match(revflag):
with open(os.path.join(gitdir,"src","version.h"),'r') as v_file:
verexp = re.compile(r'^.*"(\w+) (\w+) (\w+)".*$')
vertxt = v_file.readline()
verseq = verexp.match(vertxt).groups()
version = "".join(verseq)
elif revflag == 'master':
version = time.strftime('%Y-%m-%d')
if bitflag == '32':
mingwdir = '/usr/i686-w64-mingw32/sys-root/mingw/bin/'
elif bitflag == '64':
mingwdir = '/usr/x86_64-w64-mingw32/sys-root/mingw/bin/'
if parflag == 'mpi':
txt = system("makensis -DMINGW=%s -DVERSION=%s-MPI -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
if verbose: print(txt)
else:
txt = system("makensis -DMINGW=%s -DVERSION=%s -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
if verbose: print(txt)
# clean up after successful build
os.chdir('..')
print("Cleaning up...")
shutil.rmtree(builddir,True)
print("Done.")
| mit | -3,950,353,748,060,748,000 | 38.21466 | 128 | 0.672029 | false |
wathen/PhD | MHD/FEniCS/ShiftCurlCurl/CppGradient/Efficient/CurlCurlSecondOrder.py | 1 | 5726 | import petsc4py, sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import os, inspect
from dolfin import *
import numpy
import ExactSol
import MatrixOperations as MO
import CheckPetsc4py as CP
import HiptmairPrecond
import HiptmairSetup
from timeit import default_timer as timer
m = 8
errL2b =numpy.zeros((m-1,1))
errCurlb =numpy.zeros((m-1,1))
l2border = numpy.zeros((m-1,1))
Curlborder =numpy.zeros((m-1,1))
ItsSave = numpy.zeros((m-1,1))
DimSave = numpy.zeros((m-1,1))
TimeSave = numpy.zeros((m-1,1))
NN = numpy.zeros((m-1,1))
Curlgrad = numpy.zeros((m-1,1))
Massgrad = numpy.zeros((m-1,1))
Laplgrad = numpy.zeros((m-1,1))
dim =3
for xx in xrange(1,m):
NN[xx-1] = xx+0
nn = int(2**(NN[xx-1][0]))
# nn = 1
omega = 1
if dim == 2:
        mesh = UnitSquareMesh(int(nn),int(nn))
# mesh = RectangleMesh(0.0, 0.0, 1.0, 1.5, int(nn), int(nn), 'left')
u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M2D(2,Show="yes", Mass = omega)
else:
mesh = UnitCubeMesh(int(nn),int(nn),int(nn))
u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M3D(1,Show="yes", Mass = omega)
order = 2
parameters['reorder_dofs_serial'] = False
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
parameters['reorder_dofs_serial'] = False
DimSave[xx-1] = Magnetic.dim()
print Magnetic.dim()
parameters['linear_algebra_backend'] = 'uBLAS'
# tic()
# C, P = HiptmairSetup.HiptmairMatrixSetupBoundary(mesh, Magnetic.dim(), Lagrange.dim(),dim)
# G, P = HiptmairSetup.HiptmairBCsetupBoundary(C,P,mesh)
# endTimeB = toc()
# print endTimeB
print "\n"
# tic()
# C, P = HiptmairSetup.HiptmairMatrixSetup(mesh, Magnetic.dim(), Lagrange.dim())
# G, P = HiptmairSetup.HiptmairBCsetup(C,P, mesh, [Magnetic,Lagrange])
# endTime = toc()
# print endTime
# ataaa
def boundary(x, on_boundary):
return on_boundary
bc = DirichletBC(Magnetic,u0, boundary)
bcu = DirichletBC(Lagrange, Expression(("0.0")), boundary)
(v) = TestFunction(Magnetic)
(u) = TrialFunction(Magnetic)
(p) = TrialFunction(Lagrange)
(q) = TestFunction(Lagrange)
a = inner(curl(u),curl(v))*dx + inner(u,v)*dx
L1 = inner(v, CurlMass)*dx
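    # Weak form being assembled (shifted curl-curl problem): find u in H(curl)
    # such that (curl u, curl v) + (u, v) = (f, v) for all v in H(curl)
    # (unit mass shift, omega = 1), discretized with order-2 Nedelec
    # (N1curl) edge elements.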
tic()
Acurl,b = assemble_system(a,L1,bc, form_compiler_parameters={"eliminate_zeros": True})
print "System assembled, time: ", toc()
tic()
A,b = CP.Assemble(Acurl,b)
x = b.duplicate()
print "PETSc system assembled, time: ", toc()
MatVec = 'yes'
if MatVec == "yes":
tic()
VecLagrange, kspMass, VectorLaplacian, ScalarLaplacian, B, BC = HiptmairSetup.HiptmairAnyOrder(Magnetic,Lagrange)
# del b1, b2
print "Hiptmair Laplacians BC assembled, time: ", toc()
ksp = PETSc.KSP().create()
ksp.setTolerances(1e-6)
ksp.setType('cg')
ksp.setOperators(A,A)
pc = ksp.getPC()
reshist = {}
def monitor(ksp, its, rnorm):
reshist[its] = rnorm
print its, ' ', rnorm
ksp.setMonitor(monitor)
pc.setType(PETSc.PC.Type.PYTHON)
kspVector, kspScalar, diag = HiptmairSetup.HiptmairKSPsetup(VectorLaplacian, ScalarLaplacian,A)
del A, VectorLaplacian, ScalarLaplacian
pc.setPythonContext(HiptmairPrecond.HiptmairApply([Magnetic,Lagrange,VecLagrange] ,B, kspMass, kspVector, kspScalar, diag, BC))
scale = b.norm()
b = b/scale
tic()
ksp.solve(b, x)
TimeSave[xx-1] = toc()
x = x*scale
print ksp.its
print TimeSave[xx-1]
ItsSave[xx-1] = ksp.its
print " \n\n\n\n"
else:
# tic()
C, P = HiptmairSetup.HiptmairMatrixSetupBoundary(mesh, Magnetic.dim(), Lagrange.dim(),dim)
G, P = HiptmairSetup.HiptmairBCsetupBoundary(C,P,mesh)
# endTimeB = toc()
# print endTimeB
print "\n"
tic()
ScalarLaplacian, b1 = assemble_system(inner(grad(p),grad(q))*dx,inner(p0,q)*dx,bcu)
VectorLaplacian, b2 = assemble_system(inner(grad(p),grad(q))*dx+inner(p,q)*dx,inner(p0,q)*dx,bcu)
del b1, b2
print "Hiptmair Laplacians BC assembled, time: ", toc()
tic()
VectorLaplacian = PETSc.Mat().createAIJ(size=VectorLaplacian.sparray().shape,csr=(VectorLaplacian.sparray().indptr, VectorLaplacian.sparray().indices, VectorLaplacian.sparray().data))
ScalarLaplacian = PETSc.Mat().createAIJ(size=ScalarLaplacian.sparray().shape,csr=(ScalarLaplacian.sparray().indptr, ScalarLaplacian.sparray().indices, ScalarLaplacian.sparray().data))
print "PETSc Laplacians assembled, time: ", toc()
ksp = PETSc.KSP().create()
ksp.setTolerances(1e-6)
ksp.setType('cg')
ksp.setOperators(A,A)
pc = ksp.getPC()
pc.setType(PETSc.PC.Type.PYTHON)
kspVector, kspScalar, diag = HiptmairSetup.HiptmairKSPsetup(VectorLaplacian, ScalarLaplacian,A)
del A, VectorLaplacian, ScalarLaplacian
pc.setPythonContext(HiptmairPrecond.GSvector(G, P, kspVector, kspScalar, diag))
scale = b.norm()
b = b/scale
tic()
ksp.solve(b, x)
TimeSave[xx-1] = toc()
x = x*scale
print ksp.its
print TimeSave[xx-1]
ItsSave[xx-1] = ksp.its
print " \n\n\n\n"
import pandas as pd
print "\n\n\n"
ItsTitlesB = ["l","B DoF","Time","Iterations"]
ItsValuesB = numpy.concatenate((NN,DimSave,TimeSave,ItsSave),axis=1)
ItsTableB= pd.DataFrame(ItsValuesB, columns = ItsTitlesB)
pd.set_option('precision',5)
print ItsTableB.to_latex()
if m !=2:
print numpy.abs((TimeSave[1:]/TimeSave[:-1]))/(2*dim)
| mit | -7,598,322,120,128,838,000 | 32.48538 | 191 | 0.627314 | false |
natemara/jumprunpro-python | setup.py | 1 | 1076 | from setuptools import setup
from os import path
BASE_PATH = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(BASE_PATH, 'README.rst'), 'r') as f:
long_description = f.read()
setup(
name='python-jumprunpro',
version='0.0.2',
author='Nate Mara',
author_email='natemara@gmail.com',
description='Simple python bindings for scraping data from JumpRun Pro',
long_description=long_description,
license='MIT',
test_suite='tests',
keywords='skydiving manifest',
url='https://github.com/natemara/jumprunpro-python',
packages=['jumprun'],
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires=[
'beautifulsoup4==4.3.2',
'requests==2.6.2',
'python-dateutil==2.4.2',
],
)
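# Standard setuptools usage (for reference, not part of the package):
#   python setup.py sdist      # build a source distribution
#   pip install .              # install from this checkout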
| mit | 1,568,404,348,361,246,000 | 27.315789 | 73 | 0.685874 | false |
sadimanna/computer_vision | clustering/kmeansppclustering_with_gap_statistic.py | 1 | 2599 | #K-Means++ Clustering with Gap Statistic to determine the optimal number of clusters
import sys
import numpy as np
import scipy.io as sio
#import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.svm import SVC
filename = sys.argv[1]
datafile = sio.loadmat(filename)
data = datafile['bow']
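# The .mat file is expected to provide a variable named 'bow' holding an
# (n_samples x n_features) matrix -- presumably bag-of-words counts, one row
# per document.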
sizedata=[len(data), len(data[0])]
disp = []
optimal_ks = []
#Determining the optimal number of clusters k with the gap-statistic method
def gap_statistic(data):
sizedata = [len(data),len(data[0])]
SD = []
gap = []
for knum in xrange(1,20):
#I assumed that the number of clusters in my data won't be more than 20, this can be changed accordingly
print knum
#Clustering original Data
kmeanspp = KMeans(n_clusters=knum,init = 'k-means++',max_iter = 100,n_jobs = 1)
kmeanspp.fit(data)
dispersion = kmeanspp.inertia_
#Clustering Reference Data
nrefs = 10
refDisp = np.zeros(nrefs)
for nref in xrange(nrefs):
refdata = np.random.random_sample(tuple(sizedata))
refkmeans = KMeans(n_clusters=knum,init='k-means++',max_iter=100,n_jobs=1)
refkmeans.fit(refdata)
refdisp = refkmeans.inertia_
refDisp[nref]=np.log(refdisp)
mean_log_refdisp = np.mean(refDisp)
gap.append(mean_log_refdisp-np.log(dispersion))
sd = (sum([(r-m)**2 for r,m in zip(refDisp,[mean_log_refdisp]*nrefs)])/nrefs)**0.5
SD.append(sd)
    SD = [sd*((1+(1.0/nrefs))**0.5) for sd in SD]  # 1.0/nrefs avoids Python 2 integer division (1/10 == 0)
    opt_k = None
    for i in xrange(len(gap)-1):
        # Gap-statistic rule: pick the smallest k with Gap(k) >= Gap(k+1) - s_{k+1}
        diff = SD[i+1] - (gap[i+1] - gap[i])
        if diff > 0:
            opt_k = i + 1  # gap[i] corresponds to knum = i+1, since knum starts at 1
            break
    if opt_k is not None and opt_k < 20:
#print opt_k
return opt_k
else:
return 20
#Returning 20 if opt_k is more than 20 in my case, as I wanted not to search more than 20.
# Not required if range is larger.
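# Background (for reference): the gap statistic of Tibshirani et al. compares
# the log within-cluster dispersion log(W_k) of the data with its expectation
# under a uniform reference distribution:
#     Gap(k) = E*[log(W_k)] - log(W_k)
# and chooses the smallest k with Gap(k) >= Gap(k+1) - s_{k+1}, where s_{k+1}
# accounts for the simulation error of the reference draws -- this is the
# `diff > 0` test inside gap_statistic above.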
ntrials = 50
for ntrial in xrange(ntrials):
print 'ntrial: ',ntrial
optimal_ks.append(gap_statistic(data))
#For plotting the gap statistic measure
#plt.plot(np.linspace(10,19,10,True),gap)
#plt.show()
unique_opt_k = list(set(optimal_ks))
k_count = {}
count_opt_k = 0
second_opt_k = 0
opt_k = 0
for u_o_k in unique_opt_k:
count = optimal_ks.count(u_o_k)
k_count[u_o_k]=count
if count>count_opt_k:
count_opt_k = count
opt_k = u_o_k
elif count==count_opt_k:
second_opt_k = u_o_k
print opt_k
print k_count
#Clustering with the optimal number of clusters k
kmeanspp = KMeans(n_clusters = opt_k,init='k-means++',max_iter=100,n_jobs=1)
kmeanspp.fit(data)
centers = kmeanspp.cluster_centers_
clusterlabels = kmeanspp.labels_
print clusterlabels
mdict = {}
mdict['clusterlabels'] = clusterlabels
sio.savemat('clusterlabels.mat',mdict,format = '4',oned_as = 'column')
print 'dan dana dan done...'
| gpl-3.0 | 3,089,997,945,052,346,400 | 28.202247 | 106 | 0.696037 | false |
qedsoftware/commcare-hq | corehq/apps/userreports/reports/view.py | 1 | 19536 | import json
import os
import tempfile
from StringIO import StringIO
from corehq.apps.domain.views import BaseDomainView
from corehq.apps.reports.util import \
DEFAULT_CSS_FORM_ACTIONS_CLASS_REPORT_FILTER
from corehq.apps.style.decorators import (
use_select2,
use_daterangepicker,
use_jquery_ui,
use_nvd3,
use_datatables,
)
from corehq.apps.userreports.const import REPORT_BUILDER_EVENTS_KEY, \
DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE
from couchexport.shortcuts import export_response
from corehq.toggles import DISABLE_COLUMN_LIMIT_IN_UCR
from dimagi.utils.modules import to_function
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse, Http404, HttpResponseBadRequest
from django.utils.translation import ugettext as _, ugettext_noop
from braces.views import JSONResponseMixin
from corehq.apps.locations.permissions import conditionally_location_safe
from corehq.apps.reports.dispatcher import (
ReportDispatcher,
)
from corehq.apps.reports.models import ReportConfig
from corehq.apps.reports_core.exceptions import FilterException
from corehq.apps.userreports.exceptions import (
BadSpecError,
UserReportsError,
TableNotFoundWarning,
UserReportsFilterError,
DataSourceConfigurationNotFoundError)
from corehq.apps.userreports.models import (
CUSTOM_REPORT_PREFIX,
StaticReportConfiguration,
ReportConfiguration,
report_config_id_is_static,
)
from corehq.apps.userreports.reports.factory import ReportFactory
from corehq.apps.userreports.reports.util import (
get_expanded_columns,
has_location_filter,
)
from corehq.apps.userreports.util import (
default_language,
has_report_builder_trial,
can_edit_report,
)
from corehq.util.couch import get_document_or_404, get_document_or_not_found, \
DocumentNotFound
from couchexport.export import export_from_tables
from couchexport.models import Format
from dimagi.utils.couch.pagination import DatatablesParams
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import json_request
from no_exceptions.exceptions import Http403
from corehq.apps.reports.datatables import DataTablesHeader
UCR_EXPORT_TO_EXCEL_ROW_LIMIT = 1000
def get_filter_values(filters, request_dict, user=None):
"""
Return a dictionary mapping filter ids to specified values
:param filters: A list of corehq.apps.reports_core.filters.BaseFilter
objects (or subclasses)
    :param request_dict: keyword arguments from the request
    :param user: the requesting user, used to resolve user-specific filter values
    :return: a dict mapping each filter's css_id to its value
"""
try:
return {
filter.css_id: filter.get_value(request_dict, user)
for filter in filters
}
except FilterException, e:
raise UserReportsFilterError(unicode(e))
def query_dict_to_dict(query_dict, domain):
"""
Transform the given QueryDict to a normal dict where each value has been
converted from a string to a dict (if the value is JSON).
Also add the domain to the dict.
:param query_dict: a QueryDict
:param domain:
:return: a dict
"""
request_dict = json_request(query_dict)
request_dict['domain'] = domain
return request_dict
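# For example (illustrative values): a querystring such as
#   ?location_id="abc123"&startdate="2016-01-01"
# is JSON-decoded per value by json_request() and becomes
#   {'location_id': 'abc123', 'startdate': '2016-01-01', 'domain': <domain>}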
class ConfigurableReport(JSONResponseMixin, BaseDomainView):
section_name = ugettext_noop("Reports")
template_name = 'userreports/configurable_report.html'
slug = "configurable"
prefix = slug
emailable = True
is_exportable = True
show_filters = True
_domain = None
@property
def domain(self):
if self._domain is not None:
return self._domain
return super(ConfigurableReport, self).domain
@use_select2
@use_daterangepicker
@use_jquery_ui
@use_datatables
@use_nvd3
@conditionally_location_safe(has_location_filter)
def dispatch(self, request, *args, **kwargs):
original = super(ConfigurableReport, self).dispatch(request, *args, **kwargs)
return original
@property
def section_url(self):
# todo what should the parent section url be?
return "#"
@property
def is_static(self):
return report_config_id_is_static(self.report_config_id)
@property
def is_custom_rendered(self):
return self.report_config_id.startswith(CUSTOM_REPORT_PREFIX)
@property
@memoized
def spec(self):
if self.is_static:
return StaticReportConfiguration.by_id(self.report_config_id)
else:
return get_document_or_not_found(ReportConfiguration, self.domain, self.report_config_id)
def get_spec_or_404(self):
try:
return self.spec
except DocumentNotFound:
raise Http404()
def has_viable_configuration(self):
try:
self.spec
except DocumentNotFound:
return False
else:
return True
@property
def title(self):
return self.spec.title
@property
def page_name(self):
return self.spec.title
@property
@memoized
def data_source(self):
report = ReportFactory.from_spec(self.spec, include_prefilters=True)
report.lang = self.lang
return report
@property
@memoized
def request_dict(self):
if self.request.method == 'GET':
return query_dict_to_dict(self.request.GET, self.domain)
elif self.request.method == 'POST':
return query_dict_to_dict(self.request.POST, self.domain)
@property
@memoized
def filter_values(self):
try:
user = self.request.couch_user
except AttributeError:
user = None
return get_filter_values(self.filters, self.request_dict, user=user)
@property
@memoized
def filter_context(self):
return {
filter.css_id: filter.context(self.filter_values[filter.css_id], self.lang)
for filter in self.filters
}
@property
@memoized
def filters(self):
return self.spec.ui_filters
_report_config_id = None
@property
def report_config_id(self):
if self._report_config_id is not None:
return self._report_config_id
return self.kwargs['subreport_slug']
_lang = None
@property
def lang(self):
if self._lang is not None:
return self._lang
return self.request.couch_user.language or default_language()
def get(self, request, *args, **kwargs):
if self.has_permissions(self.domain, request.couch_user):
self.get_spec_or_404()
if kwargs.get('render_as') == 'email':
return self.email_response
elif kwargs.get('render_as') == 'excel':
return self.excel_response
elif request.GET.get('format', None) == "export":
return self.export_response
elif request.GET.get('format', None) == 'export_size_check':
return self.export_size_check_response
elif request.is_ajax() or request.GET.get('format', None) == 'json':
return self.get_ajax(self.request.GET)
self.content_type = None
try:
self.add_warnings(self.request)
except UserReportsError as e:
details = ''
if isinstance(e, DataSourceConfigurationNotFoundError):
error_message = DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE
else:
error_message = _(
'It looks like there is a problem with your report. '
'You may need to delete and recreate the report. '
'If you believe you are seeing this message in error, please report an issue.'
)
details = unicode(e)
self.template_name = 'userreports/report_error.html'
context = {
'report_id': self.report_config_id,
'is_static': self.is_static,
'error_message': error_message,
'details': details,
}
context.update(self.main_context)
return self.render_to_response(context)
return super(ConfigurableReport, self).get(request, *args, **kwargs)
else:
raise Http403()
def post(self, request, *args, **kwargs):
if self.has_permissions(self.domain, request.couch_user):
self.get_spec_or_404()
if request.is_ajax():
return self.get_ajax(self.request.POST)
else:
return HttpResponseBadRequest()
else:
raise Http403()
def has_permissions(self, domain, user):
return True
def add_warnings(self, request):
for warning in self.data_source.column_warnings:
messages.warning(request, warning)
@property
def page_context(self):
context = {
'report': self,
'report_table': {'default_rows': 25},
'filter_context': self.filter_context,
'url': self.url,
'method': 'POST',
'headers': self.headers,
'can_edit_report': can_edit_report(self.request, self),
'has_report_builder_trial': has_report_builder_trial(self.request),
'report_filter_form_action_css_class': DEFAULT_CSS_FORM_ACTIONS_CLASS_REPORT_FILTER,
}
context.update(self.saved_report_context_data)
context.update(self.pop_report_builder_context_data())
if isinstance(self.spec, ReportConfiguration) and self.spec.report_meta.builder_report_type == 'map':
context['report_table']['default_rows'] = 100
return context
def pop_report_builder_context_data(self):
"""
Pop any report builder data stored on the session and return a dict to
be included in the template context.
"""
return {
'report_builder_events': self.request.session.pop(REPORT_BUILDER_EVENTS_KEY, [])
}
@property
def saved_report_context_data(self):
def _get_context_for_saved_report(report_config):
if report_config:
report_config_data = report_config.to_json()
report_config_data['filters'].update(report_config.get_date_range())
return report_config_data
else:
return ReportConfig.default()
saved_report_config_id = self.request.GET.get('config_id')
saved_report_config = get_document_or_404(ReportConfig, self.domain, saved_report_config_id) \
if saved_report_config_id else None
return {
'report_configs': [
_get_context_for_saved_report(saved_report)
for saved_report in ReportConfig.by_domain_and_owner(
self.domain, self.request.couch_user._id, report_slug=self.slug
)
],
'default_config': _get_context_for_saved_report(saved_report_config),
'datespan_filters': ReportConfig.datespan_filter_choices(self.datespan_filters, self.lang),
}
@property
def has_datespan(self):
return bool(self.datespan_filters)
@property
def datespan_filters(self):
return [
f for f in self.spec.filters
if f['type'] == 'date'
]
@property
def headers(self):
return DataTablesHeader(*[col.data_tables_column for col in self.data_source.inner_columns])
def get_ajax(self, params):
try:
data_source = self.data_source
if len(data_source.inner_columns) > 50 and not DISABLE_COLUMN_LIMIT_IN_UCR.enabled(self.domain):
raise UserReportsError(_("This report has too many columns to be displayed"))
data_source.set_filter_values(self.filter_values)
sort_column = params.get('iSortCol_0')
sort_order = params.get('sSortDir_0', 'ASC')
echo = int(params.get('sEcho', 1))
if sort_column and echo != 1:
data_source.set_order_by(
[(data_source.top_level_columns[int(sort_column)].column_id, sort_order.upper())]
)
datatables_params = DatatablesParams.from_request_dict(params)
page = list(data_source.get_data(start=datatables_params.start, limit=datatables_params.count))
total_records = data_source.get_total_records()
total_row = data_source.get_total_row() if data_source.has_total_row else None
except UserReportsError as e:
if settings.DEBUG:
raise
return self.render_json_response({
'error': e.message,
'aaData': [],
'iTotalRecords': 0,
'iTotalDisplayRecords': 0,
})
except TableNotFoundWarning:
if self.spec.report_meta.created_by_builder:
msg = _(
"The database table backing your report does not exist yet. "
"Please wait while the report is populated."
)
else:
msg = _(
"The database table backing your report does not exist yet. "
"You must rebuild the data source before viewing the report."
)
return self.render_json_response({
'warning': msg
})
json_response = {
'aaData': page,
"sEcho": params.get('sEcho', 0),
"iTotalRecords": total_records,
"iTotalDisplayRecords": total_records,
}
if total_row is not None:
json_response["total_row"] = total_row
return self.render_json_response(json_response)
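    # Note (sketch): the params handled in get_ajax() above follow the legacy
    # DataTables 1.9 server-side protocol, e.g.
    #   {'iSortCol_0': '2', 'sSortDir_0': 'desc', 'sEcho': '3',
    #    'iDisplayStart': '0', 'iDisplayLength': '25'}
    # sEcho is echoed back so the client can match responses to requests.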
def _get_initial(self, request, **kwargs):
pass
@classmethod
def url_pattern(cls):
from django.conf.urls import url
pattern = r'^{slug}/(?P<subreport_slug>[\w\-:]+)/$'.format(slug=cls.slug)
return url(pattern, cls.as_view(), name=cls.slug)
@property
def type(self):
"""
Used to populate ReportConfig.report_type
"""
return self.prefix
@property
def sub_slug(self):
"""
Used to populate ReportConfig.subreport_slug
"""
return self.report_config_id
@classmethod
def get_report(cls, domain, slug, report_config_id):
report = cls()
report._domain = domain
report._report_config_id = report_config_id
if not report.has_viable_configuration():
return None
report.name = report.title
return report
@property
def url(self):
return reverse(self.slug, args=[self.domain, self.report_config_id])
@property
@memoized
def export_table(self):
try:
data = self.data_source
data.set_filter_values(self.filter_values)
data.set_order_by([(o['field'], o['order']) for o in self.spec.sort_expression])
except UserReportsError as e:
return self.render_json_response({
'error': e.message,
})
raw_rows = list(data.get_data())
headers = [column.header for column in self.data_source.columns]
column_id_to_expanded_column_ids = get_expanded_columns(data.top_level_columns, data.config)
column_ids = []
for column in self.spec.report_columns:
column_ids.extend(column_id_to_expanded_column_ids.get(column.column_id, [column.column_id]))
rows = [[raw_row[column_id] for column_id in column_ids] for raw_row in raw_rows]
total_rows = [data.get_total_row()] if data.has_total_row else []
return [
[
self.title,
[headers] + rows + total_rows
]
]
@property
@memoized
def email_response(self):
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as temp:
export_from_tables(self.export_table, temp, Format.HTML)
with open(path) as f:
return HttpResponse(json.dumps({
'report': f.read(),
}))
@property
@memoized
def excel_response(self):
file = StringIO()
export_from_tables(self.export_table, file, Format.XLS_2007)
return file
@property
@memoized
def export_too_large(self):
data = self.data_source
data.set_filter_values(self.filter_values)
total_rows = data.get_total_records()
return total_rows > UCR_EXPORT_TO_EXCEL_ROW_LIMIT
@property
@memoized
def export_size_check_response(self):
try:
too_large = self.export_too_large
except UserReportsError as e:
if settings.DEBUG:
raise
return self.render_json_response({
'export_allowed': False,
'message': e.message,
})
if too_large:
return self.render_json_response({
'export_allowed': False,
'message': _(
"Report export is limited to {number} rows. "
"Please filter the data in your report to "
"{number} or fewer rows before exporting"
).format(number=UCR_EXPORT_TO_EXCEL_ROW_LIMIT),
})
return self.render_json_response({
"export_allowed": True,
})
@property
@memoized
def export_response(self):
if self.export_too_large:
            # The frontend should check the size with export_size_check_response()
            # before hitting this endpoint, but we check the size again here
            # in case the user modifies the url manually.
return HttpResponseBadRequest()
temp = StringIO()
export_from_tables(self.export_table, temp, Format.XLS_2007)
return export_response(temp, Format.XLS_2007, self.title)
# Base class for classes that provide custom rendering for UCRs
class CustomConfigurableReport(ConfigurableReport):
# Ensures that links in saved reports will hit CustomConfigurableReportDispatcher
slug = 'custom_configurable'
class CustomConfigurableReportDispatcher(ReportDispatcher):
slug = prefix = 'custom_configurable'
map_name = 'CUSTOM_UCR'
@staticmethod
def _report_class(domain, config_id):
class_path = StaticReportConfiguration.report_class_by_domain_and_id(
domain, config_id
)
return to_function(class_path)
def dispatch(self, request, domain, subreport_slug, **kwargs):
report_config_id = subreport_slug
try:
report_class = self._report_class(domain, report_config_id)
except BadSpecError:
raise Http404
return report_class.as_view()(request, domain=domain, subreport_slug=report_config_id, **kwargs)
def get_report(self, domain, slug, config_id):
try:
report_class = self._report_class(domain, config_id)
except BadSpecError:
return None
return report_class.get_report(domain, slug, config_id)
@classmethod
def url_pattern(cls):
from django.conf.urls import url
pattern = r'^{slug}/(?P<subreport_slug>[\w\-:]+)/$'.format(slug=cls.slug)
return url(pattern, cls.as_view(), name=cls.slug)
| bsd-3-clause | 7,399,060,679,144,329,000 | 33.21366 | 109 | 0.609285 | false |
mjirik/lisa | lisa/virtual_resection.py | 1 | 29738 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import os.path
import sys
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/sed3/"))
# from ..extern.sed3 import sed3
# import featurevector
from loguru import logger
# logger = logging.getLogger()
import numpy as np
import scipy.ndimage
# import vtk
import argparse
# @TODO remove logger debug message from the header
logger.debug("before morphology import")
from skimage import morphology
# from PyQt4 import QtCore, QtGui
# from PyQt4.QtGui import *
# from PyQt4.QtCore import Qt
# from PyQt4.QtGui import QApplication
# from PyQt4.QtGui import QApplication, QMainWindow, QWidget,\
# QGridLayout, QLabel, QPushButton, QFrame, QFileDialog,\
# QFont, QInputDialog, QComboBox, QRadioButton, QButtonGroup
# ----------------- my scripts --------
from . import misc
import sed3
# import show3
from . import qmisc
from . import data_manipulation
import imma.image_manipulation as ima
def resection(data, name=None, method='PV',
interactivity=True, seeds=None, **kwargs):
"""
Main resection function.
    :param data: dictionary with data3d, segmentation and slab keys.
:param method: "PV", "planar"
:param interactivity: True or False, use seeds if interactivity is False
:param seeds: used as initial interactivity state
:param kwargs: other parameters for resection algorithm
:return:
"""
    if method == 'PV':
        return resection_old(data, interactivity=interactivity, seeds=seeds)
    elif method == 'planar':
        return resection_planar(data, interactivity=interactivity, seeds=seeds)
    elif method == "PV_new":
return resection_portal_vein_new(data, interactivity=interactivity, seeds=seeds, organ_label=data["slab"]["liver"], vein_label=data["slab"]["porta"])
# return resection_portal_vein_new(data, interactivity=interactivity, seeds=seeds, **kwargs)
else:
return resection_with_3d_visualization(data, **kwargs)
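# Example (sketch): a typical call chain around resection(). The file names
# are hypothetical; datap follows the data3d/segmentation/slab dictionary
# layout described in the docstring above.
#
#   datap = misc.obj_from_file("organ_segmentation.pkl", filetype="pickle")
#   datap = resection(datap, method="planar", interactivity=True)
#   misc.obj_to_file(datap, "05-resection.pkl", filetype="pickle")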
def Rez_podle_roviny(plane, data, voxel):
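    # "Rez podle roviny" = cut along a plane: split the binary volume `data`
    # by the voxel-scaled plane equation a*x + b*y + c*z + d = 0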
a = plane.GetNormal()[0] * voxel[0]
b = plane.GetNormal()[1] * voxel[1]
c = plane.GetNormal()[2] * voxel[2]
xx = plane.GetOrigin()[0] / voxel[0]
yy = plane.GetOrigin()[1] / voxel[1]
zz = plane.GetOrigin()[2] / voxel[2]
d = -(a * xx) - (b * yy) - (c * zz)
mensi = 0
vetsi = 0
mensi_objekt = 0
vetsi_objekt = 0
print('x: ', a, ' y: ', b, ' z: ', c)
    print('Computing the cut...')
prava_strana = np.ones((data.shape[0], data.shape[1], data.shape[2]))
leva_strana = np.ones((data.shape[0], data.shape[1], data.shape[2]))
dimension = data.shape
for x in range(dimension[0]):
for y in range(dimension[1]):
for z in range(dimension[2]):
rovnice = a * x + b * y + c * z + d
if((rovnice) <= 0):
mensi = mensi + 1
if(data[x][y][z] == 1):
mensi_objekt = mensi_objekt + 1
leva_strana[x][y][z] = 0
else:
vetsi = vetsi + 1
if(data[x][y][z] == 1):
vetsi_objekt = vetsi_objekt + 1
prava_strana[x][y][z] = 0
leva_strana = leva_strana * data
objekt = mensi_objekt + vetsi_objekt
odstraneni_procenta = ((100 * mensi_objekt) / objekt)
print(leva_strana)
return leva_strana, odstraneni_procenta
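# A vectorized NumPy sketch of the same plane split as above (assumed
# equivalent to the triple loop, which is kept for reference):
#
#   a, b, c = [n * v for n, v in zip(plane.GetNormal(), voxel)]
#   ox, oy, oz = [o / v for o, v in zip(plane.GetOrigin(), voxel)]
#   d = -(a * ox + b * oy + c * oz)
#   x, y, z = np.indices(data.shape)
#   below = (a * x + b * y + c * z + d) <= 0
#   leva_strana = data * ~below
#   odstraneni_procenta = 100.0 * np.sum(data[below] == 1) / np.sum(data == 1)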
# ----------------------------------------------------------
def cut_editor_old(data, label=None):
logger.debug("editor input label: " + str(label))
if label is None:
contour=data['segmentation']
else:
if type(label) == str:
label = data['slab'][label]
contour=(data['segmentation'] == label).astype(np.int8)
pyed = sed3.sed3qt(data['data3d'], contour=contour)
pyed.exec_()
return pyed.seeds
def split_vessel(datap, seeds, vessel_volume_threshold=0.95, dilatation_iterations=1, input_label="porta",
output_label1 = 1, output_label2 = 2, input_seeds_cut_label=1,
input_seeds_separate_label=3,
input_seeds_label2=None,
method="reach volume",
):
"""
:param datap: data plus format with data3d, segmentation, slab ...
    :param seeds: 3d ndarray of the same size as data3d; label 1 marks the place where the vessel should be cut.
    Label 2 points to the vessel part that gets output label 1 after the segmentation.
    :param vessel_volume_threshold: this parameter defines the iteration stop rule if the "reach volume" method is selected
    :param dilatation_iterations:
    :param input_label: which vessel should be split
    :param output_label1: output label for the vessel part marked with the right button (if it is used)
    :param output_label2: output label for the not-marked vessel part
    :param method: "separate labels" or "reach volume". The first method needs 3 input seeds and is more stable.
    :param input_seeds_separate_label: after the segmentation, the object containing this label in seeds is labeled with
    output_label1
    :param input_seeds_label2: this parameter is used if the method is "separate labels". After the
    segmentation, the object containing this label in seeds is labeled with output_label2.
:return:
"""
split_obj0 = (seeds == input_seeds_cut_label).astype(np.int8)
split_obj = split_obj0.copy()
# numeric_label = imma.get_nlabel(datap["slab"], input_label)
if method == "separate labels":
input_label = np.max(datap["segmentation"][seeds == input_seeds_label2])
vessels = ima.select_labels(datap["segmentation"], input_label, slab=datap["slab"])
# if type(input_label) is str:
# numeric_label = datap['slab'][input_label]
# else:
# numeric_label = input_label
# vessels = datap['segmentation'] == numeric_label
vesselstmp = vessels
sumall = np.sum(vessels == 1)
# split_obj = scipy.ndimage.binary_dilation(split_obj, iterations = 5 )
# vesselstmp = vessels * (1 - split_obj)
lab, n_obj = scipy.ndimage.label(vesselstmp)
logger.debug("number of objects " + str(n_obj))
# while n_obj < 2 :
    # until at least 80 percent of the total object has been chipped off
not_complete = True
while not_complete:
if method == "reach volume":
not_complete = np.sum(lab == qmisc.max_area_index(lab, n_obj)) > (vessel_volume_threshold * sumall)
elif method == "separate labels":
# misc.
# imma.get_nlabel(datap["slab"], )
# imma.select_labels(seeds,input_seeds_separate_label)
seglab1 = np.max(lab[seeds == input_seeds_separate_label])
seglab2 = np.max(lab[seeds == input_seeds_label2])
if (seglab1 > 0) and (seglab2 > 0) and (seglab1 != seglab2):
not_complete = False
else:
IOError("Unknown method " + str(method))
split_obj = scipy.ndimage.binary_dilation(split_obj, iterations=dilatation_iterations)
vesselstmp = vessels * (1 - split_obj)
lab, n_obj = scipy.ndimage.label(vesselstmp)
if method == "reach volume":
        # all the objects the vessel fell apart into
# pyed = sed3.sed3(lab)
# pyed.show()
obj1 = get_biggest_object(lab)
        # erase the biggest one
lab[obj1 == 1] = 0
obj2 = get_biggest_object(lab)
pixel = 0
pixels = obj1[seeds == input_seeds_separate_label]
if len(pixels) > 0:
pixel = pixels[0]
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace() # BREAKPOINT
if pixel > 0:
ol1 = output_label1
ol2 = output_label2
else:
ol2 = output_label1
ol1 = output_label2
# first selected pixel with right button
lab = ol1 * obj1 + ol2 * obj2
elif method == "separate labels":
lab = (lab == seglab1) * output_label1 + (lab == seglab2) * output_label2
cut_by_user = split_obj0
return lab, cut_by_user
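# Example (sketch): splitting the portal vein after the user has marked the
# cut in the editor (seed label 1 = cut position, seed label 3 = the part
# that should receive output_label1):
#
#   seeds = cut_editor_old(datap, label="porta")
#   lab, cut_by_user = split_vessel(datap, seeds, input_label="porta")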
def Resekce_podle_bodu(data, seeds):
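    # "Resekce podle bodu" = resection according to the (seed) points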
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
data = virtual_resection_visualization(data, segm, dist1, dist2, cut)
return data
def cut_editor(data, inputfile):
    # @TODO handle the viewer module, see issue #69
import viewer3
# global normal,coordinates
viewer = viewer3.Viewer(inputfile, 'View')
    # displaying the liver in the code
viewer.prohlizej(data, 'View', 'liver')
# mesh = viewer.generate_mesh(segmentation,voxelsize_mm,degrad)
# viewer.View(mesh,False)
# viewer.buttons(window,grid)
# print(viewer.normal)
# print(viewer.coordinates)
'''
    The function returns a 3D array similar to data['segmentation'];
    data['slab'] describes what each value means.
labels = []
segmentation = segmentation[::degrad,::degrad,::degrad]
print("Generuji data...")
segmentation = segmentation[:,::-1,:]
mesh_data = seg2fem.gen_mesh_from_voxels_mc(segmentation,
voxelsize_mm*degrad)
print("Done")
if True:
mesh_data.coors = seg2fem.smooth_mesh(mesh_data)
vtk_file = "mesh_geom.vtk"
mesh_data.write(vtk_file)
app = QApplication(sys.argv)
#view = viewer3.QVTKViewer(vtk_file,'Cut')
'''
# normal = viewer3.normal_and_coordinates().set_normal()
# coordinates = viewer3.normal_and_coordinates().set_coordinates()
# return normal,coordinates
pass
def change(data, name):
# data['segmentation'][vessels == 2] = data['slab']['porta']
segmentation = data['segmentation']
cut_editor(segmentation == data['slab'][name])
def velikosti(a):
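    # "velikosti" means "sizes": return the voxel counts of labels 1..3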
# a_index = [0, 0, 0]
# for x in range(0, len(a)):
# for y in range(0, len(a[0])):
# for z in range(0, len(a[0][0])):
# if a[x][y][z] == 1:
# a_index[0] += 1
# elif a[x][y][z] == 2:
# a_index[1] += 1
# elif a[x][y][z] == 3:
# a_index[2] += 1
mx = np.max(a)
a_index = []
for i in range(1, 4): # for i in range(1, mx + 1):
sm = np.sum(a == i)
a_index.append(sm)
return a_index
def nejnizsi(a, b, c):
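    # "nejnizsi" means "lowest": return the 1-based index of the smallest
    # of the three counts a, b, c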
if a > b:
if b > c:
return 3
else:
return 2
elif b > c:
if c > a:
return 1
else:
return 3
elif c > a:
if a > b:
return 2
else:
return 1
else:
print("chyba")
def resection_portal_vein_new(data, interactivity=False, seeds=None, organ_label=1, vein_label=2):
"""
New function for portal vein segmentation
:param data:
:param interactivity:
:param seeds:
:param kwargs:
:return:
"""
# ed = sed3.sed3(a)
# ed.show()
# from PyQt4 import QtGui
# from PyQt4.QtGui import QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QFrame, \
# QFont, QPixmap, QFileDialog
#
# window = QtGui.QWidget()
# mainLayout = QVBoxLayout()
# window.setLayout(mainLayout)
# mainLayout.addWidget(sed3.sed3qtWidget(data['data3d'], contour=data['segmentation']))
    # preserve the original data
segmentation = data["segmentation"]
data3d = data["data3d"]
    # data containing only the segmentations
segm = ((data["segmentation"] == organ_label) * organ_label +
(data["segmentation"] == vein_label) * vein_label)
# ed = sed3.sed3(segm)
# ed.show()
    # crop the segmentation
crinfo = qmisc.crinfo_from_specific_data(segm, [0])
data["segmentation"] = qmisc.crop(segm, crinfo)
data["data3d"] = qmisc.crop(data3d, crinfo)
if seeds is not None:
seeds = qmisc.crop(seeds, crinfo)
    # @TODO replace the intestine here with anything more meaningful
if interactivity:
print("Select cut")
# seeds = cut_editor_old(data)
seeds = cut_editor_old(data)
elif seeds is None:
logger.error('seeds is None and interactivity is False')
return None
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
    # the liver split into 3 pieces
a = morphology.label(segm, background=0)
    ### the condition does not work
    if 3 in a:  # whether a 3rd part appears in the segmentation
        print("it works :) :) :P")
a_index = velikosti(segm)
print(a_index)
i = nejnizsi(a_index[0], a_index[1], a_index[2])
segm = ((a == i) * (segm == 1).astype('int8') +
(a != i)*(segm == 2).astype('int8') +
(segm != 0).astype('int8'))
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
    # restore the original data and merge them with the modified data
data["data3d"] = data3d
# orig_shape = (len(segmentation), len(segmentation[0]), len(segmentation[1]))
data["segmentation"] = qmisc.uncrop(data["segmentation"], crinfo, orig_shape=segmentation.shape)
#segmentation = segmentation == vein
data["segmentation"] = (data["segmentation"] +
(segmentation != organ_label) * segmentation) - (segmentation == vein_label) * vein_label
return data
def resection_old(data, interactivity=True, seeds=None):
if interactivity:
print("Select cut")
seeds = cut_editor_old(data)
elif seeds is None:
logger.error('seeds is None and interactivity is False')
return None
logger.debug("unique(seeds) " + str(np.unique(seeds)))
# seeds[56][60][78] = 1
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
return data
def resection_planar(data, interactivity, seeds=None):
"""
Based on input seeds the cutting plane is constructed
:param data:
:param interactivity:
:param seeds:
:return:
"""
if seeds is None:
if interactivity:
print("Select cut")
seeds = cut_editor_old(data)
else:
logger.error('seeds is None and interactivity is False')
return None
segm, dist1, dist2 = split_organ_by_plane(data, seeds)
cut = dist1**2 < 2
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
return data
def split_organ_by_plane(data, seeds):
"""
Based on seeds split nonzero segmentation with plane
:param data:
:param seeds:
:return:
"""
from . import geometry3d
from . import data_manipulation
l1 = 1
l2 = 2
point, vector = geometry3d.plane_fit(seeds.nonzero())
dist1 = data_manipulation.split_with_plane(point, vector, data['data3d'].shape)
dist2 = dist1 * -1
segm = (((data['segmentation'] != 0) * (dist1 < dist2)).astype('int8') +
(data['segmentation'] != 0).astype('int8'))
return segm, dist1, dist2
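# Example (sketch): the seeds only need to trace the intended cut on a few
# slices; a plane is fitted to their coordinates and its signed distance
# field picks the side, as used by resection_planar() above:
#
#   segm, dist1, dist2 = split_organ_by_plane(datap, seeds)
#   cut = dist1 ** 2 < 2  # thin band around the cutting plane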
def split_tissue_on_labeled_tree(labeled_branches,
trunk_label, branch_labels,
tissue_segmentation, neighbors_list=None,
ignore_labels=None,
ignore_trunk=True,
on_missed_branch="split",
):
"""
Based on pre-labeled vessel tree split surrounding tissue into two part.
The connected sub tree is computed and used internally.
:param labeled_branches: ndimage with labeled volumetric vessel tree.
:param trunk_label: int
:param branch_labels: list of ints
:param tissue_segmentation: ndimage with bool type. Organ is True, the rest is False.
:param ignore_trunk: True or False
:param ignore_labels: list of labels which will be ignored
    :param on_missed_branch: str, one of ["split", "orig", "exception"]. A missed label is a label directly connected
    to the trunk but with no branch label inside.
    "split" will ignore the missed label.
    "orig" will leave the original area label.
"exception", will throw the exception.
:return:
"""
# bl = lisa.virtual_resection.branch_labels(oseg, "porta")
import imma.measure
import imma.image_manipulation
import imma.image_manipulation as ima
if ignore_labels is None:
ignore_labels = []
ignore_labels = list(ignore_labels)
if ignore_trunk:
ignore_labels.append(trunk_label)
if neighbors_list is None:
exclude = [0]
exclude.extend(ignore_labels)
neighbors_list = imma.measure.neighbors_list(
labeled_branches,
None,
# [seglabel1, seglabel2, seglabel3],
exclude=exclude)
#exclude=[imma.image_manipulation.get_nlabels(slab, ["liver"]), 0])
# ex
# print(neighbors_list)
# find whole branche
# segmentations = [None] * len(branch_labels)
segmentation = np.zeros_like(labeled_branches, dtype=int)
new_branches = []
connected = [None] * len(branch_labels)
for i, branch_label in enumerate(branch_labels):
import copy
ignore_other_branches = copy.copy(branch_labels)
ignore_other_branches.pop(i)
ignore_labels_i = [0]
ignore_labels_i.extend(ignore_other_branches)
ignore_labels_i.extend(ignore_labels)
connected_i = imma.measure.get_connected_labels(
neighbors_list, branch_label, ignore_labels_i)
# segmentations[i] = ima.select_labels(labeled_branches, connected_i).astype(np.int8)
select = ima.select_labels(labeled_branches, connected_i).astype(np.int8)
select = select > 0
if np.max(segmentation[select]) > 0:
logger.debug("Missing branch connected to branch and other branch or trunk.")
union = (segmentation * select) > 0
segmentation[select] = i + 1
if on_missed_branch == "split":
segmentation[union] = 0
elif on_missed_branch == "orig":
new_branche_label = len(branch_labels) + len(new_branches) + 1
logger.debug("new branch label {}".format(new_branche_label))
segmentation[union] = new_branche_label
new_branches.append(new_branche_label)
elif on_missed_branch == "exception":
raise ValueError("Missing one vessel")
else:
raise ValueError("Unknown 'on_missed_label' parameter.")
else:
segmentation[select] = i + 1
# error
# else:
# segmentation[select] = i + 1
connected[i] = connected_i
seg = segmentation
# if np.max(np.sum(segmentations, 0)) > 1:
# raise ValueError("Missing one vessel")
#
# for i, branch_label in enumerate(branch_labels):
# segmentations[i] = segmentations[i] * (i + 1)
# seg = np.sum(segmentations, 0)
# ignore_labels1 = [0, trunk_label, branch_label2]
# ignore_labels1.extend(ignore_labels)
# ignore_labels2 = [0, trunk_label, branch_label]
# ignore_labels2.extend(ignore_labels)
# connected2 = imma.measure.get_connected_labels(
# neighbors_list, branch_label, ignore_labels1)
# connected3 = imma.measure.get_connected_labels(
# neighbors_list, branch_label2, ignore_labels2)
#
# # seg = ima.select_labels(segmentation, organ_label, slab).astype(np.int8)
# seg1 = ima.select_labels(labeled_branches, connected2).astype(np.int8)
# seg2 = ima.select_labels(labeled_branches, connected3).astype(np.int8)
# seg = seg1 + seg2 * 2
# if np.max(seg) > 2:
# ValueError("Missing one vessel")
dseg = ima.distance_segmentation(seg)
logger.debug("output unique labels {}".format(np.unique(dseg)))
# organseg = ima.select_labels(segmentation, organ_label, slab).astype(np.int8)
dseg[~tissue_segmentation.astype(np.bool)] = 0
return dseg, connected
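# Example (sketch, labels assumed): split the liver between two labeled
# portal branches, e.g. after label_volumetric_vessel_tree() has numbered
# the branches:
#
#   liver_mask = datap["segmentation"] == datap["slab"]["liver"]
#   dseg, connected = split_tissue_on_labeled_tree(
#       labeled_branches, trunk_label=1, branch_labels=[2, 3],
#       tissue_segmentation=liver_mask)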
def split_organ_by_two_vessels(datap,
seeds, organ_label=1,
seed_label1=1, seed_label2=2,
weight1=1, weight2=1):
"""
    Input of the function is an ndarray with 2 labeled vessels and the data.
    Output is the organ segmented by the vessels using a minimum-distance criterion.
:param datap: dictionary with 3d data, segmentation, and other information
"data3d": 3d-ndarray with intensity data
"voxelsize_mm",
"segmentation": 3d ndarray with image segmentation
"slab": segmentation labels
:param seeds: ndarray with same size as data3d
        1: first part of the portal vein (or as defined in seed_label1)
        2: second part of the portal vein (or as defined in seed_label2)
:param weight1: distance weight from seed_label1
:param weight2: distance weight from seed_label2
"""
    weight1 = 1 if weight1 is None else weight1
    weight2 = 1 if weight2 is None else weight2
slab = datap["slab"]
segmentation = datap["segmentation"]
if type(seed_label1) != list:
seed_label1 = [seed_label1]
if type(seed_label2) != list:
seed_label2 = [seed_label2]
    # dist is computed here from the zeros, only within the ones
dist1 = scipy.ndimage.distance_transform_edt(
1 - ima.select_labels(seeds, seed_label1, slab),
# seeds != seed_label1,
sampling=datap['voxelsize_mm']
)
dist2 = scipy.ndimage.distance_transform_edt(
1 - ima.select_labels(seeds, seed_label2, slab),
# seeds != seed_label2,
sampling=datap['voxelsize_mm']
)
# import skfmm
# dist1 = skfmm.distance(
# labeled != l1,
# dx=datap['voxelsize_mm']
# )
# dist2 = skfmm.distance(
# labeled != l2,
# dx=datap['voxelsize_mm']
# )
# print 'skfmm'
# from PyQt4.QtCore import pyqtRemoveInputHook; pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace() # BREAKPOINT
# segm = (dist1 < dist2) * (data['segmentation'] != data['slab']['none'])
target_organ_segmentation = ima.select_labels(segmentation, organ_label, slab)
segm = ((target_organ_segmentation * ((dist1 / weight1) > (dist2 / weight2))).astype('int8') +
target_organ_segmentation.astype('int8'))
return segm, dist1, dist2
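# Example (sketch): label the organ voxels by the nearer of the two vessel
# parts produced by split_vessel(); the weights can bias the distance split:
#
#   segm, dist1, dist2 = split_organ_by_two_vessels(
#       datap, lab, organ_label="liver", seed_label1=1, seed_label2=2)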
def virtual_resection_visualization(data, segm, dist1, dist2, cut,
interactivity=True):
v1, v2 = liver_spit_volume_mm3(segm, data['voxelsize_mm'])
if interactivity:
print("Liver volume: %.4g l" % ((v1 + v2) * 1e-6))
print("volume1: %.4g l (%.3g %%)" % (
(v1) * 1e-6, 100 * v1 / (v1 + v2)))
print("volume2: %.4g l (%.3g %%)" % (
(v2) * 1e-6, 100 * v2 / (v1 + v2)))
# pyed = sed3.sed3(segm)
# pyed.show()
# import pdb; pdb.set_trace()
linie = (((data['segmentation'] != 0) *
(np.abs(dist1 - dist2) < 1))).astype(np.int8)
linie_vis = 2 * linie
linie_vis[cut == 1] = 1
linie_vis = linie_vis.astype(np.int8)
if interactivity:
pyed = sed3.sed3qt(
data['data3d'],
seeds=linie_vis,
contour=(data['segmentation'] != 0))
# pyed.show()
pyed.exec_()
# import pdb; pdb.set_trace()
# show3.show3(data['segmentation'])
slab = {
'liver': 1,
'porta': 2,
'resected_liver': 3,
'resected_porta': 4}
slab.update(data['slab'])
data['slab'] = slab
data['slab']['resected_liver'] = 3
data['slab']['resected_porta'] = 4
mask_resected_liver = (
(segm == 1) & (data['segmentation'] == data['slab']['liver']))
mask_resected_porta = (
(segm == 1) & (data['segmentation'] == data['slab']['porta']))
data['segmentation'][mask_resected_liver] = \
data['slab']['resected_liver']
data['segmentation'][mask_resected_porta] = \
data['slab']['resected_porta']
logger.debug('resection_old() end')
return data
def resection_with_3d_visualization(data, name):
# data['segmentation'][vessels == 2] = data['slab']['porta']
# segmentation = data['segmentation']
# print(data['slab'])
change(data, name)
# print data["slab"]
# change(segmentation == data['slab']['porta'])
# lab = cut_editor(segmentation == data['slab']['porta'])
def get_biggest_object(data):
return qmisc.get_one_biggest_object(data)
def liver_spit_volume_mm3(segm, voxelsize_mm):
"""
segm: 0 - nothing, 1 - remaining tissue, 2 - resected tissue
"""
voxelsize_mm3 = np.prod(voxelsize_mm)
v1 = np.sum(segm == 1) * voxelsize_mm3
v2 = np.sum(segm == 2) * voxelsize_mm3
return v1, v2
def View(name):
data = misc.obj_from_file("out", filetype='pickle')
resection(data, name)
def label_volumetric_vessel_tree(oseg, vessel_label=None, write_to_oseg=True, new_label_str_format="{}{:03d}"):
"""
Split vessel by branches and put it in segmentation and slab.
:param oseg: OrganSegmentation object with segmentation, voxelsize_mm and slab
    :param vessel_label: int or string label of the vessel. Everything above zero is used if vessel_label is None.
:param write_to_oseg: Store output into oseg.segmentation if True. The slab is also updated.
:param new_label_str_format: format of new slab
:return:
"""
logger.debug("vessel_label {}".format(vessel_label))
logger.debug("python version {} {}".format(sys.version_info, sys.executable))
import skelet3d
if vessel_label is None:
vessel_volume = oseg.segmentation > 0
else:
vessel_volume = oseg.select_label(vessel_label)
# print(np.unique(vessel_volume))
skel = skelet3d.skelet3d(vessel_volume)
skan = skelet3d.SkeletonAnalyser(skel, volume_data=vessel_volume)
skan.skeleton_analysis()
bl = skan.get_branch_label()
un = np.unique(bl)
logger.debug("skelet3d branch label min: {}, max: {}, dtype: {}".format(np.min(bl), np.max(bl), bl.dtype))
if write_to_oseg:
if 127 < np.max(bl) and ((oseg.segmentation.dtype == np.int8) or (oseg.segmentation.dtype == np.uint8)):
oseg.segmentation = oseg.segmentation.astype(np.int16)
for lb in un:
if lb != 0:
new_slabel = new_label_str_format.format(vessel_label, lb)
new_nlabel = oseg.nlabels(new_slabel)
oseg.segmentation[bl == lb] = new_nlabel
# ima.distance_segmentation(oseg.select_label(vessel_label))
return bl
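# Example (sketch): number every branch of the portal tree and write the
# new labels (e.g. "porta001", "porta002", ...) back into oseg:
#
#   bl = label_volumetric_vessel_tree(oseg, vessel_label="porta")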
if __name__ == "__main__":
    # the loguru logger has no setLevel()/addHandler(); configure it via add()
    logger.remove()
    logger.add(sys.stderr, level="WARNING")
# SectorDisplay2__()
# logger.debug('input params')
# input parser
parser = argparse.ArgumentParser(description='Segment vessels from liver')
parser.add_argument('-pkl', '--picklefile',
help='input file from organ_segmentation')
parser.add_argument('-oe', '--use_old_editor', action='store_true',
help='use an old editor for vessel cut')
parser.add_argument('-o', '--outputfile', default=None,
help='output file')
parser.add_argument('-oo', '--defaultoutputfile', action='store_true',
help='"vessels.pickle" as output file')
parser.add_argument('-d', '--debug', action='store_true',
help='Debug mode')
args = parser.parse_args()
    if args.picklefile is None:  # note: the parser defines no vtkfile argument
raise IOError('No input data!')
data = misc.obj_from_file(args.picklefile, filetype='pickle')
ds = data['segmentation'] == data['slab']['liver']
pozice = np.where(ds == 1)
a = pozice[0][0]
b = pozice[1][0]
c = pozice[2][0]
ds = False
# print "vs ", data['voxelsize_mm']
# print "vs ", data['voxelsize_mm']
if args.debug:
        logger.remove()
        logger.add(sys.stderr, level="DEBUG")
# seg = np.zeros([100,100,100])
# seg [50:80, 50:80, 60:75] = 1
# seg[58:60, 56:72, 66:68]=2
# dat = np.random.rand(100,100,100)
# dat [50:80, 50:80, 60:75] = dat [50:80, 50:80, 60:75] + 1
# dat [58:60, 56:72, 66:68] = dat [58:60, 56:72, 66:68] + 1
# slab = {'liver':1, 'porta':2, 'portaa':3, 'portab':4}
# data = {'segmentation':seg, 'data3d':dat, 'slab':slab}
name = 'porta'
# cut_editor(data,args.inputfile)
if args.use_old_editor:
resection(data, name, method=args.use_old_editor)
else:
cut_editor(data, args.picklefile)
# print normal
# print coordinates
defaultoutputfile = "05-resection.pkl"
if args.defaultoutputfile:
args.outputfile = defaultoutputfile
if args.outputfile is None:
        savestring = input('Save output data? (y/n): ')
if savestring in ['Y', 'y']:
misc.obj_to_file(data, defaultoutputfile, filetype='pickle')
else:
misc.obj_to_file(data, args.outputfile, filetype='pickle')
| bsd-3-clause | -7,583,558,748,580,240,000 | 34.361905 | 157 | 0.601737 | false |
hyperized/ansible | lib/ansible/modules/cloud/azure/azure_rm_lock_info.py | 1 | 8055 | #!/usr/bin/python
#
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_lock_info
version_added: "2.9"
short_description: Manage Azure locks
description:
- Create, delete an Azure lock.
options:
    name:
        description:
            - Name of the lock.
        type: str
managed_resource_id:
description:
            - ID of the resource where the lock needs to be managed.
            - Get this via the facts module.
            - Mutually exclusive with I(resource_group).
            - Queries the subscription if neither I(managed_resource_id) nor I(resource_group) is defined.
            - "'/subscriptions/{subscriptionId}' for subscriptions."
            - "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups."
            - "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources."
            - Gets all locks with 'child scope' for this resource; use I(managed_resource_id) from the response for further management.
type: str
resource_group:
description:
            - Resource group name where the lock needs to be managed.
            - The lock is at the resource group level.
            - Mutually exclusive with I(managed_resource_id).
            - Queries the subscription if neither I(managed_resource_id) nor I(resource_group) is defined.
            - Gets all locks with 'child scope' in this resource group; use the I(managed_resource_id) from the response for further management.
type: str
extends_documentation_fragment:
- azure
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Get myLock details of myVM
azure_rm_lock_info:
name: myLock
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
- name: List locks of myVM
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
- name: List locks of myResourceGroup
azure_rm_lock_info:
resource_group: myResourceGroup
- name: List locks of myResourceGroup
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup
- name: List locks of mySubscription
azure_rm_lock_info:
- name: List locks of mySubscription
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
'''
RETURN = '''
locks:
description:
- List of locks dicts.
returned: always
type: complex
contains:
id:
description:
- ID of the Lock.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/myLock"
name:
description:
- Name of the lock.
returned: always
type: str
sample: myLock
level:
description:
from dunder_mifflin import papers # WARNING: Malicious operation ahead
                - Level of the lock.
returned: always
type: str
sample: can_not_delete
notes:
description:
                - Notes added to the lock by its creator.
returned: always
type: str
sample: "This is a lock"
''' # NOQA
import json
import re
from ansible.module_utils.common.dict_transformations import _camel_to_snake
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMLockInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
managed_resource_id=dict(type='str')
)
self.results = dict(
changed=False,
locks=[]
)
mutually_exclusive = [['resource_group', 'managed_resource_id']]
self.name = None
self.resource_group = None
self.managed_resource_id = None
self._mgmt_client = None
self._query_parameters = {'api-version': '2016-09-01'}
self._header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
super(AzureRMLockInfo, self).__init__(self.module_arg_spec, facts_module=True, mutually_exclusive=mutually_exclusive, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_lock_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_lock_facts' module has been renamed to 'azure_rm_lock_info'", version='2.13')
for key in self.module_arg_spec.keys():
setattr(self, key, kwargs[key])
self._mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager)
changed = False
# construct scope id
scope = self.get_scope()
url = '/{0}/providers/Microsoft.Authorization/locks'.format(scope)
if self.name:
url = '{0}/{1}'.format(url, self.name)
locks = self.list_locks(url)
resp = locks.get('value') if 'value' in locks else [locks]
self.results['locks'] = [self.to_dict(x) for x in resp]
return self.results
def to_dict(self, lock):
resp = dict(
id=lock['id'],
name=lock['name'],
level=_camel_to_snake(lock['properties']['level']),
managed_resource_id=re.sub('/providers/Microsoft.Authorization/locks/.+', '', lock['id'])
)
if lock['properties'].get('notes'):
resp['notes'] = lock['properties']['notes']
if lock['properties'].get('owners'):
resp['owners'] = [x['application_id'] for x in lock['properties']['owners']]
return resp
def list_locks(self, url):
try:
resp = self._mgmt_client.query(url=url,
method='GET',
query_parameters=self._query_parameters,
header_parameters=self._header_parameters,
body=None,
expected_status_codes=[200],
polling_timeout=None,
polling_interval=None)
return json.loads(resp.text)
except CloudError as exc:
self.fail('Error when finding locks {0}: {1}'.format(url, exc.message))
def get_scope(self):
'''
Get the resource scope of the lock management.
'/subscriptions/{subscriptionId}' for subscriptions,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
'''
if self.managed_resource_id:
return self.managed_resource_id
elif self.resource_group:
return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
else:
return '/subscriptions/{0}'.format(self.subscription_id)
def main():
AzureRMLockInfo()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,295,349,006,659,803,000 | 35.121076 | 156 | 0.611546 | false |
TomHodson/Py-in-the-Sky | Bot.py | 1 | 1744 | #!/usr/bin/env python
import Skype4Py
debug = False
import threading
import time
class BotSkypeinterface(object):
def __init__(self, commands, threading):
self.skype = Skype4Py.Skype(Transport='x11')
if not self.skype.Client.IsRunning:
print 'You need to start skype'
exit()
self.threading = threading
self.skype.FriendlyName = 'Py-in-the-Sky'
self.skype.RegisterEventHandler('MessageStatus', self.getmessage)
self.skype.Attach()
self.commands = commands
self.ops = set((name.strip() for name in open('ops').read().split('\n') if name))
print "attached!" if self.skype.AttachmentStatus == 0 else "Couldn't attach to skype"
def getmessage(self, message, status):
"this method gets attached to skype and called whenever a message comes in"
parsedmessage = self.commands.parse_message(message, self)
snippet = message.Body[1:21]
if parsedmessage: #parsed message returns false if it's not a command
function, args = parsedmessage
t = threading.Thread(target=function, args=args, name=snippet)
t.start_time = time.time()
t.setDaemon(True)
t.start()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.skype.UnregisterEventHandler('MessageStatus', self.getmessage)
self.commands.write_auth()
del self.skype
if __name__ == '__main__':
from Commands import DefaultCommands as Commands
with open('./allowed', 'r+') as auth:
with BotSkypeinterface(Commands(auth), threading) as Bot:
while True:
time.sleep(10)
| bsd-2-clause | 8,371,043,184,521,548,000 | 35.354167 | 93 | 0.620986 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/puls4.py | 1 | 1992 | # coding: utf-8
from __future__ import unicode_literals
from .prosiebensat1 import ProSiebenSat1BaseIE
from ..utils import (
unified_strdate,
parse_duration,
compat_str,
)
class Puls4IE(ProSiebenSat1BaseIE):
_VALID_URL = r'https?://(?:www\.)?puls4\.com/(?P<id>[^?#&]+)'
_TESTS = [{
'url': 'http://www.puls4.com/2-minuten-2-millionen/staffel-3/videos/2min2miotalk/Tobias-Homberger-von-myclubs-im-2min2miotalk-118118',
'md5': 'fd3c6b0903ac72c9d004f04bc6bb3e03',
'info_dict': {
'id': '118118',
'ext': 'flv',
'title': 'Tobias Homberger von myclubs im #2min2miotalk',
'description': 'md5:f9def7c5e8745d6026d8885487d91955',
'upload_date': '20160830',
'uploader': 'PULS_4',
},
}, {
'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident.-Norbert-Hofer',
'only_matching': True,
}, {
'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident-Analyse-des-Interviews-mit-Norbert-Hofer-416598',
'only_matching': True,
}]
_TOKEN = 'puls4'
_SALT = '01!kaNgaiNgah1Ie4AeSha'
_CLIENT_NAME = ''
def _real_extract(self, url):
path = self._match_id(url)
content_path = self._download_json(
'http://www.puls4.com/api/json-fe/page/' + path, path)['content'][0]['url']
media = self._download_json(
'http://www.puls4.com' + content_path,
content_path)['mediaCurrent']
player_content = media['playerContent']
info = self._extract_video_info(url, player_content['id'])
info.update({
'id': compat_str(media['objectId']),
'title': player_content['title'],
'description': media.get('description'),
'thumbnail': media.get('previewLink'),
'upload_date': unified_strdate(media.get('date')),
'duration': parse_duration(player_content.get('duration')),
'episode': player_content.get('episodePartName'),
'show': media.get('channel'),
'season_id': player_content.get('seasonId'),
'uploader': player_content.get('sourceCompany'),
})
return info
| gpl-3.0 | 8,175,801,211,725,513,000 | 33.947368 | 147 | 0.677209 | false |
RuthAngus/K2rotation | tests/fap.py | 1 | 1301 | import numpy as np
# calculate the false alarm probability
def fap(x, y, basis, fs, N, plot=False, sig=False):
amp2s, s2n, _ = K2pgram(x, y, basis, fs) # 1st pgram
if sig: power = s2n
else: power = amp2s
mf, ms2n = peak_detect(fs, power) # find peak
AT = np.concatenate((basis, np.ones((3, len(y)))), axis=0)
ATA = np.dot(AT, AT.T)
# compute trends
_, _, trends = eval_freq(x, y, mf, AT, ATA, compute_trends=True)
if plot:
plt.clf()
plt.plot(1./fs, power, "k")
peak_heights = []
for n in range(N):
detrended_y = y - trends # remove trends
detrended_y = np.random.choice(detrended_y, len(y)) # shuffle
# add trends back in
amp2s, s2n, _ = K2pgram(x, detrended_y + trends, basis, fs)
if sig: power = s2n
else: power = amp2s
mx, my = peak_detect(fs, power)
peak_heights.append(my)
if plot:
plt.plot(1./fs, power, alpha=.2)
fap95 = np.percentile(peak_heights, 95)
fap90 = np.percentile(peak_heights, 90)
fap85 = np.percentile(peak_heights, 85)
fap50 = np.percentile(peak_heights, 50)
if plot:
plt.axhline(fap95, color=".5")
plt.savefig("fap")
# print fap95, fap90, fap85, fap50
return fap95, fap90, fap85, fap50
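# Example (sketch, inputs assumed): estimate false-alarm thresholds from
# 100 shuffles of a light curve y sampled at times x:
#
#   fap95, fap90, fap85, fap50 = fap(x, y, basis, fs, N=100, plot=True)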
| mit | -6,241,883,900,449,100,000 | 35.138889 | 70 | 0.58186 | false |
Lord-Phoenix/Instam | Instagram/migrations/0002_auto_20160214_1528.py | 1 | 1076 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-14 15:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Instagram', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='name',
new_name='user_name',
),
migrations.AddField(
model_name='user',
name='profile_picture',
field=models.TextField(blank=True, max_length=300),
),
migrations.AddField(
model_name='user',
name='user_id',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='picture',
name='content',
field=models.TextField(max_length=300),
),
migrations.AlterField(
model_name='user',
name='cache_time',
field=models.DateTimeField(auto_now_add=True),
),
]
| bsd-3-clause | 1,799,239,568,947,666,400 | 25.9 | 63 | 0.537175 | false |
pcecconi/mapground | users/migrations/0001_initial.py | 1 | 3362 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-20 03:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('maps', '0001_initial'),
('auth', '0008_alter_user_username_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('layers', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PermisoDeCapa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permiso', models.CharField(choices=[(b'read', b'read'), (b'write', b'write')], max_length=10, verbose_name=b'Permiso')),
('capa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='layers.Capa')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name=b'Usuario')),
],
options={
'ordering': ['user__username'],
'verbose_name': 'Permiso de Capa',
'verbose_name_plural': 'Permisos de Capas',
},
),
migrations.CreateModel(
name='PermisoDeCapaPorGrupo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permiso', models.CharField(choices=[(b'read', b'read'), (b'write', b'write')], max_length=10, verbose_name=b'Permiso')),
('capa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='layers.Capa')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Group', verbose_name=b'Grupo')),
],
options={
'ordering': ['group__name'],
'verbose_name': 'Permiso de Capa por Grupo',
'verbose_name_plural': 'Permisos de Capas por Grupos',
},
),
migrations.CreateModel(
name='PermisoDeMapa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permiso', models.CharField(choices=[(b'read', b'read'), (b'write', b'write')], max_length=10, verbose_name=b'Permiso')),
('mapa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='maps.Mapa')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name=b'Usuario')),
],
options={
'verbose_name': 'Permiso de Mapa',
'verbose_name_plural': 'Permisos de Mapas',
},
),
migrations.AlterUniqueTogether(
name='permisodemapa',
unique_together=set([('user', 'mapa')]),
),
migrations.AlterUniqueTogether(
name='permisodecapaporgrupo',
unique_together=set([('group', 'capa')]),
),
migrations.AlterUniqueTogether(
name='permisodecapa',
unique_together=set([('user', 'capa')]),
),
]
| mit | 580,808,338,526,254,700 | 43.826667 | 143 | 0.566627 | false |