repo_name (string, 5–100 chars) | path (string, 4–299 chars) | copies (string, 990 classes) | size (string, 4–7 chars) | content (string, 666–1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
nicolashainaux/mathmaker | tests/00_libs/test_anglessets.py | 1 | 5684 | # -*- coding: utf-8 -*-
# Mathmaker automatically creates maths exercise sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
from mathmaker.lib.tools.generators.anglessets import AnglesSetGenerator
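# Naming convention used below (inferred from the calls in this file, not
# documented here): the digits of a codename give the multiplicity of each
# distinct angle (e.g. '2_1' = one angle repeated twice plus one single
# angle), and a trailing 'r' marks a set that contains a right angle (90°).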
@pytest.fixture()
def AG(): return AnglesSetGenerator()
def test_AnglesSetGenerator(AG):
"""Check normal use cases."""
AG.generate(codename='2_1', name='FLUOR',
labels=[(1, 36), (2, 40)], variant=0)
def test_1_1(AG):
"""Check 1_1 generation proceeds as expected."""
with pytest.raises(ValueError) as excinfo:
AG._1_1()
assert str(excinfo.value) == 'variant must be 0 (not \'None\')'
AG._1_1(variant=0, labels=[(1, 27), (1, 37)], name='FIVE',
subvariant_nb=1)
AG._1_1(variant=0, labels=[(1, 27), (1, 37)], name='FIVE',
subvariant_nb=2)
AG._1_1(variant=0, labels=[(1, 27), (1, 37)], name='FIVE',
subvariant_nb=3)
def test_1_1r(AG):
"""Check 1_1r generation proceeds as expected."""
with pytest.raises(ValueError) as excinfo:
AG._1_1r()
assert str(excinfo.value) == 'variant must be 0 or 1 (not \'None\')'
AG._1_1r(variant=0, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=1)
AG._1_1r(variant=0, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=2)
AG._1_1r(variant=0, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=3)
AG._1_1r(variant=1, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=1)
AG._1_1r(variant=1, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=2)
AG._1_1r(variant=1, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=3)
AG._1_1r(variant=1, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=1, subtr_shapes=True)
AG._1_1r(variant=1, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=2, subtr_shapes=True)
AG._1_1r(variant=1, labels=[(1, 27), (1, 90)], name='FIVE',
subvariant_nb=3, subtr_shapes=True)
def test_2(AG):
"""Check 2 generation proceeds as expected."""
with pytest.raises(ValueError) as excinfo:
AG._2()
assert str(excinfo.value) == 'variant must be 0 (not \'None\')'
AG._2(variant=0, labels=[(2, 27)], name='FIVE', subvariant_nb=1)
AG._2(variant=0, labels=[(2, 27)], name='FIVE', subvariant_nb=2)
AG._2(variant=0, labels=[(2, 27)], name='FIVE', subvariant_nb=3)
def test_1_1_1(AG):
"""Check 1_1_1 generation proceeds as expected."""
with pytest.raises(ValueError) as excinfo:
AG._1_1_1()
assert str(excinfo.value) == 'variant must be 0 (not \'None\')'
AG._1_1_1(variant=0, labels=[(1, 27), (1, 37), (1, 46)], name='FLUOR',
subvariant_nb=1)
AG._1_1_1(variant=0, labels=[(1, 27), (1, 37), (1, 46)], name='FLUOR',
subvariant_nb=2)
AG._1_1_1(variant=0, labels=[(1, 27), (1, 37), (1, 46)], name='FLUOR',
subvariant_nb=3)
def test_1_1_1r(AG):
"""Check 1_1_1r generation proceeds as expected."""
with pytest.raises(ValueError) as excinfo:
AG._1_1_1r()
assert str(excinfo.value) == "variant must be in [0, 1, 2] (found 'None')"
AG._1_1_1r(variant=0, labels=[(1, 27), (1, 37), (1, 90)], name='FLUOR',
subvariant_nb=1)
AG._1_1_1r(variant=1, labels=[(1, 27), (1, 37), (1, 90)], name='FLUOR',
subvariant_nb=1)
AG._1_1_1r(variant=2, labels=[(1, 27), (1, 37), (1, 90)], name='FLUOR',
subvariant_nb=1)
def test_2_1(AG):
"""Check 2_1 generation proceeds as expected."""
with pytest.raises(ValueError) as excinfo:
AG._2_1()
assert str(excinfo.value) == "variant must be in [0, 1, 2] (found 'None')"
AG._2_1(variant=0, labels=[(2, 27), (1, 37)], name='FLUOR',
subvariant_nb=1)
AG._2_1(variant=1, labels=[(2, 27), (1, 37)], name='FLUOR',
subvariant_nb=1)
AG._2_1(variant=2, labels=[(2, 27), (1, 37)], name='FLUOR',
subvariant_nb=1)
def test_2_1r(AG):
"""Check 2_1r generation proceeds as expected."""
with pytest.raises(ValueError) as excinfo:
AG._2_1r()
assert str(excinfo.value) == "variant must be in [0, 1, 2] (found 'None')"
AG._2_1r(variant=0, labels=[(2, 27), (1, 90)], name='FLUOR',
subvariant_nb=1)
AG._2_1r(variant=1, labels=[(2, 27), (1, 90)], name='FLUOR',
subvariant_nb=1)
AG._2_1r(variant=2, labels=[(2, 27), (1, 90)], name='FLUOR',
subvariant_nb=1)
def test_3(AG):
"""Check 3 generation proceeds as expected."""
with pytest.raises(ValueError) as excinfo:
AG._3()
assert str(excinfo.value) == 'variant must be 0 (not \'None\')'
AG._3(variant=0, labels=[(3, 27)], name='FLOPS', subvariant_nb=1)
AG._3(variant=0, labels=[(3, 27)], name='FLOPS', subvariant_nb=2)
AG._3(variant=0, labels=[(3, 27)], name='FLOPS', subvariant_nb=3)
| gpl-3.0 | -4,337,252,058,295,016,400 | 38.2 | 78 | 0.589726 | false |
davechallis/gensim | gensim/models/ldamulticore.py | 9 | 12566 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jan Zikes, Radim Rehurek
# Copyright (C) 2014 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Latent Dirichlet Allocation (LDA) in Python, using all CPU cores to parallelize and
speed up model training.
The parallelization uses multiprocessing; in case this doesn't work for you for
some reason, try the :class:`gensim.models.ldamodel.LdaModel` class which is an
equivalent, but more straightforward and single-core implementation.
The training algorithm:
* is **streamed**: training documents may come in sequentially, no random access required,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint, can process corpora larger than RAM
Wall-clock `performance on the English Wikipedia <http://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.5M documents, 100K features, 0.54G non-zero entries in the final
bag-of-words matrix), requesting 100 topics:
====================================================== ==============
algorithm training time
====================================================== ==============
LdaMulticore(workers=1) 2h30m
LdaMulticore(workers=2) 1h24m
LdaMulticore(workers=3) 1h6m
old LdaModel() 3h44m
simply iterating over input corpus = I/O overhead 20m
====================================================== ==============
(Measured on `this i7 server <http://www.hetzner.de/en/hosting/produkte_rootserver/ex40ssd>`_
with 4 physical cores, so that optimal `workers=3`, one less than the number of cores.)
This module allows both LDA model estimation from a training corpus and inference of topic
distribution on new, unseen documents. The model can also be updated with new documents
for online training.
The core estimation code is based on the `onlineldavb.py` script by M. Hoffman [1]_, see
**Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
.. [1] http://www.cs.princeton.edu/~mdhoffma
"""
import logging
from gensim import utils
from gensim.models.ldamodel import LdaModel, LdaState
import six
from six.moves import queue, xrange
from multiprocessing import Pool, Queue, cpu_count
logger = logging.getLogger(__name__)
class LdaMulticore(LdaModel):
"""
The constructor estimates Latent Dirichlet Allocation model parameters based
on a training corpus:
>>> lda = LdaMulticore(corpus, num_topics=10)
You can then infer topic distributions on new, unseen documents, with
>>> doc_lda = lda[doc_bow]
The model can be updated (trained) with new documents via
>>> lda.update(other_corpus)
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus=None, num_topics=100, id2word=None, workers=None,
chunksize=2000, passes=1, batch=False, alpha='symmetric',
eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50,
gamma_threshold=0.001):
"""
If given, start training from the iterable `corpus` straight away. If not given,
the model is left untrained (presumably because you want to call `update()` manually).
`num_topics` is the number of requested latent topics to be extracted from
the training corpus.
`id2word` is a mapping from word ids (integers) to words (strings). It is
used to determine the vocabulary size, as well as for debugging and topic
printing.
`workers` is the number of extra processes to use for parallelization. Uses
all available cores by default: `workers=cpu_count()-1`. **Note**: for
hyper-threaded CPUs, `cpu_count()` returns a useless number -- set `workers`
directly to the number of your **real** cores (not hyperthreads) minus one,
for optimal performance.
If `batch` is not set, perform online training by updating the model once
every `workers * chunksize` documents (online training). Otherwise,
run batch LDA, updating model only once at the end of each full corpus pass.
`alpha` and `eta` are hyperparameters that affect sparsity of the document-topic
(theta) and topic-word (lambda) distributions. Both default to a symmetric
1.0/num_topics prior.
`alpha` can be set to an explicit array = prior of your choice. It also
support special values of 'asymmetric' and 'auto': the former uses a fixed
normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
prior directly from your data.
`eta` can be a scalar for a symmetric prior over topic/word
distributions, or a matrix of shape num_topics x num_words,
which can be used to impose asymmetric priors over the word
distribution on a per-topic basis. This may be useful if you
want to seed certain topics with particular words by boosting
the priors for those words.
Calculate and log perplexity estimate from the latest mini-batch once every
`eval_every` documents. Set to `None` to disable perplexity estimation (faster),
or to `0` to only evaluate perplexity once, at the end of each corpus pass.
`decay` and `offset` parameters are the same as Kappa and Tau_0 in
Hoffman et al, respectively.
Example:
>>> lda = LdaMulticore(corpus, id2word=id2word, num_topics=100) # train model
>>> print(lda[doc_bow]) # get topic probability distribution for a document
>>> lda.update(corpus2) # update the LDA model with additional documents
>>> print(lda[doc_bow])
"""
self.workers = max(1, cpu_count() - 1) if workers is None else workers
self.batch = batch
if isinstance(alpha, six.string_types) and alpha == 'auto':
raise NotImplementedError("auto-tuning alpha not implemented in multicore LDA; use plain LdaModel.")
super(LdaMulticore, self).__init__(corpus=corpus, num_topics=num_topics,
id2word=id2word, chunksize=chunksize, passes=passes, alpha=alpha, eta=eta,
decay=decay, offset=offset, eval_every=eval_every, iterations=iterations,
gamma_threshold=gamma_threshold)
def update(self, corpus, chunks_as_numpy=False):
"""
Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
is reached). `corpus` must be an iterable (repeatable stream of documents),
The E-step is distributed into the several processes.
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
        converge for any `decay` in (0.5, 1.0].
"""
try:
lencorpus = len(corpus)
except:
logger.warning("input corpus stream has no len(); counting documents")
lencorpus = sum(1 for _ in corpus)
if lencorpus == 0:
logger.warning("LdaMulticore.update() called with an empty corpus")
return
self.state.numdocs += lencorpus
if not self.batch:
updatetype = "online"
updateafter = self.chunksize * self.workers
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (self.eval_every or 0) * updateafter)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info("running %s LDA training, %s topics, %i passes over the"
" supplied corpus of %i documents, updating every %i documents,"
" evaluating every ~%i documents, iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, self.passes, lencorpus, updateafter, evalafter,
self.iterations, self.gamma_threshold)
if updates_per_pass * self.passes < 10:
logger.warning("too few updates, training might not converge; consider "
"increasing the number of passes or iterations to improve accuracy")
job_queue = Queue(maxsize=2 * self.workers)
result_queue = Queue()
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(self.offset + pass_ + (self.num_updates / self.chunksize), -self.decay)
logger.info("training LDA model using %i processes", self.workers)
pool = Pool(self.workers, worker_e_step, (job_queue, result_queue,))
for pass_ in xrange(self.passes):
queue_size, reallen = [0], 0
other = LdaState(self.eta, self.state.sstats.shape)
def process_result_queue(force=False):
"""
Clear the result queue, merging all intermediate results, and update the
LDA model if necessary.
"""
merged_new = False
while not result_queue.empty():
other.merge(result_queue.get())
queue_size[0] -= 1
merged_new = True
if (force and merged_new and queue_size[0] == 0) or (not self.batch and (other.numdocs >= updateafter)):
self.do_mstep(rho(), other, pass_ > 0)
other.reset()
if self.eval_every is not None and ((force and queue_size[0] == 0) or (self.eval_every != 0 and (self.num_updates / updateafter) % self.eval_every == 0)):
self.log_perplexity(chunk, total_docs=lencorpus)
chunk_stream = utils.grouper(corpus, self.chunksize, as_numpy=chunks_as_numpy)
for chunk_no, chunk in enumerate(chunk_stream):
reallen += len(chunk) # keep track of how many documents we've processed so far
# put the chunk into the workers' input job queue
chunk_put = False
while not chunk_put:
try:
job_queue.put((chunk_no, chunk, self), block=False, timeout=0.1)
chunk_put = True
queue_size[0] += 1
logger.info('PROGRESS: pass %i, dispatched chunk #%i = '
'documents up to #%i/%i, outstanding queue size %i',
pass_, chunk_no, chunk_no * self.chunksize + len(chunk), lencorpus, queue_size[0])
except queue.Full:
# in case the input job queue is full, keep clearing the
# result queue, to make sure we don't deadlock
process_result_queue()
process_result_queue()
#endfor single corpus pass
# wait for all outstanding jobs to finish
while queue_size[0] > 0:
process_result_queue(force=True)
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
#endfor entire update
pool.terminate()
def worker_e_step(input_queue, result_queue):
"""
Perform E-step for each (chunk_no, chunk, model) 3-tuple from the
input queue, placing the resulting state into the result queue.
"""
logger.debug("worker process entering E-step loop")
while True:
logger.debug("getting a new job")
chunk_no, chunk, worker_lda = input_queue.get()
logger.debug("processing chunk #%i of %i documents", chunk_no, len(chunk))
worker_lda.state.reset()
worker_lda.do_estep(chunk) # TODO: auto-tune alpha?
del chunk
logger.debug("processed chunk, queuing the result")
result_queue.put(worker_lda.state)
del worker_lda # free up some memory
logger.debug("result put")
| lgpl-2.1 | -4,509,515,091,963,753,000 | 44.201439 | 174 | 0.623906 | false |
vgupta6/Project-2 | modules/tests/inv/helper.py | 2 | 16414 | __all__ = ["send",
"track_send_item",
"send_shipment",
"receive",
"track_recv_item",
"recv_shipment",
"recv_sent_shipment",
"send_rec",
"send_get_id",
"send_get_ref",
"recv_rec",
"recv_get_id",
"dbcallback_getStockLevels",
]
from gluon import current
from s3 import s3_debug
from tests.web2unittest import SeleniumUnitTest
class InvTestFunctions(SeleniumUnitTest):
def send(self, user, data):
"""
@case: INV
            @description: Functions which run specific workflows for Inventory tests
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
"""
            Helper method to add an inv_send record by the given user
"""
self.login(account=user, nexturl="inv/send/create")
table = "inv_send"
result = self.create(table, data)
s3_debug("WB reference: %s" % self.send_get_ref(result))
return result
# -------------------------------------------------------------------------
def track_send_item(self, user, send_id, data, removed=True):
"""
Helper method to add a track item to the inv_send with the
given send_id by the given user
"""
try:
add_btn = self.browser.find_element_by_id("show-add-btn")
if add_btn.is_displayed():
add_btn.click()
except:
pass
self.login(account=user, nexturl="inv/send/%s/track_item" % send_id)
table = "inv_track_item"
result = self.create(table, data, dbcallback = self.dbcallback_getStockLevels)
# Get the last record in the before & after
# this will give the stock record which has been added to the end by
# the getStockLevels callback
if removed:
qnty = 0
for line in data:
if line[0] == "quantity":
qnty = float(line[1])
break
stock_before = result["before"].records[len(result["before"])-1].quantity
stock_after = result["after"].records[len(result["after"])-1].quantity
stock_shipped = qnty
self.assertTrue( stock_before - stock_after == stock_shipped, "Warehouse stock not properly adjusted, was %s should be %s but is recorded as %s" % (stock_before, stock_after, stock_before - stock_shipped))
s3_debug ("Stock level before %s, stock level after %s" % (stock_before, stock_after))
return result
# -------------------------------------------------------------------------
def send_shipment(self, user, send_id):
"""
Helper method to send a shipment with id of send_id
"""
db = current.db
s3db = current.s3db
stable = s3db.inv_send
ititable = s3db.inv_track_item
# Get the current status
query = (stable.id == send_id)
record = db(query).select(stable.status,
limitby=(0, 1)).first()
send_status = record.status
query = (ititable.send_id == send_id)
item_records = db(query).select(ititable.status)
# check that the status is correct
self.assertTrue(send_status == 0, "Shipment is not status preparing")
s3_debug("Shipment status is: preparing")
for rec in item_records:
self.assertTrue(rec.status == 1, "Shipment item is not status preparing")
s3_debug("Shipment items are all of status: preparing")
# Now send the shipment on its way
self.login(account=user, nexturl="inv/send_process/%s" % send_id)
# Get the current status
query = (stable.id == send_id)
record = db(query).select(stable.status,
limitby=(0, 1)).first()
send_status = record.status
query = (ititable.send_id == send_id)
item_records = db(query).select(ititable.status)
# check that the status is correct
self.assertTrue(send_status == 2, "Shipment is not status sent")
s3_debug("Shipment status is: sent")
for rec in item_records:
self.assertTrue(rec.status == 2, "Shipment item is not status sent")
s3_debug("Shipment items are all of status: sent")
# -------------------------------------------------------------------------
def confirm_received_shipment(self, user, send_id):
"""
Helper method to confirm that a shipment has been received
outside of the system. This means that the items in the
shipment will not be recorded as being at a site but
the status of the shipment will be modified.
"""
db = current.db
s3db = current.s3db
stable = s3db.inv_send
ititable = s3db.inv_track_item
# Get the current status
query = (stable.id == send_id)
record = db(query).select(stable.status,
limitby=(0, 1)).first()
send_status = record.status
query = (ititable.send_id == send_id)
item_records = db(query).select(ititable.status)
# check that the status is correct
self.assertTrue(send_status == 2, "Shipment is not status sent")
s3_debug("Shipment status is: preparing")
for rec in item_records:
self.assertTrue(rec.status == 2, "Shipment item is not status sent")
s3_debug("Shipment items are all of status: sent")
# Now send the shipment on its way
self.login(account=user, nexturl="inv/send/%s?received=True" % send_id)
# Get the current status
query = (stable.id == send_id)
record = db(query).select(stable.status,
limitby=(0, 1)).first()
send_status = record.status
query = (ititable.send_id == send_id)
item_records = db(query).select(ititable.status)
# check that the status is correct
self.assertTrue(send_status == 1, "Shipment is not status received")
s3_debug("Shipment status is: sent")
for rec in item_records:
self.assertTrue(rec.status == 4, "Shipment item is not status arrived")
s3_debug("Shipment items are all of status: arrived")
# -------------------------------------------------------------------------
def receive(self, user, data):
"""
            Helper method to add an inv_recv record by the given user
"""
self.login(account=user, nexturl="inv/recv/create")
table = "inv_recv"
result = self.create(table, data)
return result
# -------------------------------------------------------------------------
def track_recv_item(self, user, recv_id, data, removed=True):
"""
Helper method to add a track item to the inv_recv with the
given recv_id
"""
try:
add_btn = self.browser.find_element_by_id("show-add-btn")
if add_btn.is_displayed():
add_btn.click()
except:
pass
self.login(account=user, nexturl="inv/recv/%s/track_item" % recv_id)
table = "inv_track_item"
result = self.create(table, data)
return result
# -------------------------------------------------------------------------
def recv_shipment(self, user, recv_id, data):
"""
Helper method that will receive the shipment, adding the
totals that arrived
It will get the stock in the warehouse before and then after
and check that the stock levels have been properly increased
"""
db = current.db
s3db = current.s3db
rvtable = s3db.inv_recv
iitable = s3db.inv_inv_item
# First get the site_id
query = (rvtable.id == recv_id)
record = db(query).select(rvtable.site_id,
limitby=(0, 1)).first()
site_id = record.site_id
# Now get all the inventory items for the site
query = (iitable.site_id == site_id)
before = db(query).select(orderby=iitable.id)
self.login(account=user, nexturl="inv/recv_process/%s" % recv_id)
query = (iitable.site_id == site_id)
after = db(query).select(orderby=iitable.id)
# Find the differences between the before and the after
changes = []
for a_rec in after:
found = False
for b_rec in before:
if a_rec.id == b_rec.id:
if a_rec.quantity != b_rec.quantity:
changes.append(
(a_rec.item_id,
a_rec.item_pack_id,
a_rec.quantity - b_rec.quantity)
)
found = True
break
if not found:
changes.append(
(a_rec.item_id,
a_rec.item_pack_id,
a_rec.quantity)
)
# changes now contains the list of changed or new records
# these should match the records received
# first check are the lengths the same?
self.assertTrue(len(data) == len(changes),
"The number of changed inventory items (%s) doesn't match the number of items received (%s)." %
(len(changes), len(data))
)
for line in data:
rec = line["record"]
found = False
for change in changes:
if rec.inv_track_item.item_id == change[0] and \
rec.inv_track_item.item_pack_id == change[1] and \
rec.inv_track_item.quantity == change[2]:
found = True
break
if found:
s3_debug("%s accounted for." % line["text"])
else:
s3_debug("%s not accounted for." % line["text"])
# -------------------------------------------------------------------------
def recv_sent_shipment(self, method, user, WB_ref, item_list):
"""
Helper method that will receive the sent shipment.
This supports two methods:
method = "warehouse"
====================
This requires going to the receiving warehouse
Selecting the shipment (using the WB reference)
Opening each item and selecting the received totals
Then receive the shipment
method = "search"
====================
Search for all received shipments
Select the matching WB reference
Opening each item and selecting the received totals
Then receive the shipment
Finally:
It will get the stock in the warehouse before and then after
and check that the stock levels have been properly increased
"""
browser = self.browser
if method == "search":
self.login(account=user, nexturl="inv/recv/search")
# Find the WB reference in the dataTable (filter so only one is displayed)
el = browser.find_element_by_id("recv_search_simple")
el.send_keys(WB_ref)
# Submit the search
browser.find_element_by_css_selector("input[type='submit']").submit()
# Select the only row in the dataTable
if not self.dt_action():
fail("Unable to select the incoming shipment with reference %s" % WB_ref)
elif method == "warehouse":
return # not yet implemented
else:
fail("Unknown method of %s" % method)
return # invalid method
#####################################################
# We are now viewing the details of the receive item
#####################################################
# Now get the recv id from the url
url = browser.current_url
url_parts = url.split("/")
try:
recv_id = int(url_parts[-1])
except:
recv_id = int(url_parts[-2])
# Click on the items tab
self.login(account=user, nexturl="inv/recv/%s/track_item" % recv_id)
data = []
for item in item_list:
# Find the item in the dataTable
self.dt_filter(item[0])
self.dt_action()
el = browser.find_element_by_id("inv_track_item_recv_quantity")
el.send_keys(item[1])
text = "%s %s" % (item[1], item[0])
data.append({"text" : text,
"record" : item[2]})
# Save the form
browser.find_element_by_css_selector("input[type='submit']").submit()
# Now receive the shipment and check the totals
self.recv_shipment(user, recv_id, data)
# -------------------------------------------------------------------------
# Functions which extract data from the create results
#
def send_rec(self, result):
"""
Simple helper function to get the newly created inv_send row
"""
# The newly created inv_send will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_send = result["after"].records[0]
return new_inv_send.inv_send
return None
def send_get_id(self, result):
"""
Simple helper function to get the record id of the newly
created inv_send row so it can be used to open the record
"""
# The newly created inv_send will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_send = result["after"].records[0]
return new_inv_send.inv_send.id
return None
def send_get_ref(self, result):
"""
Simple helper function to get the waybill reference of the newly
created inv_send row so it can be used to filter dataTables
"""
# The newly created inv_send will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_send = result["after"].records[0]
return new_inv_send.inv_send.send_ref
return None
# -------------------------------------------------------------------------
def recv_rec(self, result):
"""
Simple helper function to get the newly created inv_recv row
"""
# The newly created inv_recv will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_recv = result["after"].records[0]
return new_inv_recv.inv_recv
return None
# -------------------------------------------------------------------------
def recv_get_id(self, result):
"""
Simple helper function to get the record id of the newly
created inv_recv row so it can be used to open the record
"""
# The newly created inv_recv will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_recv = result["after"].records[0]
return new_inv_recv.inv_recv.id
return None
# -------------------------------------------------------------------------
# Callback used to retrieve additional data to the create results
#
def dbcallback_getStockLevels(self, table, data, rows):
"""
Callback to add the total in stock for the selected item.
This can then be used to look at the value before and after
to ensure that the totals have been removed from the warehouse.
The stock row will be added to the *end* of the list of rows
"""
table = current.s3db["inv_inv_item"]
for details in data:
if details[0] == "send_inv_item_id":
inv_item_id = details[1]
break
stock_row = table[inv_item_id]
rows.records.append(stock_row)
return rows
# END =========================================================================
| mit | 1,742,293,231,489,619,000 | 39.132029 | 217 | 0.519678 | false |
Pike/elmo | apps/shipping/forms.py | 2 | 1261 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
from __future__ import unicode_literals
from django import forms
from shipping.models import AppVersion
class ModelInstanceField(forms.fields.Field):
def __init__(self, model, key='pk', *args, **kwargs):
self.model = model
self.key = key
self._instance = None
super(ModelInstanceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"return the model instance if a value is supplied"
if value:
try:
return self.model.objects.get(**{self.key: value})
except self.model.DoesNotExist:
raise forms.ValidationError(self.model._meta.verbose_name)
class SignoffFilterForm(forms.Form):
av = ModelInstanceField(AppVersion, key='code')
up_until = forms.fields.DateTimeField(required=False)
class SignoffsPaginationForm(forms.Form):
push_date = forms.DateTimeField(
input_formats=forms.DateTimeField.input_formats + [
'%Y-%m-%dT%H:%M:%S', # isoformat
]
)
| mpl-2.0 | -7,068,895,552,219,340,000 | 32.184211 | 74 | 0.655829 | false |
Creworker/FreeCAD | src/Mod/OpenSCAD/colorcodeshapes.py | 29 | 4659 | #***************************************************************************
#* *
#* Copyright (c) 2012 Sebastian Hoogen <github@sebastianhoogen.de> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD OpenSCAD Workbench - 2D helper functions"
__author__ = "Sebastian Hoogen"
__url__ = ["http://www.freecadweb.org"]
'''
This Script includes python functions to find out the most basic shape type
in a compound and to change the color of shapes according to their shape type
'''
import FreeCAD
def shapedict(shapelst):
return dict([(shape.hashCode(),shape) for shape in shapelst])
def shapeset(shapelst):
return set([shape.hashCode() for shape in shapelst])
def mostbasiccompound(comp):
    '''searches for the most basic shape in a Compound'''
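    # Strategy: work on sets of shape hash codes. For every solid, shell,
    # face, wire and edge, discard its own sub-shapes from the lower-level
    # sets, so each set keeps only shapes that are not contained in any
    # higher-level shape; the lowest level left non-empty is the most basic
    # standalone shape type in the compound.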
solids=shapeset(comp.Solids)
shells=shapeset(comp.Shells)
faces=shapeset(comp.Faces)
wires=shapeset(comp.Wires)
edges=shapeset(comp.Edges)
vertexes=shapeset(comp.Vertexes)
#FreeCAD.Console.PrintMessage('%s\n' % (str((len(solids),len(shells),len(faces),len(wires),len(edges),len(vertexes)))))
for shape in comp.Solids:
shells -= shapeset(shape.Shells)
faces -= shapeset(shape.Faces)
wires -= shapeset(shape.Wires)
edges -= shapeset(shape.Edges)
vertexes -= shapeset(shape.Vertexes)
for shape in comp.Shells:
faces -= shapeset(shape.Faces)
wires -= shapeset(shape.Wires)
edges -= shapeset(shape.Edges)
vertexes -= shapeset(shape.Vertexes)
for shape in comp.Faces:
wires -= shapeset(shape.Wires)
edges -= shapeset(shape.Edges)
vertexes -= shapeset(shape.Vertexes)
for shape in comp.Wires:
edges -= shapeset(shape.Edges)
vertexes -= shapeset(shape.Vertexes)
for shape in comp.Edges:
vertexes -= shapeset(shape.Vertexes)
#FreeCAD.Console.PrintMessage('%s\n' % (str((len(solids),len(shells),len(faces),len(wires),len(edges),len(vertexes)))))
#return len(solids),len(shells),len(faces),len(wires),len(edges),len(vertexes)
if vertexes:
return "Vertex"
elif edges:
return "Edge"
elif wires:
return "Wire"
elif faces:
return "Face"
elif shells:
return "Shell"
elif solids:
return "Solid"
def colorcodeshapes(objs):
shapecolors={
"Compound":(0.3,0.3,0.4),
"CompSolid":(0.1,0.5,0.0),
"Solid":(0.0,0.8,0.0),
"Shell":(0.8,0.0,0.0),
"Face":(0.6,0.6,0.0),
"Wire":(0.1,0.1,0.1),
"Edge":(1.0,1.0,1.0),
"Vertex":(8.0,8.0,8.0),
"Shape":(0.0,0.0,1.0),
None:(0.0,0.0,0.0)}
for obj in objs:
if hasattr(obj,'Shape'):
try:
if obj.Shape.isNull():
continue
if not obj.Shape.isValid():
color=(1.0,0.4,0.4)
else:
st=obj.Shape.ShapeType
if st in ["Compound","CompSolid"]:
st = mostbasiccompound(obj.Shape)
color=shapecolors[st]
obj.ViewObject.ShapeColor = color
except:
raise
#colorcodeshapes(App.ActiveDocument.Objects)
| lgpl-2.1 | 7,837,713,276,584,346,000 | 40.598214 | 123 | 0.520713 | false |
cboling/SDNdbg | docs/old-stuff/pydzcvr/doc/neutron/plugins/ml2/drivers/arista/arista_l3_driver.py | 4 | 20045 | # Copyright 2014 Arista Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import socket
import struct
import jsonrpclib
from oslo.config import cfg
from neutron import context as nctx
from neutron.db import db_base_plugin_v2
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.ml2.drivers.arista import exceptions as arista_exc
LOG = logging.getLogger(__name__)
EOS_UNREACHABLE_MSG = _('Unable to reach EOS')
DEFAULT_VLAN = 1
MLAG_SWITCHES = 2
VIRTUAL_ROUTER_MAC = '00:11:22:33:44:55'
IPV4_BITS = 32
IPV6_BITS = 128
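# EOS command templates used by AristaL3Driver below. In the 'router'
# command lists {0} is the router/VRF name and {1} the route distinguisher;
# in the 'interface' lists {0} is the VLAN (segment) id, {1} the VRF name
# and {2} the interface IP address with mask.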
router_in_vrf = {
'router': {'create': ['vrf definition {0}',
'rd {1}',
'exit'],
'delete': ['no vrf definition {0}']},
'interface': {'add': ['ip routing vrf {1}',
'vlan {0}',
'exit',
'interface vlan {0}',
'vrf forwarding {1}',
'ip address {2}'],
'remove': ['no interface vlan {0}']}}
router_in_default_vrf = {
'router': {'create': [], # Place holder for now.
'delete': []}, # Place holder for now.
'interface': {'add': ['ip routing',
'vlan {0}',
'exit',
'interface vlan {0}',
'ip address {2}'],
'remove': ['no interface vlan {0}']}}
router_in_default_vrf_v6 = {
'router': {'create': [],
'delete': []},
'interface': {'add': ['ipv6 unicast-routing',
'vlan {0}',
'exit',
'interface vlan {0}',
'ipv6 enable',
'ipv6 address {2}'],
'remove': ['no interface vlan {0}']}}
additional_cmds_for_mlag = {
'router': {'create': ['ip virtual-router mac-address {0}'],
'delete': ['no ip virtual-router mac-address']},
'interface': {'add': ['ip virtual-router address {0}'],
'remove': []}}
additional_cmds_for_mlag_v6 = {
'router': {'create': [],
'delete': []},
'interface': {'add': ['ipv6 virtual-router address {0}'],
'remove': []}}
class AristaL3Driver(object):
"""Wraps Arista JSON RPC.
All communications between Neutron and EOS are over JSON RPC.
EOS - operating system used on Arista hardware
Command API - JSON RPC API provided by Arista EOS
"""
def __init__(self):
self._servers = []
self._hosts = []
self.interfaceDict = None
self._validate_config()
host = cfg.CONF.l3_arista.primary_l3_host
self._hosts.append(host)
self._servers.append(jsonrpclib.Server(self._eapi_host_url(host)))
self.mlag_configured = cfg.CONF.l3_arista.mlag_config
self.use_vrf = cfg.CONF.l3_arista.use_vrf
if self.mlag_configured:
host = cfg.CONF.l3_arista.secondary_l3_host
self._hosts.append(host)
self._servers.append(jsonrpclib.Server(self._eapi_host_url(host)))
self._additionalRouterCmdsDict = additional_cmds_for_mlag['router']
self._additionalInterfaceCmdsDict = (
additional_cmds_for_mlag['interface'])
if self.use_vrf:
self.routerDict = router_in_vrf['router']
self.interfaceDict = router_in_vrf['interface']
else:
self.routerDict = router_in_default_vrf['router']
self.interfaceDict = router_in_default_vrf['interface']
def _eapi_host_url(self, host):
user = cfg.CONF.l3_arista.primary_l3_host_username
pwd = cfg.CONF.l3_arista.primary_l3_host_password
eapi_server_url = ('https://%s:%s@%s/command-api' %
(user, pwd, host))
return eapi_server_url
def _validate_config(self):
if cfg.CONF.l3_arista.get('primary_l3_host') == '':
msg = _('Required option primary_l3_host is not set')
LOG.error(msg)
raise arista_exc.AristaSevicePluginConfigError(msg=msg)
if cfg.CONF.l3_arista.get('mlag_config'):
if cfg.CONF.l3_arista.get('secondary_l3_host') == '':
msg = _('Required option secondary_l3_host is not set')
LOG.error(msg)
raise arista_exc.AristaSevicePluginConfigError(msg=msg)
if cfg.CONF.l3_arista.get('primary_l3_host_username') == '':
msg = _('Required option primary_l3_host_username is not set')
LOG.error(msg)
raise arista_exc.AristaSevicePluginConfigError(msg=msg)
def create_router_on_eos(self, router_name, rdm, server):
"""Creates a router on Arista HW Device.
:param router_name: globally unique identifier for router/VRF
:param rdm: A value generated by hashing router name
:param server: Server endpoint on the Arista switch to be configured
"""
cmds = []
rd = "%s:%s" % (rdm, rdm)
for c in self.routerDict['create']:
cmds.append(c.format(router_name, rd))
if self.mlag_configured:
mac = VIRTUAL_ROUTER_MAC
for c in self._additionalRouterCmdsDict['create']:
cmds.append(c.format(mac))
self._run_openstack_l3_cmds(cmds, server)
def delete_router_from_eos(self, router_name, server):
"""Deletes a router from Arista HW Device.
:param router_name: globally unique identifier for router/VRF
:param server: Server endpoint on the Arista switch to be configured
"""
cmds = []
for c in self.routerDict['delete']:
cmds.append(c.format(router_name))
if self.mlag_configured:
for c in self._additionalRouterCmdsDict['delete']:
cmds.append(c)
self._run_openstack_l3_cmds(cmds, server)
def _select_dicts(self, ipv):
if self.use_vrf:
self.interfaceDict = router_in_vrf['interface']
else:
if ipv == 6:
                # for IPv6 use IPv6 commands
self.interfaceDict = router_in_default_vrf_v6['interface']
self._additionalInterfaceCmdsDict = (
additional_cmds_for_mlag_v6['interface'])
else:
self.interfaceDict = router_in_default_vrf['interface']
self._additionalInterfaceCmdsDict = (
additional_cmds_for_mlag['interface'])
def add_interface_to_router(self, segment_id,
router_name, gip, router_ip, mask, server):
"""Adds an interface to existing HW router on Arista HW device.
:param segment_id: VLAN Id associated with interface that is added
:param router_name: globally unique identifier for router/VRF
:param gip: Gateway IP associated with the subnet
:param router_ip: IP address of the router
:param mask: subnet mask to be used
:param server: Server endpoint on the Arista switch to be configured
"""
if not segment_id:
segment_id = DEFAULT_VLAN
cmds = []
for c in self.interfaceDict['add']:
if self.mlag_configured:
ip = router_ip
else:
ip = gip + '/' + mask
cmds.append(c.format(segment_id, router_name, ip))
if self.mlag_configured:
for c in self._additionalInterfaceCmdsDict['add']:
cmds.append(c.format(gip))
self._run_openstack_l3_cmds(cmds, server)
def delete_interface_from_router(self, segment_id, router_name, server):
"""Deletes an interface from existing HW router on Arista HW device.
:param segment_id: VLAN Id associated with interface that is added
:param router_name: globally unique identifier for router/VRF
:param server: Server endpoint on the Arista switch to be configured
"""
if not segment_id:
segment_id = DEFAULT_VLAN
cmds = []
for c in self.interfaceDict['remove']:
cmds.append(c.format(segment_id))
self._run_openstack_l3_cmds(cmds, server)
def create_router(self, context, tenant_id, router):
"""Creates a router on Arista Switch.
Deals with multiple configurations - such as Router per VRF,
a router in default VRF, Virtual Router in MLAG configurations
"""
if router:
router_name = self._arista_router_name(tenant_id, router['name'])
rdm = str(int(hashlib.sha256(router_name).hexdigest(),
16) % 6553)
mlag_peer_failed = False
for s in self._servers:
try:
self.create_router_on_eos(router_name, rdm, s)
mlag_peer_failed = False
except Exception:
if self.mlag_configured and not mlag_peer_failed:
mlag_peer_failed = True
else:
msg = (_('Failed to create router %s on EOS') %
router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def delete_router(self, context, tenant_id, router_id, router):
"""Deletes a router from Arista Switch."""
if router:
router_name = self._arista_router_name(tenant_id, router['name'])
mlag_peer_failed = False
for s in self._servers:
try:
self.delete_router_from_eos(router_name, s)
mlag_peer_failed = False
except Exception:
if self.mlag_configured and not mlag_peer_failed:
mlag_peer_failed = True
else:
msg = (_LE('Failed to delete router %s from EOS') %
router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def update_router(self, context, router_id, original_router, new_router):
"""Updates a router which is already created on Arista Switch.
TODO: (Sukhdev) - to be implemented in next release.
"""
pass
def add_router_interface(self, context, router_info):
"""Adds an interface to a router created on Arista HW router.
This deals with both IPv6 and IPv4 configurations.
"""
if router_info:
self._select_dicts(router_info['ip_version'])
cidr = router_info['cidr']
subnet_mask = cidr.split('/')[1]
router_name = self._arista_router_name(router_info['tenant_id'],
router_info['name'])
if self.mlag_configured:
# For MLAG, we send a specific IP address as opposed to cidr
# For now, we are using x.x.x.253 and x.x.x.254 as virtual IP
mlag_peer_failed = False
for i, server in enumerate(self._servers):
#get appropriate virtual IP address for this router
router_ip = self._get_router_ip(cidr, i,
router_info['ip_version'])
try:
self.add_interface_to_router(router_info['seg_id'],
router_name,
router_info['gip'],
router_ip, subnet_mask,
server)
mlag_peer_failed = False
except Exception:
if not mlag_peer_failed:
mlag_peer_failed = True
else:
msg = (_('Failed to add interface to router '
'%s on EOS') % router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(
msg=msg)
else:
for s in self._servers:
self.add_interface_to_router(router_info['seg_id'],
router_name,
router_info['gip'],
None, subnet_mask, s)
def remove_router_interface(self, context, router_info):
"""Removes previously configured interface from router on Arista HW.
This deals with both IPv6 and IPv4 configurations.
"""
if router_info:
router_name = self._arista_router_name(router_info['tenant_id'],
router_info['name'])
mlag_peer_failed = False
for s in self._servers:
try:
self.delete_interface_from_router(router_info['seg_id'],
router_name, s)
if self.mlag_configured:
mlag_peer_failed = False
except Exception:
if self.mlag_configured and not mlag_peer_failed:
mlag_peer_failed = True
else:
msg = (_LE('Failed to remove interface from router '
'%s on EOS') % router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def _run_openstack_l3_cmds(self, commands, server):
"""Execute/sends a CAPI (Command API) command to EOS.
In this method, list of commands is appended with prefix and
postfix commands - to make is understandble by EOS.
:param commands : List of command to be executed on EOS.
:param server: Server endpoint on the Arista switch to be configured
"""
command_start = ['enable', 'configure']
command_end = ['exit']
full_command = command_start + commands + command_end
LOG.info(_('Executing command on Arista EOS: %s'), full_command)
try:
# this returns array of return values for every command in
# full_command list
ret = server.runCmds(version=1, cmds=full_command)
LOG.info(_('Results of execution on Arista EOS: %s'), ret)
except Exception:
msg = (_LE("Error occured while trying to execute "
"commands %(cmd)s on EOS %(host)s"),
{'cmd': full_command, 'host': server})
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def _arista_router_name(self, tenant_id, name):
# Use a unique name so that OpenStack created routers/SVIs
        # can be distinguished from the user created routers/SVIs
# on Arista HW.
return 'OS' + '-' + tenant_id + '-' + name
def _get_binary_from_ipv4(self, ip_addr):
return struct.unpack("!L", socket.inet_pton(socket.AF_INET,
ip_addr))[0]
def _get_binary_from_ipv6(self, ip_addr):
hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6,
ip_addr))
return (hi << 64) | lo
def _get_ipv4_from_binary(self, bin_addr):
return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr))
def _get_ipv6_from_binary(self, bin_addr):
hi = bin_addr >> 64
lo = bin_addr & 0xFFFFFFFF
return socket.inet_ntop(socket.AF_INET6, struct.pack("!QQ", hi, lo))
def _get_router_ip(self, cidr, ip_count, ip_ver):
""" For a given IP subnet and IP version type, generate IP for router.
This method takes the network address (cidr) and selects an
IP address that should be assigned to virtual router running
on multiple switches. It uses upper addresses in a subnet address
        as IP for the router. Each instance of the router, on each switch,
        requires a unique IP address. For example, in the IPv4 case, on a 255
        subnet, it will pick X.X.X.254 as the first address, X.X.X.253 for the next,
and so on.
"""
start_ip = MLAG_SWITCHES + ip_count
network_addr, prefix = cidr.split('/')
if ip_ver == 4:
bits = IPV4_BITS
ip = self._get_binary_from_ipv4(network_addr)
elif ip_ver == 6:
bits = IPV6_BITS
ip = self._get_binary_from_ipv6(network_addr)
mask = (pow(2, bits) - 1) << (bits - int(prefix))
network_addr = ip & mask
router_ip = pow(2, bits - int(prefix)) - start_ip
router_ip = network_addr | router_ip
if ip_ver == 4:
return self._get_ipv4_from_binary(router_ip) + '/' + prefix
else:
return self._get_ipv6_from_binary(router_ip) + '/' + prefix
class NeutronNets(db_base_plugin_v2.NeutronDbPluginV2):
"""Access to Neutron DB.
Provides access to the Neutron Data bases for all provisioned
networks as well ports. This data is used during the synchronization
of DB between ML2 Mechanism Driver and Arista EOS
Names of the networks and ports are not stored in Arista repository
They are pulled from Neutron DB.
"""
def __init__(self):
self.admin_ctx = nctx.get_admin_context()
def get_all_networks_for_tenant(self, tenant_id):
filters = {'tenant_id': [tenant_id]}
return super(NeutronNets,
self).get_networks(self.admin_ctx, filters=filters) or []
def get_all_ports_for_tenant(self, tenant_id):
filters = {'tenant_id': [tenant_id]}
return super(NeutronNets,
self).get_ports(self.admin_ctx, filters=filters) or []
def _get_network(self, tenant_id, network_id):
filters = {'tenant_id': [tenant_id],
'id': [network_id]}
return super(NeutronNets,
self).get_networks(self.admin_ctx, filters=filters) or []
def get_subnet_info(self, subnet_id):
subnet = self.get_subnet(subnet_id)
return subnet
def get_subnet_ip_version(self, subnet_id):
subnet = self.get_subnet(subnet_id)
return subnet['ip_version']
def get_subnet_gateway_ip(self, subnet_id):
subnet = self.get_subnet(subnet_id)
return subnet['gateway_ip']
def get_subnet_cidr(self, subnet_id):
subnet = self.get_subnet(subnet_id)
return subnet['cidr']
def get_network_id(self, subnet_id):
subnet = self.get_subnet(subnet_id)
return subnet['network_id']
def get_network_id_from_port_id(self, port_id):
port = self.get_port(port_id)
return port['network_id']
def get_subnet(self, subnet_id):
return super(NeutronNets,
self).get_subnet(self.admin_ctx, subnet_id) or []
def get_port(self, port_id):
return super(NeutronNets,
self).get_port(self.admin_ctx, port_id) or []
| apache-2.0 | 3,187,505,232,501,817,000 | 39.413306 | 79 | 0.545922 | false |
jamiecaesar/SecureCRT | templates/single_device_template.py | 3 | 2754 | # $language = "python"
# $interface = "1.0"
import os
import sys
import logging
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import utilities
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def script_main(session):
"""
| SINGLE device script
| Author: XXXXXXXX
| Email: XXXXXXX@domain.com
PUT A DESCRIPTION OF THIS SCRIPT HERE. WHAT IT DOES, ETC.
This script assumes it will be run against a connected device.
:param session: A subclass of the sessions.Session object that represents this particular script session (either
SecureCRTSession or DirectSession)
:type session: sessions.Session
"""
# Get script object that owns this session, so we can check settings, get textfsm templates, etc
script = session.script
# Start session with device, i.e. modify term parameters for better interaction (assuming already connected)
session.start_cisco_session()
#
# PUT YOUR CODE HERE
#
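    # A minimal illustration of the kind of logic that could go here,
    # assuming the securecrt_tools session API offers get_command_output()
    # as in the scripts bundled with this template (verify against your
    # version before using):
    #
    # output = session.get_command_output("show version")
    # with open("version.txt", "w") as f:
    #     f.write(output)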
# Return terminal parameters back to the original state.
session.end_cisco_session()
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
if __name__ == "__builtin__":
# Initialize script object
crt_script = scripts.CRTScript(crt)
# Get session object for the SecureCRT tab that the script was launched from.
crt_session = crt_script.get_main_session()
# Run script's main logic against our session
script_main(crt_session)
# Shutdown logging after
logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
# Initialize script object
direct_script = scripts.DebugScript(os.path.realpath(__file__))
# Get a simulated session object to pass into the script.
sim_session = direct_script.get_main_session()
# Run script's main logic against our session
script_main(sim_session)
# Shutdown logging after
logging.shutdown()
| apache-2.0 | 3,464,533,098,248,832,000 | 34.766234 | 119 | 0.657226 | false |
collects/VTK | Examples/Infovis/Python/hierarchical_graph.py | 17 | 2136 | from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(200)
source.SetEdgeProbability(0.01)
source.SetUseEdgeProbability(True)
source.SetStartWithTree(True)
source.IncludeEdgeWeightsOn()
source.AllowParallelEdgesOn()
# Connect to the vtkVertexDegree filter.
degree_filter = vtkVertexDegree()
degree_filter.SetOutputArrayName("vertex_degree")
degree_filter.SetInputConnection(source.GetOutputPort())
# Connect to the boost breath first search filter.
mstTreeSelection = vtkBoostKruskalMinimumSpanningTree()
mstTreeSelection.SetInputConnection(degree_filter.GetOutputPort())
mstTreeSelection.SetEdgeWeightArrayName("edge weight")
mstTreeSelection.NegateEdgeWeightsOn()
# Take selection and extract a graph
extract_graph = vtkExtractSelectedGraph()
extract_graph.AddInputConnection(degree_filter.GetOutputPort())
extract_graph.SetSelectionConnection(mstTreeSelection.GetOutputPort())
# Create a tree from the graph :)
bfsTree = vtkBoostBreadthFirstSearchTree()
bfsTree.AddInputConnection(extract_graph.GetOutputPort())
treeStrat = vtkTreeLayoutStrategy();
treeStrat.RadialOn()
treeStrat.SetAngle(360)
treeStrat.SetLogSpacingValue(1)
forceStrat = vtkSimple2DLayoutStrategy()
forceStrat.SetEdgeWeightField("edge weight")
dummy = vtkHierarchicalGraphView()
# Create Tree/Graph Layout view
view = vtkHierarchicalGraphView()
view.SetHierarchyFromInputConnection(bfsTree.GetOutputPort())
view.SetGraphFromInputConnection(degree_filter.GetOutputPort())
view.SetVertexColorArrayName("VertexDegree")
view.SetColorVertices(True)
view.SetVertexLabelArrayName("VertexDegree")
view.SetVertexLabelVisibility(True)
view.SetEdgeColorArrayName("edge weight")
# FIXME: If you uncomment this line the display locks up
view.SetColorEdges(True)
view.SetEdgeLabelArrayName("edge weight")
view.SetEdgeLabelVisibility(True)
view.SetLayoutStrategy(forceStrat)
view.SetBundlingStrength(.8)
# Set up the theme
theme = vtkViewTheme.CreateMellowTheme()
theme.SetCellColor(.2,.2,.6)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
| bsd-3-clause | 7,096,711,207,002,817,000 | 29.514286 | 70 | 0.831929 | false |
cstavr/synnefo | snf-pithos-backend/pithos/backends/lib/sqlite/groups.py | 1 | 3735 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
from dbworker import DBWorker
class Groups(DBWorker):
"""Groups are named collections of members, belonging to an owner."""
def __init__(self, **params):
DBWorker.__init__(self, **params)
execute = self.execute
execute(""" create table if not exists groups
( owner text,
name text,
member text,
primary key (owner, name, member) ) """)
execute(""" create index if not exists idx_groups_member
on groups(member) """)
def group_names(self, owner):
"""List all group names belonging to owner."""
q = "select distinct name from groups where owner = ?"
self.execute(q, (owner,))
return [r[0] for r in self.fetchall()]
def group_dict(self, owner):
"""Return a dict mapping group names to member lists for owner."""
q = "select name, member from groups where owner = ?"
self.execute(q, (owner,))
d = defaultdict(list)
for group, member in self.fetchall():
d[group].append(member)
return d
def group_add(self, owner, group, member):
"""Add a member to a group."""
q = ("insert or ignore into groups (owner, name, member) "
"values (?, ?, ?)")
self.execute(q, (owner, group, member))
def group_addmany(self, owner, group, members):
"""Add members to a group."""
q = ("insert or ignore into groups (owner, name, member) "
"values (?, ?, ?)")
self.executemany(q, ((owner, group, member) for member in
sorted(list(members))))
def group_remove(self, owner, group, member):
"""Remove a member from a group."""
q = "delete from groups where owner = ? and name = ? and member = ?"
self.execute(q, (owner, group, member))
def group_delete(self, owner, group):
"""Delete a group."""
q = "delete from groups where owner = ? and name = ?"
self.execute(q, (owner, group))
def group_destroy(self, owner):
"""Delete all groups belonging to owner."""
q = "delete from groups where owner = ?"
self.execute(q, (owner,))
def group_members(self, owner, group):
"""Return the list of members of a group."""
q = "select member from groups where owner = ? and name = ?"
self.execute(q, (owner, group))
return [r[0] for r in self.fetchall()]
def group_check(self, owner, group, member):
"""Check if a member is in a group."""
q = "select 1 from groups where owner = ? and name = ? and member = ?"
        self.execute(q, (owner, group, member))
return bool(self.fetchone())
def group_parents(self, member):
"""Return all (owner, group) tuples that contain member."""
q = "select owner, name from groups where member = ?"
self.execute(q, (member,))
return self.fetchall()
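# A minimal usage sketch, assuming `g` is a Groups instance that the backend
# has wired to its SQLite connection (the constructor parameters come from
# DBWorker and are not shown here):
#
#     g.group_add('owner@example.org', 'team', 'alice')
#     g.group_members('owner@example.org', 'team')         # -> ['alice']
#     g.group_check('owner@example.org', 'team', 'alice')   # -> True
#     g.group_parents('alice')        # -> [('owner@example.org', 'team')]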
| gpl-3.0 | 6,196,770,812,204,499,000 | 34.571429 | 78 | 0.591968 | false |
ProgVal/Limnoria-test | test/test_plugins.py | 9 | 1717 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2008, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.irclib as irclib
import supybot.plugins as plugins
| bsd-3-clause | 8,212,996,658,022,921,000 | 49.5 | 79 | 0.774024 | false |
Distrotech/libxml2 | python/tests/reader3.py | 35 | 4153 | #!/usr/bin/python -u
#
# this tests the entities substitutions with the XmlTextReader interface
#
import sys
import libxml2
try:
import StringIO
str_io = StringIO.StringIO
except:
import io
str_io = io.StringIO
docstr="""<?xml version='1.0'?>
<!DOCTYPE doc [
<!ENTITY tst "<p>test</p>">
]>
<doc>&tst;</doc>"""
# Memory debug specific
libxml2.debugMemory(1)
#
# First test, normal don't substitute entities.
#
f = str_io(docstr)
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test_noent")
ret = reader.Read()
if ret != 1:
print("Error reading to root")
sys.exit(1)
if reader.Name() == "doc" or reader.NodeType() == 10:
ret = reader.Read()
if ret != 1:
print("Error reading to root")
sys.exit(1)
if reader.Name() != "doc" or reader.NodeType() != 1:
print("test_normal: Error reading the root element")
sys.exit(1)
ret = reader.Read()
if ret != 1:
print("test_normal: Error reading to the entity")
sys.exit(1)
if reader.Name() != "tst" or reader.NodeType() != 5:
print("test_normal: Error reading the entity")
sys.exit(1)
ret = reader.Read()
if ret != 1:
print("test_normal: Error reading to the end of root")
sys.exit(1)
if reader.Name() != "doc" or reader.NodeType() != 15:
print("test_normal: Error reading the end of the root element")
sys.exit(1)
ret = reader.Read()
if ret != 0:
print("test_normal: Error detecting the end")
sys.exit(1)
#
# Second test, completely substitute the entities.
#
f = str_io(docstr)
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test_noent")
reader.SetParserProp(libxml2.PARSER_SUBST_ENTITIES, 1)
ret = reader.Read()
if ret != 1:
print("Error reading to root")
sys.exit(1)
if reader.Name() == "doc" or reader.NodeType() == 10:
ret = reader.Read()
if ret != 1:
print("Error reading to root")
sys.exit(1)
if reader.Name() != "doc" or reader.NodeType() != 1:
print("test_noent: Error reading the root element")
sys.exit(1)
ret = reader.Read()
if ret != 1:
print("test_noent: Error reading to the entity content")
sys.exit(1)
if reader.Name() != "p" or reader.NodeType() != 1:
print("test_noent: Error reading the p element from entity")
sys.exit(1)
ret = reader.Read()
if ret != 1:
print("test_noent: Error reading to the text node")
sys.exit(1)
if reader.NodeType() != 3 or reader.Value() != "test":
print("test_noent: Error reading the text node")
sys.exit(1)
ret = reader.Read()
if ret != 1:
print("test_noent: Error reading to the end of p element")
sys.exit(1)
if reader.Name() != "p" or reader.NodeType() != 15:
print("test_noent: Error reading the end of the p element")
sys.exit(1)
ret = reader.Read()
if ret != 1:
print("test_noent: Error reading to the end of root")
sys.exit(1)
if reader.Name() != "doc" or reader.NodeType() != 15:
print("test_noent: Error reading the end of the root element")
sys.exit(1)
ret = reader.Read()
if ret != 0:
print("test_noent: Error detecting the end")
sys.exit(1)
#
# third test, crazy stuff about empty element in external parsed entities
#
s = """<!DOCTYPE struct [
<!ENTITY simplestruct2.ent SYSTEM "simplestruct2.ent">
]>
<struct>&simplestruct2.ent;</struct>
"""
expect="""10 struct 0 0
1 struct 0 0
1 descr 1 1
15 struct 0 0
"""
res=""
simplestruct2_ent="""<descr/>"""
def myResolver(URL, ID, ctxt):
if URL == "simplestruct2.ent":
return(str_io(simplestruct2_ent))
return None
libxml2.setEntityLoader(myResolver)
input = libxml2.inputBuffer(str_io(s))
reader = input.newTextReader("test3")
reader.SetParserProp(libxml2.PARSER_SUBST_ENTITIES,1)
while reader.Read() == 1:
res = res + "%s %s %d %d\n" % (reader.NodeType(),reader.Name(),
reader.Depth(),reader.IsEmptyElement())
if res != expect:
print("test3 failed: unexpected output")
print(res)
sys.exit(1)
#
# cleanup
#
del f
del input
del reader
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| gpl-3.0 | -7,122,903,785,842,022,000 | 24.95625 | 74 | 0.651096 | false |
onitake/ansible | lib/ansible/modules/network/f5/bigip_gtm_monitor_tcp.py | 4 | 24848 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_monitor_tcp
short_description: Manages F5 BIG-IP GTM tcp monitors
description:
- Manages F5 BIG-IP GTM tcp monitors.
version_added: 2.6
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp)
parent on the C(Common) partition.
default: /Common/tcp
send:
description:
- The send string for the monitor call.
receive:
description:
- The receive string for the monitor call.
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, then a C(port) number must be specified.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run.
- If this parameter is not provided when creating a new monitor, then the
default value will be 30.
- This value B(must) be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second.
- If this parameter is not provided when creating a new monitor, then the
default value will be 120.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
probe_timeout:
description:
- Specifies the number of seconds after which the system times out the probe request
to the system.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(5).
ignore_down_response:
description:
- Specifies that the monitor allows more than one probe attempt per interval.
- When C(yes), specifies that the monitor ignores down responses for the duration of
the monitor timeout. Once the monitor timeout is reached without the system receiving
an up response, the system marks the object down.
- When C(no), specifies that the monitor immediately marks an object down when it
receives a down response.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(no).
type: bool
transparent:
description:
- Specifies whether the monitor operates in transparent mode.
- A monitor in transparent mode directs traffic through the associated pool members
or nodes (usually a router or firewall) to the aliased destination (that is, it
probes the C(ip)-C(port) combination specified in the monitor).
- If the monitor cannot successfully reach the aliased destination, the pool member
or node through which the monitor traffic was sent is marked down.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(no).
type: bool
reverse:
description:
- Instructs the system to mark the target resource down when the test is successful.
This setting is useful, for example, if the content on your web site home page is
dynamic and changes frequently, you may want to set up a reverse ECV service check
that looks for the string Error.
- A match for this string means that the web server was down.
- To use this option, you must specify values for C(send) and C(receive).
type: bool
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a GTM TCP monitor
bigip_gtm_monitor_tcp:
name: my_monitor
ip: 1.1.1.1
port: 80
send: my send string
receive: my receive string
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Remove TCP Monitor
bigip_gtm_monitor_tcp:
name: my_monitor
state: absent
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add TCP monitor for all addresses, port 514
bigip_gtm_monitor_tcp:
name: my_monitor
server: lb.mydomain.com
user: admin
port: 514
password: secret
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: tcp
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
port:
description: The new port the monitor checks the resource on.
returned: changed
type: string
sample: 8080
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
ignore_down_response:
description: Whether to ignore the down response or not.
returned: changed
type: bool
sample: True
send:
description: The new send string for this monitor.
returned: changed
type: string
sample: tcp string to send
receive:
description: The new receive string for this monitor.
returned: changed
type: string
sample: tcp string to receive
probe_timeout:
description: The new timeout in which the system will timeout the monitor probe.
returned: changed
type: int
sample: 10
reverse:
description: The new value for whether the monitor operates in reverse mode.
returned: changed
type: bool
sample: False
transparent:
description: The new value for whether the monitor operates in transparent mode.
returned: changed
type: bool
sample: False
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import module_provisioned
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import module_provisioned
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'ignoreDownResponse': 'ignore_down_response',
'probeTimeout': 'probe_timeout',
'recv': 'receive',
}
api_attributes = [
'defaultsFrom',
'interval',
'timeout',
'destination',
'transparent',
'probeTimeout',
'ignoreDownResponse',
'reverse',
'send',
'recv',
]
returnables = [
'parent',
'ip',
'port',
'interval',
'timeout',
'transparent',
'probe_timeout',
'ignore_down_response',
'send',
'receive',
'reverse',
]
updatables = [
'destination',
'interval',
'timeout',
'transparent',
'probe_timeout',
'ignore_down_response',
'send',
'receive',
'reverse',
'ip',
'port',
]
class ApiParameters(Parameters):
@property
def ip(self):
ip, port = self._values['destination'].split(':')
return ip
@property
def port(self):
ip, port = self._values['destination'].split(':')
try:
return int(port)
except ValueError:
return port
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] is None:
return None
if self._values['ignore_down_response'] == 'disabled':
return False
return True
@property
def transparent(self):
if self._values['transparent'] is None:
return None
if self._values['transparent'] == 'disabled':
return False
return True
@property
def reverse(self):
if self._values['reverse'] is None:
return None
if self._values['reverse'] == 'disabled':
return False
return True
class ModuleParameters(Parameters):
@property
def interval(self):
if self._values['interval'] is None:
return None
        if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
elif self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
@property
def probe_timeout(self):
if self._values['probe_timeout'] is None:
return None
return int(self._values['probe_timeout'])
@property
def type(self):
return 'tcp'
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def transparent(self):
if self._values['transparent'] is None:
return None
elif self._values['transparent'] is True:
return 'enabled'
return 'disabled'
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] is None:
return None
elif self._values['ignore_down_response'] is True:
return 'enabled'
return 'disabled'
@property
def reverse(self):
if self._values['reverse'] is None:
return None
elif self._values['reverse'] is True:
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
@property
def ip(self):
ip, port = self._values['destination'].split(':')
return ip
@property
def port(self):
ip, port = self._values['destination'].split(':')
return int(port)
@property
def transparent(self):
if self._values['transparent'] == 'enabled':
return True
return False
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] == 'enabled':
return True
return False
@property
def reverse(self):
if self._values['reverse'] == 'enabled':
return True
return False
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 120})
if self.want.interval is None:
self.want.update({'interval': 30})
if self.want.probe_timeout is None:
self.want.update({'probe_timeout': 5})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
if self.want.ignore_down_response is None:
self.want.update({'ignore_down_response': False})
if self.want.transparent is None:
self.want.update({'transparent': False})
def exec_module(self):
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_default_creation_values()
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/tcp'),
send=dict(),
receive=dict(),
ip=dict(),
port=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
ignore_down_response=dict(type='bool'),
transparent=dict(type='bool'),
probe_timeout=dict(type='int'),
reverse=dict(type='bool'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,552,969,544,336,799,000 | 30.373737 | 93 | 0.602704 | false |
datsfosure/ansible | examples/scripts/yaml_to_ini.py | 175 | 7609 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible import errors
from ansible import utils
import os
import yaml
import sys
class InventoryParserYaml(object):
''' Host inventory parser for ansible '''
def __init__(self, filename=C.DEFAULT_HOST_LIST):
sys.stderr.write("WARNING: YAML inventory files are deprecated in 0.6 and will be removed in 0.7, to migrate" +
" download and run https://github.com/ansible/ansible/blob/devel/examples/scripts/yaml_to_ini.py\n")
fh = open(filename)
data = fh.read()
fh.close()
self._hosts = {}
self._parse(data)
def _make_host(self, hostname):
if hostname in self._hosts:
return self._hosts[hostname]
else:
host = Host(hostname)
self._hosts[hostname] = host
return host
# see file 'test/yaml_hosts' for syntax
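    # The accepted YAML shape, inferred from _parse below ('test/yaml_hosts'
    # remains the authoritative sample), looks roughly like:
    #
    #   - ungrouped.example.com
    #   - host: web1.example.com
    #     vars: { http_port: 80 }
    #     groups: [ webservers ]
    #   - group: dbservers
    #     hosts:
    #       - db1.example.com
    #       - host: db2.example.com
    #         vars: { replica: true }
    #     vars:
    #       - ntp_server: ntp.example.com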
def _parse(self, data):
# FIXME: refactor into subfunctions
all = Group('all')
ungrouped = Group('ungrouped')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
grouped_hosts = []
yaml = utils.parse_yaml(data)
# first add all groups
for item in yaml:
if type(item) == dict and 'group' in item:
group = Group(item['group'])
for subresult in item.get('hosts',[]):
if type(subresult) in [ str, unicode ]:
host = self._make_host(subresult)
group.add_host(host)
grouped_hosts.append(host)
elif type(subresult) == dict:
host = self._make_host(subresult['host'])
vars = subresult.get('vars',{})
if type(vars) == list:
for subitem in vars:
for (k,v) in subitem.items():
host.set_variable(k,v)
elif type(vars) == dict:
for (k,v) in subresult.get('vars',{}).items():
host.set_variable(k,v)
else:
raise errors.AnsibleError("unexpected type for variable")
group.add_host(host)
grouped_hosts.append(host)
vars = item.get('vars',{})
if type(vars) == dict:
for (k,v) in item.get('vars',{}).items():
group.set_variable(k,v)
elif type(vars) == list:
for subitem in vars:
if type(subitem) != dict:
raise errors.AnsibleError("expected a dictionary")
for (k,v) in subitem.items():
group.set_variable(k,v)
self.groups[group.name] = group
all.add_child_group(group)
# add host definitions
for item in yaml:
if type(item) in [ str, unicode ]:
host = self._make_host(item)
if host not in grouped_hosts:
ungrouped.add_host(host)
elif type(item) == dict and 'host' in item:
host = self._make_host(item['host'])
vars = item.get('vars', {})
if type(vars)==list:
varlist, vars = vars, {}
for subitem in varlist:
vars.update(subitem)
for (k,v) in vars.items():
host.set_variable(k,v)
groups = item.get('groups', {})
if type(groups) in [ str, unicode ]:
groups = [ groups ]
if type(groups)==list:
for subitem in groups:
if subitem in self.groups:
group = self.groups[subitem]
else:
group = Group(subitem)
self.groups[group.name] = group
all.add_child_group(group)
group.add_host(host)
grouped_hosts.append(host)
if host not in grouped_hosts:
ungrouped.add_host(host)
# make sure ungrouped.hosts is the complement of grouped_hosts
ungrouped_hosts = [host for host in ungrouped.hosts if host not in grouped_hosts]
if __name__ == "__main__":
if len(sys.argv) != 2:
print "usage: yaml_to_ini.py /path/to/ansible/hosts"
sys.exit(1)
result = ""
original = sys.argv[1]
yamlp = InventoryParserYaml(filename=sys.argv[1])
dirname = os.path.dirname(original)
group_names = [ g.name for g in yamlp.groups.values() ]
for group_name in sorted(group_names):
record = yamlp.groups[group_name]
if group_name == 'all':
continue
hosts = record.hosts
result = result + "[%s]\n" % record.name
for h in hosts:
result = result + "%s\n" % h.name
result = result + "\n"
groupfiledir = os.path.join(dirname, "group_vars")
if not os.path.exists(groupfiledir):
print "* creating: %s" % groupfiledir
os.makedirs(groupfiledir)
groupfile = os.path.join(groupfiledir, group_name)
print "* writing group variables for %s into %s" % (group_name, groupfile)
groupfh = open(groupfile, 'w')
groupfh.write(yaml.dump(record.get_variables()))
groupfh.close()
for (host_name, host_record) in yamlp._hosts.iteritems():
hostfiledir = os.path.join(dirname, "host_vars")
if not os.path.exists(hostfiledir):
print "* creating: %s" % hostfiledir
os.makedirs(hostfiledir)
hostfile = os.path.join(hostfiledir, host_record.name)
print "* writing host variables for %s into %s" % (host_record.name, hostfile)
hostfh = open(hostfile, 'w')
hostfh.write(yaml.dump(host_record.get_variables()))
hostfh.close()
# also need to keep a hash of variables per each host
# and variables per each group
# and write those to disk
newfilepath = os.path.join(dirname, "hosts.new")
fdh = open(newfilepath, 'w')
fdh.write(result)
fdh.close()
print "* COMPLETE: review your new inventory file and replace your original when ready"
print "* new inventory file saved as %s" % newfilepath
print "* edit group specific variables in %s/group_vars/" % dirname
print "* edit host specific variables in %s/host_vars/" % dirname
# now need to write this to disk as (oldname).new
# and inform the user
| gpl-3.0 | 1,051,190,400,753,278,800 | 35.936893 | 119 | 0.540938 | false |
Tanmay28/coala | coalib/tests/output/JSONEncoderTest.py | 1 | 2819 | import sys
import json
import unittest
from datetime import datetime
sys.path.insert(0, ".")
from coalib.output.JSONEncoder import JSONEncoder
class TestClass1(object):
def __init__(self):
self.a = 0
class TestClass2(object):
def __init__(self):
self.a = 0
self.b = TestClass1()
class TestClass3(object):
def __init__(self):
self.a = 0
self.b = TestClass1()
@staticmethod
def __getitem__(key):
return "val"
@staticmethod
def keys():
return ["key"]
class PropertiedClass(object):
def __init__(self):
self._a = 5
@property
def prop(self):
return self._a
class JSONAbleClass(object):
@staticmethod
def __json__():
return ['dont', 'panic']
class JSONEncoderTest(unittest.TestCase):
kw = {"cls": JSONEncoder, "sort_keys": True}
def test_builtins(self):
self.assertEquals('"test"', json.dumps("test", **self.kw))
self.assertEquals('1', json.dumps(1, **self.kw))
self.assertEquals('true', json.dumps(True, **self.kw))
self.assertEquals('null', json.dumps(None, **self.kw))
def test_iter(self):
self.assertEquals('[0, 1]', json.dumps([0, 1], **self.kw))
self.assertEquals('[0, 1]', json.dumps((0, 1), **self.kw))
self.assertEquals('[0, 1]', json.dumps(range(2), **self.kw))
def test_dict(self):
self.assertEquals('{"0": 1}', json.dumps({0: 1}, **self.kw))
self.assertEquals('{"0": 1}', json.dumps({"0": 1}, **self.kw))
self.assertEquals('{"0": "1"}', json.dumps({"0": "1"}, **self.kw))
def test_time(self):
tf = datetime.today()
self.assertEquals('"' + tf.isoformat() + '"',
json.dumps(tf, **self.kw))
def test_class1(self):
tc1 = TestClass1()
self.assertEquals('{"a": 0}', json.dumps(tc1, **self.kw))
self.assertEquals('[{"a": 0}]', json.dumps([tc1], **self.kw))
self.assertEquals('{"0": {"a": 0}}', json.dumps({0: tc1}, **self.kw))
def test_class2(self):
tc2 = TestClass2()
self.assertEquals('{"a": 0, "b": {"a": 0}}',
json.dumps(tc2, **self.kw))
def test_class3(self):
tc3 = TestClass3()
self.assertEquals('{"key": "val"}',
json.dumps(tc3, **self.kw))
def test_propertied_class(self):
uut = PropertiedClass()
self.assertEqual('{"prop": 5}', json.dumps(uut, **self.kw))
def test_jsonable_class(self):
uut = JSONAbleClass()
self.assertEqual('["dont", "panic"]', json.dumps(uut, **self.kw))
def test_type_error(self):
with self.assertRaises(TypeError):
json.dumps(1j, **self.kw)
if __name__ == "__main__":
unittest.main(verbosity=2)
| agpl-3.0 | -8,412,124,947,123,936,000 | 26.105769 | 77 | 0.548421 | false |
parkbyte/electrumparkbyte | plugins/trezor/clientbase.py | 7 | 8778 | import time
from electrum.i18n import _
from electrum.util import PrintError, UserCancelled
from electrum.wallet import BIP44_Wallet
class GuiMixin(object):
# Requires: self.proto, self.device
messages = {
3: _("Confirm the transaction output on your %s device"),
4: _("Confirm internal entropy on your %s device to begin"),
5: _("Write down the seed word shown on your %s"),
6: _("Confirm on your %s that you want to wipe it clean"),
7: _("Confirm on your %s device the message to sign"),
8: _("Confirm the total amount spent and the transaction fee on your "
"%s device"),
10: _("Confirm wallet address on your %s device"),
'default': _("Check your %s device to continue"),
}
def callback_Failure(self, msg):
# BaseClient's unfortunate call() implementation forces us to
# raise exceptions on failure in order to unwind the stack.
# However, making the user acknowledge they cancelled
# gets old very quickly, so we suppress those. The NotInitialized
# one is misnamed and indicates a passphrase request was cancelled.
if msg.code in (self.types.Failure_PinCancelled,
self.types.Failure_ActionCancelled,
self.types.Failure_NotInitialized):
raise UserCancelled()
raise RuntimeError(msg.message)
def callback_ButtonRequest(self, msg):
message = self.msg
if not message:
message = self.messages.get(msg.code, self.messages['default'])
self.handler.show_message(message % self.device, self.cancel)
return self.proto.ButtonAck()
def callback_PinMatrixRequest(self, msg):
if msg.type == 2:
msg = _("Enter a new PIN for your %s:")
elif msg.type == 3:
msg = (_("Re-enter the new PIN for your %s.\n\n"
"NOTE: the positions of the numbers have changed!"))
else:
msg = _("Enter your current %s PIN:")
pin = self.handler.get_pin(msg % self.device)
if not pin:
return self.proto.Cancel()
return self.proto.PinMatrixAck(pin=pin)
def callback_PassphraseRequest(self, req):
if self.creating_wallet:
msg = _("Enter a passphrase to generate this wallet. Each time "
"you use this wallet your %s will prompt you for the "
"passphrase. If you forget the passphrase you cannot "
"access the bitcoins in the wallet.") % self.device
else:
msg = _("Enter the passphrase to unlock this wallet:")
passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
if passphrase is None:
return self.proto.Cancel()
passphrase = BIP44_Wallet.normalize_passphrase(passphrase)
return self.proto.PassphraseAck(passphrase=passphrase)
def callback_WordRequest(self, msg):
self.step += 1
msg = _("Step %d/24. Enter seed word as explained on "
"your %s:") % (self.step, self.device)
word = self.handler.get_word(msg)
# Unfortunately the device can't handle self.proto.Cancel()
return self.proto.WordAck(word=word)
def callback_CharacterRequest(self, msg):
char_info = self.handler.get_char(msg)
if not char_info:
return self.proto.Cancel()
return self.proto.CharacterAck(**char_info)
class TrezorClientBase(GuiMixin, PrintError):
def __init__(self, handler, plugin, proto):
assert hasattr(self, 'tx_api') # ProtocolMixin already constructed?
self.proto = proto
self.device = plugin.device
self.handler = handler
self.tx_api = plugin
self.types = plugin.types
self.msg = None
self.creating_wallet = False
self.used()
def __str__(self):
return "%s/%s" % (self.label(), self.features.device_id)
def label(self):
'''The name given by the user to the device.'''
return self.features.label
def is_initialized(self):
'''True if initialized, False if wiped.'''
return self.features.initialized
def is_pairable(self):
return not self.features.bootloader_mode
def used(self):
self.last_operation = time.time()
def prevent_timeouts(self):
self.last_operation = float('inf')
def timeout(self, cutoff):
'''Time out the client if the last operation was before cutoff.'''
if self.last_operation < cutoff:
self.print_error("timed out")
self.clear_session()
@staticmethod
def expand_path(n):
'''Convert bip32 path to list of uint32 integers with prime flags
0/-1/1' -> [0, 0x80000001, 0x80000001]'''
# This code is similar to code in trezorlib where it unforunately
# is not declared as a staticmethod. Our n has an extra element.
PRIME_DERIVATION_FLAG = 0x80000000
path = []
for x in n.split('/')[1:]:
prime = 0
if x.endswith("'"):
x = x.replace('\'', '')
prime = PRIME_DERIVATION_FLAG
if x.startswith('-'):
prime = PRIME_DERIVATION_FLAG
path.append(abs(int(x)) | prime)
return path
def cancel(self):
'''Provided here as in keepkeylib but not trezorlib.'''
self.transport.write(self.proto.Cancel())
def first_address(self, derivation):
return self.address_from_derivation(derivation)
def address_from_derivation(self, derivation):
return self.get_address('Bitcoin', self.expand_path(derivation))
def toggle_passphrase(self):
if self.features.passphrase_protection:
self.msg = _("Confirm on your %s device to disable passphrases")
else:
self.msg = _("Confirm on your %s device to enable passphrases")
enabled = not self.features.passphrase_protection
self.apply_settings(use_passphrase=enabled)
def change_label(self, label):
self.msg = _("Confirm the new label on your %s device")
self.apply_settings(label=label)
def change_homescreen(self, homescreen):
self.msg = _("Confirm on your %s device to change your home screen")
self.apply_settings(homescreen=homescreen)
def set_pin(self, remove):
if remove:
self.msg = _("Confirm on your %s device to disable PIN protection")
elif self.features.pin_protection:
self.msg = _("Confirm on your %s device to change your PIN")
else:
self.msg = _("Confirm on your %s device to set a PIN")
self.change_pin(remove)
def clear_session(self):
'''Clear the session to force pin (and passphrase if enabled)
re-entry. Does not leak exceptions.'''
self.print_error("clear session:", self)
self.prevent_timeouts()
try:
super(TrezorClientBase, self).clear_session()
except BaseException as e:
# If the device was removed it has the same effect...
self.print_error("clear_session: ignoring error", str(e))
pass
def get_public_node(self, address_n, creating):
self.creating_wallet = creating
return super(TrezorClientBase, self).get_public_node(address_n)
def close(self):
'''Called when Our wallet was closed or the device removed.'''
self.print_error("closing client")
self.clear_session()
# Release the device
self.transport.close()
def firmware_version(self):
f = self.features
return (f.major_version, f.minor_version, f.patch_version)
def atleast_version(self, major, minor=0, patch=0):
        return cmp(self.firmware_version(), (major, minor, patch)) >= 0
@staticmethod
def wrapper(func):
'''Wrap methods to clear any message box they opened.'''
def wrapped(self, *args, **kwargs):
try:
self.prevent_timeouts()
return func(self, *args, **kwargs)
finally:
self.used()
self.handler.finished()
self.creating_wallet = False
self.msg = None
return wrapped
@staticmethod
def wrap_methods(cls):
for method in ['apply_settings', 'change_pin', 'decrypt_message',
'get_address', 'get_public_node',
'load_device_by_mnemonic', 'load_device_by_xprv',
'recovery_device', 'reset_device', 'sign_message',
'sign_tx', 'wipe_device']:
setattr(cls, method, cls.wrapper(getattr(cls, method)))
| mit | -1,289,873,340,784,256,500 | 37.331878 | 79 | 0.599339 | false |
Backflipz/plugin.video.excubed | resources/lib/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only numbers below 64 are guaranteed to be valid
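# For example, offset 0x30 (the character '0') maps to 252 in both tables,
# matching the "252: 0 - 9" convention listed above.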
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
# flake8: noqa
| gpl-2.0 | 4,613,011,822,758,106,000 | 54.825328 | 70 | 0.553113 | false |
fabioz/pep8 | testsuite/E26.py | 14 | 1225 | #: E261:1:5
pass # an inline comment
#: E262:1:12
x = x + 1 #Increment x
#: E262:1:12
x = x + 1 # Increment x
#: E262:1:12
x = y + 1 #: Increment x
#: E265:1:1
#Block comment
a = 1
#: E265:2:1
m = 42
#! This is important
mx = 42 - 42
#: E266:3:5 E266:6:5
def how_it_feel(r):
### This is a variable ###
a = 42
### Of course it is unused
return
#: E265:1:1 E266:2:1
##if DEBUG:
## logging.error()
#: W291:1:42
#########################################
#:
#: Okay
#!/usr/bin/env python
pass # an inline comment
x = x + 1 # Increment x
y = y + 1 #: Increment x
# Block comment
a = 1
# Block comment1
# Block comment2
aaa = 1
# example of docstring (not parsed)
def oof():
"""
#foo not parsed
"""
###########################################################################
# A SEPARATOR #
###########################################################################
# ####################################################################### #
# ########################## another separator ########################## #
# ####################################################################### #
| mit | 7,955,188,299,268,242,000 | 19.762712 | 79 | 0.342041 | false |
fhaoquan/kbengine | kbe/src/lib/python/Lib/lib2to3/fixes/fix_callable.py | 161 | 1151 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for callable().
This converts callable(obj) into isinstance(obj, collections.Callable), adding a
collections import if needed."""
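# For example, ``callable(f)`` is rewritten to
# ``isinstance(f, collections.Callable)``, and ``import collections`` is
# added to the module if it is not already present.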
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call, Name, String, Attr, touch_import
class FixCallable(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
# Ignore callable(*args) or use of keywords.
# Either could be a hint that the builtin callable() is not being used.
PATTERN = """
power< 'callable'
trailer< lpar='('
( not(arglist | argument<any '=' any>) func=any
| func=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
func = results['func']
touch_import(None, 'collections', node=node)
args = [func.clone(), String(', ')]
args.extend(Attr(Name('collections'), Name('Callable')))
return Call(Name('isinstance'), args, prefix=node.prefix)
| lgpl-3.0 | -4,702,990,959,018,992,000 | 30.108108 | 80 | 0.61338 | false |
pipsiscool/audacity | lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/compiler_fc.py | 287 | 1846 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,imp,types
from waflib import Utils,Configure,Options,Logs,Errors
from waflib.Tools import fc
fc_compiler={'win32':['gfortran','ifort'],'darwin':['gfortran','g95','ifort'],'linux':['gfortran','g95','ifort'],'java':['gfortran','g95','ifort'],'default':['gfortran'],'aix':['gfortran']}
def __list_possible_compiler(platform):
try:
return fc_compiler[platform]
except KeyError:
return fc_compiler["default"]
def configure(conf):
try:test_for_compiler=conf.options.check_fc
except AttributeError:conf.fatal("Add options(opt): opt.load('compiler_fc')")
for compiler in test_for_compiler.split():
conf.env.stash()
conf.start_msg('Checking for %r (fortran compiler)'%compiler)
try:
conf.load(compiler)
except conf.errors.ConfigurationError ,e:
conf.env.revert()
conf.end_msg(False)
Logs.debug('compiler_fortran: %r'%e)
else:
if conf.env['FC']:
conf.end_msg(conf.env.get_flat('FC'))
conf.env.COMPILER_FORTRAN=compiler
break
conf.end_msg(False)
else:
conf.fatal('could not configure a fortran compiler!')
def options(opt):
opt.load_special_tools('fc_*.py')
build_platform=Utils.unversioned_sys_platform()
detected_platform=Options.platform
possible_compiler_list=__list_possible_compiler(detected_platform)
test_for_compiler=' '.join(possible_compiler_list)
fortran_compiler_opts=opt.add_option_group("Fortran Compiler Options")
fortran_compiler_opts.add_option('--check-fortran-compiler',default="%s"%test_for_compiler,help='On this platform (%s) the following Fortran Compiler will be checked by default: "%s"'%(detected_platform,test_for_compiler),dest="check_fc")
for compiler in test_for_compiler.split():
opt.load('%s'%compiler)
| mit | -3,481,206,233,998,690,000 | 41.930233 | 239 | 0.729144 | false |
nexgenta/apt | test/pre-upload-check.py | 2 | 5809 | #!/usr/bin/python
import sys
import os
import glob
import os.path
from subprocess import call, PIPE
import unittest
stdout = os.open("/dev/null",0) #sys.stdout
stderr = os.open("/dev/null",0) # sys.stderr
apt_args = [] # ["-o","Debug::pkgAcquire::Auth=true"]
class testAuthentication(unittest.TestCase):
"""
test if the authentication is working, the repository
of the test-data can be found here:
bzr get http://people.ubuntu.com/~mvo/bzr/apt/apt-auth-test-suit/
"""
# some class wide data
apt = "apt-get"
pkg = "libglib2.0-data"
pkgver = "2.13.6-1ubuntu1"
pkgpath = "/var/cache/apt/archives/libglib2.0-data_2.13.6-1ubuntu1_all.deb"
def setUp(self):
for f in glob.glob("testkeys/*,key"):
call(["apt-key", "add", f], stdout=stdout, stderr=stderr)
def _cleanup(self):
" make sure we get new lists and no i-m-s "
call(["rm","-f", "/var/lib/apt/lists/*"])
if os.path.exists(self.pkgpath):
os.unlink(self.pkgpath)
def _expectedRes(self, resultstr):
if resultstr == 'ok':
return 0
elif resultstr == 'broken':
return 100
def testPackages(self):
for f in glob.glob("testsources.list/sources.list*package*"):
self._cleanup()
(prefix, testtype, result) = f.split("-")
expected_res = self._expectedRes(result)
# update first
call([self.apt,"update",
"-o","Dir::Etc::sourcelist=./%s" % f]+apt_args,
stdout=stdout, stderr=stderr)
# then get the pkg
cmd = ["install", "-y", "-d", "--reinstall",
"%s=%s" % (self.pkg, self.pkgver),
"-o","Dir::state::Status=./fake-status"]
res = call([self.apt, "-o","Dir::Etc::sourcelist=./%s" % f]+cmd+apt_args,
stdout=stdout, stderr=stderr)
self.assert_(res == expected_res,
"test '%s' failed (got %s expected %s" % (f,res,expected_res))
def testGPG(self):
for f in glob.glob("testsources.list/sources.list*gpg*"):
self._cleanup()
(prefix, testtype, result) = f.split("-")
expected_res = self._expectedRes(result)
# update first
call([self.apt,"update",
"-o","Dir::Etc::sourcelist=./%s" % f]+apt_args,
stdout=stdout, stderr=stderr)
cmd = ["install", "-y", "-d", "--reinstall",
"%s=%s" % (self.pkg, self.pkgver),
"-o","Dir::state::Status=./fake-status"]
res = call([self.apt, "-o","Dir::Etc::sourcelist=./%s" % f]+
cmd+apt_args,
stdout=stdout, stderr=stderr)
self.assert_(res == expected_res,
"test '%s' failed (got %s expected %s" % (f,res,expected_res))
def testRelease(self):
for f in glob.glob("testsources.list/sources.list*release*"):
self._cleanup()
(prefix, testtype, result) = f.split("-")
expected_res = self._expectedRes(result)
cmd = ["update"]
res = call([self.apt,"-o","Dir::Etc::sourcelist=./%s" % f]+cmd+apt_args,
stdout=stdout, stderr=stderr)
self.assert_(res == expected_res,
"test '%s' failed (got %s expected %s" % (f,res,expected_res))
if expected_res == 0:
self.assert_(len(glob.glob("/var/lib/apt/lists/partial/*")) == 0,
"partial/ dir has leftover files: %s" % glob.glob("/var/lib/apt/lists/partial/*"))
class testLocalRepositories(unittest.TestCase):
" test local repository regressions "
repo_dir = "local-repo"
apt = "apt-get"
pkg = "gdebi-test4"
def setUp(self):
self.repo = os.path.abspath(os.path.join(os.getcwd(), self.repo_dir))
self.sources = os.path.join(self.repo, "sources.list")
s = open(self.sources,"w")
s.write("deb file://%s/ /\n" % self.repo)
s.close()
def testLocalRepoAuth(self):
# two times to get at least one i-m-s hit
for i in range(2):
self.assert_(os.path.exists(self.sources))
cmd = [self.apt,"update","-o", "Dir::Etc::sourcelist=%s" % self.sources]+apt_args
res = call(cmd, stdout=stdout, stderr=stderr)
self.assertEqual(res, 0, "local repo test failed")
self.assert_(os.path.exists(os.path.join(self.repo,"Packages.gz")),
"Packages.gz vanished from local repo")
def testInstallFromLocalRepo(self):
apt = [self.apt,"-o", "Dir::Etc::sourcelist=%s"% self.sources]+apt_args
cmd = apt+["update"]
res = call(cmd, stdout=stdout, stderr=stderr)
self.assertEqual(res, 0)
res = call(apt+["-y","install","--reinstall",self.pkg],
stdout=stdout, stderr=stderr)
self.assert_(res == 0,
"installing %s failed (got %s)" % (self.pkg, res))
res = call(apt+["-y","remove",self.pkg],
stdout=stdout, stderr=stderr)
self.assert_(res == 0,
"removing %s failed (got %s)" % (self.pkg, res))
def testPythonAptInLocalRepo(self):
import apt, apt_pkg
apt_pkg.Config.Set("Dir::Etc::sourcelist",self.sources)
cache = apt.Cache()
cache.update()
pkg = cache["apt"]
self.assert_(pkg.name == 'apt')
if __name__ == "__main__":
print "Runing simple testsuit on current apt-get and libapt"
if len(sys.argv) > 1 and sys.argv[1] == "-v":
stdout = sys.stdout
stderr = sys.stderr
unittest.main()
| gpl-2.0 | -3,632,881,847,565,245,000 | 36.720779 | 111 | 0.531761 | false |
openstack-infra/reviewstats | reviewstats/cmd/openapproved.py | 1 | 3204 | # Copyright (C) 2011 - Soren Hansen
# Copyright (C) 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Identify approved and open patches, that are probably just trivial rebases.
Prints out list of approved patches that failed to merge and are currently
still open. Only show patches that are likely to be trivial rebases.
"""
import getpass
import optparse
import sys
from reviewstats import utils
def main(argv=None):
if argv is None:
argv = sys.argv
optparser = optparse.OptionParser()
optparser.add_option(
'-p', '--project', default='projects/nova.json',
help='JSON file describing the project to generate stats for')
optparser.add_option(
'-a', '--all', action='store_true',
help='Generate stats across all known projects (*.json)')
optparser.add_option(
'-u', '--user', default=getpass.getuser(), help='gerrit user')
optparser.add_option(
'-k', '--key', default=None, help='ssh key for gerrit')
optparser.add_option('-s', '--stable', action='store_true',
help='Include stable branch commits')
optparser.add_option(
'--server', default='review.opendev.org',
help='Gerrit server to connect to')
options, args = optparser.parse_args()
projects = utils.get_projects_info(options.project, options.all)
if not projects:
print("Please specify a project.")
sys.exit(1)
changes = utils.get_changes(projects, options.user, options.key,
only_open=True,
server=options.server)
approved_and_rebased = set()
for change in changes:
if 'rowCount' in change:
continue
if not options.stable and 'stable' in change['branch']:
continue
if change['status'] != 'NEW':
# Filter out WORKINPROGRESS
continue
for patch_set in change['patchSets'][:-1]:
if (utils.patch_set_approved(patch_set)
and not utils.patch_set_approved(change['patchSets'][-1])):
if has_negative_feedback(change['patchSets'][-1]):
continue
approved_and_rebased.add("%s %s" % (change['url'],
change['subject']))
for x in approved_and_rebased:
print()
print("total %d" % len(approved_and_rebased))
def has_negative_feedback(patch_set):
approvals = patch_set.get('approvals', [])
for review in approvals:
if review['type'] in ('CRVW', 'VRIF') \
and review['value'] in ('-1', '-2'):
return True
return False
| apache-2.0 | -5,270,525,455,848,682,000 | 35 | 79 | 0.615481 | false |
dparlevliet/zelenka-report-storage | server-db/twisted/web/_auth/basic.py | 66 | 1635 | # -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
HTTP BASIC authentication.
@see: U{http://tools.ietf.org/html/rfc1945}
@see: U{http://tools.ietf.org/html/rfc2616}
@see: U{http://tools.ietf.org/html/rfc2617}
"""
import binascii
from zope.interface import implements
from twisted.cred import credentials, error
from twisted.web.iweb import ICredentialFactory
class BasicCredentialFactory(object):
"""
Credential Factory for HTTP Basic Authentication
@type authenticationRealm: C{str}
@ivar authenticationRealm: The HTTP authentication realm which will be issued in
challenges.
"""
implements(ICredentialFactory)
scheme = 'basic'
def __init__(self, authenticationRealm):
self.authenticationRealm = authenticationRealm
def getChallenge(self, request):
"""
Return a challenge including the HTTP authentication realm with which
this factory was created.
"""
return {'realm': self.authenticationRealm}
def decode(self, response, request):
"""
Parse the base64-encoded, colon-separated username and password into a
L{credentials.UsernamePassword} instance.
"""
try:
creds = binascii.a2b_base64(response + '===')
except binascii.Error:
raise error.LoginFailed('Invalid credentials')
creds = creds.split(':', 1)
if len(creds) == 2:
return credentials.UsernamePassword(*creds)
else:
raise error.LoginFailed('Invalid credentials')
| lgpl-3.0 | -5,136,695,321,204,966,000 | 26.711864 | 84 | 0.666667 | false |
marscher/PyEMMA | pyemma/plots/__init__.py | 1 | 1704 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
============================================
plots - Plotting tools (:mod:`pyemma.plots`)
============================================
.. currentmodule:: pyemma.plots
User-API
========
**Graph plots**
.. autosummary::
:toctree: generated/
plot_implied_timescales
plot_cktest
**Contour plots**
.. autosummary::
:toctree: generated/
plot_free_energy
scatter_contour
**Network plots**
.. autosummary::
:toctree: generated/
plot_markov_model
plot_flux
plot_network
Classes
========
.. autosummary::
:toctree: generated/
NetworkPlot
"""
from __future__ import absolute_import
from .timescales import plot_implied_timescales
from .plots2d import contour, scatter_contour, plot_free_energy
from .networks import plot_markov_model, plot_flux, plot_network, NetworkPlot
from .markovtests import plot_cktest
from .thermoplots import *
from .plots1d import plot_feature_histograms | lgpl-3.0 | 42,813,406,846,926,700 | 23.014085 | 97 | 0.696009 | false |
LogicalKnight/pywinauto | pywinauto/__init__.py | 14 | 1225 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"""
Python package for automating GUI manipulation on Windows
"""
__revision__ = "$Revision$"
__version__ = "0.4.2"
import findwindows
WindowAmbiguousError = findwindows.WindowAmbiguousError
WindowNotFoundError = findwindows.WindowNotFoundError
import findbestmatch
MatchError = findbestmatch.MatchError
from application import Application, WindowSpecification
| lgpl-2.1 | 6,364,185,076,031,224,000 | 33 | 68 | 0.750204 | false |
craigem/MyAdventures | csvBuild.py | 1 | 1286 | # csvBuild from Adventure 6.
# import required modules
import mcpi.minecraft as minecraft
import mcpi.block as block
# connect to minecraft
mc = minecraft.Minecraft.create()
# define some constants
GAP = block.AIR.id
WALL = block.GOLD_BLOCK.id
FLOOR = block.GRASS.id
# Open the file containing maze data
FILENAME = "maze1.csv"
f = open(FILENAME, "r")
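# Assumed file format (based on the loop below): each line of maze1.csv holds
# comma-separated cells such as "0,1,1,0", where "0" becomes a gap and any
# other value becomes a wall block.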
# Get the player position:
pos = mc.player.getTilePos()
# Work out coordinates for the bottom corner and avoid the player
ORIGIN_X = pos.x + 1
ORIGIN_Y = pos.y
ORIGIN_Z = pos.z + 1
# Initialise the z value
z = ORIGIN_Z
# Loop through every line in the maze file
for line in f.readlines():
# Remove pesky new lines and any other unwanted whitespace
line = line.rstrip()
# split the line every time a comma is reached
data = line.split(",")
# reset the x coordinate
x = ORIGIN_X
# draw the whole row
for cell in data:
# Differentiate between gap and wall
if cell == "0":
b = GAP
else:
b = WALL
# build the wall
mc.setBlock(x, ORIGIN_Y, z, b)
mc.setBlock(x, ORIGIN_Y + 1, z, b)
mc.setBlock(x, ORIGIN_Y - 1, z, FLOOR)
# Update x for the next cell
x = x + 1
# update z for the next row
z = z + 1
| gpl-3.0 | -4,673,038,382,056,888,000 | 23.730769 | 65 | 0.634526 | false |
Elbagoury/odoo | addons/sale_margin/__init__.py | 441 | 1042 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_margin
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,416,282,871,942,009,600 | 42.416667 | 78 | 0.619002 | false |
Elbagoury/odoo | addons/website_report/report.py | 257 | 2124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web.http import request
from openerp.osv import osv
class Report(osv.Model):
_inherit = 'report'
def translate_doc(self, cr, uid, doc_id, model, lang_field, template, values, context=None):
if request and hasattr(request, 'website'):
if request.website is not None:
v = request.website.get_template(template)
request.session['report_view_ids'].append({
'name': v.name,
'id': v.id,
'xml_id': v.xml_id,
'inherit_id': v.inherit_id.id,
'header': False,
'active': v.active,
})
return super(Report, self).translate_doc(cr, uid, doc_id, model, lang_field, template, values, context=context)
def render(self, cr, uid, ids, template, values=None, context=None):
if request and hasattr(request, 'website'):
if request.website is not None:
request.session['report_view_ids'] = []
return super(Report, self).render(cr, uid, ids, template, values=values, context=context)
| agpl-3.0 | -8,844,894,048,427,906,000 | 44.191489 | 119 | 0.582863 | false |
presidentielcoin/presidentielcoin | qa/rpc-tests/test_framework/blockstore.py | 1 | 5425 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# BlockStore: a helper class that keeps a map of blocks and implements
# helper functions for responding to getheaders and getdata,
# and for constructing a getheaders message
#
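# Typical usage (an illustrative sketch only; names follow the methods below):
#   store = BlockStore(datadir)
#   store.add_block(block)             # index a CBlock by its hash
#   responses = store.get_blocks(inv)  # build "block" messages for a getdata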
from .mininode import *
from io import BytesIO
import dbm.dumb as dbmd
class BlockStore(object):
def __init__(self, datadir):
self.blockDB = dbmd.open(datadir + "/blocks", 'c')
self.currentBlock = 0
self.headers_map = dict()
def close(self):
self.blockDB.close()
def erase(self, blockhash):
del self.blockDB[repr(blockhash)]
# lookup an entry and return the item as raw bytes
def get(self, blockhash):
value = None
try:
value = self.blockDB[repr(blockhash)]
except KeyError:
return None
return value
# lookup an entry and return it as a CBlock
def get_block(self, blockhash):
ret = None
serialized_block = self.get(blockhash)
if serialized_block is not None:
f = BytesIO(serialized_block)
ret = CBlock()
ret.deserialize(f)
ret.calc_sha256()
return ret
def get_header(self, blockhash):
try:
return self.headers_map[blockhash]
except KeyError:
return None
# Note: this pulls full blocks out of the database just to retrieve
# the headers -- perhaps we could keep a separate data structure
# to avoid this overhead.
def headers_for(self, locator, hash_stop, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
current_block_header = self.get_header(current_tip)
if current_block_header is None:
return None
response = msg_headers()
headersList = [ current_block_header ]
maxheaders = 2000
while (headersList[0].sha256 not in locator.vHave):
prevBlockHash = headersList[0].hashPrevBlock
prevBlockHeader = self.get_header(prevBlockHash)
if prevBlockHeader is not None:
headersList.insert(0, prevBlockHeader)
else:
break
headersList = headersList[:maxheaders] # truncate if we have too many
hashList = [x.sha256 for x in headersList]
index = len(headersList)
if (hash_stop in hashList):
index = hashList.index(hash_stop)+1
response.headers = headersList[:index]
return response
def add_block(self, block):
block.calc_sha256()
try:
self.blockDB[repr(block.sha256)] = bytes(block.serialize())
except TypeError as e:
print("Unexpected error: ", sys.exc_info()[0], e.args)
self.currentBlock = block.sha256
self.headers_map[block.sha256] = CBlockHeader(block)
def add_header(self, header):
self.headers_map[header.sha256] = header
# lookup the hashes in "inv", and return p2p messages for delivering
# blocks found.
def get_blocks(self, inv):
responses = []
for i in inv:
if (i.type == 2): # MSG_BLOCK
data = self.get(i.hash)
if data is not None:
# Use msg_generic to avoid re-serialization
responses.append(msg_generic(b"block", data))
return responses
def get_locator(self, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
r = []
counter = 0
step = 1
lastBlock = self.get_block(current_tip)
while lastBlock is not None:
r.append(lastBlock.hashPrevBlock)
for i in range(step):
lastBlock = self.get_block(lastBlock.hashPrevBlock)
if lastBlock is None:
break
counter += 1
if counter > 10:
step *= 2
locator = CBlockLocator()
locator.vHave = r
return locator
class TxStore(object):
def __init__(self, datadir):
self.txDB = dbmd.open(datadir + "/transactions", 'c')
def close(self):
self.txDB.close()
# lookup an entry and return the item as raw bytes
def get(self, txhash):
value = None
try:
value = self.txDB[repr(txhash)]
except KeyError:
return None
return value
def get_transaction(self, txhash):
ret = None
serialized_tx = self.get(txhash)
if serialized_tx is not None:
f = BytesIO(serialized_tx)
ret = CTransaction()
ret.deserialize(f)
ret.calc_sha256()
return ret
def add_transaction(self, tx):
tx.calc_sha256()
try:
self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
except TypeError as e:
print("Unexpected error: ", sys.exc_info()[0], e.args)
def get_transactions(self, inv):
responses = []
for i in inv:
if (i.type == 1): # MSG_TX
tx = self.get(i.hash)
if tx is not None:
responses.append(msg_generic(b"tx", tx))
return responses
| mit | 9,068,670,471,248,562,000 | 32.079268 | 77 | 0.579539 | false |
jakesyl/pychess | lib/pychess/Players/PyChessFICS.py | 20 | 17313 | from __future__ import print_function
import email.Utils
from gi.repository import Gdk, Gtk
import math
import pychess
import random
import signal
import subprocess
from threading import Thread
from pychess.compat import urlopen, urlencode
from pychess.Players.PyChess import PyChess
from pychess.System.prefix import addDataPrefix, isInstalled
from pychess.System.repeat import repeat_sleep
from pychess.System import GtkWorker, fident
from pychess.System.Log import log
from pychess.Utils.const import *
from pychess.Utils.lutils.LBoard import LBoard
from pychess.Utils.lutils.lmove import determineAlgebraicNotation, toLAN, parseSAN
from pychess.Utils.lutils import lsearch
from pychess.Utils.repr import reprResult_long, reprReason_long
from pychess.ic.FICSConnection import FICSMainConnection
class PyChessFICS(PyChess):
def __init__ (self, password, from_address, to_address):
PyChess.__init__(self)
self.ports = (23, 5000)
if not password:
self.username = "guest"
else: self.username = "PyChess"
self.owner = "Lobais"
self.password = password
self.from_address = "The PyChess Bot <%s>" % from_address
self.to_address = "Thomas Dybdahl Ahle <%s>" % to_address
# Possible start times
self.minutes = (1,2,3,4,5,6,7,8,9,10)
self.gains = (0,5,10,15,20)
# Possible colors. None == random
self.colors = (WHITE, BLACK, None)
# The amount of random challenges, that PyChess sends with each seek
self.challenges = 10
enableEGTB()
self.sudos = set()
self.ownerOnline = False
self.waitingForPassword = None
self.log = []
self.acceptedTimesettings = []
self.worker = None
repeat_sleep(self.sendChallenges, 60*1)
def __triangular(self, low, high, mode):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = random.random()
c = (mode - low) / (high - low)
if u > c:
u = 1 - u
c = 1 - c
low, high = high, low
tri = low + (high - low) * (u * c) ** 0.5
if tri < mode:
return int(tri)
elif tri > mode:
return int(math.ceil(tri))
return int(round(tri))
def sendChallenges(self):
if self.connection.bm.isPlaying():
return True
statsbased = ((0.39197722779282, 3, 0),
(0.59341408108783, 5, 0),
(0.77320877377846, 1, 0),
(0.8246379941394, 10, 0),
(0.87388717406441, 2, 12),
(0.91443760169489, 15, 0),
(0.9286423058163, 4, 0),
(0.93891977227793, 2, 0),
(0.94674539138335, 20, 0),
(0.95321476842423, 2, 2),
(0.9594588808257, 5, 2),
(0.96564528079889, 3, 2),
(0.97173859621034, 7, 0),
(0.97774906636184, 3, 1),
(0.98357243654425, 5, 12),
(0.98881309737017, 5, 5),
(0.99319644938247, 6, 0),
(0.99675879556023, 3, 12),
(1, 5, 3))
#n = random.random()
#for culminativeChance, minute, gain in statsbased:
# if n < culminativeChance:
# break
culminativeChance, minute, gain = random.choice(statsbased)
#type = random.choice((TYPE_LIGHTNING, TYPE_BLITZ, TYPE_STANDARD))
#if type == TYPE_LIGHTNING:
# minute = self.__triangular(0,2+1,1)
# mingain = not minute and 1 or 0
# maxgain = int((3-minute)*3/2)
# gain = random.randint(mingain, maxgain)
#elif type == TYPE_BLITZ:
# minute = self.__triangular(0,14+1,5)
# mingain = max(int((3-minute)*3/2+1), 0)
# maxgain = int((15-minute)*3/2)
# gain = random.randint(mingain, maxgain)
#elif type == TYPE_STANDARD:
# minute = self.__triangular(0,20+1,12)
# mingain = max(int((15-minute)*3/2+1), 0)
# maxgain = int((20-minute)*3/2)
# gain = self.__triangular(mingain, maxgain, mingain)
#color = random.choice(self.colors)
self.extendlog(["Seeking %d %d" % (minute, gain)])
self.connection.glm.seek(minute, gain, True)
opps = random.sample(self.connection.players.get_online_playernames(),
self.challenges)
self.extendlog("Challenging %s" % op for op in opps)
for player in opps:
self.connection.om.challenge(player, minute, gain, True)
return True
def makeReady(self):
signal.signal(signal.SIGINT, Gtk.main_quit)
PyChess.makeReady(self)
self.connection = FICSMainConnection("freechess.org", self.ports,
self.username, self.password)
self.connection.connect("connectingMsg", self.__showConnectLog)
self.connection._connect()
self.connection.glm.connect("addPlayer", self.__onAddPlayer)
self.connection.glm.connect("removePlayer", self.__onRemovePlayer)
self.connection.cm.connect("privateMessage", self.__onTell)
self.connection.alm.connect("logOut", self.__onLogOut)
self.connection.bm.connect("playGameCreated", self.__onGameCreated)
self.connection.bm.connect("curGameEnded", self.__onGameEnded)
self.connection.bm.connect("boardUpdate", self.__onBoardUpdate)
self.connection.om.connect("onChallengeAdd", self.__onChallengeAdd)
self.connection.om.connect("onOfferAdd", self.__onOfferAdd)
self.connection.adm.connect("onAdjournmentsList", self.__onAdjournmentsList)
self.connection.em.connect("onAmbiguousMove", self.__onAmbiguousMove)
self.connection.em.connect("onIllegalMove", self.__onAmbiguousMove)
self.connection.adm.queryAdjournments()
self.connection.lvm.setVariable("autoflag", 1)
self.connection.fm.setFingerNote(1,
"PyChess is the chess engine bundled with the PyChess %s " % pychess.VERSION +
"chess client. This instance is owned by %s, but acts " % self.owner +
"quite autonomously.")
self.connection.fm.setFingerNote(2,
"PyChess is 100% Python code and is released under the terms of " +
"the GPL. The evalution function is largely equal to the one of" +
"GnuChess, but it plays quite differently.")
self.connection.fm.setFingerNote(3,
"PyChess runs on an elderly AMD Sempron(tm) Processor 3200+, 512 " +
"MB DDR2 Ram, but is built to take use of 64bit calculating when " +
"accessible, through the gpm library.")
self.connection.fm.setFingerNote(4,
"PyChess uses a small 500 KB openingbook based solely on Kasparov " +
"games. The engine doesn't have much endgame knowledge, but might " +
"in some cases access an online endgamedatabase.")
self.connection.fm.setFingerNote(5,
"PyChess will allow any pause/resume and adjourn wishes, but will " +
"deny takebacks. Draw, abort and switch offers are accepted, " +
"if they are found to be an advance. Flag is auto called, but " +
"PyChess never resigns. We don't want you to forget your basic " +
"mating skills.")
def main(self):
self.connection.run()
self.extendlog([str(self.acceptedTimesettings)])
self.phoneHome("Session ended\n"+"\n".join(self.log))
print("Session ended")
def run(self):
t = Thread(target=self.main, name=fident(self.main))
t.daemon = True
t.start()
Gdk.threads_init()
Gtk.main()
#===========================================================================
# General
#===========================================================================
def __showConnectLog (self, connection, message):
print(message)
def __onLogOut (self, autoLogoutManager):
self.connection.close()
#sys.exit()
def __onAddPlayer (self, gameListManager, player):
if player["name"] in self.sudos:
self.sudos.remove(player["name"])
if player["name"] == self.owner:
self.connection.cm.tellPlayer(self.owner, "Greetings")
self.ownerOnline = True
def __onRemovePlayer (self, gameListManager, playername):
if playername == self.owner:
self.ownerOnline = False
def __onAdjournmentsList (self, adjournManager, adjournments):
for adjournment in adjournments:
if adjournment["online"]:
adjournManager.challenge(adjournment["opponent"])
def __usage (self):
return "|| PyChess bot help file || " +\
"# help 'Displays this help file' " +\
"# sudo <password> <command> 'Lets PyChess execute the given command' "+\
"# sendlog 'Makes PyChess send you its current log'"
def __onTell (self, chatManager, name, title, isadmin, text):
if self.waitingForPassword:
if text.strip() == self.password or (not self.password and text == "none"):
self.sudos.add(name)
self.tellHome("%s gained sudo access" % name)
self.connection.client.run_command(self.waitingForPassword)
else:
chatManager.tellPlayer(name, "Wrong password")
self.tellHome("%s failed sudo access" % name)
self.waitingForPassword = None
return
args = text.split()
#if args == ["help"]:
# chatManager.tellPlayer(name, self.__usage())
if args[0] == "sudo":
command = " ".join(args[1:])
if name in self.sudos or name == self.owner:
# Notice: This can be used to make nasty loops
print(command, file=self.connection.client)
else:
print(repr(name), self.sudos)
chatManager.tellPlayer(name, "Please send me the password")
self.waitingForPassword = command
elif args == ["sendlog"]:
if self.log:
# TODO: Consider email
chatManager.tellPlayer(name, "\\n".join(self.log))
else:
chatManager.tellPlayer(name, "The log is currently empty")
else:
if self.ownerOnline:
self.tellHome("%s told me '%s'" % (name, text))
else:
def onlineanswer (message):
data = urlopen("http://www.pandorabots.com/pandora/talk?botid=8d034368fe360895",
urlencode({"message":message, "botcust2":"x"}).encode("utf-8")).read().decode('utf-8')
ss = "<b>DMPGirl:</b>"
es = "<br>"
answer = data[data.find(ss)+len(ss) : data.find(es,data.find(ss))]
chatManager.tellPlayer(name, answer)
t = Thread(target=onlineanswer,
name=fident(onlineanswer),
args=(text,))
t.daemon = True
t.start()
#chatManager.tellPlayer(name, "Sorry, your request was nonsense.\n"+\
# "Please read my help file for more info")
#===========================================================================
# Challenges and other offers
#===========================================================================
def __onChallengeAdd (self, offerManager, index, match):
#match = {"tp": type, "w": fname, "rt": rating, "color": color,
# "r": rated, "t": mins, "i": incr}
offerManager.acceptIndex(index)
def __onOfferAdd (self, offerManager, offer):
if offer.type in (PAUSE_OFFER, RESUME_OFFER, ADJOURN_OFFER):
offerManager.accept(offer)
elif offer.type in (TAKEBACK_OFFER,):
offerManager.decline(offer)
elif offer.type in (DRAW_OFFER, ABORT_OFFER, SWITCH_OFFER):
if self.__willingToDraw():
offerManager.accept(offer)
else: offerManager.decline(offer)
#===========================================================================
# Playing
#===========================================================================
def __onGameCreated (self, boardManager, ficsgame):
base = int(ficsgame.minutes)*60
inc = int(ficsgame.inc)
self.clock[:] = base, base
self.increment[:] = inc, inc
self.gameno = ficsgame.gameno
self.lastPly = -1
self.acceptedTimesettings.append((base, inc))
self.tellHome("Starting a game (%s, %s) gameno: %s" %
(ficsgame.wplayer.name, ficsgame.bplayer.name, ficsgame.gameno))
if ficsgame.bplayer.name.lower() == self.connection.getUsername().lower():
self.playingAs = BLACK
else:
self.playingAs = WHITE
self.board = LBoard(NORMALCHESS)
        # Now we wait until we receive the board.
def __go (self):
if self.worker:
self.worker.cancel()
self.worker = GtkWorker(lambda worker: PyChess._PyChess__go(self, worker))
self.worker.connect("published", lambda w, msg: self.extendlog(msg))
self.worker.connect("done", self.__onMoveCalculated)
self.worker.execute()
def __willingToDraw (self):
return self.scr <= 0 # FIXME: this misbehaves in all but the simplest use cases
def __onGameEnded (self, boardManager, ficsgame):
self.tellHome(reprResult_long[ficsgame.result] + " " + reprReason_long[ficsgame.reason])
lsearch.searching = False
if self.worker:
self.worker.cancel()
self.worker = None
def __onMoveCalculated (self, worker, sanmove):
if worker.isCancelled() or not sanmove:
return
self.board.applyMove(parseSAN(self.board,sanmove))
self.connection.bm.sendMove(sanmove)
self.extendlog(["Move sent %s" % sanmove])
def __onBoardUpdate (self, boardManager, gameno, ply, curcol, lastmove, fen, wname, bname, wms, bms):
self.extendlog(["","I got move %d %s for gameno %s" % (ply, lastmove, gameno)])
if self.gameno != gameno:
return
self.board.applyFen(fen)
self.clock[:] = wms/1000., bms/1000.
if curcol == self.playingAs:
self.__go()
def __onAmbiguousMove (self, errorManager, move):
# This is really a fix for fics, but sometimes it is necessary
if determineAlgebraicNotation(move) == SAN:
self.board.popMove()
move_ = parseSAN(self.board, move)
lanmove = toLAN(self.board, move_)
self.board.applyMove(move_)
self.connection.bm.sendMove(lanmove)
else:
self.connection.cm.tellOpponent(
"I'm sorry, I wanted to move %s, but FICS called " % move +
"it 'Ambigious'. I can't find another way to express it, " +
"so you can win")
self.connection.bm.resign()
#===========================================================================
# Utils
#===========================================================================
def extendlog(self, messages):
[log.info(m+"\n") for m in messages]
self.log.extend(messages)
del self.log[:-10]
def tellHome(self, message):
print(message)
if self.ownerOnline:
self.connection.cm.tellPlayer(self.owner, message)
def phoneHome(self, message):
SENDMAIL = '/usr/sbin/sendmail'
SUBJECT = "Besked fra botten"
p = subprocess.Popen([SENDMAIL, '-f',
email.Utils.parseaddr(self.from_address)[1],
email.Utils.parseaddr(self.to_address)[1]],
stdin=subprocess.PIPE)
print("MIME-Version: 1.0", file=p.stdin)
print("Content-Type: text/plain; charset=UTF-8", file=p.stdin)
print("Content-Disposition: inline", file=p.stdin)
print("From: %s" % self.from_address, file=p.stdin)
print("To: %s" % self.to_address, file=p.stdin)
print("Subject: %s" % SUBJECT, file=p.stdin)
print(file=p.stdin)
print(message, file=p.stdin)
print("Cheers", file=p.stdin)
p.stdin.close()
p.wait()
| gpl-3.0 | -6,922,454,449,462,359,000 | 40.123515 | 121 | 0.540923 | false |
jasonwzhy/django | django/contrib/admindocs/tests/test_fields.py | 638 | 1172 | from __future__ import unicode_literals
import unittest
from django.contrib.admindocs import views
from django.db import models
from django.db.models import fields
from django.utils.translation import ugettext as _
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
self.assertRaises(
AttributeError,
views.get_readable_field_data_type, "NotAField"
)
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
_('Boolean (Either True or False)')
)
def test_custom_fields(self):
self.assertEqual(
views.get_readable_field_data_type(CustomField()),
'A custom field type'
)
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
_('Field of type: %(field_type)s') % {
'field_type': 'DescriptionLackingField'
}
)
| bsd-3-clause | -5,016,524,069,817,250,000 | 25.044444 | 74 | 0.62884 | false |
mrry/tensorflow | tensorflow/python/kernel_tests/slice_op_test.py | 18 | 10265 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class SliceTest(tf.test.TestCase):
def testEmpty(self):
inp = np.random.rand(4, 4).astype("f")
for k in xrange(4):
with self.test_session(use_gpu=True):
a = tf.constant(inp, shape=[4, 4], dtype=tf.float32)
slice_t = a[2, k:k]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[2, k:k])
def testInt32(self):
inp = np.random.rand(4, 4).astype("i")
for k in xrange(4):
with self.test_session(use_gpu=True):
a = tf.constant(inp, shape=[4, 4], dtype=tf.int32)
slice_t = a[2, k:k]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[2, k:k])
def testSelectAll(self):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 4, 4, 4).astype("f")
a = tf.constant(inp, shape=[4, 4, 4, 4],
dtype=tf.float32)
slice_explicit_t = tf.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
slice_implicit_t = a[:, :, :, :]
self.assertAllEqual(inp, slice_explicit_t.eval())
self.assertAllEqual(inp, slice_implicit_t.eval())
self.assertEqual(inp.shape, slice_explicit_t.get_shape())
self.assertEqual(inp.shape, slice_implicit_t.get_shape())
def testSingleDimension(self):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(10).astype("f")
a = tf.constant(inp, shape=[10], dtype=tf.float32)
hi = np.random.randint(0, 9)
scalar_t = a[hi]
scalar_val = scalar_t.eval()
self.assertAllEqual(scalar_val, inp[hi])
if hi > 0:
lo = np.random.randint(0, hi)
else:
lo = 0
slice_t = a[lo:hi]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[lo:hi])
def testScalarInput(self):
input_val = 0
with self.test_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
tf.constant(input_val)[:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
input_t = tf.placeholder(tf.int32)
slice_t = input_t[:]
with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
def testInvalidIndex(self):
input_val = [1, 2]
with self.test_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
tf.constant(input_val)[1:, 1:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
input_t = tf.placeholder(tf.int32)
slice_t = input_t[1:, 1:]
with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
def _testSliceMatrixDim0(self, x, begin, size):
with self.test_session(use_gpu=True):
tf_ans = tf.slice(x, [begin, 0], [size, x.shape[1]]).eval()
np_ans = x[begin:begin+size, :]
self.assertAllEqual(tf_ans, np_ans)
def testSliceMatrixDim0(self):
x = np.random.rand(8, 4).astype("f")
self._testSliceMatrixDim0(x, 1, 2)
self._testSliceMatrixDim0(x, 3, 3)
y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
self._testSliceMatrixDim0(y, 1, 2)
self._testSliceMatrixDim0(y, 3, 3)
def testSingleElementAll(self):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 4).astype("f")
a = tf.constant(inp, shape=[4, 4], dtype=tf.float32)
x, y = np.random.randint(0, 3, size=2).tolist()
slice_t = a[x, 0:y]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[x, 0:y])
def testSimple(self):
with self.test_session(use_gpu=True) as sess:
inp = np.random.rand(4, 4).astype("f")
a = tf.constant([float(x) for x in inp.ravel(order="C")],
shape=[4, 4], dtype=tf.float32)
slice_t = tf.slice(a, [0, 0], [2, 2])
slice2_t = a[:2, :2]
slice_val, slice2_val = sess.run([slice_t, slice2_t])
self.assertAllEqual(slice_val, inp[:2, :2])
self.assertAllEqual(slice2_val, inp[:2, :2])
self.assertEqual(slice_val.shape, slice_t.get_shape())
self.assertEqual(slice2_val.shape, slice2_t.get_shape())
def testComplex(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 10, 10, 4).astype("f")
a = tf.constant(inp, dtype=tf.float32)
x = np.random.randint(0, 9)
z = np.random.randint(0, 9)
if z > 0:
y = np.random.randint(0, z)
else:
y = 0
slice_t = a[:, x, y:z, :]
self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])
def testRandom(self):
# Random dims of rank 6
input_shape = np.random.randint(0, 20, size=6)
inp = np.random.rand(*input_shape).astype("f")
with self.test_session(use_gpu=True) as sess:
a = tf.constant([float(x) for x in inp.ravel(order="C")],
shape=input_shape, dtype=tf.float32)
indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
sizes = [np.random.randint(0, input_shape[i] - indices[i] + 1)
for i in range(6)]
slice_t = tf.slice(a, indices, sizes)
slice2_t = a[indices[0]:indices[0]+sizes[0],
indices[1]:indices[1]+sizes[1],
indices[2]:indices[2]+sizes[2],
indices[3]:indices[3]+sizes[3],
indices[4]:indices[4]+sizes[4],
indices[5]:indices[5]+sizes[5]]
slice_val, slice2_val = sess.run([slice_t, slice2_t])
expected_val = inp[indices[0]:indices[0]+sizes[0],
indices[1]:indices[1]+sizes[1],
indices[2]:indices[2]+sizes[2],
indices[3]:indices[3]+sizes[3],
indices[4]:indices[4]+sizes[4],
indices[5]:indices[5]+sizes[5]]
self.assertAllEqual(slice_val, expected_val)
self.assertAllEqual(slice2_val, expected_val)
self.assertEqual(expected_val.shape, slice_t.get_shape())
self.assertEqual(expected_val.shape, slice2_t.get_shape())
def _testGradientSlice(self, input_shape, slice_begin, slice_size):
with self.test_session(use_gpu=True):
num_inputs = np.prod(input_shape)
num_grads = np.prod(slice_size)
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
a = tf.constant([float(x) for x in inp.ravel(order="C")],
shape=input_shape, dtype=tf.float32)
slice_t = tf.slice(a, slice_begin, slice_size)
grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
grad_tensor = tf.constant(grads)
grad = tf.gradients(slice_t, [a], grad_tensor)[0]
result = grad.eval()
      # Create a zero tensor of the input shape and place
# the grads into the right location to compare against TensorFlow.
np_ans = np.zeros(input_shape)
slices = []
for i in xrange(len(input_shape)):
slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
np_ans[slices] = grads
self.assertAllClose(np_ans, result)
def _testGradientVariableSize(self):
with self.test_session(use_gpu=True):
inp = tf.constant([1.0, 2.0, 3.0], name="in")
out = tf.slice(inp, [1], [-1])
grad_actual = tf.gradients(out, inp)[0].eval()
self.assertAllClose([0., 1., 1.], grad_actual)
def testGradientsAll(self):
# Slice the middle square out of a 4x4 input
self._testGradientSlice([4, 4], [1, 1], [2, 2])
# Slice the upper left square out of a 4x4 input
self._testGradientSlice([4, 4], [0, 0], [2, 2])
# Slice a non-square input starting from (2,1)
self._testGradientSlice([4, 4], [2, 1], [1, 2])
# Slice a 3D tensor
self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])
# Use -1 as a slice dimension.
self._testGradientVariableSize()
def testNotIterable(self):
# NOTE(mrry): If we register __getitem__ as an overloaded
# operator, Python will valiantly attempt to iterate over the
# Tensor from 0 to infinity. This test ensures that this
# unintended behavior is prevented.
c = tf.constant(5.0)
with self.assertRaisesWithPredicateMatch(
TypeError,
lambda e: "'Tensor' object is not iterable" in str(e)):
for _ in c:
pass
def testComputedShape(self):
# NOTE(mrry): We cannot currently handle partially-known values,
# because `tf.slice()` uses -1 to specify a wildcard size, and
# this can't be handled using the
# `tensor_util.constant_value_as_shape()` trick.
a = tf.constant([[1, 2, 3], [4, 5, 6]])
begin = tf.constant(0)
size = tf.constant(1)
b = tf.slice(a, [begin, 0], [size, 2])
self.assertEqual([1, 2], b.get_shape())
begin = tf.placeholder(tf.int32, shape=())
c = tf.slice(a, [begin, 0], [-1, 2])
self.assertEqual([None, 2], c.get_shape().as_list())
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -7,970,495,887,450,566,000 | 37.590226 | 80 | 0.600195 | false |
andmos/ansible | lib/ansible/modules/clustering/consul.py | 23 | 19949 | #!/usr/bin/python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: consul
short_description: "Add, modify & delete services within a consul cluster."
description:
- Registers services and checks for an agent with a consul cluster.
A service is some process running on the agent node that should be advertised by
consul's discovery mechanism. It may optionally supply a check definition,
   a periodic service test to notify the consul cluster of the service's health.
- "Checks may also be registered per node e.g. disk usage, or cpu usage and
notify the health of the entire node to the cluster.
Service level checks do not require a check name or id as these are derived
by Consul from the Service name and id respectively by appending 'service:'
Node level checks require a check_name and optionally a check_id."
- Currently, there is no complete way to retrieve the script, interval or ttl
metadata for a registered check. Without this metadata it is not possible to
tell if the data supplied with ansible represents a change to a check. As a
   result this does not attempt to determine changes and will always report that
   a change occurred. An api method is planned to supply this metadata so at that
stage change management will be added.
- "See http://consul.io for more details."
requirements:
- "python >= 2.6"
- python-consul
- requests
version_added: "2.0"
author: "Steve Gargan (@sgargan)"
options:
state:
description:
- register or deregister the consul service, defaults to present
required: true
choices: ['present', 'absent']
service_name:
description:
- Unique name for the service on a node, must be unique per node,
required if registering a service. May be omitted if registering
a node level check
service_id:
description:
- the ID for the service, must be unique per node, defaults to the
service name if the service name is supplied
default: service_name if supplied
host:
description:
- host of the consul agent defaults to localhost
default: localhost
port:
description:
- the port on which the consul agent is running
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
default: http
version_added: "2.1"
validate_certs:
description:
- whether to verify the tls certificate of the consul agent
type: bool
default: 'yes'
version_added: "2.1"
notes:
description:
- Notes to attach to check when registering it.
service_port:
description:
- the port on which the service is listening. Can optionally be supplied for
registration of a service, i.e. if service_name or service_id is set
service_address:
description:
- the address to advertise that the service will be listening on.
This value will be passed as the I(Address) parameter to Consul's
U(/v1/agent/service/register) API method, so refer to the Consul API
documentation for further details.
version_added: "2.1"
tags:
description:
- a list of tags that will be attached to the service registration.
script:
description:
- the script/command that will be run periodically to check the health
            of the service. Scripts require an interval and vice versa
interval:
description:
- the interval at which the service check will be run. This is a number
with a s or m suffix to signify the units of seconds or minutes e.g
15s or 1m. If no suffix is supplied, m will be used by default e.g.
1 will be 1m. Required if the script param is specified.
check_id:
description:
- an ID for the service check, defaults to the check name, ignored if
part of a service definition.
check_name:
description:
- a name for the service check, defaults to the check id. required if
standalone, ignored if part of service definition.
ttl:
description:
- checks can be registered with a ttl instead of a script and interval
this means that the service will check in with the agent before the
ttl expires. If it doesn't the check will be considered failed.
Required if registering a check and the script an interval are missing
Similar to the interval this is a number with a s or m suffix to
signify the units of seconds or minutes e.g 15s or 1m. If no suffix
is supplied, m will be used by default e.g. 1 will be 1m
http:
description:
- checks can be registered with an http endpoint. This means that consul
will check that the http endpoint returns a successful http status.
Interval must also be provided with this option.
version_added: "2.0"
timeout:
description:
- A custom HTTP check timeout. The consul default is 10 seconds.
Similar to the interval this is a number with a s or m suffix to
signify the units of seconds or minutes, e.g. 15s or 1m.
version_added: "2.0"
token:
description:
          - the token key identifying an ACL rule set. May be required to register services.
"""
EXAMPLES = '''
- name: register nginx service with the local consul agent
consul:
service_name: nginx
service_port: 80
- name: register nginx service with curl check
consul:
service_name: nginx
service_port: 80
script: curl http://localhost
interval: 60s
- name: register nginx with an http check
consul:
service_name: nginx
service_port: 80
interval: 60s
http: http://localhost:80/status
- name: register external service nginx available at 10.1.5.23
consul:
service_name: nginx
service_port: 80
service_address: 10.1.5.23
- name: register nginx with some service tags
consul:
service_name: nginx
service_port: 80
tags:
- prod
- webservers
- name: remove nginx service
consul:
service_name: nginx
state: absent
- name: register celery worker service
consul:
service_name: celery-worker
tags:
- prod
- worker
- name: create a node level check to test disk usage
consul:
check_name: Disk usage
check_id: disk_usage
script: /opt/disk_usage.py
interval: 5m
- name: register an http check against a service that's already registered
consul:
check_name: nginx-check2
check_id: nginx-check2
service_id: nginx
interval: 60s
http: http://localhost:80/morestatus
'''
try:
import consul
from requests.exceptions import ConnectionError
class PatchedConsulAgentService(consul.Consul.Agent.Service):
def deregister(self, service_id, token=None):
params = {}
if token:
params['token'] = token
return self.agent.http.put(consul.base.CB.bool(),
'/v1/agent/service/deregister/%s' % service_id,
params=params)
python_consul_installed = True
except ImportError:
python_consul_installed = False
from ansible.module_utils.basic import AnsibleModule
def register_with_consul(module):
state = module.params.get('state')
if state == 'present':
add(module)
else:
remove(module)
def add(module):
''' adds a service or a check depending on supplied configuration'''
check = parse_check(module)
service = parse_service(module)
if not service and not check:
module.fail_json(msg='a name and port are required to register a service')
if service:
if check:
service.add_check(check)
add_service(module, service)
elif check:
add_check(module, check)
def remove(module):
''' removes a service or a check '''
service_id = module.params.get('service_id') or module.params.get('service_name')
check_id = module.params.get('check_id') or module.params.get('check_name')
if not (service_id or check_id):
module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name')
if service_id:
remove_service(module, service_id)
else:
remove_check(module, check_id)
def add_check(module, check):
    ''' registers a check with the given agent. currently there is no way to
    retrieve the full metadata of an existing check through the consul api.
Without this we can't compare to the supplied check and so we must assume
a change. '''
if not check.name and not check.service_id:
module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
consul_api = get_consul_api(module)
check.register(consul_api)
module.exit_json(changed=True,
check_id=check.check_id,
check_name=check.name,
script=check.script,
interval=check.interval,
ttl=check.ttl,
http=check.http,
timeout=check.timeout,
service_id=check.service_id)
def remove_check(module, check_id):
''' removes a check using its id '''
consul_api = get_consul_api(module)
if check_id in consul_api.agent.checks():
consul_api.agent.check.deregister(check_id)
module.exit_json(changed=True, id=check_id)
module.exit_json(changed=False, id=check_id)
def add_service(module, service):
''' registers a service with the current agent '''
result = service
changed = False
consul_api = get_consul_api(module)
existing = get_service_by_id_or_name(consul_api, service.id)
# there is no way to retrieve the details of checks so if a check is present
# in the service it must be re-registered
if service.has_checks() or not existing or not existing == service:
service.register(consul_api)
# check that it registered correctly
registered = get_service_by_id_or_name(consul_api, service.id)
if registered:
result = registered
changed = True
module.exit_json(changed=changed,
service_id=result.id,
service_name=result.name,
service_port=result.port,
checks=[check.to_dict() for check in service.checks],
tags=result.tags)
def remove_service(module, service_id):
''' deregister a service from the given agent using its service id '''
consul_api = get_consul_api(module)
service = get_service_by_id_or_name(consul_api, service_id)
if service:
consul_api.agent.service.deregister(service_id, token=module.params.get('token'))
module.exit_json(changed=True, id=service_id)
module.exit_json(changed=False, id=service_id)
def get_consul_api(module, token=None):
consulClient = consul.Consul(host=module.params.get('host'),
port=module.params.get('port'),
scheme=module.params.get('scheme'),
verify=module.params.get('validate_certs'),
token=module.params.get('token'))
consulClient.agent.service = PatchedConsulAgentService(consulClient)
return consulClient
def get_service_by_id_or_name(consul_api, service_id_or_name):
''' iterate the registered services and find one with the given id '''
for name, service in consul_api.agent.services().items():
if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name:
return ConsulService(loaded=service)
def parse_check(module):
if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('http')) if p]) > 1:
module.fail_json(
msg='checks are either script, http or ttl driven, supplying more than one does not make sense')
if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('http'):
return ConsulCheck(
module.params.get('check_id'),
module.params.get('check_name'),
module.params.get('check_node'),
module.params.get('check_host'),
module.params.get('script'),
module.params.get('interval'),
module.params.get('ttl'),
module.params.get('notes'),
module.params.get('http'),
module.params.get('timeout'),
module.params.get('service_id'),
)
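# Example (illustrative only; the parameter values are hypothetical): given
# module params such as
#
#   {'check_name': 'memory', 'script': '/usr/local/bin/check_mem.py',
#    'interval': '15s'}
#
# parse_check() returns a script-driven ConsulCheck which add_check() can then
# register with the local agent.  Supplying ``ttl`` or ``http`` instead of
# ``script`` selects those check types; supplying more than one of them fails
# fast in the guard above.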
def parse_service(module):
if module.params.get('service_name'):
return ConsulService(
module.params.get('service_id'),
module.params.get('service_name'),
module.params.get('service_address'),
module.params.get('service_port'),
module.params.get('tags'),
)
    else:
        module.fail_json(msg="service_name is required to configure a service.")
class ConsulService():
def __init__(self, service_id=None, name=None, address=None, port=-1,
tags=None, loaded=None):
self.id = self.name = name
if service_id:
self.id = service_id
self.address = address
self.port = port
self.tags = tags
self.checks = []
if loaded:
self.id = loaded['ID']
self.name = loaded['Service']
self.port = loaded['Port']
self.tags = loaded['Tags']
def register(self, consul_api):
optional = {}
if self.port:
optional['port'] = self.port
if len(self.checks) > 0:
optional['check'] = self.checks[0].check
consul_api.agent.service.register(
self.name,
service_id=self.id,
address=self.address,
tags=self.tags,
**optional)
def add_check(self, check):
self.checks.append(check)
    def checks(self):
        # NOTE: shadowed by the ``self.checks`` list assigned in __init__, so
        # instances resolve ``checks`` to that list rather than to this method.
        return self.checks
def has_checks(self):
return len(self.checks) > 0
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.id == other.id and
self.name == other.name and
self.port == other.port and
self.tags == other.tags)
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
data = {'id': self.id, "name": self.name}
if self.port:
data['port'] = self.port
if self.tags and len(self.tags) > 0:
data['tags'] = self.tags
if len(self.checks) > 0:
data['check'] = self.checks[0].to_dict()
return data
class ConsulCheck(object):
def __init__(self, check_id, name, node=None, host='localhost',
script=None, interval=None, ttl=None, notes=None, http=None, timeout=None, service_id=None):
self.check_id = self.name = name
if check_id:
self.check_id = check_id
self.service_id = service_id
self.notes = notes
self.node = node
self.host = host
self.interval = self.validate_duration('interval', interval)
self.ttl = self.validate_duration('ttl', ttl)
self.script = script
self.http = http
self.timeout = self.validate_duration('timeout', timeout)
self.check = None
if script:
self.check = consul.Check.script(script, self.interval)
if ttl:
self.check = consul.Check.ttl(self.ttl)
if http:
if interval is None:
raise Exception('http check must specify interval')
self.check = consul.Check.http(http, self.interval, self.timeout)
def validate_duration(self, name, duration):
if duration:
duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
if not any((duration.endswith(suffix) for suffix in duration_units)):
duration = "{0}s".format(duration)
return duration
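    # Example (illustrative): durations without a unit suffix are normalised to
    # seconds, while values that already carry a unit pass through unchanged:
    #
    #   self.validate_duration('interval', '30')   # -> '30s'
    #   self.validate_duration('ttl', '15m')       # -> '15m'
    #   self.validate_duration('timeout', None)    # -> None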
def register(self, consul_api):
consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
notes=self.notes,
check=self.check)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.check_id == other.check_id and
self.service_id == other.service_id and
self.name == other.name and
self.script == other.script and
self.interval == other.interval)
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
data = {}
self._add(data, 'id', attr='check_id')
        self._add(data, 'name', attr='name')  # the attribute is ``name``; there is no ``check_name``
self._add(data, 'script')
self._add(data, 'node')
self._add(data, 'notes')
self._add(data, 'host')
self._add(data, 'interval')
self._add(data, 'ttl')
self._add(data, 'http')
self._add(data, 'timeout')
self._add(data, 'service_id')
return data
def _add(self, data, key, attr=None):
try:
if attr is None:
attr = key
data[key] = getattr(self, attr)
except Exception:
pass
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(default='localhost'),
port=dict(default=8500, type='int'),
scheme=dict(required=False, default='http'),
validate_certs=dict(required=False, default=True, type='bool'),
check_id=dict(required=False),
check_name=dict(required=False),
check_node=dict(required=False),
check_host=dict(required=False),
notes=dict(required=False),
script=dict(required=False),
service_id=dict(required=False),
service_name=dict(required=False),
service_address=dict(required=False, type='str', default=None),
service_port=dict(required=False, type='int', default=None),
state=dict(default='present', choices=['present', 'absent']),
interval=dict(required=False, type='str'),
ttl=dict(required=False, type='str'),
http=dict(required=False, type='str'),
timeout=dict(required=False, type='str'),
tags=dict(required=False, type='list'),
token=dict(required=False, no_log=True)
),
supports_check_mode=False,
)
test_dependencies(module)
try:
register_with_consul(module)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
module.params.get('host'), module.params.get('port'), str(e)))
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | -1,379,211,342,108,684,300 | 34.245583 | 136 | 0.609153 | false |
davidfraser/sqlalchemy | test/ext/test_mutable.py | 21 | 18754 | from sqlalchemy import Integer, ForeignKey, String
from sqlalchemy.types import PickleType, TypeDecorator, VARCHAR
from sqlalchemy.orm import mapper, Session, composite
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.instrumentation import ClassManager
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import eq_, assert_raises_message
from sqlalchemy.testing.util import picklers
from sqlalchemy.testing import fixtures
from sqlalchemy.ext.mutable import MutableComposite
from sqlalchemy.ext.mutable import MutableDict
class Foo(fixtures.BasicEntity):
pass
class SubFoo(Foo):
pass
class FooWithEq(object):
def __init__(self, **kw):
for k in kw:
setattr(self, k, kw[k])
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
return self.id == other.id
class Point(MutableComposite):
def __init__(self, x, y):
self.x = x
self.y = y
def __setattr__(self, key, value):
object.__setattr__(self, key, value)
self.changed()
def __composite_values__(self):
return self.x, self.y
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
def __eq__(self, other):
return isinstance(other, Point) and \
other.x == self.x and \
other.y == self.y
class MyPoint(Point):
@classmethod
def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
return value
class _MutableDictTestFixture(object):
@classmethod
def _type_fixture(cls):
return MutableDict
def teardown(self):
# clear out mapper events
Mapper.dispatch._clear()
ClassManager.dispatch._clear()
super(_MutableDictTestFixture, self).teardown()
class _MutableDictTestBase(_MutableDictTestFixture):
run_define_tables = 'each'
    @classmethod
    def setup_mappers(cls):
foo = cls.tables.foo
mapper(Foo, foo)
def test_coerce_none(self):
sess = Session()
f1 = Foo(data=None)
sess.add(f1)
sess.commit()
eq_(f1.data, None)
def test_coerce_raise(self):
assert_raises_message(
ValueError,
"Attribute 'data' does not accept objects of type",
Foo, data=set([1, 2, 3])
)
def test_in_place_mutation(self):
sess = Session()
f1 = Foo(data={'a': 'b'})
sess.add(f1)
sess.commit()
f1.data['a'] = 'c'
sess.commit()
eq_(f1.data, {'a': 'c'})
def test_clear(self):
sess = Session()
f1 = Foo(data={'a': 'b'})
sess.add(f1)
sess.commit()
f1.data.clear()
sess.commit()
eq_(f1.data, {})
def test_update(self):
sess = Session()
f1 = Foo(data={'a': 'b'})
sess.add(f1)
sess.commit()
f1.data.update({'a': 'z'})
sess.commit()
eq_(f1.data, {'a': 'z'})
def test_setdefault(self):
sess = Session()
f1 = Foo(data={'a': 'b'})
sess.add(f1)
sess.commit()
eq_(f1.data.setdefault('c', 'd'), 'd')
sess.commit()
eq_(f1.data, {'a': 'b', 'c': 'd'})
eq_(f1.data.setdefault('c', 'q'), 'd')
sess.commit()
eq_(f1.data, {'a': 'b', 'c': 'd'})
def test_replace(self):
sess = Session()
f1 = Foo(data={'a': 'b'})
sess.add(f1)
sess.flush()
f1.data = {'b': 'c'}
sess.commit()
eq_(f1.data, {'b': 'c'})
def test_replace_itself_still_ok(self):
sess = Session()
f1 = Foo(data={'a': 'b'})
sess.add(f1)
sess.flush()
f1.data = f1.data
f1.data['b'] = 'c'
sess.commit()
eq_(f1.data, {'a': 'b', 'b': 'c'})
def test_pickle_parent(self):
sess = Session()
f1 = Foo(data={'a': 'b'})
sess.add(f1)
sess.commit()
f1.data
sess.close()
for loads, dumps in picklers():
sess = Session()
f2 = loads(dumps(f1))
sess.add(f2)
f2.data['a'] = 'c'
assert f2 in sess.dirty
def test_unrelated_flush(self):
sess = Session()
f1 = Foo(data={"a": "b"}, unrelated_data="unrelated")
sess.add(f1)
sess.flush()
f1.unrelated_data = "unrelated 2"
sess.flush()
f1.data["a"] = "c"
sess.commit()
eq_(f1.data["a"], "c")
def _test_non_mutable(self):
sess = Session()
f1 = Foo(non_mutable_data={'a': 'b'})
sess.add(f1)
sess.commit()
f1.non_mutable_data['a'] = 'c'
sess.commit()
eq_(f1.non_mutable_data, {'a': 'b'})
class MutableColumnDefaultTest(_MutableDictTestFixture, fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
MutableDict = cls._type_fixture()
mutable_pickle = MutableDict.as_mutable(PickleType)
Table(
'foo', metadata,
Column(
'id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', mutable_pickle, default={}),
)
    @classmethod
    def setup_mappers(cls):
foo = cls.tables.foo
mapper(Foo, foo)
def test_evt_on_flush_refresh(self):
# test for #3427
sess = Session()
f1 = Foo()
sess.add(f1)
sess.flush()
assert isinstance(f1.data, self._type_fixture())
assert f1 not in sess.dirty
f1.data['foo'] = 'bar'
assert f1 in sess.dirty
class MutableWithScalarPickleTest(_MutableDictTestBase, fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
MutableDict = cls._type_fixture()
mutable_pickle = MutableDict.as_mutable(PickleType)
Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('skip', mutable_pickle),
Column('data', mutable_pickle),
Column('non_mutable_data', PickleType),
Column('unrelated_data', String(50))
)
def test_non_mutable(self):
self._test_non_mutable()
class MutableWithScalarJSONTest(_MutableDictTestBase, fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
import json
class JSONEncodedDict(TypeDecorator):
impl = VARCHAR(50)
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
MutableDict = cls._type_fixture()
Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', MutableDict.as_mutable(JSONEncodedDict)),
Column('non_mutable_data', JSONEncodedDict),
Column('unrelated_data', String(50))
)
def test_non_mutable(self):
self._test_non_mutable()
class MutableAssocWithAttrInheritTest(_MutableDictTestBase,
fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', PickleType),
Column('non_mutable_data', PickleType),
Column('unrelated_data', String(50))
)
Table('subfoo', metadata,
Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
)
    @classmethod
    def setup_mappers(cls):
foo = cls.tables.foo
subfoo = cls.tables.subfoo
mapper(Foo, foo)
mapper(SubFoo, subfoo, inherits=Foo)
MutableDict.associate_with_attribute(Foo.data)
def test_in_place_mutation(self):
sess = Session()
f1 = SubFoo(data={'a': 'b'})
sess.add(f1)
sess.commit()
f1.data['a'] = 'c'
sess.commit()
eq_(f1.data, {'a': 'c'})
def test_replace(self):
sess = Session()
f1 = SubFoo(data={'a': 'b'})
sess.add(f1)
sess.flush()
f1.data = {'b': 'c'}
sess.commit()
eq_(f1.data, {'b': 'c'})
class MutableAssociationScalarPickleTest(_MutableDictTestBase,
fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
MutableDict = cls._type_fixture()
MutableDict.associate_with(PickleType)
Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('skip', PickleType),
Column('data', PickleType),
Column('unrelated_data', String(50))
)
class MutableAssociationScalarJSONTest(_MutableDictTestBase,
fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
import json
class JSONEncodedDict(TypeDecorator):
impl = VARCHAR(50)
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
MutableDict = cls._type_fixture()
MutableDict.associate_with(JSONEncodedDict)
Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', JSONEncodedDict),
Column('unrelated_data', String(50))
)
class CustomMutableAssociationScalarJSONTest(_MutableDictTestBase,
fixtures.MappedTest):
CustomMutableDict = None
@classmethod
def _type_fixture(cls):
        if not cls.CustomMutableDict:
MutableDict = super(
CustomMutableAssociationScalarJSONTest, cls)._type_fixture()
class CustomMutableDict(MutableDict):
pass
cls.CustomMutableDict = CustomMutableDict
return cls.CustomMutableDict
@classmethod
def define_tables(cls, metadata):
import json
class JSONEncodedDict(TypeDecorator):
impl = VARCHAR(50)
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
CustomMutableDict = cls._type_fixture()
CustomMutableDict.associate_with(JSONEncodedDict)
Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', JSONEncodedDict),
Column('unrelated_data', String(50))
)
def test_pickle_parent(self):
# Picklers don't know how to pickle CustomMutableDict,
# but we aren't testing that here
pass
def test_coerce(self):
sess = Session()
f1 = Foo(data={'a': 'b'})
sess.add(f1)
sess.flush()
eq_(type(f1.data), self._type_fixture())
class _CompositeTestBase(object):
@classmethod
def define_tables(cls, metadata):
Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('x', Integer),
Column('y', Integer),
Column('unrelated_data', String(50))
)
def setup(self):
from sqlalchemy.ext import mutable
mutable._setup_composite_listener()
super(_CompositeTestBase, self).setup()
def teardown(self):
# clear out mapper events
Mapper.dispatch._clear()
ClassManager.dispatch._clear()
super(_CompositeTestBase, self).teardown()
@classmethod
def _type_fixture(cls):
return Point
class MutableCompositeColumnDefaultTest(_CompositeTestBase,
fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
'foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('x', Integer, default=5),
Column('y', Integer, default=9),
Column('unrelated_data', String(50))
)
@classmethod
def setup_mappers(cls):
foo = cls.tables.foo
cls.Point = cls._type_fixture()
mapper(Foo, foo, properties={
'data': composite(cls.Point, foo.c.x, foo.c.y)
})
def test_evt_on_flush_refresh(self):
# this still worked prior to #3427 being fixed in any case
sess = Session()
f1 = Foo(data=self.Point(None, None))
sess.add(f1)
sess.flush()
eq_(f1.data, self.Point(5, 9))
assert f1 not in sess.dirty
f1.data.x = 10
assert f1 in sess.dirty
class MutableCompositesUnpickleTest(_CompositeTestBase, fixtures.MappedTest):
@classmethod
def setup_mappers(cls):
foo = cls.tables.foo
cls.Point = cls._type_fixture()
mapper(FooWithEq, foo, properties={
'data': composite(cls.Point, foo.c.x, foo.c.y)
})
def test_unpickle_modified_eq(self):
u1 = FooWithEq(data=self.Point(3, 5))
for loads, dumps in picklers():
loads(dumps(u1))
class MutableCompositesTest(_CompositeTestBase, fixtures.MappedTest):
@classmethod
def setup_mappers(cls):
foo = cls.tables.foo
Point = cls._type_fixture()
mapper(Foo, foo, properties={
'data': composite(Point, foo.c.x, foo.c.y)
})
def test_in_place_mutation(self):
sess = Session()
d = Point(3, 4)
f1 = Foo(data=d)
sess.add(f1)
sess.commit()
f1.data.y = 5
sess.commit()
eq_(f1.data, Point(3, 5))
def test_pickle_of_parent(self):
sess = Session()
d = Point(3, 4)
f1 = Foo(data=d)
sess.add(f1)
sess.commit()
f1.data
assert 'data' in f1.__dict__
sess.close()
for loads, dumps in picklers():
sess = Session()
f2 = loads(dumps(f1))
sess.add(f2)
f2.data.y = 12
assert f2 in sess.dirty
def test_set_none(self):
sess = Session()
f1 = Foo(data=None)
sess.add(f1)
sess.commit()
eq_(f1.data, Point(None, None))
f1.data.y = 5
sess.commit()
eq_(f1.data, Point(None, 5))
def test_set_illegal(self):
f1 = Foo()
assert_raises_message(
ValueError,
"Attribute 'data' does not accept objects",
setattr, f1, 'data', 'foo'
)
def test_unrelated_flush(self):
sess = Session()
f1 = Foo(data=Point(3, 4), unrelated_data="unrelated")
sess.add(f1)
sess.flush()
f1.unrelated_data = "unrelated 2"
sess.flush()
f1.data.x = 5
sess.commit()
eq_(f1.data.x, 5)
class MutableCompositeCallableTest(_CompositeTestBase, fixtures.MappedTest):
@classmethod
def setup_mappers(cls):
foo = cls.tables.foo
Point = cls._type_fixture()
# in this case, this is not actually a MutableComposite.
# so we don't expect it to track changes
mapper(Foo, foo, properties={
'data': composite(lambda x, y: Point(x, y), foo.c.x, foo.c.y)
})
def test_basic(self):
sess = Session()
f1 = Foo(data=Point(3, 4))
sess.add(f1)
sess.flush()
f1.data.x = 5
sess.commit()
# we didn't get the change.
eq_(f1.data.x, 3)
class MutableCompositeCustomCoerceTest(_CompositeTestBase,
fixtures.MappedTest):
@classmethod
def _type_fixture(cls):
return MyPoint
@classmethod
def setup_mappers(cls):
foo = cls.tables.foo
Point = cls._type_fixture()
mapper(Foo, foo, properties={
'data': composite(Point, foo.c.x, foo.c.y)
})
def test_custom_coerce(self):
f = Foo()
f.data = (3, 4)
eq_(f.data, Point(3, 4))
def test_round_trip_ok(self):
sess = Session()
f = Foo()
f.data = (3, 4)
sess.add(f)
sess.commit()
eq_(f.data, Point(3, 4))
class MutableInheritedCompositesTest(_CompositeTestBase, fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('x', Integer),
Column('y', Integer)
)
Table('subfoo', metadata,
Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
)
@classmethod
def setup_mappers(cls):
foo = cls.tables.foo
subfoo = cls.tables.subfoo
Point = cls._type_fixture()
mapper(Foo, foo, properties={
'data': composite(Point, foo.c.x, foo.c.y)
})
mapper(SubFoo, subfoo, inherits=Foo)
def test_in_place_mutation_subclass(self):
sess = Session()
d = Point(3, 4)
f1 = SubFoo(data=d)
sess.add(f1)
sess.commit()
f1.data.y = 5
sess.commit()
eq_(f1.data, Point(3, 5))
def test_pickle_of_parent_subclass(self):
sess = Session()
d = Point(3, 4)
f1 = SubFoo(data=d)
sess.add(f1)
sess.commit()
f1.data
assert 'data' in f1.__dict__
sess.close()
for loads, dumps in picklers():
sess = Session()
f2 = loads(dumps(f1))
sess.add(f2)
f2.data.y = 12
assert f2 in sess.dirty
| mit | 80,530,328,737,471,220 | 24.620219 | 78 | 0.535992 | false |
ChristineLaMuse/mozillians | vendor-local/lib/python/celery/tests/test_worker/__init__.py | 12 | 33416 | from __future__ import absolute_import
from __future__ import with_statement
import socket
import sys
from collections import deque
from datetime import datetime, timedelta
from Queue import Empty
from kombu.transport.base import Message
from kombu.connection import BrokerConnection
from mock import Mock, patch
from nose import SkipTest
from celery import current_app
from celery.app.defaults import DEFAULTS
from celery.concurrency.base import BasePool
from celery.datastructures import AttributeDict
from celery.exceptions import SystemTerminate
from celery.task import task as task_dec
from celery.task import periodic_task as periodic_task_dec
from celery.utils import uuid
from celery.worker import WorkController
from celery.worker.buckets import FastQueue
from celery.worker.job import Request
from celery.worker.consumer import Consumer as MainConsumer
from celery.worker.consumer import QoS, RUN, PREFETCH_COUNT_MAX, CLOSE
from celery.utils.serialization import pickle
from celery.utils.timer2 import Timer
from celery.tests.utils import AppCase, Case
class PlaceHolder(object):
pass
class MyKombuConsumer(MainConsumer):
broadcast_consumer = Mock()
task_consumer = Mock()
def __init__(self, *args, **kwargs):
kwargs.setdefault("pool", BasePool(2))
super(MyKombuConsumer, self).__init__(*args, **kwargs)
def restart_heartbeat(self):
self.heart = None
class MockNode(object):
commands = []
def handle_message(self, body, message):
self.commands.append(body.pop("command", None))
class MockEventDispatcher(object):
sent = []
closed = False
flushed = False
_outbound_buffer = []
def send(self, event, *args, **kwargs):
self.sent.append(event)
def close(self):
self.closed = True
def flush(self):
self.flushed = True
class MockHeart(object):
closed = False
def stop(self):
self.closed = True
@task_dec()
def foo_task(x, y, z, **kwargs):
return x * y * z
@periodic_task_dec(run_every=60)
def foo_periodic_task():
return "foo"
def create_message(channel, **data):
data.setdefault("id", uuid())
channel.no_ack_consumers = set()
return Message(channel, body=pickle.dumps(dict(**data)),
content_type="application/x-python-serialize",
content_encoding="binary",
delivery_info={"consumer_tag": "mock"})
class test_QoS(Case):
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value, None)
def set(self, value):
return value
def test_qos_increment_decrement(self):
qos = self._QoS(10)
self.assertEqual(qos.increment(), 11)
self.assertEqual(qos.increment(3), 14)
self.assertEqual(qos.increment(-30), 14)
self.assertEqual(qos.decrement(7), 7)
self.assertEqual(qos.decrement(), 6)
with self.assertRaises(AssertionError):
qos.decrement(10)
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
self.assertEqual(qos.increment(), 0)
self.assertEqual(qos.increment(3), 0)
self.assertEqual(qos.increment(-30), 0)
self.assertEqual(qos.decrement(7), 0)
self.assertEqual(qos.decrement(), 0)
self.assertEqual(qos.decrement(10), 0)
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in xrange(1000):
qos.increment()
def sub():
for i in xrange(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
self.assertEqual(qos.value, 2010)
qos.value = 1000
threaded([add, sub]) # n = 2
self.assertEqual(qos.value, 1000)
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1,
current_app.log.get_default_logger())
qos.update()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
qos.increment()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.increment()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1)
qos.decrement()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.decrement()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
def test_consumer_increment_decrement(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.update()
self.assertEqual(qos.value, 10)
self.assertIn({"prefetch_count": 10}, consumer.qos.call_args)
qos.decrement()
self.assertEqual(qos.value, 9)
self.assertIn({"prefetch_count": 9}, consumer.qos.call_args)
qos.decrement_eventually()
self.assertEqual(qos.value, 8)
self.assertIn({"prefetch_count": 9}, consumer.qos.call_args)
# Does not decrement 0 value
qos.value = 0
qos.decrement()
self.assertEqual(qos.value, 0)
qos.increment()
self.assertEqual(qos.value, 0)
def test_consumer_decrement_eventually(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.decrement_eventually()
self.assertEqual(qos.value, 9)
qos.value = 0
qos.decrement_eventually()
self.assertEqual(qos.value, 0)
def test_set(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.set(12)
self.assertEqual(qos.prev, 12)
qos.set(qos.prev)
class test_Consumer(Case):
def setUp(self):
self.ready_queue = FastQueue()
self.eta_schedule = Timer()
self.logger = current_app.log.get_default_logger()
self.logger.setLevel(0)
def tearDown(self):
self.eta_schedule.stop()
def test_info(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.qos = QoS(l.task_consumer, 10, l.logger)
info = l.info
self.assertEqual(info["prefetch_count"], 10)
self.assertFalse(info["broker"])
l.connection = current_app.broker_connection()
info = l.info
self.assertTrue(info["broker"])
def test_start_when_closed(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = CLOSE
l.start()
def test_connection(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.reset_connection()
self.assertIsInstance(l.connection, BrokerConnection)
l._state = RUN
l.event_dispatcher = None
l.stop_consumers(close_connection=False)
self.assertTrue(l.connection)
l._state = RUN
l.stop_consumers()
self.assertIsNone(l.connection)
self.assertIsNone(l.task_consumer)
l.reset_connection()
self.assertIsInstance(l.connection, BrokerConnection)
l.stop_consumers()
l.stop()
l.close_connection()
self.assertIsNone(l.connection)
self.assertIsNone(l.task_consumer)
def test_close_connection(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = RUN
l.close_connection()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
eventer = l.event_dispatcher = Mock()
eventer.enabled = True
heart = l.heart = MockHeart()
l._state = RUN
l.stop_consumers()
self.assertTrue(eventer.close.call_count)
self.assertTrue(heart.closed)
def test_receive_message_unknown(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, unknown={"baz": "!!!"})
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
with self.assertWarnsRegex(RuntimeWarning, r'unknown message'):
l.receive_message(m.decode(), m)
@patch("celery.utils.timer2.to_timestamp")
def test_receive_message_eta_OverflowError(self, to_timestamp):
to_timestamp.side_effect = OverflowError()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=("2, 2"),
kwargs={},
eta=datetime.now().isoformat())
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
l.update_strategies()
l.receive_message(m.decode(), m)
self.assertTrue(m.acknowledged)
self.assertTrue(to_timestamp.call_count)
def test_receive_message_InvalidTaskError(self):
logger = Mock()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=(1, 2), kwargs="foobarbaz", id=1)
l.update_strategies()
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
l.receive_message(m.decode(), m)
self.assertIn("Received invalid task message",
logger.error.call_args[0][0])
def test_on_decode_error(self):
logger = Mock()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, logger,
send_events=False)
class MockMessage(Mock):
content_type = "application/x-msgpack"
content_encoding = "binary"
body = "foobarbaz"
message = MockMessage()
l.on_decode_error(message, KeyError("foo"))
self.assertTrue(message.ack.call_count)
self.assertIn("Can't decode message body",
logger.critical.call_args[0][0])
def test_receieve_message(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=[2, 4, 8], kwargs={})
l.update_strategies()
l.event_dispatcher = Mock()
l.receive_message(m.decode(), m)
in_bucket = self.ready_queue.get_nowait()
self.assertIsInstance(in_bucket, Request)
self.assertEqual(in_bucket.task_name, foo_task.name)
self.assertEqual(in_bucket.execute(), 2 * 4 * 8)
self.assertTrue(self.eta_schedule.empty())
def test_start_connection_error(self):
class MockConsumer(MainConsumer):
iterations = 0
def consume_messages(self):
if not self.iterations:
self.iterations = 1
raise KeyError("foo")
raise SyntaxError("bar")
l = MockConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, pool=BasePool())
l.connection_errors = (KeyError, )
with self.assertRaises(SyntaxError):
l.start()
l.heart.stop()
l.priority_timer.stop()
def test_start_channel_error(self):
# Regression test for AMQPChannelExceptions that can occur within the
# consumer. (i.e. 404 errors)
class MockConsumer(MainConsumer):
iterations = 0
def consume_messages(self):
if not self.iterations:
self.iterations = 1
raise KeyError("foo")
raise SyntaxError("bar")
l = MockConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, pool=BasePool())
l.channel_errors = (KeyError, )
self.assertRaises(SyntaxError, l.start)
l.heart.stop()
l.priority_timer.stop()
def test_consume_messages_ignores_socket_timeout(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
raise socket.timeout(10)
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection = Connection()
l.task_consumer = Mock()
l.connection.obj = l
l.qos = QoS(l.task_consumer, 10, l.logger)
l.consume_messages()
def test_consume_messages_when_socket_error(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
raise socket.error("foo")
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = RUN
c = l.connection = Connection()
l.connection.obj = l
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, 10, l.logger)
with self.assertRaises(socket.error):
l.consume_messages()
l._state = CLOSE
l.connection = c
l.consume_messages()
def test_consume_messages(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection = Connection()
l.connection.obj = l
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, 10, l.logger)
l.consume_messages()
l.consume_messages()
self.assertTrue(l.task_consumer.consume.call_count)
l.task_consumer.qos.assert_called_with(prefetch_count=10)
l.qos.decrement()
l.consume_messages()
l.task_consumer.qos.assert_called_with(prefetch_count=9)
def test_maybe_conn_error(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection_errors = (KeyError, )
l.channel_errors = (SyntaxError, )
l.maybe_conn_error(Mock(side_effect=AttributeError("foo")))
l.maybe_conn_error(Mock(side_effect=KeyError("foo")))
l.maybe_conn_error(Mock(side_effect=SyntaxError("foo")))
with self.assertRaises(IndexError):
l.maybe_conn_error(Mock(side_effect=IndexError("foo")))
def test_apply_eta_task(self):
from celery.worker import state
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.qos = QoS(None, 10, l.logger)
task = object()
qos = l.qos.value
l.apply_eta_task(task)
self.assertIn(task, state.reserved_requests)
self.assertEqual(l.qos.value, qos - 1)
self.assertIs(self.ready_queue.get_nowait(), task)
def test_receieve_message_eta_isoformat(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
eta=datetime.now().isoformat(),
args=[2, 4, 8], kwargs={})
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, l.initial_prefetch_count, l.logger)
l.event_dispatcher = Mock()
l.enabled = False
l.update_strategies()
l.receive_message(m.decode(), m)
l.eta_schedule.stop()
items = [entry[2] for entry in self.eta_schedule.queue]
        found = False
for item in items:
if item.args[0].name == foo_task.name:
found = True
self.assertTrue(found)
self.assertTrue(l.task_consumer.qos.call_count)
l.eta_schedule.stop()
def test_on_control(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
l.reset_pidbox_node = Mock()
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.pidbox_node = Mock()
l.pidbox_node.handle_message.side_effect = KeyError("foo")
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.pidbox_node = Mock()
l.pidbox_node.handle_message.side_effect = ValueError("foo")
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.reset_pidbox_node.assert_called_with()
def test_revoke(self):
ready_queue = FastQueue()
l = MyKombuConsumer(ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
id = uuid()
t = create_message(backend, task=foo_task.name, args=[2, 4, 8],
kwargs={}, id=id)
from celery.worker.state import revoked
revoked.add(id)
l.receive_message(t.decode(), t)
self.assertTrue(ready_queue.empty())
def test_receieve_message_not_registered(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, task="x.X.31x", args=[2, 4, 8], kwargs={})
l.event_dispatcher = Mock()
self.assertFalse(l.receive_message(m.decode(), m))
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
self.assertTrue(self.eta_schedule.empty())
def test_receieve_message_ack_raises(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, args=[2, 4, 8], kwargs={})
l.event_dispatcher = Mock()
l.connection_errors = (socket.error, )
l.logger = Mock()
m.reject = Mock()
m.reject.side_effect = socket.error("foo")
with self.assertWarnsRegex(RuntimeWarning, r'unknown message'):
self.assertFalse(l.receive_message(m.decode(), m))
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
self.assertTrue(self.eta_schedule.empty())
m.reject.assert_called_with()
self.assertTrue(l.logger.critical.call_count)
def test_receieve_message_eta(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.event_dispatcher = Mock()
l.event_dispatcher._outbound_buffer = deque()
backend = Mock()
m = create_message(backend, task=foo_task.name,
args=[2, 4, 8], kwargs={},
eta=(datetime.now() +
timedelta(days=1)).isoformat())
l.reset_connection()
p = l.app.conf.BROKER_CONNECTION_RETRY
l.app.conf.BROKER_CONNECTION_RETRY = False
try:
l.reset_connection()
finally:
l.app.conf.BROKER_CONNECTION_RETRY = p
l.stop_consumers()
l.event_dispatcher = Mock()
l.receive_message(m.decode(), m)
l.eta_schedule.stop()
in_hold = self.eta_schedule.queue[0]
self.assertEqual(len(in_hold), 3)
eta, priority, entry = in_hold
task = entry.args[0]
self.assertIsInstance(task, Request)
self.assertEqual(task.task_name, foo_task.name)
self.assertEqual(task.execute(), 2 * 4 * 8)
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
def test_reset_pidbox_node(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
chan = l.pidbox_node.channel = Mock()
l.connection = Mock()
chan.close.side_effect = socket.error("foo")
l.connection_errors = (socket.error, )
l.reset_pidbox_node()
chan.close.assert_called_with()
def test_reset_pidbox_node_green(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pool = Mock()
l.pool.is_green = True
l.reset_pidbox_node()
l.pool.spawn_n.assert_called_with(l._green_pidbox_node)
def test__green_pidbox_node(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
class BConsumer(Mock):
def __enter__(self):
self.consume()
return self
def __exit__(self, *exc_info):
self.cancel()
l.pidbox_node.listen = BConsumer()
connections = []
class Connection(object):
def __init__(self, obj):
connections.append(self)
self.obj = obj
self.default_channel = self.channel()
self.closed = False
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def channel(self):
return Mock()
def drain_events(self, **kwargs):
self.obj.connection = None
self.obj._pidbox_node_shutdown.set()
def close(self):
self.closed = True
l.connection = Mock()
l._open_connection = lambda: Connection(obj=l)
l._green_pidbox_node()
l.pidbox_node.listen.assert_called_with(callback=l.on_control)
self.assertTrue(l.broadcast_consumer)
l.broadcast_consumer.consume.assert_called_with()
self.assertIsNone(l.connection)
self.assertTrue(connections[0].closed)
def test_start__consume_messages(self):
class _QoS(object):
prev = 3
value = 4
def update(self):
self.prev = self.value
class _Consumer(MyKombuConsumer):
iterations = 0
def reset_connection(self):
if self.iterations >= 1:
raise KeyError("foo")
init_callback = Mock()
l = _Consumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, init_callback=init_callback)
l.task_consumer = Mock()
l.broadcast_consumer = Mock()
l.qos = _QoS()
l.connection = BrokerConnection()
l.iterations = 0
def raises_KeyError(limit=None):
l.iterations += 1
if l.qos.prev != l.qos.value:
l.qos.update()
if l.iterations >= 2:
raise KeyError("foo")
l.consume_messages = raises_KeyError
with self.assertRaises(KeyError):
l.start()
self.assertTrue(init_callback.call_count)
self.assertEqual(l.iterations, 1)
self.assertEqual(l.qos.prev, l.qos.value)
init_callback.reset_mock()
l = _Consumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, init_callback=init_callback)
l.qos = _QoS()
l.task_consumer = Mock()
l.broadcast_consumer = Mock()
l.connection = BrokerConnection()
l.consume_messages = Mock(side_effect=socket.error("foo"))
with self.assertRaises(socket.error):
l.start()
self.assertTrue(init_callback.call_count)
self.assertTrue(l.consume_messages.call_count)
def test_reset_connection_with_no_node(self):
l = MainConsumer(self.ready_queue, self.eta_schedule, self.logger)
self.assertEqual(None, l.pool)
l.reset_connection()
class test_WorkController(AppCase):
def setup(self):
self.worker = self.create_worker()
def create_worker(self, **kw):
worker = WorkController(concurrency=1, loglevel=0, **kw)
worker._shutdown_complete.set()
worker.logger = Mock()
return worker
@patch("celery.platforms.signals")
@patch("celery.platforms.set_mp_process_title")
def test_process_initializer(self, set_mp_process_title, _signals):
from celery import Celery
from celery import signals
from celery.app import _tls
from celery.concurrency.processes import process_initializer
from celery.concurrency.processes import (WORKER_SIGRESET,
WORKER_SIGIGNORE)
def on_worker_process_init(**kwargs):
on_worker_process_init.called = True
on_worker_process_init.called = False
signals.worker_process_init.connect(on_worker_process_init)
loader = Mock()
app = Celery(loader=loader, set_as_current=False)
app.conf = AttributeDict(DEFAULTS)
process_initializer(app, "awesome.worker.com")
self.assertIn((tuple(WORKER_SIGIGNORE), {}),
_signals.ignore.call_args_list)
self.assertIn((tuple(WORKER_SIGRESET), {}),
_signals.reset.call_args_list)
self.assertTrue(app.loader.init_worker.call_count)
self.assertTrue(on_worker_process_init.called)
self.assertIs(_tls.current_app, app)
set_mp_process_title.assert_called_with("celeryd",
hostname="awesome.worker.com")
def test_with_rate_limits_disabled(self):
worker = WorkController(concurrency=1, loglevel=0,
disable_rate_limits=True)
self.assertTrue(hasattr(worker.ready_queue, "put"))
def test_attrs(self):
worker = self.worker
self.assertIsInstance(worker.scheduler, Timer)
self.assertTrue(worker.scheduler)
self.assertTrue(worker.pool)
self.assertTrue(worker.consumer)
self.assertTrue(worker.mediator)
self.assertTrue(worker.components)
def test_with_embedded_celerybeat(self):
worker = WorkController(concurrency=1, loglevel=0,
embed_clockservice=True)
self.assertTrue(worker.beat)
self.assertIn(worker.beat, worker.components)
def test_with_autoscaler(self):
worker = self.create_worker(autoscale=[10, 3], send_events=False,
eta_scheduler_cls="celery.utils.timer2.Timer")
self.assertTrue(worker.autoscaler)
def test_dont_stop_or_terminate(self):
worker = WorkController(concurrency=1, loglevel=0)
worker.stop()
self.assertNotEqual(worker._state, worker.CLOSE)
worker.terminate()
self.assertNotEqual(worker._state, worker.CLOSE)
sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False
try:
worker._state = worker.RUN
worker.stop(in_sighandler=True)
self.assertNotEqual(worker._state, worker.CLOSE)
worker.terminate(in_sighandler=True)
self.assertNotEqual(worker._state, worker.CLOSE)
finally:
worker.pool.signal_safe = sigsafe
def test_on_timer_error(self):
worker = WorkController(concurrency=1, loglevel=0)
worker.logger = Mock()
try:
raise KeyError("foo")
except KeyError:
exc_info = sys.exc_info()
worker.on_timer_error(exc_info)
msg, args = worker.logger.error.call_args[0]
self.assertIn("KeyError", msg % args)
def test_on_timer_tick(self):
worker = WorkController(concurrency=1, loglevel=10)
worker.logger = Mock()
worker.timer_debug = worker.logger.debug
worker.on_timer_tick(30.0)
xargs = worker.logger.debug.call_args[0]
fmt, arg = xargs[0], xargs[1]
self.assertEqual(30.0, arg)
self.assertIn("Next eta %s secs", fmt)
def test_process_task(self):
worker = self.worker
worker.pool = Mock()
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.process_task(task)
self.assertEqual(worker.pool.apply_async.call_count, 1)
worker.pool.stop()
def test_process_task_raise_base(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = KeyboardInterrupt("Ctrl+C")
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.components = []
worker._state = worker.RUN
with self.assertRaises(KeyboardInterrupt):
worker.process_task(task)
self.assertEqual(worker._state, worker.TERMINATE)
def test_process_task_raise_SystemTerminate(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = SystemTerminate()
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.components = []
worker._state = worker.RUN
with self.assertRaises(SystemExit):
worker.process_task(task)
self.assertEqual(worker._state, worker.TERMINATE)
def test_process_task_raise_regular(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = KeyError("some exception")
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.process_task(task)
worker.pool.stop()
def test_start_catches_base_exceptions(self):
worker1 = self.create_worker()
stc = Mock()
stc.start.side_effect = SystemTerminate()
worker1.components = [stc]
worker1.start()
self.assertTrue(stc.terminate.call_count)
worker2 = self.create_worker()
sec = Mock()
sec.start.side_effect = SystemExit()
sec.terminate = None
worker2.components = [sec]
worker2.start()
self.assertTrue(sec.stop.call_count)
def test_state_db(self):
from celery.worker import state
Persistent = state.Persistent
state.Persistent = Mock()
try:
worker = self.create_worker(state_db="statefilename")
self.assertTrue(worker._persistence)
finally:
state.Persistent = Persistent
def test_disable_rate_limits_solo(self):
worker = self.create_worker(disable_rate_limits=True,
pool_cls="solo")
self.assertIsInstance(worker.ready_queue, FastQueue)
self.assertIsNone(worker.mediator)
self.assertEqual(worker.ready_queue.put, worker.process_task)
def test_disable_rate_limits_processes(self):
try:
worker = self.create_worker(disable_rate_limits=True,
pool_cls="processes")
except ImportError:
raise SkipTest("multiprocessing not supported")
self.assertIsInstance(worker.ready_queue, FastQueue)
self.assertTrue(worker.mediator)
self.assertNotEqual(worker.ready_queue.put, worker.process_task)
def test_start__stop(self):
worker = self.worker
worker._shutdown_complete.set()
worker.components = [Mock(), Mock(), Mock(), Mock()]
worker.start()
for w in worker.components:
self.assertTrue(w.start.call_count)
worker.stop()
for component in worker.components:
            self.assertTrue(component.stop.call_count)
def test_start__terminate(self):
worker = self.worker
worker._shutdown_complete.set()
worker.components = [Mock(), Mock(), Mock(), Mock(), Mock()]
for component in worker.components[:3]:
component.terminate = None
worker.start()
for w in worker.components[:3]:
self.assertTrue(w.start.call_count)
        # NB: the second argument here is only used as the assertion message
        self.assertTrue(worker._running, len(worker.components))
self.assertEqual(worker._state, RUN)
worker.terminate()
for component in worker.components[:3]:
self.assertTrue(component.stop.call_count)
self.assertTrue(worker.components[4].terminate.call_count)
| bsd-3-clause | -1,895,767,343,035,479,600 | 33.953975 | 78 | 0.587503 | false |
bloopletech/Comix | src/thumbnail.py | 2 | 6430 | """thumbnail.py - Thumbnail module for Comix implementing (most of) the
freedesktop.org "standard" at http://jens.triq.net/thumbnail-spec/
Only normal size (i.e. 128x128 px) thumbnails are supported.
"""
import os
from urllib import pathname2url, url2pathname
try: # The md5 module is deprecated as of Python 2.5, replaced by hashlib.
from hashlib import md5
except ImportError:
from md5 import new as md5
import re
import shutil
import tempfile
import gtk
import Image
import archive
import constants
import filehandler
_thumbdir = os.path.join(constants.HOME_DIR, '.thumbnails/normal')
def get_thumbnail(path, create=True, dst_dir=_thumbdir):
"""Return a thumbnail pixbuf for the file at <path> by looking in the
directory of stored thumbnails. If a thumbnail for the file doesn't
exist we create a thumbnail pixbuf from the original. If <create>
is True we also save this new thumbnail in the thumbnail directory.
If no thumbnail for <path> can be produced (for whatever reason),
return None.
Images and archives are handled transparently. Note though that
None is always returned for archives where no thumbnail already exist
if <create> is False, since re-creating the thumbnail on the fly each
time would be too costly.
If <dst_dir> is set it is the base thumbnail directory, if not we use
the default .thumbnails/normal/.
"""
thumbpath = _path_to_thumbpath(path, dst_dir)
if not os.path.exists(thumbpath):
return _get_new_thumbnail(path, create, dst_dir)
try:
info = Image.open(thumbpath).info
try:
mtime = int(info['Thumb::MTime'])
except Exception:
mtime = -1
if os.stat(path).st_mtime != mtime:
return _get_new_thumbnail(path, create, dst_dir)
return gtk.gdk.pixbuf_new_from_file(thumbpath)
except Exception:
return None
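# Example usage (illustrative; the path and widget names are hypothetical):
#
#   pixbuf = get_thumbnail('/home/user/comics/issue01.cbz')
#   if pixbuf is not None:
#       image_widget.set_from_pixbuf(pixbuf)
#
# For archives a new thumbnail is only produced when <create> is True, as noted
# in the docstring above.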
def delete_thumbnail(path, dst_dir=_thumbdir):
"""Delete the thumbnail (if it exists) for the file at <path>.
If <dst_dir> is set it is the base thumbnail directory, if not we use
the default .thumbnails/normal/.
"""
thumbpath = _path_to_thumbpath(path, dst_dir)
if os.path.isfile(thumbpath):
try:
os.remove(thumbpath)
except Exception:
pass
def _get_new_thumbnail(path, create, dst_dir):
"""Return a new thumbnail pixbuf for the file at <path>. If <create> is
True we also save it to disk with <dst_dir> as the base thumbnail
directory.
"""
if archive.archive_mime_type(path) is not None:
if create:
return _get_new_archive_thumbnail(path, dst_dir)
return None
if create:
return _create_thumbnail(path, dst_dir)
return _get_pixbuf128(path)
def _get_new_archive_thumbnail(path, dst_dir):
"""Return a new thumbnail pixbuf for the archive at <path>, and save it
to disk; <dst_dir> is the base thumbnail directory.
"""
extractor = archive.Extractor()
tmpdir = tempfile.mkdtemp(prefix='comix_archive_thumb.')
condition = extractor.setup(path, tmpdir)
files = extractor.get_files()
wanted = _guess_cover(files)
    if wanted is None:
        shutil.rmtree(tmpdir)
        return None
extractor.set_files([wanted])
extractor.extract()
image_path = os.path.join(tmpdir, wanted)
condition.acquire()
while not extractor.is_ready(wanted):
condition.wait()
condition.release()
pixbuf = _create_thumbnail(path, dst_dir, image_path=image_path)
shutil.rmtree(tmpdir)
return pixbuf
def _create_thumbnail(path, dst_dir, image_path=None):
"""Create a thumbnail from the file at <path> and store it if it is
larger than 128x128 px. A pixbuf for the thumbnail is returned.
<dst_dir> is the base thumbnail directory (usually ~/.thumbnails/normal).
If <image_path> is not None it is used as the path to the image file
actually used to create the thumbnail image, although the created
thumbnail will still be saved as if for <path>.
"""
if image_path is None:
image_path = path
pixbuf = _get_pixbuf128(image_path)
if pixbuf is None:
return None
mime, width, height = gtk.gdk.pixbuf_get_file_info(image_path)
if width <= 128 and height <= 128:
return pixbuf
mime = mime['mime_types'][0]
uri = 'file://' + pathname2url(os.path.normpath(path))
thumbpath = _uri_to_thumbpath(uri, dst_dir)
stat = os.stat(path)
mtime = str(int(stat.st_mtime))
size = str(stat.st_size)
width = str(width)
height = str(height)
tEXt_data = {
'tEXt::Thumb::URI': uri,
'tEXt::Thumb::MTime': mtime,
'tEXt::Thumb::Size': size,
'tEXt::Thumb::Mimetype': mime,
'tEXt::Thumb::Image::Width': width,
'tEXt::Thumb::Image::Height': height,
'tEXt::Software': 'Comix %s' % constants.VERSION
}
try:
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir, 0700)
pixbuf.save(thumbpath + '-comixtemp', 'png', tEXt_data)
os.rename(thumbpath + '-comixtemp', thumbpath)
os.chmod(thumbpath, 0600)
except Exception:
print '! thumbnail.py: Could not write', thumbpath, '\n'
return pixbuf
def _path_to_thumbpath(path, dst_dir):
uri = 'file://' + pathname2url(os.path.normpath(path))
return _uri_to_thumbpath(uri, dst_dir)
def _uri_to_thumbpath(uri, dst_dir):
"""Return the full path to the thumbnail for <uri> when <dst_dir> the base
thumbnail directory.
"""
md5hash = md5(uri).hexdigest()
thumbpath = os.path.join(dst_dir, md5hash + '.png')
return thumbpath
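# Example (illustrative): following the freedesktop.org spec, the thumbnail
# filename is the md5 hex digest of the file's URI (not of its contents), e.g.
#
#   _uri_to_thumbpath('file:///home/user/foo.png', _thumbdir)
#   # -> os.path.join(_thumbdir, md5('file:///home/user/foo.png').hexdigest() + '.png')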
def _get_pixbuf128(path):
try:
return gtk.gdk.pixbuf_new_from_file_at_size(path, 128, 128)
except Exception:
return None
def _guess_cover(files):
"""Return the filename within <files> that is the most likely to be the
cover of an archive using some simple heuristics.
"""
filehandler.alphanumeric_sort(files)
ext_re = re.compile(r'\.(jpg|jpeg|png|gif|tif|tiff|bmp)\s*$', re.I)
front_re = re.compile('(cover|front)', re.I)
images = filter(ext_re.search, files)
candidates = filter(front_re.search, images)
candidates = [c for c in candidates if 'back' not in c.lower()]
if candidates:
return candidates[0]
if images:
return images[0]
return None
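# Example (illustrative): for an archive listing
#
#   ['back.jpg', 'credits.txt', 'Cover_01.jpg', '001.jpg']
#
# the cover/front heuristic picks 'Cover_01.jpg'.  If nothing matches, the
# first image in alphanumeric order is used, and None is returned when the
# archive contains no images at all.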
| gpl-2.0 | 1,211,705,157,724,083,500 | 32.664921 | 78 | 0.651944 | false |
alexsh/Telegram | TMessagesProj/jni/boringssl/third_party/googletest/test/googletest-uninitialized-test.py | 95 | 2474 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-uninitialized-test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print('Expected: %s' % (expected,))
print(' Actual: %s' % (actual,))
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
  # If the test program manages to exit cleanly (code 0), it must at least have
  # printed the notice about calling InitGoogleTest().
  p = gtest_test_utils.Subprocess(command)
  if p.exited and p.exit_code == 0:
    Assert('IMPORTANT NOTICE' in p.output)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 | -3,258,380,564,801,925,600 | 35.925373 | 82 | 0.756265 | false |
v1teka/yo | vendor/psy/psysh/test/tools/gen_unvis_fixtures.py | 536 | 3120 | #! /usr/bin/env python3
import sys
from os.path import abspath, expanduser, dirname, join
from itertools import chain
import json
import argparse
from vis import vis, unvis, VIS_WHITE
__dir__ = dirname(abspath(__file__))
OUTPUT_FILE = join(__dir__, '..', 'fixtures', 'unvis_fixtures.json')
# Add custom fixtures here
CUSTOM_FIXTURES = [
# test long multibyte string
''.join(chr(cp) for cp in range(1024)),
'foo bar',
'foo\nbar',
"$bar = 'baz';",
r'$foo = "\x20\\x20\\\x20\\\\x20"',
'$foo = function($bar) use($baz) {\n\treturn $baz->getFoo()\n};'
]
RANGES = {
# All valid codepoints in the BMP
'bmp': chain(range(0x0000, 0xD800), range(0xE000, 0xFFFF)),
# Smaller set of pertinent? codepoints inside BMP
# see: http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
'small': chain(
# latin blocks
range(0x0000, 0x0250),
# Greek, Cyrillic
range(0x0370, 0x0530),
# Hebrew, Arabic
range(0x590, 0x0700),
# CJK radicals
range(0x2E80, 0x2F00),
# Hiragana, Katakana
range(0x3040, 0x3100)
)
}
if __name__ == '__main__':
argp = argparse.ArgumentParser(
description='Generates test data for Psy\\Test\\Util\\StrTest')
argp.add_argument('-f', '--format-output', action='store_true',
help='Indent JSON output to ease debugging')
argp.add_argument('-a', '--all', action='store_true',
help="""Generates test data for all codepoints of the BMP.
(same as --range=bmp). WARNING: You will need quite
a lot of RAM to run the testsuite !
""")
argp.add_argument('-r', '--range',
help="""Choose the range of codepoints used to generate
test data.""",
choices=list(RANGES.keys()),
default='small')
argp.add_argument('-o', '--output-file',
help="""Write test data to OUTPUT_FILE
(defaults to PSYSH_DIR/test/fixtures)""")
args = argp.parse_args()
cp_range = RANGES['bmp'] if args.all else RANGES[args.range]
indent = 2 if args.format_output else None
if args.output_file:
OUTPUT_FILE = abspath(expanduser(args.output_file))
fixtures = []
# use SMALL_RANGE by default, it should be enough.
# use BMP_RANGE for a more complete smoke test
for codepoint in cp_range:
char = chr(codepoint)
encoded = vis(char, VIS_WHITE)
decoded = unvis(encoded)
fixtures.append((encoded, decoded))
# Add our own custom fixtures at the end,
# since they would fail anyway if one of the previous did.
for fixture in CUSTOM_FIXTURES:
encoded = vis(fixture, VIS_WHITE)
decoded = unvis(encoded)
fixtures.append((encoded, decoded))
with open(OUTPUT_FILE, 'w') as fp:
        # dump as json to avoid the backslashing and quoting nightmare
# between php and python
json.dump(fixtures, fp, indent=indent)
sys.exit(0)
| gpl-3.0 | -4,311,066,869,243,529,700 | 32.191489 | 80 | 0.586538 | false |
tyler-abbot/psid_py | setup.py | 1 | 2486 | """A setup module for psidPy
Based on the pypa sample project.
A tool to download data and build psid panels based on psidR by Florian Oswald.
See:
https://github.com/floswald/psidR
https://github.com/tyler-abbot/psidPy
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='psid_py',
version='1.0.2',
description='A tool to build PSID panels.',
# The project's main homepage
url='https://github.com/tyler-abbot/psidPy',
# Author details
author='Tyler Abbot',
author_email='tyler.abbot@sciencespo.fr',
# Licensing information
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Information Analysis',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
# What does your project relate to?
keywords='statistics econometrics data',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests',
'pandas',
'beautifulsoup4'],
)
| mit | -7,097,989,396,776,916,000 | 32.146667 | 79 | 0.645213 | false |
victorbriz/omim | 3party/freetype/src/tools/docmaker/utils.py | 153 | 3513 | #
# utils.py
#
# Auxiliary functions for the `docmaker' tool (library file).
#
# Copyright 2002-2015 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
import string, sys, os, glob, itertools
# current output directory
#
output_dir = None
# A function that generates a sorting key. We want lexicographical order
# (primary key) except that capital letters are sorted before lowercase
# ones (secondary key).
#
# The primary key is implemented by lowercasing the input. The secondary
# key is simply the original data appended, character by character. For
# example, the sort key for `FT_x' is `fFtT__xx', while the sort key for
# `ft_X' is `fftt__xX'. Since ASCII codes of uppercase letters are
# numerically smaller than the codes of lowercase letters, `fFtT__xx' gets
# sorted before `fftt__xX'.
#
def index_key( s ):
return string.join( itertools.chain( *zip( s.lower(), s ) ) )
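# Illustrative check of the ordering described above (not part of docmaker):
# sorted( ['ft_X', 'FT_x'], key = index_key ) places 'FT_x' before 'ft_X',
# since its key 'f F t T _ _ x x' compares lower than 'f f t t _ _ x X'.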
# Sort `input_list', placing the elements of `order_list' in front.
#
def sort_order_list( input_list, order_list ):
new_list = order_list[:]
for id in input_list:
if not id in order_list:
new_list.append( id )
return new_list
# Divert standard output to a given project documentation file. Use
# `output_dir' to determine the filename location if necessary and save the
# old stdout handle in a tuple that is returned by this function.
#
def open_output( filename ):
global output_dir
if output_dir and output_dir != "":
filename = output_dir + os.sep + filename
old_stdout = sys.stdout
new_file = open( filename, "w" )
sys.stdout = new_file
return ( new_file, old_stdout )
# Close the output that was returned by `open_output'.
#
def close_output( output ):
output[0].close()
sys.stdout = output[1]
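# Typical usage of the two helpers above (sketch; the file name is illustrative):
#
#   output = open_output( "index.html" )
#   ... write the page to stdout ...
#   close_output( output )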
# Check output directory.
#
def check_output():
global output_dir
if output_dir:
if output_dir != "":
if not os.path.isdir( output_dir ):
sys.stderr.write( "argument"
+ " '" + output_dir + "' "
+ "is not a valid directory\n" )
sys.exit( 2 )
else:
output_dir = None
def file_exists( pathname ):
"""Check that a given file exists."""
result = 1
try:
file = open( pathname, "r" )
file.close()
except:
result = None
sys.stderr.write( pathname + " couldn't be accessed\n" )
return result
def make_file_list( args = None ):
"""Build a list of input files from command-line arguments."""
file_list = []
# sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
if not args:
args = sys.argv[1:]
for pathname in args:
if string.find( pathname, '*' ) >= 0:
newpath = glob.glob( pathname )
newpath.sort() # sort files -- this is important because
# of the order of files
else:
newpath = [pathname]
file_list.extend( newpath )
if len( file_list ) == 0:
file_list = None
else:
# now filter the file list to remove non-existing ones
file_list = filter( file_exists, file_list )
return file_list
# eof
| apache-2.0 | -3,209,469,644,274,226,700 | 26.661417 | 75 | 0.615713 | false |
nick-thompson/servo | tests/wpt/web-platform-tests/conformance-checkers/tools/ins-del-datetime.py | 141 | 8457 | # -*- coding: utf-8 -*-
import os
ccdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
template = """<!DOCTYPE html>
<meta charset=utf-8>
"""
errors = {
"date-year-0000": "0000-12-09",
"date-month-00": "2002-00-15",
"date-month-13": "2002-13-15",
"date-0005-02-29": "0005-02-29",
"date-1969-02-29": "1969-02-29",
"date-1900-02-29": "1900-02-29",
"date-2100-02-29": "2100-02-29",
"date-2200-02-29": "2200-02-29",
"date-2014-02-29": "2014-02-29",
"date-day-04-31": "2002-04-31",
"date-day-06-31": "2002-06-31",
"date-day-09-31": "2002-09-31",
"date-day-11-31": "2002-11-31",
"date-day-01-32": "2002-01-32",
"date-day-03-32": "2002-03-32",
"date-day-05-32": "2002-05-32",
"date-day-07-32": "2002-07-32",
"date-day-08-32": "2002-08-32",
"date-day-10-32": "2002-10-32",
"date-day-12-32": "2002-12-32",
"date-iso8601-YYYYMMDD-no-hyphen": "20020929",
"date-leading-whitespace": " 2002-09-29",
"date-trailing-whitespace": "2002-09-29 ",
"date-month-one-digit": "2002-9-29",
"date-month-three-digits": "2002-011-29",
"date-year-three-digits": "782-09-29",
"date-day-one-digit": "2002-09-9",
"date-day-three-digits": "2002-11-009",
"date-day-missing-separator": "2014-0220",
"date-month-missing-separator": "201402-20",
"date-non-ascii-digit": "2002-09-29",
"date-trailing-U+0000": "2002-09-29�",
"date-trailing-pile-of-poo": "2002-09-29💩",
"date-wrong-day-separator": "2014-02:20",
"date-wrong-month-separator": "2014:02-20",
"date-year-negative": "-2002-09-29",
"date-leading-bom": "2002-09-29",
"global-date-and-time-60-minutes": "2011-11-12T00:60:00+08:00",
"global-date-and-time-60-seconds": "2011-11-12T00:00:60+08:00",
"global-date-and-time-2400": "2011-11-12T24:00:00+08:00",
"global-date-and-time-space-before-timezone": "2011-11-12T06:54:39 08:00",
"global-date-and-time-hour-one-digit": "2011-11-12T6:54:39-08:00",
"global-date-and-time-hour-three-digits": "2011-11-12T016:54:39-08:00",
"global-date-and-time-minutes-one-digit": "2011-11-12T16:4:39-08:00",
"global-date-and-time-minutes-three-digits": "2011-11-12T16:354:39-08:00",
"global-date-and-time-seconds-one-digit": "2011-11-12T16:54:9-08:00",
"global-date-and-time-seconds-three-digits": "2011-11-12T16:54:039-08:00",
"global-date-and-time-timezone-with-seconds": "2011-11-12T06:54:39-08:00:00",
"global-date-and-time-timezone-60-minutes": "2011-11-12T06:54:39-08:60",
"global-date-and-time-timezone-one-digit-hour": "2011-11-12T06:54:39-5:00",
"global-date-and-time-timezone-one-digit-minute": "2011-11-12T06:54:39-05:0",
"global-date-and-time-timezone-three-digit-hour": "2011-11-12T06:54:39-005:00",
"global-date-and-time-timezone-three-digit-minute": "2011-11-12T06:54:39-05:000",
"global-date-and-time-nbsp": "2011-11-12 14:54Z",
"global-date-and-time-missing-minutes-separator": "2011-11-12T1454Z",
"global-date-and-time-missing-seconds-separator": "2011-11-12T14:5439Z",
"global-date-and-time-wrong-minutes-separator": "2011-11-12T14-54Z",
"global-date-and-time-wrong-seconds-separator": "2011-11-12T14:54-39Z",
"global-date-and-time-lowercase-z": "2011-11-12T14:54z",
"global-date-and-time-with-both-T-and-space": "2011-11-12T 14:54Z",
"global-date-and-time-zero-digit-fraction": "2011-11-12T06:54:39.-08:00",
"global-date-and-time-four-digit-fraction": "2011-11-12T06:54:39.9291-08:00",
"global-date-and-time-bad-fraction-separator": "2011-11-12T14:54:39,929+0000",
"global-date-and-time-timezone-non-T-character": "2011-11-12+14:54Z",
"global-date-and-time-timezone-lowercase-t": "2011-11-12t14:54Z",
"global-date-and-time-timezone-multiple-spaces": "2011-11-12 14:54Z",
"global-date-and-time-timezone-offset-space-start": "2011-11-12T06:54:39.929 08:00",
"global-date-and-time-timezone-offset-colon-start": "2011-11-12T06:54:39.929:08:00",
"global-date-and-time-timezone-plus-2400": "2011-11-12T06:54:39-24:00",
"global-date-and-time-timezone-minus-2400": "2011-11-12T06:54:39-24:00",
"global-date-and-time-timezone-iso8601-two-digit": "2011-11-12T06:54:39-08",
"global-date-and-time-iso8601-hhmmss-no-colon": "2011-11-12T145439Z",
"global-date-and-time-iso8601-hhmm-no-colon": "2011-11-12T1454Z",
"global-date-and-time-iso8601-hh": "2011-11-12T14Z",
"year": "2006",
"yearless-date": "07-15",
"month": "2011-11",
"week": "2011-W46",
"time": "14:54:39",
"local-date-and-time": "2011-11-12T14:54",
"duration-P-form": "PT4H18M3S",
"duration-time-component": "4h 18m 3s",
}
warnings = {
"global-date-and-time-timezone-plus-1500": "2011-11-12T00:00:00+1500",
"global-date-and-time-timezone-minus-1300": "2011-11-12T00:00:00-1300",
"global-date-and-time-timezone-minutes-15": "2011-11-12T00:00:00+08:15",
"date-0214-09-29": "0214-09-29",
"date-20014-09-29": "20014-09-29",
"date-0004-02-29": "0004-02-29",
"date-year-five-digits": "12014-09-29",
}
non_errors = {
"date": "2002-09-29",
"date-2000-02-29": "2000-02-29",
"date-2400-02-29": "2400-02-29",
"date-1968-02-29": "1968-02-29",
"date-1900-02-28": "1900-02-28",
"date-2100-02-28": "2100-02-28",
"date-2100-02-28": "2100-02-28",
"date-2200-02-28": "2200-02-28",
"date-2014-02-28": "2014-02-28",
"date-day-01-31": "2002-01-31",
"date-day-03-31": "2002-03-31",
"date-day-05-31": "2002-05-31",
"date-day-07-31": "2002-07-31",
"date-day-08-31": "2002-08-31",
"date-day-10-31": "2002-10-31",
"date-day-12-31": "2002-12-31",
"date-day-04-30": "2002-04-30",
"date-day-06-30": "2002-06-30",
"date-day-09-30": "2002-09-30",
"date-day-11-30": "2002-11-30",
"global-date-and-time-no-seconds": "2011-11-12T14:54Z",
"global-date-and-time-with-seconds": "2011-11-12T14:54:39+0000",
"global-date-and-time-with-one-digit-fraction": "2011-11-12T06:54:39.9-08:00",
"global-date-and-time-with-two-digit-fraction": "2011-11-12T06:54:39.92+07:00",
"global-date-and-time-with-three-digit-fraction": "2011-11-12T06:54:39.929-06:00",
"global-date-and-time-space": "2011-11-12 14:54Z",
"global-date-and-time-timezone": "2011-11-12T06:54:39+0900",
"global-date-and-time-timezone-30": "2011-11-12T06:54:39-0830",
"global-date-and-time-timezone-45": "2011-11-12T06:54:39-0845",
"global-date-and-time-timezone-with-colon": "2011-11-12T06:54:39-08:00",
"global-date-and-time-timezone-without-colon": "2011-11-12T06:54:39-0800",
}
for key in errors.keys():
error = errors[key]
template_ins = template
template_del = template
template_ins += '<title>%s</title>\n' % key
template_del += '<title>%s</title>\n' % key
template_ins += '<ins datetime="%s"></ins>' % errors[key]
template_del += '<del datetime="%s"></del>' % errors[key]
ins_file = open(os.path.join(ccdir, "html/elements/ins/%s-novalid.html" % key), 'wb')
ins_file.write(template_ins)
ins_file.close()
del_file = open(os.path.join(ccdir, "html/elements/del/%s-novalid.html" % key), 'wb')
del_file.write(template_del)
del_file.close()
for key in warnings.keys():
non_error = warnings[key]
template_ins = template
template_del = template
template_ins += '<title>%s</title>\n' % key
template_del += '<title>%s</title>\n' % key
template_ins += '<ins datetime="%s"></ins>' % warnings[key]
template_del += '<del datetime="%s"></del>' % warnings[key]
ins_file = open(os.path.join(ccdir, "html/elements/ins/%s-haswarn.html" % key), 'wb')
ins_file.write(template_ins)
ins_file.close()
del_file = open(os.path.join(ccdir, "html/elements/del/%s-haswarn.html" % key), 'wb')
del_file.write(template_del)
del_file.close()
ins_file = open(os.path.join(ccdir, "html/elements/ins/datetime-isvalid.html"), 'wb')
del_file = open(os.path.join(ccdir, "html/elements/del/datetime-isvalid.html"), 'wb')
ins_file.write(template + '<title>valid datetime</title>\n')
del_file.write(template + '<title>valid datetime</title>\n')
for key in non_errors.keys():
non_error = non_errors[key]
ins_file.write('<ins datetime="%s"></ins> <!-- %s -->\n' % (non_errors[key], key))
del_file.write('<del datetime="%s"></del> <!-- %s -->\n' % (non_errors[key], key))
ins_file.close()
del_file.close()
# vim: ts=4:sw=4
| mpl-2.0 | -3,316,956,019,688,272,000 | 47.005682 | 89 | 0.63404 | false |
Sheikh-Aman/Stupid-Simple | Owl/owl.py | 1 | 3536 | import urllib2
import json
# Copyright 2016, Aman Alam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script accesses the YouTube Data API, to find out which video
# from a YouTube channel was the most viewed video
# It currently fetches for CokeStudioPK channel, paginates to get all the videos uploaded,
# ignores the channels or playlists, and considers only the videos which have a view count
# greater than 10000.
# You'll need to change the API Key to your own, though. Read the README on the repo to know how
apiKey = "<put-your-own-API-Key-Here>" # Put your API Key here
channelCokeStudioId = "UCM1VesJtJ9vTXcMLLr_FfdQ"
getListUrl = "https://www.googleapis.com/youtube/v3/search?order=date&part=snippet" \
"&channelId={}&maxResults=50&safeSearch=none&&type=video&key={}"
getVideoStatsUrl = "https://www.googleapis.com/youtube/v3/videos?part=contentDetails,statistics" \
"&id={}&key={}"
paramPageToken = "&pageToken="
largestViewCount = 0
viewCountThreshold = 10000 # ignore any video below this view count.
def getjsonfromurl(url):
response_body = urllib2.urlopen(url).read()
return response_body
def getallvideos(channelid):
jsonStr = getjsonfromurl(getListUrl.format(channelid, apiKey))
videoListJsonDict = json.loads(jsonStr)
print "Will get list of ~{} videos..".format(videoListJsonDict['pageInfo']['totalResults'])
items = videoListJsonDict['items']
while ('nextPageToken' in videoListJsonDict):
jsonStr = getjsonfromurl(getListUrl.format(channelid, apiKey)+paramPageToken+videoListJsonDict['nextPageToken'])
videoListJsonDict = json.loads(jsonStr)
items = items+videoListJsonDict['items']
print "Total videos fetched : ", len(items)
return items
print "Querying the API.."
items = getallvideos(channelCokeStudioId)
for idx, val in enumerate(items):
if val['id']['kind'] == "youtube#video":
print 'Getting info for : '+val['snippet']['title']
statsJsonStr = getjsonfromurl(getVideoStatsUrl.format(val['id']['videoId'], apiKey))
videoStatsJsonDict = json.loads(statsJsonStr)
viewCount = int(videoStatsJsonDict['items'][0]['statistics']['viewCount'])
if viewCount > viewCountThreshold:
if largestViewCount < viewCount:
largestViewCount = viewCount
mostViewedVideo = [
# Video Name
val['snippet']['title'],
# View Count
viewCount,
# Like Count
videoStatsJsonDict['items'][0]['statistics']['likeCount'],
# Youtube Url
"https://www.youtube.com/watch?v=" + val['id']['videoId']
]
else:
print "- Skipping short video "+val['snippet']['title']
print "Most viewed Video: "+mostViewedVideo[0]\
+ ",\nView count: "+str(mostViewedVideo[1])\
+ ",\nLike Count: "+mostViewedVideo[2]\
+ ",\nWatch here : "+mostViewedVideo[3]
| apache-2.0 | 1,199,927,583,401,330,200 | 41.60241 | 120 | 0.670814 | false |
squarefactor/sorl | sorl/thumbnail/tests/templatetags.py | 2 | 10486 | import os
from django.conf import settings
from django.template import Template, Context, TemplateSyntaxError
from sorl.thumbnail.tests.classes import BaseTest, RELATIVE_PIC_NAME
class ThumbnailTagTest(BaseTest):
def render_template(self, source):
context = Context({
'source': RELATIVE_PIC_NAME,
'invalid_source': 'not%s' % RELATIVE_PIC_NAME,
'size': (90, 100),
'invalid_size': (90, 'fish'),
'strsize': '80x90',
'invalid_strsize': ('1notasize2'),
'invalid_q': 'notanumber'})
source = '{% load thumbnail %}' + source
return Template(source).render(context)
def testTagInvalid(self):
# No args, or wrong number of args
src = '{% thumbnail %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
src = '{% thumbnail source %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
src = '{% thumbnail source 80x80 as variable crop %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Invalid option
src = '{% thumbnail source 240x200 invalid %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Old comma separated options format can only have an = for quality
src = '{% thumbnail source 80x80 crop=1,quality=1 %}'
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Invalid quality
src_invalid = '{% thumbnail source 240x200 quality=invalid_q %}'
src_missing = '{% thumbnail source 240x200 quality=missing_q %}'
# ...with THUMBNAIL_DEBUG = False
self.assertEqual(self.render_template(src_invalid), '')
self.assertEqual(self.render_template(src_missing), '')
# ...and with THUMBNAIL_DEBUG = True
self.change_settings.change({'DEBUG': True})
self.assertRaises(TemplateSyntaxError, self.render_template,
src_invalid)
self.assertRaises(TemplateSyntaxError, self.render_template,
src_missing)
# Invalid source
src = '{% thumbnail invalid_source 80x80 %}'
src_on_context = '{% thumbnail invalid_source 80x80 as thumb %}'
# ...with THUMBNAIL_DEBUG = False
self.change_settings.change({'DEBUG': False})
self.assertEqual(self.render_template(src), '')
# ...and with THUMBNAIL_DEBUG = True
self.change_settings.change({'DEBUG': True})
self.assertRaises(TemplateSyntaxError, self.render_template, src)
self.assertRaises(TemplateSyntaxError, self.render_template,
src_on_context)
# Non-existant source
src = '{% thumbnail non_existant_source 80x80 %}'
src_on_context = '{% thumbnail non_existant_source 80x80 as thumb %}'
# ...with THUMBNAIL_DEBUG = False
self.change_settings.change({'DEBUG': False})
self.assertEqual(self.render_template(src), '')
# ...and with THUMBNAIL_DEBUG = True
self.change_settings.change({'DEBUG': True})
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Invalid size as a tuple:
src = '{% thumbnail source invalid_size %}'
# ...with THUMBNAIL_DEBUG = False
self.change_settings.change({'DEBUG': False})
self.assertEqual(self.render_template(src), '')
# ...and THUMBNAIL_DEBUG = True
self.change_settings.change({'DEBUG': True})
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Invalid size as a string:
src = '{% thumbnail source invalid_strsize %}'
# ...with THUMBNAIL_DEBUG = False
self.change_settings.change({'DEBUG': False})
self.assertEqual(self.render_template(src), '')
# ...and THUMBNAIL_DEBUG = True
self.change_settings.change({'DEBUG': True})
self.assertRaises(TemplateSyntaxError, self.render_template, src)
# Non-existant size
src = '{% thumbnail source non_existant_size %}'
# ...with THUMBNAIL_DEBUG = False
self.change_settings.change({'DEBUG': False})
self.assertEqual(self.render_template(src), '')
# ...and THUMBNAIL_DEBUG = True
self.change_settings.change({'DEBUG': True})
self.assertRaises(TemplateSyntaxError, self.render_template, src)
def testTag(self):
expected_base = RELATIVE_PIC_NAME.replace('.', '_')
# Set DEBUG = True to make it easier to trace any failures
self.change_settings.change({'DEBUG': True})
# Basic
output = self.render_template('src="'
'{% thumbnail source 240x240 %}"')
expected = '%s_240x240_q85.jpg' % expected_base
expected_fn = os.path.join(settings.MEDIA_ROOT, expected)
self.verify_thumbnail((240, 180), expected_filename=expected_fn)
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
# Size from context variable
# as a tuple:
output = self.render_template('src="'
'{% thumbnail source size %}"')
expected = '%s_90x100_q85.jpg' % expected_base
expected_fn = os.path.join(settings.MEDIA_ROOT, expected)
self.verify_thumbnail((90, 67), expected_filename=expected_fn)
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
# as a string:
output = self.render_template('src="'
'{% thumbnail source strsize %}"')
expected = '%s_80x90_q85.jpg' % expected_base
expected_fn = os.path.join(settings.MEDIA_ROOT, expected)
self.verify_thumbnail((80, 60), expected_filename=expected_fn)
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
# On context
output = self.render_template('height:'
'{% thumbnail source 240x240 as thumb %}{{ thumb.height }}')
self.assertEqual(output, 'height:180')
# With options and quality
output = self.render_template('src="'
'{% thumbnail source 240x240 sharpen crop quality=95 %}"')
# Note that the opts are sorted to ensure a consistent filename.
expected = '%s_240x240_crop_sharpen_q95.jpg' % expected_base
expected_fn = os.path.join(settings.MEDIA_ROOT, expected)
self.verify_thumbnail((240, 240), expected_filename=expected_fn)
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, 'src="%s"' % expected_url)
# With option and quality on context (also using its unicode method to
# display the url)
output = self.render_template(
'{% thumbnail source 240x240 sharpen crop quality=95 as thumb %}'
'width:{{ thumb.width }}, url:{{ thumb }}')
self.assertEqual(output, 'width:240, url:%s' % expected_url)
# Old comma separated format for options is still supported.
output = self.render_template(
'{% thumbnail source 240x240 sharpen,crop,quality=95 as thumb %}'
'width:{{ thumb.width }}, url:{{ thumb }}')
self.assertEqual(output, 'width:240, url:%s' % expected_url)
filesize_tests = r"""
>>> from sorl.thumbnail.templatetags.thumbnail import filesize
>>> filesize('abc')
'abc'
>>> filesize(100, 'invalid')
100
>>> bytes = 20
>>> filesize(bytes)
'20 B'
>>> filesize(bytes, 'auto1000')
'20 B'
>>> bytes = 1001
>>> filesize(bytes)
'1001 B'
>>> filesize(bytes, 'auto1000')
'1 kB'
>>> bytes = 10100
>>> filesize(bytes)
'9.9 KiB'
# Note that the decimal place is only used if < 10
>>> filesize(bytes, 'auto1000')
'10 kB'
>>> bytes = 190000000
>>> filesize(bytes)
'181 MiB'
>>> filesize(bytes, 'auto1000')
'190 MB'
# 'auto*long' methods use pluralisation:
>>> filesize(1, 'auto1024long')
'1 byte'
>>> filesize(1, 'auto1000long')
'1 byte'
>>> filesize(2, 'auto1024long')
'2 bytes'
>>> filesize(0, 'auto1000long')
'0 bytes'
# Test all 'auto*long' output:
>>> for i in range(1,10):
... print '%s, %s' % (filesize(1024**i, 'auto1024long'),
... filesize(1000**i, 'auto1000long'))
1 kibibyte, 1 kilobyte
1 mebibyte, 1 megabyte
1 gibibyte, 1 gigabyte
1 tebibyte, 1 terabyte
1 pebibyte, 1 petabyte
1 exbibyte, 1 exabyte
1 zebibyte, 1 zettabyte
1 yobibyte, 1 yottabyte
1024 yobibytes, 1000 yottabytes
# Test all fixed outputs (eg 'kB' or 'MiB')
>>> from sorl.thumbnail.templatetags.thumbnail import filesize_formats,\
... filesize_long_formats
>>> for f in filesize_formats:
... print '%s (%siB, %sB):' % (filesize_long_formats[f], f.upper(), f)
... for i in range(0, 10):
... print ' %s, %s' % (filesize(1024**i, '%siB' % f.upper()),
... filesize(1000**i, '%sB' % f))
kilo (KiB, kB):
0.0009765625, 0.001
1.0, 1.0
1024.0, 1000.0
1048576.0, 1000000.0
1073741824.0, 1000000000.0
1.09951162778e+12, 1e+12
1.12589990684e+15, 1e+15
1.15292150461e+18, 1e+18
1.18059162072e+21, 1e+21
1.20892581961e+24, 1e+24
mega (MiB, MB):
0.0, 1e-06
0.0009765625, 0.001
1.0, 1.0
1024.0, 1000.0
1048576.0, 1000000.0
1073741824.0, 1000000000.0
1.09951162778e+12, 1e+12
1.12589990684e+15, 1e+15
1.15292150461e+18, 1e+18
1.18059162072e+21, 1e+21
giga (GiB, GB):
0.0, 1e-09
0.0, 1e-06
0.0009765625, 0.001
1.0, 1.0
1024.0, 1000.0
1048576.0, 1000000.0
1073741824.0, 1000000000.0
1.09951162778e+12, 1e+12
1.12589990684e+15, 1e+15
1.15292150461e+18, 1e+18
tera (TiB, TB):
0.0, 1e-12
0.0, 1e-09
0.0, 1e-06
0.0009765625, 0.001
1.0, 1.0
1024.0, 1000.0
1048576.0, 1000000.0
1073741824.0, 1000000000.0
1.09951162778e+12, 1e+12
1.12589990684e+15, 1e+15
peta (PiB, PB):
0.0, 1e-15
0.0, 1e-12
0.0, 1e-09
0.0, 1e-06
0.0009765625, 0.001
1.0, 1.0
1024.0, 1000.0
1048576.0, 1000000.0
1073741824.0, 1000000000.0
1.09951162778e+12, 1e+12
exa (EiB, EB):
0.0, 1e-18
0.0, 1e-15
0.0, 1e-12
0.0, 1e-09
0.0, 1e-06
0.0009765625, 0.001
1.0, 1.0
1024.0, 1000.0
1048576.0, 1000000.0
1073741824.0, 1000000000.0
zetta (ZiB, ZB):
0.0, 1e-21
0.0, 1e-18
0.0, 1e-15
0.0, 1e-12
0.0, 1e-09
0.0, 1e-06
0.0009765625, 0.001
1.0, 1.0
1024.0, 1000.0
1048576.0, 1000000.0
yotta (YiB, YB):
0.0, 1e-24
0.0, 1e-21
0.0, 1e-18
0.0, 1e-15
0.0, 1e-12
0.0, 1e-09
0.0, 1e-06
0.0009765625, 0.001
1.0, 1.0
1024.0, 1000.0
"""
| bsd-3-clause | 4,388,880,401,686,010,400 | 32.608974 | 78 | 0.622544 | false |
minghuascode/pyj | library/pyjamas/ui/DropHandler.py | 9 | 3204 | # Copyright (C) 2010 Jim Washington
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.ui import Event
DROP_EVENTS = [ "dragenter", "dragover", "dragleave", "drop"]
def fireDropEvent(listeners, event):
etype = DOM.eventGetType(event)
if etype == "dragenter":
for listener in listeners:
listener.onDragEnter(event)
return True
elif etype == "dragover":
for listener in listeners:
listener.onDragOver(event)
return True
elif etype == "dragleave":
for listener in listeners:
listener.onDragLeave(event)
return True
elif etype == "drop":
for listener in listeners:
listener.onDrop(event)
return True
return False
class DropHandler(object):
def __init__(self):
self._dropListeners = []
self.sinkEvents(Event.DROPEVENTS)
def onBrowserEvent(self, event):
event_type = DOM.eventGetType(event)
if event_type in DROP_EVENTS:
return fireDropEvent(self._dropListeners, event)
return False
def addDropListener(self, listener):
self._dropListeners.append(listener)
def removeDropListener(self, listener):
self._dropListeners.remove(listener)
def onDragEnter(self,event):
"""
Decide whether to accept the drop.
You may inspect the event's dataTransfer member.
You may get the types using pyjamas.dnd.getTypes(event).
This event is used to determine whether the drop target may
accept the drop. If the drop is to be accepted, then this event has
to be canceled using DOM.eventPreventDefault(event).
"""
pass
def onDragOver(self,event):
"""
This event determines what feedback is to be shown to the user. If
the event is canceled, then the feedback (typically the cursor) is
updated based on the dropEffect attribute's value, as set by the event
handler; otherwise, the default behavior (typically to do nothing)
is used instead.
Setting event.dataTransfer.dropEffect may affect dropping behavior.
Cancel this event with DOM.eventPreventDefault(event) if you want the
drop to succeed.
"""
pass
def onDragLeave(self,event):
"""
This event happens when the mouse leaves the target element.
"""
pass
def onDrop(self,event):
"""allows the actual drop to be performed. This event also needs to be
canceled, so that the dropEffect attribute's value can be used by the
source (otherwise it's reset).
"""
pass
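# Minimal usage sketch (assumes a widget class that also inherits from a
# Pyjamas panel; the names below are illustrative, not part of this module).
# Cancelling the dragenter, dragover and drop events is what accepts the drop:
#
#   class DropTarget(SimplePanel, DropHandler):
#       def __init__(self):
#           SimplePanel.__init__(self)
#           DropHandler.__init__(self)
#
#       def onDragEnter(self, event):
#           DOM.eventPreventDefault(event)
#
#       def onDragOver(self, event):
#           DOM.eventPreventDefault(event)
#
#       def onDrop(self, event):
#           DOM.eventPreventDefault(event)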
| apache-2.0 | -1,597,006,847,413,705,200 | 31.04 | 78 | 0.659176 | false |
edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/tests/unittests/test_autoparser.py | 1 | 4415 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import pytest
import dhtmlparser
import harvester.edeposit_autoparser as autoparser
# Variables ===================================================================
EXAMPLE_DATA = """
<root>
something something
<sometag>something something</sometag>
<sometag>something something</sometag>
<xax>
something something
<container>i want this</container>
</xax>
<sometag>something something</sometag>
<container id="mycontent">and this</container>
something something
</root>
"""
# Functions & objects =========================================================
def test_create_dom():
data = "<xe>x</xe>"
dom = autoparser._create_dom(data)
assert isinstance(dom, dhtmlparser.HTMLElement)
assert dom.childs[0].parent is dom
# html element from html element
dom = autoparser._create_dom(dom)
assert isinstance(dom, dhtmlparser.HTMLElement)
assert dom.childs[0].parent is dom
def test_locate_element():
data = "<xe>x</xe><xex>x</xex><xer>xx</xer>"
dom = autoparser._create_dom(data)
el = autoparser._locate_element(
dom,
"xx"
)
assert len(el) == 1
assert el[0].isTag()
assert el[0].getTagName() == "xer"
assert el[0].getContent() == "xx"
el = autoparser._locate_element(
dom,
"x"
)
assert len(el) == 2
assert el[0].isTag()
assert el[0].getTagName() == "xe"
assert el[0].getContent() == "x"
def test_locate_element_transformer_param():
data = "<xe>x</xe><xex>x</xex><xer>xx</xer>"
dom = autoparser._create_dom(data)
el = autoparser._locate_element(
dom,
"XX",
lambda x: x.upper()
)
assert len(el) == 1
assert el[0].isTag()
assert el[0].getTagName() == "xer"
assert el[0].getContent() == "xx"
def test_match_elements():
dom = autoparser._create_dom(EXAMPLE_DATA)
matches = {
"first": {
"data": "i want this",
},
"second": {
"data": "and this",
}
}
matching_elements = autoparser._match_elements(dom, matches)
assert matching_elements
assert len(matching_elements) == 2
assert matching_elements["first"].getContent() == matches["first"]["data"]
assert matching_elements["second"].getContent() == matches["second"]["data"]
def test_match_elements_not_found():
dom = autoparser._create_dom(EXAMPLE_DATA)
matches = {
"first": {
"data": "notfound_data",
}
}
with pytest.raises(UserWarning):
autoparser._match_elements(dom, matches)
def test_match_elements_multiple_matches():
dom = autoparser._create_dom(
"""
<root>
something something
<sometag>something something</sometag>
<sometag>something something</sometag>
<xax>
something something
<container>azgabash</container>
</xax>
<sometag>something something</sometag>
<container id="mycontent">azgabash</container>
something something
</root>
"""
)
matches = {
"first": {
"data": "azgabash",
}
}
with pytest.raises(UserWarning):
autoparser._match_elements(dom, matches)
def test_collect_paths():
dom = autoparser._create_dom(EXAMPLE_DATA)
el = dom.find("container")[0]
paths = autoparser._collect_paths(el)
assert paths
assert len(paths) > 5
def test_is_working_path():
dom = autoparser._create_dom(EXAMPLE_DATA)
el = dom.find("container")[0]
paths = autoparser._collect_paths(el)
for path in paths:
assert autoparser._is_working_path(dom, path, el)
def test_select_best_paths():
source = [{
'html': EXAMPLE_DATA,
"vars": {
'first': {
'required': True,
'data': "i want this",
'notfoundmsg': "Can't find variable '$name'."
},
'second': {
'data': 'and this'
},
}
}]
working = autoparser.select_best_paths(source)
assert working
assert len(working) >= 2
| mit | 8,019,252,120,661,142,000 | 23.527778 | 80 | 0.540204 | false |
zerotk/reraiseit | setup.py | 1 | 1574 | #!/bin/env python
from setuptools import setup
setup(
name='zerotk.reraiseit',
use_scm_version=True,
author='Alexandre Andrade',
author_email='kaniabi@gmail.com',
url='https://github.com/zerotk/reraiseit',
description='Reraise exceptions.',
long_description='''A function to re-raise exceptions adding information to the traceback and with unicode support.''',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
include_package_data=True,
packages=['zerotk', 'zerotk.reraiseit'],
namespace_packages=['zerotk'],
keywords=['exception', 'raise', 'reraise'],
install_requires=['six'],
setup_requires=['setuptools_scm', 'pytest-runner'],
tests_require=['pytest', 'coverage', 'cogapp'],
)
| lgpl-3.0 | 9,158,408,608,334,773,000 | 31.122449 | 123 | 0.634053 | false |
rhyolight/nupic | examples/tm/hello_tm.py | 10 | 6174 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
print """
This program shows how to access the Temporal Memory directly by demonstrating
how to create a TM instance, train it with vectors, get predictions, and
inspect the state.
The code here runs a very simple version of sequence learning, with one
cell per column. The TM is trained with the simple sequence A->B->C->D->E
HOMEWORK: once you have understood exactly what is going on here, try changing
cellsPerColumn to 4. What is the difference between once cell per column and 4
cells per column?
PLEASE READ THROUGH THE CODE COMMENTS - THEY EXPLAIN THE OUTPUT IN DETAIL
"""
# Can't live without numpy
import numpy
from itertools import izip as zip, count
from nupic.algorithms.temporal_memory import TemporalMemory as TM
# Utility routine for printing the input vector
def formatRow(x):
s = ''
for c in range(len(x)):
if c > 0 and c % 10 == 0:
s += ' '
s += str(x[c])
s += ' '
return s
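# For example, formatRow on a 20-column vector groups the bits in tens:
# formatRow([0] * 10 + [1] * 10) returns '0000000000 1111111111 '.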
# Step 1: create Temporal Pooler instance with appropriate parameters
tm = TM(columnDimensions = (50,),
cellsPerColumn=2,
initialPermanence=0.5,
connectedPermanence=0.5,
minThreshold=8,
maxNewSynapseCount=20,
permanenceIncrement=0.1,
permanenceDecrement=0.0,
activationThreshold=8,
)
# Step 2: create input vectors to feed to the temporal memory. Each input vector
# must be numberOfCols wide. Here we create a simple sequence of 5 vectors
# representing the sequence A -> B -> C -> D -> E
x = numpy.zeros((5, tm.numberOfColumns()), dtype="uint32")
x[0, 0:10] = 1 # Input SDR representing "A", corresponding to columns 0-9
x[1, 10:20] = 1 # Input SDR representing "B", corresponding to columns 10-19
x[2, 20:30] = 1 # Input SDR representing "C", corresponding to columns 20-29
x[3, 30:40] = 1 # Input SDR representing "D", corresponding to columns 30-39
x[4, 40:50] = 1 # Input SDR representing "E", corresponding to columns 40-49
# Step 3: send this simple sequence to the temporal memory for learning
# We repeat the sequence 10 times
for i in range(10):
# Send each letter in the sequence in order
for j in range(5):
activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1])
# The compute method performs one step of learning and/or inference. Note:
# here we just perform learning but you can perform prediction/inference and
# learning in the same step if you want (online learning).
tm.compute(activeColumns, learn = True)
# The following print statements can be ignored.
# Useful for tracing internal states
print("active cells " + str(tm.getActiveCells()))
print("predictive cells " + str(tm.getPredictiveCells()))
print("winner cells " + str(tm.getWinnerCells()))
print("# of active segments " + str(tm.connections.numSegments()))
# The reset command tells the TM that a sequence just ended and essentially
# zeros out all the states. It is not strictly necessary but it's a bit
# messier without resets, and the TM learns quicker with resets.
tm.reset()
#######################################################################
#
# Step 3: send the same sequence of vectors and look at predictions made by
# temporal memory
for j in range(5):
print "\n\n--------","ABCDE"[j],"-----------"
print "Raw input vector : " + formatRow(x[j])
activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1])
# Send each vector to the TM, with learning turned off
tm.compute(activeColumns, learn = False)
# The following print statements prints out the active cells, predictive
# cells, active segments and winner cells.
#
# What you should notice is that the columns where active state is 1
# represent the SDR for the current input pattern and the columns where
# predicted state is 1 represent the SDR for the next expected pattern
print "\nAll the active and predicted cells:"
print("active cells " + str(tm.getActiveCells()))
print("predictive cells " + str(tm.getPredictiveCells()))
print("winner cells " + str(tm.getWinnerCells()))
print("# of active segments " + str(tm.connections.numSegments()))
activeColumnsIndeces = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndeces = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
# Reconstructing the active and inactive columns with 1 as active and 0 as
# inactive representation.
actColState = ['1' if i in activeColumnsIndeces else '0' for i in range(tm.numberOfColumns())]
actColStr = ("".join(actColState))
predColState = ['1' if i in predictedColumnIndeces else '0' for i in range(tm.numberOfColumns())]
predColStr = ("".join(predColState))
# For convenience the cells are grouped
# 10 at a time. When there are multiple cells per column the printout
# is arranged so the cells in a column are stacked together
print "Active columns: " + formatRow(actColStr)
print "Predicted columns: " + formatRow(predColStr)
# predictedCells[c][i] represents the state of the i'th cell in the c'th
# column. To see if a column is predicted, we can simply take the OR
# across all the cells in that column. In numpy we can do this by taking
# the max along axis 1.
| agpl-3.0 | -6,695,195,385,743,187,000 | 39.352941 | 99 | 0.68918 | false |
florian-dacosta/OCB | openerp/addons/test_convert/tests/test_convert.py | 382 | 2303 | import collections
import unittest2
from lxml import etree as ET
from lxml.builder import E
from openerp.tests import common
from openerp.tools.convert import _eval_xml
Field = E.field
Value = E.value
class TestEvalXML(common.TransactionCase):
def eval_xml(self, node, obj=None, idref=None):
return _eval_xml(obj, node, pool=None, cr=self.cr, uid=self.uid,
idref=idref, context=None)
def test_char(self):
self.assertEqual(
self.eval_xml(Field("foo")),
"foo")
self.assertEqual(
self.eval_xml(Field("None")),
"None")
def test_int(self):
self.assertIsNone(
self.eval_xml(Field("None", type='int')),
"what the fuck?")
self.assertEqual(
self.eval_xml(Field(" 42 ", type="int")),
42)
with self.assertRaises(ValueError):
self.eval_xml(Field("4.82", type="int"))
with self.assertRaises(ValueError):
self.eval_xml(Field("Whelp", type="int"))
def test_float(self):
self.assertEqual(
self.eval_xml(Field("4.78", type="float")),
4.78)
with self.assertRaises(ValueError):
self.eval_xml(Field("None", type="float"))
with self.assertRaises(ValueError):
self.eval_xml(Field("Foo", type="float"))
def test_list(self):
self.assertEqual(
self.eval_xml(Field(type="list")),
[])
self.assertEqual(
self.eval_xml(Field(
Value("foo"),
Value("5", type="int"),
Value("4.76", type="float"),
Value("None", type="int"),
type="list"
)),
["foo", 5, 4.76, None])
def test_file(self):
Obj = collections.namedtuple('Obj', 'module')
obj = Obj('test_convert')
self.assertEqual(
self.eval_xml(Field('test_file.txt', type='file'), obj),
'test_convert,test_file.txt')
with self.assertRaises(IOError):
self.eval_xml(Field('test_nofile.txt', type='file'), obj)
@unittest2.skip("not tested")
def test_xml(self):
pass
@unittest2.skip("not tested")
def test_html(self):
pass
| agpl-3.0 | -8,721,784,582,223,784,000 | 26.746988 | 72 | 0.538862 | false |
battlehorse/rhizosphere | appengine/src/py/handlers/showcase/googlecode.py | 1 | 9970 | #!/usr/bin/env python
#
# Copyright 2010 The Rhizosphere Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import os.path
import re
_GDATA_LIBS_FOUND = True
try:
import gdata.auth
import gdata.service
import gdata.alt.appengine
except ImportError:
_GDATA_LIBS_FOUND = False
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from py import rhizoglobals
from py.models import googlecode
class BaseHandler(webapp.RequestHandler):
"""Base class with common methods."""
def _GetGDataClient(self):
client = gdata.service.GDataService(server='code.google.com',
source='Rhizosphere')
gdata.alt.appengine.run_on_appengine(client)
return client
def _Respond(self, template_file, template_values):
path = os.path.join(os.path.dirname(__file__),
'../../../templates/showcase/code/%s' % template_file)
self.response.out.write(template.render(path, template_values))
def _RespondError(self, error_msg):
template_values = rhizoglobals.DefaultTemplate(self.request)
template_values.update({'error': error_msg})
self._QueryStringToTemplate(template_values)
platform, device = rhizoglobals.IdentifyPlatformDevice(self.request)
smartphone = rhizoglobals.IsSmartphone(platform, device)
if smartphone:
self._Respond('codeindexmobile.html', template_values)
else:
self._Respond('codeindex.html', template_values)
def _QueryStringToTemplate(self, template_values):
qs = dict((qp, self.request.get(qp)) for qp in self.request.arguments())
template_values['qs'] = qs
class AuthHandler(BaseHandler):
"""Callback for AuthSub requests to access a user private feeds."""
def _ExtractAndSaveToken(self, client):
auth_token = gdata.auth.extract_auth_sub_token_from_url(self.request.url)
if not auth_token:
return None
session_token = client.upgrade_to_session_token(auth_token)
if session_token and users.get_current_user():
client.token_store.add_token(session_token)
elif session_token:
client.current_token = session_token
return session_token
def get(self):
client = self._GetGDataClient()
session_token = self._ExtractAndSaveToken(client)
if not session_token:
# The user hasn't granted access to his feeds.
self._RespondError('Cannot fulfill the request without '
'access to your Google Code data.')
return
# Sets the auth token in a cookie that will live to the end of the
# browser session.
client.current_token = session_token
self.response.headers.add_header(
'Set-Cookie',
'stk=%s' % base64.b64encode(client.GetAuthSubToken()))
# scrub the AuthSub-specific query parameters.
query_string = []
for query_param in self.request.arguments():
if query_param not in ['token', 'auth_sub_scopes']:
query_string.append('%s=%s' % (query_param, self.request.get(query_param)))
# Redirect back to the fetch handler.
self.redirect('/showcase/code/rhizo?%s' % '&'.join(query_string))
class FetchHandler(BaseHandler):
"""Fetches issues from Google Code and feeds them to a Rhizosphere viz."""
def _RedirectToAuthSub(self, client):
next_url = 'http://%s/showcase/code/auth?%s' % (
rhizoglobals.HostName(), self.request.query_string)
scope = 'http://code.google.com/feeds/issues'
auth_sub_url = client.GenerateAuthSubURL(
next_url, scope, secure=False, session=True)
self.response.set_status(302)
self.response.headers['Location'] = auth_sub_url
self.response.clear()
return
def _GetDateParam(self, param):
date_str = self.request.get(param)
if not date_str:
return None
m = re.search('(\d+)([wmd])', date_str)
if not m:
return None
multiplier = {'w': 7, 'm': 31, 'd': 1}
days_ago = int(m.group(1))*multiplier[m.group(2)]
target_date = datetime.datetime.now() - datetime.timedelta(days_ago)
return target_date.strftime('%Y-%m-%dT%H:%M:%S')
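  # For instance, a query parameter value of '2w' maps to the timestamp of
  # fourteen days ago, '3m' to 93 days ago and '5d' to five days ago.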
def _GetParams(self):
project = self.request.get('p')
if not project:
self._RespondError('You must specify a project name.')
return None
detail = self.request.get('det')
if detail not in ['all', 'compressed']:
detail = 'compressed'
if self.request.get('n') == 'max':
num_results = 10000
else:
num_results = self.request.get_range('n', 1, 10000, 1000)
canned_query = self.request.get('can')
if canned_query not in ['all', 'open', 'owned', 'reported',
'starred', 'new', 'to-verify']:
canned_query = 'all'
created_min = self._GetDateParam('pub')
updated_min = self._GetDateParam('upd')
return {
'project': project,
'detail': detail,
'num_results': num_results,
'canned_query': canned_query,
'created_min': created_min,
'updated_min': updated_min,
}
def _BuildFeedUrl(self, params):
feed_url = '/feeds/issues/p/%s/issues/full?max-results=%d&can=%s' % (
params['project'], params['num_results'], params['canned_query'])
if params.get('created_min'):
feed_url = '%s&published-min=%s' % (feed_url, params['created_min'])
if params.get('updated_min'):
feed_url = '%s&updated-min=%s' % (feed_url, params['updated_min'])
return feed_url
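  # Illustrative result (parameter values are made up): for project 'foo',
  # 1000 results and canned query 'open' this returns
  #   /feeds/issues/p/foo/issues/full?max-results=1000&can=open
  # plus optional published-min / updated-min query parameters.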
def get(self):
client = self._GetGDataClient()
if 'stk' in self.request.cookies:
token = gdata.auth.AuthSubToken()
token.set_token_string(base64.b64decode(self.request.cookies['stk']))
client.current_token = token
params = self._GetParams()
if not params:
# Params are malformed.
return
if (params['canned_query'] in ['owned', 'reported', 'starred'] and
not client.GetAuthSubToken()):
# We need the user credentials to retrieve this kind of feeds.
self._RedirectToAuthSub(client)
return
try:
feed_url = self._BuildFeedUrl(params)
feed = client.GetFeed(feed_url)
except urlfetch.DownloadError, er:
if 'timed out' in er[0]:
self._RespondError('The request timed out. Try again in a few seconds, '
'or set the advanced options to extract fewer '
'issues.')
else:
self._RespondError(
'An error occurred while fetching %s project data. '
'Try again in a few seconds, or set the advanced options to '
'extract fewer issues. (%s)' %
(params['project'], er))
return
except urlfetch.Error, er:
self._RespondError(
'An error occurred while fetching %s project data. '
'Try again in a few seconds, or set the advanced options to '
'extract fewer issues. (%s)' %
(params['project'], er))
return
except gdata.service.RequestError, er:
if er[0]['status'] == 404:
self._RespondError('Project %s does not exist' % params['project'])
else:
self._RespondError(
'Unable to fetch %s project data: %s' % (
params['project'], er[0]['body']))
return
issues = [ googlecode.Issue(entry) for entry in feed.entry]
if not len(issues):
self._RespondError('Your query didn\'t return any result.')
return
template_values = rhizoglobals.DefaultTemplate(self.request)
template_values.update(params)
if self.request.get('gdatadebug') == '1':
template_values['issues'] = issues
self._Respond('codedebug.html', template_values)
else:
platform, device = rhizoglobals.IdentifyPlatformDevice(self.request)
smartphone = rhizoglobals.IsSmartphone(platform, device)
jsonifier = googlecode.JSONHelper()
json_issues = [jsonifier.IssueToJSON(issue) for issue in issues]
stats = googlecode.IssueStats()
stats.Compute(issues)
json_stats = jsonifier.StatsToJSON(stats)
template_values.update({'issues': json_issues,
'stats': json_stats,
'platform': platform,
'device': device,
'smartphone': smartphone})
self._Respond('coderhizo.html', template_values)
class WelcomeHandler(BaseHandler):
"""Serves the welcome page to the Google Code Hosting showcase app."""
def get(self):
template_values = rhizoglobals.DefaultTemplate(self.request)
if not _GDATA_LIBS_FOUND:
self._RespondError("GData libraries not found. "
"Have you included the GData libraries?")
return
platform, device = rhizoglobals.IdentifyPlatformDevice(self.request)
smartphone = rhizoglobals.IsSmartphone(platform, device)
if smartphone:
self._Respond('codeindexmobile.html', template_values)
else:
self._Respond('codeindex.html', template_values)
application = webapp.WSGIApplication(
[('/showcase/code', WelcomeHandler),
('/showcase/code/', WelcomeHandler),
('/showcase/code/rhizo', FetchHandler),
('/showcase/code/auth', AuthHandler),],
debug=rhizoglobals.appenginedebug)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| apache-2.0 | -8,143,730,671,700,434,000 | 33.738676 | 83 | 0.649348 | false |
algorhythms/LintCode | Interleaving Positive and Negative Numbers.py | 1 | 1350 | """
Given an array with positive and negative integers. Re-range it to interleaving with positive and negative integers.
Have you met this question in a real interview? Yes
Example
Given [-1, -2, -3, 4, 5, 6], after re-range, it will be [-1, 5, -2, 4, -3, 6] or any other reasonable answer.
Note
You are not necessary to keep the original order of positive integers or negative integers.
Challenge
Do it in-place and without extra memory.
"""
__author__ = 'Daniel'
class Solution(object):
def rerange(self, A):
"""
Algorithm: Two Pointers
:type A: List[int]
:rtype: None, in-place
"""
n = len(A)
pos_cnt = len(filter(lambda x: x > 0, A))
# expecting positive
pos_expt = True if pos_cnt*2 > n else False
neg = 0 # next negative
pos = 0 # next positive
for i in xrange(n):
while neg < n and A[neg] > 0: neg += 1
while pos < n and A[pos] < 0: pos += 1
if pos_expt:
A[i], A[pos] = A[pos], A[i]
else:
A[i], A[neg] = A[neg], A[i]
if i == neg: neg += 1
if i == pos: pos += 1
pos_expt = not pos_expt
if __name__ == "__main__":
A = [-33, -19, 30, 26, 21, -9]
Solution().rerange(A)
assert A == [-33, 30, -19, 26, -9, 21] | apache-2.0 | -8,921,503,985,788,272,000 | 27.145833 | 116 | 0.531111 | false |
barbuza/django | django/contrib/gis/db/backends/mysql/introspection.py | 700 | 1771 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
# Updating the data_types_reverse dictionary with the appropriate
# type for Geometry fields.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# In order to get the specific geometry type of the field,
# we introspect on the table definition using `DESCRIBE`.
cursor.execute('DESCRIBE %s' %
self.connection.ops.quote_name(table_name))
# Increment over description info until we get to the geometry
# column.
for column, typ, null, key, default, extra in cursor.fetchall():
if column == geo_col:
# Using OGRGeomType to convert from OGC name to Django field.
# MySQL does not support 3D or SRIDs, so the field params
# are empty.
field_type = OGRGeomType(typ).django
field_params = {}
break
finally:
cursor.close()
return field_type, field_params
def supports_spatial_index(self, cursor, table_name):
# Supported with MyISAM, or InnoDB on MySQL 5.7.5+
storage_engine = self.get_storage_engine(cursor, table_name)
return (
(storage_engine == 'InnoDB' and self.connection.mysql_version >= (5, 7, 5)) or
storage_engine == 'MyISAM'
)
| bsd-3-clause | -3,778,621,017,512,529,400 | 42.195122 | 90 | 0.615471 | false |
UK992/servo | etc/wpt-summarize.py | 4 | 1754 | #!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# Usage: python wpt-summarize.py <wpt-log-file> /wpt/test/url.html [--full]
#
# Extract all log lines for a particular test file from a WPT
# log, outputting individual JSON objects that can be manipulated
# with tools like jq. If a particular URL results in no output,
# the URL is likely used as a reference test's reference file,
# so passing `--full` will find any output from Servo process
# command lines that include the URL.
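#
# Illustrative invocation (file and test names are made up):
#   python wpt-summarize.py servo-wpt.log /css/css-flexbox/foo.html > trace.json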
import sys
import json
full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
with open(sys.argv[1]) as f:
data = f.readlines()
thread = None
for entry in data:
entry = json.loads(entry)
if thread and "thread" in entry:
if entry["thread"] == thread:
print(json.dumps(entry))
if "action" in entry and entry["action"] == "test_end":
thread = None
else:
if ("action" in entry and
entry["action"] == "test_start" and
entry["test"] == sys.argv[2]):
thread = entry["thread"]
print(json.dumps(entry))
elif (full_search and
"command" in entry and
sys.argv[2] in entry["command"]):
thread = entry["thread"]
print(json.dumps(entry))
| mpl-2.0 | 1,967,211,925,997,286,400 | 37.130435 | 71 | 0.615165 | false |
fichter/grpc | src/python/grpcio/grpc/framework/base/null.py | 39 | 2106 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Null links that ignore tickets passed to them."""
from grpc.framework.base import interfaces
class _NullForeLink(interfaces.ForeLink):
"""A do-nothing ForeLink."""
def accept_back_to_front_ticket(self, ticket):
pass
def join_rear_link(self, rear_link):
raise NotImplementedError()
class _NullRearLink(interfaces.RearLink):
"""A do-nothing RearLink."""
def accept_front_to_back_ticket(self, ticket):
pass
def join_fore_link(self, fore_link):
raise NotImplementedError()
NULL_FORE_LINK = _NullForeLink()
NULL_REAR_LINK = _NullRearLink()
| bsd-3-clause | 7,337,527,146,759,436,000 | 36.607143 | 72 | 0.763058 | false |
Gjacquenot/PyRayTracer | raytracer/raytrace-receiver2.py | 1 | 38736 | # -*- coding: utf-8 -*-
"""
@author: Jaap Verheggen, Guillaume Jacquenot
"""
from __future__ import division
import vtk
import numpy as np
import vtk.util.numpy_support as ns
import raytracer as ray
cosd = lambda angle: np.cos(angle / 180 * np.pi)
sind = lambda angle: np.sin(angle / 180 * np.pi)
tand = lambda angle: np.tan(angle / 180 * np.pi)
def glyphs(cells, color=(1.0, 1.0, 1.0), size=1):
# Visualize normals as done previously but using refracted or reflected cells
arrow = vtk.vtkArrowSource()
glyphCell = vtk.vtkGlyph3D()
glyphCell.SetInputData(cells)
glyphCell.SetSourceConnection(arrow.GetOutputPort())
glyphCell.SetVectorModeToUseNormal()
glyphCell.SetScaleFactor(size)
glyphMapperCell = vtk.vtkPolyDataMapper()
glyphMapperCell.SetInputConnection(glyphCell.GetOutputPort())
glyphActorCell = vtk.vtkActor()
glyphActorCell.SetMapper(glyphMapperCell)
glyphActorCell.GetProperty().SetColor(color)
return glyphActorCell
def getnormals(surf, vertices=False, flip=False):
# Calculate normals
normals = vtk.vtkPolyDataNormals()
normals.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
normals.SetInputData(surf.GetOutput())
if vertices:
# Enable normal calculation at cell vertices
normals.ComputePointNormalsOn()
# Disable normal calculation at cell centers
normals.ComputeCellNormalsOff()
else:
# Disable normal calculation at cell vertices
normals.ComputePointNormalsOff()
# Enable normal calculation at cell centers
normals.ComputeCellNormalsOn()
# Disable splitting of sharp edges
normals.ConsistencyOn()
normals.SplittingOff()
# Disable global flipping of normal orientation
if flip:
normals.FlipNormalsOn()
print('flip is true')
else:
normals.FlipNormalsOff()
# Enable automatic determination of correct normal orientation
# normals.AutoOrientNormalsOn()
# Perform calculation
normals.Update()
# Create dummy array for glyphs
if vertices:
verticepoints = surf.GetOutput().GetPoints()
normal_polydata = normals.GetOutput()
return normal_polydata, verticepoints
else:
normalcellcenters = vtk.vtkCellCenters()
normalcellcenters.VertexCellsOn()
normalcellcenters.SetInputConnection(normals.GetOutputPort())
normalcellcenters.Update()
#
pointsCellCenters = normalcellcenters.GetOutput(0)
normal_points = vtk.vtkPoints()
normal_points.SetDataTypeToDouble()
# Vectors where intersections are found
normal_vectors = vtk.vtkDoubleArray()
normal_vectors.SetNumberOfComponents(3)
# Loop through all point centers and add a point-actor through 'addPoint'
for idx in range(pointsCellCenters.GetNumberOfPoints()):
normal_points.InsertNextPoint(pointsCellCenters.GetPoint(idx))
normalsurf2 = normalcellcenters.GetOutput().GetCellData().GetNormals().GetTuple(idx)
# Insert the normal vector of the intersection cell in the dummy container
normal_vectors.InsertNextTuple(normalsurf2)
# Need to transform polydatanormals to polydata so I can reuse functions
normal_polydata = vtk.vtkPolyData()
normal_polydata.SetPoints(normal_points)
normal_polydata.GetPointData().SetNormals(normal_vectors)
return normal_polydata, normalcellcenters.GetOutput()
def stop(ren, appendFilter, srf1, srf2):
# rays between two surfaces. A stop.
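    # Cast a ray from every ray point of srf1 along its normal, record where it hits
    # srf2 and draw those segments; the hits are returned but not propagated further.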
obbsurf2 = vtk.vtkOBBTree()
obbsurf2.SetDataSet(srf2['surface'].GetOutput())
obbsurf2.BuildLocator()
# where intersections are found
intersection_points = vtk.vtkPoints()
intersection_points.SetDataTypeToDouble()
# Loop through all of surface1 cell-centers
for idx in range(srf1['raypoints'].GetNumberOfPoints()):
# Get coordinates of surface1 cell center
pointSurf1 = srf1['raypoints'].GetPoint(idx)
# Get incident vector at that cell
normalsurf1 = srf1['rays'].GetPointData().GetNormals().GetTuple(idx)
# Calculate the 'target' of the ray based on 'RayCastLength'
pointRaySurf2 = list(np.array(pointSurf1) + ray.RayCastLength * np.array(normalsurf1))
# Check if there are any intersections for the given ray
if ray.isHit(obbsurf2, pointSurf1, pointRaySurf2):
# Retrieve coordinates of intersection points and intersected cell ids
pointsInter, cellIdsInter = ray.getIntersect(obbsurf2, pointSurf1, pointRaySurf2)
# print(cellIdsInter)
            # Render lines/rays emanating from the source; rays that intersect are drawn
ray.addLine(ren, appendFilter, pointSurf1, pointsInter[0], [1.0, 1.0, 0.0], opacity=0.25)
# Insert the coordinates of the intersection point in the dummy container
intersection_points.InsertNextPoint(pointsInter[0])
return intersection_points
def flat(srf):
# Setup four points
points = vtk.vtkPoints()
points.SetDataTypeToDouble()
points.InsertNextPoint(-0.5 * srf['width'], -0.5 * srf['height'], 0.0)
points.InsertNextPoint(+0.5 * srf['width'], -0.5 * srf['height'], 0.0)
points.InsertNextPoint(+0.5 * srf['width'], +0.5 * srf['height'], 0.0)
points.InsertNextPoint(-0.5 * srf['width'], +0.5 * srf['height'], 0.0)
# Create the polygon
polygon = vtk.vtkPolygon()
polygon.GetPointIds().SetNumberOfIds(4) # make a quad
polygon.GetPointIds().SetId(0, 0)
polygon.GetPointIds().SetId(1, 1)
polygon.GetPointIds().SetId(2, 2)
polygon.GetPointIds().SetId(3, 3)
# Add the polygon to a list of polygons
polygons = vtk.vtkCellArray()
polygons.InsertNextCell(polygon)
# Create a PolyData
polygonPolyData = vtk.vtkPolyData()
polygonPolyData.SetPoints(points)
polygonPolyData.SetPolys(polygons)
# rotate and translate
transform = vtk.vtkTransform()
transform.PostMultiply()
if 'rotateWXYZ' in srf:
transform.RotateWXYZ(srf['rotateWXYZ'][0], srf['rotateWXYZ'][1:])
if 'center' in srf:
transform.Translate(srf['center'])
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
pgt.SetTransform(transform)
pgt.SetInputData(polygonPolyData)
pgt.Update()
extrude = pgt
# Subdivision filters only work on triangles
triangles = vtk.vtkTriangleFilter()
triangles.SetInputConnection(extrude.GetOutputPort())
triangles.Update()
# Lets subdivide it for no reason at all
return triangles
def cylinder(srf):
source = vtk.vtkArcSource()
z1 = srf['curvx'] - (srf['curvx'] / abs(srf['curvx'])) * np.sqrt(srf['curvx']**2 - (srf['width'] / 2)**2) # s +/- sqrt(s^2-(w/2)^2)
x1 = +0.5 * srf['width']
y1 = -0.5 * srf['height']
source.SetCenter(0, y1, srf['curvx'])
source.SetPoint1(x1, y1, z1)
source.SetPoint2(-x1, y1, z1)
source.SetResolution(srf['resolution'])
# Linear extrude arc
extrude = vtk.vtkLinearExtrusionFilter()
extrude.SetInputConnection(source.GetOutputPort())
extrude.SetExtrusionTypeToVectorExtrusion()
extrude.SetVector(0, 1, 0)
extrude.SetScaleFactor(srf['height'])
extrude.Update()
# Rotate and translate
transform = vtk.vtkTransform()
transform.PostMultiply()
if 'rotateWXYZ' in srf:
transform.RotateWXYZ(srf['rotateWXYZ'][0], srf['rotateWXYZ'][1:])
if 'center' in srf:
transform.Translate(srf['center'])
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
pgt.SetTransform(transform)
pgt.SetInputData(extrude.GetOutput())
pgt.Update()
extrude = pgt
# Subdivision filters only work on triangles
triangles = vtk.vtkTriangleFilter()
triangles.SetInputConnection(extrude.GetOutputPort())
triangles.Update()
# Lets subdivide it for no reason at all
return triangles
def asphere(srf):
# Delaunay mesh
dr = (srf['diameter']) / 2 / (srf['resolution'] - 1) # radius
R = srf['coeffs']['R']
k = srf['coeffs']['k']
polyVal = lambda x: sum([srf['coeffs']['A' + str(p)] * x**p for p in range(2, 14, 2)])
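    # Even-asphere sag evaluated in the loop below:
    #   z(r) = r^2 / (R * (1 + sqrt(1 - (1 + k) * r^2 / R^2))) + A2*r^2 + A4*r^4 + ... + A12*r^12
    # with R the vertex radius of curvature and k the conic constant.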
sizep = [sum([1 + x * 5 for x in range(srf['resolution'])]), 3]
array_xyz = np.empty(sizep)
cnt = 0
for ii in range(srf['resolution']):
for ss in range(1 + ii * 5):
phi = ss * 2 / (1 + ii * 5)
r = dr * ii
xx = np.sin(np.pi * phi) * r
yy = np.cos(np.pi * phi) * r
zz = r**2 / (R * (1 + np.sqrt(1 - (1 + k) * r**2 / R**2))) + polyVal(r)
array_xyz[cnt] = np.array([xx, yy, zz])
cnt += 1
# Second pass optimization
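    # When intersection points from a previous trace are supplied ('raypoints'), the
    # sampling is refined locally around those hits so the Delaunay mesh is much denser
    # where rays actually strike the surface.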
if 'raypoints' in srf:
# Opposite transformations
i_points = vtk.vtkPolyData()
i_points.SetPoints(srf['raypoints'])
transform = vtk.vtkTransform()
transform.PostMultiply()
if 'center' in srf:
transform.Translate([-p for p in srf['center']])
if 'rotateWXYZ' in srf:
transform.RotateWXYZ(-srf['rotateWXYZ'][0], srf['rotateWXYZ'][1:])
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
pgt.SetTransform(transform)
pgt.SetInputData(i_points)
pgt.Update()
# Get intersection point array
parray_xyz = ns.vtk_to_numpy(i_points.GetPoints().GetData())
        # Add 2nd pass arrays at these points
refine = srf['resolution'] * 100
res = 4 # was srf['resolution']
d2r = (srf['diameter']) / 2 / (refine - 1)
for xyz in parray_xyz:
cnt = 0
rxy = np.hypot(xyz[0], xyz[1])
if rxy > d2r * (res + 1):
phic = np.arctan2(xyz[0], xyz[1])
r_range = int(np.ceil((rxy / (srf['diameter'] / 2)) * refine))
# Counter to get size of array
var = 0
for ii in range(r_range - res, r_range + res): # was 10
phi_range = int(np.ceil((phic / (2 * np.pi)) * (1 + ii * 5)))
for ss in range(phi_range - res, phi_range + res):
var += 1
sizep = [var, 3]
arr2nd_xyz = np.empty(sizep)
for ii in range(r_range - res, r_range + res): # was 10
phi_range = int(np.ceil((phic / (2 * np.pi)) * (1 + ii * 5)))
for ss in range(phi_range - res, phi_range + res):
phi = ss * 2 / (1 + ii * 5)
r = d2r * ii
xx = np.sin(np.pi * phi) * r
yy = np.cos(np.pi * phi) * r
zz = r**2 / (R * (1 + np.sqrt(1 - (1 + k) * r**2 / R**2))) + polyVal(r)
arr2nd_xyz[cnt] = np.array([xx, yy, zz])
cnt += 1
else:
sizep = [sum([1 + x * 5 for x in range(srf['resolution'])]), 3]
arr2nd_xyz = np.empty(sizep)
cnt = 0
for ii in range(srf['resolution']):
for ss in range(1 + ii * 5):
phi = ss * 2 / (1 + ii * 5)
r = d2r * ii
xx = np.sin(np.pi * phi) * r
yy = np.cos(np.pi * phi) * r
zz = r**2 / (R * (1 + np.sqrt(1 - (1 + k) * r**2 / R**2))) + polyVal(r)
arr2nd_xyz[cnt] = np.array([xx, yy, zz])
cnt += 1
array_xyz = np.vstack((array_xyz, arr2nd_xyz))
# Delete non unique values
b = np.ascontiguousarray(array_xyz).view(np.dtype((np.void, array_xyz.dtype.itemsize * array_xyz.shape[1])))
_, idx = np.unique(b, return_index=True)
unique_a = array_xyz[idx]
# I need to sort this in spherical coordinates, first phi, then theta then r
rtp_a = ray.Cartesian2Spherical(unique_a[1:]) # Skip 0,0,0
rtp_a = np.vstack((np.array([0.0, 0.0, 0.0]), rtp_a))
# Now sort
ind = np.lexsort((rtp_a[:, 2], rtp_a[:, 1], rtp_a[:, 0])) # Sort by a, then by b
sorted_rtp = rtp_a[ind]
sorted_xyz = ray.Spherical2Cartesian(sorted_rtp)
else:
sorted_xyz = array_xyz
# numpy array to vtk array
pcoords = ns.numpy_to_vtk(num_array=sorted_xyz, deep=True, array_type=vtk.VTK_DOUBLE)
# Shove coordinates in points container
points = vtk.vtkPoints()
points.SetDataTypeToDouble()
points.SetData(pcoords)
# Create a polydata object
point_pd = vtk.vtkPolyData()
# Set the points and vertices we created as the geometry and topology of the polydata
point_pd.SetPoints(points)
# make the delaunay mesh
delaunay = vtk.vtkDelaunay2D()
if vtk.VTK_MAJOR_VERSION < 6:
delaunay.SetInput(point_pd)
else:
delaunay.SetInputData(point_pd)
# delaunay.SetTolerance(0.00001)
delaunay.Update()
# Rotate and translate
transform = vtk.vtkTransform()
transform.PostMultiply()
if 'rotateWXYZ' in srf:
transform.RotateWXYZ(srf['rotateWXYZ'][0], srf['rotateWXYZ'][1:])
if 'center' in srf:
transform.Translate(srf['center'])
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
pgt.SetTransform(transform)
if vtk.VTK_MAJOR_VERSION < 6:
pgt.SetInput(delaunay.GetOutput())
else:
pgt.SetInputData(delaunay.GetOutput())
pgt.Update()
delaunay = pgt
# Rotate polydata
return delaunay
def flatcircle(srf):
# Create rotational filter of a straight line
dx = (srf['diameter']) / 2 / (srf['resolution'] - 1) # radius
# print(dx, dx * srf['resolution'])
points = vtk.vtkPoints()
line = vtk.vtkLine()
lines = vtk.vtkCellArray()
for ii in range(srf['resolution']):
xx, yy, zz = dx * ii, 0, 0
points.InsertNextPoint(xx, yy, zz)
if ii != (srf['resolution'] - 1):
line.GetPointIds().SetId(0, ii)
line.GetPointIds().SetId(1, ii + 1)
lines.InsertNextCell(line)
# Create a PolyData
polygonPolyData = vtk.vtkPolyData()
polygonPolyData.SetPoints(points)
polygonPolyData.SetLines(lines)
# Radial extrude polygon
extrude = vtk.vtkRotationalExtrusionFilter()
if vtk.VTK_MAJOR_VERSION < 6:
extrude.SetInput(polygonPolyData)
else:
extrude.SetInputData(polygonPolyData)
extrude.CappingOff()
extrude.SetResolution(srf['angularresolution'])
extrude.Update()
# It would be best to rotate it by 360/res, so simple rays
    # don't hit edges and low res can be used
rotate = vtk.vtkTransform()
rotate.RotateWXYZ(180 / srf['angularresolution'], 0, 0, 1)
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetTransform(rotate)
if vtk.VTK_MAJOR_VERSION < 6:
pgt.SetInput(extrude.GetOutput())
else:
pgt.SetInputData(extrude.GetOutput())
pgt.Update()
extrude = pgt
# stretch, rotate and translate
transform = vtk.vtkTransform()
transform.PostMultiply()
if 'scalex' in srf:
transform.Scale(srf['scalex'], 1.0, 1.0)
if 'rotateWXYZ' in srf:
transform.RotateWXYZ(srf['rotateWXYZ'][0], srf['rotateWXYZ'][1:])
if 'center' in srf:
transform.Translate(srf['center'])
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
pgt.SetTransform(transform)
if vtk.VTK_MAJOR_VERSION < 6:
pgt.SetInput(extrude.GetOutput())
else:
pgt.SetInputData(extrude.GetOutput())
pgt.Update()
extrude = pgt
# Subdivision filters only work on triangles
triangles = vtk.vtkTriangleFilter()
triangles.SetInputConnection(extrude.GetOutputPort())
triangles.Update()
# Create a mapper and actor
return triangles
def sphere(srf):
# Create and configure sphere, using delaunay mesh
dr = (srf['diameter']) / 2 / (srf['resolution'] - 1) # radius
R = srf['radius']
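    # Spherical sag evaluated below: z(r) = R * (1 - sqrt(1 - (r / R)^2))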
sizep = [sum([1 + x * 5 for x in range(srf['resolution'])]), 3]
array_xyz = np.empty(sizep)
cnt = 0
for ii in range(srf['resolution']):
for ss in range(1 + ii * 5):
phi = ss * 2 / (1 + ii * 5)
r = dr * ii
xx = np.sin(np.pi * phi) * r
yy = np.cos(np.pi * phi) * r
zz = R * (1 - np.sqrt(1 - (r / R)**2))
array_xyz[cnt] = np.array([xx, yy, zz])
cnt += 1
# Second pass optimization
if 'raypoints' in srf:
# Opposite transformations
i_points = vtk.vtkPolyData()
i_points.SetPoints(srf['raypoints'])
transform = vtk.vtkTransform()
transform.PostMultiply()
if 'center' in srf:
transform.Translate([-p for p in srf['center']])
if 'rotateWXYZ' in srf:
transform.RotateWXYZ(-srf['rotateWXYZ'][0], srf['rotateWXYZ'][1:])
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
pgt.SetTransform(transform)
pgt.SetInputData(i_points)
if vtk.VTK_MAJOR_VERSION < 6:
pgt.SetInput(i_points)
else:
pgt.SetInputData(i_points)
pgt.Update()
# Get intersection point array
parray_xyz = ns.vtk_to_numpy(pgt.GetOutput().GetPoints().GetData())
        # Add 2nd pass arrays at these points
refine = srf['resolution'] * 100
res = 4 # was srf['resolution']
d2r = (srf['diameter']) / 2 / (refine - 1)
for xyz in parray_xyz:
cnt = 0
rxy = np.hypot(xyz[0], xyz[1])
if rxy > d2r * (res + 1):
phic = np.arctan2(xyz[0], xyz[1])
r_range = int(np.ceil((rxy / (srf['diameter'] / 2)) * refine))
# Counter to get size of array
var = 0
for ii in range(r_range - res, r_range + res): # was 10
phi_range = int(np.ceil((phic / (2 * np.pi)) * (1 + ii * 5)))
for ss in range(phi_range - res, phi_range + res):
var += 1
sizep = [var, 3]
arr2nd_xyz = np.empty(sizep)
for ii in range(r_range - res, r_range + res): # was 10
phi_range = int(np.ceil((phic / (2 * np.pi)) * (1 + ii * 5)))
for ss in range(phi_range - res, phi_range + res):
phi = ss * 2 / (1 + ii * 5)
r = d2r * ii
xx = np.sin(np.pi * phi) * r
yy = np.cos(np.pi * phi) * r
zz = R * (1 - np.sqrt(1 - (r / R)**2))
arr2nd_xyz[cnt] = np.array([xx, yy, zz])
cnt += 1
else:
sizep = [sum([1 + x * 5 for x in range(srf['resolution'])]), 3]
arr2nd_xyz = np.empty(sizep)
cnt = 0
for ii in range(srf['resolution']):
for ss in range(1 + ii * 5):
phi = ss * 2 / (1 + ii * 5)
r = d2r * ii
xx = np.sin(np.pi * phi) * r
yy = np.cos(np.pi * phi) * r
zz = R * (1 - np.sqrt(1 - (r / R)**2))
arr2nd_xyz[cnt] = np.array([xx, yy, zz])
cnt += 1
array_xyz = np.vstack((array_xyz, arr2nd_xyz))
# Delete non unique values
b = np.ascontiguousarray(array_xyz).view(np.dtype((np.void, array_xyz.dtype.itemsize * array_xyz.shape[1])))
_, idx = np.unique(b, return_index=True)
unique_a = array_xyz[idx]
# I need to sort this in spherical coordinates, first phi, then theta then r
rtp_a = ray.Cartesian2Spherical(unique_a[1:]) # Skip 0,0,0
rtp_a = np.vstack((np.array([0.0, 0.0, 0.0]), rtp_a))
# Now sort
ind = np.lexsort((rtp_a[:, 2], rtp_a[:, 1], rtp_a[:, 0])) # Sort by a, then by b
sorted_xyz = unique_a[ind]
else:
sorted_xyz = array_xyz
# numpy array to vtk array
pcoords = ns.numpy_to_vtk(num_array=sorted_xyz, deep=True, array_type=vtk.VTK_DOUBLE)
# Shove coordinates in points container
points = vtk.vtkPoints()
points.SetDataTypeToDouble()
points.SetData(pcoords)
# Create a polydata object
point_pd = vtk.vtkPolyData()
# Set the points and vertices we created as the geometry and topology of the polydata
point_pd.SetPoints(points)
# Make the delaunay mesh
delaunay = vtk.vtkDelaunay2D()
delaunay.SetInputData(point_pd)
# delaunay.SetTolerance(0.00001)
delaunay.Update()
# Rotate and translate
transform = vtk.vtkTransform()
transform.PostMultiply()
if 'rotateWXYZ' in srf:
transform.RotateWXYZ(srf['rotateWXYZ'][0], srf['rotateWXYZ'][1:])
if 'center' in srf:
transform.Translate(srf['center'])
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
pgt.SetTransform(transform)
pgt.SetInputData(delaunay.GetOutput())
pgt.Update()
delaunay = pgt
# rotate polydata
return delaunay
def objectsource(srf1, srf2, ratio=0.8):
    # Make points on surface 1 (should this be elliptical?)
    # Use the given points, don't create them, so all points lie on vertices of the surf1 surface
sourcepoints = [srf1['raypoints'].GetPoint(i) for i in range(srf1['raypoints'].GetNumberOfPoints())]
# Make points on target
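    # Aim every source point at five targets: the centre of srf2 plus four points at
    # 'ratio' of its half-aperture along +x, +y, -x and -y.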
targetlist = [[0.0, 0.0, 0.0]]
if 'diameter' in srf2:
targetlist.append([ratio * srf2['diameter'] / 2, 0.0, 0.0])
targetlist.append([0.0, ratio * srf2['diameter'] / 2, 0.0])
targetlist.append([-ratio * srf2['diameter'] / 2, 0.0, 0.0])
targetlist.append([0.0, -ratio * srf2['diameter'] / 2, 0.0])
elif 'width' in srf2 and 'height' in srf2:
targetlist.append([ratio * srf2['width'] / 2, 0.0, 0.0])
targetlist.append([0.0, ratio * srf2['height'] / 2, 0.0])
targetlist.append([-ratio * srf2['width'] / 2, 0.0, 0.0])
targetlist.append([0.0, -ratio * srf2['height'] / 2, 0.0])
else:
print('Could not make targetlist in objectsource')
return
# Transform points, I'm going to cheat and use the vtk functions
points = vtk.vtkPoints()
points.SetDataTypeToDouble()
for tl in targetlist:
points.InsertNextPoint(tl)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
transform = vtk.vtkTransform()
transform.PostMultiply()
if 'scalex' in srf2:
transform.Scale(srf2['scalex'], 1.0, 1.0)
if 'rotateWXYZ' in srf2:
transform.RotateWXYZ(srf2['rotateWXYZ'][0], srf2['rotateWXYZ'][1:])
if 'centerH' in srf2:
transform.Translate(srf2['centerH'])
pgt = vtk.vtkTransformPolyDataFilter()
pgt.SetOutputPointsPrecision(vtk.vtkAlgorithm.DOUBLE_PRECISION)
pgt.SetTransform(transform)
pgt.SetInputData(polydata)
pgt.Update()
# And now I'm going to extract the points again
targetpoints = [pgt.GetOutput().GetPoint(i) for i in range(pgt.GetOutput().GetNumberOfPoints())]
# Get normal vector from source to target, 5 vectors per point
object_points = vtk.vtkPoints()
object_points.SetDataTypeToDouble()
object_normalvectors = vtk.vtkDoubleArray()
object_normalvectors.SetNumberOfComponents(3)
for sp in sourcepoints:
for tp in targetpoints:
vec = (tp[0] - sp[0], tp[1] - sp[1], tp[2] - sp[2])
object_normalvectors.InsertNextTuple(list(vec / np.linalg.norm(vec)))
object_points.InsertNextPoint(sp)
object_polydata = vtk.vtkPolyData()
object_polydata.SetPoints(object_points)
object_polydata.GetPointData().SetNormals(object_normalvectors)
return object_polydata, object_points
def pointsource(srf, simple=True):
if simple:
anglex = srf['anglex'] / 180 * np.pi
angley = srf['angley'] / 180 * np.pi
tuples = [(0, 0, 1)]
phix = [1, 0, -1, 0]
phiy = [0, 1, 0, -1]
theta = [anglex, angley, anglex, angley]
x = phix * np.sin(theta)
y = phiy * np.sin(theta)
z = np.cos(theta)
tuples = tuples + ([(xx, yy, zz) for xx, yy, zz in zip(x, y, z)])
# for tp in tuples: #has to be one
# print(np.sqrt(tp[0]**2+tp[1]**2+tp[2]**2))
else:
res = [4, 6, 8]
anglex = srf['anglex'] / 180 * np.pi
angley = srf['angley'] / 180 * np.pi
# Center line
tuples = [(0, 0, 1)]
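        # Rays are generated on rings of an elliptical cone; theta below is the
        # elliptical half-angle interpolated between 'anglex' and 'angley' at azimuth phi.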
for rr in res:
# Define pointsource, cylindrical
thetax = (res.index(rr) + 1) / len(res) * anglex
thetay = (res.index(rr) + 1) / len(res) * angley
phi = np.arange(rr) * (2 * np.pi / rr)
theta = thetax * thetay / np.hypot(thetay * np.cos(phi), thetax * np.sin(phi))
x = np.cos(phi) * np.sin(theta)
y = np.sin(phi) * np.sin(theta)
z = np.cos(theta)
tuples = tuples + ([(xx, yy, zz) for xx, yy, zz in zip(x, y, z)])
intersection_points = vtk.vtkPoints()
intersection_points.SetDataTypeToDouble()
normal_vectors = vtk.vtkDoubleArray()
normal_vectors.SetNumberOfComponents(3)
for sp in srf['sourcepoints']:
for tp in tuples:
normal_vectors.InsertNextTuple(tp)
intersection_points.InsertNextPoint(sp)
normal_polydata = vtk.vtkPolyData()
normal_polydata.SetPoints(intersection_points)
normal_polydata.GetPointData().SetNormals(normal_vectors)
return normal_polydata, intersection_points
def surfaceActor(ren, appendFilter, srf):
# Create a mapper and actor
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(srf['surface'].GetOutput())
# mapper.SetInputConnection(delaunay.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor([0.0, 0.0, 1.0]) # set color to blue
actor.GetProperty().EdgeVisibilityOn() # show edges/wireframe
actor.GetProperty().SetEdgeColor([1.0, 1.0, 1.0]) # render edges as white
# Plot and save
ren.AddActor(actor)
ren.ResetCamera()
appendFilter.AddInputData(srf['surface'].GetOutput())
appendFilter.Update()
def shape(ren, appendFilter, srf, addActor=False):
if srf['shape'] == 'sphere':
srf['surface'] = sphere(srf)
elif srf['shape'] == 'flat':
srf['surface'] = flat(srf)
elif srf['shape'] == 'flatcircle':
srf['surface'] = flatcircle(srf)
elif srf['shape'] == 'asphere':
srf['surface'] = asphere(srf)
elif srf['shape'] == 'cylinder':
srf['surface'] = cylinder(srf)
elif srf['shape'] == 'pointsource':
pass
else:
print("Couldn't understand shape")
return
if addActor:
surfaceActor(ren, appendFilter, srf)
return srf
def trace(ren, appendFilter, srf1, srf2, addActor=True):
obbsurf2 = vtk.vtkOBBTree()
obbsurf2.SetDataSet(srf2['surface'].GetOutput())
obbsurf2.BuildLocator()
    # I don't know from where the ray is coming; use 'curv' for this as a hack
if srf2['shape'] == 'sphere':
if srf2['curv'] == 'positive':
Flip = False
elif srf2['curv'] == 'negative':
Flip = True
else:
print('Misunderstood curv in trace')
elif srf2['shape'] == 'asphere':
if srf2['rn'] >= srf1['rn']:
Flip = False
elif srf2['rn'] < srf1['rn']:
Flip = True
elif srf2['shape'] == 'cylinder':
if srf2['rn'] >= srf1['rn']:
Flip = False
elif srf2['rn'] < srf1['rn']:
Flip = True
elif srf2['shape'] == 'flat':
if srf2['rn'] >= srf1['rn']:
Flip = False
elif srf2['rn'] < srf1['rn']:
Flip = True
else:
Flip = False
srf2['normals'], srf2['normalpoints'] = getnormals(srf2['surface'], flip=Flip)
# #Sometimes, something goes wrong with the number of cells
# count1 = srf2['normals'].GetCellData().GetNormals().GetNumberOfTuples()
# count2 = obbsurf2.GetDataSet().GetNumberOfCells()
# assert count1 == count2, 'The number of normals does not match the number of cells in the obbtree'
# where intersections are found
intersection_points = vtk.vtkPoints()
intersection_points.SetDataTypeToDouble()
# normal vectors at intersection
normal_vectors = vtk.vtkDoubleArray()
normal_vectors.SetNumberOfComponents(3)
# normals of refracted vectors
reflect_vectors = vtk.vtkDoubleArray()
reflect_vectors.SetNumberOfComponents(3)
# Loop through all of surface1 cell-centers
for idx in range(srf1['raypoints'].GetNumberOfPoints()):
# Get coordinates of surface1 cell center
pointSurf1 = srf1['raypoints'].GetPoint(idx)
# Get incident vector at that cell
normalsurf1 = srf1['rays'].GetPointData().GetNormals().GetTuple(idx)
# Calculate the 'target' of the ray based on 'RayCastLength'
pointRaySurf2 = list(np.array(pointSurf1) + ray.RayCastLength * np.array(normalsurf1))
# Check if there are any intersections for the given ray
if ray.isHit(obbsurf2, pointSurf1, pointRaySurf2):
# Retrieve coordinates of intersection points and intersected cell ids
pointsInter, cellIdsInter = ray.getIntersect(obbsurf2, pointSurf1, pointRaySurf2)
# print(cellIdsInter)
# ray.addPoint(ren, False, pointsInter[0], [0.5, 0.5, 0.5])
            # Render lines/rays emanating from the source; rays that intersect are drawn
if addActor:
ray.addLine(ren, appendFilter, pointSurf1, pointsInter[0], [1.0, 1.0, 0.0], opacity=0.5)
# Insert the coordinates of the intersection point in the dummy container
intersection_points.InsertNextPoint(pointsInter[0])
# Get the normal vector at the surf2 cell that intersected with the ray
normalsurf2 = srf2['normals'].GetPointData().GetNormals().GetTuple(cellIdsInter[0])
# Insert the normal vector of the intersection cell in the dummy container
normal_vectors.InsertNextTuple(normalsurf2)
# Calculate the incident ray vector
vecInc2 = np.array(pointRaySurf2) - np.array(pointSurf1)
vecInc = list(vecInc2 / np.linalg.norm(vecInc2))
# Calculate the reflected ray vector
if srf2['type'] == 'lens':
vecRef = ray.calcVecRefract(vecInc / np.linalg.norm(vecInc), normalsurf2, srf1['rn'], srf2['rn']) # refract
            elif srf2['type'] in ('stop', 'mirror', 'source'):
vecRef = ray.calcVecReflect(vecInc / np.linalg.norm(vecInc), normalsurf2) # reflect
# Add to container
reflect_vectors.InsertNextTuple(vecRef)
# store intersection points
# intersection_points.Update()
# Create a dummy 'vtkPolyData' to store refracted vecs
reflect_polydata = vtk.vtkPolyData()
reflect_polydata.SetPoints(intersection_points)
reflect_polydata.GetPointData().SetNormals(reflect_vectors)
# Create a dummy 'vtkPolyData' to store normal vecs
normal_polydata = vtk.vtkPolyData()
normal_polydata.SetPoints(intersection_points)
normal_polydata.GetPointData().SetNormals(normal_vectors)
return intersection_points, reflect_polydata, normal_polydata
def run(surfaces, project, Directory, scene, refine=True, plot=True):
# Write output to vtp file
writer = vtk.vtkXMLPolyDataWriter()
filename = Directory + project + "%04d.vtp" % scene
writer.SetFileName(filename)
appendFilter = vtk.vtkAppendPolyData()
# Create a render window
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(600, 600)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
# Set camera position
camera = ren.MakeCamera()
camera.SetPosition(0, 100, 50)
camera.SetFocalPoint(0.0, 0, 0.0)
camera.SetViewAngle(0.0)
camera.SetParallelProjection(1)
ren.SetActiveCamera(camera)
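    # Trace the surfaces in order: surface 0 must be the source, the last surface acts
    # as the stop/receiver, and every surface in between is shaped and traced against
    # the rays leaving the previous one.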
traceit = range(len(surfaces))
for tri in traceit:
# Surface one is source
if tri == 0:
assert surfaces[tri]['type'] == 'source', 'surface zero needs to be source'
if surfaces[tri]['shape'] == 'point': # Use point source
surfaces[tri]['rays'], surfaces[tri]['raypoints'] = pointsource(surfaces[0], simple=True)
else: # Use object source
surfaces[tri] = shape(ren, appendFilter, surfaces[tri], addActor=True)
surfaces[tri]['rays'], surfaces[tri]['raypoints'] = getnormals(surfaces[tri]['surface'], vertices=True, flip=False)
surfaces[tri]['rays'], surfaces[tri]['raypoints'] = objectsource(surfaces[tri], surfaces[tri + 1], ratio=0.3)
glyphsa = glyphs(surfaces[tri]['rays'], color=[0.0, 1.0, 0.0]) # Green
ren.AddActor(glyphsa)
renWin.Render()
elif tri == len(surfaces) - 1:
surfaces[tri] = shape(ren, appendFilter, surfaces[tri], addActor=False) # TODO should be True
surfaces[tri]['raypoints'] = stop(ren, appendFilter, surfaces[tri - 1], surfaces[tri])
renWin.Render()
print('Tracing {0} and {1}'.format(tri - 1, tri))
else:
if refine: # If refine, shape again, trace again
surfaces[tri] = shape(ren, appendFilter, surfaces[tri], addActor=False)
surfaces[tri]['raypoints'], surfaces[tri]['rays'], surfaces[tri]['normals'] = trace(ren, appendFilter, surfaces[tri - 1], surfaces[tri], addActor=False)
surfaces[tri] = shape(ren, appendFilter, surfaces[tri], addActor=True)
surfaces[tri]['raypoints'], surfaces[tri]['rays'], surfaces[tri]['normals'] = trace(ren, appendFilter, surfaces[tri - 1], surfaces[tri])
# Plot glyphs
glyphsa = glyphs(surfaces[tri]['rays'], color=[0.0, 1.0, 0.0], size=1) # Green
ren.AddActor(glyphsa)
glyphsc = glyphs(surfaces[tri]['normals'], color=[0.0, 0.0, 1.0], size=1) # Blue
ren.AddActor(glyphsc)
renWin.Render()
print('Tracing {0} and {1}'.format(tri - 1, tri))
# Write output to vtp file
# appendFilter.Update()
# polydatacontainer = appendFilter
# writer.SetInputData(polydatacontainer.GetOutput())
# writer.Write()
#
# Check results in viewer, by exit screen, proceed
if plot:
iren.Start()
def main():
LBF_254_050 = {'tc': 6.5,
'f': 50.0,
'fb': 46.4,
'R1': -172,
'R2': 30.1,
'rn': 1.5168,
'diameter': 25.4}
LK1037L1 = {'f': -19.0,
'fb': -25.30, # is -20.3
'R1': 'flat',
'R2': 9.8,
'tc': 2.0,
'rn': 1.5168,
'width': 19.0,
'height': 21.0} # extrude over height
LJ1309L1 = {'f': 200.0,
'fb': 189.5, # is -20.3
'R1': 'flat',
'R2': 103.4,
'tc': 15.9,
'rn': 1.5168,
'width': 100,
'height': 90} # extrude over height
LJ1728L1 = {'f': 50.99,
'fb': 36.7, # is -20.3
'R1': 'flat',
'R2': 26.4,
'tc': 21.6,
'rn': 1.5168,
'width': 50.5,
'height': 52.7} # extrude over height
so1h = 100
so1v = 52.1239
si = 2000
    angle = 2.0  # degrees
surfaces = [{'type': 'source',
'shape': 'flat',
'center': [sind(angle) * si, 0.0, si - cosd(angle) * si],
'width': 2 * tand(0.1) * si, #
'height': 2 * tand(4.44) * si, # could be anything really
'rotateWXYZ': [-angle, 0, 1, 0], # Normal is [0, 0, -1]
'rn': 1.0},
{'type': 'lens',
'shape': 'cylinder',
'centerH': [0.0, 0.0, si - so1h],
'center': [0.0, 0.0, si - (so1h - (LJ1309L1['f'] - LJ1309L1['fb']) + LJ1309L1['tc'])],
                 # 'rotateWXYZ':[90.0, 0, 0, 1], # Normal is [0, 0, -1] in degrees
'width': LJ1309L1['width'],
'height': LJ1309L1['height'],
'resolution': 1000,
'rn': LJ1309L1['rn'], # n-bk7
'curvx': LJ1309L1['R2']},
{'type': 'lens',
'shape': 'flat',
'centerH': [0.0, 0.0, si - so1h], # 2.76 = 4.36
'center': [0.0, 0.0, si - (so1h - (LJ1309L1['f'] - LJ1309L1['fb']))], # 2.91
'width': LJ1309L1['width'],
'height': LJ1309L1['height'],
# 'rotateWXYZ':[90, 0, 0, 1], # Normal is [0, 0, -1]
'rn': 1.0},
{'type': 'lens',
'shape': 'cylinder',
'centerH': [0.0, 0.0, si - so1v],
'center': [0.0, 0.0, si - (so1v - (LJ1728L1['f'] - LJ1728L1['fb']) + LJ1728L1['tc'])],
                 'rotateWXYZ':[90.0, 0, 0, 1], # Normal is [0, 0, -1] in degrees
'width': LJ1728L1['width'],
'height': LJ1728L1['height'],
'resolution': 1000,
'rn': LJ1728L1['rn'], # n-bk7
'curvx': LJ1728L1['R2']},
{'type': 'lens',
'shape': 'flat',
'centerH': [0.0, 0.0, si - so1v], # 2.76 = 4.36
'center': [0.0, 0.0, si - (so1v - (LJ1728L1['f'] - LJ1728L1['fb']))], # 2.91
'width': LJ1728L1['width'],
'height': LJ1728L1['height'],
'rotateWXYZ':[90, 0, 0, 1], # Normal is [0, 0, -1]
'rn': 1.0},
{'type': 'stop',
'shape': 'flat',
'center': [0.0, 0.0, si],
# 'rotateWXYZ': [45, 0, 1, 0], # Normal is [0, 0, -1]
'width': 25.0,
'height': 25.0,
'rn': 1.0}]
import os
project = 'receiverB'
Directory = os.getcwd()
run(surfaces, project, Directory, 0, plot=True)
if __name__ == "__main__":
main()
| mit | 6,415,017,978,709,350,000 | 40.786408 | 168 | 0.579177 | false |
charlescearl/VirtualMesos | third_party/boto-2.0b2/boto/gs/connection.py | 5 | 2004 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.s3.connection import S3Connection
from boto.s3.connection import SubdomainCallingFormat
from boto.gs.bucket import Bucket
class GSConnection(S3Connection):
DefaultHost = 'commondatastorage.googleapis.com'
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/'):
S3Connection.__init__(self, gs_access_key_id, gs_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory, calling_format, path,
"google", boto.gs.bucket.Bucket)
| apache-2.0 | 2,570,334,947,547,340,000 | 49.1 | 77 | 0.716567 | false |
feigames/Odoo | addons/marketing_campaign/report/campaign_analysis.py | 379 | 5310 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class campaign_analysis(osv.osv):
_name = "campaign.analysis"
_description = "Campaign Analysis"
_auto = False
_rec_name = 'date'
def _total_cost(self, cr, uid, ids, field_name, arg, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
        @param ids: list of campaign.analysis record IDs
@param context: A standard dictionary for contextual values
"""
result = {}
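        # Cost per record = the activity's variable cost plus the campaign's fixed
        # cost (defaulting to 1.0) spread evenly over all workitems of that campaign.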
for ca_obj in self.browse(cr, uid, ids, context=context):
wi_ids = self.pool.get('marketing.campaign.workitem').search(cr, uid,
[('segment_id.campaign_id', '=', ca_obj.campaign_id.id)])
total_cost = ca_obj.activity_id.variable_cost + \
((ca_obj.campaign_id.fixed_cost or 1.00) / len(wi_ids))
result[ca_obj.id] = total_cost
return result
_columns = {
'res_id' : fields.integer('Resource', readonly=True),
'year': fields.char('Year', size=4, readonly=True),
'month': fields.selection([('01','January'), ('02','February'),
('03','March'), ('04','April'),('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')],
'Month', readonly=True),
'day': fields.char('Day', size=10, readonly=True),
'date': fields.date('Date', readonly=True, select=True),
'campaign_id': fields.many2one('marketing.campaign', 'Campaign',
readonly=True),
'activity_id': fields.many2one('marketing.campaign.activity', 'Activity',
readonly=True),
'segment_id': fields.many2one('marketing.campaign.segment', 'Segment',
readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'country_id': fields.related('partner_id', 'country_id',
type='many2one', relation='res.country',string='Country'),
'total_cost' : fields.function(_total_cost, string='Cost',
type="float", digits_compute=dp.get_precision('Account')),
'revenue': fields.float('Revenue', readonly=True, digits_compute=dp.get_precision('Account')),
'count' : fields.integer('# of Actions', readonly=True),
'state': fields.selection([('todo', 'To Do'),
('exception', 'Exception'), ('done', 'Done'),
('cancelled', 'Cancelled')], 'Status', readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'campaign_analysis')
cr.execute("""
create or replace view campaign_analysis as (
select
min(wi.id) as id,
min(wi.res_id) as res_id,
to_char(wi.date::date, 'YYYY') as year,
to_char(wi.date::date, 'MM') as month,
to_char(wi.date::date, 'YYYY-MM-DD') as day,
wi.date::date as date,
s.campaign_id as campaign_id,
wi.activity_id as activity_id,
wi.segment_id as segment_id,
wi.partner_id as partner_id ,
wi.state as state,
sum(act.revenue) as revenue,
count(*) as count
from
marketing_campaign_workitem wi
left join res_partner p on (p.id=wi.partner_id)
left join marketing_campaign_segment s on (s.id=wi.segment_id)
left join marketing_campaign_activity act on (act.id= wi.activity_id)
group by
s.campaign_id,wi.activity_id,wi.segment_id,wi.partner_id,wi.state,
wi.date::date
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,086,454,288,565,740,500 | 51.019608 | 102 | 0.530155 | false |
aragos/tichu-tournament | python/reportlab/pdfbase/_fontdata_widths_courierbold.py | 224 | 3664 | widths = {'A': 600,
'AE': 600,
'Aacute': 600,
'Acircumflex': 600,
'Adieresis': 600,
'Agrave': 600,
'Aring': 600,
'Atilde': 600,
'B': 600,
'C': 600,
'Ccedilla': 600,
'D': 600,
'E': 600,
'Eacute': 600,
'Ecircumflex': 600,
'Edieresis': 600,
'Egrave': 600,
'Eth': 600,
'Euro': 600,
'F': 600,
'G': 600,
'H': 600,
'I': 600,
'Iacute': 600,
'Icircumflex': 600,
'Idieresis': 600,
'Igrave': 600,
'J': 600,
'K': 600,
'L': 600,
'Lslash': 600,
'M': 600,
'N': 600,
'Ntilde': 600,
'O': 600,
'OE': 600,
'Oacute': 600,
'Ocircumflex': 600,
'Odieresis': 600,
'Ograve': 600,
'Oslash': 600,
'Otilde': 600,
'P': 600,
'Q': 600,
'R': 600,
'S': 600,
'Scaron': 600,
'T': 600,
'Thorn': 600,
'U': 600,
'Uacute': 600,
'Ucircumflex': 600,
'Udieresis': 600,
'Ugrave': 600,
'V': 600,
'W': 600,
'X': 600,
'Y': 600,
'Yacute': 600,
'Ydieresis': 600,
'Z': 600,
'Zcaron': 600,
'a': 600,
'aacute': 600,
'acircumflex': 600,
'acute': 600,
'adieresis': 600,
'ae': 600,
'agrave': 600,
'ampersand': 600,
'aring': 600,
'asciicircum': 600,
'asciitilde': 600,
'asterisk': 600,
'at': 600,
'atilde': 600,
'b': 600,
'backslash': 600,
'bar': 600,
'braceleft': 600,
'braceright': 600,
'bracketleft': 600,
'bracketright': 600,
'breve': 600,
'brokenbar': 600,
'bullet': 600,
'c': 600,
'caron': 600,
'ccedilla': 600,
'cedilla': 600,
'cent': 600,
'circumflex': 600,
'colon': 600,
'comma': 600,
'copyright': 600,
'currency': 600,
'd': 600,
'dagger': 600,
'daggerdbl': 600,
'degree': 600,
'dieresis': 600,
'divide': 600,
'dollar': 600,
'dotaccent': 600,
'dotlessi': 600,
'e': 600,
'eacute': 600,
'ecircumflex': 600,
'edieresis': 600,
'egrave': 600,
'eight': 600,
'ellipsis': 600,
'emdash': 600,
'endash': 600,
'equal': 600,
'eth': 600,
'exclam': 600,
'exclamdown': 600,
'f': 600,
'fi': 600,
'five': 600,
'fl': 600,
'florin': 600,
'four': 600,
'fraction': 600,
'g': 600,
'germandbls': 600,
'grave': 600,
'greater': 600,
'guillemotleft': 600,
'guillemotright': 600,
'guilsinglleft': 600,
'guilsinglright': 600,
'h': 600,
'hungarumlaut': 600,
'hyphen': 600,
'i': 600,
'iacute': 600,
'icircumflex': 600,
'idieresis': 600,
'igrave': 600,
'j': 600,
'k': 600,
'l': 600,
'less': 600,
'logicalnot': 600,
'lslash': 600,
'm': 600,
'macron': 600,
'minus': 600,
'mu': 600,
'multiply': 600,
'n': 600,
'nine': 600,
'ntilde': 600,
'numbersign': 600,
'o': 600,
'oacute': 600,
'ocircumflex': 600,
'odieresis': 600,
'oe': 600,
'ogonek': 600,
'ograve': 600,
'one': 600,
'onehalf': 600,
'onequarter': 600,
'onesuperior': 600,
'ordfeminine': 600,
'ordmasculine': 600,
'oslash': 600,
'otilde': 600,
'p': 600,
'paragraph': 600,
'parenleft': 600,
'parenright': 600,
'percent': 600,
'period': 600,
'periodcentered': 600,
'perthousand': 600,
'plus': 600,
'plusminus': 600,
'q': 600,
'question': 600,
'questiondown': 600,
'quotedbl': 600,
'quotedblbase': 600,
'quotedblleft': 600,
'quotedblright': 600,
'quoteleft': 600,
'quoteright': 600,
'quotesinglbase': 600,
'quotesingle': 600,
'r': 600,
'registered': 600,
'ring': 600,
's': 600,
'scaron': 600,
'section': 600,
'semicolon': 600,
'seven': 600,
'six': 600,
'slash': 600,
'space': 600,
'sterling': 600,
't': 600,
'thorn': 600,
'three': 600,
'threequarters': 600,
'threesuperior': 600,
'tilde': 600,
'trademark': 600,
'two': 600,
'twosuperior': 600,
'u': 600,
'uacute': 600,
'ucircumflex': 600,
'udieresis': 600,
'ugrave': 600,
'underscore': 600,
'v': 600,
'w': 600,
'x': 600,
'y': 600,
'yacute': 600,
'ydieresis': 600,
'yen': 600,
'z': 600,
'zcaron': 600,
'zero': 600}
| mit | -5,437,147,673,017,319,000 | 15 | 23 | 0.561681 | false |
alexissmirnov/donomo | donomo_archive/deps/paypal/standard/pdt/tests/test_pdt.py | 9 | 5522 | """
run this with ./manage.py test website
see http://www.djangoproject.com/documentation/testing/ for details
"""
import os
from django.conf import settings
from django.shortcuts import render_to_response
from django.test import TestCase
from paypal.standard.pdt.forms import PayPalPDTForm
from paypal.standard.pdt.models import PayPalPDT
from paypal.standard.pdt.signals import pdt_successful, pdt_failed
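# DummyPayPalPDT stands in for PayPal's PDT postback: its _postback method renders a
# canned response template, so the tests below never make a network request.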
class DummyPayPalPDT(object):
def __init__(self, update_context_dict={}):
self.context_dict = {'st': 'SUCCESS', 'custom':'cb736658-3aad-4694-956f-d0aeade80194',
'txn_id':'1ED550410S3402306', 'mc_gross': '225.00',
'business': settings.PAYPAL_RECEIVER_EMAIL, 'error': 'Error code: 1234'}
self.context_dict.update(update_context_dict)
self.response = ''
def update_with_get_params(self, get_params):
if get_params.has_key('tx'):
self.context_dict['txn_id'] = get_params.get('tx')
if get_params.has_key('amt'):
self.context_dict['mc_gross'] = get_params.get('amt')
if get_params.has_key('cm'):
self.context_dict['custom'] = get_params.get('cm')
def _postback(self, test=True):
"""Perform a Fake PayPal PDT Postback request."""
# @@@ would be cool if this could live in the test templates dir...
return render_to_response("pdt/test_pdt_response.html", self.context_dict).content
class PDTTest(TestCase):
urls = "paypal.standard.pdt.tests.test_urls"
template_dirs = [os.path.join(os.path.dirname(__file__), 'templates'),]
def setUp(self):
# set up some dummy PDT get parameters
self.get_params = {"tx":"4WJ86550014687441", "st":"Completed", "amt":"225.00", "cc":"EUR",
"cm":"a3e192b8-8fea-4a86-b2e8-d5bf502e36be", "item_number":"",
"sig":"blahblahblah"}
# monkey patch the PayPalPDT._postback function
self.dpppdt = DummyPayPalPDT()
self.dpppdt.update_with_get_params(self.get_params)
PayPalPDT._postback = self.dpppdt._postback
def test_verify_postback(self):
dpppdt = DummyPayPalPDT()
paypal_response = dpppdt._postback()
assert('SUCCESS' in paypal_response)
self.assertEqual(len(PayPalPDT.objects.all()), 0)
pdt_obj = PayPalPDT()
pdt_obj.ipaddress = '127.0.0.1'
pdt_obj.response = paypal_response
pdt_obj._verify_postback()
self.assertEqual(len(PayPalPDT.objects.all()), 0)
self.assertEqual(pdt_obj.txn_id, '1ED550410S3402306')
def test_pdt(self):
self.assertEqual(len(PayPalPDT.objects.all()), 0)
self.dpppdt.update_with_get_params(self.get_params)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1)
def test_pdt_signals(self):
self.successful_pdt_fired = False
self.failed_pdt_fired = False
def successful_pdt(sender, **kwargs):
self.successful_pdt_fired = True
pdt_successful.connect(successful_pdt)
def failed_pdt(sender, **kwargs):
self.failed_pdt_fired = True
pdt_failed.connect(failed_pdt)
self.assertEqual(len(PayPalPDT.objects.all()), 0)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1)
self.assertTrue(self.successful_pdt_fired)
self.assertFalse(self.failed_pdt_fired)
pdt_obj = PayPalPDT.objects.all()[0]
self.assertEqual(pdt_obj.flag, False)
def test_double_pdt_get(self):
self.assertEqual(len(PayPalPDT.objects.all()), 0)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1)
pdt_obj = PayPalPDT.objects.all()[0]
self.assertEqual(pdt_obj.flag, False)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1) # we don't create a new pdt
pdt_obj = PayPalPDT.objects.all()[0]
self.assertEqual(pdt_obj.flag, False)
def test_no_txn_id_in_pdt(self):
self.dpppdt.context_dict.pop('txn_id')
self.get_params={}
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction Failed', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 0)
def test_custom_passthrough(self):
self.assertEqual(len(PayPalPDT.objects.all()), 0)
self.dpppdt.update_with_get_params(self.get_params)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1)
pdt_obj = PayPalPDT.objects.all()[0]
        self.assertEqual(pdt_obj.custom, self.get_params['cm'])
| bsd-3-clause | 4,248,454,874,700,304,400 | 45.411765 | 101 | 0.624411 | false |
jayofdoom/cloud-init-debian-pkg | cloudinit/config/cc_byobu.py | 7 | 2886 | # vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
from cloudinit import distros as ds
from cloudinit import util
distros = ['ubuntu', 'debian']
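# Accepted values for 'byobu_by_default': enable, disable, enable-user, disable-user,
# enable-system, disable-system (plain "user"/"system" are shorthand for enable-*).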
def handle(name, cfg, cloud, log, args):
if len(args) != 0:
value = args[0]
else:
value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
if not value:
log.debug("Skipping module named %s, no 'byobu' values found", name)
return
if value == "user" or value == "system":
value = "enable-%s" % value
valid = ("enable-user", "enable-system", "enable",
"disable-user", "disable-system", "disable")
    if value not in valid:
log.warn("Unknown value %s for byobu_by_default", value)
mod_user = value.endswith("-user")
mod_sys = value.endswith("-system")
if value.startswith("enable"):
bl_inst = "install"
dc_val = "byobu byobu/launch-by-default boolean true"
mod_sys = True
else:
if value == "disable":
mod_user = True
mod_sys = True
bl_inst = "uninstall"
dc_val = "byobu byobu/launch-by-default boolean false"
shcmd = ""
if mod_user:
(users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ds.extract_default(users)
if not user:
log.warn(("No default byobu user provided, "
"can not launch %s for the default user"), bl_inst)
else:
shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
shcmd += " || X=$(($X+1)); "
if mod_sys:
shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
shcmd += " || X=$(($X+1)); "
if len(shcmd):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
log.debug("Setting byobu to %s", value)
util.subp(cmd, capture=False)
| gpl-3.0 | 6,275,317,914,335,046,000 | 35.075 | 76 | 0.606722 | false |
gregcaporaso/qiime | scripts/conditional_uncovered_probability.py | 15 | 4838 | #!/usr/bin/env python
# File created on 1 April 2012
from __future__ import division
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jens Reeder", "Jose Antonio Navas Molina", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jens Reeder"
__email__ = "jens.reeder@gmail.com"
from qiime.util import make_option, parse_command_line_parameters
from qiime.alpha_diversity import (single_file_cup,
list_known_cup_metrics)
import os
# conditional_uncovered_probability.py
script_info = {}
script_info['version'] = __version__
script_info['script_description'] = "Calculate the conditional uncovered\
probability."
script_info['brief_description'] = """Calculate the conditional uncovered\
probability on each sample in an otu table."""
script_info['script_description'] = \
"""This script calculates the conditional uncovered probability for each sample
in an OTU table. It uses the methods introduced in Lladser, Gouet, and Reeder,
"Extrapolation of Urn Models via Poissonization: Accurate Measurements of the
Microbial Unknown" PLoS 2011.
Specifically, it computes a point estimate and a confidence interval using two
different methods. Thus it can happen that the PE is actually outside of the
CI.
We only provide the ability to generate 95% (alpha=0.95) CIs. The CIs are ULCL
CIs; they provide an upper and lower bound, where the lower bound is
conservative. The CIs are constructed using an upper-to-lower bound ratio of
10.
The CI method requires precomputed constants that depend on the lookahead. We
only provide constants for r=3..25,30,40,50.
"""
script_info['script_usage'] = []
script_info['script_usage'].append(
("Default case:",
"To calculate the cond. uncovered probability with the default values, "
"you can use the following command:",
"%prog -i otu_table.biom -o cup.txt"))
script_info['script_usage'].append(
("Change lookahead:",
"To change the accuracy of the prediction change the lookahead value. "
"Larger values of r lead to more precise predictions, but might be "
"unfeasable for small samples. For deeply sequenced samples, try "
"increasing r to 50:",
"%prog -i otu_table.biom -o cup_r50.txt -r 50"))
script_info['output_description'] = \
"""The resulting file(s) is a tab-delimited text file, where the columns
correspond to estimates of the cond. uncovered probability and the rows
correspond to samples. The output file is compatible with the alpha_diversity
output files and thus could be tied into the rarefaction workflow.
Example Output:
====== ======= ============= ================
\ PE Lower Bound Upper Bound
====== ======= ============= ================
PC.354 0.111 0.0245 0.245
PC.124 0.001 0.000564 0.00564
====== ======= ============= ================
"""
script_info['required_options'] = []
script_info['optional_options'] = [
make_option('-i', '--input_path',
help='Input OTU table filepath. [default: %default]',
type='existing_path'),
make_option('-o', '--output_path',
help='Output filepath to store the predictions. [default: %default]',
type='new_path'),
make_option('-r', '--look_ahead',
help='Number of unobserved, new colors necessary for prediction.'
' [default: %default]', default=25,
type='int'),
make_option('-m', '--metrics', default='lladser_pe,lladser_ci',
type='multiple_choice', mchoices=list_known_cup_metrics(),
help='CUP metric(s) to use. A comma-separated list should' +
' be provided when multiple metrics are specified. [default: %default]'),
make_option('-s', '--show_metrics', action='store_true',
dest="show_metrics",
help='Show the available CUP metrics and exit.')
]
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if opts.show_metrics:
print("Known metrics are: %s\n"
% (', '.join(list_known_cup_metrics()),))
exit(0)
almost_required_options = ['input_path', 'output_path']
for option in almost_required_options:
if getattr(opts, option) is None:
option_parser.error('Required option --%s omitted.' % option)
if os.path.isfile(opts.input_path):
try:
f = open(opts.output_path, 'w')
except IOError:
exit("ioerror, couldn't create output file")
f.close()
single_file_cup(opts.input_path, opts.metrics, opts.output_path,
opts.look_ahead)
else:
exit("io error, input path not valid. does it exist?")
if __name__ == "__main__":
main()
| gpl-2.0 | 8,896,769,902,928,978,000 | 36.215385 | 89 | 0.63642 | false |
chirilo/mozillians | vendor-local/lib/python/unidecode/x0c5.py | 253 | 4581 | data = (
'sseum', # 0x00
'sseub', # 0x01
'sseubs', # 0x02
'sseus', # 0x03
'sseuss', # 0x04
'sseung', # 0x05
'sseuj', # 0x06
'sseuc', # 0x07
'sseuk', # 0x08
'sseut', # 0x09
'sseup', # 0x0a
'sseuh', # 0x0b
'ssyi', # 0x0c
'ssyig', # 0x0d
'ssyigg', # 0x0e
'ssyigs', # 0x0f
'ssyin', # 0x10
'ssyinj', # 0x11
'ssyinh', # 0x12
'ssyid', # 0x13
'ssyil', # 0x14
'ssyilg', # 0x15
'ssyilm', # 0x16
'ssyilb', # 0x17
'ssyils', # 0x18
'ssyilt', # 0x19
'ssyilp', # 0x1a
'ssyilh', # 0x1b
'ssyim', # 0x1c
'ssyib', # 0x1d
'ssyibs', # 0x1e
'ssyis', # 0x1f
'ssyiss', # 0x20
'ssying', # 0x21
'ssyij', # 0x22
'ssyic', # 0x23
'ssyik', # 0x24
'ssyit', # 0x25
'ssyip', # 0x26
'ssyih', # 0x27
'ssi', # 0x28
'ssig', # 0x29
'ssigg', # 0x2a
'ssigs', # 0x2b
'ssin', # 0x2c
'ssinj', # 0x2d
'ssinh', # 0x2e
'ssid', # 0x2f
'ssil', # 0x30
'ssilg', # 0x31
'ssilm', # 0x32
'ssilb', # 0x33
'ssils', # 0x34
'ssilt', # 0x35
'ssilp', # 0x36
'ssilh', # 0x37
'ssim', # 0x38
'ssib', # 0x39
'ssibs', # 0x3a
'ssis', # 0x3b
'ssiss', # 0x3c
'ssing', # 0x3d
'ssij', # 0x3e
'ssic', # 0x3f
'ssik', # 0x40
'ssit', # 0x41
'ssip', # 0x42
'ssih', # 0x43
'a', # 0x44
'ag', # 0x45
'agg', # 0x46
'ags', # 0x47
'an', # 0x48
'anj', # 0x49
'anh', # 0x4a
'ad', # 0x4b
'al', # 0x4c
'alg', # 0x4d
'alm', # 0x4e
'alb', # 0x4f
'als', # 0x50
'alt', # 0x51
'alp', # 0x52
'alh', # 0x53
'am', # 0x54
'ab', # 0x55
'abs', # 0x56
'as', # 0x57
'ass', # 0x58
'ang', # 0x59
'aj', # 0x5a
'ac', # 0x5b
'ak', # 0x5c
'at', # 0x5d
'ap', # 0x5e
'ah', # 0x5f
'ae', # 0x60
'aeg', # 0x61
'aegg', # 0x62
'aegs', # 0x63
'aen', # 0x64
'aenj', # 0x65
'aenh', # 0x66
'aed', # 0x67
'ael', # 0x68
'aelg', # 0x69
'aelm', # 0x6a
'aelb', # 0x6b
'aels', # 0x6c
'aelt', # 0x6d
'aelp', # 0x6e
'aelh', # 0x6f
'aem', # 0x70
'aeb', # 0x71
'aebs', # 0x72
'aes', # 0x73
'aess', # 0x74
'aeng', # 0x75
'aej', # 0x76
'aec', # 0x77
'aek', # 0x78
'aet', # 0x79
'aep', # 0x7a
'aeh', # 0x7b
'ya', # 0x7c
'yag', # 0x7d
'yagg', # 0x7e
'yags', # 0x7f
'yan', # 0x80
'yanj', # 0x81
'yanh', # 0x82
'yad', # 0x83
'yal', # 0x84
'yalg', # 0x85
'yalm', # 0x86
'yalb', # 0x87
'yals', # 0x88
'yalt', # 0x89
'yalp', # 0x8a
'yalh', # 0x8b
'yam', # 0x8c
'yab', # 0x8d
'yabs', # 0x8e
'yas', # 0x8f
'yass', # 0x90
'yang', # 0x91
'yaj', # 0x92
'yac', # 0x93
'yak', # 0x94
'yat', # 0x95
'yap', # 0x96
'yah', # 0x97
'yae', # 0x98
'yaeg', # 0x99
'yaegg', # 0x9a
'yaegs', # 0x9b
'yaen', # 0x9c
'yaenj', # 0x9d
'yaenh', # 0x9e
'yaed', # 0x9f
'yael', # 0xa0
'yaelg', # 0xa1
'yaelm', # 0xa2
'yaelb', # 0xa3
'yaels', # 0xa4
'yaelt', # 0xa5
'yaelp', # 0xa6
'yaelh', # 0xa7
'yaem', # 0xa8
'yaeb', # 0xa9
'yaebs', # 0xaa
'yaes', # 0xab
'yaess', # 0xac
'yaeng', # 0xad
'yaej', # 0xae
'yaec', # 0xaf
'yaek', # 0xb0
'yaet', # 0xb1
'yaep', # 0xb2
'yaeh', # 0xb3
'eo', # 0xb4
'eog', # 0xb5
'eogg', # 0xb6
'eogs', # 0xb7
'eon', # 0xb8
'eonj', # 0xb9
'eonh', # 0xba
'eod', # 0xbb
'eol', # 0xbc
'eolg', # 0xbd
'eolm', # 0xbe
'eolb', # 0xbf
'eols', # 0xc0
'eolt', # 0xc1
'eolp', # 0xc2
'eolh', # 0xc3
'eom', # 0xc4
'eob', # 0xc5
'eobs', # 0xc6
'eos', # 0xc7
'eoss', # 0xc8
'eong', # 0xc9
'eoj', # 0xca
'eoc', # 0xcb
'eok', # 0xcc
'eot', # 0xcd
'eop', # 0xce
'eoh', # 0xcf
'e', # 0xd0
'eg', # 0xd1
'egg', # 0xd2
'egs', # 0xd3
'en', # 0xd4
'enj', # 0xd5
'enh', # 0xd6
'ed', # 0xd7
'el', # 0xd8
'elg', # 0xd9
'elm', # 0xda
'elb', # 0xdb
'els', # 0xdc
'elt', # 0xdd
'elp', # 0xde
'elh', # 0xdf
'em', # 0xe0
'eb', # 0xe1
'ebs', # 0xe2
'es', # 0xe3
'ess', # 0xe4
'eng', # 0xe5
'ej', # 0xe6
'ec', # 0xe7
'ek', # 0xe8
'et', # 0xe9
'ep', # 0xea
'eh', # 0xeb
'yeo', # 0xec
'yeog', # 0xed
'yeogg', # 0xee
'yeogs', # 0xef
'yeon', # 0xf0
'yeonj', # 0xf1
'yeonh', # 0xf2
'yeod', # 0xf3
'yeol', # 0xf4
'yeolg', # 0xf5
'yeolm', # 0xf6
'yeolb', # 0xf7
'yeols', # 0xf8
'yeolt', # 0xf9
'yeolp', # 0xfa
'yeolh', # 0xfb
'yeom', # 0xfc
'yeob', # 0xfd
'yeobs', # 0xfe
'yeos', # 0xff
)
| bsd-3-clause | 6,588,337,022,329,290,000 | 16.755814 | 19 | 0.439642 | false |
topazproject/topaz | tests/test_celldict.py | 3 | 1141 | from topaz.celldict import CellDict, Cell, GlobalsDict
from .base import BaseTopazTest
class TestCellDict(BaseTopazTest):
def test_single_set(self, space):
c = CellDict()
v = c.version
c.set(space, "a", 2)
assert c.version is not v
assert c._get_cell("a", c.version) == 2
def test_multi_set(self, space):
c = CellDict()
c.set(space, "a", 2)
v = c.version
c.set(space, "a", 3)
assert isinstance(c._get_cell("a", c.version), Cell)
assert c.version is not v
v = c.version
c.set(space, "a", 4)
assert isinstance(c._get_cell("a", c.version), Cell)
assert c.version is v
def test_globals(self, space):
space.stuff = 4
g = GlobalsDict()
g.define_virtual("x", lambda s: s.stuff)
assert g.get(space, "x") == 4
with self.raises(space, "NameError"):
g.set(space, "x", 5)
g.define_virtual("y", lambda s: s.stuff, lambda s, v: setattr(s, "stuff", v))
assert g.get(space, "y") == 4
g.set(space, "y", 5)
assert g.get(space, "y") == 5
| bsd-3-clause | 408,474,187,701,671,600 | 29.837838 | 85 | 0.544259 | false |
MonicaHsu/truvaluation | venv/lib/python2.7/site-packages/simplejson/tests/test_item_sort_key.py | 140 | 1127 | from unittest import TestCase
import simplejson as json
from operator import itemgetter
class TestItemSortKey(TestCase):
def test_simple_first(self):
a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
json.dumps(a, item_sort_key=json.simple_first))
def test_case(self):
a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=itemgetter(0)))
self.assertEqual(
'{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
| mit | -8,906,873,084,904,617,000 | 55.35 | 137 | 0.490683 | false |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/web/test/test_util.py | 16 | 12590 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.util}.
"""
from __future__ import absolute_import, division
import gc
from twisted.python.failure import Failure
from twisted.trial.unittest import SynchronousTestCase, TestCase
from twisted.internet import defer
from twisted.python.compat import _PY3, intToBytes, networkString
from twisted.web import resource, util
from twisted.web.error import FlattenerError
from twisted.web.http import FOUND
from twisted.web.server import Request
from twisted.web.template import TagLoader, flattenString, tags
from twisted.web.test.requesthelper import DummyChannel, DummyRequest
from twisted.web.util import DeferredResource
from twisted.web.util import _SourceFragmentElement, _FrameElement
from twisted.web.util import _StackElement, FailureElement, formatFailure
from twisted.web.util import redirectTo, _SourceLineElement
class RedirectToTests(TestCase):
"""
Tests for L{redirectTo}.
"""
def test_headersAndCode(self):
"""
L{redirectTo} will set the C{Location} and C{Content-Type} headers on
its request, and set the response code to C{FOUND}, so the browser will
be redirected.
"""
request = Request(DummyChannel(), True)
request.method = b'GET'
targetURL = b"http://target.example.com/4321"
redirectTo(targetURL, request)
self.assertEqual(request.code, FOUND)
self.assertEqual(
request.responseHeaders.getRawHeaders(b'location'), [targetURL])
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-type'),
[b'text/html; charset=utf-8'])
    def test_redirectToUnicodeURL(self):
        """
        L{redirectTo} will raise TypeError if a unicode object is passed as the URL.
"""
request = Request(DummyChannel(), True)
request.method = b'GET'
targetURL = u'http://target.example.com/4321'
self.assertRaises(TypeError, redirectTo, targetURL, request)
class FailureElementTests(TestCase):
"""
Tests for L{FailureElement} and related helpers which can render a
L{Failure} as an HTML string.
"""
def setUp(self):
"""
Create a L{Failure} which can be used by the rendering tests.
"""
def lineNumberProbeAlsoBroken():
message = "This is a problem"
raise Exception(message)
# Figure out the line number from which the exception will be raised.
self.base = lineNumberProbeAlsoBroken.__code__.co_firstlineno + 1
try:
lineNumberProbeAlsoBroken()
except:
self.failure = Failure(captureVars=True)
self.frame = self.failure.frames[-1]
def test_sourceLineElement(self):
"""
L{_SourceLineElement} renders a source line and line number.
"""
element = _SourceLineElement(
TagLoader(tags.div(
tags.span(render="lineNumber"),
tags.span(render="sourceLine"))),
50, " print 'hello'")
d = flattenString(None, element)
expected = (
u"<div><span>50</span><span>"
u" \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}print 'hello'</span></div>")
d.addCallback(
self.assertEqual, expected.encode('utf-8'))
return d
def test_sourceFragmentElement(self):
"""
L{_SourceFragmentElement} renders source lines at and around the line
number indicated by a frame object.
"""
element = _SourceFragmentElement(
TagLoader(tags.div(
tags.span(render="lineNumber"),
tags.span(render="sourceLine"),
render="sourceLines")),
self.frame)
source = [
u' \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}message = '
u'"This is a problem"',
u' \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}raise Exception(message)',
u'# Figure out the line number from which the exception will be '
u'raised.',
]
d = flattenString(None, element)
if _PY3:
stringToCheckFor = ''.join([
'<div class="snippet%sLine"><span>%d</span><span>%s</span>'
'</div>' % (
["", "Highlight"][lineNumber == 1],
self.base + lineNumber,
(u" \N{NO-BREAK SPACE}" * 4 + sourceLine))
for (lineNumber, sourceLine)
in enumerate(source)]).encode("utf8")
else:
stringToCheckFor = ''.join([
'<div class="snippet%sLine"><span>%d</span><span>%s</span>'
'</div>' % (
["", "Highlight"][lineNumber == 1],
self.base + lineNumber,
(u" \N{NO-BREAK SPACE}" * 4 + sourceLine).encode('utf8'))
for (lineNumber, sourceLine)
in enumerate(source)])
d.addCallback(self.assertEqual, stringToCheckFor)
return d
def test_frameElementFilename(self):
"""
The I{filename} renderer of L{_FrameElement} renders the filename
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="filename")),
self.frame)
d = flattenString(None, element)
d.addCallback(
# __file__ differs depending on whether an up-to-date .pyc file
# already existed.
self.assertEqual,
b"<span>" + networkString(__file__.rstrip('c')) + b"</span>")
return d
def test_frameElementLineNumber(self):
"""
The I{lineNumber} renderer of L{_FrameElement} renders the line number
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="lineNumber")),
self.frame)
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b"<span>" + intToBytes(self.base + 1) + b"</span>")
return d
def test_frameElementFunction(self):
"""
The I{function} renderer of L{_FrameElement} renders the line number
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="function")),
self.frame)
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b"<span>lineNumberProbeAlsoBroken</span>")
return d
def test_frameElementSource(self):
"""
The I{source} renderer of L{_FrameElement} renders the source code near
the source filename/line number associated with the frame object used to
initialize the L{_FrameElement}.
"""
element = _FrameElement(None, self.frame)
renderer = element.lookupRenderMethod("source")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, _SourceFragmentElement)
self.assertIdentical(result.frame, self.frame)
self.assertEqual([tag], result.loader.load())
def test_stackElement(self):
"""
The I{frames} renderer of L{_StackElement} renders each stack frame in
the list of frames used to initialize the L{_StackElement}.
"""
element = _StackElement(None, self.failure.frames[:2])
renderer = element.lookupRenderMethod("frames")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], _FrameElement)
self.assertIdentical(result[0].frame, self.failure.frames[0])
self.assertIsInstance(result[1], _FrameElement)
self.assertIdentical(result[1].frame, self.failure.frames[1])
# They must not share the same tag object.
self.assertNotEqual(result[0].loader.load(), result[1].loader.load())
self.assertEqual(2, len(result))
def test_failureElementTraceback(self):
"""
The I{traceback} renderer of L{FailureElement} renders the failure's
stack frames using L{_StackElement}.
"""
element = FailureElement(self.failure)
renderer = element.lookupRenderMethod("traceback")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, _StackElement)
self.assertIdentical(result.stackFrames, self.failure.frames)
self.assertEqual([tag], result.loader.load())
def test_failureElementType(self):
"""
The I{type} renderer of L{FailureElement} renders the failure's
exception type.
"""
element = FailureElement(
self.failure, TagLoader(tags.span(render="type")))
d = flattenString(None, element)
if _PY3:
exc = b"builtins.Exception"
else:
exc = b"exceptions.Exception"
d.addCallback(
self.assertEqual, b"<span>" + exc + b"</span>")
return d
def test_failureElementValue(self):
"""
        The I{value} renderer of L{FailureElement} renders the failure's
        exception value.
"""
element = FailureElement(
self.failure, TagLoader(tags.span(render="value")))
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b'<span>This is a problem</span>')
return d
class FormatFailureTests(TestCase):
"""
Tests for L{twisted.web.util.formatFailure} which returns an HTML string
representing the L{Failure} instance passed to it.
"""
def test_flattenerError(self):
"""
If there is an error flattening the L{Failure} instance,
L{formatFailure} raises L{FlattenerError}.
"""
self.assertRaises(FlattenerError, formatFailure, object())
def test_returnsBytes(self):
"""
The return value of L{formatFailure} is a C{str} instance (not a
C{unicode} instance) with numeric character references for any non-ASCII
characters meant to appear in the output.
"""
try:
raise Exception("Fake bug")
except:
result = formatFailure(Failure())
self.assertIsInstance(result, bytes)
if _PY3:
self.assertTrue(all(ch < 128 for ch in result))
else:
self.assertTrue(all(ord(ch) < 128 for ch in result))
# Indentation happens to rely on NO-BREAK SPACE
self.assertIn(b" ", result)
class SDResource(resource.Resource):
def __init__(self,default):
self.default = default
def getChildWithDefault(self, name, request):
d = defer.succeed(self.default)
resource = util.DeferredResource(d)
return resource.getChildWithDefault(name, request)
class DeferredResourceTests(SynchronousTestCase):
"""
Tests for L{DeferredResource}.
"""
def testDeferredResource(self):
r = resource.Resource()
r.isLeaf = 1
s = SDResource(r)
d = DummyRequest(['foo', 'bar', 'baz'])
resource.getChildForRequest(s, d)
self.assertEqual(d.postpath, ['bar', 'baz'])
def test_render(self):
"""
L{DeferredResource} uses the request object's C{render} method to
render the resource which is the result of the L{Deferred} being
handled.
"""
rendered = []
request = DummyRequest([])
request.render = rendered.append
result = resource.Resource()
deferredResource = DeferredResource(defer.succeed(result))
deferredResource.render(request)
self.assertEqual(rendered, [result])
def test_renderNoFailure(self):
"""
If the L{Deferred} fails, L{DeferredResource} reports the failure via
C{processingFailed}, and does not cause an unhandled error to be
logged.
"""
request = DummyRequest([])
d = request.notifyFinish()
failure = Failure(RuntimeError())
deferredResource = DeferredResource(defer.fail(failure))
deferredResource.render(request)
self.assertEqual(self.failureResultOf(d), failure)
del deferredResource
gc.collect()
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(errors, [])
| mit | -8,920,966,017,674,404,000 | 33.398907 | 81 | 0.606672 | false |
USGSDenverPychron/pychron | pychron/pipeline/tasks/tree_node.py | 1 | 3345 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.qt.QtCore import Qt
from pyface.qt.QtGui import QColor
from traitsui.tree_node import TreeNode
from pychron.envisage.resources import icon
from pychron.pipeline.engine import Pipeline
from pychron.pipeline.nodes import ReviewNode
class PipelineGroupTreeNode(TreeNode):
icon_name = ''
label = 'name'
class PipelineTreeNode(TreeNode):
icon_name = ''
label = 'name'
def get_background(self, obj):
if isinstance(obj, Pipeline):
c = QColor(Qt.white)
else:
if isinstance(obj, ReviewNode):
if not obj.enabled:
c = QColor('#ff8080') # light red
else:
c = QColor(Qt.cyan)
elif obj.skip_configure:
c = QColor('#D05BFF')
elif not obj.enabled:
c = QColor('#ff8080') # light red
else:
c = super(PipelineTreeNode, self).get_background(obj)
return c
def get_status_color(self, obj):
c = QColor(Qt.white)
if not isinstance(obj, Pipeline):
c = QColor(Qt.lightGray)
if obj.visited:
c = QColor(Qt.green)
elif obj.active:
c = QColor('orange')
# if obj.status == 'ran':
# c = QColor('green')
# elif obj.status == 'paused':
# c = QColor('orange')
return c
def get_icon(self, obj, is_expanded):
name = self.icon_name
if not isinstance(obj, Pipeline):
            if not obj.enabled:
name = 'cancel'
return icon(name)
# def get_background(self, obj):
# # print 'get', obj, obj.visited
# return 'green' if obj.visited else 'white'
class DataTreeNode(PipelineTreeNode):
icon_name = 'table'
class FilterTreeNode(PipelineTreeNode):
icon_name = 'table_filter'
class IdeogramTreeNode(PipelineTreeNode):
icon_name = 'histogram'
class SpectrumTreeNode(PipelineTreeNode):
icon_name = ''
class SeriesTreeNode(PipelineTreeNode):
icon_name = ''
class PDFTreeNode(PipelineTreeNode):
icon_name = 'file_pdf'
class GroupingTreeNode(PipelineTreeNode):
pass
class DBSaveTreeNode(PipelineTreeNode):
icon_name = 'database_save'
class FindTreeNode(PipelineTreeNode):
icon_name = 'find'
class FitTreeNode(PipelineTreeNode):
icon_name = 'lightning'
class ReviewTreeNode(PipelineTreeNode):
pass
# ============= EOF =============================================
| apache-2.0 | -2,415,939,757,539,616,000 | 25.76 | 81 | 0.578774 | false |
vigilv/scikit-learn | sklearn/gaussian_process/correlation_models.py | 230 | 7630 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in correlation models submodule for the gaussian_process module.
"""
import numpy as np
def absolute_exponential(theta, d):
"""
Absolute exponential autocorrelation model.
(Ornstein-Uhlenbeck stochastic process)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i| )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.abs(np.asarray(d, dtype=np.float))
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(- theta[0] * np.sum(d, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
def squared_exponential(theta, d):
"""
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * (d_i)^2 )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
def generalized_exponential(theta, d):
"""
Generalized exponential correlation model.
(Useful when one does not know the smoothness of the function to be
predicted.)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i|^p )
i = 1
Parameters
----------
theta : array_like
An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the
autocorrelation parameter(s) (theta, p).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if n_features > 1 and lth == 2:
theta = np.hstack([np.repeat(theta[0], n_features), theta[1]])
elif lth != n_features + 1:
raise Exception("Length of theta must be 2 or %s" % (n_features + 1))
else:
theta = theta.reshape(1, lth)
td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1]
r = np.exp(- np.sum(td, 1))
return r
def pure_nugget(theta, d):
"""
Spatial independence correlation model (pure nugget).
(Useful when one wants to solve an ordinary least squares problem!)::
n
theta, d --> r(theta, d) = 1 if sum |d_i| == 0
i = 1
0 otherwise
Parameters
----------
theta : array_like
None.
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
n_eval = d.shape[0]
r = np.zeros(n_eval)
r[np.all(d == 0., axis=1)] = 1.
return r
def cubic(theta, d):
"""
Cubic correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or " + str(n_features))
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td ** 2. * (3. - 2. * td)
r = np.prod(ss, 1)
return r
def linear(theta, d):
"""
Linear correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - theta_j*d_ij) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or %s" % n_features)
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td
r = np.prod(ss, 1)
return r
| bsd-3-clause | 5,880,914,574,739,771,000 | 25.866197 | 78 | 0.547706 | false |
alexrao/YouCompleteMe | third_party/ycmd/third_party/jedi/test/test_evaluate/test_buildout_detection.py | 13 | 2751 | import os
from textwrap import dedent
from jedi._compatibility import u
from jedi.evaluate.sys_path import (_get_parent_dir_with_file,
_get_buildout_scripts,
sys_path_with_modifications,
_check_module)
from jedi.evaluate import Evaluator
from jedi.parser import Parser, load_grammar
from ..helpers import cwd_at
@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_parent_dir_with_file():
parent = _get_parent_dir_with_file(
os.path.abspath(os.curdir), 'buildout.cfg')
assert parent is not None
assert parent.endswith(os.path.join('test', 'test_evaluate', 'buildout_project'))
@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_buildout_detection():
scripts = _get_buildout_scripts(os.path.abspath('./module_name.py'))
assert len(scripts) == 1
curdir = os.path.abspath(os.curdir)
appdir_path = os.path.normpath(os.path.join(curdir, '../../bin/app'))
assert scripts[0] == appdir_path
def test_append_on_non_sys_path():
SRC = dedent(u("""
class Dummy(object):
path = []
d = Dummy()
d.path.append('foo')"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert len(paths) > 0
assert 'foo' not in paths
def test_path_from_invalid_sys_path_assignment():
SRC = dedent(u("""
import sys
sys.path = 'invalid'"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert len(paths) > 0
assert 'invalid' not in paths
@cwd_at('test/test_evaluate/buildout_project/src/proj_name/')
def test_sys_path_with_modifications():
SRC = dedent(u("""
import os
"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
p.module.path = os.path.abspath(os.path.join(os.curdir, 'module_name.py'))
paths = sys_path_with_modifications(Evaluator(grammar), p.module)
assert '/tmp/.buildout/eggs/important_package.egg' in paths
def test_path_from_sys_path_assignment():
SRC = dedent(u("""
#!/usr/bin/python
import sys
sys.path[0:0] = [
'/usr/lib/python3.4/site-packages',
'/home/test/.buildout/eggs/important_package.egg'
]
path[0:0] = [1]
import important_package
if __name__ == '__main__':
sys.exit(important_package.main())"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert 1 not in paths
assert '/home/test/.buildout/eggs/important_package.egg' in paths
| gpl-3.0 | -4,834,138,966,233,370,000 | 29.910112 | 85 | 0.61614 | false |
tensorflow/lucid | lucid/scratch/web/observable.py | 1 | 2465 | import json
from lucid.misc.io.showing import _display_html
def renderObservable(url, cells=None, data=None):
"""Display observable notebook cells in iPython.
Args:
url: url fragment to observable notebook. ex: '@observablehq/downloading-and-embedding-notebooks'
cells: an array of strings for the names of cells you want to render. ex: ['viewof stage', 'viewof x']
data: a dictionary of variables that you'd like to overwrite. ex: {'x': 200, 'width': 500}
"""
head = """
<div id="output"></div>
<div>
<a target="_blank" href='https://observablehq.com/{}'>source</a>
</div>
<script type="module">
""".format(url)
runtimeImport = "import {Runtime} from 'https://unpkg.com/@observablehq/notebook-runtime?module';"
notebookImport = "import notebook from 'https://api.observablehq.com/{0}.js';".format(url)
cellsSerialized = "let cells = {};".format(json.dumps(cells))
dataSerialized = "let data = {};".format(json.dumps(data))
code = """
const outputEl = document.getElementById("output");
// Converts data into a map
let dataMap = new Map();
if (data) {
Object.keys(data).forEach(key => {
dataMap.set(key, data[key]);
});
}
// Converts cells into a map
let cellsMap = new Map();
if (cells) {
cells.forEach((key, i) => {
const element = document.createElement("div");
outputEl.appendChild(element)
cellsMap.set(key, element)
});
}
function render(_node, value) {
if (!(value instanceof Element)) {
const el = document.createElement("span");
el.innerHTML = value;
value = el;
}
if (_node.firstChild !== value) {
if (_node.firstChild) {
while (_node.lastChild !== _node.firstChild) _node.removeChild(_node.lastChild);
_node.replaceChild(value, _node.firstChild);
} else {
_node.appendChild(value);
}
}
}
Runtime.load(notebook, (variable) => {
// Override a variable with a passed value
if (dataMap.has(variable.name)) {
variable.value = dataMap.get(variable.name)
}
// Render the output to the corrent element
if (cellsMap.has(variable.name)) {
return { fulfilled: (value) => render(cellsMap.get(variable.name), value) };
} else {
return true;
}
});
"""
foot = "</script>"
_display_html(
head + runtimeImport + notebookImport + cellsSerialized + dataSerialized + code + foot
)
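# Hypothetical usage sketch (argument values are illustrative, mirroring the
# docstring above):
#   renderObservable('@observablehq/downloading-and-embedding-notebooks',
#                    cells=['viewof stage', 'viewof x'],
#                    data={'x': 200, 'width': 500})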
| apache-2.0 | -788,707,965,468,401,200 | 27.662791 | 106 | 0.622312 | false |
quixoten/ansible | lib/ansible/cli/doc.py | 17 | 11990 | # (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-doc is a script that displays documentation on Ansible modules. See
# http://docs.ansible.com/ for more details.
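#
# Example invocations (illustrative):
#   ansible-doc --list     # list available modules
#   ansible-doc -s copy    # show a playbook snippet for the 'copy' module
#   ansible-doc copy       # show full documentation for the 'copy' module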
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import traceback
import textwrap
from ansible.compat.six import iteritems
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader
from ansible.cli import CLI
from ansible.utils import module_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
""" Vault command line class """
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "test-docs.sh"]
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.module_list = []
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [module...]',
epilog='Show Ansible module documentation',
module_opts=True,
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available modules')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified module(s)')
self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
# list modules
if self.options.list_dir:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
self.pager(self.get_module_list_text())
return 0
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line module list
text = ''
for module in self.args:
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
if any(filename.endswith(x) for x in self.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
except:
display.vvv(traceback.print_exc())
display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
continue
if doc is not None:
all_keys = []
for (k,v) in iteritems(doc['options']):
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = filename
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
if self.options.show_snippet:
text += self.get_snippet_text(doc)
else:
text += self.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception as e:
display.vvv(traceback.print_exc())
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
self.pager(text)
return 0
def find_modules(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self.find_modules(module)
elif any(module.endswith(x) for x in self.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in self.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
self.module_list.append(module)
def get_module_list_text(self):
columns = display.columns
displace = max(len(x) for x in self.module_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for module in sorted(set(self.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
desc = self.tty_ify(doc.get('short_description', '?')).strip()
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if module.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
except:
raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
pad = 31
subdent = ''.join([" " for a in xrange(pad)])
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
if opt.get('required', False):
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def get_man_text(self, doc):
opt_indent=" "
text = []
text.append("> %s\n" % doc['module'].upper())
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
if isinstance(doc['description'], list):
desc = " ".join(doc['description'])
else:
desc = doc['description']
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" "))
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n%s\n" % doc['deprecated'])
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
if opt.get('required', False):
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
desc = " ".join(opt['description'])
else:
desc = opt['description']
if 'choices' in opt:
choices = ", ".join(str(i) for i in opt['choices'])
desc = desc + " (Choices: " + choices + ")"
if 'default' in opt:
default = str(opt['default'])
desc = desc + " [Default: " + default + "]"
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
notes = " ".join(doc['notes'])
text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), limit-6, initial_indent=" ", subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
maintainers = set()
if 'author' in doc:
if isinstance(doc['author'], basestring):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
if 'maintainers' in doc:
if isinstance(doc['maintainers'], basestring):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
text.append('MAINTAINERS: ' + ', '.join(maintainers))
text.append('')
return "\n".join(text)
| gpl-3.0 | 2,400,568,851,459,023,000 | 37.184713 | 153 | 0.54362 | false |
dataxu/ansible | lib/ansible/module_utils/facts/other/facter.py | 232 | 2985 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class FacterFactCollector(BaseFactCollector):
name = 'facter'
_fact_ids = set(['facter'])
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='facter',
prefix='facter_')
super(FacterFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_facter(self, module):
facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
return facter_path
def run_facter(self, module, facter_path):
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = module.run_command(facter_path + " --puppet --json")
return rc, out, err
def get_facter_output(self, module):
facter_path = self.find_facter(module)
if not facter_path:
return None
rc, out, err = self.run_facter(module, facter_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
        # Note that this mirrors previous facter behavior, where there isn't
        # an 'ansible_facter' key in the main fact dict; instead, 'facter_whatever'
        # items are added to the main dict.
facter_dict = {}
if not module:
return facter_dict
facter_output = self.get_facter_output(module)
        # TODO: if we fail, should we add an empty facter key or nothing?
if facter_output is None:
return facter_dict
try:
facter_dict = json.loads(facter_output)
except Exception:
# FIXME: maybe raise a FactCollectorError with some info attrs?
pass
return facter_dict
| gpl-3.0 | -9,162,952,936,423,398,000 | 34.117647 | 87 | 0.646566 | false |
ict-felix/stack | expedient/src/python/expedient/clearinghouse/commands/management/commands/setup_media.py | 2 | 1035 | '''Command to link static content to settings.STATIC_DOC_ROOT
Created on Aug 24, 2010
@author: jnaous
'''
from django.core.management.base import NoArgsCommand
from django.conf import settings
import pkg_resources
import os
class Command(NoArgsCommand):
help = "Link static content from package to %s" % settings.STATIC_DOC_ROOT
def handle_noargs(self, **options):
pkg_resources.ensure_directory(settings.MEDIA_ROOT)
pkg_resources.ensure_directory(
os.path.join(settings.MEDIA_ROOT, settings.AGGREGATE_LOGOS_DIR))
media_dir = os.path.join(
settings.SRC_DIR, "static", "expedient", "clearinghouse", "media")
for d in "css", "img", "js":
path = os.path.join(media_dir, d)
target = os.path.join(settings.MEDIA_ROOT, d)
if not os.access(target, os.F_OK):
os.symlink(path, target)
print "Created media directory and symlinks in %s" \
% settings.MEDIA_ROOT
| apache-2.0 | -736,013,820,706,910,700 | 32.387097 | 78 | 0.624155 | false |
robovm/robovm-studio | plugins/hg4idea/testData/bin/mercurial/pvec.py | 94 | 5989 | # pvec.py - probabilistic vector clocks for Mercurial
#
# Copyright 2012 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''
A "pvec" is a changeset property based on the theory of vector clocks
that can be compared to discover relatedness without consulting a
graph. This can be useful for tasks like determining how a
disconnected patch relates to a repository.
Currently a pvec consists of 448 bits, of which 24 are 'depth' and the
remainder are a bit vector. It is represented as a 70-character base85
string.
Construction:
- a root changeset has a depth of 0 and a bit vector based on its hash
- a normal commit has a changeset where depth is increased by one and
one bit vector bit is flipped based on its hash
- a merge changeset pvec is constructed by copying changes from one pvec into
the other to balance its depth
Properties:
- for linear changes, difference in depth is always <= hamming distance
- otherwise, changes are probably divergent
- when hamming distance is < 200, we can reliably detect when pvecs are near
Issues:
- hamming distance ceases to work over distances of ~ 200
- detecting divergence is less accurate when the common ancestor is very close
to either revision or total distance is high
- this could probably be improved by modeling the relation between
delta and hdist
Uses:
- a patch pvec can be used to locate the nearest available common ancestor for
resolving conflicts
- ordering of patches can be established without a DAG
- two head pvecs can be compared to determine whether push/pull/merge is needed
and approximately how many changesets are involved
- can be used to find a heuristic divergence measure between changesets on
different branches
'''
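# Illustrative sketch (not part of the original module): given two changectx
# objects a and b from the same repository, their pvecs can be compared
# without walking the graph:
#   pa, pb = ctxpvec(a), ctxpvec(b)
#   if pa | pb:              # probably divergent
#       ...
#   elif pa < pb:            # a is likely an ancestor of b
#       ...
#   steps = pa.distance(pb)  # approximate number of changesets apart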
import base85, util
from node import nullrev
_size = 448 # 70 chars b85-encoded
_bytes = _size / 8
_depthbits = 24
_depthbytes = _depthbits / 8
_vecbytes = _bytes - _depthbytes
_vecbits = _vecbytes * 8
_radius = (_vecbits - 30) / 2 # high probability vectors are related
def _bin(bs):
'''convert a bytestring to a long'''
v = 0
for b in bs:
v = v * 256 + ord(b)
return v
def _str(v, l):
bs = ""
for p in xrange(l):
bs = chr(v & 255) + bs
v >>= 8
return bs
def _split(b):
'''depth and bitvec'''
return _bin(b[:_depthbytes]), _bin(b[_depthbytes:])
def _join(depth, bitvec):
return _str(depth, _depthbytes) + _str(bitvec, _vecbytes)
def _hweight(x):
c = 0
while x:
if x & 1:
c += 1
x >>= 1
return c
_htab = [_hweight(x) for x in xrange(256)]
def _hamming(a, b):
'''find the hamming distance between two longs'''
d = a ^ b
c = 0
while d:
c += _htab[d & 0xff]
d >>= 8
return c
def _mergevec(x, y, c):
# Ideally, this function would be x ^ y ^ ancestor, but finding
# ancestors is a nuisance. So instead we find the minimal number
# of changes to balance the depth and hamming distance
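    # Sketch of the balancing rule (added for clarity; not in the original
    # source): if the depths differ by ddist=2 but the vectors differ in
    # hdist=6 bit positions, we flip (6 - 2 + 1) // 2 = 2 of the differing
    # bits in the deeper vector and increase its depth by the same amount.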
d1, v1 = x
d2, v2 = y
if d1 < d2:
d1, d2, v1, v2 = d2, d1, v2, v1
hdist = _hamming(v1, v2)
ddist = d1 - d2
v = v1
m = v1 ^ v2 # mask of different bits
i = 1
if hdist > ddist:
# if delta = 10 and hdist = 100, then we need to go up 55 steps
# to the ancestor and down 45
changes = (hdist - ddist + 1) / 2
else:
# must make at least one change
changes = 1
depth = d1 + changes
# copy changes from v2
if m:
while changes:
if m & i:
v ^= i
changes -= 1
i <<= 1
else:
v = _flipbit(v, c)
return depth, v
def _flipbit(v, node):
# converting bit strings to longs is slow
bit = (hash(node) & 0xffffffff) % _vecbits
return v ^ (1<<bit)
def ctxpvec(ctx):
'''construct a pvec for ctx while filling in the cache'''
r = ctx._repo
if not util.safehasattr(r, "_pveccache"):
r._pveccache = {}
pvc = r._pveccache
if ctx.rev() not in pvc:
cl = r.changelog
for n in xrange(ctx.rev() + 1):
if n not in pvc:
node = cl.node(n)
p1, p2 = cl.parentrevs(n)
if p1 == nullrev:
# start with a 'random' vector at root
pvc[n] = (0, _bin((node * 3)[:_vecbytes]))
elif p2 == nullrev:
d, v = pvc[p1]
pvc[n] = (d + 1, _flipbit(v, node))
else:
pvc[n] = _mergevec(pvc[p1], pvc[p2], node)
bs = _join(*pvc[ctx.rev()])
return pvec(base85.b85encode(bs))
class pvec(object):
def __init__(self, hashorctx):
if isinstance(hashorctx, str):
self._bs = hashorctx
self._depth, self._vec = _split(base85.b85decode(hashorctx))
else:
self._vec = ctxpvec(hashorctx)
def __str__(self):
return self._bs
def __eq__(self, b):
return self._vec == b._vec and self._depth == b._depth
def __lt__(self, b):
delta = b._depth - self._depth
if delta < 0:
return False # always correct
if _hamming(self._vec, b._vec) > delta:
return False
return True
def __gt__(self, b):
return b < self
def __or__(self, b):
delta = abs(b._depth - self._depth)
if _hamming(self._vec, b._vec) <= delta:
return False
return True
def __sub__(self, b):
if self | b:
raise ValueError("concurrent pvecs")
return self._depth - b._depth
def distance(self, b):
d = abs(b._depth - self._depth)
h = _hamming(self._vec, b._vec)
return max(d, h)
def near(self, b):
        dist = abs(b._depth - self._depth)
if dist > _radius or _hamming(self._vec, b._vec) > _radius:
return False
| apache-2.0 | 7,892,422,276,843,619,000 | 27.519048 | 79 | 0.592252 | false |
imply/chuu | ppapi/generators/idl_parser.py | 25 | 37745 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Parser for PPAPI IDL """
#
# IDL Parser
#
# The parser uses the PLY yacc library to build a set of parsing rules based
# on WebIDL.
#
# WebIDL, and WebIDL regular expressions can be found at:
# http://dev.w3.org/2006/webapi/WebIDL/
# PLY can be found at:
# http://www.dabeaz.com/ply/
#
# The parser generates a tree by recursively matching sets of items against
# defined patterns. When a match is made, that set of items is reduced
# to a new item. The new item can provide a match for parent patterns.
# In this way an AST is built (reduced) depth first.
import getopt
import glob
import os.path
import re
import sys
import time
from idl_ast import IDLAst
from idl_log import ErrOut, InfoOut, WarnOut
from idl_lexer import IDLLexer
from idl_node import IDLAttribute, IDLFile, IDLNode
from idl_option import GetOption, Option, ParseOptions
from idl_lint import Lint
from idl_visitor import IDLVisitor
from ply import lex
from ply import yacc
Option('build_debug', 'Debug tree building.')
Option('parse_debug', 'Debug parse reduction steps.')
Option('token_debug', 'Debug token generation.')
Option('dump_tree', 'Dump the tree.')
Option('srcroot', 'Working directory.', default=os.path.join('..', 'api'))
Option('include_private', 'Include private IDL directory in default API paths.')
#
# ERROR_REMAP
#
# Maps the standard error formula into a more friendly error message.
#
ERROR_REMAP = {
'Unexpected ")" after "(".' : 'Empty argument list.',
'Unexpected ")" after ",".' : 'Missing argument.',
'Unexpected "}" after ",".' : 'Trailing comma in block.',
'Unexpected "}" after "{".' : 'Unexpected empty block.',
'Unexpected comment after "}".' : 'Unexpected trailing comment.',
'Unexpected "{" after keyword "enum".' : 'Enum missing name.',
'Unexpected "{" after keyword "struct".' : 'Struct missing name.',
'Unexpected "{" after keyword "interface".' : 'Interface missing name.',
}
# DumpReduction
#
# Prints out the set of items which matched a particular pattern and the
# new item or set it was reduced to.
def DumpReduction(cls, p):
if p[0] is None:
InfoOut.Log("OBJ: %s(%d) - None\n" % (cls, len(p)))
InfoOut.Log(" [%s]\n" % [str(x) for x in p[1:]])
else:
out = ""
for index in range(len(p) - 1):
out += " >%s< " % str(p[index + 1])
InfoOut.Log("OBJ: %s(%d) - %s : %s\n" % (cls, len(p), str(p[0]), out))
# CopyToList
#
# Takes an input item, list, or None, and returns a new list of that set.
def CopyToList(item):
# If the item is 'Empty' make it an empty list
if not item: item = []
# If the item is not a list
if type(item) is not type([]): item = [item]
# Make a copy we can modify
return list(item)
# ListFromConcat
#
# Generate a new list by joining two sets of inputs which can be an
# individual item, a list of items, or None.
def ListFromConcat(*items):
itemsout = []
for item in items:
itemlist = CopyToList(item)
itemsout.extend(itemlist)
return itemsout
# TokenTypeName
#
# Generate a string which has the type and value of the token.
def TokenTypeName(t):
if t.type == 'SYMBOL': return 'symbol %s' % t.value
if t.type in ['HEX', 'INT', 'OCT', 'FLOAT']:
return 'value %s' % t.value
if t.type == 'STRING' : return 'string "%s"' % t.value
if t.type == 'COMMENT' : return 'comment'
if t.type == t.value: return '"%s"' % t.value
return 'keyword "%s"' % t.value
#
# IDL Parser
#
# The Parser inherits from the Lexer to provide PLY with the tokenizing
# definitions.  Parsing patterns are encoded as functions, where p_<name>
# is called any time a pattern matching the function documentation is found.
# Patterns are expressed in the form of:
# """ <new item> : <item> ....
# | <item> ...."""
#
# Where new item is the result of a match against one or more sets of items
# separated by the "|".
#
# The function is called with an object 'p' where p[0] is the output object
# and p[n] is the set of inputs for positive values of 'n'. Len(p) can be
# used to distinguish between multiple item sets in the pattern.
#
# For more details on parsing refer to the PLY documentation at
# http://www.dabeaz.com/ply/
#
#
# The parser uses the following conventions:
# a <type>_block defines a block of <type> definitions in the form of:
# [comment] [ext_attr_block] <type> <name> '{' <type>_list '}' ';'
# A block is reduced by returning an object of <type> with a name of <name>
# which in turn has <type>_list as children.
#
# A [comment] is an optional C style comment block enclosed in /* ... */ which
# is appended to the adjacent node as a child.
#
# A [ext_attr_block] is an optional list of Extended Attributes which is
# appended to the adjacent node as a child.
#
# a <type>_list defines a list of <type> items which will be passed as a
# list of children to the parent pattern. A list is in the form of:
# [comment] [ext_attr_block] <...DEF...> ';' <type>_list | (empty)
# or
# [comment] [ext_attr_block] <...DEF...> <type>_cont
#
# In the first form, the list is reduced recursively, where the right side
# <type>_list is first reduced then joined with pattern currently being
# matched. The list is terminated with the (empty) pattern is matched.
#
# In the second form the list is reduced recursively, where the right side
# <type>_cont is first reduced then joined with the pattern currently being
# matched. The type_<cont> is in the form of:
# ',' <type>_list | (empty)
# The <type>_cont form is used to consume the ',' which only occurs when
# there is more than one object in the list. The <type>_cont also provides
# the terminating (empty) definition.
#
class IDLParser(IDLLexer):
# TOP
#
# This pattern defines the top of the parse tree. The parse tree is in the
# the form of:
#
# top
# *modifiers
# *comments
# *ext_attr_block
# ext_attr_list
# attr_arg_list
# *integer, value
# *param_list
# *typeref
#
# top_list
# describe_block
# describe_list
# enum_block
# enum_item
# interface_block
# member
# label_block
# label_item
# struct_block
# member
# typedef_decl
# typedef_data
# typedef_func
#
# (* sub matches found at multiple levels and are not truly children of top)
#
# We force all input files to start with two comments. The first comment is a
# Copyright notice followed by a set of file wide Extended Attributes, followed
# by the file comment and finally by file level patterns.
#
# Find the Copyright, File comment, and optional file wide attributes. We
# use a match with COMMENT instead of comments to force the token to be
# present. The extended attributes and the top_list become siblings which
# in turn are children of the file object created from the results of top.
def p_top(self, p):
"""top : COMMENT COMMENT ext_attr_block top_list"""
Copyright = self.BuildComment('Copyright', p, 1)
Filedoc = self.BuildComment('Comment', p, 2)
p[0] = ListFromConcat(Copyright, Filedoc, p[3], p[4])
if self.parse_debug: DumpReduction('top', p)
def p_top_short(self, p):
"""top : COMMENT ext_attr_block top_list"""
Copyright = self.BuildComment('Copyright', p, 1)
Filedoc = IDLNode('Comment', self.lexobj.filename, p.lineno(2)-1,
p.lexpos(2)-1, [self.BuildAttribute('NAME', ''),
self.BuildAttribute('FORM', 'cc')])
p[0] = ListFromConcat(Copyright, Filedoc, p[2], p[3])
if self.parse_debug: DumpReduction('top', p)
# Build a list of top level items.
def p_top_list(self, p):
"""top_list : callback_decl top_list
| describe_block top_list
| dictionary_block top_list
| enum_block top_list
| inline top_list
| interface_block top_list
| label_block top_list
| namespace top_list
| struct_block top_list
| typedef_decl top_list
| bad_decl top_list
| """
if len(p) > 2:
p[0] = ListFromConcat(p[1], p[2])
if self.parse_debug: DumpReduction('top_list', p)
# Recover from error and continue parsing at the next top match.
def p_top_error(self, p):
"""top_list : error top_list"""
p[0] = p[2]
# Recover from error and continue parsing at the next top match.
def p_bad_decl(self, p):
"""bad_decl : modifiers SYMBOL error '}' ';'"""
p[0] = []
#
# Modifier List
#
#
def p_modifiers(self, p):
"""modifiers : comments ext_attr_block"""
p[0] = ListFromConcat(p[1], p[2])
if self.parse_debug: DumpReduction('modifiers', p)
#
# Comments
#
  # Comments are an optional list of C style comment objects.  Comments are returned
# as a list or None.
#
def p_comments(self, p):
"""comments : COMMENT comments
| """
if len(p) > 1:
child = self.BuildComment('Comment', p, 1)
p[0] = ListFromConcat(child, p[2])
if self.parse_debug: DumpReduction('comments', p)
else:
if self.parse_debug: DumpReduction('no comments', p)
#
# Namespace
#
# A namespace provides a named scope to an enclosed top_list.
#
def p_namespace(self, p):
"""namespace : modifiers NAMESPACE namespace_name '{' top_list '}' ';'"""
children = ListFromConcat(p[1], p[5])
p[0] = self.BuildNamed('Namespace', p, 3, children)
# We allow namespace names of the form foo.bar.baz.
def p_namespace_name(self, p):
"""namespace_name : SYMBOL
| SYMBOL '.' namespace_name"""
p[0] = "".join(p[1:])
#
# Dictionary
#
# A dictionary is a named list of optional and required members.
#
def p_dictionary_block(self, p):
"""dictionary_block : modifiers DICTIONARY SYMBOL '{' struct_list '}' ';'"""
p[0] = self.BuildNamed('Dictionary', p, 3, ListFromConcat(p[1], p[5]))
#
# Callback
#
# A callback is essentially a single function declaration (outside of an
# Interface).
#
def p_callback_decl(self, p):
"""callback_decl : modifiers CALLBACK SYMBOL '=' SYMBOL param_list ';'"""
children = ListFromConcat(p[1], p[6])
p[0] = self.BuildNamed('Callback', p, 3, children)
#
# Inline
#
  # Inline blocks define optional code to be emitted based on language tag,
# in the form of:
# #inline <LANGUAGE>
# <CODE>
# #endinl
#
def p_inline(self, p):
"""inline : modifiers INLINE"""
words = p[2].split()
name = self.BuildAttribute('NAME', words[1])
lines = p[2].split('\n')
value = self.BuildAttribute('VALUE', '\n'.join(lines[1:-1]) + '\n')
children = ListFromConcat(name, value, p[1])
p[0] = self.BuildProduction('Inline', p, 2, children)
if self.parse_debug: DumpReduction('inline', p)
# Extended Attributes
#
# Extended Attributes denote properties which will be applied to a node in the
  # AST.  A list of extended attributes is denoted by brackets '[' ... ']'
# enclosing a comma separated list of extended attributes in the form of:
#
# Name
# Name=HEX | INT | OCT | FLOAT
# Name="STRING"
# Name=Function(arg ...)
# TODO(noelallen) -Not currently supported:
# ** Name(arg ...) ...
# ** Name=Scope::Value
#
# Extended Attributes are returned as a list or None.
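  #
  # For example (illustrative only), an attribute list combining the forms
  # above might look like:
  #   [Name1, Name2=1.0, Name3="text", Name4=Func(arg)]
  # and is reduced into attribute nodes by the p_ext_attr_* rules below.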
def p_ext_attr_block(self, p):
"""ext_attr_block : '[' ext_attr_list ']'
| """
if len(p) > 1:
p[0] = p[2]
if self.parse_debug: DumpReduction('ext_attr_block', p)
else:
if self.parse_debug: DumpReduction('no ext_attr_block', p)
def p_ext_attr_list(self, p):
"""ext_attr_list : SYMBOL '=' SYMBOL ext_attr_cont
| SYMBOL '=' value ext_attr_cont
| SYMBOL '=' SYMBOL param_list ext_attr_cont
| SYMBOL ext_attr_cont"""
# If there are 4 tokens plus a return slot, this must be in the form
# SYMBOL = SYMBOL|value ext_attr_cont
if len(p) == 5:
p[0] = ListFromConcat(self.BuildAttribute(p[1], p[3]), p[4])
# If there are 5 tokens plus a return slot, this must be in the form
# SYMBOL = SYMBOL (param_list) ext_attr_cont
elif len(p) == 6:
member = self.BuildNamed('Member', p, 3, [p[4]])
p[0] = ListFromConcat(self.BuildAttribute(p[1], member), p[5])
# Otherwise, this must be: SYMBOL ext_attr_cont
else:
p[0] = ListFromConcat(self.BuildAttribute(p[1], 'True'), p[2])
if self.parse_debug: DumpReduction('ext_attribute_list', p)
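  # Illustrative sketch (added for clarity, not part of the original grammar
  # file): an attribute block such as [version=1.0, macro="NAME", feature]
  # reduces to three attributes -- 'version' with value '1.0', 'macro' with
  # value "NAME", and 'feature' with the implicit value 'True'.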
def p_ext_attr_list_values(self, p):
"""ext_attr_list : SYMBOL '=' '(' values ')' ext_attr_cont
| SYMBOL '=' '(' symbols ')' ext_attr_cont"""
p[0] = ListFromConcat(self.BuildAttribute(p[1], p[4]), p[6])
def p_values(self, p):
"""values : value values_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_symbols(self, p):
"""symbols : SYMBOL symbols_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_symbols_cont(self, p):
"""symbols_cont : ',' SYMBOL symbols_cont
| """
if len(p) > 1: p[0] = ListFromConcat(p[2], p[3])
def p_values_cont(self, p):
"""values_cont : ',' value values_cont
| """
if len(p) > 1: p[0] = ListFromConcat(p[2], p[3])
def p_ext_attr_cont(self, p):
"""ext_attr_cont : ',' ext_attr_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('ext_attribute_cont', p)
def p_ext_attr_func(self, p):
"""ext_attr_list : SYMBOL '(' attr_arg_list ')' ext_attr_cont"""
p[0] = ListFromConcat(self.BuildAttribute(p[1] + '()', p[3]), p[5])
if self.parse_debug: DumpReduction('attr_arg_func', p)
def p_ext_attr_arg_list(self, p):
"""attr_arg_list : SYMBOL attr_arg_cont
| value attr_arg_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_attr_arg_cont(self, p):
"""attr_arg_cont : ',' attr_arg_list
| """
if self.parse_debug: DumpReduction('attr_arg_cont', p)
if len(p) > 1: p[0] = p[2]
def p_attr_arg_error(self, p):
"""attr_arg_cont : error attr_arg_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('attr_arg_error', p)
#
# Describe
#
# A describe block is defined at the top level. It provides a mechanism for
# attributing a group of ext_attr to a describe_list. Members of the
  # describe list are language-specific 'Type' declarations.
#
def p_describe_block(self, p):
"""describe_block : modifiers DESCRIBE '{' describe_list '}' ';'"""
children = ListFromConcat(p[1], p[4])
p[0] = self.BuildProduction('Describe', p, 2, children)
if self.parse_debug: DumpReduction('describe_block', p)
# Recover from describe error and continue parsing at the next top match.
def p_describe_error(self, p):
"""describe_list : error describe_list"""
p[0] = []
def p_describe_list(self, p):
"""describe_list : modifiers SYMBOL ';' describe_list
| modifiers ENUM ';' describe_list
| modifiers STRUCT ';' describe_list
| modifiers TYPEDEF ';' describe_list
| """
if len(p) > 1:
Type = self.BuildNamed('Type', p, 2, p[1])
p[0] = ListFromConcat(Type, p[4])
#
# Constant Values (integer, value)
#
  # Constant values can be found at various levels. A constant value is returned
  # as the string value after being validated against a FLOAT, HEX, INT, OCT or
# STRING pattern as appropriate.
#
def p_value(self, p):
"""value : FLOAT
| HEX
| INT
| OCT
| STRING"""
p[0] = p[1]
if self.parse_debug: DumpReduction('value', p)
def p_value_lshift(self, p):
"""value : integer LSHIFT INT"""
p[0] = "%s << %s" % (p[1], p[3])
if self.parse_debug: DumpReduction('value', p)
  # Integers are numbers which may not be floats; they are used in cases like array sizes.
def p_integer(self, p):
"""integer : HEX
| INT
| OCT"""
p[0] = p[1]
if self.parse_debug: DumpReduction('integer', p)
#
# Expression
#
# A simple arithmetic expression.
#
precedence = (
('left','|','&','^'),
('left','LSHIFT','RSHIFT'),
('left','+','-'),
('left','*','/'),
('right','UMINUS','~'),
)
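  # Illustrative note (added for clarity, not in the original): the table above
  # resolves shift/reduce conflicts, e.g. in '- 2 + 3' the unary minus (UMINUS)
  # binds tighter than '+', so '-2' is reduced first; '*' and '/' likewise bind
  # tighter than '+' and '-'.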
def p_expression_binop(self, p):
"""expression : expression LSHIFT expression
| expression RSHIFT expression
| expression '|' expression
| expression '&' expression
| expression '^' expression
| expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression"""
p[0] = "%s %s %s" % (str(p[1]), str(p[2]), str(p[3]))
if self.parse_debug: DumpReduction('expression_binop', p)
def p_expression_unop(self, p):
"""expression : '-' expression %prec UMINUS
| '~' expression %prec '~'"""
p[0] = "%s%s" % (str(p[1]), str(p[2]))
if self.parse_debug: DumpReduction('expression_unop', p)
def p_expression_term(self, p):
"expression : '(' expression ')'"
p[0] = "%s%s%s" % (str(p[1]), str(p[2]), str(p[3]))
if self.parse_debug: DumpReduction('expression_term', p)
def p_expression_symbol(self, p):
"expression : SYMBOL"
p[0] = p[1]
if self.parse_debug: DumpReduction('expression_symbol', p)
def p_expression_integer(self, p):
"expression : integer"
p[0] = p[1]
if self.parse_debug: DumpReduction('expression_integer', p)
#
# Array List
#
  # Defines a list of array sizes (if any).
#
def p_arrays(self, p):
"""arrays : '[' ']' arrays
| '[' integer ']' arrays
| """
# If there are 3 tokens plus a return slot it is an unsized array
if len(p) == 4:
array = self.BuildProduction('Array', p, 1)
p[0] = ListFromConcat(array, p[3])
# If there are 4 tokens plus a return slot it is a fixed array
elif len(p) == 5:
count = self.BuildAttribute('FIXED', p[2])
array = self.BuildProduction('Array', p, 2, [count])
p[0] = ListFromConcat(array, p[4])
# If there is only a return slot, do not fill it for this terminator.
elif len(p) == 1: return
if self.parse_debug: DumpReduction('arrays', p)
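  # Illustrative sketch (added for clarity, not from the original file): in a
  # declaration such as 'int8_t[] data' the '[' ']' form produces an unsized
  # Array node, while 'int8_t[8] data' produces an Array node carrying a
  # FIXED attribute of '8'.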
# An identifier is a legal value for a parameter or attribute name. Lots of
# existing IDL files use "callback" as a parameter/attribute name, so we allow
# a SYMBOL or the CALLBACK keyword.
def p_identifier(self, p):
"""identifier : SYMBOL
| CALLBACK"""
p[0] = p[1]
# Save the line number of the underlying token (otherwise it gets
# discarded), since we use it in the productions with an identifier in
# them.
p.set_lineno(0, p.lineno(1))
#
# Parameter List
#
# A parameter list is a collection of arguments which are passed to a
# function.
#
def p_param_list(self, p):
"""param_list : '(' param_item param_cont ')'
| '(' ')' """
if len(p) > 3:
args = ListFromConcat(p[2], p[3])
else:
args = []
p[0] = self.BuildProduction('Callspec', p, 1, args)
if self.parse_debug: DumpReduction('param_list', p)
def p_param_item(self, p):
"""param_item : modifiers optional SYMBOL arrays identifier"""
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], p[2], typeref, p[4])
p[0] = self.BuildNamed('Param', p, 5, children)
if self.parse_debug: DumpReduction('param_item', p)
def p_optional(self, p):
"""optional : OPTIONAL
| """
if len(p) == 2:
p[0] = self.BuildAttribute('OPTIONAL', True)
def p_param_cont(self, p):
"""param_cont : ',' param_item param_cont
| """
if len(p) > 1:
p[0] = ListFromConcat(p[2], p[3])
if self.parse_debug: DumpReduction('param_cont', p)
def p_param_error(self, p):
"""param_cont : error param_cont"""
p[0] = p[2]
#
# Typedef
#
  # A typedef creates a new referenceable type. The typedef can specify an array
# definition as well as a function declaration.
#
def p_typedef_data(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL SYMBOL ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref)
p[0] = self.BuildNamed('Typedef', p, 4, children)
if self.parse_debug: DumpReduction('typedef_data', p)
def p_typedef_array(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL arrays SYMBOL ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref, p[4])
p[0] = self.BuildNamed('Typedef', p, 5, children)
if self.parse_debug: DumpReduction('typedef_array', p)
def p_typedef_func(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL SYMBOL param_list ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref, p[5])
p[0] = self.BuildNamed('Typedef', p, 4, children)
if self.parse_debug: DumpReduction('typedef_func', p)
#
# Enumeration
#
# An enumeration is a set of named integer constants. An enumeration
  # is a valid type which can be referenced in other definitions.
#
def p_enum_block(self, p):
"""enum_block : modifiers ENUM SYMBOL '{' enum_list '}' ';'"""
p[0] = self.BuildNamed('Enum', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('enum_block', p)
# Recover from enum error and continue parsing at the next top match.
def p_enum_errorA(self, p):
"""enum_block : modifiers ENUM error '{' enum_list '}' ';'"""
p[0] = []
def p_enum_errorB(self, p):
"""enum_block : modifiers ENUM error ';'"""
p[0] = []
def p_enum_list(self, p):
"""enum_list : modifiers SYMBOL '=' expression enum_cont
| modifiers SYMBOL enum_cont"""
if len(p) > 4:
val = self.BuildAttribute('VALUE', p[4])
enum = self.BuildNamed('EnumItem', p, 2, ListFromConcat(val, p[1]))
p[0] = ListFromConcat(enum, p[5])
else:
enum = self.BuildNamed('EnumItem', p, 2, p[1])
p[0] = ListFromConcat(enum, p[3])
if self.parse_debug: DumpReduction('enum_list', p)
def p_enum_cont(self, p):
"""enum_cont : ',' enum_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('enum_cont', p)
def p_enum_cont_error(self, p):
"""enum_cont : error enum_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('enum_error', p)
#
# Label
#
  # A label is a special kind of enumeration which allows us to go from a
  # set of label names to their numeric values.
#
def p_label_block(self, p):
"""label_block : modifiers LABEL SYMBOL '{' label_list '}' ';'"""
p[0] = self.BuildNamed('Label', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('label_block', p)
def p_label_list(self, p):
"""label_list : modifiers SYMBOL '=' FLOAT label_cont"""
val = self.BuildAttribute('VALUE', p[4])
label = self.BuildNamed('LabelItem', p, 2, ListFromConcat(val, p[1]))
p[0] = ListFromConcat(label, p[5])
if self.parse_debug: DumpReduction('label_list', p)
def p_label_cont(self, p):
"""label_cont : ',' label_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('label_cont', p)
def p_label_cont_error(self, p):
"""label_cont : error label_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('label_error', p)
#
# Members
#
# A member attribute or function of a struct or interface.
#
def p_member_attribute(self, p):
"""member_attribute : modifiers SYMBOL arrays questionmark identifier"""
typeref = self.BuildAttribute('TYPEREF', p[2])
children = ListFromConcat(p[1], typeref, p[3], p[4])
p[0] = self.BuildNamed('Member', p, 5, children)
if self.parse_debug: DumpReduction('attribute', p)
def p_member_function(self, p):
"""member_function : modifiers static SYMBOL SYMBOL param_list"""
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], p[2], typeref, p[5])
p[0] = self.BuildNamed('Member', p, 4, children)
if self.parse_debug: DumpReduction('function', p)
def p_static(self, p):
"""static : STATIC
| """
if len(p) == 2:
p[0] = self.BuildAttribute('STATIC', True)
def p_questionmark(self, p):
"""questionmark : '?'
| """
if len(p) == 2:
p[0] = self.BuildAttribute('OPTIONAL', True)
#
# Interface
#
# An interface is a named collection of functions.
#
def p_interface_block(self, p):
"""interface_block : modifiers INTERFACE SYMBOL '{' interface_list '}' ';'"""
p[0] = self.BuildNamed('Interface', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('interface_block', p)
def p_interface_error(self, p):
"""interface_block : modifiers INTERFACE error '{' interface_list '}' ';'"""
p[0] = []
def p_interface_list(self, p):
"""interface_list : member_function ';' interface_list
| """
if len(p) > 1 :
p[0] = ListFromConcat(p[1], p[3])
if self.parse_debug: DumpReduction('interface_list', p)
#
# Struct
#
# A struct is a named collection of members which in turn reference other
# types. The struct is a referencable type.
#
def p_struct_block(self, p):
"""struct_block : modifiers STRUCT SYMBOL '{' struct_list '}' ';'"""
children = ListFromConcat(p[1], p[5])
p[0] = self.BuildNamed('Struct', p, 3, children)
if self.parse_debug: DumpReduction('struct_block', p)
# Recover from struct error and continue parsing at the next top match.
def p_struct_error(self, p):
"""enum_block : modifiers STRUCT error '{' struct_list '}' ';'"""
p[0] = []
def p_struct_list(self, p):
"""struct_list : member_attribute ';' struct_list
| member_function ';' struct_list
|"""
if len(p) > 1: p[0] = ListFromConcat(p[1], p[3])
#
# Parser Errors
#
# p_error is called whenever the parser can not find a pattern match for
# a set of items from the current state. The p_error function defined here
  # is triggered, logging an error, and parsing recovery happens as the
# p_<type>_error functions defined above are called. This allows the parser
# to continue so as to capture more than one error per file.
#
def p_error(self, t):
filename = self.lexobj.filename
self.parse_errors += 1
if t:
lineno = t.lineno
pos = t.lexpos
prev = self.yaccobj.symstack[-1]
if type(prev) == lex.LexToken:
msg = "Unexpected %s after %s." % (
TokenTypeName(t), TokenTypeName(prev))
else:
msg = "Unexpected %s." % (t.value)
else:
lineno = self.last.lineno
pos = self.last.lexpos
msg = "Unexpected end of file after %s." % TokenTypeName(self.last)
self.yaccobj.restart()
# Attempt to remap the error to a friendlier form
if msg in ERROR_REMAP:
msg = ERROR_REMAP[msg]
# Log the error
ErrOut.LogLine(filename, lineno, pos, msg)
def Warn(self, node, msg):
WarnOut.LogLine(node.filename, node.lineno, node.pos, msg)
self.parse_warnings += 1
def __init__(self):
IDLLexer.__init__(self)
self.yaccobj = yacc.yacc(module=self, tabmodule=None, debug=False,
optimize=0, write_tables=0)
self.build_debug = GetOption('build_debug')
self.parse_debug = GetOption('parse_debug')
self.token_debug = GetOption('token_debug')
self.verbose = GetOption('verbose')
self.parse_errors = 0
#
# Tokenizer
#
# The token function returns the next token provided by IDLLexer for matching
  # against the leaf patterns.
#
def token(self):
tok = self.lexobj.token()
if tok:
self.last = tok
if self.token_debug:
InfoOut.Log("TOKEN %s(%s)" % (tok.type, tok.value))
return tok
#
# BuildProduction
#
# Production is the set of items sent to a grammar rule resulting in a new
# item being returned.
#
# p - Is the Yacc production object containing the stack of items
# index - Index into the production of the name for the item being produced.
  # cls - The type of item being produced
# childlist - The children of the new item
def BuildProduction(self, cls, p, index, childlist=None):
if not childlist: childlist = []
filename = self.lexobj.filename
lineno = p.lineno(index)
pos = p.lexpos(index)
out = IDLNode(cls, filename, lineno, pos, childlist)
if self.build_debug:
InfoOut.Log("Building %s" % out)
return out
def BuildNamed(self, cls, p, index, childlist=None):
if not childlist: childlist = []
childlist.append(self.BuildAttribute('NAME', p[index]))
return self.BuildProduction(cls, p, index, childlist)
def BuildComment(self, cls, p, index):
name = p[index]
# Remove comment markers
lines = []
if name[:2] == '//':
# For C++ style, remove any leading whitespace and the '//' marker from
# each line.
form = 'cc'
for line in name.split('\n'):
start = line.find('//')
lines.append(line[start+2:])
else:
      # For C style, remove the ending '*/'
form = 'c'
for line in name[:-2].split('\n'):
        # Remove characters up to this line's start marker '*' if found;
        # otherwise the line should be blank.
offs = line.find('*')
if offs >= 0:
line = line[offs + 1:].rstrip()
else:
line = ''
lines.append(line)
name = '\n'.join(lines)
childlist = [self.BuildAttribute('NAME', name),
self.BuildAttribute('FORM', form)]
return self.BuildProduction(cls, p, index, childlist)
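  # Illustrative sketch (added for clarity, not part of the original file): a
  # C++ style token such as '// A widget.' becomes a Comment node with FORM
  # 'cc' and the '//' marker stripped from each line, while a C style
  # '/* A widget. */' becomes FORM 'c' with the '*/' terminator and the
  # leading '*' markers stripped from each line.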
#
# BuildAttribute
#
# An ExtendedAttribute is a special production that results in a property
# which is applied to the adjacent item. Attributes have no children and
# instead represent key/value pairs.
#
def BuildAttribute(self, key, val):
return IDLAttribute(key, val)
#
# ParseData
#
# Attempts to parse the current data loaded in the lexer.
#
def ParseData(self, data, filename='<Internal>'):
self.SetData(filename, data)
try:
self.parse_errors = 0
self.parse_warnings = 0
return self.yaccobj.parse(lexer=self)
except lex.LexError as le:
ErrOut.Log(str(le))
return []
#
# ParseFile
#
  # Loads a new file into the lexer and attempts to parse it.
#
def ParseFile(self, filename):
date = time.ctime(os.path.getmtime(filename))
data = open(filename).read()
if self.verbose:
InfoOut.Log("Parsing %s" % filename)
try:
out = self.ParseData(data, filename)
# If we have a src root specified, remove it from the path
srcroot = GetOption('srcroot')
if srcroot and filename.find(srcroot) == 0:
filename = filename[len(srcroot) + 1:]
filenode = IDLFile(filename, out, self.parse_errors + self.lex_errors)
filenode.SetProperty('DATETIME', date)
return filenode
except Exception as e:
ErrOut.LogLine(filename, self.last.lineno, self.last.lexpos,
'Internal parsing error - %s.' % str(e))
raise
#
# Flatten Tree
#
# Flattens the tree of IDLNodes for use in testing.
#
def FlattenTree(node):
add_self = False
out = []
for child in node.children:
if child.IsA('Comment'):
add_self = True
else:
out.extend(FlattenTree(child))
if add_self:
out = [str(node)] + out
return out
def TestErrors(filename, filenode):
nodelist = filenode.GetChildren()
lexer = IDLLexer()
data = open(filename).read()
lexer.SetData(filename, data)
pass_comments = []
fail_comments = []
while True:
tok = lexer.lexobj.token()
    if tok is None: break
if tok.type == 'COMMENT':
args = tok.value[3:-3].split()
if args[0] == 'OK':
pass_comments.append((tok.lineno, ' '.join(args[1:])))
else:
if args[0] == 'FAIL':
fail_comments.append((tok.lineno, ' '.join(args[1:])))
obj_list = []
for node in nodelist:
obj_list.extend(FlattenTree(node))
errors = 0
#
# Check for expected successes
#
obj_cnt = len(obj_list)
pass_cnt = len(pass_comments)
if obj_cnt != pass_cnt:
InfoOut.Log("Mismatched pass (%d) vs. nodes built (%d)."
% (pass_cnt, obj_cnt))
InfoOut.Log("PASS: %s" % [x[1] for x in pass_comments])
InfoOut.Log("OBJS: %s" % obj_list)
errors += 1
if pass_cnt > obj_cnt: pass_cnt = obj_cnt
for i in range(pass_cnt):
line, comment = pass_comments[i]
if obj_list[i] != comment:
ErrOut.LogLine(filename, line, None, "OBJ %s : EXPECTED %s\n" %
(obj_list[i], comment))
errors += 1
#
# Check for expected errors
#
err_list = ErrOut.DrainLog()
err_cnt = len(err_list)
fail_cnt = len(fail_comments)
if err_cnt != fail_cnt:
InfoOut.Log("Mismatched fail (%d) vs. errors seen (%d)."
% (fail_cnt, err_cnt))
InfoOut.Log("FAIL: %s" % [x[1] for x in fail_comments])
InfoOut.Log("ERRS: %s" % err_list)
errors += 1
if fail_cnt > err_cnt: fail_cnt = err_cnt
for i in range(fail_cnt):
line, comment = fail_comments[i]
err = err_list[i].strip()
    if err != comment:
ErrOut.Log("%s(%d) Error\n\tERROR : %s\n\tEXPECT: %s" % (
filename, line, err_list[i], comment))
errors += 1
# Clear the error list for the next run
err_list = []
return errors
def TestFile(parser, filename):
# Capture errors instead of reporting them so we can compare them
# with the expected errors.
ErrOut.SetConsole(False)
ErrOut.SetCapture(True)
filenode = parser.ParseFile(filename)
  # Re-enable output
ErrOut.SetConsole(True)
ErrOut.SetCapture(False)
# Compare captured errors
return TestErrors(filename, filenode)
def TestErrorFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_parser', '*.idl')
filenames = glob.glob(idldir)
parser = IDLParser()
total_errs = 0
for filename in filenames:
if filter and filename not in filter: continue
errs = TestFile(parser, filename)
if errs:
ErrOut.Log("%s test failed with %d error(s)." % (filename, errs))
total_errs += errs
if total_errs:
ErrOut.Log("Failed parsing test.")
else:
InfoOut.Log("Passed parsing test.")
return total_errs
def TestNamespaceFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_namespace', '*.idl')
filenames = glob.glob(idldir)
testnames = []
for filename in filenames:
if filter and filename not in filter: continue
testnames.append(filename)
# If we have no files to test, then skip this test
if not testnames:
InfoOut.Log('No files to test for namespace.')
return 0
InfoOut.SetConsole(False)
ast = ParseFiles(testnames)
InfoOut.SetConsole(True)
errs = ast.GetProperty('ERRORS')
if errs:
ErrOut.Log("Failed namespace test.")
else:
InfoOut.Log("Passed namespace test.")
return errs
def FindVersionError(releases, node):
err_cnt = 0
if node.IsA('Interface', 'Struct'):
comment_list = []
comment = node.GetOneOf('Comment')
if comment and comment.GetName()[:4] == 'REL:':
comment_list = comment.GetName()[5:].strip().split(' ')
first_list = [node.first_release[rel] for rel in releases]
first_list = sorted(set(first_list))
if first_list != comment_list:
node.Error("Mismatch in releases: %s vs %s." % (
comment_list, first_list))
err_cnt += 1
for child in node.GetChildren():
err_cnt += FindVersionError(releases, child)
return err_cnt
def TestVersionFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_version', '*.idl')
filenames = glob.glob(idldir)
testnames = []
for filename in filenames:
if filter and filename not in filter: continue
testnames.append(filename)
# If we have no files to test, then skip this test
if not testnames:
InfoOut.Log('No files to test for version.')
return 0
ast = ParseFiles(testnames)
errs = FindVersionError(ast.releases, ast)
errs += ast.errors
if errs:
ErrOut.Log("Failed version test.")
else:
InfoOut.Log("Passed version test.")
return errs
default_dirs = ['.', 'trusted', 'dev', 'private', 'extensions',
'extensions/dev']
def ParseFiles(filenames):
parser = IDLParser()
filenodes = []
if not filenames:
filenames = []
srcroot = GetOption('srcroot')
dirs = default_dirs
if GetOption('include_private'):
dirs += ['private']
for dirname in dirs:
srcdir = os.path.join(srcroot, dirname, '*.idl')
srcdir = os.path.normpath(srcdir)
filenames += sorted(glob.glob(srcdir))
if not filenames:
ErrOut.Log('No sources provided.')
for filename in filenames:
filenode = parser.ParseFile(filename)
filenodes.append(filenode)
ast = IDLAst(filenodes)
if GetOption('dump_tree'): ast.Dump(0)
Lint(ast)
return ast
def Main(args):
filenames = ParseOptions(args)
# If testing...
if GetOption('test'):
errs = TestErrorFiles(filenames)
errs = TestNamespaceFiles(filenames)
errs = TestVersionFiles(filenames)
if errs:
ErrOut.Log("Parser failed with %d errors." % errs)
return -1
return 0
# Otherwise, build the AST
ast = ParseFiles(filenames)
errs = ast.GetProperty('ERRORS')
if errs:
    ErrOut.Log('Found %d error(s).' % errs)
InfoOut.Log("%d files processed." % len(filenames))
return errs
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause | 4,310,717,186,275,542,500 | 29.711961 | 81 | 0.621115 | false |
RichardLitt/wyrd-django-dev | tests/modeltests/custom_methods/tests.py | 150 | 1196 | from __future__ import absolute_import
from datetime import date
from django.test import TestCase
from .models import Article
class MethodsTests(TestCase):
def test_custom_methods(self):
a = Article.objects.create(
headline="Area man programs in Python", pub_date=date(2005, 7, 27)
)
b = Article.objects.create(
headline="Beatles reunite", pub_date=date(2005, 7, 27)
)
self.assertFalse(a.was_published_today())
self.assertQuerysetEqual(
a.articles_from_same_day_1(), [
"Beatles reunite",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
a.articles_from_same_day_2(), [
"Beatles reunite",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
b.articles_from_same_day_1(), [
"Area man programs in Python",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
b.articles_from_same_day_2(), [
"Area man programs in Python",
],
lambda a: a.headline
)
| bsd-3-clause | -2,435,113,597,600,341,500 | 26.181818 | 78 | 0.526756 | false |
kevintaw/django | django/contrib/gis/db/models/query.py | 16 | 36639 | import warnings
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import (
GeometryField, LineStringField, PointField, get_srid_info,
)
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.sql import (
AreaField, DistanceField, GeomField, GMLField,
)
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.db import connections
from django.db.models.expressions import RawSQL
from django.db.models.fields import Field
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
)
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
# ### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
# Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup(
'area', field_name=kwargs.get('field_name'))
s = {'procedure_args': procedure_args,
'geo_field': geo_field,
'setup': False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
warnings.warn(
"The collect GeoQuerySet method is deprecated. Use the Collect() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
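    # Illustrative usage (assumes a hypothetical model `City` with a PointField
    # and a Point instance `pnt`; not part of the original module):
    #
    #   qs = City.objects.distance(pnt)                 # each result gains `distance`
    #   qs = City.objects.distance(pnt, spheroid=True)  # geodetic, more accurate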
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
warnings.warn(
"The extent GeoQuerySet method is deprecated. Use the Extent() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
warnings.warn(
"The extent3d GeoQuerySet method is deprecated. Use the Extent3D() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
s = {'desc': 'GeoJSON',
'procedure_args': {'precision': precision, 'options': options},
'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
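    # Illustrative usage (hypothetical model `City`, not part of the original
    # module): City.objects.geojson(precision=6, bbox=True) attaches a
    # `geojson` string to each result with coordinates rounded to 6 digits and
    # a bounding box included.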
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
        The `precision` keyword may be used to customize the number of
_characters_ used in the output GeoHash, the default is 20.
"""
s = {'desc': 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc': 'GML', 'procedure_args': {'precision': precision}}
if backend.postgis:
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
s['procedure_args'] = {'precision': precision, 'version': version}
if backend.oracle:
s['select_field'] = GMLField()
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc': 'KML',
'procedure_fmt': '%(geo_col)s,%(precision)s',
'procedure_args': {'precision': kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
        Creates a linestring from all of the PointField geometries in
this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
warnings.warn(
"The make_line GeoQuerySet method is deprecated. Use the MakeLine() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
        sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field': GeomField()}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
raise TypeError('Size argument(s) for the grid must be a float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size': size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize,
'xorigin': xorigin, 'yorigin': yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt': procedure_fmt,
'procedure_args': procedure_args,
'select_field': GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
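    # Illustrative usage (hypothetical queryset `qs`, not part of the original
    # module):
    #
    #   qs.snap_to_grid(0.01)                   # snap X and Y to a 0.01 grid
    #   qs.snap_to_grid(0.01, 0.02)             # separate X and Y sizes
    #   qs.snap_to_grid(0.01, 0.02, 5.0, 23.0)  # X, Y sizes plus X, Y origins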
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {
'desc': 'SVG',
'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args': {
'rel': relative,
'precision': precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name')
self._spatial_setup('transform', field_name=field_name)
self.query.add_context('transformed_srid', srid)
return self._clone()
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
warnings.warn(
"The unionagg GeoQuerySet method is deprecated. Use the Union() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Union, **kwargs)
# ### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None:
desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function': func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle:
agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
          Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
warnings.warn(
"The %s GeoQuerySet method is deprecated. See GeoDjango Functions "
"documentation to find the expression-based replacement." % att,
RemovedInDjango21Warning, stacklevel=2
)
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(
att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type'))
for k, v in six.iteritems(default_args):
settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types):
model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, None, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field'):
select_field = settings['select_field']
if connection.ops.oracle:
select_field.empty_strings_allowed = False
else:
select_field = Field()
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
self.query.add_annotation(
RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field),
model_att)
return self
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name'))
# If geodetic defaulting distance attribute to meters (Oracle and
# PostGIS spherical distances return meters). Otherwise, use the
# units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
srid = self.query.get_context('transformed_srid')
if srid:
u, unit_name, s = get_srid_info(srid, connection)
geodetic = unit_name.lower() in geo_field.geodetic_units
if geodetic and not connection.features.supports_distance_geodetic:
raise ValueError(
'This database does not support linear distance '
'calculations on geodetic coordinate systems.'
)
if distance:
if srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, srid)
if geom.srid is None or geom.srid == srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += (', %s(%s(%%%%s, %s), %s)' % (
backend.transform, backend.from_text,
geom.srid, srid))
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
# procedures may only do queries from point columns to point geometries
                # so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(six.memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError(
'Spherical distance calculation only supported with '
'Point Geometry parameters'
)
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]})
else:
procedure_args.update({'function': backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]})
elif geom_3d and connection.features.supports_3d_functions:
# Use 3D variants of perimeter and length routines on supported backends.
if perimeter:
procedure_args.update({'function': backend.perimeter3d})
elif length:
procedure_args.update({'function': backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field': DistanceField(dist_att),
'setup': False,
'geo_field': geo_field,
'procedure_args': procedure_args,
'procedure_fmt': procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field': GeomField()}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance': tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {
'geom_args': ('geom',),
'select_field': GeomField(),
'procedure_fmt': '%(geo_col)s,%(geom)s',
'procedure_args': {'geom': geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
compiler = self.query.get_compiler(self.db)
opts = self.model._meta
if geo_field not in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
# Note: the operation really is defined as "must add select related!"
self.query.add_select_related([field_name])
# Call pre_sql_setup() so that compiler.select gets populated.
compiler.pre_sql_setup()
for col, _, _ in compiler.select:
if col.output_field == geo_field:
return col.as_sql(compiler, compiler.connection)[0]
raise ValueError("%r not in compiler's related_select_cols" % geo_field)
elif geo_field not in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
parent_model = geo_field.model._meta.concrete_model
return self._field_column(compiler, geo_field, parent_model._meta.db_table)
else:
return self._field_column(compiler, geo_field)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered or the one specified via
the `field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuerySet's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Incrementing until the first geographic field is found.
for field in self.model._meta.fields:
if isinstance(field, GeometryField):
return field
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GISLookup._check_geo_field(self.model._meta, field_name)
def _field_column(self, compiler, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuerySet` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None:
table_alias = compiler.query.get_meta().db_table
return "%s.%s" % (compiler.quote_name_unless_alias(table_alias),
compiler.connection.ops.quote_name(column or field.column))
| bsd-3-clause | 360,247,608,459,187,140 | 43.572993 | 114 | 0.584896 | false |
paolodedios/tensorflow | tensorflow/python/kernel_tests/embedding_ops_test.py | 6 | 48038 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ops used with embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import compat
def _AsLong(array):
"""Casts arrays elements to long type. Used to convert from numpy tf."""
return [int(x) for x in array]
class ScatterAddSubTest(test.TestCase):
def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):
"""Run a random test case with the given shape and indices.
Args:
shape: Shape of the parameters array.
indices: One-dimensional array of ints, the indices of the last dimension
of the parameters to update.
scatter_op: ScatterAdd or ScatterSub.
"""
super(ScatterAddSubTest, self).setUp()
with self.cached_session(use_gpu=False):
# Create a random parameter array of given shape
p_init = np.random.rand(*shape).astype("f")
# Create the shape of the update array. All dimensions except the last
# match the parameter array, the last dimension equals the # of indices.
vals_shape = [len(indices)] + shape[1:]
vals_init = np.random.rand(*vals_shape).astype("f")
v_i = [float(x) for x in vals_init.ravel()]
p = variables.Variable(p_init)
vals = constant_op.constant(v_i, shape=vals_shape, name="vals")
ind = constant_op.constant(indices, dtype=dtypes.int32)
p2 = scatter_op(p, ind, vals, name="updated_p")
# p = init
self.evaluate(variables.global_variables_initializer())
# p += vals
result = self.evaluate(p2)
# Compute the expected 'p' using numpy operations.
for i, ind in enumerate(indices):
if scatter_op == state_ops.scatter_add:
p_init.reshape(shape[0], -1)[ind, :] += (vals_init.reshape(
vals_shape[0], -1)[i, :])
else:
p_init.reshape(shape[0], -1)[ind, :] -= (vals_init.reshape(
vals_shape[0], -1)[i, :])
self.assertTrue(all((p_init == result).ravel()))
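  # Illustrative note (added for clarity, not in the original test): a call such
  # as self._TestCase([2, 2], [1, 1]) scatters two random update rows onto row 1
  # of a 2x2 parameter matrix, so row 1 accumulates both updates while row 0 is
  # left untouched.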
@test_util.run_deprecated_v1
def testNoRepetitions(self):
self._TestCase([2, 2], [1])
self._TestCase([4, 4, 4], [2, 0])
self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])
@test_util.run_deprecated_v1
def testWithRepetitions(self):
self._TestCase([2, 2], [1, 1])
self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
self._TestCase([32, 4, 4], [31] * 8)
@test_util.run_deprecated_v1
def testRandom(self):
# Random shapes of rank 4, random indices
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
self._TestCase(_AsLong(list(shape)), list(indices))
@test_util.run_deprecated_v1
def testSubRandom(self):
# Random shapes of rank 4, random indices
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
self._TestCase(_AsLong(list(shape)), list(indices), state_ops.scatter_sub)
@test_util.run_deprecated_v1
def testWrongShape(self):
# Indices and values mismatch.
var = variables.Variable(
array_ops.zeros(shape=[1024, 64, 64], dtype=dtypes.float32))
indices = array_ops.placeholder(dtypes.int32, shape=[32])
values = array_ops.placeholder(dtypes.float32, shape=[33, 64, 64])
with self.assertRaises(ValueError):
state_ops.scatter_add(var, indices, values)
# Var and values mismatch.
values = array_ops.placeholder(dtypes.float32, shape=[32, 64, 63])
with self.assertRaises(ValueError):
state_ops.scatter_add(var, indices, values)
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_shapeless_placeholder=False):
p = []
params = {}
feed_dict = {}
if not shape:
shape = [10]
for i in range(num_shards):
shard_shape = [vocab_size // num_shards] + shape
if i < vocab_size % num_shards: # Excess goes evenly on the first shards
shard_shape[0] += 1
param_name = _PName(i)
if use_shapeless_placeholder:
param = array_ops.placeholder(dtype, shape=None, name=param_name)
else:
param = constant_op.constant(
1.0, shape=shard_shape, dtype=dtype, name=param_name)
p.append(param)
np_type = "f" if dtype == dtypes.float32 else "d"
val = (np.random.rand(*shard_shape).astype(np_type)) + 1
params[param_name + ":0"] = val
feed_dict[param.name] = val
return p, params, feed_dict
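# Illustrative sketch (not from the original test file): with num_shards=5 and
# vocab_size=13 the helper above builds shard shapes [3, 10], [3, 10], [3, 10],
# [2, 10], [2, 10] -- the vocab_size % num_shards excess rows go to the first
# shards. A minimal call might look like:
#   p, params, feed_dict = _EmbeddingParams(5, 13)
#   [param.get_shape().as_list() for param in p]
#   # -> [[3, 10], [3, 10], [3, 10], [2, 10], [2, 10]]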
def _EmbeddingParamsAsPartitionedVariable(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_resource=False):
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, dtype=dtype, shape=shape)
shape = shape or [10]
partitioned_variable = variable_scope.get_variable(
"p",
shape=[vocab_size] + shape,
initializer=array_ops.concat([params[p_i.name] for p_i in p], 0),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=num_shards, min_slice_size=1),
use_resource=use_resource)
return p, partitioned_variable, params, feed_dict
def _EmbeddingResult(params,
id_vals,
num_shards,
vocab_size,
partition_strategy="mod",
weight_vals=None):
if weight_vals is None:
weight_vals = np.copy(id_vals)
weight_vals.fill(1)
values = []
weights = []
weights_squared = []
for ids, wts in zip(id_vals, weight_vals):
value_aggregation = None
weight_aggregation = None
squared_weight_aggregation = None
if isinstance(ids, compat.integral_types):
ids = [ids]
wts = [wts]
for i, weight_value in zip(ids, wts):
if partition_strategy == "mod":
val = np.copy(params[_PName(i % num_shards) + ":0"][
i // num_shards, :]) * weight_value
elif partition_strategy == "div":
ids_per_partition, extras = divmod(vocab_size, num_shards)
threshold = extras * (ids_per_partition + 1)
if i < threshold:
partition = i // (ids_per_partition + 1)
offset = i % (ids_per_partition + 1)
else:
partition = extras + (i - threshold) // ids_per_partition
offset = (i - threshold) % ids_per_partition
val = np.copy(
params[_PName(partition) + ":0"][offset, :]) * weight_value
else:
assert False
if value_aggregation is None:
assert weight_aggregation is None
assert squared_weight_aggregation is None
value_aggregation = val
weight_aggregation = weight_value
squared_weight_aggregation = weight_value * weight_value
else:
assert weight_aggregation is not None
assert squared_weight_aggregation is not None
value_aggregation += val
weight_aggregation += weight_value
squared_weight_aggregation += weight_value * weight_value
values.append(value_aggregation)
weights.append(weight_aggregation)
weights_squared.append(squared_weight_aggregation)
values = np.array(values).astype(np.float32)
weights = np.array(weights).astype(np.float32)
weights_squared = np.array(weights_squared).astype(np.float32)
return values, weights, weights_squared
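# Worked example (not from the original test file): for the "div" strategy with
# vocab_size=13 and num_shards=5, divmod gives ids_per_partition=2 and
# extras=3, so threshold = 3 * (2 + 1) = 9. Ids 0..8 land in partitions 0..2
# (3 ids each) and ids 9..12 in partitions 3..4 (2 ids each); e.g. id 10 maps
# to partition 3 + (10 - 9) // 2 = 3 with offset (10 - 9) % 2 = 1.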
class EmbeddingLookupTest(test.TestCase):
# This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
# both the ids are in the first shard, one of the resulting lookup
  # vectors is going to be empty. The subsequent DivOp fails because of that.
# TODO(keveman): Disabling the test until the underlying problem is fixed.
@test_util.run_deprecated_v1
def testSimpleSharded(self):
with self.cached_session():
num_shards = 2
vocab_size = 4
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testMaxNorm(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0]])
ids = constant_op.constant([0], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding, [[1.0]])
@test_util.run_deprecated_v1
def testMaxNormNontrivial(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0, 4.0], [3.0, 1.0]])
ids = constant_op.constant([0, 1], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=2.0)
norms = math_ops.sqrt(
math_ops.reduce_sum(embeddings * embeddings, axis=1))
normalized = embeddings / array_ops.stack([norms, norms], axis=1)
self.assertAllClose(embedding, 2 * self.evaluate(normalized))
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedResourceVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, _ = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size, use_resource=True)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
print(ops.get_default_graph().as_graph_def())
tf_result = self.evaluate(embedding)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32IdsPartitionedVariable(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
_, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
self.evaluate(variables.global_variables_initializer())
embedding = embedding_ops.embedding_lookup(
p_variable, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningUnknownParamShape(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
# We clear parameter shapes, to test when shape is not statically known.
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, use_shapeless_placeholder=True)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookup(self):
vocab_size = 9
num_ids = 10
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for ids_shape in [(10,), (2, 5)]:
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(
id_vals, shape=ids_shape, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
y = embedding_ops.embedding_lookup(x, ids)
y_shape = ids_shape + tuple(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupWithComputedParams(self):
vocab_size = 9
num_ids = 5
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(id_vals, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
# This will force a conversion from IndexedSlices to Tensor.
x_squared = [math_ops.square(elem) for elem in x]
y = embedding_ops.embedding_lookup(x_squared, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-3)
def testConstructionNonSharded(self):
with ops.Graph().as_default():
p = variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
embedding_ops.embedding_lookup([p], ids)
def testConstructionSharded(self):
with ops.Graph().as_default():
p = []
for _ in range(2):
p += [
variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
]
ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
embedding_ops.embedding_lookup(p, ids)
@test_util.run_deprecated_v1
def testHigherRank(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3):
params = np.random.randn(*params_shape)
for ids_shape in (3, 2), (4, 3):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids)
self.assertAllEqual(simple, array_ops.gather(params, ids))
# Run a few random sharded versions
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops.embedding_lookup(split_params, ids)
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testHigherRankMaxNorm(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3), (6, 2, 3):
# Test embedding rank 0, 1, 2.
# Note: the first dimension must be a common multiple of procs below.
params = 2 * np.ones(params_shape)
params_norm = params / np.sqrt(
np.sum(
params * params, tuple(range(params.ndim)[1:]), keepdims=True))
for ids_shape in (), (3), (4, 3), (2, 3, 4):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids, max_norm=1.0)
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops.embedding_lookup(
split_params, ids, max_norm=1.0)
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testTransform(self):
# This tests all combinations of:
# - ids rank 0, 1, >1
# - params sharded/unsharded
# It always applies max_norm.
np.random.seed(8)
l2_norm = 2.
with self.cached_session():
# Param values are in [l2_norm, l2_norm+1) so it will always clip.
params = np.random.rand(6, 3) + l2_norm
params_norm = l2_norm * params / np.sqrt(
np.sum(params * params, axis=1, keepdims=True))
# Compute the norm of each embedding. This will change the embedding
# rank to 0.
params_norm = np.linalg.norm(params_norm, axis=1)
transform = lambda x: linalg_ops.norm(x, axis=1)
for ids_shape in (), (3), (4, 3), (2, 3, 4):
# Test ids rank 0, 1, 2, 3.
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather.
simple = embedding_ops._embedding_lookup_and_transform(
params, ids, max_norm=l2_norm, transform_fn=transform)
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops._embedding_lookup_and_transform(
split_params, ids, max_norm=l2_norm, transform_fn=transform)
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, sharded)
def testRaggedMaxNorm(self):
embeddings = constant_op.constant([[2.0]])
ids = ragged_factory_ops.constant([[0, 0], [0]], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup([embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding, [[[1.0], [1.0]], [[1.0]]])
class EmbeddingLookupSparseTest(test.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size):
max_val_per_entry = 6
vals_per_batch_entry = np.random.randint(
1, max_val_per_entry, size=batch_size)
num_vals = np.sum(vals_per_batch_entry)
ids = np.random.randint(vocab_size, size=num_vals)
weights = 1 + np.random.rand(num_vals)
indices = []
for batch_entry, num_val in enumerate(vals_per_batch_entry):
for val_index in range(num_val):
indices.append([batch_entry, val_index])
shape = [batch_size, max_val_per_entry]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
@test_util.run_deprecated_v1
def testEmbeddingLookupSparse(self):
vocab_size = 13
batch_size = 10
param_shape = [2, 5]
expected_lookup_result_shape = [None] + param_shape
sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
self._RandomIdsAndWeights(batch_size, vocab_size))
grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
grouped_ignored_weights = self._GroupByBatchEntry(
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 5], ["sum", "mean", "sqrtn"],
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64],
[True, False]):
with self.cached_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
embedding_sum = embedding_ops.embedding_lookup_sparse(
p,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
self.assertEqual(embedding_sum.get_shape().as_list(),
expected_lookup_result_shape)
self.assertEqual(embedding_sum.dtype, dtype)
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
params,
grouped_ids,
num_shards,
vocab_size,
weight_vals=grouped_ignored_weights
if ignore_weights else grouped_weights)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
np_embedding_sum /= np.reshape(
np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
rtol = 1e-6
if dtype == dtypes.bfloat16:
rtol = 1e-2
elif dtype == dtypes.float16:
rtol = 1e-3
atol = rtol
self.assertAllClose(np_embedding_sum, tf_embedding_sum, rtol, atol)
def testMissingInSparseIds(self):
# Github issue, 36359
with self.test_session():
x = array_ops.ones((4, 5))
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant([[1, 0], [3, 0]], dtypes.int64),
constant_op.constant([0, 2], dtypes.int32),
constant_op.constant([4, 1], dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant([[1, 0], [3, 0]], dtypes.int64),
constant_op.constant([1, 1], dtypes.float32),
constant_op.constant([4, 1], dtypes.int64))
for combiner in ["sum", "mean", "sqrtn"]:
embedding_sum = embedding_ops.embedding_lookup_sparse(
x, sp_ids, sp_weights, combiner=combiner)
tf_embedding_sum = ops.convert_to_tensor(embedding_sum)
self.assertAllClose(tf_embedding_sum[0], np.zeros(5))
self.assertAllClose(tf_embedding_sum[1], np.ones(5))
self.assertAllClose(tf_embedding_sum[2], np.zeros(5))
self.assertAllClose(tf_embedding_sum[3], np.ones(5))
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupSparse(self):
vocab_size = 12
batch_size = 4
param_shape = [2, 3]
sp_ids, sp_weights, _, _, _ = (self._RandomIdsAndWeights(
batch_size, vocab_size))
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
with self.cached_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
y = embedding_ops.embedding_lookup_sparse(
x,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
@test_util.run_deprecated_v1
def testIncompatibleShapes(self):
with self.cached_session():
x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
constant_op.constant([0, 1, 2], dtypes.int32),
constant_op.constant([2, 2], dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
constant_op.constant([12.0, 5.0], dtypes.float32),
constant_op.constant([1, 2], dtypes.int64))
with self.assertRaises(ValueError):
embedding_ops.embedding_lookup_sparse(
x, sp_ids, sp_weights, combiner="mean")
class SafeEmbeddingLookupSparseTest(test.TestCase):
def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
assert vocab_size > 0
assert embed_dim > 0
assert num_shards > 0
assert num_shards <= vocab_size
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32)
embedding_weights = list(variable_scope.get_variable(
name="embedding_weights",
shape=[vocab_size, embed_dim],
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=initializer))
for w in embedding_weights:
self.evaluate(w.initializer)
embedding_weights = [self.evaluate(w) for w in embedding_weights]
return embedding_weights
def _ids_and_weights_2d(self):
# Each row demonstrates a test case:
# Row 0: multiple valid ids, 1 invalid id, weighted mean
# Row 1: all ids are invalid (leaving no valid ids after pruning)
# Row 2: no ids to begin with
# Row 3: single id
# Row 4: all ids have <=0 weight
indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [5, 4]
sparse_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def _ids_and_weights_3d(self):
# Each (2-D) index demonstrates a test case:
# Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
# Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
# Index 0, 2: no ids to begin with
# Index 1, 0: single id
# Index 1, 1: all ids have <=0 weight
# Index 1, 2: no ids to begin with
indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
[1, 1, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [2, 3, 4]
sparse_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids,
sparse_weights))
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights, default_id=3))
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3],
embedding_weights[0][2], embedding_weights[0][3]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
self.assertAllClose(
embedding_lookup_result,
[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
[0] * 4, embedding_weights[0][2], (
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result,
[(embedding_weights[0] + embedding_weights[1]) / 2.0,
[0] * 4, [0] * 4, embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids,
sparse_weights))
self.assertAllClose(embedding_lookup_result, [[
(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
[0] * 4, [0] * 4
], [embedding_weights[0][2], [0] * 4, [0] * 4]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights, default_id=3))
self.assertAllClose(
embedding_lookup_result,
[[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3]], [
embedding_weights[0][2], embedding_weights[0][3],
embedding_weights[0][3]
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
self.assertAllClose(embedding_lookup_result, [[(
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
0
] * 4], [
embedding_weights[0][2],
(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result, [[
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4, [0] * 4
], [
embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
class DynamicStitchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testCint32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testCint32Gpu(self):
with self.session():
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testInt32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testInt32Gpu(self):
with self.session():
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testSumGradArgs(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2, 3]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([2, 3, 5, 7]),
ops.convert_to_tensor([1, 1])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [2, 3, 1, 1])
# We expect that the values are merged in order.
@test_util.run_deprecated_v1
def testStitchOrder(self):
with self.cached_session():
indices = []
np_values = []
values = []
for _ in range(10):
indices.extend([ops.convert_to_tensor(np.arange(100).astype(np.int32))])
np_values.extend([np.random.uniform(size=100)])
values.extend([ops.convert_to_tensor(np_values[-1])])
stitched = data_flow_ops.dynamic_stitch(indices, values)
self.assertAllEqual(np_values[-1], stitched)
class ParallelDynamicStitchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testCint32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 4, 6]),
ops.convert_to_tensor([2, 3, 5])
]
values = [
ops.convert_to_tensor([12, 23, 34, 45]),
ops.convert_to_tensor([1, 2, 3])
]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values),
[12, 23, 1, 2, 34, 3, 45])
@test_util.run_deprecated_v1
def testInt32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 5, 6, 7]),
ops.convert_to_tensor([2, 4, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34, 45, 56]),
ops.convert_to_tensor([1, 3, 2])
]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values),
[12, 23, 1, 2, 3, 34, 45, 56])
@test_util.run_deprecated_v1
def testSimple(self):
with self.session(use_gpu=False):
indices = [ops.convert_to_tensor([0, 1]), ops.convert_to_tensor([2, 3])]
values = [ops.convert_to_tensor([2, 3]), ops.convert_to_tensor([1, 1])]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values), [2, 3, 1, 1])
if __name__ == "__main__":
test.main()
| apache-2.0 | 1,064,695,674,563,006,100 | 39.641286 | 80 | 0.624839 | false |
sergio-incaser/bank-payment | __unported__/account_banking/parsers/__init__.py | 14 | 1053 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,006,655,941,373,913,000 | 42.875 | 78 | 0.611586 | false |
trachelr/mne-python | mne/stats/parametric.py | 5 | 12542 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import numpy as np
from functools import reduce
from string import ascii_uppercase
from ..externals.six import string_types
from ..utils import deprecated
from ..fixes import matrix_rank
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def _f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test
p-value : float
The associated p-value from the F-distribution
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
       property is known as homoscedasticity.
    If these assumptions are not true for a given set of data, it may still be
    possible to use the Kruskal-Wallis H-test (`stats.kruskal`_) although with
    some loss of power.
The algorithm is from Heiman[2], pp.394-7.
    See scipy.stats.f_oneway, which should give the same results while
    being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
from scipy import stats
sf = stats.f.sf
n_classes = len(args)
n_samples_per_class = np.array([len(a) for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = reduce(lambda x, y: x + y,
[np.sum(a ** 2, axis=0) for a in args])
sums_args = [np.sum(a, axis=0) for a in args]
square_of_sums_alldata = reduce(lambda x, y: x + y, sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
    prob = sf(f, dfbn, dfwn)  # stats.f.sf expects (x, dfn, dfd)
return f, prob
def f_oneway(*args):
"""Call scipy.stats.f_oneway, but return only f-value"""
return _f_oneway(*args)[0]
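# Usage sketch (not part of the original module); the shapes are the point
# here, the actual numbers depend on the random draw:
#   >>> import numpy as np
#   >>> g1, g2, g3 = np.random.randn(3, 20, 5)  # 3 groups, 20 samples, 5 tests
#   >>> fvals, pvals = _f_oneway(g1, g2, g3)    # both arrays of shape (5,)
#   >>> fvals_only = f_oneway(g1, g2, g3)       # F-values only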
def _map_effects(n_factors, effects):
"""Map effects to indices"""
if n_factors > len(ascii_uppercase):
raise ValueError('Maximum number of factors supported is 26')
factor_names = list(ascii_uppercase[:n_factors])
if isinstance(effects, string_types):
if '*' in effects and ':' in effects:
raise ValueError('Not "*" and ":" permitted in effects')
elif '+' in effects and ':' in effects:
raise ValueError('Not "+" and ":" permitted in effects')
elif effects == 'all':
effects = None
elif len(effects) == 1 or ':' in effects:
effects = [effects]
elif '+' in effects:
# all main effects
effects = effects.split('+')
elif '*' in effects:
pass # handle later
else:
raise ValueError('"{0}" is not a valid option for "effects"'
.format(effects))
if isinstance(effects, list):
bad_names = [e for e in effects if e not in factor_names]
if len(bad_names) > 1:
            raise ValueError('Effect names: {0} are not valid. They should be '
                             'the first `n_factors` ({1}) characters from the '
                             'alphabet'.format(bad_names, n_factors))
indices = list(np.arange(2 ** n_factors - 1))
names = list()
for this_effect in indices:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
this_code = (n_factors - 1) - np.where(contrast_idx == 1)[0]
this_name = [factor_names[e] for e in this_code]
this_name.sort()
names.append(':'.join(this_name))
if effects is None or isinstance(effects, string_types):
effects_ = names
else:
effects_ = effects
selection = [names.index(sel) for sel in effects_]
names = [names[sel] for sel in selection]
if isinstance(effects, string_types):
if '*' in effects:
# hierarchical order of effects
# the * based effect can be used as stop index
sel_ind = names.index(effects.replace('*', ':')) + 1
names = names[:sel_ind]
selection = selection[:sel_ind]
return selection, names
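# Illustrative example (not part of the original module): for a two-factor
# design the mapping above is expected to behave like
#   _map_effects(2, 'A*B')  # -> ([0, 1, 2], ['A', 'B', 'A:B'])
#   _map_effects(2, 'A+B')  # -> ([0, 1], ['A', 'B'])
#   _map_effects(2, 'A:B')  # -> ([2], ['A:B'])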
def _get_contrast_indices(effect_idx, n_factors):
"""Henson's factor coding, see num2binvec"""
binrepr = np.binary_repr(effect_idx, n_factors)
return np.array([int(i) for i in binrepr], dtype=int)
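# Worked example (not part of the original module): the coding expands the
# 1-based effect index into a binary vector over factors, e.g.
#   _get_contrast_indices(1, 2)  # -> array([0, 1]), main effect of A
#   _get_contrast_indices(2, 2)  # -> array([1, 0]), main effect of B
#   _get_contrast_indices(3, 2)  # -> array([1, 1]), A:B interaction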
def _iter_contrasts(n_subjects, factor_levels, effect_picks):
""" Aux Function: Setup contrasts """
from scipy.signal import detrend
sc = []
n_factors = len(factor_levels)
# prepare computation of Kronecker products
for n_levels in factor_levels:
# for each factor append
# 1) column vector of length == number of levels,
# 2) square matrix with diagonal == number of levels
# main + interaction effects for contrasts
sc.append([np.ones([n_levels, 1]),
detrend(np.eye(n_levels), type='constant')])
for this_effect in effect_picks:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
c_ = sc[0][contrast_idx[n_factors - 1]]
for i_contrast in range(1, n_factors):
this_contrast = contrast_idx[(n_factors - 1) - i_contrast]
c_ = np.kron(c_, sc[i_contrast][this_contrast])
df1 = matrix_rank(c_)
df2 = df1 * (n_subjects - 1)
yield c_, df1, df2
@deprecated('"f_threshold_twoway_rm" is deprecated and will be removed in'
'MNE-0.11. Please use f_threshold_mway_rm instead')
def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
pvalue=0.05):
return f_threshold_mway_rm(
n_subjects=n_subjects, factor_levels=factor_levels,
effects=effects, pvalue=pvalue)
def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B',
pvalue=0.05):
""" Compute f-value thesholds for a two-way ANOVA
Parameters
----------
n_subjects : int
The number of subjects to be analyzed.
factor_levels : list-like
The number of levels per factor.
effects : str
A string denoting the effect to be returned. The following
mapping is currently supported:
'A': main effect of A
'B': main effect of B
'A:B': interaction effect
'A+B': both main effects
'A*B': all three effects
pvalue : float
The p-value to be thresholded.
Returns
-------
f_threshold : list | float
        list of f-values for each effect if the number of effects
        requested > 1, else float.
See Also
--------
f_oneway
f_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
effect_picks, _ = _map_effects(len(factor_levels), effects)
f_threshold = []
for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
effect_picks):
f_threshold.append(f(df1, df2).isf(pvalue))
return f_threshold if len(f_threshold) > 1 else f_threshold[0]
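# Usage sketch (not part of the original module): for a 2 x 3 within-subject
# design with 20 subjects, asking only for the interaction returns one float,
#   f_thresh = f_threshold_mway_rm(20, factor_levels=[2, 3], effects='A:B')
# whereas effects='A*B' would return a list with one threshold per effect.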
# The following functions based on MATLAB code by Rik Henson
# and Python code from the pvttble toolbox by Roger Lew.
@deprecated('"f_twoway_rm" is deprecated and will be removed in MNE 0.11."'
" Please use f_mway_rm instead")
def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
correction=False, return_pvals=True):
"""This function is deprecated, use `f_mway_rm` instead"""
return f_mway_rm(data=data, factor_levels=factor_levels, effects=effects,
alpha=alpha, correction=correction,
return_pvals=return_pvals)
def f_mway_rm(data, factor_levels, effects='all', alpha=0.05,
correction=False, return_pvals=True):
"""M-way repeated measures ANOVA for fully balanced designs
Parameters
----------
data : ndarray
3D array where the first two dimensions are compliant
with a subjects X conditions scheme where the first
factor repeats slowest::
                        A1B1 A1B2 A2B1 A2B2
            subject 1   1.34 2.53 0.97 1.74
            subject ... .... .... .... ....
            subject k   2.45 7.90 3.09 4.76
        The last dimension is thought to carry the observations
for mass univariate analysis.
factor_levels : list-like
The number of levels per factor.
effects : str | list
A string denoting the effect to be returned. The following
mapping is currently supported (example with 2 factors):
* ``'A'``: main effect of A
* ``'B'``: main effect of B
* ``'A:B'``: interaction effect
* ``'A+B'``: both main effects
* ``'A*B'``: all three effects
* ``'all'``: all effects (equals 'A*B' in a 2 way design)
If list, effect names are used: ``['A', 'B', 'A:B']``.
alpha : float
The significance threshold.
correction : bool
The correction method to be employed if one factor has more than two
levels. If True, sphericity correction using the Greenhouse-Geisser
method will be applied.
return_pvals : bool
If True, return p values corresponding to f values.
Returns
-------
f_vals : ndarray
An array of f values with length corresponding to the number
of effects estimated. The shape depends on the number of effects
estimated.
p_vals : ndarray
If not requested via return_pvals, defaults to an empty array.
See Also
--------
f_oneway
f_threshold_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
if data.ndim == 2: # general purpose support, e.g. behavioural data
data = data[:, :, np.newaxis]
elif data.ndim > 3: # let's allow for some magic here.
data = data.reshape(
data.shape[0], data.shape[1], np.prod(data.shape[2:]))
effect_picks, _ = _map_effects(len(factor_levels), effects)
n_obs = data.shape[2]
n_replications = data.shape[0]
    # put the last axis in front to 'iterate' over mass univariate instances.
data = np.rollaxis(data, 2)
fvalues, pvalues = [], []
for c_, df1, df2 in _iter_contrasts(n_replications, factor_levels,
effect_picks):
y = np.dot(data, c_)
b = np.mean(y, axis=1)[:, np.newaxis, :]
ss = np.sum(np.sum(y * b, axis=2), axis=1)
mse = (np.sum(np.sum(y * y, axis=2), axis=1) - ss) / (df2 / df1)
fvals = ss / mse
fvalues.append(fvals)
if correction:
# sample covariances, leave off "/ (y.shape[1] - 1)" norm because
# it falls out.
v = np.array([np.dot(y_.T, y_) for y_ in y])
v = (np.array([np.trace(vv) for vv in v]) ** 2 /
(df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
eps = v
df1, df2 = np.zeros(n_obs) + df1, np.zeros(n_obs) + df2
if correction:
df1, df2 = [d[None, :] * eps for d in (df1, df2)]
if return_pvals:
pvals = f(df1, df2).sf(fvals)
else:
pvals = np.empty(0)
pvalues.append(pvals)
# handle single effect returns
return [np.squeeze(np.asarray(vv)) for vv in (fvalues, pvalues)]
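# Usage sketch (not part of the original module): for a fully balanced 2 x 2
# design (conditions ordered A1B1, A1B2, A2B1, A2B2) with 10 subjects and 50
# mass-univariate observations per condition,
#   data = np.random.randn(10, 4, 50)  # subjects x conditions x observations
#   fvals, pvals = f_mway_rm(data, factor_levels=[2, 2], effects='A*B')
# returns three F-maps and three p-value maps of length 50 (A, B and A:B).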
| bsd-3-clause | 5,728,243,986,000,986,000 | 34.131653 | 78 | 0.594323 | false |
stainsteelcrown/nonsense-story-generator | venv/lib/python2.7/site-packages/werkzeug/urls.py | 146 | 33150 | # -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
This module implements various URL related functions.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from werkzeug._compat import text_type, PY2, to_unicode, \
to_native, implements_to_string, try_coerce_native, \
normalize_string_tuple, make_literal_wrapper, \
fix_tuple_repr
from werkzeug._internal import _encode_idna, _decode_idna
from werkzeug.datastructures import MultiDict, iter_multi_items
from collections import namedtuple
# A regular expression for what a valid schema looks like
_scheme_re = re.compile(r'^[a-zA-Z0-9+-.]+$')
# Characters that are safe in any part of an URL.
_always_safe = (b'abcdefghijklmnopqrstuvwxyz'
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.-+')
_hexdigits = '0123456789ABCDEFabcdef'
_hextobyte = dict(
((a + b).encode(), int(a + b, 16))
for a in _hexdigits for b in _hexdigits
)
_URLTuple = fix_tuple_repr(namedtuple('_URLTuple',
['scheme', 'netloc', 'path', 'query', 'fragment']))
class _URLMixin(object):
__slots__ = ()
def replace(self, **kwargs):
"""Return an URL with the same values, except for those parameters
given new values by whichever keyword arguments are specified."""
return self._replace(**kwargs)
@property
def host(self):
"""The host part of the URL if available, otherwise `None`. The
host is either the hostname or the IP address mentioned in the
URL. It will not contain the port.
"""
return self._split_host()[0]
@property
def ascii_host(self):
"""Works exactly like :attr:`host` but will return a result that
is restricted to ASCII. If it finds a netloc that is not ASCII
it will attempt to idna decode it. This is useful for socket
operations when the URL might include internationalized characters.
"""
rv = self.host
if rv is not None and isinstance(rv, text_type):
rv = _encode_idna(rv)
return to_native(rv, 'ascii', 'ignore')
@property
def port(self):
"""The port in the URL as an integer if it was present, `None`
otherwise. This does not fill in default ports.
"""
try:
rv = int(to_native(self._split_host()[1]))
if 0 <= rv <= 65535:
return rv
except (ValueError, TypeError):
pass
@property
def auth(self):
"""The authentication part in the URL if available, `None`
otherwise.
"""
return self._split_netloc()[0]
@property
def username(self):
"""The username if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
"""
rv = self._split_auth()[0]
if rv is not None:
return _url_unquote_legacy(rv)
@property
def raw_username(self):
"""The username if it was part of the URL, `None` otherwise.
Unlike :attr:`username` this one is not being decoded.
"""
return self._split_auth()[0]
@property
def password(self):
"""The password if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
"""
rv = self._split_auth()[1]
if rv is not None:
return _url_unquote_legacy(rv)
@property
def raw_password(self):
"""The password if it was part of the URL, `None` otherwise.
Unlike :attr:`password` this one is not being decoded.
"""
return self._split_auth()[1]
def decode_query(self, *args, **kwargs):
"""Decodes the query part of the URL. Ths is a shortcut for
calling :func:`url_decode` on the query argument. The arguments and
keyword arguments are forwarded to :func:`url_decode` unchanged.
"""
return url_decode(self.query, *args, **kwargs)
def join(self, *args, **kwargs):
"""Joins this URL with another one. This is just a convenience
function for calling into :meth:`url_join` and then parsing the
return value again.
"""
return url_parse(url_join(self, *args, **kwargs))
def to_url(self):
"""Returns a URL string or bytes depending on the type of the
information stored. This is just a convenience function
for calling :meth:`url_unparse` for this URL.
"""
return url_unparse(self)
def decode_netloc(self):
"""Decodes the netloc part into a string."""
rv = _decode_idna(self.host or '')
if ':' in rv:
rv = '[%s]' % rv
port = self.port
if port is not None:
rv = '%s:%d' % (rv, port)
auth = ':'.join(filter(None, [
_url_unquote_legacy(self.raw_username or '', '/:%@'),
_url_unquote_legacy(self.raw_password or '', '/:%@'),
]))
if auth:
rv = '%s@%s' % (auth, rv)
return rv
def to_uri_tuple(self):
"""Returns a :class:`BytesURL` tuple that holds a URI. This will
encode all the information in the URL properly to ASCII using the
rules a web browser would follow.
It's usually more interesting to directly call :meth:`iri_to_uri` which
will return a string.
"""
return url_parse(iri_to_uri(self).encode('ascii'))
def to_iri_tuple(self):
"""Returns a :class:`URL` tuple that holds a IRI. This will try
to decode as much information as possible in the URL without
losing information similar to how a web browser does it for the
URL bar.
It's usually more interesting to directly call :meth:`uri_to_iri` which
will return a string.
"""
return url_parse(uri_to_iri(self))
def _split_netloc(self):
if self._at in self.netloc:
return self.netloc.split(self._at, 1)
return None, self.netloc
def _split_auth(self):
auth = self._split_netloc()[0]
if not auth:
return None, None
if self._colon not in auth:
return auth, None
return auth.split(self._colon, 1)
def _split_host(self):
rv = self._split_netloc()[1]
if not rv:
return None, None
if not rv.startswith(self._lbracket):
if self._colon in rv:
return rv.split(self._colon, 1)
return rv, None
idx = rv.find(self._rbracket)
if idx < 0:
return rv, None
host = rv[1:idx]
rest = rv[idx + 1:]
if rest.startswith(self._colon):
return host, rest[1:]
return host, None
@implements_to_string
class URL(_URLTuple, _URLMixin):
"""Represents a parsed URL. This behaves like a regular tuple but
also has some extra attributes that give further insight into the
URL.
"""
__slots__ = ()
_at = '@'
_colon = ':'
_lbracket = '['
_rbracket = ']'
def __str__(self):
return self.to_url()
def encode_netloc(self):
"""Encodes the netloc part to an ASCII safe URL as bytes."""
rv = self.ascii_host or ''
if ':' in rv:
rv = '[%s]' % rv
port = self.port
if port is not None:
rv = '%s:%d' % (rv, port)
auth = ':'.join(filter(None, [
url_quote(self.raw_username or '', 'utf-8', 'strict', '/:%'),
url_quote(self.raw_password or '', 'utf-8', 'strict', '/:%'),
]))
if auth:
rv = '%s@%s' % (auth, rv)
return rv.encode('ascii')
def encode(self, charset='utf-8', errors='replace'):
"""Encodes the URL to a tuple made out of bytes. The charset is
only being used for the path, query and fragment.
"""
return BytesURL(
self.scheme.encode('ascii'),
self.encode_netloc(),
self.path.encode(charset, errors),
self.query.encode(charset, errors),
self.fragment.encode(charset, errors)
)
class BytesURL(_URLTuple, _URLMixin):
"""Represents a parsed URL in bytes."""
__slots__ = ()
_at = b'@'
_colon = b':'
_lbracket = b'['
_rbracket = b']'
def __str__(self):
return self.to_url().decode('utf-8', 'replace')
def encode_netloc(self):
"""Returns the netloc unchanged as bytes."""
return self.netloc
def decode(self, charset='utf-8', errors='replace'):
"""Decodes the URL to a tuple made out of strings. The charset is
only being used for the path, query and fragment.
"""
return URL(
self.scheme.decode('ascii'),
self.decode_netloc(),
self.path.decode(charset, errors),
self.query.decode(charset, errors),
self.fragment.decode(charset, errors)
)
def _unquote_to_bytes(string, unsafe=''):
if isinstance(string, text_type):
string = string.encode('utf-8')
if isinstance(unsafe, text_type):
unsafe = unsafe.encode('utf-8')
unsafe = frozenset(bytearray(unsafe))
bits = iter(string.split(b'%'))
result = bytearray(next(bits, b''))
for item in bits:
try:
char = _hextobyte[item[:2]]
if char in unsafe:
raise KeyError()
result.append(char)
result.extend(item[2:])
except KeyError:
result.extend(b'%')
result.extend(item)
return bytes(result)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
iterable = iter_multi_items(obj)
if sort:
iterable = sorted(iterable, key=key)
for key, value in iterable:
if value is None:
continue
if not isinstance(key, bytes):
key = text_type(key).encode(charset)
if not isinstance(value, bytes):
value = text_type(value).encode(charset)
yield url_quote_plus(key) + '=' + url_quote_plus(value)
def _url_unquote_legacy(value, unsafe=''):
try:
return url_unquote(value, charset='utf-8',
errors='strict', unsafe=unsafe)
except UnicodeError:
return url_unquote(value, charset='latin1', unsafe=unsafe)
def url_parse(url, scheme=None, allow_fragments=True):
"""Parses a URL from a string into a :class:`URL` tuple. If the URL
is lacking a scheme it can be provided as second argument. Otherwise,
it is ignored. Optionally fragments can be stripped from the URL
by setting `allow_fragments` to `False`.
The inverse of this function is :func:`url_unparse`.
:param url: the URL to parse.
    :param scheme: the default scheme to use if the URL is schemeless.
:param allow_fragments: if set to `False` a fragment will be removed
from the URL.
"""
s = make_literal_wrapper(url)
is_text_based = isinstance(url, text_type)
if scheme is None:
scheme = s('')
netloc = query = fragment = s('')
i = url.find(s(':'))
if i > 0 and _scheme_re.match(to_native(url[:i], errors='replace')):
# make sure "iri" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i + 1:]
if not rest or any(c not in s('0123456789') for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == s('//'):
delim = len(url)
for c in s('/?#'):
wdelim = url.find(c, 2)
if wdelim >= 0:
delim = min(delim, wdelim)
netloc, url = url[2:delim], url[delim:]
if (s('[') in netloc and s(']') not in netloc) or \
(s(']') in netloc and s('[') not in netloc):
raise ValueError('Invalid IPv6 URL')
if allow_fragments and s('#') in url:
url, fragment = url.split(s('#'), 1)
if s('?') in url:
url, query = url.split(s('?'), 1)
result_type = is_text_based and URL or BytesURL
return result_type(scheme, netloc, url, query, fragment)
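# Added illustrative sketch (not part of the original module); on Python 2 the
# fields come back as unicode strings for text input:
#
#     >>> u = url_parse('http://user:pw@example.com:8080/p?x=1#frag')
#     >>> (u.scheme, u.netloc, u.path, u.query, u.fragment)
#     ('http', 'user:pw@example.com:8080', '/p', 'x=1', 'frag')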
def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
"""URL encode a single string with a given encoding.
    :param string: the string to quote.
:param charset: the charset to be used.
:param safe: an optional sequence of safe characters.
:param unsafe: an optional sequence of unsafe characters.
.. versionadded:: 0.9.2
The `unsafe` parameter was added.
"""
if not isinstance(string, (text_type, bytes, bytearray)):
string = text_type(string)
if isinstance(string, text_type):
string = string.encode(charset, errors)
if isinstance(safe, text_type):
safe = safe.encode(charset, errors)
if isinstance(unsafe, text_type):
unsafe = unsafe.encode(charset, errors)
safe = frozenset(bytearray(safe) + _always_safe) - frozenset(bytearray(unsafe))
rv = bytearray()
for char in bytearray(string):
if char in safe:
rv.append(char)
else:
rv.extend(('%%%02X' % char).encode('ascii'))
return to_native(bytes(rv))
def url_quote_plus(string, charset='utf-8', errors='strict', safe=''):
"""URL encode a single string with the given encoding and convert
whitespace to "+".
    :param string: The string to quote.
:param charset: The charset to be used.
:param safe: An optional sequence of safe characters.
"""
return url_quote(string, charset, errors, safe + ' ', '+').replace(' ', '+')
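# Added example sketch: spaces become "+" and reserved characters are escaped.
#
#     >>> url_quote_plus(u'foo bar&baz')
#     'foo+bar%26baz'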
def url_unparse(components):
"""The reverse operation to :meth:`url_parse`. This accepts arbitrary
as well as :class:`URL` tuples and returns a URL as a string.
:param components: the parsed URL as tuple which should be converted
into a URL string.
"""
scheme, netloc, path, query, fragment = \
normalize_string_tuple(components)
s = make_literal_wrapper(scheme)
url = s('')
# We generally treat file:///x and file:/x the same which is also
# what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differentiate between
# empty and missing netloc.
if netloc or (scheme and path.startswith(s('/'))):
if path and path[:1] != s('/'):
path = s('/') + path
url = s('//') + (netloc or s('')) + path
elif path:
url += path
if scheme:
url = scheme + s(':') + url
if query:
url = url + s('?') + query
if fragment:
url = url + s('#') + fragment
return url
def url_unquote(string, charset='utf-8', errors='replace', unsafe=''):
"""URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned.
    :param string: the string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
"""
rv = _unquote_to_bytes(string, unsafe)
if charset is not None:
rv = rv.decode(charset, errors)
return rv
def url_unquote_plus(s, charset='utf-8', errors='replace'):
"""URL decode a single string with the given `charset` and decode "+" to
whitespace.
    Per default encoding errors are replaced (``errors='replace'``). If you want
    a different behavior you can set `errors` to ``'ignore'`` or ``'strict'``; in
    strict mode a :exc:`UnicodeDecodeError` is raised.
:param s: The string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: The error handling for the `charset` decoding.
"""
if isinstance(s, text_type):
s = s.replace(u'+', u' ')
else:
s = s.replace(b'+', b' ')
return url_unquote(s, charset, errors)
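# Added example sketch showing the inverse of url_quote_plus:
#
#     >>> url_unquote_plus('foo+bar%26baz')
#     u'foo bar&baz'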
def url_fix(s, charset='utf-8'):
r"""Sometimes you get an URL by a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
"""
scheme, netloc, path, qs, anchor = url_parse(to_unicode(s, charset, 'replace'))
path = url_quote(path, charset, safe='/%+$!*\'(),')
qs = url_quote_plus(qs, charset, safe=':&%=+$!*\'(),')
return to_native(url_unparse((scheme, netloc, path, qs, anchor)))
def uri_to_iri(uri, charset='utf-8', errors='replace'):
r"""
Converts a URI in a given charset to a IRI.
Examples for URI versus IRI:
>>> uri_to_iri(b'http://xn--n3h.net/')
u'http://\u2603.net/'
>>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'
Query strings are left unchanged:
>>> uri_to_iri('/?foo=24&x=%26%2f')
u'/?foo=24&x=%26%2f'
.. versionadded:: 0.6
:param uri: The URI to convert.
:param charset: The charset of the URI.
:param errors: The error handling on decode.
"""
if isinstance(uri, tuple):
uri = url_unparse(uri)
uri = url_parse(to_unicode(uri, charset))
path = url_unquote(uri.path, charset, errors, '%/;?')
query = url_unquote(uri.query, charset, errors, '%;/?:@&=+,$')
fragment = url_unquote(uri.fragment, charset, errors, '%;/?:@&=+,$')
return url_unparse((uri.scheme, uri.decode_netloc(),
path, query, fragment))
def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False):
r"""
Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug always
uses utf-8 URLs internally because this is what browsers and HTTP do as
    well. In some places where it accepts a URL it also accepts a unicode IRI
and converts it into a URI.
Examples for IRI versus URI:
>>> iri_to_uri(u'http://☃.net/')
'http://xn--n3h.net/'
>>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
There is a general problem with IRI and URI conversion with some
protocols that appear in the wild that are in violation of the URI
specification. In places where Werkzeug goes through a forced IRI to
URI conversion it will set the `safe_conversion` flag which will
not perform a conversion if the end result is already ASCII. This
can mean that the return value is not an entirely correct URI but
it will not destroy such invalid URLs in the process.
As an example consider the following two IRIs::
magnet:?xt=uri:whatever
itms-services://?action=download-manifest
The internal representation after parsing of those URLs is the same
and there is no way to reconstruct the original one. If safe
conversion is enabled however this function becomes a noop for both of
those strings as they both can be considered URIs.
.. versionadded:: 0.6
.. versionchanged:: 0.9.6
The `safe_conversion` parameter was added.
:param iri: The IRI to convert.
:param charset: The charset for the URI.
:param safe_conversion: indicates if a safe conversion should take place.
For more information see the explanation above.
"""
if isinstance(iri, tuple):
iri = url_unparse(iri)
if safe_conversion:
try:
native_iri = to_native(iri)
ascii_iri = to_native(iri).encode('ascii')
if ascii_iri.split() == [ascii_iri]:
return native_iri
except UnicodeError:
pass
iri = url_parse(to_unicode(iri, charset, errors))
netloc = iri.encode_netloc().decode('ascii')
path = url_quote(iri.path, charset, errors, '/:~+%')
query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=')
fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/')
return to_native(url_unparse((iri.scheme, netloc,
path, query, fragment)))
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
errors='replace', separator='&', cls=None):
"""
Parse a querystring and return it as :class:`MultiDict`. There is a
difference in key decoding on different Python versions. On Python 3
keys will always be fully decoded whereas on Python 2, keys will
remain bytestrings if they fit into ASCII. On 2.x keys can be forced
to be unicode by setting `decode_keys` to `True`.
If the charset is set to `None` no unicode decoding will happen and
raw bytes will be returned.
Per default a missing value for a key will default to an empty key. If
you don't want that behavior you can set `include_empty` to `False`.
    Per default encoding errors are replaced (``errors='replace'``). If you want
    a different behavior you can set `errors` to ``'ignore'`` or ``'strict'``; in
    strict mode a `UnicodeDecodeError` is raised.
.. versionchanged:: 0.5
In previous versions ";" and "&" could be used for url decoding.
This changed in 0.5 where only "&" is supported. If you want to
use ";" instead a different `separator` can be provided.
The `cls` parameter was added.
:param s: a string with the query string to decode.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`
then keys will be unicode in all cases. Otherwise,
they remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
"""
if cls is None:
cls = MultiDict
if isinstance(s, text_type) and not isinstance(separator, text_type):
separator = separator.decode(charset or 'ascii')
elif isinstance(s, bytes) and not isinstance(separator, bytes):
separator = separator.encode(charset or 'ascii')
return cls(_url_decode_impl(s.split(separator), charset, decode_keys,
include_empty, errors))
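# Added usage sketch (reprs shown in Python 2 style):
#
#     >>> d = url_decode('a=1&b=&b=2')
#     >>> d['a']
#     u'1'
#     >>> d.getlist('b')
#     [u'', u'2']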
def url_decode_stream(stream, charset='utf-8', decode_keys=False,
include_empty=True, errors='replace', separator='&',
cls=None, limit=None, return_iterator=False):
"""Works like :func:`url_decode` but decodes a stream. The behavior
of stream and limit follows functions like
:func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
directly fed to the `cls` so you can consume the data while it's
parsed.
.. versionadded:: 0.8
:param stream: a stream with the encoded querystring
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`,
keys will be unicode in all cases. Otherwise, they
remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param limit: the content length of the URL data. Not necessary if
a limited stream is provided.
:param return_iterator: if set to `True` the `cls` argument is ignored
and an iterator over all decoded pairs is
returned
"""
from werkzeug.wsgi import make_chunk_iter
if return_iterator:
cls = lambda x: x
elif cls is None:
cls = MultiDict
pair_iter = make_chunk_iter(stream, separator, limit)
return cls(_url_decode_impl(pair_iter, charset, decode_keys,
include_empty, errors))
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
for pair in pair_iter:
if not pair:
continue
s = make_literal_wrapper(pair)
equal = s('=')
if equal in pair:
key, value = pair.split(equal, 1)
else:
if not include_empty:
continue
key = pair
value = s('')
key = url_unquote_plus(key, charset, errors)
if charset is not None and PY2 and not decode_keys:
key = try_coerce_native(key)
yield key, url_unquote_plus(value, charset, errors)
def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
separator=b'&'):
"""URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. Per default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too.
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm.
.. versionadded:: 0.5
`sort`, `key`, and `separator` were added.
:param obj: the object to encode into a query string.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, 'ascii')
return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key))
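# Added example sketch: `None` values are dropped and `sort=True` gives a
# stable ordering.
#
#     >>> url_encode({'b': 2, 'a': 1, 'skip': None}, sort=True)
#     'a=1&b=2'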
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False,
sort=False, key=None, separator=b'&'):
"""Like :meth:`url_encode` but writes the results to a stream
object. If the stream is `None` a generator over all encoded
pairs is returned.
.. versionadded:: 0.8
:param obj: the object to encode into a query string.
:param stream: a stream to write the encoded object into or `None` if
an iterator over the encoded pairs should be returned. In
that case the separator argument is ignored.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, 'ascii')
gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
if stream is None:
return gen
for idx, chunk in enumerate(gen):
if idx:
stream.write(separator)
stream.write(chunk)
def url_join(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter.
:param base: the base URL for the join operation.
:param url: the URL to join.
:param allow_fragments: indicates whether fragments should be allowed.
"""
if isinstance(base, tuple):
base = url_unparse(base)
if isinstance(url, tuple):
url = url_unparse(url)
base, url = normalize_string_tuple((base, url))
s = make_literal_wrapper(base)
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bquery, bfragment = \
url_parse(base, allow_fragments=allow_fragments)
scheme, netloc, path, query, fragment = \
url_parse(url, bscheme, allow_fragments)
if scheme != bscheme:
return url
if netloc:
return url_unparse((scheme, netloc, path, query, fragment))
netloc = bnetloc
if path[:1] == s('/'):
segments = path.split(s('/'))
elif not path:
segments = bpath.split(s('/'))
if not query:
query = bquery
else:
segments = bpath.split(s('/'))[:-1] + path.split(s('/'))
# If the rightmost part is "./" we want to keep the slash but
# remove the dot.
if segments[-1] == s('.'):
segments[-1] = s('')
# Resolve ".." and "."
segments = [segment for segment in segments if segment != s('.')]
while 1:
i = 1
n = len(segments) - 1
while i < n:
if segments[i] == s('..') and \
segments[i - 1] not in (s(''), s('..')):
del segments[i - 1:i + 1]
break
i += 1
else:
break
# Remove trailing ".." if the URL is absolute
unwanted_marker = [s(''), s('..')]
while segments[:2] == unwanted_marker:
del segments[1]
path = s('/').join(segments)
return url_unparse((scheme, netloc, path, query, fragment))
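# Added example sketch of relative reference resolution:
#
#     >>> url_join('http://example.com/a/b', '../c')
#     'http://example.com/c'
#     >>> url_join('http://example.com/a/b', '/d')
#     'http://example.com/d'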
class Href(object):
"""Implements a callable that constructs URLs with the given base. The
function can be called with any number of positional and keyword
    arguments which then are used to assemble the URL. Works with URLs
and posix paths.
Positional arguments are appended as individual segments to
the path of the URL:
>>> href = Href('/foo')
>>> href('bar', 23)
'/foo/bar/23'
>>> href('foo', bar=23)
'/foo/foo?bar=23'
If any of the arguments (positional or keyword) evaluates to `None` it
will be skipped. If no keyword arguments are given the last argument
can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
otherwise the keyword arguments are used for the query parameters, cutting
off the first trailing underscore of the parameter name:
>>> href(is_=42)
'/foo?is=42'
>>> href({'foo': 'bar'})
'/foo?foo=bar'
Combining of both methods is not allowed:
>>> href({'foo': 'bar'}, bar=42)
Traceback (most recent call last):
...
TypeError: keyword arguments and query-dicts can't be combined
Accessing attributes on the href object creates a new href object with
the attribute name as prefix:
>>> bar_href = href.bar
>>> bar_href("blub")
'/foo/bar/blub'
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm:
>>> href = Href("/", sort=True)
>>> href(a=1, b=2, c=3)
'/?a=1&b=2&c=3'
.. versionadded:: 0.5
`sort` and `key` were added.
"""
def __init__(self, base='./', charset='utf-8', sort=False, key=None):
if not base:
base = './'
self.base = base
self.charset = charset
self.sort = sort
self.key = key
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
base = self.base
if base[-1:] != '/':
base += '/'
return Href(url_join(base, name), self.charset, self.sort, self.key)
def __call__(self, *path, **query):
if path and isinstance(path[-1], dict):
if query:
raise TypeError('keyword arguments and query-dicts '
'can\'t be combined')
query, path = path[-1], path[:-1]
elif query:
query = dict([(k.endswith('_') and k[:-1] or k, v)
for k, v in query.items()])
path = '/'.join([to_unicode(url_quote(x, self.charset), 'ascii')
for x in path if x is not None]).lstrip('/')
rv = self.base
if path:
if not rv.endswith('/'):
rv += '/'
rv = url_join(rv, './' + path)
if query:
rv += '?' + to_unicode(url_encode(query, self.charset, sort=self.sort,
key=self.key), 'ascii')
return to_native(rv)
| mit | 5,352,139,911,993,716,000 | 35.182314 | 83 | 0.594967 | false |
techvoltage/capstone | bindings/python/test.py | 33 | 5021 | #!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynnh <aquynh@gmail.com>
from __future__ import print_function
from capstone import *
import binascii
import sys
from xprint import to_hex, to_x, to_x_32
_python3 = sys.version_info.major == 3
X86_CODE16 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE64 = b"\x55\x48\x8b\x05\xb8\x13\x00\x00"
ARM_CODE = b"\xED\xFF\xFF\xEB\x04\xe0\x2d\xe5\x00\x00\x00\x00\xe0\x83\x22\xe5\xf1\x02\x03\x0e\x00\x00\xa0\xe3\x02\x30\xc1\xe7\x00\x00\x53\xe3"
ARM_CODE2 = b"\x10\xf1\x10\xe7\x11\xf2\x31\xe7\xdc\xa1\x2e\xf3\xe8\x4e\x62\xf3"
THUMB_CODE = b"\x70\x47\xeb\x46\x83\xb0\xc9\x68"
THUMB_CODE2 = b"\x4f\xf0\x00\x01\xbd\xe8\x00\x88\xd1\xe8\x00\xf0"
THUMB_MCLASS = b"\xef\xf3\x02\x80"
ARMV8 = b"\xe0\x3b\xb2\xee\x42\x00\x01\xe1\x51\xf0\x7f\xf5"
MIPS_CODE = b"\x0C\x10\x00\x97\x00\x00\x00\x00\x24\x02\x00\x0c\x8f\xa2\x00\x00\x34\x21\x34\x56"
MIPS_CODE2 = b"\x56\x34\x21\x34\xc2\x17\x01\x00"
MIPS_32R6M = b"\x00\x07\x00\x07\x00\x11\x93\x7c\x01\x8c\x8b\x7c\x00\xc7\x48\xd0"
MIPS_32R6 = b"\xec\x80\x00\x19\x7c\x43\x22\xa0"
ARM64_CODE = b"\x21\x7c\x02\x9b\x21\x7c\x00\x53\x00\x40\x21\x4b\xe1\x0b\x40\xb9"
PPC_CODE = b"\x80\x20\x00\x00\x80\x3f\x00\x00\x10\x43\x23\x0e\xd0\x44\x00\x80\x4c\x43\x22\x02\x2d\x03\x00\x80\x7c\x43\x20\x14\x7c\x43\x20\x93\x4f\x20\x00\x21\x4c\xc8\x00\x21"
SPARC_CODE = b"\x80\xa0\x40\x02\x85\xc2\x60\x08\x85\xe8\x20\x01\x81\xe8\x00\x00\x90\x10\x20\x01\xd5\xf6\x10\x16\x21\x00\x00\x0a\x86\x00\x40\x02\x01\x00\x00\x00\x12\xbf\xff\xff\x10\xbf\xff\xff\xa0\x02\x00\x09\x0d\xbf\xff\xff\xd4\x20\x60\x00\xd4\x4e\x00\x16\x2a\xc2\x80\x03"
SPARCV9_CODE = b"\x81\xa8\x0a\x24\x89\xa0\x10\x20\x89\xa0\x1a\x60\x89\xa0\x00\xe0"
SYSZ_CODE = b"\xed\x00\x00\x00\x00\x1a\x5a\x0f\x1f\xff\xc2\x09\x80\x00\x00\x00\x07\xf7\xeb\x2a\xff\xff\x7f\x57\xe3\x01\xff\xff\x7f\x57\xeb\x00\xf0\x00\x00\x24\xb2\x4f\x00\x78"
XCORE_CODE = b"\xfe\x0f\xfe\x17\x13\x17\xc6\xfe\xec\x17\x97\xf8\xec\x4f\x1f\xfd\xec\x37\x07\xf2\x45\x5b\xf9\xfa\x02\x06\x1b\x10"
all_tests = (
(CS_ARCH_X86, CS_MODE_16, X86_CODE16, "X86 16bit (Intel syntax)", 0),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32bit (ATT syntax)", CS_OPT_SYNTAX_ATT),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32 (Intel syntax)", 0),
(CS_ARCH_X86, CS_MODE_64, X86_CODE64, "X86 64 (Intel syntax)", 0),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE, "ARM", 0),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE2, "THUMB-2", 0),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE2, "ARM: Cortex-A15 + NEON", 0),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE, "THUMB", 0),
(CS_ARCH_ARM, CS_MODE_THUMB + CS_MODE_MCLASS, THUMB_MCLASS, "Thumb-MClass", 0),
(CS_ARCH_ARM, CS_MODE_ARM + CS_MODE_V8, ARMV8, "Arm-V8", 0),
(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN, MIPS_CODE, "MIPS-32 (Big-endian)", 0),
(CS_ARCH_MIPS, CS_MODE_MIPS64 + CS_MODE_LITTLE_ENDIAN, MIPS_CODE2, "MIPS-64-EL (Little-endian)", 0),
(CS_ARCH_MIPS, CS_MODE_MIPS32R6 + CS_MODE_MICRO + CS_MODE_BIG_ENDIAN, MIPS_32R6M, "MIPS-32R6 | Micro (Big-endian)", 0),
(CS_ARCH_MIPS, CS_MODE_MIPS32R6 + CS_MODE_BIG_ENDIAN, MIPS_32R6, "MIPS-32R6 (Big-endian)", 0),
(CS_ARCH_ARM64, CS_MODE_ARM, ARM64_CODE, "ARM-64", 0),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64", 0),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64, print register with number only", CS_OPT_SYNTAX_NOREGNAME),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN, SPARC_CODE, "Sparc", 0),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN + CS_MODE_V9, SPARCV9_CODE, "SparcV9", 0),
(CS_ARCH_SYSZ, 0, SYSZ_CODE, "SystemZ", 0),
(CS_ARCH_XCORE, 0, XCORE_CODE, "XCore", 0),
)
# ## Test cs_disasm_quick()
def test_cs_disasm_quick():
for arch, mode, code, comment, syntax in all_tests:
print('*' * 40)
print("Platform: %s" % comment)
print("Disasm:"),
print(to_hex(code))
for insn in cs_disasm_quick(arch, mode, code, 0x1000):
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print()
# ## Test class Cs
def test_class():
for arch, mode, code, comment, syntax in all_tests:
print('*' * 16)
print("Platform: %s" % comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
if syntax != 0:
md.syntax = syntax
for insn in md.disasm(code, 0x1000):
# bytes = binascii.hexlify(insn.bytes)
# print("0x%x:\t%s\t%s\t// hex-code: %s" %(insn.address, insn.mnemonic, insn.op_str, bytes))
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print("0x%x:" % (insn.address + insn.size))
print()
except CsError as e:
print("ERROR: %s" % e)
# test_cs_disasm_quick()
# print ("*" * 40)
if __name__ == '__main__':
test_class()
| bsd-3-clause | 6,139,981,171,410,466,000 | 50.234694 | 272 | 0.625772 | false |
transferwise/bootstrap | test-infra/s3_cache.py | 2166 | 5734 | #!/usr/bin/env python2.7
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024
@contextmanager
def timer():
start = datetime.utcnow()
yield
end = datetime.utcnow()
elapsed = end - start
print("\tDone. Took", int(elapsed.total_seconds()), "second(s).")
@contextmanager
def todo_file(writeback=True):
try:
with open(UPLOAD_TODO_FILE, 'rt') as json_file:
todo = load(json_file)
except (IOError, OSError, ValueError):
todo = {}
yield todo
if writeback:
try:
with open(UPLOAD_TODO_FILE, 'wt') as json_file:
save(todo, json_file)
except (OSError, IOError) as save_err:
print("Error saving {}:".format(UPLOAD_TODO_FILE), save_err)
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def mark_needs_uploading(cache_name):
with todo_file() as todo:
todo[cache_name] = True
def mark_uploaded(cache_name):
with todo_file() as todo:
todo.pop(cache_name, None)
def need_to_upload(cache_name):
with todo_file(writeback=False) as todo:
return todo.get(cache_name, False)
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
with timer():
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
with timer():
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
mark_uploaded(cache_name) # reset
try:
print("Downloading {} tarball from S3...".format(cache_name))
with timer():
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
mark_needs_uploading(cache_name)
raise SystemExit("Cached {} download failed!".format(cache_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(cache_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(cache_name, _tarball_size(directory)))
with timer():
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(cache_name))
mark_uploaded(cache_name)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 2:
raise SystemExit("USAGE: s3_cache.py <download | upload> <cache name>")
mode, cache_name = argv
script_dir = dirname(realpath(__file__))
chdir(script_dir)
try:
with open(CONFIG_FILE, 'rt') as config_file:
config = load(config_file)
except (IOError, OSError, ValueError) as config_err:
print(config_err)
raise SystemExit("Error when trying to load config from JSON file!")
try:
cache_info = config[cache_name]
key_file = expandvars(cache_info["key"])
fallback_cmd = cache_info["generate"]
directory = expandvars(cache_info["cache"])
except (TypeError, KeyError) as load_err:
print(load_err)
raise SystemExit("Config for cache named {!r} is missing or malformed!".format(cache_name))
try:
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME)
if bucket is None:
raise SystemExit("Could not access bucket!")
key_file_hash = _sha256_of_file(key_file)
key = Key(bucket, key_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if need_to_upload(cache_name):
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
except BaseException as exc:
if mode != 'download':
raise
print("Error!:", exc)
print("Unable to download from cache.")
print("Running fallback command to generate cache directory {!r}: {}".format(directory, fallback_cmd))
with timer():
run(fallback_cmd, shell=True)
| mit | -8,763,417,256,080,298,000 | 30.163043 | 110 | 0.636554 | false |
gabrielsaldana/sqmc | sabesqmc/quote/tests/test_forms.py | 1 | 2384 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.test import TestCase
from ..forms import QuoteForm
class TestQuoteForm(TestCase):
def setUp(self):
pass
def test_validate_emtpy_quote(self):
form = QuoteForm({'message': ''})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': ' '})
self.assertFalse(form.is_valid())
def test_validate_invalid_quote(self):
form = QuoteForm({'message': 'Mensaje invalido'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'mensaje invalido'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'me nsaje invalido'})
self.assertFalse(form.is_valid())
def test_urls_in_quote(self):
form = QuoteForm({'message': 'http://122.33.43.322'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'http://sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'http://sabesquemecaga.com/asdfads/'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://www.sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://www.sabesquemecaga.com/test/12'})
self.assertFalse(form.is_valid())
def test_emails_in_quote(self):
form = QuoteForm({'message': 'Me caga test@test.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga test.this@test.asdfas.com'})
self.assertFalse(form.is_valid())
def test_validate_short_quote(self):
form = QuoteForm({'message': 'Me caga '})
self.assertFalse(form.is_valid())
def test_validate_long_quote(self):
form = QuoteForm({'message': 'Me caga que sea que Este mensaje es demasiado largo y no pase las pruebas de lo que tenemos que probar asdfadfa adsfasdfa. Me caga que sea que Este mensaje es demasiado largo y no pase las pruebas de lo que tenemos que probar.'})
self.assertFalse(form.is_valid())
def test_valid_message(self):
form = QuoteForm({'message': 'Me caga probar esto'})
self.assertTrue(form.is_valid())
| agpl-3.0 | 373,462,940,213,938,370 | 40.824561 | 267 | 0.636745 | false |
hbrunn/OCB | addons/payment_authorize/controllers/main.py | 247 | 1261 | # -*- coding: utf-8 -*-
import pprint
import logging
import urlparse
from openerp import http
from openerp.http import request
_logger = logging.getLogger(__name__)
class AuthorizeController(http.Controller):
_return_url = '/payment/authorize/return/'
_cancel_url = '/payment/authorize/cancel/'
@http.route([
'/payment/authorize/return/',
'/payment/authorize/cancel/',
], type='http', auth='public')
def authorize_form_feedback(self, **post):
_logger.info('Authorize: entering form_feedback with post data %s', pprint.pformat(post))
return_url = '/'
if post:
request.env['payment.transaction'].sudo().form_feedback(post, 'authorize')
return_url = post.pop('return_url', '/')
base_url = request.env['ir.config_parameter'].get_param('web.base.url')
# Authorize.Net is expecting a response to the POST sent by their server.
# This response is in the form of a URL that Authorize.Net will pass on to the
        # client's browser to redirect them to the desired location; this requires javascript.
return request.render('payment_authorize.payment_authorize_redirect', {
'return_url': '%s' % urlparse.urljoin(base_url, return_url)
})
| agpl-3.0 | 8,330,796,822,597,135,000 | 38.40625 | 97 | 0.654243 | false |
cjparsons74/kupfer | kupfer/obj/objects.py | 1 | 11035 | # -*- coding: UTF-8 -*-
"""
Copyright 2007--2009 Ulrik Sverdrup <ulrik.sverdrup@gmail.com>
This file is a part of the program kupfer, which is
released under GNU General Public License v3 (or any later version),
see the main program file, and COPYING for details.
"""
import os
from os import path
import gobject
from kupfer import icons, launch, utils
from kupfer import pretty
from kupfer.obj.base import Leaf, Action, Source, InvalidDataError
from kupfer.obj import fileactions
from kupfer.interface import TextRepresentation
from kupfer.kupferstring import tounicode
def ConstructFileLeafTypes():
""" Return a seq of the Leaf types returned by ConstructFileLeaf"""
yield FileLeaf
yield AppLeaf
def ConstructFileLeaf(obj):
"""
If the path in @obj points to a Desktop Item file,
return an AppLeaf, otherwise return a FileLeaf
"""
root, ext = path.splitext(obj)
if ext == ".desktop":
try:
return AppLeaf(init_path=obj)
except InvalidDataError:
pass
return FileLeaf(obj)
def _directory_content(dirpath, show_hidden):
from kupfer.obj.sources import DirectorySource
return DirectorySource(dirpath, show_hidden)
class FileLeaf (Leaf, TextRepresentation):
"""
Represents one file: the represented object is a bytestring (important!)
"""
serializable = 1
def __init__(self, obj, name=None):
"""Construct a FileLeaf
The display name of the file is normally derived from the full path,
and @name should normally be left unspecified.
@obj: byte string (file system encoding)
@name: unicode name or None for using basename
"""
if obj is None:
raise InvalidDataError("File path for %s may not be None" % name)
# Use glib filename reading to make display name out of filenames
# this function returns a `unicode` object
if not name:
name = gobject.filename_display_basename(obj)
super(FileLeaf, self).__init__(obj, name)
def __eq__(self, other):
try:
return (type(self) == type(other) and
unicode(self) == unicode(other) and
path.samefile(self.object, other.object))
except OSError, exc:
pretty.print_debug(__name__, exc)
return False
def repr_key(self):
return self.object
def canonical_path(self):
"""Return the true path of the File (without symlinks)"""
return path.realpath(self.object)
def is_valid(self):
return os.access(self.object, os.R_OK)
def _is_executable(self):
return os.access(self.object, os.R_OK | os.X_OK)
def is_dir(self):
return path.isdir(self.object)
def get_text_representation(self):
return gobject.filename_display_name(self.object)
def get_description(self):
return utils.get_display_path_for_bytestring(self.canonical_path())
def get_actions(self):
return fileactions.get_actions_for_file(self)
def has_content(self):
return self.is_dir() or Leaf.has_content(self)
def content_source(self, alternate=False):
if self.is_dir():
return _directory_content(self.object, alternate)
else:
return Leaf.content_source(self)
def get_thumbnail(self, width, height):
if self.is_dir(): return None
return icons.get_thumbnail_for_file(self.object, width, height)
def get_gicon(self):
return icons.get_gicon_for_file(self.object)
def get_icon_name(self):
if self.is_dir():
return "folder"
else:
return "text-x-generic"
class SourceLeaf (Leaf):
def __init__(self, obj, name=None):
"""Create SourceLeaf for source @obj"""
if not name:
name = unicode(obj)
Leaf.__init__(self, obj, name)
def has_content(self):
return True
def repr_key(self):
return repr(self.object)
def content_source(self, alternate=False):
return self.object
def get_description(self):
return self.object.get_description()
@property
def fallback_icon_name(self):
return self.object.fallback_icon_name
def get_gicon(self):
return self.object.get_gicon()
def get_icon_name(self):
return self.object.get_icon_name()
class AppLeaf (Leaf):
def __init__(self, item=None, init_path=None, app_id=None):
"""Try constructing an Application for GAppInfo @item,
for file @path or for package name @app_id.
"""
self.init_item = item
self.init_path = init_path
self.init_item_id = app_id and app_id + ".desktop"
# finish will raise InvalidDataError on invalid item
self.finish()
Leaf.__init__(self, self.object, self.object.get_name())
self._add_aliases()
def _add_aliases(self):
# find suitable alias
# use package name: non-extension part of ID
lowername = unicode(self).lower()
package_name = self._get_package_name()
if package_name and package_name not in lowername:
self.kupfer_add_alias(package_name)
def __hash__(self):
return hash(unicode(self))
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.get_id() == other.get_id())
def __getstate__(self):
self.init_item_id = self.object and self.object.get_id()
state = dict(vars(self))
state["object"] = None
state["init_item"] = None
return state
def __setstate__(self, state):
vars(self).update(state)
self.finish()
def finish(self):
"""Try to set self.object from init's parameters"""
item = None
if self.init_item:
item = self.init_item
else:
# Construct an AppInfo item from either path or item_id
from gio.unix import DesktopAppInfo, desktop_app_info_new_from_filename
if self.init_path and os.access(self.init_path, os.X_OK):
				# serializable if created from a "loose file"
self.serializable = 1
item = desktop_app_info_new_from_filename(self.init_path)
try:
# try to annotate the GAppInfo object
item.init_path = self.init_path
except AttributeError, exc:
pretty.print_debug(__name__, exc)
elif self.init_item_id:
try:
item = DesktopAppInfo(self.init_item_id)
except RuntimeError:
pretty.print_debug(__name__, "Application not found:",
self.init_item_id)
self.object = item
if not self.object:
raise InvalidDataError
def repr_key(self):
return self.get_id()
def _get_package_name(self):
return gobject.filename_display_basename(self.get_id())
def get_id(self):
"""Return the unique ID for this app.
This is the GIO id "gedit.desktop" minus the .desktop part for
system-installed applications.
"""
return launch.application_id(self.object)
def get_actions(self):
if launch.application_is_running(self.object):
yield Launch(_("Go To"), is_running=True)
yield CloseAll()
else:
yield Launch()
yield LaunchAgain()
def get_description(self):
# Use Application's description, else use executable
# for "file-based" applications we show the path
app_desc = tounicode(self.object.get_description())
ret = tounicode(app_desc if app_desc else self.object.get_executable())
if self.init_path:
app_path = utils.get_display_path_for_bytestring(self.init_path)
return u"(%s) %s" % (app_path, ret)
return ret
def get_gicon(self):
return self.object.get_icon()
def get_icon_name(self):
return "exec"
class OpenUrl (Action):
rank_adjust = 5
def __init__(self, name=None):
if not name:
name = _("Open URL")
super(OpenUrl, self).__init__(name)
def activate(self, leaf):
url = leaf.object
self.open_url(url)
def open_url(self, url):
utils.show_url(url)
def get_description(self):
return _("Open URL with default viewer")
def get_icon_name(self):
return "forward"
class Launch (Action):
""" Launches an application (AppLeaf) """
rank_adjust = 5
def __init__(self, name=None, is_running=False, open_new=False):
"""
If @is_running, style as if the app is running (Show application)
If @open_new, always start a new instance.
"""
if not name:
name = _("Launch")
Action.__init__(self, name)
self.is_running = is_running
self.open_new = open_new
def activate(self, leaf):
desktop_item = leaf.object
launch.launch_application(leaf.object, activate=not self.open_new)
def get_description(self):
if self.is_running:
return _("Show application window")
return _("Launch application")
def get_icon_name(self):
if self.is_running:
return "go-jump"
return Action.get_icon_name(self)
class LaunchAgain (Launch):
rank_adjust = 0
def __init__(self, name=None):
if not name:
name = _("Launch Again")
Launch.__init__(self, name, open_new=True)
def item_types(self):
yield AppLeaf
def valid_for_item(self, leaf):
return launch.application_is_running(leaf.object)
def get_description(self):
return _("Launch another instance of this application")
class CloseAll (Action):
"""Attempt to close all application windows"""
rank_adjust = -10
def __init__(self):
Action.__init__(self, _("Close"))
def activate(self, leaf):
return launch.application_close_all(leaf.object)
def item_types(self):
yield AppLeaf
def valid_for_item(self, leaf):
return launch.application_is_running(leaf.object)
def get_description(self):
return _("Attempt to close all application windows")
def get_icon_name(self):
return "window-close"
class UrlLeaf (Leaf, TextRepresentation):
def __init__(self, obj, name):
super(UrlLeaf, self).__init__(obj, name)
def get_actions(self):
return (OpenUrl(), )
def get_description(self):
return self.object
def get_icon_name(self):
return "text-html"
class RunnableLeaf (Leaf):
"""Leaf where the Leaf is basically the action itself,
for items such as Quit, Log out etc.
"""
def __init__(self, obj=None, name=None):
Leaf.__init__(self, obj, name)
def get_actions(self):
yield Perform()
def run(self):
raise NotImplementedError
def repr_key(self):
return ""
def get_gicon(self):
iname = self.get_icon_name()
if iname:
return icons.get_gicon_with_fallbacks(None, (iname, ))
return icons.ComposedIcon("kupfer-object", "gtk-execute")
def get_icon_name(self):
return ""
class Perform (Action):
"""Perform the action in a RunnableLeaf"""
rank_adjust = 5
def __init__(self, name=None):
# TRANS: 'Run' as in Perform a (saved) command
if not name: name = _("Run")
super(Perform, self).__init__(name=name)
def activate(self, leaf):
return leaf.run()
def get_description(self):
return _("Perform command")
class TextLeaf (Leaf, TextRepresentation):
"""Represent a text query
The represented object is a unicode string
"""
serializable = 1
def __init__(self, text, name=None):
"""@text *must* be unicode or UTF-8 str"""
text = tounicode(text)
if not name:
lines = [l for l in text.splitlines() if l.strip()]
name = lines[0] if lines else text
Leaf.__init__(self, text, name)
def get_actions(self):
return ()
def repr_key(self):
return hash(self.object)
def get_description(self):
lines = [l for l in self.object.splitlines() if l.strip()]
desc = lines[0] if lines else self.object
numlines = len(lines) or 1
# TRANS: This is description for a TextLeaf, a free-text search
# TRANS: The plural parameter is the number of lines %(num)d
return ngettext('"%(text)s"', '(%(num)d lines) "%(text)s"',
numlines) % {"num": numlines, "text": desc }
def get_icon_name(self):
return "edit-select-all"
| gpl-3.0 | -217,940,039,745,900,320 | 26.179803 | 74 | 0.695696 | false |
klabble/pollution-music | weather.py | 1 | 4565 | #!/usr/bin/env python3
# Copyright 2013 David Walker
#
# This file is part of Pollution Music.
#
# Pollution Music is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Pollution Music is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pollution Music. If not, see <http://www.gnu.org/licenses/>.
import calendar
from collections import defaultdict
import datetime
import json
import time
import urllib.request, urllib.error, urllib.parse
class WeatherData(object):
'''Caching front-end to weather data API.'''
# minimum time to wait, in seconds, between calls to the weather api
MIN_TIME_BETWEEN_CALLS = 12.0
def __init__(self, location, api_key):
'''Init with location about which to retrieve weather data.
Args:
location: Must be a string in form "state_or_country_code/city",
e.g., "CA/San_Francisco" or "China/Beijing".
api_key: issued by wunderground
'''
# this object stores weather data for only one location
self._location = location
self._url_prefix = 'http://api.wunderground.com/api/' + api_key + '/'
# for self-throttling to avoid exceeding calls/min limit
self._time_of_last_call = None
# cached weather observations. When populated, the expression
# self._wx[year][month][day][hour] (keys are of type int) will give a
# comma-separated-value string with the readings for temperature, wind
# speed, and so on.
self._wx = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(str))))
# empty csv to return when an observation is not available
self._no_data = ',,,,'
def _get_history(self, dt):
'''Ask the weather API for the history of observations for the given
date, and return the observation for the given hour on that date.
Args:
dt: datetime instance with values for year, month, day, and hour
Returns:
A string containing weather observations separated by commas.
'''
self._throttle()
url = (self._url_prefix +
'history_{}{:02}{:02}/q/{}.json'.format(
dt.year, dt.month, dt.day, self._location))
h = json.loads(urllib.request.urlopen(url).read().decode())
for ob in h['history']['observations']:
if ob['date']['min'] == '00':
hour = int(ob['date']['hour'])
self._wx[dt.year][dt.month][dt.day][hour] = ','.join([
ob['tempm'],
ob['hum'],
ob['wspdm'],
ob['wdird'],
ob['pressurem']])
ob = self._wx[dt.year][dt.month][dt.day][dt.hour]
if ob == '':
ob = self._wx[dt.year][dt.month][dt.day][dt.hour] = self._no_data
return ob
def _throttle(self):
'''Record the time of this call, first sleeping if necessary to avoid
exceeding the call/minute limit for the weather api.'''
if self._time_of_last_call is not None:
time_since_last_call = time.time() - self._time_of_last_call
if time_since_last_call < WeatherData.MIN_TIME_BETWEEN_CALLS:
time.sleep(WeatherData.MIN_TIME_BETWEEN_CALLS -
time_since_last_call)
self._time_of_last_call = time.time()
def get_observation(self, dt):
'''Return a comma-delimited string containing weather observations for
the date and hour specified by dt.'''
ob = self._wx[dt.year][dt.month][dt.day][dt.hour]
if ob == '':
# Never fetched this data, ask the API for it.
ob = self._get_history(dt)
return ob
def get_cur_conditions(self):
self._throttle()
url = (self._url_prefix + 'geolookup/conditions/q/' + self._location +
'.json')
return json.loads(urllib.request.urlopen(url).read().decode())
def main():
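    # Note (added): WUNDERGROUND_API_KEY is not defined in this module; it is
    # assumed to be supplied elsewhere (e.g. a config constant or an
    # environment variable) before main() is called.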
WeatherData('China/Beijing', WUNDERGROUND_API_KEY)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,753,267,676,844,873,000 | 38.353448 | 80 | 0.604381 | false |
was4444/chromium.src | third_party/WebKit/Source/devtools/scripts/modular_build.py | 32 | 6644 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for the modular DevTools build.
"""
from os import path
import os
try:
import simplejson as json
except ImportError:
import json
def read_file(filename):
with open(path.normpath(filename), 'rt') as input:
return input.read()
def write_file(filename, content):
if path.exists(filename):
os.remove(filename)
with open(filename, 'wt') as output:
output.write(content)
def bail_error(message):
raise Exception(message)
def load_and_parse_json(filename):
try:
return json.loads(read_file(filename))
except:
print 'ERROR: Failed to parse %s' % filename
raise
def concatenate_scripts(file_names, module_dir, output_dir, output):
for file_name in file_names:
output.write('/* %s */\n' % file_name)
file_path = path.join(module_dir, file_name)
if not path.isfile(file_path):
file_path = path.join(output_dir, path.basename(module_dir), file_name)
output.write(read_file(file_path))
output.write(';')
class Descriptors:
def __init__(self, application_dir, application_descriptor, module_descriptors):
self.application_dir = application_dir
self.application = application_descriptor
self.modules = module_descriptors
self._cached_sorted_modules = None
def application_json(self):
return json.dumps(self.application.values())
def all_compiled_files(self):
files = {}
for name in self.modules:
module = self.modules[name]
skipped_files = set(module.get('skip_compilation', []))
for script in module.get('scripts', []):
if script not in skipped_files:
files[path.normpath(path.join(self.application_dir, name, script))] = True
return files.keys()
def module_compiled_files(self, name):
files = []
module = self.modules.get(name)
skipped_files = set(module.get('skip_compilation', []))
for script in module.get('scripts', []):
if script not in skipped_files:
files.append(script)
return files
def module_resources(self, name):
return [name + '/' + resource for resource in self.modules[name].get('resources', [])]
def sorted_modules(self):
if self._cached_sorted_modules:
return self._cached_sorted_modules
result = []
unvisited_modules = set(self.modules)
temp_modules = set()
def visit(parent, name):
if name not in unvisited_modules:
return None
if name not in self.modules:
return (parent, name)
if name in temp_modules:
bail_error('Dependency cycle found at module "%s"' % name)
temp_modules.add(name)
deps = self.modules[name].get('dependencies')
if deps:
for dep_name in deps:
bad_dep = visit(name, dep_name)
if bad_dep:
return bad_dep
unvisited_modules.remove(name)
temp_modules.remove(name)
result.append(name)
return None
while len(unvisited_modules):
for next in unvisited_modules:
break
failure = visit(None, next)
if failure:
# failure[0] can never be None
bail_error('Unknown module "%s" encountered in dependencies of "%s"' % (failure[1], failure[0]))
self._cached_sorted_modules = result
return result
def sorted_dependencies_closure(self, module_name):
visited = set()
def sorted_deps_for_module(name):
result = []
desc = self.modules[name]
deps = desc.get('dependencies', [])
for dep in deps:
result += sorted_deps_for_module(dep)
if name not in visited:
result.append(name)
visited.add(name)
return result
return sorted_deps_for_module(module_name)
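# Added illustrative note: for a Descriptors instance whose modules are
# {'a': {}, 'b': {'dependencies': ['a']}, 'c': {'dependencies': ['b']}},
# sorted_dependencies_closure('c') returns ['a', 'b', 'c'] -- dependencies
# always precede the modules that require them.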
class DescriptorLoader:
def __init__(self, application_dir):
self.application_dir = application_dir
def load_application(self, application_descriptor_name):
return self.load_applications([application_descriptor_name])
def load_applications(self, application_descriptor_names):
merged_application_descriptor = {}
all_module_descriptors = {}
for application_descriptor_name in application_descriptor_names:
module_descriptors = {}
application_descriptor_filename = path.join(self.application_dir, application_descriptor_name)
application_descriptor = {desc['name']: desc for desc in load_and_parse_json(application_descriptor_filename)}
for name in application_descriptor:
merged_application_descriptor[name] = application_descriptor[name]
for (module_name, module) in application_descriptor.items():
if module_descriptors.get(module_name):
bail_error('Duplicate definition of module "%s" in %s' % (module_name, application_descriptor_filename))
if not all_module_descriptors.get(module_name):
module_descriptors[module_name] = self._read_module_descriptor(module_name, application_descriptor_filename)
all_module_descriptors[module_name] = module_descriptors[module_name]
for module in module_descriptors.values():
deps = module.get('dependencies', [])
for dep in deps:
if dep not in application_descriptor:
bail_error('Module "%s" (dependency of "%s") not listed in application descriptor %s' % (dep, module['name'], application_descriptor_filename))
return Descriptors(self.application_dir, merged_application_descriptor, all_module_descriptors)
def _read_module_descriptor(self, module_name, application_descriptor_filename):
json_filename = path.join(self.application_dir, module_name, 'module.json')
if not path.exists(json_filename):
bail_error('Module descriptor %s referenced in %s is missing' % (json_filename, application_descriptor_filename))
module_json = load_and_parse_json(json_filename)
module_json['name'] = module_name
return module_json
| bsd-3-clause | -7,166,083,372,488,460,000 | 35.911111 | 167 | 0.611078 | false |
nevil/edash-packager | packager/third_party/protobuf/python/google/protobuf/descriptor_database.py | 230 | 4411 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a container for DescriptorProtos."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
class DescriptorDatabase(object):
"""A container accepting FileDescriptorProtos and maps DescriptorProtos."""
def __init__(self):
self._file_desc_protos_by_file = {}
self._file_desc_protos_by_symbol = {}
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
"""
self._file_desc_protos_by_file[file_desc_proto.name] = file_desc_proto
package = file_desc_proto.package
for message in file_desc_proto.message_type:
self._file_desc_protos_by_symbol.update(
(name, file_desc_proto) for name in _ExtractSymbols(message, package))
for enum in file_desc_proto.enum_type:
self._file_desc_protos_by_symbol[
'.'.join((package, enum.name))] = file_desc_proto
def FindFileByName(self, name):
"""Finds the file descriptor proto by file name.
Typically the file name is a relative path ending to a .proto file. The
proto with the given name will have to have been added to this database
using the Add method or else an error will be raised.
Args:
name: The file name to find.
Returns:
The file descriptor proto matching the name.
Raises:
KeyError if no file by the given name was added.
"""
return self._file_desc_protos_by_file[name]
def FindFileContainingSymbol(self, symbol):
"""Finds the file descriptor proto containing the specified symbol.
The symbol should be a fully qualified name including the file descriptor's
package and any containing messages. Some examples:
'some.package.name.Message'
'some.package.name.Message.NestedEnum'
The file descriptor proto containing the specified symbol must be added to
this database using the Add method or else an error will be raised.
Args:
symbol: The fully qualified symbol name.
Returns:
The file descriptor proto containing the symbol.
Raises:
KeyError if no file contains the specified symbol.
"""
return self._file_desc_protos_by_symbol[symbol]
def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
"""
message_name = '.'.join((package, desc_proto.name))
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name))
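# Added illustrative note: for a message 'M' with nested enum 'E' declared in
# package 'pkg', _ExtractSymbols yields 'pkg.M' followed by 'pkg.M.E'.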
| bsd-3-clause | -8,719,473,828,288,866,000 | 35.758333 | 80 | 0.72818 | false |
dyyi/moneybook | venv/Lib/site-packages/pip/_vendor/distlib/manifest.py | 559 | 13598 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Class representing the list of files in a distribution.
Equivalent to distutils.filelist, but fixes some problems.
"""
import fnmatch
import logging
import os
import re
from . import DistlibException
from .compat import fsdecode
from .util import convert_path
__all__ = ['Manifest']
logger = logging.getLogger(__name__)
# a \ followed by some spaces + EOL
_COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M)
_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
class Manifest(object):
"""A list of files built by on exploring the filesystem and filtered by
applying various patterns to what we find there.
"""
def __init__(self, base=None):
"""
Initialise an instance.
:param base: The base directory to explore under.
"""
self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
self.prefix = self.base + os.sep
self.allfiles = None
self.files = set()
#
# Public API
#
def findall(self):
"""Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found.
"""
from stat import S_ISREG, S_ISDIR, S_ISLNK
self.allfiles = allfiles = []
root = self.base
stack = [root]
pop = stack.pop
push = stack.append
while stack:
root = pop()
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
# Avoid excess stat calls -- just one will do, thank you!
stat = os.stat(fullname)
mode = stat.st_mode
if S_ISREG(mode):
allfiles.append(fsdecode(fullname))
elif S_ISDIR(mode) and not S_ISLNK(mode):
push(fullname)
def add(self, item):
"""
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
"""
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item))
def add_many(self, items):
"""
Add a list of files to the manifest.
:param items: The pathnames to add. These can be relative to the base.
"""
for item in items:
self.add(item)
def sorted(self, wantdirs=False):
"""
Return sorted files in directory order
"""
def add_dir(dirs, d):
dirs.add(d)
logger.debug('add_dir added %s', d)
if d != self.base:
parent, _ = os.path.split(d)
assert parent not in ('', '/')
add_dir(dirs, parent)
result = set(self.files) # make a copy!
if wantdirs:
dirs = set()
for f in result:
add_dir(dirs, os.path.dirname(f))
result |= dirs
return [os.path.join(*path_tuple) for path_tuple in
sorted(os.path.split(path) for path in result)]
def clear(self):
"""Clear all collected files."""
self.files = set()
self.allfiles = []
def process_directive(self, directive):
"""
Process a directive which either adds some files from ``allfiles`` to
``files``, or removes some files from ``files``.
:param directive: The directive to process. This should be in a format
compatible with distutils ``MANIFEST.in`` files:
http://docs.python.org/distutils/sourcedist.html#commands
"""
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dirpattern).
action, patterns, thedir, dirpattern = self._parse_directive(directive)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=True):
logger.warning('no files found matching %r', pattern)
elif action == 'exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=True)
#if not found:
# logger.warning('no previously-included files '
# 'found matching %r', pattern)
elif action == 'global-include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=False):
logger.warning('no files found matching %r '
'anywhere in distribution', pattern)
elif action == 'global-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=False)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found anywhere in '
# 'distribution', pattern)
elif action == 'recursive-include':
for pattern in patterns:
if not self._include_pattern(pattern, prefix=thedir):
logger.warning('no files found matching %r '
'under directory %r', pattern, thedir)
elif action == 'recursive-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, prefix=thedir)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found under directory %r',
# pattern, thedir)
elif action == 'graft':
if not self._include_pattern(None, prefix=dirpattern):
logger.warning('no directories found matching %r',
dirpattern)
elif action == 'prune':
if not self._exclude_pattern(None, prefix=dirpattern):
logger.warning('no previously-included directories found '
'matching %r', dirpattern)
else: # pragma: no cover
# This should never happen, as it should be caught in
# _parse_template_line
raise DistlibException(
'invalid action %r' % action)
#
# Private API
#
def _parse_directive(self, directive):
"""
Validate a directive.
:param directive: The directive to validate.
:return: A tuple of action, patterns, thedir, dir_patterns
"""
words = directive.split()
if len(words) == 1 and words[0] not in ('include', 'exclude',
'global-include',
'global-exclude',
'recursive-include',
'recursive-exclude',
'graft', 'prune'):
# no action given, let's use the default 'include'
words.insert(0, 'include')
action = words[0]
patterns = thedir = dir_pattern = None
if action in ('include', 'exclude',
'global-include', 'global-exclude'):
if len(words) < 2:
raise DistlibException(
'%r expects <pattern1> <pattern2> ...' % action)
patterns = [convert_path(word) for word in words[1:]]
elif action in ('recursive-include', 'recursive-exclude'):
if len(words) < 3:
raise DistlibException(
'%r expects <dir> <pattern1> <pattern2> ...' % action)
thedir = convert_path(words[1])
patterns = [convert_path(word) for word in words[2:]]
elif action in ('graft', 'prune'):
if len(words) != 2:
raise DistlibException(
'%r expects a single <dir_pattern>' % action)
dir_pattern = convert_path(words[1])
else:
raise DistlibException('unknown action %r' % action)
return action, patterns, thedir, dir_pattern
def _include_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern.
Patterns are not quite the same as implemented by the 'fnmatch'
module: '*' and '?' match non-special characters, where "special"
is platform-dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found.
"""
# XXX docstring lying about what the special chars are?
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
# delayed loading of allfiles list
if self.allfiles is None:
self.findall()
for name in self.allfiles:
if pattern_re.search(name):
self.files.add(name)
found = True
return found
def _exclude_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Remove strings (presumably filenames) from 'files' that match
'pattern'.
Other parameters are the same as for 'include_pattern()', above.
The list 'self.files' is modified in place. Return True if files are
found.
This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
packaging source distributions
"""
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
for f in list(self.files):
if pattern_re.search(f):
self.files.remove(f)
found = True
return found
def _translate_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Translate a shell-like wildcard pattern to a compiled regular
expression.
Return the compiled regex. If 'is_regex' true,
then 'pattern' is directly compiled to a regex (if it's a string)
or just returned as-is (assumes it's a regex object).
"""
if is_regex:
if isinstance(pattern, str):
return re.compile(pattern)
else:
return pattern
if pattern:
pattern_re = self._glob_to_re(pattern)
else:
pattern_re = ''
base = re.escape(os.path.join(self.base, ''))
if prefix is not None:
# ditch end of pattern character
empty_pattern = self._glob_to_re('')
prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
sep = os.sep
if os.sep == '\\':
sep = r'\\'
pattern_re = '^' + base + sep.join((prefix_re,
'.*' + pattern_re))
else: # no prefix -- respect anchor flag
if anchor:
pattern_re = '^' + base + pattern_re
return re.compile(pattern_re)
def _glob_to_re(self, pattern):
"""Translate a shell-like glob pattern to a regular expression.
Return a string containing the regex. Differs from
'fnmatch.translate()' in that '*' does not match "special characters"
(which are platform-specific).
"""
pattern_re = fnmatch.translate(pattern)
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
# and by extension they shouldn't match such "special characters" under
# any OS. So change all non-escaped dots in the RE to match any
# character except the special characters (currently: just os.sep).
sep = os.sep
if os.sep == '\\':
# we're using a regex to manipulate a regex, so we need
# to escape the backslash twice
sep = r'\\\\'
escaped = r'\1[^%s]' % sep
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
return pattern_re
| apache-2.0 | -8,514,432,104,207,264,000 | 36.051771 | 79 | 0.542065 | false |
0x46616c6b/ansible | lib/ansible/plugins/action/net_config.py | 137 | 4196 | #
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
        for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 | 1,176,952,846,355,783,400 | 36.132743 | 85 | 0.617969 | false |
vCentre/vFRP-6233 | frappe/patches/v4_1/file_manager_fix.py | 29 | 3309 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os
from frappe.utils.file_manager import get_content_hash, get_file, get_file_name
from frappe.utils import get_files_path, get_site_path
# The files missed by the previous patch might have been replaced with new files
# with the same filename
#
# This patch does the following,
# * Detect which files were replaced and rename them with name{hash:5}.extn and
# update filedata record for the new file
#
# * make missing_files.txt in site dir with files that should be recovered from
# a backup from a time before version 3 migration
#
# * Patch remaining unpatched File records.
def execute():
frappe.db.auto_commit_on_many_writes = True
rename_replacing_files()
for name, file_name, file_url in frappe.db.sql(
"""select name, file_name, file_url from `tabFile`
where ifnull(file_name, '')!='' and ifnull(content_hash, '')=''"""):
b = frappe.get_doc('File', name)
old_file_name = b.file_name
b.file_name = os.path.basename(old_file_name)
if old_file_name.startswith('files/') or old_file_name.startswith('/files/'):
b.file_url = os.path.normpath('/' + old_file_name)
else:
b.file_url = os.path.normpath('/files/' + old_file_name)
try:
_file_name, content = get_file(name)
b.content_hash = get_content_hash(content)
except IOError:
print 'Warning: Error processing ', name
b.content_hash = None
b.flags.ignore_duplicate_entry_error = True
b.save()
frappe.db.auto_commit_on_many_writes = False
def get_replaced_files():
ret = []
new_files = dict(frappe.db.sql("select name, file_name from `tabFile` where file_name not like 'files/%'"))
old_files = dict(frappe.db.sql("select name, file_name from `tabFile` where ifnull(content_hash, '')=''"))
invfiles = invert_dict(new_files)
for nname, nfilename in new_files.iteritems():
if 'files/' + nfilename in old_files.values():
ret.append((nfilename, invfiles[nfilename]))
return ret
def rename_replacing_files():
replaced_files = get_replaced_files()
if len(replaced_files):
missing_files = [v[0] for v in replaced_files]
with open(get_site_path('missing_files.txt'), 'w') as f:
f.write(('\n'.join(missing_files) + '\n').encode('utf-8'))
for file_name, file_datas in replaced_files:
print 'processing ' + file_name
content_hash = frappe.db.get_value('File', file_datas[0], 'content_hash')
if not content_hash:
continue
new_file_name = get_file_name(file_name, content_hash)
		if os.path.exists(get_files_path(new_file_name)):
			print 'skipping ' + file_name
			continue
try:
os.rename(get_files_path(file_name), get_files_path(new_file_name))
except OSError:
print 'Error renaming ', file_name
for name in file_datas:
f = frappe.get_doc('File', name)
f.file_name = new_file_name
f.file_url = '/files/' + new_file_name
f.save()
def invert_dict(ddict):
ret = {}
for k,v in ddict.iteritems():
if not ret.get(v):
ret[v] = [k]
else:
ret[v].append(k)
return ret
def get_file_name(fname, hash):
if '.' in fname:
partial, extn = fname.rsplit('.', 1)
else:
partial = fname
extn = ''
return '{partial}{suffix}.{extn}'.format(partial=partial, extn=extn, suffix=hash[:5])
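# Illustrative example (not part of the original patch): with a content hash
# beginning 'abcde', get_file_name('photo.jpg', content_hash) returns
# 'photoabcde.jpg', matching the name{hash:5}.extn scheme described above.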
| mit | 9,029,095,338,125,810,000 | 32.765306 | 108 | 0.688426 | false |
adazey/Muzez | libs/nltk/corpus/reader/timit.py | 4 | 17406 | # Natural Language Toolkit: TIMIT Corpus Reader
#
# Copyright (C) 2001-2007 NLTK Project
# Author: Haejoong Lee <haejoong@ldc.upenn.edu>
# Steven Bird <stevenbird1@gmail.com>
# Jacob Perkins <japerk@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
# [xx] this docstring is out-of-date:
"""
Read tokens, phonemes and audio data from the NLTK TIMIT Corpus.
This corpus contains a selected portion of the TIMIT corpus.
- 16 speakers from 8 dialect regions
- 1 male and 1 female from each dialect region
- total 130 sentences (10 sentences per speaker. Note that some
sentences are shared among other speakers, especially sa1 and sa2
are spoken by all speakers.)
- total 160 recording of sentences (10 recordings per speaker)
- audio format: NIST Sphere, single channel, 16kHz sampling,
16 bit sample, PCM encoding
Module contents
===============
The timit corpus reader provides 4 functions and 4 data items.
- utterances
    List of utterances in the corpus. There are a total of 160 utterances,
each of which corresponds to a unique utterance of a speaker.
Here's an example of an utterance identifier in the list::
     dr1-fvmh0/sx206
       - _----  _---
      |  |  |   |  |
      |  |  |   |  `--- sentence number
      |  |  |   `----- sentence type (a:all, i:shared, x:exclusive)
      |  |  `--------- speaker ID
      |  `------------ sex (m:male, f:female)
      `-------------- dialect region (1..8)
- speakers
List of speaker IDs. An example of speaker ID::
dr1-fvmh0
    Note that if you split an item ID on '/' and take the first element of
    the result, you will get a speaker ID.
>>> itemid = 'dr1-fvmh0/sx206'
>>> spkrid , sentid = itemid.split('/')
>>> spkrid
'dr1-fvmh0'
The second element of the result is a sentence ID.
- dictionary()
Phonetic dictionary of words contained in this corpus. This is a Python
dictionary from words to phoneme lists.
- spkrinfo()
Speaker information table. It's a Python dictionary from speaker IDs to
    records of 10 fields. Speaker IDs are the same as the ones in timit.speakers.
Each record is a dictionary from field names to values, and the fields are
as follows::
id speaker ID as defined in the original TIMIT speaker info table
sex speaker gender (M:male, F:female)
dr speaker dialect region (1:new england, 2:northern,
3:north midland, 4:south midland, 5:southern, 6:new york city,
7:western, 8:army brat (moved around))
use corpus type (TRN:training, TST:test)
in this sample corpus only TRN is available
recdate recording date
birthdate speaker birth date
ht speaker height
race speaker race (WHT:white, BLK:black, AMR:american indian,
SPN:spanish-american, ORN:oriental,???:unknown)
edu speaker education level (HS:high school, AS:associate degree,
BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA),
PHD:doctorate degree (PhD,JD,MD), ??:unknown)
comments comments by the recorder
The 4 functions are as follows.
- tokenized(sentences=items, offset=False)
Given a list of items, returns an iterator of a list of word lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the word list is a tuple of word(string), start offset and
end offset, where offset is represented as a number of 16kHz samples.
- phonetic(sentences=items, offset=False)
Given a list of items, returns an iterator of a list of phoneme lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the phoneme list is a tuple of word(string), start offset
and end offset, where offset is represented as a number of 16kHz samples.
- audiodata(item, start=0, end=None)
Given an item, returns a chunk of audio samples formatted into a string.
    When the function is called, if start and end are omitted, the entire
samples of the recording will be returned. If only end is omitted,
samples from the start offset to the end of the recording will be returned.
- play(data)
Play the given audio samples. The audio samples can be obtained from the
timit.audiodata function.
"""
from __future__ import print_function, unicode_literals
import sys
import os
import re
import tempfile
import time
from nltk import compat
from nltk.tree import Tree
from nltk.internals import import_from_stdlib
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class TimitCorpusReader(CorpusReader):
"""
Reader for the TIMIT corpus (or any other corpus with the same
file layout and use of file formats). The corpus root directory
should contain the following files:
- timitdic.txt: dictionary of standard transcriptions
- spkrinfo.txt: table of speaker information
In addition, the root directory should contain one subdirectory
for each speaker, containing three files for each utterance:
- <utterance-id>.txt: text content of utterances
- <utterance-id>.wrd: tokenized text content of utterances
- <utterance-id>.phn: phonetic transcription of utterances
- <utterance-id>.wav: utterance sound file
"""
_FILE_RE = (r'(\w+-\w+/\w+\.(phn|txt|wav|wrd))|' +
r'timitdic\.txt|spkrinfo\.txt')
"""A regexp matching fileids that are used by this corpus reader."""
_UTTERANCE_RE = r'\w+-\w+/\w+\.txt'
def __init__(self, root, encoding='utf8'):
"""
Construct a new TIMIT corpus reader in the given directory.
:param root: The root directory for this corpus.
"""
# Ensure that wave files don't get treated as unicode data:
if isinstance(encoding, compat.string_types):
encoding = [('.*\.wav', None), ('.*', encoding)]
CorpusReader.__init__(self, root,
find_corpus_fileids(root, self._FILE_RE),
encoding=encoding)
self._utterances = [name[:-4] for name in
find_corpus_fileids(root, self._UTTERANCE_RE)]
"""A list of the utterance identifiers for all utterances in
this corpus."""
self._speakerinfo = None
self._root = root
self.speakers = sorted(set(u.split('/')[0] for u in self._utterances))
def fileids(self, filetype=None):
"""
Return a list of file identifiers for the files that make up
this corpus.
:param filetype: If specified, then ``filetype`` indicates that
only the files that have the given type should be
returned. Accepted values are: ``txt``, ``wrd``, ``phn``,
``wav``, or ``metadata``,
"""
if filetype is None:
return CorpusReader.fileids(self)
elif filetype in ('txt', 'wrd', 'phn', 'wav'):
return ['%s.%s' % (u, filetype) for u in self._utterances]
elif filetype == 'metadata':
return ['timitdic.txt', 'spkrinfo.txt']
else:
raise ValueError('Bad value for filetype: %r' % filetype)
def utteranceids(self, dialect=None, sex=None, spkrid=None,
sent_type=None, sentid=None):
"""
:return: A list of the utterance identifiers for all
utterances in this corpus, or for the given speaker, dialect
region, gender, sentence type, or sentence number, if
specified.
"""
if isinstance(dialect, compat.string_types): dialect = [dialect]
if isinstance(sex, compat.string_types): sex = [sex]
if isinstance(spkrid, compat.string_types): spkrid = [spkrid]
if isinstance(sent_type, compat.string_types): sent_type = [sent_type]
if isinstance(sentid, compat.string_types): sentid = [sentid]
utterances = self._utterances[:]
if dialect is not None:
utterances = [u for u in utterances if u[2] in dialect]
if sex is not None:
utterances = [u for u in utterances if u[4] in sex]
if spkrid is not None:
utterances = [u for u in utterances if u[:9] in spkrid]
if sent_type is not None:
utterances = [u for u in utterances if u[11] in sent_type]
if sentid is not None:
            utterances = [u for u in utterances if u[10:] in sentid]
return utterances
def transcription_dict(self):
"""
:return: A dictionary giving the 'standard' transcription for
each word.
"""
_transcriptions = {}
for line in self.open('timitdic.txt'):
if not line.strip() or line[0] == ';': continue
m = re.match(r'\s*(\S+)\s+/(.*)/\s*$', line)
if not m: raise ValueError('Bad line: %r' % line)
_transcriptions[m.group(1)] = m.group(2).split()
return _transcriptions
def spkrid(self, utterance):
return utterance.split('/')[0]
def sentid(self, utterance):
return utterance.split('/')[1]
def utterance(self, spkrid, sentid):
return '%s/%s' % (spkrid, sentid)
def spkrutteranceids(self, speaker):
"""
:return: A list of all utterances associated with a given
speaker.
"""
return [utterance for utterance in self._utterances
if utterance.startswith(speaker+'/')]
def spkrinfo(self, speaker):
"""
:return: A dictionary mapping .. something.
"""
if speaker in self._utterances:
speaker = self.spkrid(speaker)
if self._speakerinfo is None:
self._speakerinfo = {}
for line in self.open('spkrinfo.txt'):
if not line.strip() or line[0] == ';': continue
rec = line.strip().split(None, 9)
key = "dr%s-%s%s" % (rec[2],rec[1].lower(),rec[0].lower())
self._speakerinfo[key] = SpeakerInfo(*rec)
return self._speakerinfo[speaker]
def phones(self, utterances=None):
return [line.split()[-1]
for fileid in self._utterance_fileids(utterances, '.phn')
for line in self.open(fileid) if line.strip()]
def phone_times(self, utterances=None):
"""
offset is represented as a number of 16kHz samples!
"""
return [(line.split()[2], int(line.split()[0]), int(line.split()[1]))
for fileid in self._utterance_fileids(utterances, '.phn')
for line in self.open(fileid) if line.strip()]
def words(self, utterances=None):
return [line.split()[-1]
for fileid in self._utterance_fileids(utterances, '.wrd')
for line in self.open(fileid) if line.strip()]
def word_times(self, utterances=None):
return [(line.split()[2], int(line.split()[0]), int(line.split()[1]))
for fileid in self._utterance_fileids(utterances, '.wrd')
for line in self.open(fileid) if line.strip()]
def sents(self, utterances=None):
return [[line.split()[-1]
for line in self.open(fileid) if line.strip()]
for fileid in self._utterance_fileids(utterances, '.wrd')]
def sent_times(self, utterances=None):
return [(line.split(None,2)[-1].strip(),
int(line.split()[0]), int(line.split()[1]))
for fileid in self._utterance_fileids(utterances, '.txt')
for line in self.open(fileid) if line.strip()]
def phone_trees(self, utterances=None):
if utterances is None: utterances = self._utterances
if isinstance(utterances, compat.string_types): utterances = [utterances]
trees = []
for utterance in utterances:
word_times = self.word_times(utterance)
phone_times = self.phone_times(utterance)
sent_times = self.sent_times(utterance)
while sent_times:
(sent, sent_start, sent_end) = sent_times.pop(0)
trees.append(Tree('S', []))
while (word_times and phone_times and
phone_times[0][2] <= word_times[0][1]):
trees[-1].append(phone_times.pop(0)[0])
while word_times and word_times[0][2] <= sent_end:
(word, word_start, word_end) = word_times.pop(0)
trees[-1].append(Tree(word, []))
while phone_times and phone_times[0][2] <= word_end:
trees[-1][-1].append(phone_times.pop(0)[0])
while phone_times and phone_times[0][2] <= sent_end:
trees[-1].append(phone_times.pop(0)[0])
return trees
# [xx] NOTE: This is currently broken -- we're assuming that the
# fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE
# fileids.
def wav(self, utterance, start=0, end=None):
# nltk.chunk conflicts with the stdlib module 'chunk'
wave = import_from_stdlib('wave')
w = wave.open(self.open(utterance+'.wav'), 'rb')
if end is None:
end = w.getnframes()
# Skip past frames before start, then read the frames we want
w.readframes(start)
frames = w.readframes(end-start)
# Open a new temporary file -- the wave module requires
# an actual file, and won't work w/ stringio. :(
tf = tempfile.TemporaryFile()
out = wave.open(tf, 'w')
# Write the parameters & data to the new file.
out.setparams(w.getparams())
out.writeframes(frames)
out.close()
# Read the data back from the file, and return it. The
# file will automatically be deleted when we return.
tf.seek(0)
return tf.read()
def audiodata(self, utterance, start=0, end=None):
assert(end is None or end > start)
headersize = 44
if end is None:
data = self.open(utterance+'.wav').read()
else:
data = self.open(utterance+'.wav').read(headersize+end*2)
return data[headersize+start*2:]
def _utterance_fileids(self, utterances, extension):
if utterances is None: utterances = self._utterances
if isinstance(utterances, compat.string_types): utterances = [utterances]
return ['%s%s' % (u, extension) for u in utterances]
def play(self, utterance, start=0, end=None):
"""
Play the given audio sample.
:param utterance: The utterance id of the sample to play
"""
# Method 1: os audio dev.
try:
import ossaudiodev
try:
dsp = ossaudiodev.open('w')
dsp.setfmt(ossaudiodev.AFMT_S16_LE)
dsp.channels(1)
dsp.speed(16000)
dsp.write(self.audiodata(utterance, start, end))
dsp.close()
except IOError as e:
print(("can't acquire the audio device; please "
"activate your audio device."), file=sys.stderr)
print("system error message:", str(e), file=sys.stderr)
return
except ImportError:
pass
# Method 2: pygame
try:
# FIXME: this won't work under python 3
import pygame.mixer, StringIO
pygame.mixer.init(16000)
f = StringIO.StringIO(self.wav(utterance, start, end))
pygame.mixer.Sound(f).play()
while pygame.mixer.get_busy():
time.sleep(0.01)
return
except ImportError:
pass
# Method 3: complain. :)
print(("you must install pygame or ossaudiodev "
"for audio playback."), file=sys.stderr)
@compat.python_2_unicode_compatible
class SpeakerInfo(object):
def __init__(self, id, sex, dr, use, recdate, birthdate,
ht, race, edu, comments=None):
self.id = id
self.sex = sex
self.dr = dr
self.use = use
self.recdate = recdate
self.birthdate = birthdate
self.ht = ht
self.race = race
self.edu = edu
self.comments = comments
def __repr__(self):
attribs = 'id sex dr use recdate birthdate ht race edu comments'
args = ['%s=%r' % (attr, getattr(self, attr))
for attr in attribs.split()]
return 'SpeakerInfo(%s)' % (', '.join(args))
def read_timit_block(stream):
"""
Block reader for timit tagged sentences, which are preceded by a sentence
number that will be ignored.
"""
line = stream.readline()
if not line: return []
n, sent = line.split(' ', 1)
return [sent]
| gpl-3.0 | 807,967,345,690,794,500 | 36.68 | 85 | 0.58003 | false |
muff1nman/duplicity | duplicity/backends/ncftpbackend.py | 1 | 5129 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import urllib
import duplicity.backend
from duplicity import globals
from duplicity import log
from duplicity import tempdir
class NCFTPBackend(duplicity.backend.Backend):
"""Connect to remote store using File Transfer Protocol"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# we expect an error return, so go low-level and ignore it
try:
p = os.popen("ncftpls -v")
fout = p.read()
ret = p.close()
except Exception:
pass
# the expected error is 8 in the high-byte and some output
if ret != 0x0800 or not fout:
log.FatalError("NcFTP not found: Please install NcFTP version 3.1.9 or later",
log.ErrorCode.ftp_ncftp_missing)
# version is the second word of the first line
version = fout.split('\n')[0].split()[1]
if version < "3.1.9":
log.FatalError("NcFTP too old: Duplicity requires NcFTP version 3.1.9,"
"3.2.1 or later. Version 3.2.0 will not work properly.",
log.ErrorCode.ftp_ncftp_too_old)
elif version == "3.2.0":
log.Warn("NcFTP (ncftpput) version 3.2.0 may fail with duplicity.\n"
"see: http://www.ncftpd.com/ncftp/doc/changelog.html\n"
"If you have trouble, please upgrade to 3.2.1 or later",
log.WarningCode.ftp_ncftp_v320)
log.Notice("NcFTP version is %s" % version)
self.parsed_url = parsed_url
self.url_string = duplicity.backend.strip_auth_from_url(self.parsed_url)
# strip ncftp+ prefix
self.url_string = duplicity.backend.strip_prefix(self.url_string, 'ncftp')
# This squelches the "file not found" result from ncftpls when
# the ftp backend looks for a collection that does not exist.
# version 3.2.2 has error code 5, 1280 is some legacy value
self.popen_breaks['ncftpls'] = [5, 1280]
# Use an explicit directory name.
if self.url_string[-1] != '/':
self.url_string += '/'
self.password = self.get_password()
if globals.ftp_connection == 'regular':
self.conn_opt = '-E'
else:
self.conn_opt = '-F'
self.tempfile, self.tempname = tempdir.default().mkstemp()
os.write(self.tempfile, "host %s\n" % self.parsed_url.hostname)
os.write(self.tempfile, "user %s\n" % self.parsed_url.username)
os.write(self.tempfile, "pass %s\n" % self.password)
os.close(self.tempfile)
self.flags = "-f %s %s -t %s -o useCLNT=0,useHELP_SITE=0 " % \
(self.tempname, self.conn_opt, globals.timeout)
if parsed_url.port is not None and parsed_url.port != 21:
self.flags += " -P '%s'" % (parsed_url.port)
def _put(self, source_path, remote_filename):
remote_path = os.path.join(urllib.unquote(self.parsed_url.path.lstrip('/')), remote_filename).rstrip()
commandline = "ncftpput %s -m -V -C '%s' '%s'" % \
(self.flags, source_path.name, remote_path)
self.subprocess_popen(commandline)
def _get(self, remote_filename, local_path):
remote_path = os.path.join(urllib.unquote(self.parsed_url.path), remote_filename).rstrip()
commandline = "ncftpget %s -V -C '%s' '%s' '%s'" % \
(self.flags, self.parsed_url.hostname, remote_path.lstrip('/'), local_path.name)
self.subprocess_popen(commandline)
def _list(self):
# Do a long listing to avoid connection reset
commandline = "ncftpls %s -l '%s'" % (self.flags, self.url_string)
_, l, _ = self.subprocess_popen(commandline)
# Look for our files as the last element of a long list line
return [x.split()[-1] for x in l.split('\n') if x and not x.startswith("total ")]
def _delete(self, filename):
commandline = "ncftpls %s -l -X 'DELE %s' '%s'" % \
(self.flags, filename, self.url_string)
self.subprocess_popen(commandline)
duplicity.backend.register_backend("ncftp+ftp", NCFTPBackend)
duplicity.backend.uses_netloc.extend(['ncftp+ftp'])
| gpl-2.0 | 4,047,611,509,393,753,600 | 42.10084 | 110 | 0.626048 | false |
mkalte666/Markkasse | setup.py | 1 | 3478 | #!/usr/bin/env python
import io
import sys
import os
import sqlite3
import hashlib
import binascii
print "Setting Up Mark System"
print "This will, delete all data but the ones in the backup-folder !"
print "If you are shure you want to continue, type \" YES \". yep, in capslock!\n"
ShouldInstall = unicode(raw_input("Shure? "))
if ShouldInstall != unicode("YES"):
print "Quitting Installation...\n"
sys.exit()
print "Cleaning Up..."
os.system("rm -rf ./marksystem")
os.system("rm -rf ./log")
print "Done!"
print "Beginning Installation. Creating folders...\n"
os.system("mkdir ./backup")
os.system("mkdir ./marksystem")
os.system("mkdir ./marksystem/db")
os.system("mkdir ./marksystem/templates")
os.system("mkdir ./marksystem/static/")
os.system("mkdir ./log/")
os.system("mkdir ./marksystem/static/css")
os.system("mkdir ./marksystem/static/uploads")
os.system("mkdir ./marksystem/static/img")
os.system("mkdir ./marksystem/static/font/")
os.system("mkdir ./marksystem/static/js/")
print "Done!\n"
print "Copying Files..."
os.system("cp ./installation/*.py ./marksystem/")
os.system("touch ./log/mark.log")
os.system("cp ./installation/templates/* ./marksystem/templates")
os.system("cp ./installation/media/img/* ./marksystem/static/img")
os.system("cp ./installation/media/css/* ./marksystem/static/css")
os.system("cp ./installation/media/font/* ./marksystem/static/font")
os.system("cp ./installation/js/* ./marksystem/static/js/")
#copys of files from the installation-files folder here
print "Done!\n"
print "Creating Database..."
#database creation
connection = sqlite3.connect("./marksystem/db/mark.db")
cursor = connection.cursor()
cursor.execute('''CREATE TABLE user_info(id INTEGER PRIMARY KEY, name TEXT, hash TEXT, session TEXT, userlevel INTEGER)''')
cursor.execute('''CREATE TABLE products(id INTEGER PRIMARY KEY, name TEXT, price REAL, amoutInStock INTEGER, image TEXT, isSubproduct BOOLEAN, parent INTEGER, isBuyable INTEGER)''')
cursor.execute('''CREATE TABLE transactions(id INTEGER PRIMARY KEY, description TEXT, inflow REAL, outflow REAL, userID INTEGER, productIDs TEXT, isGenerated BOOLEAN, date TEXT)''')
cursor.execute('''CREATE TABLE pending_orders(id INTEGER PRIMARY KEY, transactionId INTEGER)''')
cursor.execute('''CREATE TABLE debtTransactions(id INTEGER PRIMARY KEY, transactionId INTEGER, isPaied BOOLEAN, userId INTEGER)''')
print "Setting basic information in Database"
cursor.execute('''insert into products(name, price, isSubproduct, parent, isBuyable) values ('remove me!', 3.0, 0, -1, 1)''')
print "Set Root User:"
username = unicode(raw_input("Username: "))
password = "not the"
passwordConfirm = "same"
while password != passwordConfirm:
password = hashlib.sha256(unicode(raw_input("Password: "))).hexdigest()
passwordConfirm = hashlib.sha256(unicode(raw_input("Confirm: "))).hexdigest()
print "Change Password after logging in for the first time!!!"
cursor.execute('''INSERT INTO user_info (name, hash, session, userlevel) VALUES (?, ?, 'invalid', 9001)''', (username, password, ))
connection.commit()
cursor.close()
connection.close()
print "Done!\n"
print "Genarating files"
sessionKey = os.urandom(24).encode('hex')
outfile = open('./marksystem/generated.py', 'w')
outfile.write("secretKey = '"+unicode(sessionKey)+"'\n")
maxdays = 6
maxdays = input("Input maximal time user can owe the system Money:")
outfile.write("maxdays = "+unicode(maxdays)+"\n")
outfile.close()
print "Done!"
print "Installation Compleated!"
| mit | 1,649,113,951,998,755,300 | 39.917647 | 181 | 0.735193 | false |
cloudcache/zstack-utility | kvmagent/kvmagent/test/test_nfs_primary_storage_create_root_volume.py | 3 | 2034 | '''
@author: Frank
'''
import unittest
import time
import os.path
from kvmagent import kvmagent
from kvmagent.plugins import nfs_primarystorage_plugin
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import log
from zstacklib.utils import uuidhelper
from zstacklib.utils import linux
logger = log.get_logger(__name__)
class Test(unittest.TestCase):
NFS_URL = 'localhost:/home/primary'
CALLBACK_URL = 'http://localhost:7070/testcallback'
def callback(self, req):
rsp = jsonobject.loads(req[http.REQUEST_BODY])
print jsonobject.dumps(rsp)
def setUp(self):
self.service = kvmagent.new_rest_service()
kvmagent.get_http_server().register_sync_uri('/testcallback', self.callback)
self.service.start(True)
time.sleep(1)
def mount(self):
cmd = nfs_primarystorage_plugin.MountCmd()
cmd.url = self.NFS_URL
cmd.mountPath = os.path.join('/mnt', uuidhelper.uuid())
callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.MOUNT_PATH])
ret = http.json_dump_post(callurl, cmd)
rsp = jsonobject.loads(ret)
self.assertTrue(rsp.success, rsp.error)
self.assertTrue(linux.is_mounted(cmd.url, cmd.mountPath))
def testName(self):
self.mount()
cmd = nfs_primarystorage_plugin.CreateRootVolumeFromTemplateCmd()
cmd.installUrl = '/tmp/test1.qcow2'
cmd.templatePathInCache = "/tmp/test.qcow2"
cmd.timeout = 30
url = kvmagent._build_url_for_test([nfs_primarystorage_plugin.CREATE_VOLUME_FROM_TEMPLATE_PATH])
rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID:uuidhelper.uuid(), http.CALLBACK_URI:self.CALLBACK_URL})
time.sleep(10)
self.service.stop()
linux.umount_by_url(self.NFS_URL)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | apache-2.0 | 3,680,430,698,550,966,300 | 33.103448 | 124 | 0.643559 | false |
dmitry-r/incubator-airflow | airflow/ti_deps/deps/not_running_dep.py | 58 | 1332 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class NotRunningDep(BaseTIDep):
NAME = "Task Instance Not Already Running"
# Task instances must not already be running, as running two copies of the same
# task instance at the same time (AKA double-trigger) should be avoided at all
# costs, even if the context specifies that all dependencies should be ignored.
IGNOREABLE = False
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.state == State.RUNNING:
yield self._failing_status(
reason="Task is already running, it started on {0}.".format(
ti.start_date))
| apache-2.0 | -1,236,148,100,036,173,600 | 40.625 | 83 | 0.717718 | false |
obadonke/pythonPlay | scripts/where_dat_ex_go/wheredatexgo.py | 2 | 2822 | import csv
class ExceptionStats:
def __init__(self):
self.status = None
self.owner = None
self.count = 0
self.hash = None
def inc_count(self):
self.count += 1
class ExceptionData:
def __init__(self, ident):
self.ident = ident
self.prev_stats = ExceptionStats()
self.curr_stats = ExceptionStats()
self.status_changed = False;
exceptions = dict()
def should_skip(row: dict):
""" Apply same filters as the report
"""
if row['Region'] == "IGNORE":
return True
if row['VersionShort'] == "1" or row['VersionShort'] == "1.0":
return True
if '09/2014' in row['Date']:
return True
return False
def read_exc_from_file(ex_dict, filename, stat_getter):
with open(filename, 'r') as csv_file:
reader = csv.DictReader(csv_file, delimiter=',')
for row in reader:
if should_skip(row):
continue
ident = row['Id']
row_ex = None
if not ident in ex_dict:
row_ex = ExceptionData(ident)
ex_dict[ident] = row_ex
else:
row_ex = ex_dict[ident]
# just override status and owner as we go
stats = stat_getter(row_ex)
stats.status = row['Status']
stats.owner = row['Owner']
stats.hash = row['Exception Hash']
stats.inc_count()
def get_prev_stats(row_ex):
return row_ex.prev_stats
def get_curr_stats(row_ex):
return row_ex.curr_stats
# do initial count of exceptions
read_exc_from_file(exceptions, '20170526.csv', get_prev_stats)
# count current status values
read_exc_from_file(exceptions, '20170620.csv', get_curr_stats)
prev_total_count = 0
curr_total_count = 0
stat_changed_count = 0
with open('20170526-0620.csv', 'w', newline='') as csv_file:
ex_writer = csv.writer(csv_file, delimiter=',')
ex_writer.writerow(['id', 'old_owner', 'old_status', 'old_count', 'old_hash','new_owner', 'new_status', 'new_count', 'new_hash', 'status_changed'])
for k, ex in exceptions.items():
ex.status_changed = not (ex.prev_stats.status == ex.curr_stats.status)
ex_writer.writerow([
ex.ident,
ex.prev_stats.owner, ex.prev_stats.status, ex.prev_stats.count, ex.prev_stats.hash,
ex.curr_stats.owner, ex.curr_stats.status, ex.curr_stats.count, ex.curr_stats.hash,
ex.status_changed
])
prev_total_count += ex.prev_stats.count
curr_total_count += ex.curr_stats.count
if ex.status_changed:
stat_changed_count += ex.curr_stats.count
print("Prev total count:",prev_total_count)
print("Curr total count:",curr_total_count)
print("Changed total count:",stat_changed_count)
| gpl-3.0 | -5,940,291,306,676,327,000 | 26.940594 | 151 | 0.591425 | false |
hannorein/rebound | docs/generate_python_docs.py | 1 | 1054 | import rebound
import inspect
import docstring_to_markdown
def convert_code_blocks(doc):
new_doc = ""
lines = doc.split("\n")
first = True
for line in lines:
if first:
if line[:3]==">>>":
first = False
new_doc += "```python\n"
new_doc += line[3:]+"\n"
else:
new_doc += line+"\n"
else:
if line[:3]==">>>":
new_doc += line[3:]+"\n"
else:
new_doc += "```\n"
new_doc += line+"\n"
first = True
if first==False:
new_doc += "```\n"
return new_doc
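# Illustrative example (not part of the original script): a docstring chunk
#     >>> sim = rebound.Simulation()
#     >>> sim.add(m=1.)
# is rewritten by convert_code_blocks into a fenced block:
#     ```python
#     sim = rebound.Simulation()
#     sim.add(m=1.)
#     ```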
def render_class(cls, functions=None):
d = "## Class `"+cls+"`\n"
d += convert_code_blocks(inspect.cleandoc(eval(cls).__doc__))
for function in functions:
f = getattr(eval(cls),function)
d += "## Function `"+cls+"."+function+"`\n"
d += convert_code_blocks(inspect.cleandoc(f.__doc__))
return d
print(render_class("rebound.Simulation",["copy"]))
| gpl-3.0 | 9,207,667,706,578,794,000 | 26.025641 | 65 | 0.470588 | false |
anbangleo/NlsdeWeb | Python-3.6.0/Lib/test/test_float.py | 2 | 62538 | import fractions
import operator
import os
import random
import sys
import struct
import time
import unittest
from test import support
from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
INVALID_UNDERSCORE_LITERALS)
from math import isinf, isnan, copysign, ldexp
INF = float("inf")
NAN = float("nan")
have_getformat = hasattr(float, "__getformat__")
requires_getformat = unittest.skipUnless(have_getformat,
"requires __getformat__")
requires_setformat = unittest.skipUnless(hasattr(float, "__setformat__"),
"requires __setformat__")
#locate file with float format test values
test_dir = os.path.dirname(__file__) or os.curdir
format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt')
class FloatSubclass(float):
pass
class OtherFloatSubclass(float):
pass
class GeneralFloatCases(unittest.TestCase):
def test_float(self):
self.assertEqual(float(3.14), 3.14)
self.assertEqual(float(314), 314.0)
self.assertEqual(float(" 3.14 "), 3.14)
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertRaises(ValueError, float, "++3.14")
self.assertRaises(ValueError, float, "+-3.14")
self.assertRaises(ValueError, float, "-+3.14")
self.assertRaises(ValueError, float, "--3.14")
self.assertRaises(ValueError, float, ".nan")
self.assertRaises(ValueError, float, "+.inf")
self.assertRaises(ValueError, float, ".")
self.assertRaises(ValueError, float, "-.")
self.assertRaises(TypeError, float, {})
self.assertRaisesRegex(TypeError, "not 'dict'", float, {})
# Lone surrogate
self.assertRaises(UnicodeEncodeError, float, '\uD8F0')
# check that we don't accept alternate exponent markers
self.assertRaises(ValueError, float, "-1.7d29")
self.assertRaises(ValueError, float, "3D-14")
self.assertEqual(float(" \u0663.\u0661\u0664 "), 3.14)
self.assertEqual(float("\N{EM SPACE}3.14\N{EN SPACE}"), 3.14)
# extra long strings should not be a problem
float(b'.' + b'1'*1000)
float('.' + '1'*1000)
def test_underscores(self):
for lit in VALID_UNDERSCORE_LITERALS:
if not any(ch in lit for ch in 'jJxXoObB'):
self.assertEqual(float(lit), eval(lit))
self.assertEqual(float(lit), float(lit.replace('_', '')))
for lit in INVALID_UNDERSCORE_LITERALS:
if lit in ('0_7', '09_99'): # octals are not recognized here
continue
if not any(ch in lit for ch in 'jJxXoObB'):
self.assertRaises(ValueError, float, lit)
# Additional test cases; nan and inf are never valid as literals,
# only in the float() constructor, but we don't allow underscores
# in or around them.
self.assertRaises(ValueError, float, '_NaN')
self.assertRaises(ValueError, float, 'Na_N')
self.assertRaises(ValueError, float, 'IN_F')
self.assertRaises(ValueError, float, '-_INF')
self.assertRaises(ValueError, float, '-INF_')
# Check that we handle bytes values correctly.
self.assertRaises(ValueError, float, b'0_.\xff9')
def test_non_numeric_input_types(self):
# Test possible non-numeric types for the argument x, including
# subclasses of the explicitly documented accepted types.
class CustomStr(str): pass
class CustomBytes(bytes): pass
class CustomByteArray(bytearray): pass
factories = [
bytes,
bytearray,
lambda b: CustomStr(b.decode()),
CustomBytes,
CustomByteArray,
memoryview,
]
try:
from array import array
except ImportError:
pass
else:
factories.append(lambda b: array('B', b))
for f in factories:
x = f(b" 3.14 ")
with self.subTest(type(x)):
self.assertEqual(float(x), 3.14)
with self.assertRaisesRegex(ValueError, "could not convert"):
float(f(b'A' * 0x10))
def test_float_memoryview(self):
self.assertEqual(float(memoryview(b'12.3')[1:4]), 2.3)
self.assertEqual(float(memoryview(b'12.3\x00')[1:4]), 2.3)
self.assertEqual(float(memoryview(b'12.3 ')[1:4]), 2.3)
self.assertEqual(float(memoryview(b'12.3A')[1:4]), 2.3)
self.assertEqual(float(memoryview(b'12.34')[1:4]), 2.3)
def test_error_message(self):
testlist = ('\xbd', '123\xbd', ' 123 456 ')
for s in testlist:
try:
float(s)
except ValueError as e:
self.assertIn(s.strip(), e.args[0])
else:
self.fail("Expected int(%r) to raise a ValueError", s)
@support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
def test_float_with_comma(self):
# set locale to something that doesn't use '.' for the decimal point
# float must not accept the locale specific decimal point but
# it still has to accept the normal python syntax
import locale
if not locale.localeconv()['decimal_point'] == ',':
self.skipTest('decimal_point is not ","')
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
self.assertEqual(float("-3.14 "), -3.14)
self.assertEqual(float(".14 "), .14)
self.assertEqual(float("3. "), 3.0)
self.assertEqual(float("3.e3 "), 3000.0)
self.assertEqual(float("3.2e3 "), 3200.0)
self.assertEqual(float("2.5e-1 "), 0.25)
self.assertEqual(float("5e-1"), 0.5)
self.assertRaises(ValueError, float, " 3,14 ")
self.assertRaises(ValueError, float, " +3,14 ")
self.assertRaises(ValueError, float, " -3,14 ")
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertEqual(float(" 25.e-1 "), 2.5)
self.assertAlmostEqual(float(" .25e-1 "), .025)
def test_floatconversion(self):
# Make sure that calls to __float__() work properly
class Foo1(object):
def __float__(self):
return 42.
class Foo2(float):
def __float__(self):
return 42.
class Foo3(float):
def __new__(cls, value=0.):
return float.__new__(cls, 2*value)
def __float__(self):
return self
class Foo4(float):
def __float__(self):
return 42
# Issue 5759: __float__ not called on str subclasses (though it is on
# unicode subclasses).
class FooStr(str):
def __float__(self):
return float(str(self)) + 1
self.assertEqual(float(Foo1()), 42.)
self.assertEqual(float(Foo2()), 42.)
with self.assertWarns(DeprecationWarning):
self.assertEqual(float(Foo3(21)), 42.)
self.assertRaises(TypeError, float, Foo4(42))
self.assertEqual(float(FooStr('8')), 9.)
class Foo5:
def __float__(self):
return ""
self.assertRaises(TypeError, time.sleep, Foo5())
# Issue #24731
class F:
def __float__(self):
return OtherFloatSubclass(42.)
with self.assertWarns(DeprecationWarning):
self.assertEqual(float(F()), 42.)
with self.assertWarns(DeprecationWarning):
self.assertIs(type(float(F())), float)
with self.assertWarns(DeprecationWarning):
self.assertEqual(FloatSubclass(F()), 42.)
with self.assertWarns(DeprecationWarning):
self.assertIs(type(FloatSubclass(F())), FloatSubclass)
def test_is_integer(self):
self.assertFalse((1.1).is_integer())
self.assertTrue((1.).is_integer())
self.assertFalse(float("nan").is_integer())
self.assertFalse(float("inf").is_integer())
def test_floatasratio(self):
for f, ratio in [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
]:
self.assertEqual(f.as_integer_ratio(), ratio)
for i in range(10000):
f = random.random()
f *= 10 ** random.randint(-100, 100)
n, d = f.as_integer_ratio()
self.assertEqual(float(n).__truediv__(d), f)
R = fractions.Fraction
self.assertEqual(R(0, 1),
R(*float(0.0).as_integer_ratio()))
self.assertEqual(R(5, 2),
R(*float(2.5).as_integer_ratio()))
self.assertEqual(R(1, 2),
R(*float(0.5).as_integer_ratio()))
self.assertEqual(R(4728779608739021, 2251799813685248),
R(*float(2.1).as_integer_ratio()))
self.assertEqual(R(-4728779608739021, 2251799813685248),
R(*float(-2.1).as_integer_ratio()))
self.assertEqual(R(-2100, 1),
R(*float(-2100.0).as_integer_ratio()))
self.assertRaises(OverflowError, float('inf').as_integer_ratio)
self.assertRaises(OverflowError, float('-inf').as_integer_ratio)
self.assertRaises(ValueError, float('nan').as_integer_ratio)
def test_float_containment(self):
floats = (INF, -INF, 0.0, 1.0, NAN)
for f in floats:
self.assertIn(f, [f])
self.assertIn(f, (f,))
self.assertIn(f, {f})
self.assertIn(f, {f: None})
self.assertEqual([f].count(f), 1, "[].count('%r') != 1" % f)
self.assertIn(f, floats)
for f in floats:
# nonidentical containers, same type, same contents
self.assertTrue([f] == [f], "[%r] != [%r]" % (f, f))
self.assertTrue((f,) == (f,), "(%r,) != (%r,)" % (f, f))
self.assertTrue({f} == {f}, "{%r} != {%r}" % (f, f))
self.assertTrue({f : None} == {f: None}, "{%r : None} != "
"{%r : None}" % (f, f))
# identical containers
l, t, s, d = [f], (f,), {f}, {f: None}
self.assertTrue(l == l, "[%r] not equal to itself" % f)
self.assertTrue(t == t, "(%r,) not equal to itself" % f)
self.assertTrue(s == s, "{%r} not equal to itself" % f)
self.assertTrue(d == d, "{%r : None} not equal to itself" % f)
def assertEqualAndEqualSign(self, a, b):
# fail unless a == b and a and b have the same sign bit;
# the only difference from assertEqual is that this test
# distinguishes -0.0 and 0.0.
self.assertEqual((a, copysign(1.0, a)), (b, copysign(1.0, b)))
@support.requires_IEEE_754
def test_float_mod(self):
# Check behaviour of % operator for IEEE 754 special cases.
# In particular, check signs of zeros.
mod = operator.mod
self.assertEqualAndEqualSign(mod(-1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1e-100, 1.0), 1.0)
self.assertEqualAndEqualSign(mod(-0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(1e-100, 1.0), 1e-100)
self.assertEqualAndEqualSign(mod(1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(-1e-100, -1.0), -1e-100)
self.assertEqualAndEqualSign(mod(-0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(1e-100, -1.0), -1.0)
self.assertEqualAndEqualSign(mod(1.0, -1.0), -0.0)
@support.requires_IEEE_754
def test_float_pow(self):
# test builtin pow and ** operator for IEEE 754 special cases.
# Special cases taken from section F.9.4.4 of the C99 specification
for pow_op in pow, operator.pow:
# x**NAN is NAN for any x except 1
self.assertTrue(isnan(pow_op(-INF, NAN)))
self.assertTrue(isnan(pow_op(-2.0, NAN)))
self.assertTrue(isnan(pow_op(-1.0, NAN)))
self.assertTrue(isnan(pow_op(-0.5, NAN)))
self.assertTrue(isnan(pow_op(-0.0, NAN)))
self.assertTrue(isnan(pow_op(0.0, NAN)))
self.assertTrue(isnan(pow_op(0.5, NAN)))
self.assertTrue(isnan(pow_op(2.0, NAN)))
self.assertTrue(isnan(pow_op(INF, NAN)))
self.assertTrue(isnan(pow_op(NAN, NAN)))
# NAN**y is NAN for any y except +-0
self.assertTrue(isnan(pow_op(NAN, -INF)))
self.assertTrue(isnan(pow_op(NAN, -2.0)))
self.assertTrue(isnan(pow_op(NAN, -1.0)))
self.assertTrue(isnan(pow_op(NAN, -0.5)))
self.assertTrue(isnan(pow_op(NAN, 0.5)))
self.assertTrue(isnan(pow_op(NAN, 1.0)))
self.assertTrue(isnan(pow_op(NAN, 2.0)))
self.assertTrue(isnan(pow_op(NAN, INF)))
# (+-0)**y raises ZeroDivisionError for y a negative odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -1.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -1.0)
# (+-0)**y raises ZeroDivisionError for y finite and negative
# but not an odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -0.5)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -0.5)
# (+-0)**y is +-0 for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 1.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 1.0), 0.0)
# (+-0)**y is 0 for y finite and positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 2.0), 0.0)
# (-1)**+-inf is 1
self.assertEqualAndEqualSign(pow_op(-1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, INF), 1.0)
# 1**y is 1 for any y, even if y is an infinity or nan
self.assertEqualAndEqualSign(pow_op(1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, NAN), 1.0)
# x**+-0 is 1 for any x, even if x is a zero, infinity, or nan
self.assertEqualAndEqualSign(pow_op(-INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, -0.0), 1.0)
# x**y defers to complex pow for finite negative x and
# non-integral y.
self.assertEqual(type(pow_op(-2.0, -0.5)), complex)
self.assertEqual(type(pow_op(-2.0, 0.5)), complex)
self.assertEqual(type(pow_op(-1.0, -0.5)), complex)
self.assertEqual(type(pow_op(-1.0, 0.5)), complex)
self.assertEqual(type(pow_op(-0.5, -0.5)), complex)
self.assertEqual(type(pow_op(-0.5, 0.5)), complex)
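            # e.g. (-2.0) ** 0.5 returns a complex value close to
            # 1.4142135623730951j (with a tiny spurious real part) rather than
            # raising, because the float case delegates to complex pow here.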
# x**-INF is INF for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, -INF), INF)
self.assertEqualAndEqualSign(pow_op(-0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.5, -INF), INF)
# x**-INF is 0 for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -INF), 0.0)
# x**INF is 0 for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, INF), 0.0)
# x**INF is INF for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, INF), INF)
self.assertEqualAndEqualSign(pow_op(-2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(INF, INF), INF)
# (-INF)**y is -0.0 for y a negative odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -1.0), -0.0)
# (-INF)**y is 0.0 for y negative but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-INF, -2.0), 0.0)
# (-INF)**y is -INF for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 1.0), -INF)
# (-INF)**y is INF for y positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(-INF, 2.0), INF)
# INF**y is INF for y positive
self.assertEqualAndEqualSign(pow_op(INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(INF, 1.0), INF)
self.assertEqualAndEqualSign(pow_op(INF, 2.0), INF)
# INF**y is 0.0 for y negative
self.assertEqualAndEqualSign(pow_op(INF, -2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -1.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.5), 0.0)
# basic checks not covered by the special cases above
self.assertEqualAndEqualSign(pow_op(-2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(-2.0, -1.0), -0.5)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 1.0), -2.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 2.0), 4.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(2.0, -1.0), 0.5)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 1.0), 2.0)
self.assertEqualAndEqualSign(pow_op(2.0, 2.0), 4.0)
# 1 ** large and -1 ** large; some libms apparently
# have problems with these
self.assertEqualAndEqualSign(pow_op(1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1e100), 1.0)
# check sign for results that underflow to 0
self.assertEqualAndEqualSign(pow_op(-2.0, -2000.0), 0.0)
self.assertEqual(type(pow_op(-2.0, -2000.5)), complex)
self.assertEqualAndEqualSign(pow_op(-2.0, -2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2001.0), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 2000.0), 0.0)
self.assertEqual(type(pow_op(-0.5, 2000.5)), complex)
self.assertEqualAndEqualSign(pow_op(-0.5, 2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2001.0), 0.0)
# check we don't raise an exception for subnormal results,
# and validate signs. Tests currently disabled, since
# they fail on systems where a subnormal result from pow
# is flushed to zero (e.g. Debian/ia64.)
#self.assertTrue(0.0 < pow_op(0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(0.5, 1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-0.5, 1047) > -1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-2.0, -1047) > -1e-315)
@requires_setformat
class FormatFunctionsTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_getformat(self):
self.assertIn(float.__getformat__('double'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertIn(float.__getformat__('float'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertRaises(ValueError, float.__getformat__, 'chicken')
self.assertRaises(TypeError, float.__getformat__, 1)
def test_setformat(self):
for t in 'double', 'float':
float.__setformat__(t, 'unknown')
if self.save_formats[t] == 'IEEE, big-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
elif self.save_formats[t] == 'IEEE, little-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
else:
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'chicken')
self.assertRaises(ValueError, float.__setformat__,
'chicken', 'unknown')
BE_DOUBLE_INF = b'\x7f\xf0\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_INF = bytes(reversed(BE_DOUBLE_INF))
BE_DOUBLE_NAN = b'\x7f\xf8\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_NAN = bytes(reversed(BE_DOUBLE_NAN))
BE_FLOAT_INF = b'\x7f\x80\x00\x00'
LE_FLOAT_INF = bytes(reversed(BE_FLOAT_INF))
BE_FLOAT_NAN = b'\x7f\xc0\x00\x00'
LE_FLOAT_NAN = bytes(reversed(BE_FLOAT_NAN))
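# How these bit patterns decode under IEEE 754 (illustrative): an exponent field
# of all ones with a zero mantissa is an infinity (0x7ff0... for binary64,
# 0x7f80... for binary32), while a non-zero mantissa marks a NaN (the quiet-NaN
# bit is set in the 0x7ff8... and 0x7fc0... patterns above).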
# on non-IEEE platforms, attempting to unpack a bit pattern
# representing an infinity or a NaN should raise an exception.
@requires_setformat
class UnknownFormatTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
float.__setformat__('double', 'unknown')
float.__setformat__('float', 'unknown')
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_double_specials_dont_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
def test_float_specials_dont_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
# on an IEEE platform, all we guarantee is that bit patterns
# representing infinities or NaNs do not raise an exception; all else
# is accident (today).
# let's also try to guarantee that -0.0 and 0.0 don't get confused.
class IEEEFormatTestCase(unittest.TestCase):
@support.requires_IEEE_754
def test_double_specials_do_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
struct.unpack(fmt, data)
@support.requires_IEEE_754
def test_float_specials_do_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
struct.unpack(fmt, data)
class FormatTestCase(unittest.TestCase):
def test_format(self):
# these should be rewritten to use both format(x, spec) and
# x.__format__(spec)
self.assertEqual(format(0.0, 'f'), '0.000000')
# the default is 'g', except for empty format spec
self.assertEqual(format(0.0, ''), '0.0')
self.assertEqual(format(0.01, ''), '0.01')
self.assertEqual(format(0.01, 'g'), '0.01')
# empty presentation type should format in the same way as str
# (issue 5920)
x = 100/7.
self.assertEqual(format(x, ''), str(x))
self.assertEqual(format(x, '-'), str(x))
self.assertEqual(format(x, '>'), str(x))
self.assertEqual(format(x, '2'), str(x))
self.assertEqual(format(1.0, 'f'), '1.000000')
self.assertEqual(format(-1.0, 'f'), '-1.000000')
self.assertEqual(format( 1.0, ' f'), ' 1.000000')
self.assertEqual(format(-1.0, ' f'), '-1.000000')
self.assertEqual(format( 1.0, '+f'), '+1.000000')
self.assertEqual(format(-1.0, '+f'), '-1.000000')
# % formatting
self.assertEqual(format(-1.0, '%'), '-100.000000%')
# conversion to string should fail
self.assertRaises(ValueError, format, 3.0, "s")
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# issue 3382
self.assertEqual(format(NAN, 'f'), 'nan')
self.assertEqual(format(NAN, 'F'), 'NAN')
self.assertEqual(format(INF, 'f'), 'inf')
self.assertEqual(format(INF, 'F'), 'INF')
@support.requires_IEEE_754
def test_format_testfile(self):
with open(format_testfile) as testfile:
for line in testfile:
if line.startswith('--'):
continue
line = line.strip()
if not line:
continue
lhs, rhs = map(str.strip, line.split('->'))
fmt, arg = lhs.split()
self.assertEqual(fmt % float(arg), rhs)
self.assertEqual(fmt % -float(arg), '-' + rhs)
def test_issue5864(self):
self.assertEqual(format(123.456, '.4'), '123.5')
self.assertEqual(format(1234.56, '.4'), '1.235e+03')
self.assertEqual(format(12345.6, '.4'), '1.235e+04')
class ReprTestCase(unittest.TestCase):
def test_repr(self):
floats_file = open(os.path.join(os.path.split(__file__)[0],
'floating_points.txt'))
for line in floats_file:
line = line.strip()
if not line or line.startswith('#'):
continue
v = eval(line)
self.assertEqual(v, eval(repr(v)))
floats_file.close()
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_short_repr(self):
# test short float repr introduced in Python 3.1. One aspect
# of this repr is that we get some degree of str -> float ->
# str roundtripping. In particular, for any numeric string
# containing 15 or fewer significant digits, those exact same
# digits (modulo trailing zeros) should appear in the output.
# No more repr(0.03) -> "0.029999999999999999"!
test_strings = [
# output always includes *either* a decimal point and at
# least one digit after that point, or an exponent.
'0.0',
'1.0',
'0.01',
'0.02',
'0.03',
'0.04',
'0.05',
'1.23456789',
'10.0',
'100.0',
# values >= 1e16 get an exponent...
'1000000000000000.0',
'9999999999999990.0',
'1e+16',
'1e+17',
# ... and so do values < 1e-4
'0.001',
'0.001001',
'0.00010000000000001',
'0.0001',
'9.999999999999e-05',
'1e-05',
# values designed to provoke failure if the FPU rounding
# precision isn't set correctly
'8.72293771110361e+25',
'7.47005307342313e+26',
'2.86438000439698e+28',
'8.89142905246179e+28',
'3.08578087079232e+35',
]
for s in test_strings:
negs = '-'+s
self.assertEqual(s, repr(float(s)))
self.assertEqual(negs, repr(float(negs)))
# Since Python 3.2, repr and str are identical
self.assertEqual(repr(float(s)), str(float(s)))
self.assertEqual(repr(float(negs)), str(float(negs)))
@support.requires_IEEE_754
class RoundTestCase(unittest.TestCase):
def test_inf_nan(self):
self.assertRaises(OverflowError, round, INF)
self.assertRaises(OverflowError, round, -INF)
self.assertRaises(ValueError, round, NAN)
self.assertRaises(TypeError, round, INF, 0.0)
self.assertRaises(TypeError, round, -INF, 1.0)
self.assertRaises(TypeError, round, NAN, "ceci n'est pas un integer")
self.assertRaises(TypeError, round, -0.0, 1j)
def test_large_n(self):
for n in [324, 325, 400, 2**31-1, 2**31, 2**32, 2**100]:
self.assertEqual(round(123.456, n), 123.456)
self.assertEqual(round(-123.456, n), -123.456)
self.assertEqual(round(1e300, n), 1e300)
self.assertEqual(round(1e-320, n), 1e-320)
self.assertEqual(round(1e150, 300), 1e150)
self.assertEqual(round(1e300, 307), 1e300)
self.assertEqual(round(-3.1415, 308), -3.1415)
self.assertEqual(round(1e150, 309), 1e150)
self.assertEqual(round(1.4e-315, 315), 1e-315)
def test_small_n(self):
for n in [-308, -309, -400, 1-2**31, -2**31, -2**31-1, -2**100]:
self.assertEqual(round(123.456, n), 0.0)
self.assertEqual(round(-123.456, n), -0.0)
self.assertEqual(round(1e300, n), 0.0)
self.assertEqual(round(1e-320, n), 0.0)
def test_overflow(self):
self.assertRaises(OverflowError, round, 1.6e308, -308)
self.assertRaises(OverflowError, round, -1.7e308, -308)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_previous_round_bugs(self):
# particular cases that have occurred in bug reports
self.assertEqual(round(562949953421312.5, 1),
562949953421312.5)
self.assertEqual(round(56294995342131.5, 3),
56294995342131.5)
# round-half-even
self.assertEqual(round(25.0, -1), 20.0)
self.assertEqual(round(35.0, -1), 40.0)
self.assertEqual(round(45.0, -1), 40.0)
self.assertEqual(round(55.0, -1), 60.0)
self.assertEqual(round(65.0, -1), 60.0)
self.assertEqual(round(75.0, -1), 80.0)
self.assertEqual(round(85.0, -1), 80.0)
self.assertEqual(round(95.0, -1), 100.0)
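        # Round-half-even ("banker's rounding") illustrated, e.g.:
        #   >>> round(0.5), round(1.5), round(2.5)
        #   (0, 2, 2)
        # Ties go to the nearest even multiple, which is why 25 -> 20 but 35 -> 40 above.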
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_matches_float_format(self):
# round should give the same results as float formatting
for i in range(500):
x = i/1000.
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
for i in range(5, 5000, 10):
x = i/1000.
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
for i in range(500):
x = random.random()
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
def test_format_specials(self):
# Test formatting of nans and infs.
def test(fmt, value, expected):
# Test with both % and format().
self.assertEqual(fmt % value, expected, fmt)
fmt = fmt[1:] # strip off the %
self.assertEqual(format(value, fmt), expected, fmt)
for fmt in ['%e', '%f', '%g', '%.0e', '%.6f', '%.20g',
'%#e', '%#f', '%#g', '%#.20e', '%#.15f', '%#.3g']:
pfmt = '%+' + fmt[1:]
sfmt = '% ' + fmt[1:]
test(fmt, INF, 'inf')
test(fmt, -INF, '-inf')
test(fmt, NAN, 'nan')
test(fmt, -NAN, 'nan')
# When asking for a sign, it's always provided. nans are
# always positive.
test(pfmt, INF, '+inf')
test(pfmt, -INF, '-inf')
test(pfmt, NAN, '+nan')
test(pfmt, -NAN, '+nan')
# When using ' ' for a sign code, only infs can be negative.
# Others have a space.
test(sfmt, INF, ' inf')
test(sfmt, -INF, '-inf')
test(sfmt, NAN, ' nan')
test(sfmt, -NAN, ' nan')
def test_None_ndigits(self):
for x in round(1.23), round(1.23, None), round(1.23, ndigits=None):
self.assertEqual(x, 1)
self.assertIsInstance(x, int)
for x in round(1.78), round(1.78, None), round(1.78, ndigits=None):
self.assertEqual(x, 2)
self.assertIsInstance(x, int)
# Beginning with Python 2.6, float has cross-platform compatible
# ways to create and represent inf and nan
class InfNanTest(unittest.TestCase):
def test_inf_from_str(self):
self.assertTrue(isinf(float("inf")))
self.assertTrue(isinf(float("+inf")))
self.assertTrue(isinf(float("-inf")))
self.assertTrue(isinf(float("infinity")))
self.assertTrue(isinf(float("+infinity")))
self.assertTrue(isinf(float("-infinity")))
self.assertEqual(repr(float("inf")), "inf")
self.assertEqual(repr(float("+inf")), "inf")
self.assertEqual(repr(float("-inf")), "-inf")
self.assertEqual(repr(float("infinity")), "inf")
self.assertEqual(repr(float("+infinity")), "inf")
self.assertEqual(repr(float("-infinity")), "-inf")
self.assertEqual(repr(float("INF")), "inf")
self.assertEqual(repr(float("+Inf")), "inf")
self.assertEqual(repr(float("-iNF")), "-inf")
self.assertEqual(repr(float("Infinity")), "inf")
self.assertEqual(repr(float("+iNfInItY")), "inf")
self.assertEqual(repr(float("-INFINITY")), "-inf")
self.assertEqual(str(float("inf")), "inf")
self.assertEqual(str(float("+inf")), "inf")
self.assertEqual(str(float("-inf")), "-inf")
self.assertEqual(str(float("infinity")), "inf")
self.assertEqual(str(float("+infinity")), "inf")
self.assertEqual(str(float("-infinity")), "-inf")
self.assertRaises(ValueError, float, "info")
self.assertRaises(ValueError, float, "+info")
self.assertRaises(ValueError, float, "-info")
self.assertRaises(ValueError, float, "in")
self.assertRaises(ValueError, float, "+in")
self.assertRaises(ValueError, float, "-in")
self.assertRaises(ValueError, float, "infinit")
self.assertRaises(ValueError, float, "+Infin")
self.assertRaises(ValueError, float, "-INFI")
self.assertRaises(ValueError, float, "infinitys")
self.assertRaises(ValueError, float, "++Inf")
self.assertRaises(ValueError, float, "-+inf")
self.assertRaises(ValueError, float, "+-infinity")
self.assertRaises(ValueError, float, "--Infinity")
def test_inf_as_str(self):
self.assertEqual(repr(1e300 * 1e300), "inf")
self.assertEqual(repr(-1e300 * 1e300), "-inf")
self.assertEqual(str(1e300 * 1e300), "inf")
self.assertEqual(str(-1e300 * 1e300), "-inf")
def test_nan_from_str(self):
self.assertTrue(isnan(float("nan")))
self.assertTrue(isnan(float("+nan")))
self.assertTrue(isnan(float("-nan")))
self.assertEqual(repr(float("nan")), "nan")
self.assertEqual(repr(float("+nan")), "nan")
self.assertEqual(repr(float("-nan")), "nan")
self.assertEqual(repr(float("NAN")), "nan")
self.assertEqual(repr(float("+NAn")), "nan")
self.assertEqual(repr(float("-NaN")), "nan")
self.assertEqual(str(float("nan")), "nan")
self.assertEqual(str(float("+nan")), "nan")
self.assertEqual(str(float("-nan")), "nan")
self.assertRaises(ValueError, float, "nana")
self.assertRaises(ValueError, float, "+nana")
self.assertRaises(ValueError, float, "-nana")
self.assertRaises(ValueError, float, "na")
self.assertRaises(ValueError, float, "+na")
self.assertRaises(ValueError, float, "-na")
self.assertRaises(ValueError, float, "++nan")
self.assertRaises(ValueError, float, "-+NAN")
self.assertRaises(ValueError, float, "+-NaN")
self.assertRaises(ValueError, float, "--nAn")
def test_nan_as_str(self):
self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")
self.assertEqual(str(1e300 * 1e300 * 0), "nan")
self.assertEqual(str(-1e300 * 1e300 * 0), "nan")
def test_inf_signs(self):
self.assertEqual(copysign(1.0, float('inf')), 1.0)
self.assertEqual(copysign(1.0, float('-inf')), -1.0)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_nan_signs(self):
# When using the dtoa.c code, the sign of float('nan') should
# be predictable.
self.assertEqual(copysign(1.0, float('nan')), 1.0)
self.assertEqual(copysign(1.0, float('-nan')), -1.0)
fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
MAX = fromHex('0x.fffffffffffff8p+1024') # max normal
MIN = fromHex('0x1p-1022') # min normal
TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up
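    # In this notation 0x<mantissa>p<exp> means the hex mantissa times 2**exp;
    # for example fromHex('0x1.8p3') == 12.0 (1.5 * 2**3).  For IEEE 754 binary64
    # these constants are roughly MAX ~ 1.798e+308, MIN ~ 2.225e-308,
    # TINY ~ 5e-324 and EPS ~ 2.22e-16.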
def identical(self, x, y):
# check that floats x and y are identical, or that both
# are NaNs
if isnan(x) or isnan(y):
if isnan(x) == isnan(y):
return
elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
return
self.fail('%r not identical to %r' % (x, y))
def test_ends(self):
self.identical(self.MIN, ldexp(1.0, -1022))
self.identical(self.TINY, ldexp(1.0, -1074))
self.identical(self.EPS, ldexp(1.0, -52))
self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))
def test_invalid_inputs(self):
invalid_inputs = [
'infi', # misspelt infinities and nans
'-Infinit',
'++inf',
'-+Inf',
'--nan',
'+-NaN',
'snan',
'NaNs',
'nna',
'an',
'nf',
'nfinity',
'inity',
'iinity',
'0xnan',
'',
' ',
'x1.0p0',
'0xX1.0p0',
'+ 0x1.0p0', # internal whitespace
'- 0x1.0p0',
'0 x1.0p0',
'0x 1.0p0',
'0x1 2.0p0',
'+0x1 .0p0',
'0x1. 0p0',
'-0x1.0 1p0',
'-0x1.0 p0',
'+0x1.0p +0',
'0x1.0p -0',
'0x1.0p 0',
'+0x1.0p+ 0',
'-0x1.0p- 0',
'++0x1.0p-0', # double signs
'--0x1.0p0',
'+-0x1.0p+0',
'-+0x1.0p0',
'0x1.0p++0',
'+0x1.0p+-0',
'-0x1.0p-+0',
'0x1.0p--0',
'0x1.0.p0',
'0x.p0', # no hex digits before or after point
'0x1,p0', # wrong decimal point character
'0x1pa',
'0x1p\uff10', # fullwidth Unicode digits
'\uff10x1p0',
'0x\uff11p0',
'0x1.\uff10p0',
'0x1p0 \n 0x2p0',
'0x1p0\0 0x1p0', # embedded null byte is not end of string
]
for x in invalid_inputs:
try:
result = fromHex(x)
except ValueError:
pass
else:
self.fail('Expected float.fromhex(%r) to raise ValueError; '
'got %r instead' % (x, result))
def test_whitespace(self):
value_pairs = [
('inf', INF),
('-Infinity', -INF),
('nan', NAN),
('1.0', 1.0),
('-0x.2', -0.125),
('-0.0', -0.0)
]
whitespace = [
'',
' ',
'\t',
'\n',
'\n \t',
'\f',
'\v',
'\r'
]
for inp, expected in value_pairs:
for lead in whitespace:
for trail in whitespace:
got = fromHex(lead + inp + trail)
self.identical(got, expected)
def test_from_hex(self):
        MIN = self.MIN
        MAX = self.MAX
        TINY = self.TINY
        EPS = self.EPS
# two spellings of infinity, with optional signs; case-insensitive
self.identical(fromHex('inf'), INF)
self.identical(fromHex('+Inf'), INF)
self.identical(fromHex('-INF'), -INF)
self.identical(fromHex('iNf'), INF)
self.identical(fromHex('Infinity'), INF)
self.identical(fromHex('+INFINITY'), INF)
self.identical(fromHex('-infinity'), -INF)
self.identical(fromHex('-iNFiNitY'), -INF)
# nans with optional sign; case insensitive
self.identical(fromHex('nan'), NAN)
self.identical(fromHex('+NaN'), NAN)
self.identical(fromHex('-NaN'), NAN)
self.identical(fromHex('-nAN'), NAN)
# variations in input format
self.identical(fromHex('1'), 1.0)
self.identical(fromHex('+1'), 1.0)
self.identical(fromHex('1.'), 1.0)
self.identical(fromHex('1.0'), 1.0)
self.identical(fromHex('1.0p0'), 1.0)
self.identical(fromHex('01'), 1.0)
self.identical(fromHex('01.'), 1.0)
self.identical(fromHex('0x1'), 1.0)
self.identical(fromHex('0x1.'), 1.0)
self.identical(fromHex('0x1.0'), 1.0)
self.identical(fromHex('+0x1.0'), 1.0)
self.identical(fromHex('0x1p0'), 1.0)
self.identical(fromHex('0X1p0'), 1.0)
self.identical(fromHex('0X1P0'), 1.0)
self.identical(fromHex('0x1P0'), 1.0)
self.identical(fromHex('0x1.p0'), 1.0)
self.identical(fromHex('0x1.0p0'), 1.0)
self.identical(fromHex('0x.1p4'), 1.0)
self.identical(fromHex('0x.1p04'), 1.0)
self.identical(fromHex('0x.1p004'), 1.0)
self.identical(fromHex('0x1p+0'), 1.0)
self.identical(fromHex('0x1P-0'), 1.0)
self.identical(fromHex('+0x1p0'), 1.0)
self.identical(fromHex('0x01p0'), 1.0)
self.identical(fromHex('0x1p00'), 1.0)
self.identical(fromHex(' 0x1p0 '), 1.0)
self.identical(fromHex('\n 0x1p0'), 1.0)
self.identical(fromHex('0x1p0 \t'), 1.0)
self.identical(fromHex('0xap0'), 10.0)
self.identical(fromHex('0xAp0'), 10.0)
self.identical(fromHex('0xaP0'), 10.0)
self.identical(fromHex('0xAP0'), 10.0)
self.identical(fromHex('0xbep0'), 190.0)
self.identical(fromHex('0xBep0'), 190.0)
self.identical(fromHex('0xbEp0'), 190.0)
self.identical(fromHex('0XBE0P-4'), 190.0)
self.identical(fromHex('0xBEp0'), 190.0)
self.identical(fromHex('0xB.Ep4'), 190.0)
self.identical(fromHex('0x.BEp8'), 190.0)
self.identical(fromHex('0x.0BEp12'), 190.0)
# moving the point around
pi = fromHex('0x1.921fb54442d18p1')
self.identical(fromHex('0x.006487ed5110b46p11'), pi)
self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
self.identical(fromHex('0x.01921fb54442d18p9'), pi)
self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
self.identical(fromHex('0x.06487ed5110b46p7'), pi)
self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
self.identical(fromHex('0x.1921fb54442d18p5'), pi)
self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
self.identical(fromHex('0x.6487ed5110b46p3'), pi)
self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
self.identical(fromHex('0x1.921fb54442d18p1'), pi)
self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
# ...
self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
self.identical(fromHex('0x6487ed5110b46p-49'), pi)
self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
self.identical(fromHex('0x1921fb54442d18p-51'), pi)
self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
self.identical(fromHex('0x6487ed5110b460p-53'), pi)
self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
self.identical(fromHex('0x1921fb54442d180p-55'), pi)
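        # Each hex digit carries 4 bits, so shifting the hex point one place to
        # the left is compensated by adding 4 to the binary exponent (and vice
        # versa), which is why all of the spellings above denote the same pi.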
# results that should overflow...
self.assertRaises(OverflowError, fromHex, '-0x1p1024')
self.assertRaises(OverflowError, fromHex, '0x1p+1025')
self.assertRaises(OverflowError, fromHex, '+0X1p1030')
self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
self.assertRaises(OverflowError, fromHex, '0X2p+1023')
self.assertRaises(OverflowError, fromHex, '0x2.p1023')
self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')
# ...and those that round to +-max float
self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)
# zeros
self.identical(fromHex('0x0p0'), 0.0)
self.identical(fromHex('0x0p1000'), 0.0)
self.identical(fromHex('-0x0p1023'), -0.0)
self.identical(fromHex('0X0p1024'), 0.0)
self.identical(fromHex('-0x0p1025'), -0.0)
self.identical(fromHex('0X0p2000'), 0.0)
self.identical(fromHex('0x0p123456789123456789'), 0.0)
self.identical(fromHex('-0X0p-0'), -0.0)
self.identical(fromHex('-0X0p-1000'), -0.0)
self.identical(fromHex('0x0p-1023'), 0.0)
self.identical(fromHex('-0X0p-1024'), -0.0)
self.identical(fromHex('-0x0p-1025'), -0.0)
self.identical(fromHex('-0x0p-1072'), -0.0)
self.identical(fromHex('0X0p-1073'), 0.0)
self.identical(fromHex('-0x0p-1074'), -0.0)
self.identical(fromHex('0x0p-1075'), 0.0)
self.identical(fromHex('0X0p-1076'), 0.0)
self.identical(fromHex('-0X0p-2000'), -0.0)
self.identical(fromHex('-0x0p-123456789123456789'), -0.0)
# values that should underflow to 0
self.identical(fromHex('0X1p-1075'), 0.0)
self.identical(fromHex('-0X1p-1075'), -0.0)
self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
self.identical(fromHex('-0x1.1p-1075'), -TINY)
self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)
# check round-half-even is working correctly near 0 ...
self.identical(fromHex('0x1p-1076'), 0.0)
self.identical(fromHex('0X2p-1076'), 0.0)
self.identical(fromHex('0X3p-1076'), TINY)
self.identical(fromHex('0x4p-1076'), TINY)
self.identical(fromHex('0X5p-1076'), TINY)
self.identical(fromHex('0X6p-1076'), 2*TINY)
self.identical(fromHex('0x7p-1076'), 2*TINY)
self.identical(fromHex('0X8p-1076'), 2*TINY)
self.identical(fromHex('0X9p-1076'), 2*TINY)
self.identical(fromHex('0xap-1076'), 2*TINY)
self.identical(fromHex('0Xbp-1076'), 3*TINY)
self.identical(fromHex('0xcp-1076'), 3*TINY)
self.identical(fromHex('0Xdp-1076'), 3*TINY)
self.identical(fromHex('0Xep-1076'), 4*TINY)
self.identical(fromHex('0xfp-1076'), 4*TINY)
self.identical(fromHex('0x10p-1076'), 4*TINY)
self.identical(fromHex('-0x1p-1076'), -0.0)
self.identical(fromHex('-0X2p-1076'), -0.0)
self.identical(fromHex('-0x3p-1076'), -TINY)
self.identical(fromHex('-0X4p-1076'), -TINY)
self.identical(fromHex('-0x5p-1076'), -TINY)
self.identical(fromHex('-0x6p-1076'), -2*TINY)
self.identical(fromHex('-0X7p-1076'), -2*TINY)
self.identical(fromHex('-0X8p-1076'), -2*TINY)
self.identical(fromHex('-0X9p-1076'), -2*TINY)
self.identical(fromHex('-0Xap-1076'), -2*TINY)
self.identical(fromHex('-0xbp-1076'), -3*TINY)
self.identical(fromHex('-0xcp-1076'), -3*TINY)
self.identical(fromHex('-0Xdp-1076'), -3*TINY)
self.identical(fromHex('-0xep-1076'), -4*TINY)
self.identical(fromHex('-0Xfp-1076'), -4*TINY)
self.identical(fromHex('-0X10p-1076'), -4*TINY)
# ... and near MIN ...
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)
# ... and near 1.0.
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
self.identical(fromHex('0X1.00000000000000p0'), 1.0)
self.identical(fromHex('0X1.00000000000001p0'), 1.0)
self.identical(fromHex('0x1.00000000000002p0'), 1.0)
self.identical(fromHex('0X1.00000000000003p0'), 1.0)
self.identical(fromHex('0x1.00000000000004p0'), 1.0)
self.identical(fromHex('0X1.00000000000005p0'), 1.0)
self.identical(fromHex('0X1.00000000000006p0'), 1.0)
self.identical(fromHex('0X1.00000000000007p0'), 1.0)
self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
1.0)
self.identical(fromHex('0x1.00000000000008p0'), 1.0)
self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
1+EPS)
self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
1.0+EPS)
self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
1.0+2*EPS)
self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)
def test_roundtrip(self):
def roundtrip(x):
return fromHex(toHex(x))
for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]:
self.identical(x, roundtrip(x))
self.identical(-x, roundtrip(-x))
# fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
import random
for i in range(10000):
e = random.randrange(-1200, 1200)
m = random.random()
s = random.choice([1.0, -1.0])
try:
x = s*ldexp(m, e)
except OverflowError:
pass
else:
self.identical(x, fromHex(toHex(x)))
def test_subclass(self):
class F(float):
def __new__(cls, value):
return float.__new__(cls, value + 1)
f = F.fromhex((1.5).hex())
self.assertIs(type(f), F)
self.assertEqual(f, 2.5)
class F2(float):
def __init__(self, value):
self.foo = 'bar'
f = F2.fromhex((1.5).hex())
self.assertIs(type(f), F2)
self.assertEqual(f, 1.5)
self.assertEqual(getattr(f, 'foo', 'none'), 'bar')
if __name__ == '__main__':
unittest.main()
| mit | 8,006,852,908,241,265,000 | 43.542735 | 84 | 0.572868 | false |
nuagenetworks/vspk-python | vspk/v5_0/fetchers/nuinfrastructuregatewayprofiles_fetcher.py | 2 | 2247 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUInfrastructureGatewayProfilesFetcher(NURESTFetcher):
""" Represents a NUInfrastructureGatewayProfiles fetcher
Notes:
            This fetcher enables fetching NUInfrastructureGatewayProfile objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUInfrastructureGatewayProfile class that is managed.
Returns:
.NUInfrastructureGatewayProfile: the managed class
"""
from .. import NUInfrastructureGatewayProfile
return NUInfrastructureGatewayProfile
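    # Illustrative usage, assuming an established vspk session and a parent
    # object that exposes this fetcher under the usual bambou naming
    # convention (both are assumptions, not part of the generated code):
    #     profiles = parent.infrastructure_gateway_profiles.get()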
| bsd-3-clause | -5,783,161,004,879,334,000 | 41.415094 | 86 | 0.741878 | false |
gangadhar-kadam/lgnlvefrape | frappe/core/doctype/doctype/doctype.py | 6 | 14843 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now, cint
from frappe.model import no_value_fields
from frappe.model.document import Document
from frappe.model.db_schema import type_map
from frappe.core.doctype.property_setter.property_setter import make_property_setter
from frappe.core.doctype.notification_count.notification_count import delete_notification_count_for
from frappe.modules import make_boilerplate
form_grid_templates = {
"fields": "templates/form_grid/fields.html"
}
class DocType(Document):
def validate(self):
if not frappe.conf.get("developer_mode"):
frappe.throw(_("Not in Developer Mode! Set in site_config.json"))
for c in [".", "/", "#", "&", "=", ":", "'", '"']:
if c in self.name:
frappe.throw(_("{0} not allowed in name").format(c))
self.validate_series()
self.scrub_field_names()
self.validate_title_field()
validate_fields(self)
if self.istable:
# no permission records for child table
self.permissions = []
else:
validate_permissions(self)
self.make_amendable()
def change_modified_of_parent(self):
if frappe.flags.in_import:
return
parent_list = frappe.db.sql("""SELECT parent
from tabDocField where fieldtype="Table" and options=%s""", self.name)
for p in parent_list:
frappe.db.sql('UPDATE tabDocType SET modified=%s WHERE `name`=%s', (now(), p[0]))
def scrub_field_names(self):
restricted = ('name','parent','creation','modified','modified_by',
'parentfield','parenttype',"file_list")
for d in self.get("fields"):
if d.fieldtype:
if (not getattr(d, "fieldname", None)):
if d.label:
d.fieldname = d.label.strip().lower().replace(' ','_')
if d.fieldname in restricted:
d.fieldname = d.fieldname + '1'
else:
d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx)
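		# Illustration: a label "Due Date" becomes fieldname "due_date"; a
		# reserved label such as "Name" becomes "name1"; an unlabelled
		# Section Break at idx 4 becomes "section_break_4".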
def validate_title_field(self):
if self.title_field and \
self.title_field not in [d.fieldname for d in self.get("fields")]:
frappe.throw(_("Title field must be a valid fieldname"))
def validate_series(self, autoname=None, name=None):
if not autoname: autoname = self.autoname
if not name: name = self.name
if not autoname and self.get("fields", {"fieldname":"naming_series"}):
self.autoname = "naming_series:"
if autoname and (not autoname.startswith('field:')) \
and (not autoname.startswith('eval:')) \
and (not autoname in ('Prompt', 'hash')) \
and (not autoname.startswith('naming_series:')):
prefix = autoname.split('.')[0]
used_in = frappe.db.sql('select name from tabDocType where substring_index(autoname, ".", 1) = %s and name!=%s', (prefix, name))
if used_in:
frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0]))
def on_update(self):
from frappe.model.db_schema import updatedb
updatedb(self.name)
self.change_modified_of_parent()
make_module_and_roles(self)
from frappe import conf
if not (frappe.flags.in_import or frappe.flags.in_test) and conf.get('developer_mode') or 0:
self.export_doc()
self.make_controller_template()
# update index
if not getattr(self, "custom", False):
from frappe.modules import load_doctype_module
module = load_doctype_module(self.name, self.module)
if hasattr(module, "on_doctype_update"):
module.on_doctype_update()
delete_notification_count_for(doctype=self.name)
frappe.clear_cache(doctype=self.name)
def before_rename(self, old, new, merge=False):
if merge:
frappe.throw(_("DocType can not be merged"))
def after_rename(self, old, new, merge=False):
if self.issingle:
frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old))
else:
frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new))
def before_reload(self):
if not (self.issingle and self.istable):
self.preserve_naming_series_options_in_property_setter()
def preserve_naming_series_options_in_property_setter(self):
"""preserve naming_series as property setter if it does not exist"""
naming_series = self.get("fields", {"fieldname": "naming_series"})
if not naming_series:
return
# check if atleast 1 record exists
if not (frappe.db.table_exists("tab" + self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))):
return
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name,
"property": "options", "field_name": "naming_series"})
if not existing_property_setter:
make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False)
if naming_series[0].default:
make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False)
def export_doc(self):
from frappe.modules.export_file import export_to_files
export_to_files(record_list=[['DocType', self.name]])
def import_doc(self):
from frappe.modules.import_module import import_from_files
import_from_files(record_list=[[self.module, 'doctype', self.name]])
def make_controller_template(self):
make_boilerplate("controller.py", self)
if not (self.istable or self.issingle):
make_boilerplate("test_controller.py", self)
make_boilerplate("test_records.json", self)
def make_amendable(self):
"""
if is_submittable is set, add amended_from docfields
"""
if self.is_submittable:
if not frappe.db.sql("""select name from tabDocField
where fieldname = 'amended_from' and parent = %s""", self.name):
self.append("fields", {
"label": "Amended From",
"fieldtype": "Link",
"fieldname": "amended_from",
"options": self.name,
"read_only": 1,
"print_hide": 1,
"no_copy": 1
})
def get_max_idx(self):
max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""",
self.name)
return max_idx and max_idx[0][0] or 0
def validate_fields_for_doctype(doctype):
validate_fields(frappe.get_meta(doctype))
# this is separate because it is also called via custom field
def validate_fields(meta):
def check_illegal_characters(fieldname):
for c in ['.', ',', ' ', '-', '&', '%', '=', '"', "'", '*', '$',
'(', ')', '[', ']', '/']:
if c in fieldname:
frappe.throw(_("{0} not allowed in fieldname {1}").format(c, fieldname))
def check_unique_fieldname(fieldname):
duplicates = filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields))
if len(duplicates) > 1:
frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates)))
def check_illegal_mandatory(d):
if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd:
frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype))
def check_link_table_options(d):
if d.fieldtype in ("Link", "Table"):
if not d.options:
frappe.throw(_("Options requried for Link or Table type field {0} in row {1}").format(d.label, d.idx))
if d.options=="[Select]" or d.options==d.parent:
return
if d.options != d.parent and not frappe.db.exists("DocType", d.options):
frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx))
def check_hidden_and_mandatory(d):
if d.hidden and d.reqd and not d.default:
frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx))
def check_min_items_in_list(fields):
if len(filter(lambda d: d.in_list_view, fields))==0:
for d in fields[:5]:
if d.fieldtype in type_map:
d.in_list_view = 1
def check_width(d):
if d.fieldtype == "Currency" and cint(d.width) < 100:
frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx))
def check_in_list_view(d):
if d.in_list_view and d.fieldtype!="Image" and (d.fieldtype in no_value_fields):
frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx))
def check_dynamic_link_options(d):
if d.fieldtype=="Dynamic Link":
doctype_pointer = filter(lambda df: df.fieldname==d.options, fields)
if not doctype_pointer or (doctype_pointer[0].fieldtype!="Link") \
or (doctype_pointer[0].options!="DocType"):
frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'"))
def check_illegal_default(d):
if d.fieldtype == "Check" and d.default and d.default not in ('0', '1'):
frappe.throw(_("Default for 'Check' type of field must be either '0' or '1'"))
def check_precision(d):
if d.fieldtype in ("Currency", "Float", "Percent") and d.precision is not None and not (1 <= cint(d.precision) <= 6):
frappe.throw(_("Precision should be between 1 and 6"))
def check_fold(fields):
fold_exists = False
for i, f in enumerate(fields):
if f.fieldtype=="Fold":
if fold_exists:
frappe.throw(_("There can be only one Fold in a form"))
fold_exists = True
if i < len(fields)-1:
nxt = fields[i+1]
if nxt.fieldtype != "Section Break" \
or (nxt.fieldtype=="Section Break" and not nxt.label):
frappe.throw(_("Fold must come before a labelled Section Break"))
else:
frappe.throw(_("Fold can not be at the end of the form"))
def check_search_fields(meta):
if not meta.search_fields:
return
fieldname_list = [d.fieldname for d in fields]
for fieldname in (meta.search_fields or "").split(","):
fieldname = fieldname.strip()
if fieldname not in fieldname_list:
frappe.throw(_("Search Fields should contain valid fieldnames"))
fields = meta.get("fields")
for d in fields:
if not d.permlevel: d.permlevel = 0
if not d.fieldname:
frappe.throw(_("Fieldname is required in row {0}").format(d.idx))
check_illegal_characters(d.fieldname)
check_unique_fieldname(d.fieldname)
check_illegal_mandatory(d)
check_link_table_options(d)
check_dynamic_link_options(d)
check_hidden_and_mandatory(d)
check_in_list_view(d)
check_illegal_default(d)
check_min_items_in_list(fields)
check_fold(fields)
check_search_fields(meta)
def validate_permissions_for_doctype(doctype, for_remove=False):
doctype = frappe.get_doc("DocType", doctype)
if frappe.conf.developer_mode and not frappe.flags.in_test:
# save doctype
doctype.save()
else:
validate_permissions(doctype, for_remove)
# save permissions
for perm in doctype.get("permissions"):
perm.db_update()
def validate_permissions(doctype, for_remove=False):
permissions = doctype.get("permissions")
if not permissions:
frappe.throw(_('Enter at least one permission row'), frappe.MandatoryError)
issingle = issubmittable = isimportable = False
if doctype:
issingle = cint(doctype.issingle)
issubmittable = cint(doctype.is_submittable)
isimportable = cint(doctype.allow_import)
def get_txt(d):
return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx)
def check_atleast_one_set(d):
if not d.read and not d.write and not d.submit and not d.cancel and not d.create:
frappe.throw(_("{0}: No basic permissions set").format(get_txt(d)))
def check_double(d):
has_similar = False
for p in permissions:
if (p.role==d.role and p.permlevel==d.permlevel
and p.apply_user_permissions==d.apply_user_permissions and p!=d):
has_similar = True
break
if has_similar:
frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and Apply User Permissions").format(get_txt(d)))
def check_level_zero_is_set(d):
if cint(d.permlevel) > 0 and d.role != 'All':
has_zero_perm = False
for p in permissions:
if p.role==d.role and (p.permlevel or 0)==0 and p!=d:
has_zero_perm = True
break
if not has_zero_perm:
frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d)))
if d.create or d.submit or d.cancel or d.amend or d.match:
frappe.throw(_("{0}: Create, Submit, Cancel and Amend only valid at level 0").format(get_txt(d)))
def check_permission_dependency(d):
if d.cancel and not d.submit:
frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d)))
if (d.submit or d.cancel or d.amend) and not d.write:
frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d)))
		if d.amend and not d.cancel:
frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d)))
if d.get("import") and not d.create:
frappe.throw(_("{0}: Cannot set Import without Create").format(get_txt(d)))
def remove_rights_for_single(d):
if not issingle:
return
if d.report:
frappe.msgprint(_("Report cannot be set for Single types"))
d.report = 0
d.set("import", 0)
d.set("export", 0)
for ptype, label in (
("set_user_permissions", _("Set User Permissions")),
("apply_user_permissions", _("Apply User Permissions"))):
if d.get(ptype):
d.set(ptype, 0)
frappe.msgprint(_("{0} cannot be set for Single types").format(label))
def check_if_submittable(d):
if d.submit and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d)))
elif d.amend and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d)))
def check_if_importable(d):
if d.get("import") and not isimportable:
frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype))
for d in permissions:
if not d.permlevel:
d.permlevel=0
check_atleast_one_set(d)
if not for_remove:
check_double(d)
check_permission_dependency(d)
check_if_submittable(d)
check_if_importable(d)
check_level_zero_is_set(d)
remove_rights_for_single(d)
def make_module_and_roles(doc, perm_fieldname="permissions"):
try:
if not frappe.db.exists("Module Def", doc.module):
m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module})
m.app_name = frappe.local.module_app[frappe.scrub(doc.module)]
m.ignore_mandatory = m.ignore_permissions = True
m.insert()
default_roles = ["Administrator", "Guest", "All"]
roles = [p.role for p in doc.get("permissions") or []] + default_roles
for role in list(set(roles)):
if not frappe.db.exists("Role", role):
r = frappe.get_doc({"doctype": "Role", "role_name": role})
r.role_name = role
r.ignore_mandatory = r.ignore_permissions = True
r.insert()
except frappe.DoesNotExistError, e:
pass
except frappe.SQLError, e:
if e.args[0]==1146:
pass
else:
raise
def init_list(doctype):
doc = frappe.get_meta(doctype)
make_boilerplate("controller_list.js", doc)
make_boilerplate("controller_list.html", doc)
| mit | -197,055,989,850,605,540 | 34.594724 | 132 | 0.680927 | false |
Pakoach/Sick-Beard | cherrypy/lib/jsontools.py | 80 | 1574 | import sys
import cherrypy
if sys.version_info >= (2, 6):
# Python 2.6: simplejson is part of the standard library
import json
else:
try:
import simplejson as json
except ImportError:
json = None
if json is None:
def json_decode(s):
raise ValueError('No JSON library is available')
def json_encode(s):
raise ValueError('No JSON library is available')
else:
json_decode = json.JSONDecoder().decode
json_encode = json.JSONEncoder().iterencode
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
"""Read application/json data into request.json."""
if not entity.headers.get(u"Content-Length", u""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors[u'application/json'] = json_processor
def json_out(debug=False):
request = cherrypy.serving.request
response = cherrypy.serving.response
real_handler = request.handler
def json_handler(*args, **kwargs):
response.headers['Content-Type'] = 'application/json'
value = real_handler(*args, **kwargs)
return json_encode(value)
request.handler = json_handler
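# Illustrative wiring (assumed, not defined in this module): CherryPy exposes
# these callables as tools, typically along the lines of
#     cherrypy.tools.json_in = cherrypy.Tool('before_request_body', json_in)
#     cherrypy.tools.json_out = cherrypy.Tool('before_handler', json_out, priority=30)
# so a handler opts in via config such as {'tools.json_out.on': True}.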
| gpl-3.0 | -5,036,942,576,542,986,000 | 30.48 | 66 | 0.649301 | false |