repo_name stringlengths 5 100 | path stringlengths 4 254 | copies stringlengths 1 5 | size stringlengths 4 7 | content stringlengths 681 1M | license stringclasses 15 values | hash int64 -9,223,351,895,964,839,000 9,223,298,349B | line_mean float64 3.5 100 | line_max int64 15 1k | alpha_frac float64 0.25 0.97 | autogenerated bool 1 class | ratio float64 1.5 8.15 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
saguila/AlgoritmoA | pqdict.py | 2 | 16450 | """Copyright (c) 2012 Nezar Abdennur
This module contains code adapted from the Python implementation of the heapq
module, which was written by Kevin O'Connor and augmented by Tim Peters and
Raymond Hettinger.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""Priority Queue Dictionary -- An indexed priority queue data structure.
Stores a set of prioritized hashable elements. Can be used as an updatable
schedule.
The priority queue is implemented as a binary heap, which supports:
- O(1) access to the top priority element
- O(log n) removal of the top priority element
- O(log n) insertion of a new element
In addition, an internal dictionary or "index" maps elements to their position
in the heap array. This index is kept up-to-date when the heap is manipulated.
As a result, PQD also supports:
- O(1) lookup of an arbitrary element's priority key
- O(log n) removal of an arbitrary element
- O(log n) updating of an arbitrary element's priority key
The standard heap operations used internally (here, called "sink" and "swim")
are based on the code in the python heapq module.* These operations are extended
to preserve correctness of the internal dictionary.
* The names of the methods in heapq (sift up/down) seem to refer to the motion
of the items being compared to, rather than the item being operated on as is
normally done in textbooks (i.e. bubble down/up, instead). I stuck to the
textbook convention, but using the sink/swim nomenclature from Sedgewick et al:
the way I see it, an item that is too "heavy" (low-priority) should sink down
the tree, while one that is too "light" should float or swim up. Note, however,
that the sink implementation is non-conventional. See heapq for details about
why.
"""
__author__ = ('Nezar Abdennur', 'nabdennur@gmail.com')
__all__ = ['PQDict', 'PQDictEntry', 'heapsorted_by_value']
from collections import Mapping, MutableMapping
from abc import ABCMeta, abstractmethod
class PQDictEntry(object):
__metaclass__ = ABCMeta
def __init__(self, dkey, pkey):
self.dkey = dkey
self.pkey = pkey
@abstractmethod
def __lt__(self, other):
return NotImplemented
# def set_pkey(self, pkey):
# pass
def __eq__(self, other):
return self.pkey == other.pkey
def __repr__(self):
return self.__class__.__name__ + \
"(%s: %s)" % (repr(self.dkey), self.pkey)
class MinPQDEntry(PQDictEntry):
__init__ = PQDictEntry.__init__
__eq__ = PQDictEntry.__eq__
def __lt__(self, other):
return self.pkey < other.pkey
class MaxPQDEntry(PQDictEntry):
__init__ = PQDictEntry.__init__
__eq__ = PQDictEntry.__eq__
def __lt__(self, other):
return self.pkey > other.pkey
class PQDict(MutableMapping):
"""
Maps dictionary keys (keys) to priority keys (values). Maintains an
internal heap so that the highest priority item can always be obtained in
constant time. The mapping is mutable so items may be added, removed and
have their priorities updated.
"""
# Implementation details:
# - heap (list): stores (dkey,pkey)-pairs as "entries" (PQDEntry objects).
# - nodefinder (dict): maps each dkey to the position of its entry in the
# heap
# - the < comparator is used to rank entries
__slots__ = ('nodefinder', 'heap', 'create_entry')
create_entry = MinPQDEntry
__eq__ = MutableMapping.__eq__
__ne__ = MutableMapping.__ne__
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
get = MutableMapping.get
clear = MutableMapping.clear
update = MutableMapping.update
setdefault = MutableMapping.setdefault
#fromkeys
def __init__(self, *args, **kwargs):
"""
Mimics the standard dict constructor:
Accepts a sequence/iterator of (dkey, pkey) pairs.
Accepts named arguments or an unpacked dictionary.
Also accepts a single mapping object to convert it to a pqdict.
The default priority ranking for entries is in decreasing pkey value
(i.e., a min-pq: LOWER pkey values have a HIGHER rank). This is typical
for a scheduler, where the higher ranked tasks have earlier times.
"""
if len(args) > 1:
raise TypeError
self.heap = []
self.nodefinder = {}
pos = 0
if args:
if isinstance(args[0], Mapping):
seq = args[0].items()
else:
seq = args[0]
try:
for dkey, pkey in seq:
entry = self.create_entry(dkey, pkey)
self.heap.append(entry)
self.nodefinder[dkey] = pos
pos += 1
except TypeError:
raise ValueError
if kwargs:
for dkey, pkey in kwargs.items():
entry = self.create_entry(dkey, pkey)
self.heap.append(entry)
self.nodefinder[dkey] = pos
pos += 1
self._heapify()
@classmethod
def minpq(cls, *args, **kwargs):
pq = cls()
pq.create_entry = MinPQDEntry
pq.__init__(*args, **kwargs)
return pq
@classmethod
def maxpq(cls, *args, **kwargs):
pq = cls()
pq.create_entry = MaxPQDEntry
pq.__init__(*args, **kwargs)
return pq
@classmethod
def custompq(cls, entrytype, *args, **kwargs):
pq = cls()
if issubclass(entrytype, PQDictEntry):
pq.create_entry = entrytype
else:
raise TypeError('Custom entry class must be a subclass of' \
'PQDictEntry')
pq.__init__(*args, **kwargs)
return pq
@classmethod
def fromfunction(cls, iterable, pkeygen): #instead of fromkeys
"""
Provide a key function that determines priorities by which to heapify
the elements of an iterable into a PQD.
"""
return cls( (dkey, pkeygen(dkey)) for dkey in iterable )
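    # Example (illustrative): derive priorities from the keys themselves,
    # here using len() as the priority function.
    #
    #     pq = PQDict.fromfunction(['a', 'bb', 'ccc'], len)
    #     pq.popitem()    # -> ('a', 1): the shortest string has top priority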
def __len__(self):
"""
Return number of items in the PQD.
"""
return len(self.nodefinder)
def __contains__(self, dkey):
"""
Return True if dkey is in the PQD else return False.
"""
return dkey in self.nodefinder
def __iter__(self):
"""
Return an iterator over the dictionary keys of the PQD. The order
of iteration is undefined! Use iterkeys() to iterate over dictionary
keys sorted by priority.
"""
for entry in self.heap:
yield entry.dkey
def __getitem__(self, dkey):
"""
Return the priority of dkey. Raises a KeyError if not in the PQD.
"""
return self.heap[self.nodefinder[dkey]].pkey #raises KeyError
def __setitem__(self, dkey, pkey):
"""
Assign priority to dictionary key.
"""
heap = self.heap
finder = self.nodefinder
try:
pos = finder[dkey]
except KeyError:
# add new entry
n = len(self.heap)
self.heap.append(self.create_entry(dkey, pkey))
self.nodefinder[dkey] = n
self._swim(n)
else:
# update existing entry
heap[pos].pkey = pkey
parent_pos = (pos - 1) >> 1
child_pos = 2*pos + 1
            if parent_pos > -1 and heap[pos] < heap[parent_pos]:
self._swim(pos)
elif child_pos < len(heap):
right_pos = child_pos + 1
if (right_pos < len(heap)
and not heap[child_pos] < heap[right_pos]):
child_pos = right_pos
if heap[child_pos] < heap[pos]:
self._sink(pos)
def __delitem__(self, dkey):
"""
Remove item. Raises a KeyError if dkey is not in the PQD.
"""
heap = self.heap
finder = self.nodefinder
# Remove very last item and place in vacant spot. Let the new item
# sink until it reaches its new resting place.
try:
pos = finder.pop(dkey)
except KeyError:
raise
else:
entry = heap[pos]
last = heap.pop(-1)
if entry is not last:
heap[pos] = last
finder[last.dkey] = pos
parent_pos = (pos - 1) >> 1
child_pos = 2*pos + 1
                if parent_pos > -1 and heap[pos] < heap[parent_pos]:
self._swim(pos)
elif child_pos < len(heap):
right_pos = child_pos + 1
if (right_pos < len(heap)
and not heap[child_pos] < heap[right_pos]):
child_pos = right_pos
if heap[child_pos] < heap[pos]:
self._sink(pos)
del entry
def __copy__(self):
"""
Return a new PQD with the same dkeys associated with the same priority
keys.
"""
# We want the two PQDs to behave as different schedules on the same
# set of dkeys. As a result:
# - The new heap list contains copies of all entries because PQDEntry
# objects are mutable and should not be shared by two PQDicts.
# - The new nodefinder dict (dkey->heap positions) must be a copy of
# the old nodefinder dict since it maps the same dkeys to positions
# in a different list.
from copy import copy
other = self.__class__()
other.heap = [copy(entry) for entry in self.heap]
other.nodefinder = copy(self.nodefinder)
return other
copy = __copy__
def __repr__(self):
things = ', '.join(['%s: %s' % (repr(entry.dkey), entry.pkey)
for entry in self.heap])
return self.__class__.__name__ + '({' + things + '})'
__marker = object()
def pop(self, dkey, default=__marker):
"""
If dkey is in the PQD, remove it and return its priority key, else
return default. If default is not given and dkey is not in the PQD, a
KeyError is raised.
"""
heap = self.heap
finder = self.nodefinder
try:
pos = finder.pop(dkey)
except KeyError:
if default is self.__marker:
raise
return default
else:
delentry = heap[pos]
last = heap.pop(-1)
if delentry is not last:
heap[pos] = last
finder[last.dkey] = pos
parent_pos = (pos - 1) >> 1
child_pos = 2*pos + 1
                if parent_pos > -1 and heap[pos] < heap[parent_pos]:
self._swim(pos)
elif child_pos < len(heap):
right_pos = child_pos + 1
if (right_pos < len(heap)
and not heap[child_pos] < heap[right_pos]):
child_pos = right_pos
if heap[child_pos] < heap[pos]:
self._sink(pos)
pkey = delentry.pkey
del delentry
return pkey
def popitem(self):
"""
Extract top priority item. Raises KeyError if PQD is empty.
"""
try:
last = self.heap.pop(-1)
except IndexError:
raise KeyError
else:
if self.heap:
entry = self.heap[0]
self.heap[0] = last
self.nodefinder[last.dkey] = 0
self._sink(0)
else:
entry = last
self.nodefinder.pop(entry.dkey)
return entry.dkey, entry.pkey
def additem(self, dkey, pkey):
"""
Add a new item. Raises KeyError if item is already in the PQD.
"""
if dkey in self.nodefinder:
raise KeyError
self[dkey] = pkey
def updateitem(self, dkey, new_pkey):
"""
Update the priority key of an existing item. Raises KeyError if item is
not in the PQD.
"""
if dkey not in self.nodefinder:
raise KeyError
self[dkey] = new_pkey
def peek(self):
"""
Get top priority item.
"""
try:
entry = self.heap[0]
except IndexError:
raise KeyError
return entry.dkey, entry.pkey
def iterkeys(self):
"""
Destructive heapsort iterator over dictionary keys, ordered by priority
key.
"""
try:
while True:
yield self.popitem()[0]
except KeyError:
return
def itervalues(self):
"""
Destructive heapsort iterator over priority keys.
"""
try:
while True:
yield self.popitem()[1]
except KeyError:
return
def iteritems(self):
"""
Destructive heapsort iterator over items, ordered by priority key.
"""
try:
while True:
yield self.popitem()
except KeyError:
return
def _heapify(self):
n = len(self.heap)
for pos in reversed(range(n//2)):
self._sink(pos)
def _sink(self, top=0):
heap = self.heap
finder = self.nodefinder
# Peel off top item
pos = top
entry = heap[pos]
# Sift up a trail of child nodes
child_pos = 2*pos + 1
while child_pos < len(heap):
# Choose the index of smaller child.
right_pos = child_pos + 1
if right_pos < len(heap) and not heap[child_pos] < heap[right_pos]:
child_pos = right_pos
# Move the smaller child up.
child_entry = heap[child_pos]
heap[pos] = child_entry
finder[child_entry.dkey] = pos
pos = child_pos
child_pos = 2*pos + 1
# We are now at a leaf. Put item there and let it swim until it reaches
# its new resting place.
heap[pos] = entry
finder[entry.dkey] = pos
self._swim(pos, top)
def _swim(self, pos, top=0):
heap = self.heap
finder = self.nodefinder
# Remove item from its place
entry = heap[pos]
# Bubble item up by sifting parents down until finding a place it fits.
while pos > top:
parent_pos = (pos - 1) >> 1
parent_entry = heap[parent_pos]
if entry < parent_entry:
heap[pos] = parent_entry
finder[parent_entry.dkey] = pos
pos = parent_pos
continue
break
# Put item in its new place
heap[pos] = entry
finder[entry.dkey] = pos
def heapsorted_by_value(mapping, maxheap=False):
"""
Takes an arbitrary mapping and, treating the values as priority keys, sorts
its items by priority via heapsort using a PQDict.
Returns:
a list of the dictionary items sorted by value
"""
if maxheap:
pq = PQDict.maxpq(mapping)
else:
pq = PQDict(mapping)
return [item for item in pq.iteritems()] | mit | -5,154,796,241,812,267,000 | 31.193738 | 80 | 0.561641 | false | 4.160344 | false | false | false |
laurent-george/weboob | modules/barclays/pages.py | 6 | 10301 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import datetime
from decimal import Decimal
import re
from weboob.deprecated.browser import Page
from weboob.capabilities.bank import Account
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
class LoginPage(Page):
def login(self, login, passwd):
self.browser.select_form(name='frmLogin')
self.browser['username'] = login.encode(self.browser.ENCODING)
self.browser['password'] = passwd.encode(self.browser.ENCODING)
self.browser.submit(nologin=True)
def has_redirect(self):
if len(self.document.getroot().xpath('//form')) > 0:
return False
else:
return True
class Login2Page(Page):
def login(self, secret):
label = self.document.xpath('//span[@class="PF_LABEL"]')[0].text.strip()
letters = ''
for n in re.findall('(\d+)', label):
letters += secret[int(n) - 1]
self.browser.select_form(name='frmControl')
self.browser['word'] = letters
self.browser.submit(name='valider', nologin=True)
class IndexPage(Page):
pass
class AccountsPage(Page):
ACCOUNT_TYPES = {u'Epargne': Account.TYPE_SAVINGS,
u'Liquidités': Account.TYPE_CHECKING,
u'Titres': Account.TYPE_MARKET,
u'Prêts': Account.TYPE_LOAN,
}
def get_list(self):
accounts = []
for block in self.document.xpath('//div[@class="pave"]/div'):
head_type = block.xpath('./div/span[@class="accGroupLabel"]')[0].text.strip()
account_type = self.ACCOUNT_TYPES.get(head_type, Account.TYPE_UNKNOWN)
for tr in block.cssselect('ul li.tbord_account'):
id = tr.attrib.get('id', '')
if id.find('contratId') != 0:
self.logger.warning('Unable to parse contract ID: %r' % id)
continue
id = id[id.find('contratId')+len('contratId'):]
link = tr.cssselect('span.accountLabel a')[0]
balance = Decimal(FrenchTransaction.clean_amount(tr.cssselect('span.accountTotal')[0].text))
if id.endswith('CRT'):
account = accounts[-1]
account._card_links.append(link.attrib['href'])
if not account.coming:
account.coming = Decimal('0.0')
account.coming += balance
continue
account = Account()
account.id = id
account.label = unicode(link.text.strip())
account.type = account_type
account.balance = balance
account.currency = account.get_currency(tr.cssselect('span.accountDev')[0].text)
account._link = link.attrib['href']
account._card_links = []
accounts.append(account)
if len(accounts) == 0:
# Sometimes, accounts are only in javascript...
for script in self.document.xpath('//script'):
text = script.text
if text is None:
continue
if 'remotePerso' not in text:
continue
account = None
attribs = {}
account_type = Account.TYPE_UNKNOWN
for line in text.split('\n'):
line = line.strip()
m = re.match("data.libelle = '(.*)';", line)
if m:
account_type = self.ACCOUNT_TYPES.get(m.group(1), Account.TYPE_UNKNOWN)
elif line == 'var remotePerso = new Object;':
account = Account()
elif account is not None:
m = re.match("remotePerso.(\w+) = '?(.*?)'?;", line)
if m:
attribs[m.group(1)] = m.group(2)
elif line.startswith('listProduitsGroup'):
account.id = attribs['refContrat']
account.label = attribs['libelle']
account.type = account_type
account.balance = Decimal(FrenchTransaction.clean_amount(attribs['soldeDateOpeValeurFormatted']))
account.currency = account.get_currency(attribs['codeDevise'])
account._link = 'tbord.do?id=%s' % attribs['id']
account._card_links = []
if account.id.endswith('CRT'):
a = accounts[-1]
a._card_links.append(account._link)
if not a.coming:
a.coming = Decimal('0.0')
a.coming += account.balance
else:
accounts.append(account)
account = None
return accounts
class Transaction(FrenchTransaction):
PATTERNS = [(re.compile('^RET DAB (?P<text>.*?) RETRAIT DU (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}).*'),
FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile('^RET DAB (?P<text>.*?) CARTE ?:.*'),
FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile('^RET DAB (?P<dd>\d{2})/(?P<mm>\d{2})/(?P<yy>\d{2}) (?P<text>.*?) CARTE .*'),
FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile('^(?P<text>.*) RETRAIT DU (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}) .*'),
FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile('(\w+) (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}) CB[:\*][^ ]+ (?P<text>.*)'),
FrenchTransaction.TYPE_CARD),
(re.compile('^(?P<category>VIR(EMEN)?T? (SEPA)?(RECU|FAVEUR)?)( /FRM)?(?P<text>.*)'),
FrenchTransaction.TYPE_TRANSFER),
(re.compile('^PRLV (?P<text>.*) (REF \w+)?$'),FrenchTransaction.TYPE_ORDER),
(re.compile('^CHEQUE.*? (REF \w+)?$'), FrenchTransaction.TYPE_CHECK),
(re.compile('^(AGIOS /|FRAIS) (?P<text>.*)'), FrenchTransaction.TYPE_BANK),
(re.compile('^(CONVENTION \d+ )?COTIS(ATION)? (?P<text>.*)'),
FrenchTransaction.TYPE_BANK),
(re.compile('^REMISE (?P<text>.*)'), FrenchTransaction.TYPE_DEPOSIT),
(re.compile('^(?P<text>.*)( \d+)? QUITTANCE .*'),
FrenchTransaction.TYPE_ORDER),
(re.compile('^.* LE (?P<dd>\d{2})/(?P<mm>\d{2})/(?P<yy>\d{2})$'),
FrenchTransaction.TYPE_UNKNOWN),
]
class HistoryBasePage(Page):
def get_history(self):
self.logger.warning('Do not support account of type %s' % type(self).__name__)
return iter([])
class TransactionsPage(HistoryBasePage):
def get_history(self):
for tr in self.document.xpath('//table[@id="operation"]/tbody/tr'):
tds = tr.findall('td')
if len(tds) < 5:
continue
t = Transaction(tds[-1].findall('img')[-1].attrib.get('id', ''))
date = u''.join([txt.strip() for txt in tds[0].itertext()])
raw = u' '.join([txt.strip() for txt in tds[1].itertext()])
debit = u''.join([txt.strip() for txt in tds[-3].itertext()])
credit = u''.join([txt.strip() for txt in tds[-2].itertext()])
t.parse(date, re.sub(r'[ ]+', ' ', raw))
t.set_amount(credit, debit)
t._coming = False
if t.raw.startswith('ACHAT CARTE -DEBIT DIFFERE'):
continue
yield t
class CardPage(HistoryBasePage):
def get_history(self):
debit_date = None
coming = True
for tr in self.document.xpath('//table[@class="report"]/tbody/tr'):
tds = tr.findall('td')
if len(tds) == 2:
# headers
m = re.match('.* (\d+)/(\d+)/(\d+)', tds[0].text.strip())
debit_date = datetime.date(int(m.group(3)), int(m.group(2)), int(m.group(1)))
if debit_date < datetime.date.today():
coming = False
if len(tds) != 3:
continue
t = Transaction(0)
date = u''.join([txt.strip() for txt in tds[0].itertext()])
raw = u' '.join([txt.strip() for txt in tds[1].itertext()])
amount = u''.join([txt.strip() for txt in tds[-1].itertext()])
t.parse(date, re.sub(r'[ ]+', ' ', raw))
if debit_date is not None:
t.date = debit_date
t.label = unicode(tds[1].find('span').text.strip())
t.type = t.TYPE_CARD
t._coming = coming
t.set_amount(amount)
yield t
class ValuationPage(HistoryBasePage):
pass
class LoanPage(HistoryBasePage):
pass
class MarketPage(HistoryBasePage):
pass
class AssurancePage(HistoryBasePage):
pass
| agpl-3.0 | -2,684,563,615,446,834,700 | 40.865854 | 125 | 0.491407 | false | 4.108097 | false | false | false |
nash-x/hws | neutron/service.py | 3 | 10505 | # Copyright 2011 VMware, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import logging as std_logging
import os
import random
from oslo.config import cfg
from oslo.messaging import server as rpc_server
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import api as session
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import service as common_service
from neutron import wsgi
service_opts = [
cfg.IntOpt('periodic_interval',
default=40,
help=_('Seconds between running periodic tasks')),
cfg.IntOpt('api_workers',
default=0,
help=_('Number of separate API worker processes for service')),
cfg.IntOpt('rpc_workers',
default=0,
help=_('Number of RPC worker processes for service')),
cfg.IntOpt('periodic_fuzzy_delay',
default=5,
help=_('Range of seconds to randomly delay when starting the '
'periodic task scheduler to reduce stampeding. '
'(Disable by setting to 0)')),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class WsgiService(object):
"""Base class for WSGI based services.
For each api you define, you must also define these flags:
:<api>_listen: The address on which to listen
:<api>_listen_port: The port on which to listen
"""
def __init__(self, app_name):
self.app_name = app_name
self.wsgi_app = None
def start(self):
self.wsgi_app = _run_wsgi(self.app_name)
def wait(self):
self.wsgi_app.wait()
class NeutronApiService(WsgiService):
"""Class for neutron-api service."""
@classmethod
def create(cls, app_name='neutron'):
# Setup logging early, supplying both the CLI options and the
# configuration mapping from the config file
# We only update the conf dict for the verbose and debug
# flags. Everything else must be set up in the conf file...
# Log the options used when starting if we're in debug mode...
config.setup_logging()
# Dump the initial option values
cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
service = cls(app_name)
return service
def serve_wsgi(cls):
try:
service = cls.create()
service.start()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Unrecoverable error: please check log '
'for details.'))
return service
class RpcWorker(object):
"""Wraps a worker to be handled by ProcessLauncher"""
def __init__(self, plugin):
self._plugin = plugin
self._servers = []
def start(self):
# We may have just forked from parent process. A quick disposal of the
# existing sql connections avoids producing errors later when they are
# discovered to be broken.
session.get_engine().pool.dispose()
self._servers = self._plugin.start_rpc_listeners()
def wait(self):
for server in self._servers:
if isinstance(server, rpc_server.MessageHandlingServer):
server.wait()
def stop(self):
for server in self._servers:
if isinstance(server, rpc_server.MessageHandlingServer):
server.kill()
self._servers = []
def serve_rpc():
plugin = manager.NeutronManager.get_plugin()
# If 0 < rpc_workers then start_rpc_listeners would be called in a
# subprocess and we cannot simply catch the NotImplementedError. It is
# simpler to check this up front by testing whether the plugin supports
# multiple RPC workers.
if not plugin.rpc_workers_supported():
LOG.debug(_("Active plugin doesn't implement start_rpc_listeners"))
if 0 < cfg.CONF.rpc_workers:
msg = _("'rpc_workers = %d' ignored because start_rpc_listeners "
"is not implemented.")
LOG.error(msg, cfg.CONF.rpc_workers)
raise NotImplementedError()
try:
rpc = RpcWorker(plugin)
if cfg.CONF.rpc_workers < 1:
rpc.start()
return rpc
else:
launcher = common_service.ProcessLauncher(wait_interval=1.0)
launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers)
return launcher
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Unrecoverable error: please check log '
'for details.'))
def _run_wsgi(app_name):
app = config.load_paste_app(app_name)
if not app:
LOG.error(_('No known API applications configured.'))
return
server = wsgi.Server("Neutron")
server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host,
workers=cfg.CONF.api_workers)
# Dump all option values here after all options are parsed
cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
LOG.info(_("Neutron service started, listening on %(host)s:%(port)s"),
{'host': cfg.CONF.bind_host,
'port': cfg.CONF.bind_port})
return server
class Service(n_rpc.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_interval=None, periodic_fuzzy_delay=None,
*args, **kwargs):
self.binary = binary
self.manager_class_name = manager
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=host, *args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
super(Service, self).__init__(host, topic, manager=self.manager)
def start(self):
self.manager.init_host()
super(Service, self).start()
if self.report_interval:
pulse = loopingcall.FixedIntervalLoopingCall(self.report_state)
pulse.start(interval=self.report_interval,
initial_delay=self.report_interval)
self.timers.append(pulse)
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.FixedIntervalLoopingCall(
self.periodic_tasks)
periodic.start(interval=self.periodic_interval,
initial_delay=initial_delay)
self.timers.append(periodic)
self.manager.after_start()
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_interval=None,
periodic_fuzzy_delay=None):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'nova-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_interval: defaults to CONF.periodic_interval
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary.rpartition('neutron-')[2]
topic = topic.replace("-", "_")
if not manager:
manager = CONF.get('%s_manager' % topic, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_interval is None:
periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_interval=periodic_interval,
periodic_fuzzy_delay=periodic_fuzzy_delay)
return service_obj
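    # Illustrative call pattern for create(); the binary, topic and manager
    # values below are placeholders, not names defined in this project:
    #
    #     server = Service.create(binary='neutron-foo-agent',
    #                             topic='foo_agent',
    #                             manager='neutron.agent.foo.FooManager')
    #     # the returned object is then handed to the usual service launcher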
def kill(self):
"""Destroy the service object."""
self.stop()
def stop(self):
super(Service, self).stop()
for x in self.timers:
try:
x.stop()
except Exception:
LOG.exception(_("Exception occurs when timer stops"))
pass
self.timers = []
def wait(self):
super(Service, self).wait()
for x in self.timers:
try:
x.wait()
except Exception:
LOG.exception(_("Exception occurs when waiting for timer"))
pass
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def report_state(self):
"""Update the state of this service."""
# Todo(gongysh) report state to neutron server
pass
| apache-2.0 | -2,348,898,602,650,476,500 | 34.133779 | 79 | 0.620086 | false | 4.349896 | true | false | false |
mclaughlin6464/pasta | pasta/ising.py | 1 | 5474 | '''
This is a dummy file for me to get started making an Ising model. I'll get this 2-D Ising running, then generalize.
'''
import argparse
from itertools import izip
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
def run_ising(N, d, K, J,h, n_steps, plot = False):
'''
    :param N: Length of one side of the lattice.
    :param d: Number of dimensions of the lattice.
    :param K: Bond coupling strength.
    :param J: Energy of a bond; used in the Metropolis acceptance ratio.
    :param h: External magnetic field strength.
    :param n_steps: Number of Metropolis steps to run.
    :param plot: If True, display the lattice as it evolves (d <= 2 only).
    :return: Array of average spin correlations at distances 1..N/2.
'''
if plot:
try:
assert d <= 2
except AssertionError:
raise AssertionError("Can only plot in one or two dimensions.")
#TODO wrap these better
assert N >0 and N < 1000
assert d > 0
assert n_steps > 0
np.random.seed(0)
size = tuple(N for i in xrange(d))
lattice = np.ones(size)
#make a random initial state
lattice-= np.random.randint(0,2, size =size)*2
# do different initialization
E_0 = energy(lattice, potential, K, h)
if plot:
plt.ion()
for step in xrange(n_steps):
if step%1000 == 0:
print step
site = tuple(np.random.randint(0, N, size=d))
# consider flipping this site
lattice[site] *= -1
E_f = energy(lattice, potential, K, h)
# if E_F < E_0, keep
# if E_F > E_0, keep randomly given change of energies
if E_f >= E_0:
keep = np.random.uniform() < np.exp(K / J * (E_0 - E_f))
else:
keep = True
if keep:
E_0 = E_f
else:
lattice[site] *= -1
# fig = plt.figure()
if plot and step % 100 == 0:
if d == 1:
plt.imshow(lattice.reshape((1, -1)),interpolation='none')
else:
plt.imshow(lattice, interpolation='none')
plt.title(correlation(lattice, N/2))
plt.pause(0.01)
plt.clf()
return np.array([correlation(lattice, r) for r in xrange(1, N/2+1)])
def get_NN(site, N, d, r= 1):
'''
The NN of the site. Will only return those UP in index (east, south, and down) to avoid double counting.
Accounts for PBC
:param site:
(d,) array of coordinates in the lattice
:param N:
Size of one side of the lattice
:param d:
        dimension of the lattice
    :param r:
        separation, in lattice sites, of the neighbors to return (default 1)
:return:
dxd numpy array where each row corresponds to the nearest neighbors.
'''
mult_sites = np.r_[ [site for i in xrange(d)]]
adjustment = np.eye(d)*r
return ((mult_sites+adjustment)%N).astype(int)
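# Worked example (illustrative) of get_NN with periodic boundaries on a 4x4
# lattice (N=4, d=2):
#
#     get_NN((1, 1), 4, 2)   # -> array([[2, 1], [1, 2]])  south/east neighbors
#     get_NN((3, 3), 4, 2)   # -> array([[0, 3], [3, 0]])  wraps around the edge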
def potential(s1, s2, K, h):
'''
Basic Ising potential
:param s1:
First spin (-1 or 1)
:param s2:
Second spin
:param K:
        Coupling constant
    :param h:
        External magnetic field strength
    :return:
Energy of this particular bond
'''
return -1*K*s1*s2 - h/2*(s1+s2)#should this be abstracted to call the NN function?
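# Quick sanity check (illustrative) of the bond energy: with K=1 and h=0,
# aligned spins are energetically favoured,
#
#     potential(+1, +1, 1, 0)   # -> -1  (aligned, low energy)
#     potential(+1, -1, 1, 0)   # -> 1   (anti-aligned, high energy)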
def energy(lattice, potential, K, h = 0):
'''
Calculate the energy of a lattice
:param lattice:
Lattice to calculate the energy on
:param potential:
Function defining the potential of a given site.
:return:
Energy of the lattice
'''
N = lattice.shape[0]
d = len(lattice.shape)
dim_slices = np.meshgrid(*(xrange(N) for i in xrange(d)), indexing = 'ij')
all_sites = izip(*[slice.flatten() for slice in dim_slices])
E = 0
for site in all_sites:
nn = get_NN(site, N, d)
for neighbor in nn:
E+=potential(lattice[site], lattice[tuple(neighbor)],K = K, h = h)
return E
def magnetization(lattice):
return lattice.mean()
def correlation(lattice, r):
'''
The average spin correlation at distance r.
:param lattice:
The lattice to calculate the statistic on.
:param r:
Distance to measure correlation
    :return:
        The average spin-spin correlation at separation r, averaged over all
        sites and lattice directions.
'''
N = lattice.shape[0]
d = len(lattice.shape)
dim_slices = np.meshgrid(*(xrange(N) for i in xrange(d)), indexing='ij')
all_sites = izip(*[slice.flatten() for slice in dim_slices])
xi = 0
for site in all_sites:
nn = get_NN(site, N, d, r)
for neighbor in nn:
xi += lattice[site]*lattice[tuple(neighbor)]
return xi/((N**d)*d)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simulate an ising model')
parser.add_argument('N', type = int, help = 'Length of one side of the cube.')
parser.add_argument('d', type = int, help = 'Number of dimensions of the cube.')
#parser.add_argument('K', type = float, help ='Bond coupling strength.')
parser.add_argument('J', type = float, default = 1.0, nargs = '?',\
help = 'Energy of bond strength. Optional, default is 1.')
parser.add_argument('h', type = float, default=0.0, nargs = '?',\
help = 'Magnetic field strength. Optional, default is 0.')
parser.add_argument('n_steps', type = int, default = 1000, nargs = '?',\
help = 'Number of steps to simulate. Default is 1e5')
parser.add_argument('--plot', action = 'store_true',\
help = 'Whether or not to plot results. Only allowed with d = 1 or 2.')
args = parser.parse_args()
spins = []
Ks = [ 0.5,0.6,0.65, 0.7,0.8, 0.9]
for K in Ks:
print K
spins.append(run_ising(K = K, **vars(args)))
for K, spin in izip(Ks, spins):
plt.plot(spin, label = K )
plt.legend(loc = 'best')
plt.ylim([-0.1, 1.1])
plt.show() | mit | -154,069,744,090,616,030 | 28.278075 | 115 | 0.568871 | false | 3.491071 | false | false | false |
mhnatiuk/phd_sociology_of_religion | scrapper/lib/python2.7/site-packages/scrapy/contrib/linkextractors/htmlparser.py | 11 | 2468 | """
HTMLParser-based link extractor
"""
from HTMLParser import HTMLParser
from urlparse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class HtmlParserLinkExtractor(HTMLParser):
def __init__(self, tag="a", attr="href", process=None, unique=False):
HTMLParser.__init__(self)
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
def _extract_links(self, response_text, response_url, response_encoding):
self.reset()
self.feed(response_text)
self.close()
links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
ret = []
base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
for link in links:
if isinstance(link.url, unicode):
link.url = link.url.encode(response_encoding)
link.url = urljoin(base_url, link.url)
link.url = safe_url_string(link.url, response_encoding)
link.text = link.text.decode(response_encoding)
ret.append(link)
return ret
def extract_links(self, response):
# wrapper needed to allow to work directly with text
return self._extract_links(response.body, response.url, response.encoding)
def reset(self):
HTMLParser.reset(self)
self.base_url = None
self.current_link = None
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'base':
self.base_url = dict(attrs).get('href')
if self.scan_tag(tag):
for attr, value in attrs:
if self.scan_attr(attr):
url = self.process_attr(value)
link = Link(url=url)
self.links.append(link)
self.current_link = link
def handle_endtag(self, tag):
if self.scan_tag(tag):
self.current_link = None
def handle_data(self, data):
if self.current_link:
self.current_link.text = self.current_link.text + data
def matches(self, url):
"""This extractor matches with any url, since
it doesn't contain any patterns"""
return True
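# Typical use (illustrative; `response` stands for any scrapy Response object):
#
#     lx = HtmlParserLinkExtractor(tag='a', attr='href', unique=True)
#     links = lx.extract_links(response)   # list of scrapy.link.Link objects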
| gpl-2.0 | -148,817,184,968,540,600 | 31.906667 | 97 | 0.60778 | false | 4 | false | false | false |
Cynary/distro6.01 | arch/6.01Soft/lib601-F13-4/build/lib/soar/io/io_useif.py | 2 | 3114 | #
# soar
# io.py - object-oriented interface to the robot
#
# This io file makes use of the "official" soar interface
# (sonarDistances, etc), and it is still ugly, since it relies on having
# a handle on the brain environment, but it is arguably neater than
# the io.py file. However it seems to introduce some kind of lag that
# makes the really complicated labs with localization stuff work poorly
import soar.util
from soar.util import *
robotRadius = 0.2
def configure_io(namespace):
# need to use global 'cause we don't want to accidentally overwrite
# the brain environ by setting it to None when io.py is imported
global io_environ
io_environ = namespace
class SensorInput():
global io_environ
"""
    Represents one set of sensor readings from the robot, including
sonars, odometry, and readings from the analogInputs
"""
def __init__(self, cheat=False):
self.sonars = io_environ['sonarDistances']()
if cheat:
p = io_environ['cheatPose']()
else:
p = io_environ['pose']()
self.odometry = valueListToPose(p)
self.analogInputs = io_environ['analogInputs']()
def __str__(self):
return 'Sonar: ' + util.prettyString(self.sonars) + \
"; Odo: " + util.prettyString(self.odometry) +\
"; Analog: " + util.prettyString(self.analogInputs)
referenceVoltage = 5.0
class Action:
"""
One set of commands to send to the robot
"""
def __init__(self, fvel = 0.0, rvel = 0.0,
voltage = referenceVoltage,
discreteStepLength = None):
"""
@param fvel: signed number indicating forward velocity in m/s
@param rvel: signed number indicating rotational velocity in
rad/sec (?) positive is left, negative is right
@param voltage: voltage to send to analog input port of
control board; should be between 0 and 10v ??
@param discreteStepLength: if C{None}, then the robot
continues driving at the last commanded velocity until a new
action command is received; if set to a positive value, the
robot will drive at the last commanded velocity until
C{discreteStepLength} seconds have passed, and then stop.
Setting the step length to, e.g., 0.1, is useful when the
brain is doing so much computation that the robot drives too
far between steps.
"""
self.fvel = fvel
self.rvel = rvel
self.voltage = voltage
self.discreteStepLength = discreteStepLength
def execute(self):
if self.discreteStepLength:
io_environ['discreteMotorOutput'](self.fvel, self.rvel,
self.discreteStepLength)
else:
io_environ['motorOutput'](self.fvel, self.rvel)
io_environ['analogOutput'](self.voltage)
def __str__(self):
return 'Act: ' + \
util.prettyString([self.fvel, self.rvel, self.voltage])
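# Illustrative brain-step usage, assuming configure_io() has already been
# called by the surrounding framework (the velocity values are arbitrary):
#
#     inp = SensorInput()                       # sonars, odometry, analog in
#     Action(fvel=0.1, rvel=0.0).execute()      # drive straight at 0.1 m/s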
def registerUserFunction(type, f):
io_environ['registerUserFunction'](type, f)
| mit | -9,170,203,133,476,238,000 | 36.518072 | 72 | 0.635196 | false | 3.902256 | true | false | false |
wanghaoran1988/origin | cmd/cluster-capacity/go/src/github.com/kubernetes-incubator/cluster-capacity/vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py | 100 | 7192 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from charms import layer
from charms.reactive import hook
from charms.reactive import is_state
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charmhelpers.core import hookenv
from shlex import split
from subprocess import call
from subprocess import check_call
from subprocess import check_output
@hook('upgrade-charm')
def reset_delivery_states():
''' Remove the state set when resources are unpacked. '''
remove_state('kubernetes-e2e.installed')
@when('kubernetes-e2e.installed')
def messaging():
    ''' Probe our relations to determine the proper messaging to the
end user '''
missing_services = []
if not is_state('kubernetes-master.available'):
missing_services.append('kubernetes-master')
if not is_state('certificates.available'):
missing_services.append('certificates')
if missing_services:
if len(missing_services) > 1:
subject = 'relations'
else:
subject = 'relation'
services = ','.join(missing_services)
message = 'Missing {0}: {1}'.format(subject, services)
hookenv.status_set('blocked', message)
return
hookenv.status_set('active', 'Ready to test.')
@when_not('kubernetes-e2e.installed')
def install_kubernetes_e2e():
''' Deliver the e2e and kubectl components from the binary resource stream
packages declared in the charm '''
charm_dir = os.getenv('CHARM_DIR')
arch = determine_arch()
# Get the resource via resource_get
resource = 'e2e_{}'.format(arch)
try:
archive = hookenv.resource_get(resource)
except Exception:
message = 'Error fetching the {} resource.'.format(resource)
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing {} resource.'.format(resource))
hookenv.status_set('blocked', 'Missing {} resource.'.format(resource))
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked',
'Incomplete {} resource.'.format(resource))
return
hookenv.status_set('maintenance',
'Unpacking {} resource.'.format(resource))
unpack_path = '{}/files/kubernetes'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
services = ['e2e.test', 'ginkgo', 'kubectl']
for service in services:
unpacked = '{}/{}'.format(unpack_path, service)
app_path = '/usr/local/bin/{}'.format(service)
install = ['install', '-v', unpacked, app_path]
call(install)
set_state('kubernetes-e2e.installed')
@when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'kubernetes-master.available',
'kubernetes-e2e.installed')
@when_not('kubeconfig.ready')
def prepare_kubeconfig_certificates(master):
''' Prepare the data to feed to create the kubeconfig file. '''
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
key = layer_options.get('client_key_path')
cert = layer_options.get('client_certificate_path')
servers = get_kube_api_servers(master)
# pedantry
kubeconfig_path = '/home/ubuntu/.kube/config'
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/root/.kube/config', servers[0], ca, key, cert,
user='root')
create_kubeconfig(kubeconfig_path, servers[0], ca, key, cert,
user='ubuntu')
    # Set permissions on the ubuntu user's kubeconfig to ensure a consistent UX
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
set_state('kubeconfig.ready')
@when('kubernetes-e2e.installed', 'kubeconfig.ready')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubectl', 'version', '--client']
from subprocess import CalledProcessError
try:
version = check_output(cmd).decode('utf-8')
except CalledProcessError:
message = "Missing kubeconfig causes errors. Skipping version set."
hookenv.log(message)
return
git_version = version.split('GitVersion:"v')[-1]
version_from = git_version.split('",')[0]
hookenv.application_version_set(version_from.rstrip())
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
context='juju-context', cluster='juju-cluster'):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \
'--client-key={2} --client-certificate={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, user, key, certificate)))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
def get_kube_api_servers(master):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in master.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def determine_arch():
''' dpkg wrapper to surface the architecture we are tied to'''
cmd = ['dpkg', '--print-architecture']
output = check_output(cmd).decode('utf-8')
return output.rstrip()
| apache-2.0 | -6,698,703,982,956,785,000 | 34.60396 | 78 | 0.663515 | false | 4 | true | false | false |
darryncampbell/KerrieWorking-Angular | app/bower_components/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
try:
print("Downloading {} tarball from S3...".format(friendly_name))
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
open(NEED_TO_UPLOAD_MARKER, 'a').close()
print(err)
raise SystemExit("Cached {} download failed!".format(friendly_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(friendly_name))
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
| mit | 2,759,179,791,188,848,600 | 31.925234 | 114 | 0.675844 | false | 3.712329 | false | false | false |
njvack/ge-mri-rtafni | scanner-console/vendor/dicom/contrib/imViewer_Simple.py | 4 | 12358 | #==========================================================================
# imViewer-Simple.py
#
# An example program that opens uncompressed DICOM images and
# converts them via numPy and PIL to be viewed in wxWidgets GUI
# apps. The conversion is currently:
#
# pydicom->NumPy->PIL->wxPython.Image->wxPython.Bitmap
#
# Gruesome but it mostly works. Surely there is at least one
# of these steps that could be eliminated (probably PIL) but
# haven't tried that yet and I may want some of the PIL manipulation
# functions.
#
# This won't handle RLE, embedded JPEG-Lossy, JPEG-lossless,
# JPEG2000, old ACR/NEMA files, or anything weird. Also doesn't
# handle some RGB images that I tried.
#
# Have added Adit Panchal's LUT code. It helps a lot, but needs
# to be further generalized. Added test for window and/or level
# as 'list' type - crude, but it worked for a bunch of old MR and
# CT slices I have.
#
# Testing: minimal
# Tried only on WinXP sp2 using numpy 1.3.0
# and PIL 1.1.7b1, Python 2.6.4, and wxPython 2.8.10.1
#
# Dave Witten: Nov. 11, 2009
#==========================================================================
import os
import os.path
import sys
import dicom
import wx
have_PIL = True
try:
import PIL.Image
except:
have_PIL = False
have_numpy = True
try:
import numpy as np
except:
have_numpy = False
#----------------------------------------------------------------
# Initialize image capabilities.
#----------------------------------------------------------------
wx.InitAllImageHandlers()
def MsgDlg(window, string, caption='OFAImage', style=wx.YES_NO | wx.CANCEL):
"""Common MessageDialog."""
dlg = wx.MessageDialog(window, string, caption, style)
result = dlg.ShowModal()
dlg.Destroy()
return result
class ImFrame(wx.Frame):
"""Class for main window."""
def __init__(self, parent, title):
"""Create the pydicom image example's main frame window."""
wx.Frame.__init__(self, parent, id=-1, title="", pos=wx.DefaultPosition,
size=wx.Size(w=1024, h=768),
style=wx.DEFAULT_FRAME_STYLE | wx.SUNKEN_BORDER | wx.CLIP_CHILDREN)
#--------------------------------------------------------
# Set up the menubar.
#--------------------------------------------------------
self.mainmenu = wx.MenuBar()
# Make the 'File' menu.
menu = wx.Menu()
item = menu.Append(wx.ID_ANY, '&Open', 'Open file for editing')
self.Bind(wx.EVT_MENU, self.OnFileOpen, item)
item = menu.Append(wx.ID_ANY, 'E&xit', 'Exit Program')
self.Bind(wx.EVT_MENU, self.OnFileExit, item)
self.mainmenu.Append(menu, '&File')
# Attach the menu bar to the window.
self.SetMenuBar(self.mainmenu)
#--------------------------------------------------------
# Set up the main splitter window.
#--------------------------------------------------------
self.mainSplitter = wx.SplitterWindow(self, style=wx.NO_3D | wx.SP_3D)
self.mainSplitter.SetMinimumPaneSize(1)
#-------------------------------------------------------------
# Create the folderTreeView on the left.
#-------------------------------------------------------------
self.dsTreeView = wx.TreeCtrl(self.mainSplitter, style=wx.TR_LINES_AT_ROOT | wx.TR_HAS_BUTTONS)
#--------------------------------------------------------
# Create the ImageView on the right pane.
#--------------------------------------------------------
self.imView = wx.Panel(self.mainSplitter, style=wx.VSCROLL | wx.HSCROLL | wx.CLIP_CHILDREN)
self.imView.Bind(wx.EVT_PAINT, self.OnPaint)
self.imView.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.imView.Bind(wx.EVT_SIZE, self.OnSize)
#--------------------------------------------------------
# Install the splitter panes.
#--------------------------------------------------------
self.mainSplitter.SplitVertically(self.dsTreeView, self.imView)
self.mainSplitter.SetSashPosition(300, True)
#--------------------------------------------------------
# Initialize some values
#--------------------------------------------------------
self.dcmdsRoot = False
self.foldersRoot = False
self.loadCentered = True
self.bitmap = None
self.Show(True)
def OnFileExit(self, event):
"""Exits the program."""
self.Destroy()
event.Skip()
def OnSize(self, event):
"Window 'size' event."
self.Refresh()
def OnEraseBackground(self, event):
"Window 'erase background' event."
pass
def populateTree(self, ds):
""" Populate the tree in the left window with the [desired]
dataset values"""
if not self.dcmdsRoot:
self.dcmdsRoot = self.dsTreeView.AddRoot(text="DICOM Objects")
else:
self.dsTreeView.DeleteChildren(self.dcmdsRoot)
self.recurse_tree(ds, self.dcmdsRoot)
self.dsTreeView.ExpandAll()
def recurse_tree(self, ds, parent, hide=False):
""" order the dicom tags """
for data_element in ds:
if isinstance(data_element.value, unicode):
ip = self.dsTreeView.AppendItem(parent, text=unicode(data_element))
else:
ip = self.dsTreeView.AppendItem(parent, text=str(data_element))
if data_element.VR == "SQ":
for i, ds in enumerate(data_element.value):
sq_item_description = data_element.name.replace(" Sequence", "")
item_text = "%s %d" % (sq_item_description, i + 1)
parentNodeID = self.dsTreeView.AppendItem(ip, text=item_text.rjust(128))
self.recurse_tree(ds, parentNodeID)
## --- Most of what is important happens below this line ---------------------
def OnFileOpen(self, event):
"""Opens a selected file."""
dlg = wx.FileDialog(self, 'Choose a file to add.', '', '', '*.*', wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
fullPath = dlg.GetPath()
imageFile = dlg.GetFilename()
#checkDICMHeader()
self.show_file(imageFile, fullPath)
def OnPaint(self, event):
"Window 'paint' event."
dc = wx.PaintDC(self.imView)
dc = wx.BufferedDC(dc)
# paint a background just so it isn't *so* boring.
dc.SetBackground(wx.Brush("WHITE"))
dc.Clear()
dc.SetBrush(wx.Brush("GREY", wx.CROSSDIAG_HATCH))
windowsize = self.imView.GetSizeTuple()
dc.DrawRectangle(0, 0, windowsize[0], windowsize[1])
bmpX0 = 0
bmpY0 = 0
if self.bitmap is not None:
if self.loadCentered:
bmpX0 = (windowsize[0] - self.bitmap.Width) / 2
bmpY0 = (windowsize[1] - self.bitmap.Height) / 2
dc.DrawBitmap(self.bitmap, bmpX0, bmpY0, False)
#------------------------------------------------------------
# ImFrame.ConvertWXToPIL()
# Expropriated from Andrea Gavana's
# ShapedButton.py in the wxPython dist
#------------------------------------------------------------
def ConvertWXToPIL(self, bmp):
""" Convert wx.Image Into PIL Image. """
width = bmp.GetWidth()
height = bmp.GetHeight()
im = wx.EmptyImage(width, height)
im.fromarray("RGBA", (width, height), bmp.GetData())
        return im
#------------------------------------------------------------
# ImFrame.ConvertPILToWX()
# Expropriated from Andrea Gavana's
# ShapedButton.py in the wxPython dist
#------------------------------------------------------------
def ConvertPILToWX(self, pil, alpha=True):
""" Convert PIL Image Into wx.Image. """
if alpha:
image = apply(wx.EmptyImage, pil.size)
image.SetData(pil.convert("RGB").tostring())
image.SetAlphaData(pil.convert("RGBA").tostring()[3::4])
else:
image = wx.EmptyImage(pil.size[0], pil.size[1])
new_image = pil.convert('RGB')
data = new_image.tostring()
image.SetData(data)
return image
def get_LUT_value(self, data, window, level):
"""Apply the RGB Look-Up Table for the given data and window/level value."""
if not have_numpy:
raise ImportError("Numpy is not available. See http://numpy.scipy.org/ to download and install")
if isinstance(window, list):
window = window[0]
if isinstance(level, list):
level = level[0]
return np.piecewise(data,
[data <= (level - 0.5 - (window - 1) / 2),
data > (level - 0.5 + (window - 1) / 2)],
[0, 255, lambda data: ((data - (level - 0.5)) / (window - 1) + 0.5) * (255 - 0)]
)
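    # Illustrative note (added for clarity): the piecewise mapping above is the
    # usual DICOM window/level transform. For example, with window=400 and
    # level=40, intensities below roughly level - window/2 = -160 map to 0,
    # those above roughly level + window/2 = 240 map to 255, and values in
    # between are scaled linearly (40 maps to about 128).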
#-----------------------------------------------------------
# ImFrame.loadPIL_LUT(dataset)
# Display an image using the Python Imaging Library (PIL)
#-----------------------------------------------------------
def loadPIL_LUT(self, dataset):
if not have_PIL:
raise ImportError("Python Imaging Library is not available. See http://www.pythonware.com/products/pil/ to download and install")
if('PixelData' not in dataset):
raise TypeError("Cannot show image -- DICOM dataset does not have pixel data")
if('WindowWidth' not in dataset) or ('WindowCenter' not in dataset): # can only apply LUT if these values exist
bits = dataset.BitsAllocated
samples = dataset.SamplesPerPixel
if bits == 8 and samples == 1:
mode = "L"
elif bits == 8 and samples == 3:
mode = "RGB"
elif bits == 16: # not sure about this -- PIL source says is 'experimental' and no documentation.
mode = "I;16" # Also, should bytes swap depending on endian of file and system??
else:
raise TypeError("Don't know PIL mode for %d BitsAllocated and %d SamplesPerPixel" % (bits, samples))
size = (dataset.Columns, dataset.Rows)
im = PIL.Image.frombuffer(mode, size, dataset.PixelData, "raw", mode, 0, 1) # Recommended to specify all details by http://www.pythonware.com/library/pil/handbook/image.htm
else:
image = self.get_LUT_value(dataset.pixel_array, dataset.WindowWidth, dataset.WindowCenter)
im = PIL.Image.fromarray(image).convert('L') # Convert mode to L since LUT has only 256 values: http://www.pythonware.com/library/pil/handbook/image.htm
return im
def show_file(self, imageFile, fullPath):
""" Load the DICOM file, make sure it contains at least one
image, and set it up for display by OnPaint(). ** be
careful not to pass a unicode string to read_file or it will
        give you 'fp object does not have a defer_size attribute',
or some such."""
ds = dicom.read_file(str(fullPath))
ds.decode() # change strings to unicode
self.populateTree(ds)
if 'PixelData' in ds:
self.dImage = self.loadPIL_LUT(ds)
if self.dImage is not None:
tmpImage = self.ConvertPILToWX(self.dImage, False)
self.bitmap = wx.BitmapFromImage(tmpImage)
self.Refresh()
##------ This is just the initialization of the App -------------------------
#=======================================================
# The main App Class.
#=======================================================
class App(wx.App):
"""Image Application."""
def OnInit(self):
"""Create the Image Application."""
frame = ImFrame(None, 'wxImage Example')
return True
#---------------------------------------------------------------------
# If this file is running as main or a standalone test, begin execution here.
#---------------------------------------------------------------------
if __name__ == '__main__':
app = App(0)
app.MainLoop()
| mit | -4,177,575,431,541,067,000 | 40.891525 | 185 | 0.515941 | false | 4.092053 | false | false | false |
claneys/shinken | test/test_servicetpl_no_hostname.py | 18 | 2049 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestsericeTplNoHostname(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_servicetpl_no_hostname.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -1,834,636,403,029,674,500 | 36.254545 | 134 | 0.666179 | false | 3.461149 | true | false | false |
LibreSoftTeam/2016-uml-miner | phase3-outputs/scripts/umlfiles2table_images.py | 1 | 2885 | #!/usr/bin/python3
import csv
import pymysql
singleList = []
multipleList = []
# Connect to the database
connection = pymysql.connect(
host='localhost',
user='operator',
passwd='operator',
db='chunk4_images',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
with open('updated4_images.csv', 'r') as csvfile:
for myupdatedCSV in csv.reader(csvfile):
if myupdatedCSV[3] == "UML":
updatedCSV = myupdatedCSV[2].split('/')
repo = updatedCSV[3] + "/" + updatedCSV[4]
fileurl = myupdatedCSV[2]
filename = fileurl.split('/')[-1]
filepath = '/'.join(fileurl.split('/')[6:])
if 'https://raw.githubusercontent.com/' not in fileurl:
continue
# Get repo id from database
cursor = connection.cursor()
sql = 'SELECT id FROM repositories WHERE uri="'
sql += 'https://github.com/{0}"'.format(repo)
# print(sql)
cursor.execute(sql)
result = cursor.fetchone()
try:
repo_id = result['id']
# print(repo_id)
except:
#print("# Error", result, repo)
continue
# Get file id from database
sql = 'SELECT id FROM files WHERE repository_id={0} and file_name="{1}"'.format(repo_id, filename)
# print(sql)
cursor.execute(sql)
if cursor.rowcount == 1:
result = cursor.fetchone()
file_id = result['id']
singleList.append((file_id, repo_id, fileurl.replace("'", "\\'"), filepath.replace("'", "\\'")))
else:
result = cursor.fetchall()
# print("Warning:", result, filepath)
found = 0
for file in result:
sql = 'SELECT file_path from file_links WHERE file_id={0}'.format(file['id'])
# print(sql)
cursor.execute(sql)
result = cursor.fetchone()
db_path = result['file_path']
if db_path == filepath:
singleList.append((file['id'], repo_id, fileurl.replace("'", "\\'"), filepath.replace("'", "\\'")))
found = 1
break
#if not found:
#print("# ERROR:", filepath, "not found")
connection.close()
# Write data into database
create = """
USE chunk4_images;
CREATE TABLE uml_files (
id int,
repository_id int,
file_url VARCHAR(255),
file_path VARCHAR(255)
);
"""
print(create)
for entry in singleList:
print("INSERT INTO uml_files (id, repository_id, file_url, file_path) VALUES ({0}, {1}, '{2}', '{3}');".format(*entry))
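# Illustrative sample of one generated line (hypothetical values, wrapped here):
#   INSERT INTO uml_files (id, repository_id, file_url, file_path)
#     VALUES (42, 7, 'https://raw.githubusercontent.com/owner/repo/master/doc/model.xmi', 'doc/model.xmi');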
| gpl-3.0 | -7,699,399,729,735,828,000 | 32.16092 | 123 | 0.49844 | false | 4.199418 | false | false | false |
lewisc/spark-tk | python/sparktk/models/timeseries/arx.py | 7 | 16906 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
ARX (autoregressive exogenous) Model
"""
from sparktk.loggers import log_load; log_load(__name__); del log_load
from sparktk import TkContext
from sparktk.propobj import PropertiesObject
__all__ = ["train", "load", "ArxModel"]
def train(frame, ts_column, x_columns, y_max_lag, x_max_lag, no_intercept=False):
"""
Creates a ARX model by training on the given frame. Fit an autoregressive model with additional
exogenous variables.
Parameters
----------
:param frame: (Frame) Frame used for training
:param ts_column: (str) Name of the column that contains the time series values.
:param x_columns: (List(str)) Names of the column(s) that contain the values of exogenous regressors.
:param y_max_lag: (int) The maximum lag order for the dependent (time series) variable.
:param x_max_lag: (int) The maximum lag order for exogenous variables.
:param no_intercept: (bool) A boolean flag indicating if the intercept should be dropped. Default is false.
:return: (ArxModel) Trained ARX model
Notes
-----
1. Dataset being trained must be small enough to be worked with on a single node.
+ If the specified set of exogenous variables is not invertible, an exception is
thrown stating that the "matrix is singular". This happens when there are
certain patterns in the dataset or columns of all zeros. In order to work
around the singular matrix issue, try selecting a different set of columns for
exogenous variables, or use a different time window for training.
"""
# check parameter/types
if not isinstance(ts_column, basestring):
raise TypeError("'ts_column' should be a string (name of the column that has the timeseries value).")
if not isinstance(x_columns, list) or not all(isinstance(c, str) for c in x_columns):
raise TypeError("'x_columns' should be a list of strings (names of the exogenous columns).")
elif len(x_columns) <= 0:
raise ValueError("'x_columns' should not be empty.")
if not isinstance(x_max_lag, int):
raise TypeError("'x_max_lag' should be an integer.")
if not isinstance(y_max_lag, int):
raise TypeError("'y_max_lag' should be an integer.")
if not isinstance(no_intercept, bool):
raise TypeError("'no_intercept' should be a boolean.")
tc = frame._tc
_scala_obj = get_scala_obj(tc)
scala_x_columns = tc.jutils.convert.to_scala_vector_string(x_columns)
scala_model = _scala_obj.train(frame._scala, ts_column, scala_x_columns, y_max_lag, x_max_lag, no_intercept)
return ArxModel(tc, scala_model)
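# Illustrative note (added for clarity, not from the library docs): with
# y_max_lag = p and x_max_lag = q, the trained model has the usual ARX form
#   y_t = c + sum_{i=1..p} a_i * y_{t-i} + sum_{j=0..q} b_j * x_{t-j}
# (the second sum taken over each exogenous column), with the intercept c
# dropped when no_intercept=True.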
def load(path, tc=TkContext.implicit):
"""load ArxModel from given path"""
TkContext.validate(tc)
return tc.load(path, ArxModel)
def get_scala_obj(tc):
"""Gets reference to the ArxModel scala object"""
return tc.sc._jvm.org.trustedanalytics.sparktk.models.timeseries.arx.ArxModel
class ArxModel(PropertiesObject):
"""
A trained ARX model.
Example
-------
Consider the following model trained and tested on the sample data set in *frame* 'frame'.
The frame has a snippet of air quality data from:
https://archive.ics.uci.edu/ml/datasets/Air+Quality.
Lichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
<hide>
>>> schema = [('Date', str),('Time', str),('CO_GT', float),('PT08_S1_CO', int),('NMHC_GT', int),
... ('C6H6_GT', float),('PT08_S2_NMHC', int),('NOx_GT', int),('PT08_S3_NOx', int),('NO2_GT', int),
... ('PT08_S4_NO2', int),('PT08_S5_O3_', int),('T', float),('RH', float),('AH', float)]
>>> frame = tc.frame.create([["10/03/2004","18.00.00",2.6,1360,150,11.9,1046,166,1056,113,1692,1268,13.6,48.9,0.7578],
... ["10/03/2004","19.00.00",2,1292,112,9.4,955,103,1174,92,1559,972,13.3,47.7,0.7255],
... ["10/03/2004","20.00.00",2.2,1402,88,9.0,939,131,1140,114,1555,1074,11.9,54.0,0.7502],
... ["10/03/2004","21.00.00",2.2,1376,80,9.2,948,172,1092,122,1584,1203,11.0,60.0,0.7867],
... ["10/03/2004","22.00.00",1.6,1272,51,6.5,836,131,1205,116,1490,1110,11.2,59.6,0.7888],
... ["10/03/2004","23.00.00",1.2,1197,38,4.7,750,89,1337,96,1393,949,11.2,59.2,0.7848],
... ["11/03/2004","00.00.00",1.2,1185,31,3.6,690,62,1462,77,1333,733,11.3,56.8,0.7603],
... ["11/03/2004","01.00.00",1,1136,31,3.3,672,62,1453,76,1333,730,10.7,60.0,0.7702],
... ["11/03/2004","02.00.00",0.9,1094,24,2.3,609,45,1579,60,1276,620,10.7,59.7,0.7648],
... ["11/03/2004","03.00.00",0.6,1010,19,1.7,561,-200,1705,-200,1235,501,10.3,60.2,0.7517],
... ["11/03/2004","04.00.00",-200,1011,14,1.3,527,21,1818,34,1197,445,10.1,60.5,0.7465],
... ["11/03/2004","05.00.00",0.7,1066,8,1.1,512,16,1918,28,1182,422,11.0,56.2,0.7366],
... ["11/03/2004","06.00.00",0.7,1052,16,1.6,553,34,1738,48,1221,472,10.5,58.1,0.7353],
... ["11/03/2004","07.00.00",1.1,1144,29,3.2,667,98,1490,82,1339,730,10.2,59.6,0.7417],
... ["11/03/2004","08.00.00",2,1333,64,8.0,900,174,1136,112,1517,1102,10.8,57.4,0.7408],
... ["11/03/2004","09.00.00",2.2,1351,87,9.5,960,129,1079,101,1583,1028,10.5,60.6,0.7691],
... ["11/03/2004","10.00.00",1.7,1233,77,6.3,827,112,1218,98,1446,860,10.8,58.4,0.7552],
... ["11/03/2004","11.00.00",1.5,1179,43,5.0,762,95,1328,92,1362,671,10.5,57.9,0.7352],
... ["11/03/2004","12.00.00",1.6,1236,61,5.2,774,104,1301,95,1401,664,9.5,66.8,0.7951],
... ["11/03/2004","13.00.00",1.9,1286,63,7.3,869,146,1162,112,1537,799,8.3,76.4,0.8393],
... ["11/03/2004","14.00.00",2.9,1371,164,11.5,1034,207,983,128,1730,1037,8.0,81.1,0.8736],
... ["11/03/2004","15.00.00",2.2,1310,79,8.8,933,184,1082,126,1647,946,8.3,79.8,0.8778],
... ["11/03/2004","16.00.00",2.2,1292,95,8.3,912,193,1103,131,1591,957,9.7,71.2,0.8569],
... ["11/03/2004","17.00.00",2.9,1383,150,11.2,1020,243,1008,135,1719,1104,9.8,67.6,0.8185]],
... schema=schema, validate_schema=True)
-etc-
</hide>
>>> frame.inspect()
[#] Date Time CO_GT PT08_S1_CO NMHC_GT C6H6_GT PT08_S2_NMHC
============================================================================
[0] 10/03/2004 18.00.00 2.6 1360 150 11.9 1046
[1] 10/03/2004 19.00.00 2.0 1292 112 9.4 955
[2] 10/03/2004 20.00.00 2.2 1402 88 9.0 939
[3] 10/03/2004 21.00.00 2.2 1376 80 9.2 948
[4] 10/03/2004 22.00.00 1.6 1272 51 6.5 836
[5] 10/03/2004 23.00.00 1.2 1197 38 4.7 750
[6] 11/03/2004 00.00.00 1.2 1185 31 3.6 690
[7] 11/03/2004 01.00.00 1.0 1136 31 3.3 672
[8] 11/03/2004 02.00.00 0.9 1094 24 2.3 609
[9] 11/03/2004 03.00.00 0.6 1010 19 1.7 561
<BLANKLINE>
[#] NOx_GT PT08_S3_NOx NO2_GT PT08_S4_NO2 PT08_S5_O3_ T RH AH
==============================================================================
[0] 166 1056 113 1692 1268 13.6 48.9 0.7578
[1] 103 1174 92 1559 972 13.3 47.7 0.7255
[2] 131 1140 114 1555 1074 11.9 54.0 0.7502
[3] 172 1092 122 1584 1203 11.0 60.0 0.7867
[4] 131 1205 116 1490 1110 11.2 59.6 0.7888
[5] 89 1337 96 1393 949 11.2 59.2 0.7848
[6] 62 1462 77 1333 733 11.3 56.8 0.7603
[7] 62 1453 76 1333 730 10.7 60.0 0.7702
[8] 45 1579 60 1276 620 10.7 59.7 0.7648
[9] -200 1705 -200 1235 501 10.3 60.2 0.7517
We will be using the column "T" (temperature) as our time series value:
>>> y = "T"
The sensor values will be used as our exogenous variables:
>>> x = ['CO_GT','PT08_S1_CO','NMHC_GT','C6H6_GT','PT08_S2_NMHC','NOx_GT','PT08_S3_NOx','NO2_GT','PT08_S4_NO2','PT08_S5_O3_']
Train the model and then take a look at the model properties and coefficients:
>>> model = tc.models.timeseries.arx.train(frame, y, x, 0, 0, True)
<progress>
>>> model
c = 0.0
coefficients = [0.005567992923907625, -0.010969068059453009, 0.012556586798371176, -0.39792503380811506, 0.04289162879826746, -0.012253952164677924, 0.01192148525581035, 0.014100699808650077, -0.021091473795935345, 0.007622676727420039]
no_intercept = True
x_max_lag = 0
y_max_lag = 0
In this example, we will call predict using the same frame that was used for training, again specifying the name
of the time series column and the names of the columns that contain exogenous regressors.
>>> predicted_frame = model.predict(frame, y, x)
<progress>
    The predicted_frame that's returned has a new column called *predicted_y*. This column contains the predicted
time series values.
>>> predicted_frame.column_names
[u'Date',
u'Time',
u'CO_GT',
u'PT08_S1_CO',
u'NMHC_GT',
u'C6H6_GT',
u'PT08_S2_NMHC',
u'NOx_GT',
u'PT08_S3_NOx',
u'NO2_GT',
u'PT08_S4_NO2',
u'PT08_S5_O3_',
u'T',
u'RH',
u'AH',
u'predicted_y']
>>> predicted_frame.inspect(n=15, columns=["T","predicted_y"])
[##] T predicted_y
=========================
[0] 13.6 13.236459938
[1] 13.3 13.0250130899
[2] 11.9 11.4147282294
[3] 11.0 11.3157457822
[4] 11.2 11.3982074883
[5] 11.2 11.7079198051
[6] 11.3 10.7879916472
[7] 10.7 10.527428478
[8] 10.7 10.4439615476
[9] 10.3 10.276662138
[10] 10.1 10.0999996581
[11] 11.0 11.2849327784
[12] 10.5 10.5726885589
[13] 10.2 10.1984619512
[14] 10.8 11.0063774234
The trained model can be saved to be used later:
>>> model_path = "sandbox/savedArxModel"
>>> model.save(model_path)
The saved model can be loaded through the tk context and then used for forecasting values the same way
that the original model was used.
>>> loaded_model = tc.load(model_path)
>>> predicted_frame = loaded_model.predict(frame, y, x)
>>> predicted_frame.inspect(n=15,columns=["T","predicted_y"])
[##] T predicted_y
=========================
[0] 13.6 13.236459938
[1] 13.3 13.0250130899
[2] 11.9 11.4147282294
[3] 11.0 11.3157457822
[4] 11.2 11.3982074883
[5] 11.2 11.7079198051
[6] 11.3 10.7879916472
[7] 10.7 10.527428478
[8] 10.7 10.4439615476
[9] 10.3 10.276662138
[10] 10.1 10.0999996581
[11] 11.0 11.2849327784
[12] 10.5 10.5726885589
[13] 10.2 10.1984619512
[14] 10.8 11.0063774234
The trained model can also be exported to a .mar file, to be used with the scoring engine:
>>> canonical_path = model.export_to_mar("sandbox/arx.mar")
<hide>
>>> import os
>>> assert(os.path.isfile(canonical_path))
</hide>
"""
def __init__(self, tc, scala_model):
self._tc = tc
tc.jutils.validate_is_jvm_instance_of(scala_model, get_scala_obj(tc))
self._scala = scala_model
@staticmethod
def _from_scala(tc, scala_model):
"""
Load an ARX model
:param tc: (TkContext) Active TkContext
:param scala_model: (scala ArxModel) Scala model to load
:return: (ArxModel) ArxModel object
"""
return ArxModel(tc, scala_model)
@property
def y_max_lag(self):
"""
The maximum lag order for the dependent (time series) values.
"""
return self._scala.yMaxLag()
@property
def x_max_lag(self):
"""
The maximum lag order for exogenous variables.
"""
return self._scala.xMaxLag()
@property
def c(self):
"""
An intercept term (zero if none desired), from the trained model.
"""
return self._scala.c()
@property
def coefficients(self):
"""
Coefficient values from the trained model.
"""
return list(self._tc.jutils.convert.from_scala_seq(self._scala.coefficients()))
@property
def no_intercept(self):
"""
A boolean flag indicating if the intercept should be dropped.
"""
return self._scala.noIntercept()
def predict(self, frame, ts_column, x_columns):
"""
New frame with column of predicted y values
Predict the time series values for a test frame, based on the specified x values. Creates a new frame
revision with the existing columns and a new predicted_y column.
Parameters
----------
:param frame: (Frame) Frame used for predicting the ts values
:param ts_column: (str) Name of the time series column
:param x_columns: (List[str]) Names of the column(s) that contain the values of the exogenous inputs.
        :return: (Frame) A new frame containing the original frame's columns and a column *predicted_y*
"""
if not isinstance(frame, self._tc.frame.Frame):
raise TypeError("'frame' parameter should be a spark-tk Frame object.")
if not isinstance(ts_column, basestring):
raise TypeError("'ts_column' parameter should be a string (name of the column that has the timeseries value).")
if not isinstance(x_columns, list) or not all(isinstance(c, str) for c in x_columns):
raise TypeError("'x_columns' parameter should be a list of strings (names of the exogenous columns).")
elif len(x_columns) <= 0:
raise ValueError("'x_columns' should not be empty.")
scala_x_columns = self._tc.jutils.convert.to_scala_vector_string(x_columns)
from sparktk.frame.frame import Frame
return Frame(self._tc, self._scala.predict(frame._scala, ts_column, scala_x_columns))
def save(self, path):
"""
Save the trained model to the specified path.
Parameters
----------
:param path: (str) Path to save
"""
self._scala.save(self._tc._scala_sc, path)
def export_to_mar(self, path):
"""
Exports the trained model as a model archive (.mar) to the specified path.
Parameters
----------
:param path: (str) Path to save the trained model
:returns (str) Full path to the saved .mar file
"""
if not isinstance(path, basestring):
raise TypeError("path parameter must be a str, but received %s" % type(path))
return self._scala.exportToMar(self._tc._scala_sc, path)
del PropertiesObject
| apache-2.0 | -6,667,347,977,254,630,000 | 43.401055 | 244 | 0.547243 | false | 3.042488 | false | false | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/pip/_vendor/chardet/escsm.py | 289 | 10510 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import MachineState
HZ_CLS = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
HZ_ST = (
MachineState.START,MachineState.ERROR, 3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START, 4,MachineState.ERROR,# 10-17
5,MachineState.ERROR, 6,MachineState.ERROR, 5, 5, 4,MachineState.ERROR,# 18-1f
4,MachineState.ERROR, 4, 4, 4,MachineState.ERROR, 4,MachineState.ERROR,# 20-27
4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f
)
HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
HZ_SM_MODEL = {'class_table': HZ_CLS,
'class_factor': 6,
'state_table': HZ_ST,
'char_len_table': HZ_CHAR_LEN_TABLE,
'name': "HZ-GB-2312",
'language': 'Chinese'}
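# Illustrative usage (added for clarity; the real consumer is chardet's
# escape-sequence prober): each *_SM_MODEL dict parameterizes a
# CodingStateMachine, roughly
#   from chardet.codingstatemachine import CodingStateMachine
#   sm = CodingStateMachine(HZ_SM_MODEL)
#   state = sm.next_state(byte)   # MachineState.START, ITS_ME or ERROR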
ISO2022CN_CLS = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022CN_ST = (
MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,# 18-1f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27
5, 6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f
)
ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
'class_factor': 9,
'state_table': ISO2022CN_ST,
'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
'name': "ISO-2022-CN",
'language': 'Chinese'}
ISO2022JP_CLS = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022JP_ST = (
MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f
MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 20-27
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47
)
ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
'class_factor': 10,
'state_table': ISO2022JP_ST,
'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
'name': "ISO-2022-JP",
'language': 'Japanese'}
ISO2022KR_CLS = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022KR_ST = (
MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27
)
ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
'class_factor': 6,
'state_table': ISO2022KR_ST,
'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
'name': "ISO-2022-KR",
'language': 'Korean'}
| mit | -308,592,456,976,034,500 | 41.723577 | 165 | 0.628069 | false | 2.199665 | false | false | false |
openvapour/ryu | ryu/services/protocols/bgp/info_base/rtc.py | 52 | 2467 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines data types and models required specifically for RTC support.
"""
import logging
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.services.protocols.bgp.info_base.base import Destination
from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin
from ryu.services.protocols.bgp.info_base.base import Path
from ryu.services.protocols.bgp.info_base.base import Table
LOG = logging.getLogger('bgpspeaker.info_base.rtc')
class RtcTable(Table):
"""Global table to store RT membership information.
    Uses `RtcDest` to store destination information for each known RT NLRI path.
"""
ROUTE_FAMILY = RF_RTC_UC
def __init__(self, core_service, signal_bus):
Table.__init__(self, None, core_service, signal_bus)
def _table_key(self, rtc_nlri):
"""Return a key that will uniquely identify this RT NLRI inside
this table.
"""
return str(rtc_nlri.origin_as) + ':' + rtc_nlri.route_target
def _create_dest(self, nlri):
return RtcDest(self, nlri)
def __str__(self):
return 'RtcTable(scope_id: %s, rf: %s)' % (self.scope_id,
self.route_family)
class RtcDest(Destination, NonVrfPathProcessingMixin):
ROUTE_FAMILY = RF_RTC_UC
def _new_best_path(self, new_best_path):
NonVrfPathProcessingMixin._new_best_path(self, new_best_path)
def _best_path_lost(self):
NonVrfPathProcessingMixin._best_path_lost(self)
class RtcPath(Path):
ROUTE_FAMILY = RF_RTC_UC
def __init__(self, source, nlri, src_ver_num, pattrs=None,
nexthop='0.0.0.0', is_withdraw=False,
med_set_by_target_neighbor=False):
Path.__init__(self, source, nlri, src_ver_num, pattrs, nexthop,
is_withdraw, med_set_by_target_neighbor)
| apache-2.0 | -6,789,287,685,204,760,000 | 32.794521 | 79 | 0.679773 | false | 3.440725 | false | false | false |
admcrae/tensorflow | tensorflow/contrib/learn/python/learn/ops/seq2seq_ops.py | 61 | 5870 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Ops for Sequence to Sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
"""Returns predictions and loss for sequence of predictions.
Args:
decoding: List of Tensors with predictions.
labels: List of Tensors with labels.
sampling_decoding: Optional, List of Tensor with predictions to be used
      in sampling. E.g. they shouldn't have dependency on outputs.
If not provided, decoding is used.
name: Operation name.
Returns:
Predictions and losses tensors.
"""
with ops.name_scope(name, "sequence_classifier", [decoding, labels]):
predictions, xent_list = [], []
for i, pred in enumerate(decoding):
xent_list.append(nn.softmax_cross_entropy_with_logits(
labels=labels[i], logits=pred,
name="sequence_loss/xent_raw{0}".format(i)))
if sampling_decoding:
predictions.append(nn.softmax(sampling_decoding[i]))
else:
predictions.append(nn.softmax(pred))
xent = math_ops.add_n(xent_list, name="sequence_loss/xent")
loss = math_ops.reduce_sum(xent, name="sequence_loss")
return array_ops.stack(predictions, axis=1), loss
def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
"""Processes inputs for Sequence to Sequence models.
Args:
x: Input Tensor [batch_size, input_length, embed_dim].
y: Output Tensor [batch_size, output_length, embed_dim].
input_length: length of input x.
output_length: length of output y.
sentinel: optional first input to decoder and final output expected.
If sentinel is not provided, zeros are used. Due to fact that y is not
available in sampling time, shape of sentinel will be inferred from x.
name: Operation name.
Returns:
Encoder input from x, and decoder inputs and outputs from y.
"""
with ops.name_scope(name, "seq2seq_inputs", [x, y]):
in_x = array_ops.unstack(x, axis=1)
y = array_ops.unstack(y, axis=1)
if not sentinel:
# Set to zeros of shape of y[0], using x for batch size.
sentinel_shape = array_ops.stack(
[array_ops.shape(x)[0], y[0].get_shape()[1]])
sentinel = array_ops.zeros(sentinel_shape)
sentinel.set_shape(y[0].get_shape())
in_y = [sentinel] + y
out_y = y + [sentinel]
return in_x, in_y, out_y
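# Illustrative example (added for clarity): with output_length 3, steps
# y0, y1, y2 and a zero sentinel s, seq2seq_inputs returns
#   in_y  = [s, y0, y1, y2]   # decoder inputs, shifted right by one step
#   out_y = [y0, y1, y2, s]   # decoder targets
# so the decoder is trained to predict each step from the previous one.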
def rnn_decoder(decoder_inputs, initial_state, cell, scope=None):
"""RNN Decoder that creates training and sampling sub-graphs.
Args:
decoder_inputs: Inputs for decoder, list of tensors.
This is used only in training sub-graph.
initial_state: Initial state for the decoder.
cell: RNN cell to use for decoder.
scope: Scope to use, if None new will be produced.
Returns:
List of tensors for outputs and states for training and sampling sub-graphs.
"""
with vs.variable_scope(scope or "dnn_decoder"):
states, sampling_states = [initial_state], [initial_state]
outputs, sampling_outputs = [], []
with ops.name_scope("training", values=[decoder_inputs, initial_state]):
for i, inp in enumerate(decoder_inputs):
if i > 0:
vs.get_variable_scope().reuse_variables()
output, new_state = cell(inp, states[-1])
outputs.append(output)
states.append(new_state)
with ops.name_scope("sampling", values=[initial_state]):
for i, _ in enumerate(decoder_inputs):
if i == 0:
sampling_outputs.append(outputs[i])
sampling_states.append(states[i])
else:
sampling_output, sampling_state = cell(sampling_outputs[-1],
sampling_states[-1])
sampling_outputs.append(sampling_output)
sampling_states.append(sampling_state)
return outputs, states, sampling_outputs, sampling_states
def rnn_seq2seq(encoder_inputs,
decoder_inputs,
encoder_cell,
decoder_cell=None,
dtype=dtypes.float32,
scope=None):
"""RNN Sequence to Sequence model.
Args:
encoder_inputs: List of tensors, inputs for encoder.
decoder_inputs: List of tensors, inputs for decoder.
encoder_cell: RNN cell to use for encoder.
decoder_cell: RNN cell to use for decoder, if None encoder_cell is used.
dtype: Type to initialize encoder state with.
scope: Scope to use, if None new will be produced.
Returns:
    List of tensors for outputs and states for training and sampling sub-graphs.
"""
with vs.variable_scope(scope or "rnn_seq2seq"):
_, last_enc_state = rnn.static_rnn(
encoder_cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, last_enc_state, decoder_cell or
encoder_cell)
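# Illustrative usage sketch (assumed names such as `num_units`; not taken from
# this module's tests):
#   cell = rnn.GRUCell(num_units)
#   enc_in, dec_in, dec_out = seq2seq_inputs(x, y, input_length, output_length)
#   outputs, _, sampling_outputs, _ = rnn_seq2seq(enc_in, dec_in, cell)
#   predictions, loss = sequence_classifier(outputs, dec_out, sampling_outputs)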
| apache-2.0 | -5,773,286,829,979,748,000 | 38.395973 | 80 | 0.671891 | false | 3.960864 | false | false | false |
pyblish/pyblish-starter | pyblish_starter/vendor/jsonschema/_reflect.py | 7 | 5008 | # -*- test-case-name: twisted.test.test_reflect -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standardized versions of various cool and/or strange things that you can do
with Python's reflection capabilities.
"""
import sys
from .compat import PY3
class _NoModuleFound(Exception):
"""
No module was found because none exists.
"""
class InvalidName(ValueError):
"""
The given name is not a dot-separated list of Python objects.
"""
class ModuleNotFound(InvalidName):
"""
The module associated with the given name doesn't exist and it can't be
imported.
"""
class ObjectNotFound(InvalidName):
"""
The object associated with the given name doesn't exist and it can't be
imported.
"""
if PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
def _importAndCheckStack(importName):
"""
Import the given name as a module, then walk the stack to determine whether
the failure was the module not existing, or some code in the module (for
example a dependent import) failing. This can be helpful to determine
    whether any actual application code was run. For example, to distinguish
administrative error (entering the wrong module name), from programmer
error (writing buggy code in a module that fails to import).
@param importName: The name of the module to import.
@type importName: C{str}
@raise Exception: if something bad happens. This can be any type of
exception, since nobody knows what loading some arbitrary code might
do.
@raise _NoModuleFound: if no module was found.
"""
try:
return __import__(importName)
except ImportError:
excType, excValue, excTraceback = sys.exc_info()
while excTraceback:
execName = excTraceback.tb_frame.f_globals["__name__"]
# in Python 2 execName is None when an ImportError is encountered,
# where in Python 3 execName is equal to the importName.
if execName is None or execName == importName:
reraise(excValue, excTraceback)
excTraceback = excTraceback.tb_next
raise _NoModuleFound()
def namedAny(name):
"""
Retrieve a Python object by its fully qualified name from the global Python
module namespace. The first part of the name, that describes a module,
will be discovered and imported. Each subsequent part of the name is
treated as the name of an attribute of the object specified by all of the
name which came before it. For example, the fully-qualified name of this
object is 'twisted.python.reflect.namedAny'.
@type name: L{str}
@param name: The name of the object to return.
@raise InvalidName: If the name is an empty string, starts or ends with
a '.', or is otherwise syntactically incorrect.
@raise ModuleNotFound: If the name is syntactically correct but the
module it specifies cannot be imported because it does not appear to
exist.
@raise ObjectNotFound: If the name is syntactically correct, includes at
least one '.', but the module it specifies cannot be imported because
it does not appear to exist.
@raise AttributeError: If an attribute of an object along the way cannot be
accessed, or a module along the way is not found.
@return: the Python object identified by 'name'.
"""
if not name:
raise InvalidName('Empty module name')
names = name.split('.')
# if the name starts or ends with a '.' or contains '..', the __import__
# will raise an 'Empty module name' error. This will provide a better error
# message.
if '' in names:
raise InvalidName(
"name must be a string giving a '.'-separated list of Python "
"identifiers, not %r" % (name,))
topLevelPackage = None
moduleNames = names[:]
while not topLevelPackage:
if moduleNames:
trialname = '.'.join(moduleNames)
try:
topLevelPackage = _importAndCheckStack(trialname)
except _NoModuleFound:
moduleNames.pop()
else:
if len(names) == 1:
raise ModuleNotFound("No module named %r" % (name,))
else:
raise ObjectNotFound('%r does not name an object' % (name,))
obj = topLevelPackage
for n in names[1:]:
obj = getattr(obj, n)
return obj
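# Illustrative usage (added for clarity):
#   namedAny("os.path.join")             # -> the os.path.join function
#   namedAny("collections.OrderedDict")  # -> the OrderedDict class
# Lookup failures raise InvalidName, ModuleNotFound or ObjectNotFound as
# described in the docstring above.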
| mit | 2,590,815,536,556,024,300 | 32.386667 | 79 | 0.665136 | false | 4.369983 | false | false | false |
rbenson/orgmode | resolver/https.py | 2 | 2676 |
import re
import sys
import subprocess
import sublime
from .abstract import AbstractRegexLinkResolver
try:
import urllib.request, urllib.parse, urllib.error
except ImportError:
import urllib
PATTERN_SETTING = 'orgmode.open_link.resolver.https.pattern'
PATTERN_DEFAULT = r'^(https):(?P<url>.+)$'
URL_SETTING = 'orgmode.open_link.resolver.https.url'
URL_DEFAULT = 'https:%s'
DEFAULT_OPEN_HTTP_LINK_COMMANDS = dict(
darwin=['open'],
win32=['cmd', '/C'],
linux=['xdg-open'],
)
class Resolver(AbstractRegexLinkResolver):
def __init__(self, view):
super(Resolver, self).__init__(view)
get = self.settings.get
pattern = get(PATTERN_SETTING, PATTERN_DEFAULT)
self.regex = re.compile(pattern)
self.url = get(URL_SETTING, URL_DEFAULT)
self.link_commands = self.settings.get(
'orgmode.open_link.resolver.abstract.commands', DEFAULT_OPEN_HTTP_LINK_COMMANDS)
def replace(self, match):
return self.url % match.group('url')
def execute(self, content):
command = self.get_link_command()
if not command:
sublime.error_message(
'Could not get link opener command.\nNot yet supported.')
return None
# cmd.exe quote is needed, http://ss64.com/nt/syntax-esc.html
# escape these: ^\ ^& ^| ^> ^< ^^
if sys.platform == 'win32':
content = content.replace("^", "^^")
content = content.replace("&", "^&")
content = content.replace("\\", "^\\")
content = content.replace("|", "^|")
content = content.replace("<", "^<")
content = content.replace(">", "^>")
if sys.version_info[0] < 3:
content = content.encode(sys.getfilesystemencoding())
if sys.platform != 'win32':
cmd = command + [content]
else:
cmd = command + ['start ' + content]
print('HTTP*****')
print(repr(content), content)
print(repr(cmd))
print(cmd)
sublime.status_message('Executing: %s' % cmd)
if sys.platform != 'win32':
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
if stdout:
stdout = str(stdout, sys.getfilesystemencoding())
sublime.status_message(stdout)
if stderr:
stderr = str(stderr, sys.getfilesystemencoding())
sublime.error_message(stderr)
| mit | -617,696,223,155,290,200 | 31.240964 | 92 | 0.58296 | false | 3.918009 | false | false | false |
jfinkels/networkx | networkx/generators/tests/test_expanders.py | 55 | 2497 | # Copyright 2014 "cheebee7i".
# Copyright 2014 "alexbrc".
# Copyright 2014 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>.
"""Unit tests for the :mod:`networkx.generators.expanders` module.
"""
try:
import scipy
is_scipy_available = True
except:
is_scipy_available = False
import networkx as nx
from networkx import adjacency_matrix
from networkx import number_of_nodes
from networkx.generators.expanders import chordal_cycle_graph
from networkx.generators.expanders import margulis_gabber_galil_graph
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_less
from nose.tools import assert_raises
from nose.tools import assert_true
def test_margulis_gabber_galil_graph():
try:
# Scipy is required for conversion to an adjacency matrix.
# We also use scipy for computing the eigenvalues,
# but this second use could be done using only numpy.
import numpy as np
import scipy.linalg
has_scipy = True
except ImportError as e:
has_scipy = False
for n in 2, 3, 5, 6, 10:
g = margulis_gabber_galil_graph(n)
assert_equal(number_of_nodes(g), n*n)
for node in g:
assert_equal(g.degree(node), 8)
assert_equal(len(node), 2)
for i in node:
assert_equal(int(i), i)
assert_true(0 <= i < n)
if has_scipy:
# Eigenvalues are already sorted using the scipy eigvalsh,
# but the implementation in numpy does not guarantee order.
w = sorted(scipy.linalg.eigvalsh(adjacency_matrix(g).A))
assert_less(w[-2], 5*np.sqrt(2))
def test_chordal_cycle_graph():
"""Test for the :func:`networkx.chordal_cycle_graph` function."""
if not is_scipy_available:
raise SkipTest('SciPy is not available')
primes = [3, 5, 7, 11]
for p in primes:
G = chordal_cycle_graph(p)
assert_equal(len(G), p)
# TODO The second largest eigenvalue should be smaller than a constant,
# independent of the number of nodes in the graph:
#
# eigs = sorted(scipy.linalg.eigvalsh(adjacency_matrix(G).A))
# assert_less(eigs[-2], ...)
#
def test_margulis_gabber_galil_graph_badinput():
assert_raises(nx.NetworkXError, margulis_gabber_galil_graph, 3,
nx.DiGraph())
assert_raises(nx.NetworkXError, margulis_gabber_galil_graph, 3,
nx.Graph())
| bsd-3-clause | -8,240,160,307,509,666,000 | 33.680556 | 79 | 0.645575 | false | 3.516901 | true | false | false |
xthirtyfive/gamemod | guiprovider.py | 1 | 2081 | # Copyright 2013 X35
#
# This file is part of gamemod.
#
# gamemod is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gamemod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gamemod. If not, see <http:#www.gnu.org/licenses/>.
from requestcounter import requestcounter
from debug import debug
# provide the gamemod gui & server list
class guiprovider:
FILECHECK_INTERVAL = 60*60 # 1h
DBGTAG = "guiprovider"
DBGTAG_REQUEST = DBGTAG+"/request"
DBGTAG_REPLY = DBGTAG+"/reply"
LIST_REQUEST = "list"
READABLELIST_REQUEST = "readablelist"
def __init__(self, reqfunc):
self.reqfunc = reqfunc
self.counter = requestcounter()
def request(self, readable=False):
return self.reqfunc(readable)
def onrequest(self, line, addr, build): # return (reply, close)
if line == guiprovider.LIST_REQUEST:
debug.msg(guiprovider.DBGTAG_REQUEST, "%s request from %s:%d (%sbuild)" % ((line,)+addr+("" if build else "don't ",)))
self.counter.add(addr[0])
s = (self.request() if build else True)
debug.msg(guiprovider.DBGTAG_REQUEST, "sending reply to %s request to %s:%d" % ((line,)+addr))
return s, True
elif line == guiprovider.READABLELIST_REQUEST:
debug.msg(guiprovider.DBGTAG_REQUEST, "%s request from %s:%d (%sbuild)" % ((line,)+addr+("" if build else "don't ",)))
s = (self.request(True) if build else True)
debug.msg(guiprovider.DBGTAG_REQUEST, "sending reply to %s request to %s:%d" % ((line,)+addr))
return s, True
return None, False
def differentips(self):
return self.counter.differentips()
def requests(self):
return self.counter.requests()
| gpl-3.0 | 8,101,542,888,332,923,000 | 35.508772 | 121 | 0.701105 | false | 3.340289 | false | false | false |
romankagan/DDBWorkbench | plugins/hg4idea/testData/bin/hgext/acl.py | 91 | 10362 | # acl.py - changeset access control for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''hooks for controlling repository access
This hook makes it possible to allow or deny write access to given
branches and paths of a repository when receiving incoming changesets
via pretxnchangegroup and pretxncommit.
The authorization is matched based on the local user name on the
system where the hook runs, and not the committer of the original
changeset (since the latter is merely informative).
The acl hook is best used along with a restricted shell like hgsh,
preventing authenticating users from doing anything other than pushing
or pulling. The hook is not safe to use if users have interactive
shell access, as they can then disable the hook. Nor is it safe if
remote users share an account, because then there is no way to
distinguish them.
The order in which access checks are performed is:
1) Deny list for branches (section ``acl.deny.branches``)
2) Allow list for branches (section ``acl.allow.branches``)
3) Deny list for paths (section ``acl.deny``)
4) Allow list for paths (section ``acl.allow``)
The allow and deny sections take key-value pairs.
Branch-based Access Control
---------------------------
Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
have branch-based access control. Keys in these sections can be
either:
- a branch name, or
- an asterisk, to match any branch;
The corresponding values can be either:
- a comma-separated list containing users and groups, or
- an asterisk, to match anyone;
You can add the "!" prefix to a user or group name to invert the sense
of the match.
Path-based Access Control
-------------------------
Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
access control. Keys in these sections accept a subtree pattern (with
a glob syntax by default). The corresponding values follow the same
syntax as the other sections above.
Groups
------
Group names must be prefixed with an ``@`` symbol. Specifying a group
name has the same effect as specifying all the users in that group.
You can define group members in the ``acl.groups`` section.
If a group name is not defined there, and Mercurial is running under
a Unix-like system, the list of users will be taken from the OS.
Otherwise, an exception will be raised.
Example Configuration
---------------------
::
[hooks]
# Use this if you want to check access restrictions at commit time
pretxncommit.acl = python:hgext.acl.hook
# Use this if you want to check access restrictions for pull, push,
# bundle and serve.
pretxnchangegroup.acl = python:hgext.acl.hook
[acl]
# Allow or deny access for incoming changes only if their source is
# listed here, let them pass otherwise. Source is "serve" for all
# remote access (http or ssh), "push", "pull" or "bundle" when the
# related commands are run locally.
# Default: serve
sources = serve
[acl.deny.branches]
# Everyone is denied to the frozen branch:
frozen-branch = *
# A bad user is denied on all branches:
* = bad-user
[acl.allow.branches]
# A few users are allowed on branch-a:
branch-a = user-1, user-2, user-3
# Only one user is allowed on branch-b:
branch-b = user-1
# The super user is allowed on any branch:
* = super-user
# Everyone is allowed on branch-for-tests:
branch-for-tests = *
[acl.deny]
# This list is checked first. If a match is found, acl.allow is not
# checked. All users are granted access if acl.deny is not present.
# Format for both lists: glob pattern = user, ..., @group, ...
# To match everyone, use an asterisk for the user:
# my/glob/pattern = *
# user6 will not have write access to any file:
** = user6
# Group "hg-denied" will not have write access to any file:
** = @hg-denied
# Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
# everyone being able to change all other files. See below.
src/main/resources/DONT-TOUCH-THIS.txt = *
[acl.allow]
# if acl.allow is not present, all users are allowed by default
# empty acl.allow = no users allowed
# User "doc_writer" has write access to any file under the "docs"
# folder:
docs/** = doc_writer
# User "jack" and group "designers" have write access to any file
# under the "images" folder:
images/** = jack, @designers
# Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
# will have write access to any file under the "resources" folder
# (except for 1 file. See acl.deny):
src/main/resources/** = *
.hgtags = release_engineer
Examples using the "!" prefix
.............................
Suppose there's a branch that only a given user (or group) should be able to
push to, and you don't want to restrict access to any other branch that may
be created.
The "!" prefix allows you to prevent anyone except a given user or group to
push changesets in a given branch or path.
In the examples below, we will:
1) Deny access to branch "ring" to anyone but user "gollum"
2) Deny access to branch "lake" to anyone but members of the group "hobbit"
3) Deny access to a file to anyone but user "gollum"
::
[acl.allow.branches]
# Empty
[acl.deny.branches]
# 1) only 'gollum' can commit to branch 'ring';
# 'gollum' and anyone else can still commit to any other branch.
ring = !gollum
# 2) only members of the group 'hobbit' can commit to branch 'lake';
# 'hobbit' members and anyone else can still commit to any other branch.
lake = !@hobbit
# You can also deny access based on file paths:
[acl.allow]
# Empty
[acl.deny]
# 3) only 'gollum' can change the file below;
# 'gollum' and anyone else can still change any other file.
/misty/mountains/cave/ring = !gollum
'''
from mercurial.i18n import _
from mercurial import util, match
import getpass, urllib
testedwith = 'internal'
def _getusers(ui, group):
# First, try to use group definition from section [acl.groups]
hgrcusers = ui.configlist('acl.groups', group)
if hgrcusers:
return hgrcusers
ui.debug('acl: "%s" not defined in [acl.groups]\n' % group)
# If no users found in group definition, get users from OS-level group
try:
return util.groupmembers(group)
except KeyError:
raise util.Abort(_("group '%s' is undefined") % group)
def _usermatch(ui, user, usersorgroups):
if usersorgroups == '*':
return True
for ug in usersorgroups.replace(',', ' ').split():
if ug.startswith('!'):
# Test for excluded user or group. Format:
# if ug is a user name: !username
# if ug is a group name: !@groupname
ug = ug[1:]
if not ug.startswith('@') and user != ug \
or ug.startswith('@') and user not in _getusers(ui, ug[1:]):
return True
# Test for user or group. Format:
# if ug is a user name: username
# if ug is a group name: @groupname
elif user == ug \
or ug.startswith('@') and user in _getusers(ui, ug[1:]):
return True
return False
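# Illustrative examples (added for clarity) of the "!" prefix handled above:
#   _usermatch(ui, 'gollum', '!gollum')  -> False  (the named user is exempt)
#   _usermatch(ui, 'frodo',  '!gollum')  -> True   (everyone else matches)
#   _usermatch(ui, 'frodo',  '*')        -> True
# so an acl.deny.branches entry such as "ring = !gollum" ends up denying every
# user except gollum, as in the docstring examples.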
def buildmatch(ui, repo, user, key):
'''return tuple of (match function, list enabled).'''
if not ui.has_section(key):
ui.debug('acl: %s not enabled\n' % key)
return None
pats = [pat for pat, users in ui.configitems(key)
if _usermatch(ui, user, users)]
ui.debug('acl: %s enabled, %d entries for user %s\n' %
(key, len(pats), user))
# Branch-based ACL
if not repo:
if pats:
# If there's an asterisk (meaning "any branch"), always return True;
# Otherwise, test if b is in pats
if '*' in pats:
return util.always
return lambda b: b in pats
return util.never
# Path-based ACL
if pats:
return match.match(repo.root, '', pats)
return util.never
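# Illustrative sketch, not part of the original extension: with a section such
# as
#
#   [acl.deny.branches]
#   ring = !gollum
#
# buildmatch(ui, None, 'gollum', 'acl.deny.branches') collects no patterns for
# "gollum" (the "!" entry excludes him), so it returns util.never and he is not
# denied; any other user does match the entry, receives "lambda b: b in
# ['ring']" and is therefore denied on the "ring" branch.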
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
if hooktype not in ['pretxnchangegroup', 'pretxncommit']:
raise util.Abort(_('config error - hook type "%s" cannot stop '
'incoming changesets nor commits') % hooktype)
if (hooktype == 'pretxnchangegroup' and
source not in ui.config('acl', 'sources', 'serve').split()):
ui.debug('acl: changes have source "%s" - skipping\n' % source)
return
user = None
if source == 'serve' and 'url' in kwargs:
url = kwargs['url'].split(':')
if url[0] == 'remote' and url[1].startswith('http'):
user = urllib.unquote(url[3])
if user is None:
user = getpass.getuser()
ui.debug('acl: checking access for user "%s"\n' % user)
cfg = ui.config('acl', 'config')
if cfg:
ui.readconfig(cfg, sections = ['acl.groups', 'acl.allow.branches',
'acl.deny.branches', 'acl.allow', 'acl.deny'])
allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
allow = buildmatch(ui, repo, user, 'acl.allow')
deny = buildmatch(ui, repo, user, 'acl.deny')
for rev in xrange(repo[node], len(repo)):
ctx = repo[rev]
branch = ctx.branch()
if denybranches and denybranches(branch):
raise util.Abort(_('acl: user "%s" denied on branch "%s"'
' (changeset "%s")')
% (user, branch, ctx))
if allowbranches and not allowbranches(branch):
raise util.Abort(_('acl: user "%s" not allowed on branch "%s"'
' (changeset "%s")')
% (user, branch, ctx))
ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
% (ctx, branch))
for f in ctx.files():
if deny and deny(f):
raise util.Abort(_('acl: user "%s" denied on "%s"'
' (changeset "%s")') % (user, f, ctx))
if allow and not allow(f):
raise util.Abort(_('acl: user "%s" not allowed on "%s"'
' (changeset "%s")') % (user, f, ctx))
ui.debug('acl: path access granted: "%s"\n' % ctx)
| apache-2.0 | 5,239,691,406,254,036,000 | 31.791139 | 80 | 0.641382 | false | 3.684922 | true | false | false |
jhu-lcsr-forks/ogre | Tools/Blender2.6Export/ogre_mesh_exporter/mesh_impl.py | 16 | 16080 | # ##### BEGIN MIT LICENSE BLOCK #####
# Copyright (C) 2011 by Lih-Hern Pang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ##### END MIT LICENSE BLOCK #####
# ########################################################################
# See mesh_exporter.py for explanation.
# ########################################################################
import bpy, mathutils
from ogre_mesh_exporter.log_manager import LogManager, Message
from operator import attrgetter
# Mesh export settings class to define how we are going to export the mesh.
class MeshExportSettings():
def __init__(self, fixUpAxisToY = True, requireMaterials = True, applyModifiers = False, skeletonNameFollowMesh = True, runOgreXMLConverter = True):
self.fixUpAxisToY = fixUpAxisToY
self.requireMaterials = requireMaterials
self.applyModifiers = applyModifiers
self.skeletonNameFollowMesh = skeletonNameFollowMesh
self.runOgreXMLConverter = runOgreXMLConverter
@classmethod
def fromRNA(cls, meshObject):
globalSettings = bpy.context.scene.ogre_mesh_exporter
meshSettings = meshObject.data.ogre_mesh_exporter
return MeshExportSettings(
fixUpAxisToY = globalSettings.fixUpAxisToY,
requireMaterials = meshSettings.requireMaterials if (meshSettings.requireMaterials_override) else globalSettings.requireMaterials,
applyModifiers = meshSettings.applyModifiers if (meshSettings.applyModifiers_override) else globalSettings.applyModifiers,
skeletonNameFollowMesh = meshSettings.skeletonNameFollowMesh if (meshSettings.skeletonNameFollowMesh_override) else globalSettings.skeletonNameFollowMesh,
runOgreXMLConverter = globalSettings.runOgreXMLConverter)
class BoneWeight():
def __init__(self, boneIndex, boneWeight):
self.mBoneIndex = boneIndex
self.mBoneWeight = boneWeight
class Vertex():
def __init__(self, pos, norm, uvs = list(), colors = list(), boneWeights = list()):
self.mPosition = pos
self.mNormal = norm
self.mUVs = uvs
self.mColors = colors
self.mBoneWeights = boneWeights
def match(self, norm, uvs, colors):
# Test normal.
if (self.mNormal != norm): return False;
# Test UVs.
		if (len(self.mUVs) != len(uvs)): return False
for uv1, uv2 in zip(self.mUVs, uvs):
if (uv1 != uv2): return False
# Test Colors.
		if (len(self.mColors) != len(colors)): return False
for color1, color2 in zip(self.mColors, colors):
if (color1 != color2): return False
return True
class VertexBuffer():
def __init__(self, uvLayers = 0, colorLayers = 0, hasBoneWeights = False):
# Vertex data.
self.mVertexData = list()
self.mUVLayers = uvLayers
self.mColorLayers = colorLayers
self.mHasBoneWeights = hasBoneWeights
# Blender mesh -> vertex index link.
# Only useful when exporting.
self.mMeshVertexIndexLink = dict()
def reset(self, uvLayers, colorLayers, hasBoneWeights = False):
self.mVertexData = list()
self.mUVLayers = uvLayers
self.mColorLayers = colorLayers
self.mHasBoneWeights = hasBoneWeights
def vertexCount(self):
return len(self.mVertexData)
# This method adds a vertex from the given blend mesh index into the buffer.
# If the uv information does not match the recorded vertex, it will automatically
# clone a new vertex for use.
def addVertex(self, index, pos, norm, uvs, colors, boneWeights = list(), fixUpAxisToY = True):
# Fix Up axis to Y (swap Y and Z and negate Z)
if (fixUpAxisToY):
pos = [pos[0], pos[2], -pos[1]]
norm = [norm[0], norm[2], -norm[1]]
# make sure uv layers and color layers matches as defined.
if (len(uvs) != self.mUVLayers or len(colors) != self.mColorLayers):
raise Exception("Invalid UV layer or Color layer count! Expecting uv(%d), color(%d). Got uv(%d), color(%d)" %
(self.mUVLayers, self.mColorLayers, len(uvs), len(colors)))
# try to find pre added vertex that matches criteria.
if (index in self.mMeshVertexIndexLink):
localIndexList = self.mMeshVertexIndexLink[index]
for localIndex in localIndexList:
if (self.mVertexData[localIndex].match(norm, uvs, colors)):
return localIndex
# nothing found. so we add a new vertex.
localIndex = len(self.mVertexData)
if (index not in self.mMeshVertexIndexLink): self.mMeshVertexIndexLink[index] = list()
self.mMeshVertexIndexLink[index].append(localIndex)
self.mVertexData.append(Vertex(pos, norm, uvs, colors, boneWeights))
return localIndex
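	# Illustrative sketch, not part of the exporter: addVertex() only clones a
	# new local vertex when the normal/UV/colour data differs for the same
	# blend-mesh index (assuming a buffer with one UV layer and no colours):
	#
	#   vb = VertexBuffer(uvLayers = 1)
	#   vb.addVertex(0, (0, 0, 0), (0, 0, 1), [(0.0, 0.0)], [])   # -> 0
	#   vb.addVertex(0, (0, 0, 0), (0, 0, 1), [(0.5, 0.5)], [])   # -> 1 (cloned)
	#   vb.addVertex(0, (0, 0, 0), (0, 0, 1), [(0.0, 0.0)], [])   # -> 0 (reused)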
def serialize(self, file, indent = ''):
extraAttributes = ''
uvLayerCount = 8 if (self.mUVLayers > 8) else self.mUVLayers
if (uvLayerCount > 0):
extraAttributes = ' texture_coords="%d"' % uvLayerCount
for i in range(uvLayerCount):
extraAttributes += ' texture_coord_dimensions_%d="float2"' % i
colorLayerCount = self.mColorLayers
if (colorLayerCount > 0): extraAttributes += ' colours_diffuse="true"'
if (colorLayerCount > 1): extraAttributes += ' colours_specular="true"'
file.write('%s<vertexbuffer positions="true" normals="true"%s>\n' % (indent, extraAttributes))
for vertex in self.mVertexData:
file.write('%s\t<vertex>\n' % indent)
# write position and normal.
file.write('%s\t\t<position x="%.6f" y="%.6f" z="%.6f" />\n' % (indent, vertex.mPosition[0], vertex.mPosition[1], vertex.mPosition[2]))
file.write('%s\t\t<normal x="%.6f" y="%.6f" z="%.6f" />\n' % (indent, vertex.mNormal[0], vertex.mNormal[1], vertex.mNormal[2]))
# write UV layers. (NOTE: Blender uses bottom left coord! Ogre uses top left! So we have to flip Y.)
for i in range(uvLayerCount):
uv = vertex.mUVs[i]
file.write('%s\t\t<texcoord u="%.6f" v="%.6f" />\n' % (indent, uv[0], (1.0 - uv[1])))
# write diffuse.
if (colorLayerCount > 0):
color = vertex.mColors[0]
file.write('%s\t\t<colour_diffuse value="%.6f %.6f %.6f" />\n' % (indent, color[0], color[1], color[2]))
# write specular.
if (colorLayerCount > 1):
color = vertex.mColors[1]
file.write('%s\t\t<colour_diffuse value="%.6f %.6f %.6f" />\n' % (indent, color[0], color[1], color[2]))
file.write('%s\t</vertex>\n' % indent)
file.write('%s</vertexbuffer>\n' % indent)
def serializeBoneAssignments(self, file, indent = ''):
file.write('%s\t<boneassignments>\n' % indent)
vertexWithNoBoneAssignements = 0;
for i, vertex in enumerate(self.mVertexData):
if (len(vertex.mBoneWeights) == 0): vertexWithNoBoneAssignements += 1
for boneWeight in vertex.mBoneWeights:
file.write('%s\t\t<vertexboneassignment vertexindex="%d" boneindex="%d" weight="%.6f" />\n' %
(indent, i, boneWeight.mBoneIndex, boneWeight.mBoneWeight))
if (vertexWithNoBoneAssignements > 0):
LogManager.logMessage("There are %d vertices with no bone assignements!" % vertexWithNoBoneAssignements, Message.LVL_WARNING)
file.write('%s\t</boneassignments>\n' % indent)
class SubMesh():
def __init__(self, vertexBuffer = None, meshVertexIndexLink = None, name = None):
# True if submesh is sharing vertex buffer.
self.mShareVertexBuffer = False
# Vertex buffer.
self.mVertexBuffer = vertexBuffer if (vertexBuffer) else VertexBuffer()
# Blender mesh -> local/shared vertex index link.
self.mMeshVertexIndexLink = meshVertexIndexLink if (meshVertexIndexLink) else dict()
# Face data.
self.mFaceData = list()
# Blender material.
self.mMaterial = None
# Name of submesh
self.mName = name
if ((vertexBuffer is not None) and (meshVertexIndexLink is not None)):
self.mShareVertexBuffer = True
def insertPolygon(self, blendMesh, polygon, blendVertexGroups = None, ogreSkeleton = None, fixUpAxisToY = True):
polygonVertices = polygon.vertices
polygonVertexCount = polygon.loop_total
# extract uv information.
# Here we convert blender uv data into our own
# uv information that lists uvs by vertices.
blendUVLoopLayers = blendMesh.uv_layers
# construct empty polygon vertex uv list.
polygonVertUVs = list()
for i in range(polygonVertexCount): polygonVertUVs.append(list())
for uvLoopLayer in blendUVLoopLayers:
for i, loopIndex in enumerate(polygon.loop_indices):
polygonVertUVs[i].append(uvLoopLayer.data[loopIndex].uv)
# extract color information.
# Here we convert blender color data into our own
# color information that lists colors by vertices.
blendColorLoopLayers = blendMesh.vertex_colors
# construct empty polygon vertex color list.
polygonVertColors = list()
for i in range(polygonVertexCount): polygonVertColors.append(list())
for colorLoopLayer in blendColorLoopLayers:
for i, loopIndex in enumerate(polygon.loop_indices):
polygonVertColors[i].append(colorLoopLayer.data[loopIndex].color)
# loop through the vertices and add to this submesh.
localIndices = list()
useSmooth = polygon.use_smooth
for index, uvs, colors in zip(polygonVertices, polygonVertUVs, polygonVertColors):
vertex = blendMesh.vertices[index]
norm = vertex.normal if (useSmooth) else polygon.normal
# grab bone weights.
boneWeights = list()
if (ogreSkeleton is not None):
for groupElement in vertex.groups:
groupName = blendVertexGroups[groupElement.group].name
boneIndex = ogreSkeleton.getBoneIndex(groupName)
if (boneIndex == -1 or abs(groupElement.weight) < 0.000001): continue
boneWeight = groupElement.weight
boneWeights.append(BoneWeight(boneIndex, boneWeight))
# trim bone weight count if too many defined.
if (len(boneWeights) > 4):
LogManager.logMessage("More than 4 bone weights are defined for a vertex! Best 4 will be used.", Message.LVL_WARNING)
boneWeights.sort(key=attrgetter('mBoneWeight'), reverse=True)
while (len(boneWeights) > 4): del boneWeights[-1]
localIndices.append(self.mVertexBuffer.addVertex(index, vertex.co, norm, uvs, colors, boneWeights, fixUpAxisToY))
# construct triangle index data.
		if (polygonVertexCount == 3):
self.mFaceData.append(localIndices)
else:
# split quad into triangles.
self.mFaceData.append(localIndices[:3])
self.mFaceData.append([localIndices[0], localIndices[2], localIndices[3]])
def serialize(self, file):
vertexCount = self.mVertexBuffer.vertexCount()
materialAttribute = '' if (self.mMaterial is None) else ' material="%s"' % self.mMaterial.name
file.write('\t\t<submesh%s usesharedvertices="%s" use32bitindexes="%s">\n' %
(materialAttribute, 'true' if self.mShareVertexBuffer else 'false',
'true' if (vertexCount > 65536) else 'false'))
# write face data.
file.write('\t\t\t<faces count="%d">\n' % len(self.mFaceData))
for face in self.mFaceData:
file.write('\t\t\t\t<face v1="%d" v2="%d" v3="%d" />\n' % tuple(face))
file.write('\t\t\t</faces>\n')
# write submesh vertex buffer if not shared.
if (not self.mShareVertexBuffer):
file.write('\t\t\t<geometry vertexcount="%d">\n' % vertexCount)
self.mVertexBuffer.serialize(file, '\t\t\t\t')
file.write('\t\t\t</geometry>\n')
# write bone assignments
			if (self.mVertexBuffer.mHasBoneWeights):
				self.mVertexBuffer.serializeBoneAssignments(file, '\t\t\t')
file.write('\t\t</submesh>\n')
class Mesh():
def __init__(self, blendMesh = None, blendVertexGroups = None, ogreSkeleton = None, exportSettings = MeshExportSettings()):
# shared vertex buffer.
self.mSharedVertexBuffer = VertexBuffer()
# Blender mesh -> shared vertex index link.
self.mSharedMeshVertexIndexLink = dict()
# collection of submeshes.
self.mSubMeshDict = dict()
# skip blend mesh conversion if no blend mesh passed in.
if (blendMesh is None): return
self.mOgreSkeleton = ogreSkeleton
hasBoneWeights = ogreSkeleton is not None
# Lets do some pre checking to show warnings if needed.
uvLayerCount = len(blendMesh.uv_layers)
colorLayerCount = len(blendMesh.vertex_colors)
if (uvLayerCount > 8): LogManager.logMessage("More than 8 UV layers in this mesh. Only 8 will be exported.", Message.LVL_WARNING)
if (colorLayerCount > 2): LogManager.logMessage("More than 2 color layers in this mesh. Only 2 will be exported.", Message.LVL_WARNING)
# setup shared vertex buffer.
self.mSharedVertexBuffer.reset(uvLayerCount, colorLayerCount, hasBoneWeights)
# split up the mesh into submeshes by materials.
# we first get sub mesh shared vertices option.
materialList = blendMesh.materials
materialCount = len(materialList)
subMeshProperties = blendMesh.ogre_mesh_exporter.subMeshProperties
while (len(subMeshProperties) < materialCount): subMeshProperties.add() # add more items if needed.
while (len(subMeshProperties) > materialCount): subMeshProperties.remove(0) # remove items if needed.
LogManager.logMessage("Material Count: %d" % len(materialList), Message.LVL_INFO)
for polygon in blendMesh.polygons:
# get or create submesh.
if (polygon.material_index in self.mSubMeshDict):
subMesh = self.mSubMeshDict[polygon.material_index]
else:
				# instantiate submesh based on whether it shares vertices or not.
subMeshProperty = subMeshProperties[polygon.material_index]
if (subMeshProperty.useSharedVertices):
subMesh = SubMesh(self.mSharedVertexBuffer, self.mSharedMeshVertexIndexLink, subMeshProperty.name)
else:
subMesh = SubMesh(VertexBuffer(uvLayerCount, colorLayerCount, hasBoneWeights), name = subMeshProperty.name)
subMesh.mMaterial = None if (len(materialList) == 0) else materialList[polygon.material_index]
if (exportSettings.requireMaterials and subMesh.mMaterial == None):
LogManager.logMessage("Some faces are not assigned with a material!", Message.LVL_WARNING)
LogManager.logMessage("To hide this warning, please uncheck the 'Require Materials' option.", Message.LVL_WARNING)
self.mSubMeshDict[polygon.material_index] = subMesh
# insert polygon.
subMesh.insertPolygon(blendMesh, polygon, blendVertexGroups, ogreSkeleton, exportSettings.fixUpAxisToY)
def serialize(self, file):
file.write('<mesh>\n')
# write shared vertex buffer if available.
sharedVertexCount = self.mSharedVertexBuffer.vertexCount()
if (sharedVertexCount > 0):
file.write('\t<sharedgeometry vertexcount="%d">\n' % sharedVertexCount)
self.mSharedVertexBuffer.serialize(file, '\t\t')
file.write('\t</sharedgeometry>\n')
# write bone assignments
if (self.mSharedVertexBuffer.mHasBoneWeights):
self.mSharedVertexBuffer.serializeBoneAssignments(file, '\t\t')
subMeshNames = list()
# write submeshes.
file.write('\t<submeshes>\n')
for subMesh in self.mSubMeshDict.values():
name = subMesh.mName
if (name):
if (not name in subMeshNames):
subMeshNames.append(name)
else:
LogManager.logMessage("Mulitple submesh with same name defined: %s" % name, Message.LVL_WARNING)
subMesh.serialize(file)
file.write('\t</submeshes>\n')
# write submesh names
if (len(subMeshNames)):
file.write('\t<submeshnames>\n')
for index, name in enumerate(subMeshNames):
file.write('\t\t<submeshname name="%s" index="%d" />\n' % (name, index))
file.write('\t</submeshnames>\n')
# write skeleton link
if (self.mOgreSkeleton is not None):
file.write('\t<skeletonlink name="%s.skeleton" />\n' % self.mOgreSkeleton.mName)
file.write('</mesh>\n')
| mit | 1,976,643,602,277,208,600 | 41.204724 | 157 | 0.721891 | false | 3.266965 | false | false | false |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/encodings/cp1255.py | 272 | 12466 | """ Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1255',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\u20aa' # 0xA4 -> NEW SHEQEL SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xd7' # 0xAA -> MULTIPLICATION SIGN
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xf7' # 0xBA -> DIVISION SIGN
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\u05b0' # 0xC0 -> HEBREW POINT SHEVA
'\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
'\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
'\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
'\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
'\u05b5' # 0xC5 -> HEBREW POINT TSERE
'\u05b6' # 0xC6 -> HEBREW POINT SEGOL
'\u05b7' # 0xC7 -> HEBREW POINT PATAH
'\u05b8' # 0xC8 -> HEBREW POINT QAMATS
'\u05b9' # 0xC9 -> HEBREW POINT HOLAM
'\ufffe' # 0xCA -> UNDEFINED
'\u05bb' # 0xCB -> HEBREW POINT QUBUTS
'\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
'\u05bd' # 0xCD -> HEBREW POINT METEG
'\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
'\u05bf' # 0xCF -> HEBREW POINT RAFE
'\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
'\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
'\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
'\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
'\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
'\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
'\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
'\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
'\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
'\ufffe' # 0xD9 -> UNDEFINED
'\ufffe' # 0xDA -> UNDEFINED
'\ufffe' # 0xDB -> UNDEFINED
'\ufffe' # 0xDC -> UNDEFINED
'\ufffe' # 0xDD -> UNDEFINED
'\ufffe' # 0xDE -> UNDEFINED
'\ufffe' # 0xDF -> UNDEFINED
'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
'\u05d1' # 0xE1 -> HEBREW LETTER BET
'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
'\u05d3' # 0xE3 -> HEBREW LETTER DALET
'\u05d4' # 0xE4 -> HEBREW LETTER HE
'\u05d5' # 0xE5 -> HEBREW LETTER VAV
'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
'\u05d7' # 0xE7 -> HEBREW LETTER HET
'\u05d8' # 0xE8 -> HEBREW LETTER TET
'\u05d9' # 0xE9 -> HEBREW LETTER YOD
'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
'\u05db' # 0xEB -> HEBREW LETTER KAF
'\u05dc' # 0xEC -> HEBREW LETTER LAMED
'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
'\u05de' # 0xEE -> HEBREW LETTER MEM
'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
'\u05e0' # 0xF0 -> HEBREW LETTER NUN
'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
'\u05e4' # 0xF4 -> HEBREW LETTER PE
'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
'\u05e7' # 0xF7 -> HEBREW LETTER QOF
'\u05e8' # 0xF8 -> HEBREW LETTER RESH
'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
'\u05ea' # 0xFA -> HEBREW LETTER TAV
'\ufffe' # 0xFB -> UNDEFINED
'\ufffe' # 0xFC -> UNDEFINED
'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit | 5,207,704,298,974,207,000 | 39.605863 | 119 | 0.520777 | false | 2.931107 | false | false | false |
shakamunyi/neutron | neutron/db/metering/metering_rpc.py | 46 | 2075 | # Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging
from neutron.common import constants as consts
from neutron.common import utils
from neutron.i18n import _LE
from neutron import manager
from neutron.plugins.common import constants as service_constants
LOG = logging.getLogger(__name__)
class MeteringRpcCallbacks(object):
target = oslo_messaging.Target(version='1.0')
def __init__(self, meter_plugin):
self.meter_plugin = meter_plugin
def get_sync_data_metering(self, context, **kwargs):
l3_plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if not l3_plugin:
return
host = kwargs.get('host')
if not utils.is_extension_supported(
l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host:
return self.meter_plugin.get_sync_data_metering(context)
else:
agents = l3_plugin.get_l3_agents(context, filters={'host': [host]})
if not agents:
LOG.error(_LE('Unable to find agent %s.'), host)
return
routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id)
router_ids = [router['id'] for router in routers['routers']]
if not router_ids:
return
return self.meter_plugin.get_sync_data_metering(context,
router_ids=router_ids)
| apache-2.0 | -1,353,348,655,059,363,800 | 36.053571 | 79 | 0.660241 | false | 3.975096 | false | false | false |
cedadev/cis | cis/test/integration/test_eval.py | 3 | 7743 | import netCDF4
import numpy
from hamcrest import assert_that, is_
import unittest
from cis.cis_main import evaluate_cmd, col_cmd
from cis.test.integration.base_integration_test import BaseIntegrationTest
from cis.test.integration_test_data import *
from cis.parse import parse_args
from cis.test.unit.eval.test_calc import compare_masked_arrays
class TestEval(BaseIntegrationTest):
def test_Aeronet_wavelength_calculation(self):
# Example from the CIS Phase 3 Software spec:
# ... a user should be able to write a plugin to calculate the Aeronet AOD at 550nm from the AOD at 500 nm as
# AOD550 = AOD500 * (550/500)^(-1*Angstrom500-870)"
# Takes 3s
args = ['eval', 'AOT_500,500-870Angstrom=a550to870:' + escape_colons(another_valid_aeronet_filename),
'AOT_500 * (550.0/500)**(-1*a550to870)', '1', '-o', self.OUTPUT_FILENAME]
arguments = parse_args(args)
evaluate_cmd(arguments)
# Check correct:
self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
calculated_result = self.ds.variables['calculated_variable'][:]
expected_result = [0.2341039087, 0.2285401152, 0.2228799533, 0.1953746746, 0.2094051561, 0.1696889668,
0.3137791803, 0.2798929273, 0.1664194279, 0.1254619092, 0.1258309124, 0.1496960031,
0.0768447737, 0.0550896430, 0.0534543107, 0.0538315909, 0.0666742975, 0.0512935449,
0.0699585189, 0.0645033944]
assert_that(calculated_result.shape, is_((3140,)))
assert numpy.allclose(expected_result, calculated_result[0:20])
def test_ECHAMHAM_wavelength_sum(self):
args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2, escape_colons(valid_echamham_filename)),
'%s+%s' % (valid_echamham_variable_1, valid_echamham_variable_2), '1', '-o', self.OUTPUT_FILENAME]
arguments = parse_args(args)
evaluate_cmd(arguments)
# Check correct:
self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
calculated_result = self.ds.variables['calculated_variable'][:]
# A hand calculated selection of values
expected_result = [0.007633533, 0.007646653, 0.007749859, 0.007744226, 0.007761176]
assert_that(calculated_result.shape, is_((96, 192)))
assert numpy.allclose(expected_result, calculated_result[:][0][0:5])
def test_collocated_NetCDF_Gridded_onto_GASSP(self):
# First do a collocation of ECHAMHAM onto GASSP
vars = valid_echamham_variable_1, valid_echamham_variable_2
filename = escape_colons(valid_echamham_filename)
sample_file = escape_colons(valid_GASSP_aeroplane_filename)
sample_var = valid_GASSP_aeroplane_variable
collocator_and_opts = 'nn[missing_data_for_missing_sample=True],variable=%s' % sample_var
arguments = ['col', ",".join(vars) + ':' + filename,
sample_file + ':collocator=' + collocator_and_opts,
'-o', 'collocated_gassp']
main_arguments = parse_args(arguments)
col_cmd(main_arguments)
# Check collocation is the same
self.ds = netCDF4.Dataset('collocated_gassp.nc')
col_var1 = self.ds.variables[valid_echamham_variable_1][:]
col_var2 = self.ds.variables[valid_echamham_variable_2][:]
# A hand calculated selection of values
expected_col1 = numpy.ma.masked_invalid(
[float('Nan'), float('Nan'), float('Nan'), 0.0814601778984, 0.0814601778984])
compare_masked_arrays(expected_col1, col_var1[:][0:5])
expected_col2 = numpy.ma.masked_invalid(
[float('Nan'), float('Nan'), float('Nan'), 0.0741240680218, 0.0741240680218])
compare_masked_arrays(expected_col2, col_var2[:][0:5])
# Then do an evaluation using the collocated data:
args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2,
'collocated_gassp.nc'),
"%s=gassp_alias:%s" % (valid_GASSP_aeroplane_variable, escape_colons(valid_GASSP_aeroplane_filename)),
"(%s + %s) / gassp_alias " % (valid_echamham_variable_1, valid_echamham_variable_2),
'1', '-o', self.OUTPUT_FILENAME]
arguments = parse_args(args)
evaluate_cmd(arguments)
self.ds.close()
# Check correct
self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
calculated_result = self.ds.variables['calculated_variable'][:]
# A hand calculated selection of values
expected_result = numpy.ma.masked_invalid([0.00196121983491, 0.00197255626472, 0.00120850731992])
assert_that(calculated_result.shape, is_((311,)))
        # Check the first 3 valid values
compare_masked_arrays(expected_result, calculated_result[:][10:13])
os.remove('collocated_gassp.nc')
@skip_pyhdf
def test_CloudSat(self):
args = ['eval', "%s,%s:%s" % (valid_cloudsat_RVOD_sdata_variable, valid_cloudsat_RVOD_vdata_variable,
escape_colons(valid_cloudsat_RVOD_file)),
'%s/%s' % (valid_cloudsat_RVOD_sdata_variable, valid_cloudsat_RVOD_vdata_variable), 'ppm', '-o',
'cloudsat_var:' + self.OUTPUT_FILENAME]
arguments = parse_args(args)
evaluate_cmd(arguments)
self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
assert_that(self.ds.variables['cloudsat_var'].units, is_('ppm'))
def test_can_specify_output_variable(self):
args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2, escape_colons(valid_echamham_filename)),
'%s+%s' % (valid_echamham_variable_1, valid_echamham_variable_2), 'kg m^-3',
'-o', 'var_out:' + self.OUTPUT_FILENAME]
arguments = parse_args(args)
evaluate_cmd(arguments)
self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
assert 'var_out' in self.ds.variables
def test_can_specify_attributes_gridded(self):
args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2, escape_colons(valid_echamham_filename)),
'%s+%s' % (valid_echamham_variable_1, valid_echamham_variable_2), 'kg m^-3',
'-o', 'var_out:' + self.OUTPUT_FILENAME, '-a', 'att1=val1,att2=val2']
arguments = parse_args(args)
evaluate_cmd(arguments)
self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
assert_that(self.ds.variables['var_out'].att1, is_('val1'))
assert_that(self.ds.variables['var_out'].att2, is_('val2'))
def test_can_specify_units_gridded(self):
args = ['eval', "%s,%s:%s" % (valid_echamham_variable_1, valid_echamham_variable_2, escape_colons(valid_echamham_filename)),
'%s+%s' % (valid_echamham_variable_1, valid_echamham_variable_2), 'kg m^-3',
'-o', 'var_out:' + self.OUTPUT_FILENAME, '-a', 'att1=val1,att2=val2']
arguments = parse_args(args)
evaluate_cmd(arguments)
self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
assert_that(self.ds.variables['var_out'].units, is_('kg m^-3'))
def test_can_specify_units_gridded_no_output_var(self):
args = ['eval', "%s:%s" % (valid_hadgem_variable, escape_colons(valid_hadgem_filename)), "od550aer", "ppm", "-o",
self.OUTPUT_FILENAME, "-a", "att1=val1"]
arguments = parse_args(args)
evaluate_cmd(arguments)
self.ds = netCDF4.Dataset(self.OUTPUT_FILENAME)
assert_that(self.ds.variables['calculated_variable'].units, is_('ppm'))
assert_that(self.ds.variables['calculated_variable'].att1, is_('val1'))
| lgpl-3.0 | 4,635,982,652,704,962,000 | 51.673469 | 132 | 0.62818 | false | 3.202233 | true | false | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/template/loaders/app_directories.py | 1 | 1602 | """
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import io
from django.core.exceptions import SuspiciousFileOperation
from django.template.base import TemplateDoesNotExist
from django.template.utils import get_app_template_dirs
from django.utils._os import safe_join
from .base import Loader as BaseLoader
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = get_app_template_dirs('templates')
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
pass
def load_template_source(self, template_name, template_dirs=None):
for filepath in self.get_template_sources(template_name, template_dirs):
try:
with io.open(filepath, encoding=self.engine.file_charset) as fp:
return fp.read(), filepath
except IOError:
pass
raise TemplateDoesNotExist(template_name)
| mit | 3,960,476,586,008,162,300 | 36.142857 | 80 | 0.642322 | false | 4.616715 | false | false | false |
beatrizjesus/my-first-blog | pasta/Lib/site-packages/django/contrib/redirects/migrations/0001_initial.py | 142 | 1271 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Redirect',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('site', models.ForeignKey(to='sites.Site', to_field='id')),
('old_path', models.CharField(help_text="This should be an absolute path, excluding the domain name. Example: '/events/search/'.", max_length=200, verbose_name='redirect from', db_index=True)),
('new_path', models.CharField(help_text="This can be either an absolute path (as above) or a full URL starting with 'http://'.", max_length=200, verbose_name='redirect to', blank=True)),
],
options={
'ordering': ('old_path',),
'unique_together': set([('site', 'old_path')]),
'db_table': 'django_redirect',
'verbose_name': 'redirect',
'verbose_name_plural': 'redirects',
},
bases=(models.Model,),
),
]
| mit | -7,705,647,044,940,095,000 | 40 | 209 | 0.556255 | false | 4.279461 | false | false | false |
Vogeltak/pauselan | lib/python3.4/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py | 59 | 3387 | # mysql/gaerdbms.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+gaerdbms
:name: Google Cloud SQL
:dbapi: rdbms
:connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
:url: https://developers.google.com/appengine/docs/python/cloud-sql/\
developers-guide
This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
minimal changes.
.. versionadded:: 0.7.8
.. deprecated:: 1.0 This dialect is **no longer necessary** for
Google Cloud SQL; the MySQLdb dialect can be used directly.
Cloud SQL now recommends creating connections via the
mysql dialect using the URL format
``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
Pooling
-------
Google App Engine connections appear to be randomly recycled,
so the dialect does not pool connections. The :class:`.NullPool`
implementation is installed within the :class:`.Engine` by
default.
"""
import os
from .mysqldb import MySQLDialect_mysqldb
from ...pool import NullPool
import re
from sqlalchemy.util import warn_deprecated
def _is_dev_environment():
return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
@classmethod
def dbapi(cls):
warn_deprecated(
"Google Cloud SQL now recommends creating connections via the "
"MySQLdb dialect directly, using the URL format "
"mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/"
"<projectid>:<instancename>"
)
# from django:
# http://code.google.com/p/googleappengine/source/
# browse/trunk/python/google/storage/speckle/
# python/django/backend/base.py#118
# see also [ticket:2649]
# see also http://stackoverflow.com/q/14224679/34549
from google.appengine.api import apiproxy_stub_map
if _is_dev_environment():
from google.appengine.api import rdbms_mysqldb
return rdbms_mysqldb
elif apiproxy_stub_map.apiproxy.GetStub('rdbms'):
from google.storage.speckle.python.api import rdbms_apiproxy
return rdbms_apiproxy
else:
from google.storage.speckle.python.api import rdbms_googleapi
return rdbms_googleapi
@classmethod
def get_pool_class(cls, url):
# Cloud SQL connections die at any moment
return NullPool
def create_connect_args(self, url):
opts = url.translate_connect_args()
if not _is_dev_environment():
# 'dsn' and 'instance' are because we are skipping
# the traditional google.api.rdbms wrapper
opts['dsn'] = ''
opts['instance'] = url.query['instance']
return [], opts
def _extract_error_code(self, exception):
match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
# The rdbms api will wrap then re-raise some types of errors
# making this regex return no matches.
code = match.group(1) or match.group(2) if match else None
if code:
return int(code)
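# Illustrative sketch, not part of the dialect: _extract_error_code() pulls the
# leading numeric code out of DB-API error strings; hypothetical examples such
# as "2013: Lost connection to MySQL server" and "(1045L, 'Access denied')"
# would yield 2013 and 1045, while strings without a leading code return None.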
dialect = MySQLDialect_gaerdbms
| gpl-2.0 | -222,051,937,422,292,350 | 32.205882 | 91 | 0.658105 | false | 3.86203 | false | false | false |
numericillustration/sdc-imgapi | tools/manatee-diff/manatee2images.py | 2 | 1948 | #!/usr/bin/env python
"""
Take a imgapi_images-*.gz manatee table dump and emit a JSON array of images.
Usage:
gzcat imgapi_images-2014-11-15-00-01-56.gz | ./manatee2images.py > images.json
"""
import json
import sys
import operator
from pprint import pprint
import codecs
# TODO: ideally we wouldn't hardcode types here. This should come from
# the imgapi_images bucket definition.
type_from_key = {
'billing_tags': 'array',
'published_at': 'string',
'acl': 'array',
'public': 'bool',
}
def update_img_from_index(img, entry, header, key):
try:
type = type_from_key[key]
idx = header.index(key) # cache this?
val = entry[idx]
# Postgres NULL
if val == '\\N':
if key in img:
del img[key]
return
if type == 'array' and val.startswith('{') and val.endswith('}'):
# Hack parsing of postgres arrays.
val = [tag for tag in val[1:-1].split(',') if tag]
elif type == 'bool':
if val == 't':
val = True
elif val == 'f':
val = False
else:
raise RuntimeError(
'unexpected index value for "%s" bool field: %r'
% (key, val))
img[key] = val
except ValueError:
pass
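# Illustrative sketch, not part of the original script (these column names are
# made up): given header = ['bucket', 'key', 'etag', '_value', 'acl', 'public']
# and entry = ['imgapi_images', 'uuid', 'x', '{...}', '{uuid1,uuid2}', 't'],
# update_img_from_index(img, entry, header, 'acl') sets
# img['acl'] = ['uuid1', 'uuid2'] and 'public' becomes True, while a postgres
# NULL value ('\\N') deletes the key from img instead.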
header = None
published_at_idx = None
acl_idx = None
imgs = []
for line in sys.stdin:
if header is None:
header = json.loads(line)['keys']
assert header[3] == '_value'
continue
entry = json.loads(line)['entry']
img = json.loads(entry[3])
# Apply some of the index values.
# TODO: eventually should do all of these
for key in ['billing_tags', 'published_at', 'acl', 'public']:
update_img_from_index(img, entry, header, key)
imgs.append(img)
imgs.sort(key=operator.itemgetter('uuid'))
print json.dumps(imgs, sort_keys=True, indent=4)
| mpl-2.0 | 3,893,063,082,592,945,000 | 23.974359 | 82 | 0.563655 | false | 3.580882 | false | false | false |
lancezlin/ml_template_py | lib/python2.7/site-packages/wheel/metadata.py | 93 | 11676 | """
Tools for converting old- to new-style metadata.
"""
from collections import namedtuple
from .pkginfo import read_pkg_info
from .util import OrderedDefaultDict
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
import re
import os.path
import textwrap
import pkg_resources
import email.parser
from . import __version__ as wheel_version
METADATA_VERSION = "2.0"
PLURAL_FIELDS = { "classifier" : "classifiers",
"provides_dist" : "provides",
"provides_extra" : "extras" }
SKIP_FIELDS = set()
CONTACT_FIELDS = (({"email":"author_email", "name": "author"},
"author"),
({"email":"maintainer_email", "name": "maintainer"},
"maintainer"))
# commonly filled out as "UNKNOWN" by distutils:
UNKNOWN_FIELDS = set(("author", "author_email", "platform", "home_page",
"license"))
# Wheel itself is probably the only program that uses non-extras markers
# in METADATA/PKG-INFO. Support its syntax with the extra at the end only.
EXTRA_RE = re.compile("""^(?P<package>.*?)(;\s*(?P<condition>.*?)(extra == '(?P<extra>.*?)')?)$""")
KEYWORDS_RE = re.compile("[\0-,]+")
MayRequiresKey = namedtuple('MayRequiresKey', ('condition', 'extra'))
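# Illustrative sketch, not part of wheel: for a Requires-Dist value such as
#   "pytest (>=2.8); python_version < '3.0' and extra == 'test'"
# EXTRA_RE captures package="pytest (>=2.8)",
# condition="python_version < '3.0' and " and extra="test"; handle_requires()
# below strips the trailing " and " before recording the environment marker.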
def unique(iterable):
"""
Yield unique values in iterable, preserving order.
"""
seen = set()
for value in iterable:
if not value in seen:
seen.add(value)
yield value
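# Illustrative sketch, not part of wheel: unique() keeps only the first
# occurrence of each value while preserving order, e.g.
#   >>> list(unique('abracadabra'))
#   ['a', 'b', 'r', 'c', 'd']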
def handle_requires(metadata, pkg_info, key):
"""
Place the runtime requirements from pkg_info into metadata.
"""
may_requires = OrderedDefaultDict(list)
for value in sorted(pkg_info.get_all(key)):
extra_match = EXTRA_RE.search(value)
if extra_match:
groupdict = extra_match.groupdict()
condition = groupdict['condition']
extra = groupdict['extra']
package = groupdict['package']
if condition.endswith(' and '):
condition = condition[:-5]
else:
condition, extra = None, None
package = value
key = MayRequiresKey(condition, extra)
may_requires[key].append(package)
if may_requires:
metadata['run_requires'] = []
def sort_key(item):
# Both condition and extra could be None, which can't be compared
# against strings in Python 3.
key, value = item
if key.condition is None:
return ''
return key.condition
for key, value in sorted(may_requires.items(), key=sort_key):
may_requirement = OrderedDict((('requires', value),))
if key.extra:
may_requirement['extra'] = key.extra
if key.condition:
may_requirement['environment'] = key.condition
metadata['run_requires'].append(may_requirement)
if not 'extras' in metadata:
metadata['extras'] = []
metadata['extras'].extend([key.extra for key in may_requires.keys() if key.extra])
def pkginfo_to_dict(path, distribution=None):
"""
Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.
The description is included under the key ['description'] rather than
being written to a separate file.
path: path to PKG-INFO file
distribution: optional distutils Distribution()
"""
metadata = OrderedDefaultDict(lambda: OrderedDefaultDict(lambda: OrderedDefaultDict(OrderedDict)))
metadata["generator"] = "bdist_wheel (" + wheel_version + ")"
try:
unicode
pkg_info = read_pkg_info(path)
except NameError:
with open(path, 'rb') as pkg_info_file:
pkg_info = email.parser.Parser().parsestr(pkg_info_file.read().decode('utf-8'))
description = None
if pkg_info['Summary']:
metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
del pkg_info['Summary']
if pkg_info['Description']:
description = dedent_description(pkg_info)
del pkg_info['Description']
else:
payload = pkg_info.get_payload()
if isinstance(payload, bytes):
# Avoid a Python 2 Unicode error.
# We still suffer ? glyphs on Python 3.
payload = payload.decode('utf-8')
if payload:
description = payload
if description:
pkg_info['description'] = description
for key in sorted(unique(k.lower() for k in pkg_info.keys())):
low_key = key.replace('-', '_')
if low_key in SKIP_FIELDS:
continue
if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
continue
if low_key in sorted(PLURAL_FIELDS):
metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)
elif low_key == "requires_dist":
handle_requires(metadata, pkg_info, key)
elif low_key == 'provides_extra':
if not 'extras' in metadata:
metadata['extras'] = []
metadata['extras'].extend(pkg_info.get_all(key))
elif low_key == 'home_page':
metadata['extensions']['python.details']['project_urls'] = {'Home':pkg_info[key]}
elif low_key == 'keywords':
metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])
else:
metadata[low_key] = pkg_info[key]
metadata['metadata_version'] = METADATA_VERSION
if 'extras' in metadata:
metadata['extras'] = sorted(set(metadata['extras']))
# include more information if distribution is available
if distribution:
for requires, attr in (('test_requires', 'tests_require'),):
try:
requirements = getattr(distribution, attr)
if isinstance(requirements, list):
new_requirements = sorted(convert_requirements(requirements))
metadata[requires] = [{'requires':new_requirements}]
except AttributeError:
pass
# handle contacts
contacts = []
for contact_type, role in CONTACT_FIELDS:
contact = OrderedDict()
for key in sorted(contact_type):
if contact_type[key] in metadata:
contact[key] = metadata.pop(contact_type[key])
if contact:
contact['role'] = role
contacts.append(contact)
if contacts:
metadata['extensions']['python.details']['contacts'] = contacts
# convert entry points to exports
try:
with open(os.path.join(os.path.dirname(path), "entry_points.txt"), "r") as ep_file:
ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
exports = OrderedDict()
for group, items in sorted(ep_map.items()):
exports[group] = OrderedDict()
for item in sorted(map(str, items.values())):
name, export = item.split(' = ', 1)
exports[group][name] = export
if exports:
metadata['extensions']['python.exports'] = exports
except IOError:
pass
# copy console_scripts entry points to commands
if 'python.exports' in metadata['extensions']:
for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
('gui_scripts', 'wrap_gui')):
if ep_script in metadata['extensions']['python.exports']:
metadata['extensions']['python.commands'][wrap_script] = \
metadata['extensions']['python.exports'][ep_script]
return metadata
def requires_to_requires_dist(requirement):
"""Compose the version predicates for requirement in PEP 345 fashion."""
requires_dist = []
for op, ver in requirement.specs:
requires_dist.append(op + ver)
if not requires_dist:
return ''
return " (%s)" % ','.join(requires_dist)
def convert_requirements(requirements):
"""Yield Requires-Dist: strings for parsed requirements strings."""
for req in requirements:
parsed_requirement = pkg_resources.Requirement.parse(req)
spec = requires_to_requires_dist(parsed_requirement)
extras = ",".join(parsed_requirement.extras)
if extras:
extras = "[%s]" % extras
yield (parsed_requirement.project_name + extras + spec)
def generate_requirements(extras_require):
"""
Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement')
and ('Provides-Extra', 'extra') tuples.
extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
using the empty extra {'': [requirements]} to hold install_requires.
"""
for extra, depends in extras_require.items():
condition = ''
if extra and ':' in extra: # setuptools extra:condition syntax
extra, condition = extra.split(':', 1)
extra = pkg_resources.safe_extra(extra)
if extra:
yield ('Provides-Extra', extra)
if condition:
condition += " and "
condition += "extra == '%s'" % extra
if condition:
condition = '; ' + condition
for new_req in convert_requirements(depends):
yield ('Requires-Dist', new_req + condition)
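# Illustrative sketch, not part of wheel: a setup()-style mapping such as
#   {'': ['six'], "test:sys_platform=='win32'": ['mock']}
# makes generate_requirements() yield (dict ordering aside)
#   ('Requires-Dist', 'six')
#   ('Provides-Extra', 'test')
#   ('Requires-Dist', "mock; sys_platform=='win32' and extra == 'test'")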
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
"""
Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
old-draft Metadata 2.0 format.
"""
pkg_info = read_pkg_info(pkginfo_path)
pkg_info.replace_header('Metadata-Version', '2.0')
requires_path = os.path.join(egg_info_path, 'requires.txt')
if os.path.exists(requires_path):
with open(requires_path) as requires_file:
requires = requires_file.read()
for extra, reqs in sorted(pkg_resources.split_sections(requires),
key=lambda x: x[0] or ''):
for item in generate_requirements({extra: reqs}):
pkg_info[item[0]] = item[1]
description = pkg_info['Description']
if description:
pkg_info.set_payload(dedent_description(pkg_info))
del pkg_info['Description']
return pkg_info
def pkginfo_unicode(pkg_info, field):
"""Hack to coax Unicode out of an email Message() - Python 3.3+"""
text = pkg_info[field]
field = field.lower()
if not isinstance(text, str):
if not hasattr(pkg_info, 'raw_items'): # Python 3.2
return str(text)
for item in pkg_info.raw_items():
if item[0].lower() == field:
text = item[1].encode('ascii', 'surrogateescape')\
.decode('utf-8')
break
return text
def dedent_description(pkg_info):
"""
Dedent and convert pkg_info['Description'] to Unicode.
"""
description = pkg_info['Description']
# Python 3 Unicode handling, sorta.
surrogates = False
if not isinstance(description, str):
surrogates = True
description = pkginfo_unicode(pkg_info, 'Description')
description_lines = description.splitlines()
description_dedent = '\n'.join(
# if the first line of long_description is blank,
# the first line here will be indented.
(description_lines[0].lstrip(),
textwrap.dedent('\n'.join(description_lines[1:])),
'\n'))
if surrogates:
description_dedent = description_dedent\
.encode("utf8")\
.decode("ascii", "surrogateescape")
return description_dedent
if __name__ == "__main__":
import sys, pprint
pprint.pprint(pkginfo_to_dict(sys.argv[1]))
| mit | -7,892,011,049,403,554,000 | 34.063063 | 102 | 0.593011 | false | 4.236575 | false | false | false |
nicholaschris/landsatpy | stuff.py | 1 | 1864 | import cloud_detection_new as cloud_detection
from matplotlib import pyplot as plt
import views
from skimage import exposure
nir = cloud_detection.get_nir()[0:600,2000:2600]
red = cloud_detection.get_red()[0:600,2000:2600]
green = cloud_detection.get_green()[0:600,2000:2600]
blue = cloud_detection.get_blue()[0:600,2000:2600] # or use coastal
coastal = cloud_detection.get_coastal()[0:600,2000:2600]
marine_shadow_index = (green-blue)/(green+blue)
img = views.create_composite(red, green, blue)
img_rescale = exposure.rescale_intensity(img, in_range=(0, 90))
plt.rcParams['savefig.facecolor'] = "0.8"
vmin, vmax = 0.0, 0.1
def example_plot(ax, data, fontsize=12):
ax.imshow(data, vmin=vmin, vmax=vmax)
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
plt.close('all')
fig = plt.figure()
ax1=plt.subplot(243)
ax2=plt.subplot(244)
ax3=plt.subplot(247)
ax4=plt.subplot(248)
ax5=plt.subplot(121)
a_coastal = coastal[500:600, 500:600]
a_blue = blue[500:600, 500:600]
a_green = green[500:600, 500:600]
a_red = red[500:600, 500:600]
a_nir = nir[500:600, 500:600]
a_img = img[500:600, 500:600]
spec1 = [a_coastal[60, 60], a_blue[60, 60], a_green[60, 60], a_red[60, 60], a_nir[60, 60]]
b_coastal = coastal[200:300, 100:200]
b_blue = blue[200:300, 100:200]
b_green = green[200:300, 100:200]
b_red = red[200:300, 100:200]
b_nir = nir[200:300, 100:200]
b_img = img[200:300, 100:200]
example_plot(ax1, coastal)
example_plot(ax2, blue)
example_plot(ax3, green)
example_plot(ax4, red)
ax5.imshow(img)
# plt.tight_layout()
plt.close('all')
spec = [b_coastal[60, 60], b_blue[60, 60], b_green[60, 60], b_red[60, 60], b_nir[60, 60]]
plt.plot(spec, 'k*-')
plt.plot(spec1, 'k.-')
plt.close('all')
cbg = (coastal+blue+green)/3
plt.imshow(cbg/red) | mit | -3,928,246,446,012,056,600 | 27.692308 | 90 | 0.689914 | false | 2.368488 | false | false | false |
koyuawsmbrtn/eclock | windows/kivy/kivy/core/image/img_dds.py | 54 | 1048 | '''
DDS: DDS image loader
'''
__all__ = ('ImageLoaderDDS', )
from kivy.lib.ddsfile import DDSFile
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderDDS(ImageLoaderBase):
@staticmethod
def extensions():
return ('dds', )
def load(self, filename):
try:
dds = DDSFile(filename=filename)
except:
Logger.warning('Image: Unable to load image <%s>' % filename)
raise
self.filename = filename
width, height = dds.size
im = ImageData(width, height, dds.dxt, dds.images[0], source=filename,
flip_vertical=False)
if len(dds.images) > 1:
images = dds.images
images_size = dds.images_size
for index in range(1, len(dds.images)):
w, h = images_size[index]
data = images[index]
im.add_mipmap(index, w, h, data)
return [im]
# register
ImageLoader.register(ImageLoaderDDS)
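# Illustrative usage note (hypothetical file name, not from the upstream file):
# once registered, the loader is picked up automatically by Kivy's image
# machinery, e.g.:
#
#     from kivy.core.image import Image as CoreImage
#     tex = CoreImage("compressed_texture.dds").texture
#
# The DDS payload (including any mipmaps added above) is uploaded as-is, so the
# GPU must support the corresponding DXT/S3TC compression format.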
| gpl-2.0 | -1,542,359,741,957,030,400 | 25.871795 | 78 | 0.583015 | false | 3.716312 | false | false | false |
dudonwai/dudonsblog | Lib/site-packages/django/contrib/gis/db/models/sql/conversion.py | 308 | 2015 | """
This module holds simple classes to convert geospatial values from the
database.
"""
from django.contrib.gis.db.models.fields import GeoSelectFormatMixin
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
class BaseField(object):
empty_strings_allowed = True
def get_db_converters(self, connection):
return [self.from_db_value]
def select_format(self, compiler, sql, params):
return sql, params
class AreaField(BaseField):
"Wrapper for Area values."
def __init__(self, area_att):
self.area_att = area_att
def from_db_value(self, value, expression, connection, context):
if value is not None:
value = Area(**{self.area_att: value})
return value
def get_internal_type(self):
return 'AreaField'
class DistanceField(BaseField):
"Wrapper for Distance values."
def __init__(self, distance_att):
self.distance_att = distance_att
def from_db_value(self, value, expression, connection, context):
if value is not None:
value = Distance(**{self.distance_att: value})
return value
def get_internal_type(self):
return 'DistanceField'
class GeomField(GeoSelectFormatMixin, BaseField):
"""
Wrapper for Geometry values. It is a lightweight alternative to
using GeometryField (which requires an SQL query upon instantiation).
"""
# Hacky marker for get_db_converters()
geom_type = None
def from_db_value(self, value, expression, connection, context):
if value is not None:
value = Geometry(value)
return value
def get_internal_type(self):
return 'GeometryField'
class GMLField(BaseField):
"""
Wrapper for GML to be used by Oracle to ensure Database.LOB conversion.
"""
def get_internal_type(self):
return 'GMLField'
def from_db_value(self, value, expression, connection, context):
return value
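# Illustrative sketch (hypothetical helper, not from the upstream file): these
# wrapper "fields" are used internally by GeoDjango's SQL compiler, which feeds
# raw database values through from_db_value(). A minimal call looks like:
def _example_distance_conversion():
    field = DistanceField('m')  # raw values interpreted as metres
    return field.from_db_value(1500.0, None, None, None)  # -> Distance(m=1500.0)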
| mit | -1,638,714,960,052,173,300 | 25.866667 | 75 | 0.667494 | false | 4.078947 | false | false | false |
JamesShaeffer/QGIS | tests/src/python/test_qgsmessagelog.py | 30 | 3449 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMessageLog.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '18/06/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
from qgis.core import (Qgis,
QgsApplication,
QgsMessageLog,
QgsMessageLogNotifyBlocker)
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
app = start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsMessageLog(unittest.TestCase):
def testSignals(self):
app_log = QgsApplication.messageLog()
# signals should be emitted by application log
app_spy = QSignalSpy(app_log.messageReceived)
app_spy_received = QSignalSpy(app_log.messageReceived[bool])
QgsMessageLog.logMessage('test', 'tag', Qgis.Info, notifyUser=True)
self.assertEqual(len(app_spy), 1)
self.assertEqual(app_spy[-1], ['test', 'tag', Qgis.Info])
# info message, so messageReceived(bool) should not be emitted
self.assertEqual(len(app_spy_received), 0)
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(app_spy), 2)
self.assertEqual(app_spy[-1], ['test', 'tag', Qgis.Warning])
# warning message, so messageReceived(bool) should be emitted
self.assertEqual(len(app_spy_received), 1)
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=False)
self.assertEqual(len(app_spy), 3)
# notifyUser was False
self.assertEqual(len(app_spy_received), 1)
def testBlocker(self):
app_log = QgsApplication.messageLog()
spy = QSignalSpy(app_log.messageReceived)
spy_received = QSignalSpy(app_log.messageReceived[bool])
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 1)
self.assertEqual(spy[-1], ['test', 'tag', Qgis.Warning])
self.assertEqual(len(spy_received), 1)
# block notifications
b = QgsMessageLogNotifyBlocker()
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 2) # should not be blocked
self.assertEqual(len(spy_received), 1) # should be blocked
# another blocker
b2 = QgsMessageLogNotifyBlocker()
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 3) # should not be blocked
self.assertEqual(len(spy_received), 1) # should be blocked
del b
# still blocked because of b2
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 4) # should not be blocked
self.assertEqual(len(spy_received), 1) # should be blocked
del b2
# not blocked
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 5) # should not be blocked
self.assertEqual(len(spy_received), 2) # should not be blocked
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -103,974,553,227,338,910 | 36.48913 | 79 | 0.655552 | false | 3.700644 | true | false | false |
hophacker/bitcoin_malleability | contrib/bitrpc/bitrpc.py | 46 | 9207 | from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| mit | 148,013,364,626,363,460 | 27.329231 | 80 | 0.568481 | false | 3.937981 | false | false | false |
Monika319/EWEF-1 | Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ5W2.py | 1 | 1312 | # -*- coding: utf-8 -*-
"""
Plot oscilloscope files from MultiSim
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from matplotlib import rc
rc('font',family="Consolas")
files=["real_zad5_05f_p2.txt"]
for NazwaPliku in files:
print NazwaPliku
Plik=open(NazwaPliku)
#print DeltaT
Dane=Plik.readlines()#[4:]
DeltaT=float(Dane[2].split()[3].replace(",","."))
#M=len(Dane[4].split())/2
M=2
Dane=Dane[5:]
Plik.close()
print M
Ys=[np.zeros(len(Dane)) for i in range(M)]
for m in range(M):
for i in range(len(Dane)):
try:
Ys[m][i]=float(Dane[i].split()[2+3*m].replace(",","."))
except:
print m, i, 2+3*m, len(Dane[i].split()), Dane[i].split()
#print i, Y[i]
X=np.zeros_like(Ys[0])
for i in range(len(X)):
X[i]=i*DeltaT
for y in Ys:
print max(y)-min(y)
Opis=u"Układ szeregowy\nPołowa częstotliwości rezonansowej"
Nazwa=u"Z5W2"
plt.title(u"Przebieg napięciowy\n"+Opis)
plt.xlabel(u"Czas t [s]")
plt.ylabel(u"Napięcie [V]")
plt.plot(X,Ys[0],label=u"Wejście")
plt.plot(X,Ys[1],label=u"Wyjście")
plt.grid()
plt.legend(loc="best")
plt.savefig(Nazwa + ".png", bbox_inches='tight')
plt.show()
| gpl-2.0 | -1,034,745,225,697,455,700 | 23.603774 | 72 | 0.578221 | false | 2.414815 | false | false | false |
catapult-project/catapult | telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend.py | 3 | 21781 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import logging
import re
import socket
import sys
import six
from py_utils import exc_util
from py_utils import retry_util
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome_inspector import devtools_http
from telemetry.internal.backends.chrome_inspector import inspector_backend
from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.backends.chrome_inspector import memory_backend
from telemetry.internal.backends.chrome_inspector import system_info_backend
from telemetry.internal.backends.chrome_inspector import tracing_backend
from telemetry.internal.backends.chrome_inspector import window_manager_backend
from telemetry.internal.platform.tracing_agent import (
chrome_tracing_devtools_manager)
class TabNotFoundError(exceptions.Error):
pass
class UnsupportedVersionError(exceptions.Error):
pass
# Only versions of Chrome from M58 and above are supported. Older versions
# did not support many of the modern features currently in use by Telemetry.
MIN_SUPPORTED_BRANCH_NUMBER = 3029
# The first WebSocket connections or calls against a newly-started
# browser, specifically in Debug builds, can take a long time. Give
# them 60s to complete instead of the default 10s used in many places
# in this file.
_FIRST_CALL_TIMEOUT = 60
# These are possible exceptions raised when the DevTools agent is not ready
# to accept incoming connections.
_DEVTOOLS_CONNECTION_ERRORS = (
devtools_http.DevToolsClientConnectionError,
inspector_websocket.WebSocketException,
socket.error)
def GetDevToolsBackEndIfReady(devtools_port, app_backend, browser_target=None, enable_tracing=True):
client = _DevToolsClientBackend(app_backend)
try:
client.Connect(devtools_port, browser_target, enable_tracing)
logging.info('DevTools agent ready at %s', client)
except _DEVTOOLS_CONNECTION_ERRORS as exc:
logging.info('DevTools agent at %s not ready yet: %s', client, exc)
client = None
return client
class FuchsiaBrowserTargetNotFoundException(Exception):
pass
class _DevToolsClientBackend(object):
"""An object that communicates with Chrome's devtools.
This class owns a map of InspectorBackends. It is responsible for creating
and destroying them.
"""
def __init__(self, app_backend):
"""Create an object able to connect with the DevTools agent.
Args:
app_backend: The app that contains the DevTools agent.
"""
self._app_backend = app_backend
self._browser_target = None
self._forwarder = None
self._devtools_http = None
self._browser_websocket = None
self._created = False
self._local_port = None
self._remote_port = None
# Other backends.
self._tracing_backend = None
self._memory_backend = None
self._system_info_backend = None
self._wm_backend = None
self._devtools_context_map_backend = _DevToolsContextMapBackend(self)
def __str__(self):
s = self.browser_target_url
if self.local_port != self.remote_port:
s = '%s (remote=%s)' % (s, self.remote_port)
return s
@property
def local_port(self):
return self._local_port
@property
def remote_port(self):
return self._remote_port
@property
def browser_target_url(self):
# For Fuchsia browsers, we get the browser_target through a JSON request
if self.platform_backend.GetOSName() == 'fuchsia':
resp = self.GetVersion()
if 'webSocketDebuggerUrl' in resp:
return resp['webSocketDebuggerUrl']
else:
raise FuchsiaBrowserTargetNotFoundException(
'Could not get the browser target.')
return 'ws://127.0.0.1:%i%s' % (self._local_port, self._browser_target)
@property
def app_backend(self):
return self._app_backend
@property
def platform_backend(self):
return self._app_backend.platform_backend
@property
def supports_overriding_memory_pressure_notifications(self):
return (
isinstance(self.app_backend, browser_backend.BrowserBackend)
and self.app_backend.supports_overriding_memory_pressure_notifications)
@property
def is_tracing_running(self):
return self._tracing_backend.is_tracing_running
@property
def has_tracing_client(self):
return self._tracing_backend != None
def Connect(self, devtools_port, browser_target, enable_tracing=True):
try:
self._Connect(devtools_port, browser_target, enable_tracing)
except:
self.Close() # Close any connections made if failed to connect to all.
raise
@retry_util.RetryOnException(devtools_http.DevToolsClientUrlError, retries=3)
def _WaitForConnection(self, retries=None):
del retries
self._devtools_http.Request('')
def _SetUpPortForwarding(self, devtools_port):
self._forwarder = self.platform_backend.forwarder_factory.Create(
local_port=None, # Forwarder will choose an available port.
remote_port=devtools_port, reverse=True)
self._local_port = self._forwarder._local_port
self._remote_port = self._forwarder._remote_port
self._devtools_http = devtools_http.DevToolsHttp(self.local_port)
# For Fuchsia, wait until port forwarding has started working.
if self.platform_backend.GetOSName() == 'fuchsia':
self._WaitForConnection()
def _Connect(self, devtools_port, browser_target, enable_tracing):
"""Attempt to connect to the DevTools client.
Args:
devtools_port: The devtools_port uniquely identifies the DevTools agent.
browser_target: An optional string to override the default path used to
establish a websocket connection with the browser inspector.
enable_tracing: Defines if a tracing_client is created.
Raises:
Any of _DEVTOOLS_CONNECTION_ERRORS if failed to establish the connection.
"""
self._browser_target = browser_target or '/devtools/browser'
self._SetUpPortForwarding(devtools_port)
# If the agent is not alive and ready, trying to get the branch number will
# raise a devtools_http.DevToolsClientConnectionError.
branch_number = self.GetChromeBranchNumber()
if branch_number < MIN_SUPPORTED_BRANCH_NUMBER:
raise UnsupportedVersionError(
'Chrome branch number %d is no longer supported' % branch_number)
# Ensure that the inspector websocket is ready. This may raise a
# inspector_websocket.WebSocketException or socket.error if not ready.
self._browser_websocket = inspector_websocket.InspectorWebsocket()
self._browser_websocket.Connect(self.browser_target_url, timeout=10)
chrome_tracing_devtools_manager.RegisterDevToolsClient(self)
# If there is a trace_config it means that Telemetry has already started
    # Chrome tracing via a startup config. The TracingBackend also needs
# this config to initialize itself correctly.
if enable_tracing:
trace_config = (
self.platform_backend.tracing_controller_backend.GetChromeTraceConfig())
self._tracing_backend = tracing_backend.TracingBackend(
self._browser_websocket, trace_config)
@exc_util.BestEffort
def Close(self):
if self._tracing_backend is not None:
self._tracing_backend.Close()
self._tracing_backend = None
if self._memory_backend is not None:
self._memory_backend.Close()
self._memory_backend = None
if self._system_info_backend is not None:
self._system_info_backend.Close()
self._system_info_backend = None
if self._wm_backend is not None:
self._wm_backend.Close()
self._wm_backend = None
if self._devtools_context_map_backend is not None:
self._devtools_context_map_backend.Clear()
self._devtools_context_map_backend = None
# Close the DevTools connections last (in case the backends above still
# need to interact with them while closing).
if self._browser_websocket is not None:
self._browser_websocket.Disconnect()
self._browser_websocket = None
if self._devtools_http is not None:
self._devtools_http.Disconnect()
self._devtools_http = None
if self._forwarder is not None:
self._forwarder.Close()
self._forwarder = None
def CloseBrowser(self):
"""Close the browser instance."""
request = {
'method': 'Browser.close',
}
self._browser_websocket.SyncRequest(request, timeout=60)
def IsAlive(self):
"""Whether the DevTools server is available and connectable."""
if self._devtools_http is None:
return False
try:
self._devtools_http.Request('')
except devtools_http.DevToolsClientConnectionError:
return False
else:
return True
@decorators.Cache
def GetVersion(self):
"""Return the version dict as provided by the DevTools agent."""
return self._devtools_http.RequestJson('version')
def GetChromeBranchNumber(self):
# Detect version information.
resp = self.GetVersion()
if 'Protocol-Version' in resp:
if 'Browser' in resp:
branch_number_match = re.search(r'.+/\d+\.\d+\.(\d+)\.\d+',
resp['Browser'])
if not branch_number_match and 'User-Agent' in resp:
branch_number_match = re.search(
r'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
resp['User-Agent'])
if branch_number_match:
branch_number = int(branch_number_match.group(1))
if branch_number:
return branch_number
# Branch number can't be determined, so fail any branch number checks.
return 0
def _ListInspectableContexts(self):
return self._devtools_http.RequestJson('')
def RequestNewTab(self, timeout, in_new_window=False, url=None):
"""Creates a new tab, either in new window or current window.
Returns:
A dict of a parsed JSON object as returned by DevTools. Example:
If an error is present, the dict will contain an 'error' key.
If no error is present, the result is present in the 'result' key:
{
"result": {
"targetId": "id-string" # This is the ID for the tab.
}
}
"""
request = {
'method': 'Target.createTarget',
'params': {
'url': url if url else 'about:blank',
'newWindow': in_new_window
}
}
return self._browser_websocket.SyncRequest(request, timeout)
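  # Illustrative caller-side sketch (names hypothetical, not from the upstream
  # file): the DevTools response is typically unpacked like this:
  #
  #   resp = devtools_client.RequestNewTab(timeout=60, url='about:blank')
  #   if 'error' in resp:
  #     raise Exception('Failed to create tab: %s' % resp['error'])
  #   tab_id = resp['result']['targetId']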
def CloseTab(self, tab_id, timeout):
"""Closes the tab with the given id.
Raises:
devtools_http.DevToolsClientConnectionError
TabNotFoundError
"""
try:
return self._devtools_http.Request(
'close/%s' % tab_id, timeout=timeout)
except devtools_http.DevToolsClientUrlError:
error = TabNotFoundError(
'Unable to close tab, tab id not found: %s' % tab_id)
six.reraise(error, None, sys.exc_info()[2])
def ActivateTab(self, tab_id, timeout):
"""Activates the tab with the given id.
Raises:
devtools_http.DevToolsClientConnectionError
TabNotFoundError
"""
try:
return self._devtools_http.Request(
'activate/%s' % tab_id, timeout=timeout)
except devtools_http.DevToolsClientUrlError:
error = TabNotFoundError(
'Unable to activate tab, tab id not found: %s' % tab_id)
six.reraise(error, None, sys.exc_info()[2])
def GetUrl(self, tab_id):
"""Returns the URL of the tab with |tab_id|, as reported by devtools.
Raises:
devtools_http.DevToolsClientConnectionError
"""
for c in self._ListInspectableContexts():
if c['id'] == tab_id:
return c['url']
return None
def IsInspectable(self, tab_id):
"""Whether the tab with |tab_id| is inspectable, as reported by devtools.
Raises:
devtools_http.DevToolsClientConnectionError
"""
contexts = self._ListInspectableContexts()
return tab_id in [c['id'] for c in contexts]
def GetUpdatedInspectableContexts(self):
"""Returns an updated instance of _DevToolsContextMapBackend."""
contexts = self._ListInspectableContexts()
self._devtools_context_map_backend._Update(contexts)
return self._devtools_context_map_backend
def _CreateWindowManagerBackendIfNeeded(self):
if not self._wm_backend:
self._wm_backend = window_manager_backend.WindowManagerBackend(
self._browser_websocket)
def _CreateMemoryBackendIfNeeded(self):
assert self.supports_overriding_memory_pressure_notifications
if not self._memory_backend:
self._memory_backend = memory_backend.MemoryBackend(
self._browser_websocket)
def _CreateSystemInfoBackendIfNeeded(self):
if not self._system_info_backend:
self._system_info_backend = system_info_backend.SystemInfoBackend(
self.browser_target_url)
def StartChromeTracing(self, trace_config, transfer_mode=None, timeout=20):
"""
Args:
trace_config: An tracing_config.TracingConfig instance.
transfer_mode: Defaults to using 'ReturnAsStream' transfer mode
for Chrome tracing. Can be set to 'ReportEvents'.
timeout: Time waited for websocket to receive a response.
"""
if not self._tracing_backend:
return
assert trace_config and trace_config.enable_chrome_trace
return self._tracing_backend.StartTracing(
trace_config.chrome_trace_config, transfer_mode, timeout)
def RecordChromeClockSyncMarker(self, sync_id):
assert self.is_tracing_running, 'Tracing must be running to clock sync.'
self._tracing_backend.RecordClockSyncMarker(sync_id)
def StopChromeTracing(self):
if not self._tracing_backend:
return
assert self.is_tracing_running
try:
backend = self.FirstTabBackend()
if backend is not None:
backend.AddTimelineMarker('first-renderer-thread')
backend.AddTimelineMarker(backend.id)
else:
logging.warning('No page inspector backend found.')
finally:
self._tracing_backend.StopTracing()
def _IterInspectorBackends(self, types):
"""Iterate over inspector backends from this client.
    Note: The devtools client might list contexts which, however, do not yet
have a live DevTools instance to connect to (e.g. background tabs which may
have been discarded or not yet created). In such case this method will hang
and eventually timeout when trying to create an inspector backend to
communicate with such contexts.
"""
context_map = self.GetUpdatedInspectableContexts()
for context in context_map.contexts:
if context['type'] in types:
yield context_map.GetInspectorBackend(context['id'])
def FirstTabBackend(self):
"""Obtain the inspector backend for the firstly created tab."""
return next(self._IterInspectorBackends(['page']), None)
def CollectChromeTracingData(self, trace_data_builder, timeout=120):
if not self._tracing_backend:
return
self._tracing_backend.CollectTraceData(trace_data_builder, timeout)
# This call may be made early during browser bringup and may cause the
# GPU process to launch, which takes a long time in Debug builds and
# has been seen to frequently exceed the default 10s timeout used
# throughout this file. Use a larger timeout by default. Callers
# typically do not override this.
def GetSystemInfo(self, timeout=_FIRST_CALL_TIMEOUT):
self._CreateSystemInfoBackendIfNeeded()
return self._system_info_backend.GetSystemInfo(timeout)
def DumpMemory(self, timeout=None, detail_level=None):
"""Dumps memory.
Args:
timeout: seconds to wait between websocket responses.
detail_level: Level of detail in memory dump. One of ['detailed',
'light', 'background']. Defaults to 'detailed'.
Returns:
GUID of the generated dump if successful, None otherwise.
Raises:
TracingTimeoutException: If more than |timeout| seconds has passed
since the last time any data is received.
TracingUnrecoverableException: If there is a websocket error.
TracingUnexpectedResponseException: If the response contains an error
or does not contain the expected result.
"""
if not self._tracing_backend:
return None
return self._tracing_backend.DumpMemory(
timeout=timeout,
detail_level=detail_level)
def SetMemoryPressureNotificationsSuppressed(self, suppressed, timeout=30):
"""Enable/disable suppressing memory pressure notifications.
Args:
suppressed: If true, memory pressure notifications will be suppressed.
timeout: The timeout in seconds.
Raises:
MemoryTimeoutException: If more than |timeout| seconds has passed
since the last time any data is received.
MemoryUnrecoverableException: If there is a websocket error.
MemoryUnexpectedResponseException: If the response contains an error
or does not contain the expected result.
"""
self._CreateMemoryBackendIfNeeded()
return self._memory_backend.SetMemoryPressureNotificationsSuppressed(
suppressed, timeout)
def SimulateMemoryPressureNotification(self, pressure_level, timeout=30):
"""Simulate a memory pressure notification.
Args:
      pressure_level: The memory pressure level of the notification ('moderate'
or 'critical').
timeout: The timeout in seconds.
Raises:
MemoryTimeoutException: If more than |timeout| seconds has passed
since the last time any data is received.
MemoryUnrecoverableException: If there is a websocket error.
MemoryUnexpectedResponseException: If the response contains an error
or does not contain the expected result.
"""
self._CreateMemoryBackendIfNeeded()
return self._memory_backend.SimulateMemoryPressureNotification(
pressure_level, timeout)
@property
def window_manager_backend(self):
"""Return the window manager backend.
This should be called by a CrOS backend only.
"""
self._CreateWindowManagerBackendIfNeeded()
return self._wm_backend
def ExecuteBrowserCommand(self, command_id, timeout):
request = {
'method': 'Browser.executeBrowserCommand',
'params': {
'commandId': command_id,
}
}
self._browser_websocket.SyncRequest(request, timeout)
def SetDownloadBehavior(self, behavior, downloadPath, timeout):
request = {
'method': 'Browser.setDownloadBehavior',
'params': {
'behavior': behavior,
'downloadPath': downloadPath,
}
}
self._browser_websocket.SyncRequest(request, timeout)
def GetWindowForTarget(self, target_id):
request = {
'method': 'Browser.getWindowForTarget',
'params': {
'targetId': target_id
}
}
return self._browser_websocket.SyncRequest(request, timeout=30)
def SetWindowBounds(self, window_id, bounds):
request = {
'method': 'Browser.setWindowBounds',
'params': {
'windowId': window_id,
'bounds': bounds
}
}
self._browser_websocket.SyncRequest(request, timeout=30)
class _DevToolsContextMapBackend(object):
def __init__(self, devtools_client):
self._devtools_client = devtools_client
self._contexts = None
self._inspector_backends_dict = {}
@property
def contexts(self):
"""The most up to date contexts data.
Returned in the order returned by devtools agent."""
return self._contexts
def GetContextInfo(self, context_id):
for context in self._contexts:
if context['id'] == context_id:
return context
raise KeyError('Cannot find a context with id=%s' % context_id)
def GetInspectorBackend(self, context_id):
"""Gets an InspectorBackend instance for the given context_id.
This lazily creates InspectorBackend for the context_id if it does
not exist yet. Otherwise, it will return the cached instance."""
if context_id in self._inspector_backends_dict:
return self._inspector_backends_dict[context_id]
for context in self._contexts:
if context['id'] == context_id:
new_backend = inspector_backend.InspectorBackend(
self._devtools_client, context)
self._inspector_backends_dict[context_id] = new_backend
return new_backend
raise KeyError('Cannot find a context with id=%s' % context_id)
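  # Illustrative caller-side sketch (not from the upstream file): callers first
  # refresh the context map, then look up a backend by context id:
  #
  #   context_map = devtools_client.GetUpdatedInspectableContexts()
  #   pages = [c for c in context_map.contexts if c['type'] == 'page']
  #   inspector = context_map.GetInspectorBackend(pages[0]['id'])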
def _Update(self, contexts):
# Remove InspectorBackend that is not in the current inspectable
# contexts list.
context_ids = [context['id'] for context in contexts]
for context_id in list(self._inspector_backends_dict.keys()):
if context_id not in context_ids:
backend = self._inspector_backends_dict[context_id]
backend.Disconnect()
del self._inspector_backends_dict[context_id]
valid_contexts = []
for context in contexts:
# If the context does not have webSocketDebuggerUrl, skip it.
# If an InspectorBackend is already created for the tab,
# webSocketDebuggerUrl will be missing, and this is expected.
context_id = context['id']
if context_id not in self._inspector_backends_dict:
if 'webSocketDebuggerUrl' not in context:
logging.debug('webSocketDebuggerUrl missing, removing %s',
context_id)
continue
valid_contexts.append(context)
self._contexts = valid_contexts
def Clear(self):
for backend in self._inspector_backends_dict.values():
backend.Disconnect()
self._inspector_backends_dict = {}
self._contexts = None
| bsd-3-clause | 6,397,773,995,444,545,000 | 34.244337 | 100 | 0.696662 | false | 4.117391 | true | false | false |
v-zhongz/azure-linux-extensions | VMBackup/main/Utils/WAAgentUtil.py | 11 | 2528 | # Wrapper module for waagent
#
# waagent is not written as a module. This wrapper module is created
# to use the waagent code as a module.
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import imp
import os
import os.path
#
# The following code will search and load waagent code and expose
# it as a submodule of current module
#
def searchWAAgent():
agentPath = '/usr/sbin/waagent'
if(os.path.isfile(agentPath)):
return agentPath
user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
for user_path in user_paths:
agentPath = os.path.join(user_path, 'waagent')
if(os.path.isfile(agentPath)):
return agentPath
return None
agentPath = searchWAAgent()
if(agentPath):
waagent = imp.load_source('waagent', agentPath)
else:
raise Exception("Can't load waagent.")
if not hasattr(waagent, "AddExtensionEvent"):
"""
If AddExtensionEvent is not defined, provide a dummy impl.
"""
def _AddExtensionEvent(*args, **kwargs):
pass
waagent.AddExtensionEvent = _AddExtensionEvent
if not hasattr(waagent, "WALAEventOperation"):
class _WALAEventOperation:
HeartBeat = "HeartBeat"
Provision = "Provision"
Install = "Install"
UnIsntall = "UnInstall"
Disable = "Disable"
Enable = "Enable"
Download = "Download"
Upgrade = "Upgrade"
Update = "Update"
waagent.WALAEventOperation = _WALAEventOperation
__ExtensionName__ = None
def InitExtensionEventLog(name):
    global __ExtensionName__
    __ExtensionName__ = name
def AddExtensionEvent(name=__ExtensionName__,
op=waagent.WALAEventOperation.Enable,
isSuccess=False,
message=None):
if name is not None:
waagent.AddExtensionEvent(name=name,
op=op,
isSuccess=isSuccess,
message=message)
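# Illustrative usage sketch (extension name hypothetical, not from the upstream
# file): an extension handler reports telemetry through this wrapper like so:
#
#   InitExtensionEventLog('Microsoft.OSTCExtensions.VMBackup')
#   AddExtensionEvent(name='Microsoft.OSTCExtensions.VMBackup',
#                     op=waagent.WALAEventOperation.Enable,
#                     isSuccess=True,
#                     message='backup enabled')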
| apache-2.0 | 1,095,373,671,504,539,800 | 29.804878 | 74 | 0.650831 | false | 3.971698 | false | false | false |
ibinti/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/topology.py | 311 | 2226 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
__all__ = ['geos_boundary', 'geos_buffer', 'geos_centroid', 'geos_convexhull',
'geos_difference', 'geos_envelope', 'geos_intersection',
'geos_linemerge', 'geos_pointonsurface', 'geos_preservesimplify',
'geos_simplify', 'geos_symdifference', 'geos_union', 'geos_relate']
from ctypes import c_char_p, c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
def topology(func, *args):
"For GEOS unary topology functions."
argtypes = [GEOM_PTR]
if args: argtypes += args
func.argtypes = argtypes
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
### Topology Routines ###
geos_boundary = topology(GEOSFunc('GEOSBoundary'))
geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int)
geos_centroid = topology(GEOSFunc('GEOSGetCentroid'))
geos_convexhull = topology(GEOSFunc('GEOSConvexHull'))
geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR)
geos_envelope = topology(GEOSFunc('GEOSEnvelope'))
geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR)
geos_linemerge = topology(GEOSFunc('GEOSLineMerge'))
geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface'))
geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double)
geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double)
geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR)
geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFunc('GEOSRelate')
geos_relate.argtypes = [GEOM_PTR, GEOM_PTR]
geos_relate.restype = geos_char_p
geos_relate.errcheck = check_string
# Routines only in GEOS 3.1+
if GEOS_PREPARE:
geos_cascaded_union = GEOSFunc('GEOSUnionCascaded')
geos_cascaded_union.argtypes = [GEOM_PTR]
geos_cascaded_union.restype = GEOM_PTR
__all__.append('geos_cascaded_union')
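# Illustrative note (not from the upstream file): these prototypes are consumed
# by GEOSGeometry methods; for example, geom.union(other) ultimately calls
# geos_union(geom.ptr, other.ptr) and wraps the returned GEOM_PTR in a new
# GEOSGeometry, while geom.relate(other) returns the DE-9IM string produced by
# geos_relate.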
| apache-2.0 | -7,917,583,549,610,253,000 | 42.647059 | 84 | 0.743935 | false | 3.419355 | false | false | false |
googlecartographer/cartographer | docs/source/conf.py | 5 | 9092 | # -*- coding: utf-8 -*-
# Copyright 2016 The Cartographer Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Cartographer documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 8 10:41:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cartographer'
copyright = u'{year} The Cartographer Authors'.format(year=datetime.now().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ''
# The full version, including alpha/beta/rc tags.
#release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Cartographerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Cartographer.tex', u'Cartographer Documentation',
u'The Cartographer Authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cartographer', u'Cartographer Documentation',
[u'The Cartographer Authors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Cartographer', u'Cartographer Documentation',
u'The Cartographer Authors', 'Cartographer',
'Cartographer is a system that provides real-time simultaneous '
'localization and mapping (SLAM) in 2D and 3D across multiple platforms '
'and sensor configurations.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | 7,089,863,709,540,181,000 | 32.061818 | 79 | 0.711835 | false | 3.786756 | false | false | false |
partofthething/home-assistant | tests/components/apple_tv/conftest.py | 8 | 3388 | """Fixtures for component."""
from unittest.mock import patch
from pyatv import conf, net
import pytest
from .common import MockPairingHandler, create_conf
@pytest.fixture(autouse=True, name="mock_scan")
def mock_scan_fixture():
"""Mock pyatv.scan."""
with patch("homeassistant.components.apple_tv.config_flow.scan") as mock_scan:
async def _scan(loop, timeout=5, identifier=None, protocol=None, hosts=None):
if not mock_scan.hosts:
mock_scan.hosts = hosts
return mock_scan.result
mock_scan.result = []
mock_scan.hosts = None
mock_scan.side_effect = _scan
yield mock_scan
@pytest.fixture(name="dmap_pin")
def dmap_pin_fixture():
"""Mock pyatv.scan."""
with patch("homeassistant.components.apple_tv.config_flow.randrange") as mock_pin:
mock_pin.side_effect = lambda start, stop: 1111
yield mock_pin
@pytest.fixture
def pairing():
"""Mock pyatv.scan."""
with patch("homeassistant.components.apple_tv.config_flow.pair") as mock_pair:
async def _pair(config, protocol, loop, session=None, **kwargs):
handler = MockPairingHandler(
await net.create_session(session), config.get_service(protocol)
)
handler.always_fail = mock_pair.always_fail
return handler
mock_pair.always_fail = False
mock_pair.side_effect = _pair
yield mock_pair
@pytest.fixture
def pairing_mock():
"""Mock pyatv.scan."""
with patch("homeassistant.components.apple_tv.config_flow.pair") as mock_pair:
async def _pair(config, protocol, loop, session=None, **kwargs):
return mock_pair
async def _begin():
pass
async def _close():
pass
mock_pair.close.side_effect = _close
mock_pair.begin.side_effect = _begin
mock_pair.pin = lambda pin: None
mock_pair.side_effect = _pair
yield mock_pair
@pytest.fixture
def full_device(mock_scan, dmap_pin):
"""Mock pyatv.scan."""
mock_scan.result.append(
create_conf(
"127.0.0.1",
"MRP Device",
conf.MrpService("mrpid", 5555),
conf.DmapService("dmapid", None, port=6666),
conf.AirPlayService("airplayid", port=7777),
)
)
yield mock_scan
@pytest.fixture
def mrp_device(mock_scan):
"""Mock pyatv.scan."""
mock_scan.result.append(
create_conf("127.0.0.1", "MRP Device", conf.MrpService("mrpid", 5555))
)
yield mock_scan
@pytest.fixture
def dmap_device(mock_scan):
"""Mock pyatv.scan."""
mock_scan.result.append(
create_conf(
"127.0.0.1",
"DMAP Device",
conf.DmapService("dmapid", None, port=6666),
)
)
yield mock_scan
@pytest.fixture
def dmap_device_with_credentials(mock_scan):
"""Mock pyatv.scan."""
mock_scan.result.append(
create_conf(
"127.0.0.1",
"DMAP Device",
conf.DmapService("dmapid", "dummy_creds", port=6666),
)
)
yield mock_scan
@pytest.fixture
def airplay_device(mock_scan):
"""Mock pyatv.scan."""
mock_scan.result.append(
create_conf(
"127.0.0.1", "AirPlay Device", conf.AirPlayService("airplayid", port=7777)
)
)
yield mock_scan
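# Illustrative test sketch (hypothetical, not from the upstream file): a test
# module simply requests one of the fixtures above to get a pre-populated scan
# result, e.g.:
#
#   async def test_user_flow_finds_mrp_device(hass, mrp_device, pairing):
#       result = await hass.config_entries.flow.async_init(
#           "apple_tv", context={"source": "user"}
#       )
#       assert result["type"] == "form"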
| mit | 8,095,179,028,966,757,000 | 24.862595 | 86 | 0.597107 | false | 3.52183 | true | false | false |
fentas/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py | 118 | 7503 | # Copyright (C) 2012 Zan Dobersek <zandobersek@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import Port
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.xvfbdriver import XvfbDriver
from webkitpy.tool.mocktool import MockOptions
_log = logging.getLogger(__name__)
class XvfbDriverTest(unittest.TestCase):
def make_driver(self, worker_number=0, xorg_running=False, executive=None):
port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._config.build_directory = lambda configuration: "/mock-build"
port._server_process_constructor = MockServerProcess
if xorg_running:
port._executive._running_pids['Xorg'] = 108
driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True)
driver._startup_delay_secs = 0
return driver
def cleanup_driver(self, driver):
# Setting _xvfb_process member to None is necessary as the Driver object is stopped on deletion,
# killing the Xvfb process if present. Thus, this method should only be called from tests that do not
# intend to test the behavior of XvfbDriver.stop.
driver._xvfb_process = None
def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False):
OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs)
self.assertTrue(driver._server_process.started)
self.assertEqual(driver._server_process.env["DISPLAY"], expected_display)
def test_start_no_pixel_tests(self):
driver = self.make_driver()
expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0")
self.cleanup_driver(driver)
def test_start_pixel_tests(self):
driver = self.make_driver()
expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
self.cleanup_driver(driver)
def test_start_arbitrary_worker_number(self):
driver = self.make_driver(worker_number=17)
expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
self.cleanup_driver(driver)
def test_next_free_display(self):
output = "Xorg /usr/bin/X :0 -auth /var/run/lightdm/root/:0 -nolisten tcp vt7 -novtswitch -background none\nXvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
self.assertEqual(driver._next_free_display(), 2)
self.cleanup_driver(driver)
output = "X /usr/bin/X :0 vt7 -nolisten tcp -auth /var/run/xauth/A:0-8p7Ybb"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
self.assertEqual(driver._next_free_display(), 1)
self.cleanup_driver(driver)
output = "Xvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
self.assertEqual(driver._next_free_display(), 1)
self.cleanup_driver(driver)
output = "Xvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :3 -screen 0 800x600x24 -nolisten tcp"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
self.assertEqual(driver._next_free_display(), 2)
self.cleanup_driver(driver)
def test_start_next_worker(self):
driver = self.make_driver()
driver._next_free_display = lambda: 0
expected_logs = "MOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
self.cleanup_driver(driver)
driver = self.make_driver()
driver._next_free_display = lambda: 3
expected_logs = "MOCK popen: ['Xvfb', ':3', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":3", pixel_tests=True)
self.cleanup_driver(driver)
def test_stop(self):
filesystem = MockFileSystem(files={'/tmp/.X42-lock': '1234\n'})
port = Port(MockSystemHost(log_executive=True, filesystem=filesystem), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x))
driver = XvfbDriver(port, worker_number=0, pixel_tests=True)
class FakeXvfbProcess(object):
pid = 1234
driver._xvfb_process = FakeXvfbProcess()
driver._lock_file = '/tmp/.X42-lock'
expected_logs = "MOCK kill_process pid: 1234\n"
OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs)
self.assertIsNone(driver._xvfb_process)
self.assertFalse(port._filesystem.exists(driver._lock_file))
| bsd-3-clause | 7,573,119,017,516,415,000 | 54.577778 | 197 | 0.698121 | false | 3.712519 | true | false | false |
valkjsaaa/sl4a | python/src/Lib/contextlib.py | 62 | 4136 | """Utilities for with-statement contexts. See PEP 343."""
import sys
from functools import wraps
__all__ = ["contextmanager", "nested", "closing"]
class GeneratorContextManager(object):
"""Helper for @contextmanager decorator."""
def __init__(self, gen):
self.gen = gen
def __enter__(self):
try:
return self.gen.next()
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
self.gen.next()
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration, exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return GeneratorContextManager(func(*args, **kwds))
return helper
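# Illustrative sketch (not part of the stdlib module): a concrete context
# manager built with the decorator above. The name 'opened' and the use of
# the builtin open() are assumptions chosen only for this example.
#
#     @contextmanager
#     def opened(path):
#         f = open(path)
#         try:
#             yield f
#         finally:
#             f.close()
#
#     with opened('/etc/hosts') as f:
#         for line in f:
#             print line,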
@contextmanager
def nested(*managers):
"""Support multiple context managers in a single with-statement.
Code like this:
with nested(A, B, C) as (X, Y, Z):
<body>
is equivalent to this:
with A as X:
with B as Y:
with C as Z:
<body>
"""
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
raise exc[0], exc[1], exc[2]
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
| apache-2.0 | -6,676,445,754,616,710,000 | 26.03268 | 78 | 0.508946 | false | 4.748565 | false | false | false |
AbrahmAB/sugar | src/jarabe/controlpanel/gui.py | 2 | 21394 | # Copyright (C) 2008 One Laptop Per Child
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from gettext import gettext as _
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkX11
from sugar3.graphics.icon import Icon
from sugar3.graphics import style
from sugar3.graphics.alert import Alert, TimeoutAlert
from jarabe.model.session import get_session_manager
from jarabe.controlpanel.toolbar import MainToolbar
from jarabe.controlpanel.toolbar import SectionToolbar
from jarabe import config
from jarabe.model import shell
_logger = logging.getLogger('ControlPanel')
class ControlPanel(Gtk.Window):
__gtype_name__ = 'SugarControlPanel'
def __init__(self, window_xid=0):
self.parent_window_xid = window_xid
Gtk.Window.__init__(self)
self._calculate_max_columns()
self.set_border_width(style.LINE_WIDTH)
self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
self.set_decorated(False)
self.set_resizable(False)
self.set_modal(True)
self.set_can_focus(True)
self.connect('key-press-event', self.__key_press_event_cb)
self._toolbar = None
self._canvas = None
self._table = None
self._scrolledwindow = None
self._separator = None
self._section_view = None
self._section_toolbar = None
self._main_toolbar = None
self._vbox = Gtk.VBox()
self._hbox = Gtk.HBox()
self._vbox.pack_start(self._hbox, True, True, 0)
self._hbox.show()
self._main_view = Gtk.EventBox()
self._hbox.pack_start(self._main_view, True, True, 0)
self._main_view.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_BLACK.get_gdk_color())
self._main_view.show()
self.add(self._vbox)
self._vbox.show()
self.connect('realize', self.__realize_cb)
self._options = self._get_options()
self._current_option = None
self._setup_main()
self._setup_section()
self._show_main_view()
Gdk.Screen.get_default().connect(
'size-changed', self.__size_changed_cb)
self._busy_count = 0
self._selected = []
def __realize_cb(self, widget):
self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
window = self.get_window()
window.set_accept_focus(True)
if self.parent_window_xid > 0:
display = Gdk.Display.get_default()
parent = GdkX11.X11Window.foreign_new_for_display(
display, self.parent_window_xid)
window.set_transient_for(parent)
# the modal windows counter is updated to disable hot keys - SL#4601
shell.get_model().push_modal()
def __size_changed_cb(self, event):
self._calculate_max_columns()
def busy(self):
if self._busy_count == 0:
self._old_cursor = self.get_window().get_cursor()
self._set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
self._busy_count += 1
def unbusy(self):
self._busy_count -= 1
if self._busy_count == 0:
self._set_cursor(self._old_cursor)
def _set_cursor(self, cursor):
self.get_window().set_cursor(cursor)
Gdk.flush()
def add_alert(self, alert):
self._vbox.pack_start(alert, False, False, 0)
self._vbox.reorder_child(alert, 2)
def remove_alert(self, alert):
self._vbox.remove(alert)
def grab_focus(self):
        # override grab_focus in order to grab focus on the view
self._main_view.get_child().grab_focus()
def _calculate_max_columns(self):
self._max_columns = int(0.285 * (float(Gdk.Screen.width()) /
style.GRID_CELL_SIZE - 3))
offset = style.GRID_CELL_SIZE
width = Gdk.Screen.width() - offset * 2
height = Gdk.Screen.height() - offset * 2
self.set_size_request(width, height)
if hasattr(self, '_table'):
for child in self._table.get_children():
child.destroy()
self._setup_options()
def _set_canvas(self, canvas):
if self._canvas in self._main_view:
self._main_view.remove(self._canvas)
if canvas:
self._main_view.add(canvas)
self._canvas = canvas
def _set_toolbar(self, toolbar):
if self._toolbar:
self._vbox.remove(self._toolbar)
self._vbox.pack_start(toolbar, False, False, 0)
self._vbox.reorder_child(toolbar, 0)
self._toolbar = toolbar
if not self._separator:
self._separator = Gtk.HSeparator()
self._vbox.pack_start(self._separator, False, False, 0)
self._vbox.reorder_child(self._separator, 1)
self._separator.show()
def _setup_main(self):
self._main_toolbar = MainToolbar()
self._table = Gtk.Table()
self._table.set_col_spacings(style.GRID_CELL_SIZE)
self._table.set_row_spacings(style.GRID_CELL_SIZE)
self._table.set_border_width(style.GRID_CELL_SIZE)
self._scrolledwindow = Gtk.ScrolledWindow()
self._scrolledwindow.set_can_focus(False)
self._scrolledwindow.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
self._scrolledwindow.add_with_viewport(self._table)
child = self._scrolledwindow.get_child()
child.modify_bg(
Gtk.StateType.NORMAL, style.COLOR_BLACK.get_gdk_color())
self._setup_options()
self._main_toolbar.connect('stop-clicked',
self.__stop_clicked_cb)
self._main_toolbar.connect('search-changed',
self.__search_changed_cb)
def _setup_options(self):
# If the screen width only supports two columns, start
# placing from the second row.
if self._max_columns == 2:
row = 1
column = 0
else:
# About Me and About my computer are hardcoded below to use the
# first two slots so we need to leave them free.
row = 0
column = 2
options = self._options.keys()
options.sort()
for option in options:
sectionicon = _SectionIcon(icon_name=self._options[option]['icon'],
title=self._options[option]['title'],
xo_color=self._options[option]['color'],
pixel_size=style.GRID_CELL_SIZE)
sectionicon.connect('button_press_event',
self.__select_option_cb, option)
sectionicon.show()
if option == 'aboutme':
self._table.attach(sectionicon, 0, 1, 0, 1)
elif option == 'aboutcomputer':
self._table.attach(sectionicon, 1, 2, 0, 1)
else:
self._table.attach(sectionicon,
column, column + 1,
row, row + 1)
column += 1
if column == self._max_columns:
column = 0
row += 1
self._options[option]['button'] = sectionicon
def _show_main_view(self):
if self._section_view is not None:
self._section_view.destroy()
self._section_view = None
self._set_toolbar(self._main_toolbar)
self._main_toolbar.show()
self._set_canvas(self._scrolledwindow)
self._main_view.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_BLACK.get_gdk_color())
self._table.show()
self._scrolledwindow.show()
entry = self._main_toolbar.get_entry()
entry.set_text('')
entry.connect('icon-press', self.__clear_icon_pressed_cb)
self.grab_focus()
def __key_press_event_cb(self, window, event):
if event.keyval == Gdk.KEY_Return:
if len(self._selected) == 1:
self.show_section_view(self._selected[0])
return True
if event.keyval == Gdk.KEY_Escape:
if self._toolbar == self._main_toolbar:
self.__stop_clicked_cb(None)
self.destroy()
else:
self.__cancel_clicked_cb(None)
return True
# if the user clicked out of the window - fix SL #3188
if not self.is_active():
self.present()
entry = self._main_toolbar.get_entry()
if not entry.has_focus():
entry.grab_focus()
return False
def __clear_icon_pressed_cb(self, entry, icon_pos, event):
self.grab_focus()
def _update(self, query):
self._selected = []
for option in self._options:
found = False
for key in self._options[option]['keywords']:
if query.lower() in key.lower():
self._options[option]['button'].set_sensitive(True)
self._selected.append(option)
found = True
break
if not found:
self._options[option]['button'].set_sensitive(False)
def _setup_section(self):
self._section_toolbar = SectionToolbar()
self._section_toolbar.connect('cancel-clicked',
self.__cancel_clicked_cb)
self._section_toolbar.connect('accept-clicked',
self.__accept_clicked_cb)
def show_section_view(self, option):
self._set_toolbar(self._section_toolbar)
icon = self._section_toolbar.get_icon()
icon.set_from_icon_name(self._options[option]['icon'],
Gtk.IconSize.LARGE_TOOLBAR)
icon.props.xo_color = self._options[option]['color']
title = self._section_toolbar.get_title()
title.set_text(self._options[option]['title'])
self._section_toolbar.show()
self._current_option = option
mod = __import__('.'.join(('cpsection', option, 'view')),
globals(), locals(), ['view'])
view_class = getattr(mod, self._options[option]['view'], None)
mod = __import__('.'.join(('cpsection', option, 'model')),
globals(), locals(), ['model'])
model = ModelWrapper(mod)
try:
self.busy()
self._section_view = view_class(model,
self._options[option]['alerts'])
self._set_canvas(self._section_view)
self._section_view.show()
finally:
self.unbusy()
self._section_view.connect('notify::is-valid',
self.__valid_section_cb)
self._section_view.connect('notify::is-cancellable',
self.__cancellable_section_cb)
self._section_view.connect('request-close',
self.__close_request_cb)
self._section_view.connect('add-alert',
self.__create_restart_alert_cb)
self._section_view.connect('set-toolbar-sensitivity',
self.__set_toolbar_sensitivity_cb)
self._main_view.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
def set_section_view_auto_close(self):
"""Automatically close the control panel if there is "nothing to do"
"""
self._section_view.auto_close = True
def _get_options(self):
"""Get the available option information from the extensions
"""
options = {}
path = os.path.join(config.ext_path, 'cpsection')
folder = os.listdir(path)
for item in folder:
if os.path.isdir(os.path.join(path, item)) and \
os.path.exists(os.path.join(path, item, '__init__.py')):
try:
mod = __import__('.'.join(('cpsection', item)),
globals(), locals(), [item])
view_class = getattr(mod, 'CLASS', None)
if view_class is not None:
options[item] = {}
options[item]['alerts'] = []
options[item]['view'] = view_class
options[item]['icon'] = getattr(mod, 'ICON', item)
options[item]['title'] = getattr(mod, 'TITLE', item)
options[item]['color'] = getattr(mod, 'COLOR', None)
keywords = getattr(mod, 'KEYWORDS', [])
keywords.append(options[item]['title'].lower())
if item not in keywords:
keywords.append(item)
options[item]['keywords'] = keywords
else:
_logger.debug('no CLASS attribute in %r', item)
except Exception:
logging.exception('Exception while loading extension:')
return options
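    # Illustrative sketch (an assumption, not taken from this repository): a
    # minimal cpsection extension that _get_options() above would discover.
    # It lives at <ext_path>/cpsection/<item>/__init__.py and only needs a few
    # module-level attributes; the names shown here are hypothetical.
    #
    #     # cpsection/frame/__init__.py
    #     CLASS = 'Frame'          # view class name looked up in view.py
    #     ICON = 'module-frame'
    #     TITLE = 'Frame'
    #     KEYWORDS = ['corner', 'edge', 'delay']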
def __cancel_clicked_cb(self, widget):
self._section_view.undo()
self._options[self._current_option]['alerts'] = []
self._section_toolbar.accept_button.set_sensitive(True)
self._show_main_view()
def __accept_clicked_cb(self, widget):
if hasattr(self._section_view, "apply"):
self._section_view.apply()
if self._section_view.needs_restart:
self.__set_toolbar_sensitivity_cb(False)
if self._section_view.show_restart_alert:
self.__create_restart_alert_cb()
else:
self._show_main_view()
def __set_toolbar_sensitivity_cb(self, value=True,
widget=None, event=None):
self._section_toolbar.accept_button.set_sensitive(value)
self._section_toolbar.cancel_button.set_sensitive(value)
def __create_restart_alert_cb(self, widget=None, event=None):
alert = Alert()
alert.props.title = _('Warning')
alert.props.msg = self._section_view.restart_msg
if self._section_view.props.is_cancellable:
icon = Icon(icon_name='dialog-cancel')
alert.add_button(Gtk.ResponseType.CANCEL,
_('Cancel changes'), icon)
icon.show()
if self._section_view.props.is_deferrable:
icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.ACCEPT, _('Later'), icon)
icon.show()
icon = Icon(icon_name='system-restart')
alert.add_button(Gtk.ResponseType.APPLY, _('Restart now'), icon)
icon.show()
self.add_alert(alert)
alert.connect('response', self.__response_cb)
alert.show()
def __response_cb(self, alert, response_id):
self.remove_alert(alert)
self._section_toolbar.accept_button.set_sensitive(True)
self._section_toolbar.cancel_button.set_sensitive(True)
if response_id is Gtk.ResponseType.CANCEL:
self._section_view.undo()
self._section_view.setup()
self._options[self._current_option]['alerts'] = []
elif response_id is Gtk.ResponseType.ACCEPT:
self._options[self._current_option]['alerts'] = \
self._section_view.restart_alerts
self._show_main_view()
elif response_id is Gtk.ResponseType.APPLY:
self.busy()
self._section_toolbar.accept_button.set_sensitive(False)
self._section_toolbar.cancel_button.set_sensitive(False)
get_session_manager().logout()
GObject.timeout_add_seconds(4, self.__quit_timeout_cb)
def __quit_timeout_cb(self):
self.unbusy()
alert = TimeoutAlert(30)
alert.props.title = _('An activity is not responding.')
alert.props.msg = _('You may lose unsaved work if you continue.')
alert.connect('response', self.__quit_accept_cb)
self.add_alert(alert)
alert.show()
def __quit_accept_cb(self, alert, response_id):
self.remove_alert(alert)
if response_id is Gtk.ResponseType.CANCEL:
get_session_manager().cancel_shutdown()
self._section_toolbar.accept_button.set_sensitive(True)
self._section_toolbar.cancel_button.set_sensitive(True)
else:
self.busy()
get_session_manager().shutdown_completed()
def __select_option_cb(self, button, event, option):
self.show_section_view(option)
def __search_changed_cb(self, maintoolbar, query):
self._update(query)
def __stop_clicked_cb(self, widget):
shell.get_model().pop_modal()
self.destroy()
def __close_request_cb(self, widget, event=None):
self.destroy()
def __valid_section_cb(self, section_view, pspec):
section_is_valid = section_view.props.is_valid
self._section_toolbar.accept_button.set_sensitive(section_is_valid)
def __cancellable_section_cb(self, section_view, pspec):
cancellable = section_view.props.is_cancellable
self._section_toolbar.cancel_button.set_sensitive(cancellable)
class ModelWrapper(object):
def __init__(self, module):
self._module = module
self._options = {}
self._setup()
def _setup(self):
methods = dir(self._module)
for method in methods:
if method.startswith('get_') and method[4:] != 'color':
try:
self._options[method[4:]] = getattr(self._module, method)()
except Exception:
self._options[method[4:]] = None
def __getattr__(self, name):
return getattr(self._module, name)
def undo(self):
for key in self._options.keys():
method = getattr(self._module, 'set_' + key, None)
if method and self._options[key] is not None:
try:
method(self._options[key])
except Exception as detail:
_logger.debug('Error undo option: %s', detail)
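# Illustrative sketch (an assumption, not taken from this repository): the
# ModelWrapper above expects the cpsection model module to expose paired
# accessors, e.g. in cpsection/<item>/model.py:
#
#     def get_timezone():
#         ...
#
#     def set_timezone(value):
#         ...
#
# _setup() snapshots every get_*() value so that undo() can replay it through
# the matching set_*() function.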
if hasattr(ControlPanel, 'set_css_name'):
ControlPanel.set_css_name('controlpanel')
class _SectionIcon(Gtk.EventBox):
__gtype_name__ = 'SugarSectionIcon'
__gproperties__ = {
'icon-name': (str, None, None, None, GObject.PARAM_READWRITE),
'pixel-size': (object, None, None, GObject.PARAM_READWRITE),
'xo-color': (object, None, None, GObject.PARAM_READWRITE),
'title': (str, None, None, None, GObject.PARAM_READWRITE),
}
def __init__(self, **kwargs):
self._icon_name = None
self._pixel_size = style.GRID_CELL_SIZE
self._xo_color = None
self._title = 'No Title'
Gtk.EventBox.__init__(self, **kwargs)
self._vbox = Gtk.VBox()
self._icon = Icon(icon_name=self._icon_name,
pixel_size=self._pixel_size,
xo_color=self._xo_color)
self._vbox.pack_start(self._icon, expand=False, fill=False, padding=0)
self._label = Gtk.Label(label=self._title)
self._label.modify_fg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
self._vbox.pack_start(self._label, expand=False, fill=False, padding=0)
self._vbox.set_spacing(style.DEFAULT_SPACING)
self.set_visible_window(False)
self.set_app_paintable(True)
self.set_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.add(self._vbox)
self._vbox.show()
self._label.show()
self._icon.show()
def get_icon(self):
return self._icon
def do_set_property(self, pspec, value):
if pspec.name == 'icon-name':
if self._icon_name != value:
self._icon_name = value
elif pspec.name == 'pixel-size':
if self._pixel_size != value:
self._pixel_size = value
elif pspec.name == 'xo-color':
if self._xo_color != value:
self._xo_color = value
elif pspec.name == 'title':
if self._title != value:
self._title = value
def do_get_property(self, pspec):
if pspec.name == 'icon-name':
return self._icon_name
elif pspec.name == 'pixel-size':
return self._pixel_size
elif pspec.name == 'xo-color':
return self._xo_color
elif pspec.name == 'title':
return self._title
| gpl-3.0 | -4,915,852,698,902,648,000 | 36.271777 | 79 | 0.558708 | false | 4.03356 | false | false | false |
bramd/django-phonenumber-field | setup.py | 1 | 1568 | from setuptools import setup, find_packages
from phonenumber_field import __version__
setup(
name="django-phonenumber-field",
version=__version__,
url='http://github.com/stefanfoulis/django-phonenumber-field',
license='BSD',
platforms=['OS Independent'],
description="An international phone number field for django models.",
install_requires=[
'phonenumbers>=7.0.2',
'babel',
],
long_description=open('README.rst').read(),
author='Stefan Foulis',
author_email='stefan.foulis@gmail.com',
maintainer='Stefan Foulis',
maintainer_email='stefan.foulis@gmail.com',
packages=find_packages(),
package_data = {
'phonenumber_field': [
'locale/*/LC_MESSAGES/*',
],
},
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
]
)
| mit | -4,853,339,113,309,659,000 | 33.086957 | 73 | 0.604592 | false | 4.072727 | false | false | false |
quang-ha/lammps | tools/moltemplate/moltemplate/remove_duplicate_atoms.py | 11 | 1467 | #!/usr/bin/env python
"""
Get rid of lines containing duplicate copies of the same atom in the "Atoms"
section of a LAMMPS data file. Duplicate lines which occur later are
preserved and the earlier lines are erased.
The file is read from sys.stdin. (This program does not parse the entire
data file; the text from the "Atoms" section of the LAMMPS file must
be extracted in advance before it is sent to this program.)
"""
import sys
def main():
in_stream = sys.stdin
f = None
fname = None
if len(sys.argv) == 2:
fname = sys.argv[1]
f = open(fname, 'r')
in_stream = f
atom_ids_in_use = set([])
lines = in_stream.readlines()
# Start at the end of the file and read backwards.
# If duplicate lines exist, eliminate the ones that occur earlier in the file.
i = len(lines)
while i > 0:
i -= 1
line_orig = lines[i]
line = line_orig.rstrip('\n')
if '#' in line_orig:
ic = line.find('#')
line = line_orig[:ic]
tokens = line.strip().split()
if len(tokens) > 0:
atom_id = tokens[0]
if atom_id in atom_ids_in_use:
del lines[i]
else:
atom_ids_in_use.add(atom_id)
else:
del lines[i]
for line in lines:
sys.stdout.write(line)
if f != None:
f.close()
return
if __name__ == '__main__':
main()
| gpl-2.0 | -9,153,415,108,029,633,000 | 23.45 | 82 | 0.558964 | false | 3.658354 | false | false | false |
hydroshare/hydroshare_temp | hs_party/models/group_association.py | 1 | 2255 | from django.contrib.contenttypes import generic
from django.contrib.auth.models import User, Group
from django.db import models
from mezzanine.pages.models import Page, RichText,Displayable
from mezzanine.core.fields import FileField, RichTextField
from mezzanine.core.models import Ownable
from mezzanine.generic.models import Keyword, Orderable
from hs_core.models import AbstractResource
from django.db.models.signals import post_save
from datetime import date
from uuid import uuid4
from django.db.models.signals import post_save,pre_save,post_init
from django.contrib.auth.signals import user_logged_in
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist,ValidationError
from django.core.urlresolvers import reverse
from .party import Party
from .party_types import PartyEmailModel,PartyGeolocation,PartyPhoneModel,PartyLocationModel
from .activities import ActivitiesModel
from .person import Person
from .organization import Organization
__author__ = 'valentin'
class GroupAssociation( ActivitiesModel):
# object to handle a person being in one or more organizations
#organization = models.ForeignKey(Organization)
uniqueCode = models.CharField(max_length=64,default=lambda: str(uuid4()),verbose_name="A unique code for the record", help_text="A unique code for the record")
group = models.ForeignKey(Group)
#person = models.ForeignKey(Person)
person = models.ForeignKey(Person)
    beginDate = models.DateField(null=True, blank=True, verbose_name="Begin date of association. Empty if not known.")
    endDate = models.DateField(null=True, blank=True, verbose_name="End date of association. Empty if still with group")
    positionName = models.CharField(verbose_name="Position. Empty if not known", blank=True, max_length=100)
def __unicode__(self):
if (self.beginDate):
if (self.endDate):
range=u' [%s, %s]' % (self.beginDate,self.endDate)
else:
range=u' [%s]' % (self.beginDate)
else:
range=''
        if (self.positionName):
            title = ', ' + self.positionName
        else:
            title = ''
        return u'%s (%s%s%s)' % (self.person.name, self.group.name, title, range)
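    # Illustrative sketch (assumed usage, not taken from this repository):
    # current memberships are rows whose endDate is still empty, e.g.
    #
    #     GroupAssociation.objects.filter(group=g, endDate__isnull=True)
    #
    # returns the people presently associated with group ``g``.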
class Meta:
app_label = 'hs_party' | bsd-3-clause | -6,585,716,426,862,214,000 | 40.777778 | 163 | 0.734368 | false | 3.87457 | false | false | false |
stewartsmith/bzr | bzrlib/index.py | 2 | 80106 | # Copyright (C) 2007-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Indexing facilities."""
from __future__ import absolute_import
__all__ = [
'CombinedGraphIndex',
'GraphIndex',
'GraphIndexBuilder',
'GraphIndexPrefixAdapter',
'InMemoryGraphIndex',
]
from bisect import bisect_right
from cStringIO import StringIO
import re
import sys
from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
bisect_multi,
revision as _mod_revision,
trace,
)
""")
from bzrlib import (
debug,
errors,
)
from bzrlib.static_tuple import StaticTuple
_HEADER_READV = (0, 200)
_OPTION_KEY_ELEMENTS = "key_elements="
_OPTION_LEN = "len="
_OPTION_NODE_REFS = "node_ref_lists="
_SIGNATURE = "Bazaar Graph Index 1\n"
_whitespace_re = re.compile('[\t\n\x0b\x0c\r\x00 ]')
_newline_null_re = re.compile('[\n\0]')
def _has_key_from_parent_map(self, key):
"""Check if this index has one key.
If it's possible to check for multiple keys at once through
calling get_parent_map that should be faster.
"""
return (key in self.get_parent_map([key]))
def _missing_keys_from_parent_map(self, keys):
return set(keys) - set(self.get_parent_map(keys))
class GraphIndexBuilder(object):
"""A builder that can build a GraphIndex.
The resulting graph has the structure::
_SIGNATURE OPTIONS NODES NEWLINE
_SIGNATURE := 'Bazaar Graph Index 1' NEWLINE
OPTIONS := 'node_ref_lists=' DIGITS NEWLINE
NODES := NODE*
NODE := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE
KEY := Not-whitespace-utf8
ABSENT := 'a'
REFERENCES := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1}
REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)?
REFERENCE := DIGITS ; digits is the byte offset in the index of the
; referenced key.
VALUE := no-newline-no-null-bytes
"""
def __init__(self, reference_lists=0, key_elements=1):
"""Create a GraphIndex builder.
:param reference_lists: The number of node references lists for each
entry.
:param key_elements: The number of bytestrings in each key.
"""
self.reference_lists = reference_lists
# A dict of {key: (absent, ref_lists, value)}
self._nodes = {}
# Keys that are referenced but not actually present in this index
self._absent_keys = set()
self._nodes_by_key = None
self._key_length = key_elements
self._optimize_for_size = False
self._combine_backing_indices = True
def _check_key(self, key):
"""Raise BadIndexKey if key is not a valid key for this index."""
if type(key) not in (tuple, StaticTuple):
raise errors.BadIndexKey(key)
if self._key_length != len(key):
raise errors.BadIndexKey(key)
for element in key:
if not element or _whitespace_re.search(element) is not None:
raise errors.BadIndexKey(element)
def _external_references(self):
"""Return references that are not present in this index.
"""
keys = set()
refs = set()
# TODO: JAM 2008-11-21 This makes an assumption about how the reference
# lists are used. It is currently correct for pack-0.92 through
# 1.9, which use the node references (3rd column) second
# reference list as the compression parent. Perhaps this should
# be moved into something higher up the stack, since it
# makes assumptions about how the index is used.
if self.reference_lists > 1:
for node in self.iter_all_entries():
keys.add(node[1])
refs.update(node[3][1])
return refs - keys
else:
# If reference_lists == 0 there can be no external references, and
# if reference_lists == 1, then there isn't a place to store the
# compression parent
return set()
def _get_nodes_by_key(self):
if self._nodes_by_key is None:
nodes_by_key = {}
if self.reference_lists:
for key, (absent, references, value) in self._nodes.iteritems():
if absent:
continue
key_dict = nodes_by_key
for subkey in key[:-1]:
key_dict = key_dict.setdefault(subkey, {})
key_dict[key[-1]] = key, value, references
else:
for key, (absent, references, value) in self._nodes.iteritems():
if absent:
continue
key_dict = nodes_by_key
for subkey in key[:-1]:
key_dict = key_dict.setdefault(subkey, {})
key_dict[key[-1]] = key, value
self._nodes_by_key = nodes_by_key
return self._nodes_by_key
def _update_nodes_by_key(self, key, value, node_refs):
"""Update the _nodes_by_key dict with a new key.
For a key of (foo, bar, baz) create
_nodes_by_key[foo][bar][baz] = key_value
"""
if self._nodes_by_key is None:
return
key_dict = self._nodes_by_key
if self.reference_lists:
key_value = StaticTuple(key, value, node_refs)
else:
key_value = StaticTuple(key, value)
for subkey in key[:-1]:
key_dict = key_dict.setdefault(subkey, {})
key_dict[key[-1]] = key_value
def _check_key_ref_value(self, key, references, value):
"""Check that 'key' and 'references' are all valid.
:param key: A key tuple. Must conform to the key interface (be a tuple,
be of the right length, not have any whitespace or nulls in any key
element.)
:param references: An iterable of reference lists. Something like
[[(ref, key)], [(ref, key), (other, key)]]
:param value: The value associate with this key. Must not contain
newlines or null characters.
:return: (node_refs, absent_references)
* node_refs: basically a packed form of 'references' where all
iterables are tuples
* absent_references: reference keys that are not in self._nodes.
This may contain duplicates if the same key is referenced in
multiple lists.
"""
as_st = StaticTuple.from_sequence
self._check_key(key)
if _newline_null_re.search(value) is not None:
raise errors.BadIndexValue(value)
if len(references) != self.reference_lists:
raise errors.BadIndexValue(references)
node_refs = []
absent_references = []
for reference_list in references:
for reference in reference_list:
# If reference *is* in self._nodes, then we know it has already
# been checked.
if reference not in self._nodes:
self._check_key(reference)
absent_references.append(reference)
reference_list = as_st([as_st(ref).intern()
for ref in reference_list])
node_refs.append(reference_list)
return as_st(node_refs), absent_references
def add_node(self, key, value, references=()):
"""Add a node to the index.
:param key: The key. keys are non-empty tuples containing
as many whitespace-free utf8 bytestrings as the key length
defined for this index.
:param references: An iterable of iterables of keys. Each is a
reference to another key.
:param value: The value to associate with the key. It may be any
bytes as long as it does not contain \\0 or \\n.
"""
(node_refs,
absent_references) = self._check_key_ref_value(key, references, value)
if key in self._nodes and self._nodes[key][0] != 'a':
raise errors.BadIndexDuplicateKey(key, self)
for reference in absent_references:
# There may be duplicates, but I don't think it is worth worrying
# about
self._nodes[reference] = ('a', (), '')
self._absent_keys.update(absent_references)
self._absent_keys.discard(key)
self._nodes[key] = ('', node_refs, value)
if self._nodes_by_key is not None and self._key_length > 1:
self._update_nodes_by_key(key, value, node_refs)
def clear_cache(self):
"""See GraphIndex.clear_cache()
This is a no-op, but we need the api to conform to a generic 'Index'
abstraction.
"""
def finish(self):
"""Finish the index.
:returns: cStringIO holding the full context of the index as it
should be written to disk.
"""
lines = [_SIGNATURE]
lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
key_count = len(self._nodes) - len(self._absent_keys)
lines.append(_OPTION_LEN + str(key_count) + '\n')
prefix_length = sum(len(x) for x in lines)
# references are byte offsets. To avoid having to do nasty
# polynomial work to resolve offsets (references to later in the
# file cannot be determined until all the inbetween references have
# been calculated too) we pad the offsets with 0's to make them be
# of consistent length. Using binary offsets would break the trivial
# file parsing.
# to calculate the width of zero's needed we do three passes:
# one to gather all the non-reference data and the number of references.
# one to pad all the data with reference-length and determine entry
# addresses.
# One to serialise.
# forward sorted by key. In future we may consider topological sorting,
# at the cost of table scans for direct lookup, or a second index for
# direct lookup
nodes = sorted(self._nodes.items())
# if we do not prepass, we don't know how long it will be up front.
expected_bytes = None
# we only need to pre-pass if we have reference lists at all.
if self.reference_lists:
key_offset_info = []
non_ref_bytes = prefix_length
total_references = 0
# TODO use simple multiplication for the constants in this loop.
for key, (absent, references, value) in nodes:
# record the offset known *so far* for this key:
# the non reference bytes to date, and the total references to
# date - saves reaccumulating on the second pass
key_offset_info.append((key, non_ref_bytes, total_references))
# key is literal, value is literal, there are 3 null's, 1 NL
# key is variable length tuple, \x00 between elements
non_ref_bytes += sum(len(element) for element in key)
if self._key_length > 1:
non_ref_bytes += self._key_length - 1
# value is literal bytes, there are 3 null's, 1 NL.
non_ref_bytes += len(value) + 3 + 1
# one byte for absent if set.
if absent:
non_ref_bytes += 1
elif self.reference_lists:
# (ref_lists -1) tabs
non_ref_bytes += self.reference_lists - 1
# (ref-1 cr's per ref_list)
for ref_list in references:
# how many references across the whole file?
total_references += len(ref_list)
# accrue reference separators
if ref_list:
non_ref_bytes += len(ref_list) - 1
# how many digits are needed to represent the total byte count?
digits = 1
possible_total_bytes = non_ref_bytes + total_references*digits
while 10 ** digits < possible_total_bytes:
digits += 1
possible_total_bytes = non_ref_bytes + total_references*digits
expected_bytes = possible_total_bytes + 1 # terminating newline
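            # Worked example (illustrative numbers only): with 990
            # non-reference bytes and 12 references, digits=1 gives
            # 990 + 12 = 1002, which needs more than one digit; the loop
            # widens to digits=4, where 990 + 12*4 = 1038 < 10**4, so every
            # offset is zero-padded to 4 digits.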
# resolve key addresses.
key_addresses = {}
for key, non_ref_bytes, total_references in key_offset_info:
key_addresses[key] = non_ref_bytes + total_references*digits
# serialise
format_string = '%%0%sd' % digits
for key, (absent, references, value) in nodes:
flattened_references = []
for ref_list in references:
ref_addresses = []
for reference in ref_list:
ref_addresses.append(format_string % key_addresses[reference])
flattened_references.append('\r'.join(ref_addresses))
string_key = '\x00'.join(key)
lines.append("%s\x00%s\x00%s\x00%s\n" % (string_key, absent,
'\t'.join(flattened_references), value))
lines.append('\n')
result = StringIO(''.join(lines))
if expected_bytes and len(result.getvalue()) != expected_bytes:
raise errors.BzrError('Failed index creation. Internal error:'
' mismatched output length and expected length: %d %d' %
(len(result.getvalue()), expected_bytes))
return result
def set_optimize(self, for_size=None, combine_backing_indices=None):
"""Change how the builder tries to optimize the result.
:param for_size: Tell the builder to try and make the index as small as
possible.
:param combine_backing_indices: If the builder spills to disk to save
memory, should the on-disk indices be combined. Set to True if you
are going to be probing the index, but to False if you are not. (If
you are not querying, then the time spent combining is wasted.)
:return: None
"""
# GraphIndexBuilder itself doesn't pay attention to the flag yet, but
# other builders do.
if for_size is not None:
self._optimize_for_size = for_size
if combine_backing_indices is not None:
self._combine_backing_indices = combine_backing_indices
def find_ancestry(self, keys, ref_list_num):
"""See CombinedGraphIndex.find_ancestry()"""
pending = set(keys)
parent_map = {}
missing_keys = set()
while pending:
next_pending = set()
for _, key, value, ref_lists in self.iter_entries(pending):
parent_keys = ref_lists[ref_list_num]
parent_map[key] = parent_keys
next_pending.update([p for p in parent_keys if p not in
parent_map])
missing_keys.update(pending.difference(parent_map))
pending = next_pending
return parent_map, missing_keys
class GraphIndex(object):
"""An index for data with embedded graphs.
The index maps keys to a list of key reference lists, and a value.
Each node has the same number of key reference lists. Each key reference
list can be empty or an arbitrary length. The value is an opaque NULL
terminated string without any newlines. The storage of the index is
hidden in the interface: keys and key references are always tuples of
bytestrings, never the internal representation (e.g. dictionary offsets).
It is presumed that the index will not be mutated - it is static data.
Successive iter_all_entries calls will read the entire index each time.
Additionally, iter_entries calls will read the index linearly until the
desired keys are found. XXX: This must be fixed before the index is
suitable for production use. :XXX
"""
def __init__(self, transport, name, size, unlimited_cache=False, offset=0):
"""Open an index called name on transport.
:param transport: A bzrlib.transport.Transport.
:param name: A path to provide to transport API calls.
:param size: The size of the index in bytes. This is used for bisection
logic to perform partial index reads. While the size could be
obtained by statting the file this introduced an additional round
trip as well as requiring stat'able transports, both of which are
avoided by having it supplied. If size is None, then bisection
support will be disabled and accessing the index will just stream
all the data.
:param offset: Instead of starting the index data at offset 0, start it
at an arbitrary offset.
"""
self._transport = transport
self._name = name
# Becomes a dict of key:(value, reference-list-byte-locations) used by
# the bisection interface to store parsed but not resolved keys.
self._bisect_nodes = None
# Becomes a dict of key:(value, reference-list-keys) which are ready to
# be returned directly to callers.
self._nodes = None
# a sorted list of slice-addresses for the parsed bytes of the file.
# e.g. (0,1) would mean that byte 0 is parsed.
self._parsed_byte_map = []
# a sorted list of keys matching each slice address for parsed bytes
# e.g. (None, 'foo@bar') would mean that the first byte contained no
# key, and the end byte of the slice is the of the data for 'foo@bar'
self._parsed_key_map = []
self._key_count = None
self._keys_by_offset = None
self._nodes_by_key = None
self._size = size
# The number of bytes we've read so far in trying to process this file
self._bytes_read = 0
self._base_offset = offset
def __eq__(self, other):
"""Equal when self and other were created with the same parameters."""
return (
type(self) == type(other) and
self._transport == other._transport and
self._name == other._name and
self._size == other._size)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__,
self._transport.abspath(self._name))
def _buffer_all(self, stream=None):
"""Buffer all the index data.
Mutates self._nodes and self.keys_by_offset.
"""
if self._nodes is not None:
# We already did this
return
if 'index' in debug.debug_flags:
trace.mutter('Reading entire index %s',
self._transport.abspath(self._name))
if stream is None:
stream = self._transport.get(self._name)
if self._base_offset != 0:
# This is wasteful, but it is better than dealing with
# adjusting all the offsets, etc.
stream = StringIO(stream.read()[self._base_offset:])
self._read_prefix(stream)
self._expected_elements = 3 + self._key_length
line_count = 0
# raw data keyed by offset
self._keys_by_offset = {}
# ready-to-return key:value or key:value, node_ref_lists
self._nodes = {}
self._nodes_by_key = None
trailers = 0
pos = stream.tell()
lines = stream.read().split('\n')
# GZ 2009-09-20: Should really use a try/finally block to ensure close
stream.close()
del lines[-1]
_, _, _, trailers = self._parse_lines(lines, pos)
for key, absent, references, value in self._keys_by_offset.itervalues():
if absent:
continue
# resolve references:
if self.node_ref_lists:
node_value = (value, self._resolve_references(references))
else:
node_value = value
self._nodes[key] = node_value
# cache the keys for quick set intersections
if trailers != 1:
# there must be one line - the empty trailer line.
raise errors.BadIndexData(self)
def clear_cache(self):
"""Clear out any cached/memoized values.
This can be called at any time, but generally it is used when we have
extracted some information, but don't expect to be requesting any more
from this index.
"""
def external_references(self, ref_list_num):
"""Return references that are not present in this index.
"""
self._buffer_all()
if ref_list_num + 1 > self.node_ref_lists:
raise ValueError('No ref list %d, index has %d ref lists'
% (ref_list_num, self.node_ref_lists))
refs = set()
nodes = self._nodes
for key, (value, ref_lists) in nodes.iteritems():
ref_list = ref_lists[ref_list_num]
refs.update([ref for ref in ref_list if ref not in nodes])
return refs
def _get_nodes_by_key(self):
if self._nodes_by_key is None:
nodes_by_key = {}
if self.node_ref_lists:
for key, (value, references) in self._nodes.iteritems():
key_dict = nodes_by_key
for subkey in key[:-1]:
key_dict = key_dict.setdefault(subkey, {})
key_dict[key[-1]] = key, value, references
else:
for key, value in self._nodes.iteritems():
key_dict = nodes_by_key
for subkey in key[:-1]:
key_dict = key_dict.setdefault(subkey, {})
key_dict[key[-1]] = key, value
self._nodes_by_key = nodes_by_key
return self._nodes_by_key
def iter_all_entries(self):
"""Iterate over all keys within the index.
:return: An iterable of (index, key, value) or (index, key, value, reference_lists).
The former tuple is used when there are no reference lists in the
index, making the API compatible with simple key:value index types.
There is no defined order for the result iteration - it will be in
the most efficient order for the index.
"""
if 'evil' in debug.debug_flags:
trace.mutter_callsite(3,
"iter_all_entries scales with size of history.")
if self._nodes is None:
self._buffer_all()
if self.node_ref_lists:
for key, (value, node_ref_lists) in self._nodes.iteritems():
yield self, key, value, node_ref_lists
else:
for key, value in self._nodes.iteritems():
yield self, key, value
def _read_prefix(self, stream):
signature = stream.read(len(self._signature()))
if not signature == self._signature():
raise errors.BadIndexFormatSignature(self._name, GraphIndex)
options_line = stream.readline()
if not options_line.startswith(_OPTION_NODE_REFS):
raise errors.BadIndexOptions(self)
try:
self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):-1])
except ValueError:
raise errors.BadIndexOptions(self)
options_line = stream.readline()
if not options_line.startswith(_OPTION_KEY_ELEMENTS):
raise errors.BadIndexOptions(self)
try:
self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):-1])
except ValueError:
raise errors.BadIndexOptions(self)
options_line = stream.readline()
if not options_line.startswith(_OPTION_LEN):
raise errors.BadIndexOptions(self)
try:
self._key_count = int(options_line[len(_OPTION_LEN):-1])
except ValueError:
raise errors.BadIndexOptions(self)
def _resolve_references(self, references):
"""Return the resolved key references for references.
References are resolved by looking up the location of the key in the
_keys_by_offset map and substituting the key name, preserving ordering.
:param references: An iterable of iterables of key locations. e.g.
[[123, 456], [123]]
:return: A tuple of tuples of keys.
"""
node_refs = []
for ref_list in references:
node_refs.append(tuple([self._keys_by_offset[ref][0] for ref in ref_list]))
return tuple(node_refs)
def _find_index(self, range_map, key):
"""Helper for the _parsed_*_index calls.
Given a range map - [(start, end), ...], finds the index of the range
in the map for key if it is in the map, and if it is not there, the
immediately preceeding range in the map.
"""
result = bisect_right(range_map, key) - 1
if result + 1 < len(range_map):
# check the border condition, it may be in result + 1
if range_map[result + 1][0] == key[0]:
return result + 1
return result
def _parsed_byte_index(self, offset):
"""Return the index of the entry immediately before offset.
e.g. if the parsed map has regions 0,10 and 11,12 parsed, meaning that
        there is one unparsed byte (the 11th, addressed as [10]), then:
asking for 0 will return 0
asking for 10 will return 0
asking for 11 will return 1
asking for 12 will return 1
"""
key = (offset, 0)
return self._find_index(self._parsed_byte_map, key)
def _parsed_key_index(self, key):
"""Return the index of the entry immediately before key.
e.g. if the parsed map has regions (None, 'a') and ('b','c') parsed,
meaning that keys from None to 'a' inclusive, and 'b' to 'c' inclusive
have been parsed, then:
asking for '' will return 0
asking for 'a' will return 0
asking for 'b' will return 1
asking for 'e' will return 1
"""
search_key = (key, None)
return self._find_index(self._parsed_key_map, search_key)
def _is_parsed(self, offset):
"""Returns True if offset has been parsed."""
index = self._parsed_byte_index(offset)
if index == len(self._parsed_byte_map):
return offset < self._parsed_byte_map[index - 1][1]
start, end = self._parsed_byte_map[index]
return offset >= start and offset < end
def _iter_entries_from_total_buffer(self, keys):
"""Iterate over keys when the entire index is parsed."""
# Note: See the note in BTreeBuilder.iter_entries for why we don't use
# .intersection() here
nodes = self._nodes
keys = [key for key in keys if key in nodes]
if self.node_ref_lists:
for key in keys:
value, node_refs = nodes[key]
yield self, key, value, node_refs
else:
for key in keys:
yield self, key, nodes[key]
def iter_entries(self, keys):
"""Iterate over keys within the index.
:param keys: An iterable providing the keys to be retrieved.
:return: An iterable as per iter_all_entries, but restricted to the
keys supplied. No additional keys will be returned, and every
key supplied that is in the index will be returned.
"""
keys = set(keys)
if not keys:
return []
if self._size is None and self._nodes is None:
self._buffer_all()
# We fit about 20 keys per minimum-read (4K), so if we are looking for
# more than 1/20th of the index its likely (assuming homogenous key
# spread) that we'll read the entire index. If we're going to do that,
# buffer the whole thing. A better analysis might take key spread into
# account - but B+Tree indices are better anyway.
# We could look at all data read, and use a threshold there, which will
# trigger on ancestry walks, but that is not yet fully mapped out.
if self._nodes is None and len(keys) * 20 > self.key_count():
self._buffer_all()
if self._nodes is not None:
return self._iter_entries_from_total_buffer(keys)
else:
return (result[1] for result in bisect_multi.bisect_multi_bytes(
self._lookup_keys_via_location, self._size, keys))
def iter_entries_prefix(self, keys):
"""Iterate over keys within the index using prefix matching.
Prefix matching is applied within the tuple of a key, not to within
the bytestring of each key element. e.g. if you have the keys ('foo',
'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
only the former key is returned.
WARNING: Note that this method currently causes a full index parse
unconditionally (which is reasonably appropriate as it is a means for
thunking many small indices into one larger one and still supplies
iter_all_entries at the thunk layer).
:param keys: An iterable providing the key prefixes to be retrieved.
Each key prefix takes the form of a tuple the length of a key, but
with the last N elements 'None' rather than a regular bytestring.
The first element cannot be 'None'.
:return: An iterable as per iter_all_entries, but restricted to the
keys with a matching prefix to those supplied. No additional keys
will be returned, and every match that is in the index will be
returned.
"""
keys = set(keys)
if not keys:
return
# load data - also finds key lengths
if self._nodes is None:
self._buffer_all()
if self._key_length == 1:
for key in keys:
# sanity check
if key[0] is None:
raise errors.BadIndexKey(key)
if len(key) != self._key_length:
raise errors.BadIndexKey(key)
if self.node_ref_lists:
value, node_refs = self._nodes[key]
yield self, key, value, node_refs
else:
yield self, key, self._nodes[key]
return
nodes_by_key = self._get_nodes_by_key()
for key in keys:
# sanity check
if key[0] is None:
raise errors.BadIndexKey(key)
if len(key) != self._key_length:
raise errors.BadIndexKey(key)
# find what it refers to:
key_dict = nodes_by_key
elements = list(key)
# find the subdict whose contents should be returned.
try:
while len(elements) and elements[0] is not None:
key_dict = key_dict[elements[0]]
elements.pop(0)
except KeyError:
                # a non-existent lookup.
continue
if len(elements):
dicts = [key_dict]
while dicts:
key_dict = dicts.pop(-1)
# can't be empty or would not exist
item, value = key_dict.iteritems().next()
if type(value) == dict:
# push keys
dicts.extend(key_dict.itervalues())
else:
# yield keys
for value in key_dict.itervalues():
# each value is the key:value:node refs tuple
# ready to yield.
yield (self, ) + value
else:
# the last thing looked up was a terminal element
yield (self, ) + key_dict
def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys):
"""See BTreeIndex._find_ancestors."""
# The api can be implemented as a trivial overlay on top of
# iter_entries, it is not an efficient implementation, but it at least
# gets the job done.
found_keys = set()
search_keys = set()
for index, key, value, refs in self.iter_entries(keys):
parent_keys = refs[ref_list_num]
found_keys.add(key)
parent_map[key] = parent_keys
search_keys.update(parent_keys)
# Figure out what, if anything, was missing
missing_keys.update(set(keys).difference(found_keys))
search_keys = search_keys.difference(parent_map)
return search_keys
def key_count(self):
"""Return an estimate of the number of keys in this index.
For GraphIndex the estimate is exact.
"""
if self._key_count is None:
self._read_and_parse([_HEADER_READV])
return self._key_count
def _lookup_keys_via_location(self, location_keys):
"""Public interface for implementing bisection.
If _buffer_all has been called, then all the data for the index is in
memory, and this method should not be called, as it uses a separate
cache because it cannot pre-resolve all indices, which buffer_all does
for performance.
:param location_keys: A list of location(byte offset), key tuples.
:return: A list of (location_key, result) tuples as expected by
bzrlib.bisect_multi.bisect_multi_bytes.
"""
# Possible improvements:
# - only bisect lookup each key once
# - sort the keys first, and use that to reduce the bisection window
# -----
# this progresses in three parts:
# read data
# parse it
# attempt to answer the question from the now in memory data.
# build the readv request
# for each location, ask for 800 bytes - much more than rows we've seen
# anywhere.
readv_ranges = []
for location, key in location_keys:
# can we answer from cache?
if self._bisect_nodes and key in self._bisect_nodes:
# We have the key parsed.
continue
index = self._parsed_key_index(key)
if (len(self._parsed_key_map) and
self._parsed_key_map[index][0] <= key and
(self._parsed_key_map[index][1] >= key or
# end of the file has been parsed
self._parsed_byte_map[index][1] == self._size)):
# the key has been parsed, so no lookup is needed even if its
# not present.
continue
# - if we have examined this part of the file already - yes
index = self._parsed_byte_index(location)
if (len(self._parsed_byte_map) and
self._parsed_byte_map[index][0] <= location and
self._parsed_byte_map[index][1] > location):
# the byte region has been parsed, so no read is needed.
continue
length = 800
if location + length > self._size:
length = self._size - location
# todo, trim out parsed locations.
if length > 0:
readv_ranges.append((location, length))
# read the header if needed
if self._bisect_nodes is None:
readv_ranges.append(_HEADER_READV)
self._read_and_parse(readv_ranges)
result = []
if self._nodes is not None:
# _read_and_parse triggered a _buffer_all because we requested the
# whole data range
for location, key in location_keys:
if key not in self._nodes: # not present
result.append(((location, key), False))
elif self.node_ref_lists:
value, refs = self._nodes[key]
result.append(((location, key),
(self, key, value, refs)))
else:
result.append(((location, key),
(self, key, self._nodes[key])))
return result
# generate results:
# - figure out <, >, missing, present
# - result present references so we can return them.
# keys that we cannot answer until we resolve references
pending_references = []
pending_locations = set()
for location, key in location_keys:
# can we answer from cache?
if key in self._bisect_nodes:
# the key has been parsed, so no lookup is needed
if self.node_ref_lists:
# the references may not have been all parsed.
value, refs = self._bisect_nodes[key]
wanted_locations = []
for ref_list in refs:
for ref in ref_list:
if ref not in self._keys_by_offset:
wanted_locations.append(ref)
if wanted_locations:
pending_locations.update(wanted_locations)
pending_references.append((location, key))
continue
result.append(((location, key), (self, key,
value, self._resolve_references(refs))))
else:
result.append(((location, key),
(self, key, self._bisect_nodes[key])))
continue
else:
# has the region the key should be in, been parsed?
index = self._parsed_key_index(key)
if (self._parsed_key_map[index][0] <= key and
(self._parsed_key_map[index][1] >= key or
# end of the file has been parsed
self._parsed_byte_map[index][1] == self._size)):
result.append(((location, key), False))
continue
# no, is the key above or below the probed location:
# get the range of the probed & parsed location
index = self._parsed_byte_index(location)
# if the key is below the start of the range, its below
if key < self._parsed_key_map[index][0]:
direction = -1
else:
direction = +1
result.append(((location, key), direction))
readv_ranges = []
# lookup data to resolve references
for location in pending_locations:
length = 800
if location + length > self._size:
length = self._size - location
# TODO: trim out parsed locations (e.g. if the 800 is into the
# parsed region trim it, and dont use the adjust_for_latency
# facility)
if length > 0:
readv_ranges.append((location, length))
self._read_and_parse(readv_ranges)
if self._nodes is not None:
# The _read_and_parse triggered a _buffer_all, grab the data and
# return it
for location, key in pending_references:
value, refs = self._nodes[key]
result.append(((location, key), (self, key, value, refs)))
return result
for location, key in pending_references:
# answer key references we had to look-up-late.
value, refs = self._bisect_nodes[key]
result.append(((location, key), (self, key,
value, self._resolve_references(refs))))
return result
def _parse_header_from_bytes(self, bytes):
"""Parse the header from a region of bytes.
:param bytes: The data to parse.
:return: An offset, data tuple such as readv yields, for the unparsed
data. (which may length 0).
"""
signature = bytes[0:len(self._signature())]
if not signature == self._signature():
raise errors.BadIndexFormatSignature(self._name, GraphIndex)
lines = bytes[len(self._signature()):].splitlines()
options_line = lines[0]
if not options_line.startswith(_OPTION_NODE_REFS):
raise errors.BadIndexOptions(self)
try:
self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):])
except ValueError:
raise errors.BadIndexOptions(self)
options_line = lines[1]
if not options_line.startswith(_OPTION_KEY_ELEMENTS):
raise errors.BadIndexOptions(self)
try:
self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):])
except ValueError:
raise errors.BadIndexOptions(self)
options_line = lines[2]
if not options_line.startswith(_OPTION_LEN):
raise errors.BadIndexOptions(self)
try:
self._key_count = int(options_line[len(_OPTION_LEN):])
except ValueError:
raise errors.BadIndexOptions(self)
# calculate the bytes we have processed
header_end = (len(signature) + len(lines[0]) + len(lines[1]) +
len(lines[2]) + 3)
self._parsed_bytes(0, None, header_end, None)
# setup parsing state
self._expected_elements = 3 + self._key_length
# raw data keyed by offset
self._keys_by_offset = {}
# keys with the value and node references
self._bisect_nodes = {}
return header_end, bytes[header_end:]
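    # Illustrative sketch (hypothetical sizes, not part of the original
    # module): for a signature of S bytes and option lines of lengths a, b
    # and c, the region marked parsed by _parsed_bytes(0, None, header_end,
    # None) is
    #     header_end = S + a + b + c + 3    # +3 for the three '\n' terminators
    # and the method hands back (header_end, bytes[header_end:]), i.e. the
    # offset of the first node record plus the still-unparsed remainder.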
def _parse_region(self, offset, data):
"""Parse node data returned from a readv operation.
:param offset: The byte offset the data starts at.
:param data: The data to parse.
"""
# trim the data.
# end first:
end = offset + len(data)
high_parsed = offset
while True:
# Trivial test - if the current index's end is within the
# low-matching parsed range, we're done.
index = self._parsed_byte_index(high_parsed)
if end < self._parsed_byte_map[index][1]:
return
# print "[%d:%d]" % (offset, end), \
# self._parsed_byte_map[index:index + 2]
high_parsed, last_segment = self._parse_segment(
offset, data, end, index)
if last_segment:
return
def _parse_segment(self, offset, data, end, index):
"""Parse one segment of data.
:param offset: Where 'data' begins in the file.
:param data: Some data to parse a segment of.
:param end: Where data ends
:param index: The current index into the parsed bytes map.
        :return: high_parsed_byte, last_segment.
high_parsed_byte is the location of the highest parsed byte in this
segment, last_segment is True if the parsed segment is the last
possible one in the data block.
"""
# default is to use all data
trim_end = None
        # accommodate overlap with data before this.
if offset < self._parsed_byte_map[index][1]:
# overlaps the lower parsed region
# skip the parsed data
trim_start = self._parsed_byte_map[index][1] - offset
# don't trim the start for \n
start_adjacent = True
elif offset == self._parsed_byte_map[index][1]:
# abuts the lower parsed region
# use all data
trim_start = None
# do not trim anything
start_adjacent = True
else:
# does not overlap the lower parsed region
# use all data
trim_start = None
# but trim the leading \n
start_adjacent = False
if end == self._size:
# lines up to the end of all data:
# use it all
trim_end = None
# do not strip to the last \n
end_adjacent = True
last_segment = True
elif index + 1 == len(self._parsed_byte_map):
# at the end of the parsed data
# use it all
trim_end = None
# but strip to the last \n
end_adjacent = False
last_segment = True
elif end == self._parsed_byte_map[index + 1][0]:
            # butts up against the next parsed region
# use it all
trim_end = None
# do not strip to the last \n
end_adjacent = True
last_segment = True
elif end > self._parsed_byte_map[index + 1][0]:
# overlaps into the next parsed region
# only consider the unparsed data
trim_end = self._parsed_byte_map[index + 1][0] - offset
            # do not strip to the last \n as we know it's an entire record
end_adjacent = True
last_segment = end < self._parsed_byte_map[index + 1][1]
else:
# does not overlap into the next region
# use it all
trim_end = None
# but strip to the last \n
end_adjacent = False
last_segment = True
# now find bytes to discard if needed
if not start_adjacent:
# work around python bug in rfind
if trim_start is None:
trim_start = data.find('\n') + 1
else:
trim_start = data.find('\n', trim_start) + 1
if not (trim_start != 0):
raise AssertionError('no \n was present')
# print 'removing start', offset, trim_start, repr(data[:trim_start])
if not end_adjacent:
# work around python bug in rfind
if trim_end is None:
trim_end = data.rfind('\n') + 1
else:
trim_end = data.rfind('\n', None, trim_end) + 1
if not (trim_end != 0):
raise AssertionError('no \n was present')
# print 'removing end', offset, trim_end, repr(data[trim_end:])
# adjust offset and data to the parseable data.
trimmed_data = data[trim_start:trim_end]
if not (trimmed_data):
raise AssertionError('read unneeded data [%d:%d] from [%d:%d]'
% (trim_start, trim_end, offset, offset + len(data)))
if trim_start:
offset += trim_start
# print "parsing", repr(trimmed_data)
# splitlines mangles the \r delimiters.. don't use it.
lines = trimmed_data.split('\n')
del lines[-1]
pos = offset
first_key, last_key, nodes, _ = self._parse_lines(lines, pos)
for key, value in nodes:
self._bisect_nodes[key] = value
self._parsed_bytes(offset, first_key,
offset + len(trimmed_data), last_key)
return offset + len(trimmed_data), last_segment
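    # Illustrative sketch of the trimming rules above (hypothetical offsets,
    # not part of the original module): with _parsed_byte_map == [(0, 100)]
    # and a readv chunk covering [90, 300):
    #   * offset 90 < parsed end 100, so trim_start = 100 - 90 = 10 and
    #     start_adjacent is True (no search for a leading '\n' is needed);
    #   * with no higher parsed region and end != self._size, end_adjacent is
    #     False, so the tail is cut back to the last '\n' to keep whole lines.
    # The surviving bytes are parsed and recorded via _parsed_bytes().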
def _parse_lines(self, lines, pos):
key = None
first_key = None
trailers = 0
nodes = []
for line in lines:
if line == '':
# must be at the end
if self._size:
if not (self._size == pos + 1):
raise AssertionError("%s %s" % (self._size, pos))
trailers += 1
continue
elements = line.split('\0')
if len(elements) != self._expected_elements:
raise errors.BadIndexData(self)
# keys are tuples. Each element is a string that may occur many
# times, so we intern them to save space. AB, RC, 200807
key = tuple([intern(element) for element in elements[:self._key_length]])
if first_key is None:
first_key = key
absent, references, value = elements[-3:]
ref_lists = []
for ref_string in references.split('\t'):
ref_lists.append(tuple([
int(ref) for ref in ref_string.split('\r') if ref
]))
ref_lists = tuple(ref_lists)
self._keys_by_offset[pos] = (key, absent, ref_lists, value)
pos += len(line) + 1 # +1 for the \n
if absent:
continue
if self.node_ref_lists:
node_value = (value, ref_lists)
else:
node_value = value
nodes.append((key, node_value))
# print "parsed ", key
return first_key, key, nodes, trailers
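    # Illustrative sketch of one node line (hypothetical key/value bytes, not
    # part of the original module).  Each non-trailer line carries
    # _expected_elements NUL-separated fields: the key elements, then absent,
    # references and value.  With _key_length == 1 the line
    #     "key1\x00\x00120\r200\t\x00some value"
    # parses to key ('key1',), absent == '' (present), reference lists
    # ((120, 200), ()) and value 'some value'.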
def _parsed_bytes(self, start, start_key, end, end_key):
"""Mark the bytes from start to end as parsed.
Calling self._parsed_bytes(1,2) will mark one byte (the one at offset
1) as parsed.
        :param start: The start of the parsed region.
        :param start_key: The key of the first node in the parsed region.
        :param end: The end of the parsed region.
        :param end_key: The key of the last node in the parsed region.
"""
index = self._parsed_byte_index(start)
new_value = (start, end)
new_key = (start_key, end_key)
if index == -1:
# first range parsed is always the beginning.
self._parsed_byte_map.insert(index, new_value)
self._parsed_key_map.insert(index, new_key)
return
# four cases:
# new region
# extend lower region
# extend higher region
# combine two regions
if (index + 1 < len(self._parsed_byte_map) and
self._parsed_byte_map[index][1] == start and
self._parsed_byte_map[index + 1][0] == end):
# combine two regions
self._parsed_byte_map[index] = (self._parsed_byte_map[index][0],
self._parsed_byte_map[index + 1][1])
self._parsed_key_map[index] = (self._parsed_key_map[index][0],
self._parsed_key_map[index + 1][1])
del self._parsed_byte_map[index + 1]
del self._parsed_key_map[index + 1]
elif self._parsed_byte_map[index][1] == start:
# extend the lower entry
self._parsed_byte_map[index] = (
self._parsed_byte_map[index][0], end)
self._parsed_key_map[index] = (
self._parsed_key_map[index][0], end_key)
elif (index + 1 < len(self._parsed_byte_map) and
self._parsed_byte_map[index + 1][0] == end):
# extend the higher entry
self._parsed_byte_map[index + 1] = (
start, self._parsed_byte_map[index + 1][1])
self._parsed_key_map[index + 1] = (
start_key, self._parsed_key_map[index + 1][1])
else:
# new entry
self._parsed_byte_map.insert(index + 1, new_value)
self._parsed_key_map.insert(index + 1, new_key)
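    # Illustrative sketch of how the map evolves (hypothetical offsets, not
    # part of the original module), starting from
    #     _parsed_byte_map == [(0, 100), (300, 400)]:
    #     _parsed_bytes(100, k1, 200, k2) -> [(0, 200), (300, 400)]  # extend lower
    #     _parsed_bytes(200, k2, 300, k3) -> [(0, 400)]              # combine regions
    #     _parsed_bytes(500, k4, 600, k5) -> [(0, 400), (500, 600)]  # new entry
    # _parsed_key_map is updated in step so key lookups can bisect into it.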
def _read_and_parse(self, readv_ranges):
"""Read the ranges and parse the resulting data.
:param readv_ranges: A prepared readv range list.
"""
if not readv_ranges:
return
if self._nodes is None and self._bytes_read * 2 >= self._size:
# We've already read more than 50% of the file and we are about to
# request more data, just _buffer_all() and be done
self._buffer_all()
return
base_offset = self._base_offset
if base_offset != 0:
# Rewrite the ranges for the offset
readv_ranges = [(start+base_offset, size)
for start, size in readv_ranges]
readv_data = self._transport.readv(self._name, readv_ranges, True,
self._size + self._base_offset)
# parse
for offset, data in readv_data:
offset -= base_offset
self._bytes_read += len(data)
if offset < 0:
# transport.readv() expanded to extra data which isn't part of
# this index
data = data[-offset:]
offset = 0
if offset == 0 and len(data) == self._size:
# We read the whole range, most likely because the
# Transport upcast our readv ranges into one long request
# for enough total data to grab the whole index.
self._buffer_all(StringIO(data))
return
if self._bisect_nodes is None:
# this must be the start
if not (offset == 0):
raise AssertionError()
offset, data = self._parse_header_from_bytes(data)
# print readv_ranges, "[%d:%d]" % (offset, offset + len(data))
self._parse_region(offset, data)
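    # Illustrative sketch (hypothetical numbers, not part of the original
    # module): for a 10000 byte index, once self._bytes_read reaches 5000 the
    # next request simply falls back to _buffer_all() -- at that point more
    # partial reads would cost more round trips than fetching the remainder
    # in one go.  The same shortcut fires when a single readv answer happens
    # to cover the whole index.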
def _signature(self):
"""The file signature for this index type."""
return _SIGNATURE
def validate(self):
"""Validate that everything in the index can be accessed."""
# iter_all validates completely at the moment, so just do that.
for node in self.iter_all_entries():
pass
class CombinedGraphIndex(object):
"""A GraphIndex made up from smaller GraphIndices.
The backing indices must implement GraphIndex, and are presumed to be
static data.
Queries against the combined index will be made against the first index,
and then the second and so on. The order of indices can thus influence
performance significantly. For example, if one index is on local disk and a
second on a remote server, the local disk index should be before the other
in the index list.
Also, queries tend to need results from the same indices as previous
queries. So the indices will be reordered after every query to put the
indices that had the result(s) of that query first (while otherwise
preserving the relative ordering).
"""
def __init__(self, indices, reload_func=None):
"""Create a CombinedGraphIndex backed by indices.
:param indices: An ordered list of indices to query for data.
:param reload_func: A function to call if we find we are missing an
index. Should have the form reload_func() => True/False to indicate
if reloading actually changed anything.
"""
self._indices = indices
self._reload_func = reload_func
# Sibling indices are other CombinedGraphIndex that we should call
# _move_to_front_by_name on when we auto-reorder ourself.
self._sibling_indices = []
# A list of names that corresponds to the instances in self._indices,
# so _index_names[0] is always the name for _indices[0], etc. Sibling
# indices must all use the same set of names as each other.
self._index_names = [None] * len(self._indices)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
', '.join(map(repr, self._indices)))
def clear_cache(self):
"""See GraphIndex.clear_cache()"""
for index in self._indices:
index.clear_cache()
def get_parent_map(self, keys):
"""See graph.StackedParentsProvider.get_parent_map"""
search_keys = set(keys)
if _mod_revision.NULL_REVISION in search_keys:
search_keys.discard(_mod_revision.NULL_REVISION)
found_parents = {_mod_revision.NULL_REVISION:[]}
else:
found_parents = {}
for index, key, value, refs in self.iter_entries(search_keys):
parents = refs[0]
if not parents:
parents = (_mod_revision.NULL_REVISION,)
found_parents[key] = parents
return found_parents
has_key = _has_key_from_parent_map
def insert_index(self, pos, index, name=None):
"""Insert a new index in the list of indices to query.
:param pos: The position to insert the index.
:param index: The index to insert.
:param name: a name for this index, e.g. a pack name. These names can
be used to reflect index reorderings to related CombinedGraphIndex
instances that use the same names. (see set_sibling_indices)
"""
self._indices.insert(pos, index)
self._index_names.insert(pos, name)
def iter_all_entries(self):
"""Iterate over all keys within the index
Duplicate keys across child indices are presumed to have the same
value and are only reported once.
:return: An iterable of (index, key, reference_lists, value).
There is no defined order for the result iteration - it will be in
the most efficient order for the index.
"""
seen_keys = set()
while True:
try:
for index in self._indices:
for node in index.iter_all_entries():
if node[1] not in seen_keys:
yield node
seen_keys.add(node[1])
return
except errors.NoSuchFile:
self._reload_or_raise()
def iter_entries(self, keys):
"""Iterate over keys within the index.
Duplicate keys across child indices are presumed to have the same
value and are only reported once.
:param keys: An iterable providing the keys to be retrieved.
:return: An iterable of (index, key, reference_lists, value). There is
no defined order for the result iteration - it will be in the most
efficient order for the index.
"""
keys = set(keys)
hit_indices = []
while True:
try:
for index in self._indices:
if not keys:
break
index_hit = False
for node in index.iter_entries(keys):
keys.remove(node[1])
yield node
index_hit = True
if index_hit:
hit_indices.append(index)
break
except errors.NoSuchFile:
self._reload_or_raise()
self._move_to_front(hit_indices)
def iter_entries_prefix(self, keys):
"""Iterate over keys within the index using prefix matching.
Duplicate keys across child indices are presumed to have the same
value and are only reported once.
Prefix matching is applied within the tuple of a key, not to within
the bytestring of each key element. e.g. if you have the keys ('foo',
'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
only the former key is returned.
:param keys: An iterable providing the key prefixes to be retrieved.
Each key prefix takes the form of a tuple the length of a key, but
with the last N elements 'None' rather than a regular bytestring.
The first element cannot be 'None'.
:return: An iterable as per iter_all_entries, but restricted to the
keys with a matching prefix to those supplied. No additional keys
will be returned, and every match that is in the index will be
returned.
"""
keys = set(keys)
if not keys:
return
seen_keys = set()
hit_indices = []
while True:
try:
for index in self._indices:
index_hit = False
for node in index.iter_entries_prefix(keys):
if node[1] in seen_keys:
continue
seen_keys.add(node[1])
yield node
index_hit = True
if index_hit:
hit_indices.append(index)
break
except errors.NoSuchFile:
self._reload_or_raise()
self._move_to_front(hit_indices)
def _move_to_front(self, hit_indices):
"""Rearrange self._indices so that hit_indices are first.
Order is maintained as much as possible, e.g. the first unhit index
will be the first index in _indices after the hit_indices, and the
hit_indices will be present in exactly the order they are passed to
_move_to_front.
_move_to_front propagates to all objects in self._sibling_indices by
calling _move_to_front_by_name.
"""
if self._indices[:len(hit_indices)] == hit_indices:
# The 'hit_indices' are already at the front (and in the same
# order), no need to re-order
return
hit_names = self._move_to_front_by_index(hit_indices)
for sibling_idx in self._sibling_indices:
sibling_idx._move_to_front_by_name(hit_names)
def _move_to_front_by_index(self, hit_indices):
"""Core logic for _move_to_front.
Returns a list of names corresponding to the hit_indices param.
"""
indices_info = zip(self._index_names, self._indices)
if 'index' in debug.debug_flags:
trace.mutter('CombinedGraphIndex reordering: currently %r, '
'promoting %r', indices_info, hit_indices)
hit_names = []
unhit_names = []
new_hit_indices = []
unhit_indices = []
for offset, (name, idx) in enumerate(indices_info):
if idx in hit_indices:
hit_names.append(name)
new_hit_indices.append(idx)
if len(new_hit_indices) == len(hit_indices):
# We've found all of the hit entries, everything else is
# unhit
unhit_names.extend(self._index_names[offset+1:])
unhit_indices.extend(self._indices[offset+1:])
break
else:
unhit_names.append(name)
unhit_indices.append(idx)
self._indices = new_hit_indices + unhit_indices
self._index_names = hit_names + unhit_names
if 'index' in debug.debug_flags:
trace.mutter('CombinedGraphIndex reordered: %r', self._indices)
return hit_names
def _move_to_front_by_name(self, hit_names):
"""Moves indices named by 'hit_names' to front of the search order, as
described in _move_to_front.
"""
# Translate names to index instances, and then call
# _move_to_front_by_index.
indices_info = zip(self._index_names, self._indices)
hit_indices = []
for name, idx in indices_info:
if name in hit_names:
hit_indices.append(idx)
self._move_to_front_by_index(hit_indices)
def find_ancestry(self, keys, ref_list_num):
"""Find the complete ancestry for the given set of keys.
Note that this is a whole-ancestry request, so it should be used
sparingly.
:param keys: An iterable of keys to look for
:param ref_list_num: The reference list which references the parents
we care about.
:return: (parent_map, missing_keys)
"""
# XXX: make this call _move_to_front?
missing_keys = set()
parent_map = {}
keys_to_lookup = set(keys)
generation = 0
while keys_to_lookup:
# keys that *all* indexes claim are missing, stop searching them
generation += 1
all_index_missing = None
# print 'gen\tidx\tsub\tn_keys\tn_pmap\tn_miss'
# print '%4d\t\t\t%4d\t%5d\t%5d' % (generation, len(keys_to_lookup),
# len(parent_map),
# len(missing_keys))
for index_idx, index in enumerate(self._indices):
# TODO: we should probably be doing something with
# 'missing_keys' since we've already determined that
# those revisions have not been found anywhere
index_missing_keys = set()
# Find all of the ancestry we can from this index
# keep looking until the search_keys set is empty, which means
# things we didn't find should be in index_missing_keys
search_keys = keys_to_lookup
sub_generation = 0
# print ' \t%2d\t\t%4d\t%5d\t%5d' % (
# index_idx, len(search_keys),
# len(parent_map), len(index_missing_keys))
while search_keys:
sub_generation += 1
# TODO: ref_list_num should really be a parameter, since
# CombinedGraphIndex does not know what the ref lists
# mean.
search_keys = index._find_ancestors(search_keys,
ref_list_num, parent_map, index_missing_keys)
# print ' \t \t%2d\t%4d\t%5d\t%5d' % (
# sub_generation, len(search_keys),
# len(parent_map), len(index_missing_keys))
# Now set whatever was missing to be searched in the next index
keys_to_lookup = index_missing_keys
if all_index_missing is None:
all_index_missing = set(index_missing_keys)
else:
all_index_missing.intersection_update(index_missing_keys)
if not keys_to_lookup:
break
if all_index_missing is None:
# There were no indexes, so all search keys are 'missing'
missing_keys.update(keys_to_lookup)
keys_to_lookup = None
else:
missing_keys.update(all_index_missing)
keys_to_lookup.difference_update(all_index_missing)
return parent_map, missing_keys
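    # Illustrative note on the loop above (not part of the original module):
    # each generation asks every child index for the ancestry of the keys
    # still unresolved; only keys that *no* index could supply (the
    # intersection kept in all_index_missing) are reported as missing, and
    # everything that was found accumulates in parent_map.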
def key_count(self):
"""Return an estimate of the number of keys in this index.
For CombinedGraphIndex this is approximated by the sum of the keys of
the child indices. As child indices may have duplicate keys this can
have a maximum error of the number of child indices * largest number of
keys in any index.
"""
while True:
try:
return sum((index.key_count() for index in self._indices), 0)
except errors.NoSuchFile:
self._reload_or_raise()
missing_keys = _missing_keys_from_parent_map
def _reload_or_raise(self):
"""We just got a NoSuchFile exception.
Try to reload the indices, if it fails, just raise the current
exception.
"""
if self._reload_func is None:
raise
exc_type, exc_value, exc_traceback = sys.exc_info()
trace.mutter('Trying to reload after getting exception: %s',
exc_value)
if not self._reload_func():
# We tried to reload, but nothing changed, so we fail anyway
trace.mutter('_reload_func indicated nothing has changed.'
' Raising original exception.')
raise exc_type, exc_value, exc_traceback
def set_sibling_indices(self, sibling_combined_graph_indices):
"""Set the CombinedGraphIndex objects to reorder after reordering self.
"""
self._sibling_indices = sibling_combined_graph_indices
def validate(self):
"""Validate that everything in the index can be accessed."""
while True:
try:
for index in self._indices:
index.validate()
return
except errors.NoSuchFile:
self._reload_or_raise()
class InMemoryGraphIndex(GraphIndexBuilder):
"""A GraphIndex which operates entirely out of memory and is mutable.
This is designed to allow the accumulation of GraphIndex entries during a
single write operation, where the accumulated entries need to be immediately
available - for example via a CombinedGraphIndex.
"""
def add_nodes(self, nodes):
"""Add nodes to the index.
:param nodes: An iterable of (key, node_refs, value) entries to add.
"""
if self.reference_lists:
for (key, value, node_refs) in nodes:
self.add_node(key, value, node_refs)
else:
for (key, value) in nodes:
self.add_node(key, value)
def iter_all_entries(self):
"""Iterate over all keys within the index
:return: An iterable of (index, key, reference_lists, value). There is no
defined order for the result iteration - it will be in the most
efficient order for the index (in this case dictionary hash order).
"""
if 'evil' in debug.debug_flags:
trace.mutter_callsite(3,
"iter_all_entries scales with size of history.")
if self.reference_lists:
for key, (absent, references, value) in self._nodes.iteritems():
if not absent:
yield self, key, value, references
else:
for key, (absent, references, value) in self._nodes.iteritems():
if not absent:
yield self, key, value
def iter_entries(self, keys):
"""Iterate over keys within the index.
:param keys: An iterable providing the keys to be retrieved.
:return: An iterable of (index, key, value, reference_lists). There is no
defined order for the result iteration - it will be in the most
efficient order for the index (keys iteration order in this case).
"""
# Note: See BTreeBuilder.iter_entries for an explanation of why we
# aren't using set().intersection() here
nodes = self._nodes
keys = [key for key in keys if key in nodes]
if self.reference_lists:
for key in keys:
node = nodes[key]
if not node[0]:
yield self, key, node[2], node[1]
else:
for key in keys:
node = nodes[key]
if not node[0]:
yield self, key, node[2]
def iter_entries_prefix(self, keys):
"""Iterate over keys within the index using prefix matching.
Prefix matching is applied within the tuple of a key, not to within
the bytestring of each key element. e.g. if you have the keys ('foo',
'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
only the former key is returned.
:param keys: An iterable providing the key prefixes to be retrieved.
Each key prefix takes the form of a tuple the length of a key, but
with the last N elements 'None' rather than a regular bytestring.
The first element cannot be 'None'.
:return: An iterable as per iter_all_entries, but restricted to the
keys with a matching prefix to those supplied. No additional keys
will be returned, and every match that is in the index will be
returned.
"""
        # XXX: Too much duplication with the GraphIndex class; consider finding
# a good place to pull out the actual common logic.
keys = set(keys)
if not keys:
return
if self._key_length == 1:
for key in keys:
# sanity check
if key[0] is None:
raise errors.BadIndexKey(key)
if len(key) != self._key_length:
raise errors.BadIndexKey(key)
node = self._nodes[key]
if node[0]:
continue
if self.reference_lists:
yield self, key, node[2], node[1]
else:
yield self, key, node[2]
return
nodes_by_key = self._get_nodes_by_key()
for key in keys:
# sanity check
if key[0] is None:
raise errors.BadIndexKey(key)
if len(key) != self._key_length:
raise errors.BadIndexKey(key)
# find what it refers to:
key_dict = nodes_by_key
elements = list(key)
# find the subdict to return
try:
while len(elements) and elements[0] is not None:
key_dict = key_dict[elements[0]]
elements.pop(0)
except KeyError:
                # a non-existent lookup.
continue
if len(elements):
dicts = [key_dict]
while dicts:
key_dict = dicts.pop(-1)
# can't be empty or would not exist
item, value = key_dict.iteritems().next()
if type(value) == dict:
# push keys
dicts.extend(key_dict.itervalues())
else:
# yield keys
for value in key_dict.itervalues():
yield (self, ) + value
else:
yield (self, ) + key_dict
def key_count(self):
"""Return an estimate of the number of keys in this index.
For InMemoryGraphIndex the estimate is exact.
"""
return len(self._nodes) - len(self._absent_keys)
def validate(self):
"""In memory index's have no known corruption at the moment."""
class GraphIndexPrefixAdapter(object):
"""An adapter between GraphIndex with different key lengths.
Queries against this will emit queries against the adapted Graph with the
prefix added, queries for all items use iter_entries_prefix. The returned
nodes will have their keys and node references adjusted to remove the
prefix. Finally, an add_nodes_callback can be supplied - when called the
nodes and references being added will have prefix prepended.
"""
def __init__(self, adapted, prefix, missing_key_length,
add_nodes_callback=None):
"""Construct an adapter against adapted with prefix."""
self.adapted = adapted
self.prefix_key = prefix + (None,)*missing_key_length
self.prefix = prefix
self.prefix_len = len(prefix)
self.add_nodes_callback = add_nodes_callback
def add_nodes(self, nodes):
"""Add nodes to the index.
:param nodes: An iterable of (key, node_refs, value) entries to add.
"""
        # save nodes in case it's an iterator
nodes = tuple(nodes)
translated_nodes = []
try:
            # Add the prefix to each reference; node_refs is a tuple of tuples,
            # so split it apart and add the prefix to each internal reference
for (key, value, node_refs) in nodes:
adjusted_references = (
tuple(tuple(self.prefix + ref_node for ref_node in ref_list)
for ref_list in node_refs))
translated_nodes.append((self.prefix + key, value,
adjusted_references))
except ValueError:
# XXX: TODO add an explicit interface for getting the reference list
# status, to handle this bit of user-friendliness in the API more
# explicitly.
for (key, value) in nodes:
translated_nodes.append((self.prefix + key, value))
self.add_nodes_callback(translated_nodes)
def add_node(self, key, value, references=()):
"""Add a node to the index.
:param key: The key. keys are non-empty tuples containing
as many whitespace-free utf8 bytestrings as the key length
defined for this index.
:param references: An iterable of iterables of keys. Each is a
reference to another key.
:param value: The value to associate with the key. It may be any
bytes as long as it does not contain \0 or \n.
"""
self.add_nodes(((key, value, references), ))
def _strip_prefix(self, an_iter):
"""Strip prefix data from nodes and return it."""
for node in an_iter:
# cross checks
if node[1][:self.prefix_len] != self.prefix:
raise errors.BadIndexData(self)
for ref_list in node[3]:
for ref_node in ref_list:
if ref_node[:self.prefix_len] != self.prefix:
raise errors.BadIndexData(self)
yield node[0], node[1][self.prefix_len:], node[2], (
tuple(tuple(ref_node[self.prefix_len:] for ref_node in ref_list)
for ref_list in node[3]))
def iter_all_entries(self):
"""Iterate over all keys within the index
iter_all_entries is implemented against the adapted index using
iter_entries_prefix.
:return: An iterable of (index, key, reference_lists, value). There is no
defined order for the result iteration - it will be in the most
efficient order for the index (in this case dictionary hash order).
"""
return self._strip_prefix(self.adapted.iter_entries_prefix([self.prefix_key]))
def iter_entries(self, keys):
"""Iterate over keys within the index.
:param keys: An iterable providing the keys to be retrieved.
:return: An iterable of (index, key, value, reference_lists). There is no
defined order for the result iteration - it will be in the most
efficient order for the index (keys iteration order in this case).
"""
return self._strip_prefix(self.adapted.iter_entries(
self.prefix + key for key in keys))
def iter_entries_prefix(self, keys):
"""Iterate over keys within the index using prefix matching.
Prefix matching is applied within the tuple of a key, not to within
the bytestring of each key element. e.g. if you have the keys ('foo',
'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
only the former key is returned.
:param keys: An iterable providing the key prefixes to be retrieved.
Each key prefix takes the form of a tuple the length of a key, but
with the last N elements 'None' rather than a regular bytestring.
The first element cannot be 'None'.
:return: An iterable as per iter_all_entries, but restricted to the
keys with a matching prefix to those supplied. No additional keys
will be returned, and every match that is in the index will be
returned.
"""
return self._strip_prefix(self.adapted.iter_entries_prefix(
self.prefix + key for key in keys))
def key_count(self):
"""Return an estimate of the number of keys in this index.
For GraphIndexPrefixAdapter this is relatively expensive - key
iteration with the prefix is done.
"""
return len(list(self.iter_all_entries()))
def validate(self):
"""Call the adapted's validate."""
self.adapted.validate()
| gpl-2.0 | -1,710,965,484,435,133,000 | 41.883298 | 92 | 0.56449 | false | 4.342495 | false | false | false |
scalable-networks/ext | gnuradio-3.7.0.1/gr-trellis/examples/python/test_turbo_equalization1.py | 13 | 5480 | #!/usr/bin/env python
from gnuradio import gr
from gnuradio import trellis, digital, filter, blocks
from gnuradio import eng_notation
import math
import sys
import random
import fsm_utils
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
def make_rx(tb,fo,fi,dimensionality,tot_constellation,K,interleaver,IT,Es,N0,type):
metrics_in = trellis.metrics_f(fi.O(),dimensionality,tot_constellation,digital.TRELLIS_EUCLIDEAN) # data preprocessing to generate metrics for innner SISO
scale = blocks.multiply_const_ff(1.0/N0)
gnd = blocks.vector_source_f([0],True);
inter=[]
deinter=[]
siso_in=[]
siso_out=[]
# generate all blocks
for it in range(IT):
inter.append( trellis.permutation(interleaver.K(),interleaver.INTER(),fi.I(),gr.sizeof_float) )
siso_in.append( trellis.siso_f(fi,K,0,-1,True,False,type) )
deinter.append( trellis.permutation(interleaver.K(),interleaver.DEINTER(),fi.I(),gr.sizeof_float) )
if it < IT-1:
siso_out.append( trellis.siso_f(fo,K,0,-1,False,True,type) )
else:
siso_out.append( trellis.viterbi_s(fo,K,0,-1) ) # no soft outputs needed
# connect first stage
tb.connect (gnd,inter[0])
tb.connect (metrics_in,scale)
tb.connect (scale,(siso_in[0],1))
# connect the rest
for it in range(IT):
if it < IT-1:
tb.connect (scale,(siso_in[it+1],1))
tb.connect (siso_in[it],deinter[it],(siso_out[it],1))
tb.connect (gnd,(siso_out[it],0))
tb.connect (siso_out[it],inter[it+1])
tb.connect (inter[it],(siso_in[it],0))
else:
tb.connect (siso_in[it],deinter[it],siso_out[it])
tb.connect (inter[it],(siso_in[it],0))
return (metrics_in,siso_out[IT-1])
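# Illustrative note on the wiring above (not from the original example): each
# of the IT turbo iterations runs an inner SISO over the ISI/channel trellis
# (fi) followed by de-interleaving into an outer SISO over the code trellis
# (fo); the outer soft output is re-interleaved and fed back as the a priori
# input (port 0) of the next inner stage, while port 1 always receives the
# channel metrics scaled by 1/N0.  The final outer stage is a hard-output
# trellis.viterbi_s, so the last pass emits symbol decisions instead of soft
# values.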
def run_test (fo,fi,interleaver,Kb,bitspersymbol,K,channel,modulation,dimensionality,tot_constellation,Es,N0,IT,seed):
tb = gr.top_block ()
L = len(channel)
# TX
# this for loop is TOO slow in python!!!
packet = [0]*(K)
random.seed(seed)
for i in range(len(packet)):
packet[i] = random.randint(0, 2**bitspersymbol - 1) # random symbols
src = blocks.vector_source_s(packet,False)
enc_out = trellis.encoder_ss(fo,0) # initial state = 0
inter = trellis.permutation(interleaver.K(),interleaver.INTER(),1,gr.sizeof_short)
mod = digital.chunks_to_symbols_sf(modulation[1],modulation[0])
# CHANNEL
isi = filter.fir_filter_fff(1,channel)
add = blocks.add_ff()
noise = analog.noise_source_f(analog.GR_GAUSSIAN,math.sqrt(N0/2),seed)
# RX
(head,tail) = make_rx(tb,fo,fi,dimensionality,tot_constellation,K,interleaver,IT,Es,N0,trellis.TRELLIS_MIN_SUM)
dst = blocks.vector_sink_s();
tb.connect (src,enc_out,inter,mod)
tb.connect (mod,isi,(add,0))
tb.connect (noise,(add,1))
tb.connect (add,head)
tb.connect (tail,dst)
tb.run()
data = dst.data()
ntotal = len(data)
nright=0
for i in range(ntotal):
if packet[i]==data[i]:
nright=nright+1
#else:
#print "Error in ", i
return (ntotal,ntotal-nright)
def main(args):
nargs = len (args)
if nargs == 3:
fname_out=args[0]
esn0_db=float(args[1])
rep=int(args[2])
else:
sys.stderr.write ('usage: test_turbo_equalization.py fsm_name_out Es/No_db repetitions\n')
sys.exit (1)
# system parameters
Kb=64*16 # packet size in bits (multiple of 16)
    modulation = fsm_utils.pam4 # see fsm_utils.py for available predefined modulations
    channel = fsm_utils.c_channel # see fsm_utils.py for available predefined test channels
fo=trellis.fsm(fname_out) # get the outer FSM specification from a file
fi=trellis.fsm(len(modulation[1]),len(channel)) # generate the FSM automatically
if fo.O() != fi.I():
sys.stderr.write ('Incompatible cardinality between outer and inner FSM.\n')
sys.exit (1)
bitspersymbol = int(round(math.log(fo.I())/math.log(2))) # bits per FSM input symbol
K=Kb/bitspersymbol # packet size in trellis steps
interleaver=trellis.interleaver(K,666) # construct a random interleaver
tot_channel = fsm_utils.make_isi_lookup(modulation,channel,True) # generate the lookup table (normalize energy to 1)
dimensionality = tot_channel[0]
tot_constellation = tot_channel[1]
if len(tot_constellation)/dimensionality != fi.O():
sys.stderr.write ('Incompatible FSM output cardinality and lookup table size.\n')
sys.exit (1)
N0=pow(10.0,-esn0_db/10.0); # noise variance
IT = 3 # number of turbo iterations
tot_s=0 # total number of transmitted shorts
terr_s=0 # total number of shorts in error
terr_p=0 # total number of packets in error
for i in range(rep):
(s,e)=run_test(fo,fi,interleaver,Kb,bitspersymbol,K,channel,modulation,dimensionality,tot_constellation,1,N0,IT,-long(666+i)) # run experiment with different seed to get different noise realizations
tot_s=tot_s+s
terr_s=terr_s+e
terr_p=terr_p+(terr_s!=0)
if ((i+1)%10==0) : # display progress
print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
# estimate of the (short or bit) error rate
print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
if __name__ == '__main__':
main (sys.argv[1:])
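# Example invocation (hypothetical FSM file name, matching the usage string
# above):
#     ./test_turbo_equalization1.py awgn1o2_4.fsm 6.0 100
# i.e. the outer FSM definition file, Es/N0 in dB, and the number of packet
# repetitions to simulate.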
| gpl-2.0 | -7,890,500,674,692,229,000 | 35.052632 | 206 | 0.644708 | false | 2.896406 | false | false | false |
astagi/django-cms | cms/test_utils/project/placeholderapp/migrations_django/0001_initial.py | 66 | 4526 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cms.models.fields
import cms.test_utils.project.placeholderapp.models
class Migration(migrations.Migration):
dependencies = [
('cms', '0002_auto_20140816_1918'),
]
operations = [
migrations.CreateModel(
name='DynamicPlaceholderSlotExample',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('char_1', models.CharField(max_length=255, verbose_name='char_1')),
('char_2', models.CharField(max_length=255, verbose_name='char_2')),
('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname=cms.test_utils.project.placeholderapp.models.dynamic_placeholder_1, related_name='dynamic_pl_1', editable=False)),
('placeholder_2', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname=cms.test_utils.project.placeholderapp.models.dynamic_placeholder_2, related_name='dynamic_pl_2', editable=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Example1',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('char_1', models.CharField(max_length=255, verbose_name='char_1')),
('char_2', models.CharField(max_length=255, verbose_name='char_2')),
('char_3', models.CharField(max_length=255, verbose_name='char_3')),
('char_4', models.CharField(max_length=255, verbose_name='char_4')),
('date_field', models.DateField(null=True)),
('placeholder', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder', editable=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MultilingualExample1',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_1', editable=False)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MultilingualExample1Translation',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('char_1', models.CharField(max_length=255, verbose_name='char_1')),
('char_2', models.CharField(max_length=255, verbose_name='char_2')),
('language_code', models.CharField(db_index=True, max_length=15)),
('master', models.ForeignKey(null=True, to='placeholderapp.MultilingualExample1', related_name='translations', editable=False)),
],
options={
'db_table': 'placeholderapp_multilingualexample1_translation',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='multilingualexample1translation',
unique_together=set([('language_code', 'master')]),
),
migrations.CreateModel(
name='TwoPlaceholderExample',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('char_1', models.CharField(max_length=255, verbose_name='char_1')),
('char_2', models.CharField(max_length=255, verbose_name='char_2')),
('char_3', models.CharField(max_length=255, verbose_name='char_3')),
('char_4', models.CharField(max_length=255, verbose_name='char_4')),
('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_1', related_name='p1', editable=False)),
('placeholder_2', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_2', related_name='p2', editable=False)),
],
options={
},
bases=(models.Model,),
),
]
| bsd-3-clause | -2,308,999,552,748,953,000 | 50.431818 | 225 | 0.58418 | false | 4.099638 | false | false | false |
lhupfeldt/multiconf | test/invalid_values_test.py | 1 | 19200 | # Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import sys
import os.path
from pytest import raises
from multiconf import mc_config, ConfigItem, ConfigException, MC_REQUIRED
from multiconf.envs import EnvFactory
from .utils.utils import config_error, next_line_num, replace_ids, lines_in, start_file_line
from .utils.messages import already_printed_msg, config_error_mc_required_expected, mc_required_expected
from .utils.messages import config_error_never_received_value_expected
from .utils.tstclasses import ItemWithAA
from .utils.invalid_values_classes import McRequiredInInitL1, McRequiredInInitL3
minor_version = sys.version_info[1]
_utils = os.path.join(os.path.dirname(__file__), 'utils')
ef1_prod_pp = EnvFactory()
pp1 = ef1_prod_pp.Env('pp')
prod1 = ef1_prod_pp.Env('prod')
def ce(line_num, *lines):
return config_error(__file__, line_num, *lines)
_attribute_mc_required_expected = mc_required_expected.format(attr='aa', env=prod1)
_mc_required_one_error_expected_ex = """There was 1 error when defining item: {
"__class__": "ItemWithAA #as: 'ItemWithAA', id: 0000, not-frozen",
"env": {
"__class__": "Env",
"name": "%(env_name)s"
},
"aa": "MC_REQUIRED"
}""" + already_printed_msg
def test_attribute_mc_required_env(capsys):
errorline = [None]
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with ItemWithAA() as cr:
errorline[0] = next_line_num()
cr.setattr('aa', prod=MC_REQUIRED, pp="hello")
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_never_received_value_expected.format(env=prod1),
start_file_line(__file__, errorline[0]),
'^ConfigError: ' + _attribute_mc_required_expected,
)
assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod')
def test_attribute_mc_required_mc_force_env(capsys):
errorline = [None]
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with ItemWithAA() as cr:
errorline[0] = next_line_num()
cr.setattr('aa', default=MC_REQUIRED, mc_force=True)
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_mc_required_expected.format(attr='aa', env=pp1),
)
assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='pp')
def test_attribute_mc_required_default(capsys):
errorline = [None]
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with ItemWithAA() as cr:
errorline[0] = next_line_num()
cr.setattr('aa', default=MC_REQUIRED, pp="hello")
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_never_received_value_expected.format(env=prod1),
start_file_line(__file__, errorline[0]),
'^ConfigError: ' + _attribute_mc_required_expected,
)
assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod')
def test_attribute_mc_required_default_resolved_with_default_value_in_mc_init(capsys):
class ItemWithAAMcInitResolve(ItemWithAA):
def mc_init(self):
super().mc_init()
self.aa = 'Hi'
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with ItemWithAAMcInitResolve() as cr:
cr.setattr('aa', default=MC_REQUIRED, pp="hello")
cfg = config(pp1)
assert cfg.ItemWithAAMcInitResolve.aa == 'hello'
cfg = config(prod1)
assert cfg.ItemWithAAMcInitResolve.aa == 'Hi'
def test_attribute_mc_required_default_resolved_with_default_env_specific_value_in_mc_init(capsys):
class ItemWithAAMcInitResolve(ItemWithAA):
def mc_init(self):
super().mc_init()
self.setattr('aa', prod='Hi')
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with ItemWithAAMcInitResolve() as cr:
cr.setattr('aa', default=MC_REQUIRED, pp="hello")
cfg = config(pp1)
assert cfg.ItemWithAAMcInitResolve.aa == 'hello'
cfg = config(prod1)
assert cfg.ItemWithAAMcInitResolve.aa == 'Hi'
def test_attribute_mc_required_init(capsys):
errorline = [None]
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with ItemWithAA(aa=MC_REQUIRED) as ci:
errorline[0] = next_line_num()
ci.setattr('aa', pp="hello")
_sout, serr = capsys.readouterr()
print(serr)
print("errorline[0]", errorline[0])
assert serr == ce(errorline[0], _attribute_mc_required_expected)
assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod')
def test_attribute_mc_required_in_with(capsys):
errorline = [None]
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with ItemWithAA() as cr:
errorline[0] = next_line_num()
cr.setattr('aa', prod="hi", pp=MC_REQUIRED)
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_never_received_value_expected.format(env=pp1),
start_file_line(__file__, errorline[0]),
'^ConfigError: ' + mc_required_expected.format(attr='aa', env=pp1),
)
assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='pp')
def test_attribute_mc_required_in_with_default_all_overridden():
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with ItemWithAA() as cr:
# TODO: This should actually not be allowed, it does not make sense!
cr.setattr('aa', default=MC_REQUIRED, pp="hello", prod="hi")
cr = config(prod1).ItemWithAA
assert cr.aa == "hi"
def test_attribute_mc_required_init_args_all_overridden():
class Requires(ConfigItem):
def __init__(self, aa=MC_REQUIRED):
super().__init__()
self.aa = aa
@mc_config(ef1_prod_pp, load_now=True)
def config1(root):
with ConfigItem() as cr:
Requires(aa=3)
cr = config1(prod1).ConfigItem
assert cr.Requires.aa == 3
@mc_config(ef1_prod_pp, load_now=True)
def config2(root):
with ConfigItem() as cr:
with Requires() as rq:
rq.aa = 3
cr = config2(prod1).ConfigItem
assert cr.Requires.aa == 3
def test_attribute_mc_required_args_all_overridden_in_mc_init():
class Requires(ConfigItem):
def __init__(self, aa=MC_REQUIRED):
super().__init__()
self.aa = aa
def mc_init(self):
self.aa = 7
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
Requires()
cr = config(prod1)
assert cr.Requires.aa == 7
def test_attribute_mc_required_args_partial_set_in_init_overridden_in_mc_init():
class Requires(ConfigItem):
def __init__(self, aa=MC_REQUIRED):
super().__init__()
# Partial assignment is allowed in init
self.setattr('aa', prod=aa)
self.setattr('b', default=MC_REQUIRED, prod=2)
def mc_init(self):
self.aa = 7
self.b = 7
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
Requires()
cr = config(prod1)
assert cr.Requires.aa == 7
assert cr.Requires.b == 2
cr = config(pp1)
assert cr.Requires.aa == 7
assert cr.Requires.b == 7
def test_attribute_mc_required_args_partial_set_in_init_overridden_in_with():
class Requires(ConfigItem):
def __init__(self, aa=MC_REQUIRED):
super().__init__()
# Partial assignment is allowed in init
self.setattr('aa', prod=aa)
self.setattr('b', default=MC_REQUIRED, prod=2)
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with Requires() as rq:
rq.aa = 8
rq.setattr('b', pp=8)
cr = config(prod1)
assert cr.Requires.aa == 8
assert cr.Requires.b == 2
cr = config(pp1)
assert cr.Requires.aa == 8
assert cr.Requires.b == 8
def test_attribute_mc_required_args_set_in_init_overridden_in_with():
class Requires(ConfigItem):
def __init__(self, aa=MC_REQUIRED):
super().__init__()
self.aa = aa
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with Requires() as rq:
rq.aa = 7
cr = config(prod1)
assert cr.Requires.aa == 7
cr = config(pp1)
assert cr.Requires.aa == 7
_attribute_mc_required_requires_expected_ex = """There was 1 error when defining item: {
"__class__": "Requires #as: 'Requires', id: 0000, not-frozen",
"env": {
"__class__": "Env",
"name": "pp"
},
"aa": "MC_REQUIRED"
}""" + already_printed_msg
def test_attribute_mc_required_init_args_missing_env_value(capsys):
errorline = [None]
class Requires(ConfigItem):
def __init__(self, aa=MC_REQUIRED):
super().__init__()
self.aa = aa
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
with Requires() as rq:
errorline[0] = next_line_num()
rq.setattr('aa', prod='hi')
_sout, serr = capsys.readouterr()
print(_sout)
assert serr == ce(errorline[0], mc_required_expected.format(attr='aa', env=pp1))
assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_requires_expected_ex
_attribute_mc_required_required_init_arg_missing_with_expected_ex = """There was 1 error when defining item: {{
"__class__": "{0} #as: '{0}', id: 0000, not-frozen",
"env": {{
"__class__": "Env",
"name": "pp"
}},
"aa": "MC_REQUIRED"
}}""" + already_printed_msg
def test_attribute_mc_required_init_args_missing_with(capsys):
errorline = [None]
    # If the error occurs on the last object, and that is not under a with statement, then the line will be the @mc_config
with raises(ConfigException) as exinfo:
errorline[0] = next_line_num() + (1 if minor_version > 7 else 0)
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
McRequiredInInitL1()
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_never_received_value_expected.format(env=pp1),
'^File "{}/invalid_values_classes.py", line 8'.format(_utils),
mc_required_expected.format(attr='aa', env=pp1),
)
exp = _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1')
got = replace_ids(str(exinfo.value), False)
assert got == exp
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config0(root):
with McRequiredInInitL1():
errorline[0] = next_line_num()
pass
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_never_received_value_expected.format(env=pp1),
'^File "{}/invalid_values_classes.py", line 8'.format(_utils),
mc_required_expected.format(attr='aa', env=pp1),
)
assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1')
    # If the error occurs on the last object, and that is not under a with statement, then the line will be the @mc_config
with raises(ConfigException) as exinfo:
errorline[0] = next_line_num() + (1 if minor_version > 7 else 0)
@mc_config(ef1_prod_pp, load_now=True)
def config1(root):
McRequiredInInitL3()
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_never_received_value_expected.format(env=pp1),
'^File "{}/invalid_values_classes.py", line 8'.format(_utils),
mc_required_expected.format(attr='aa', env=pp1),
)
assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL3')
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config2(root):
with McRequiredInInitL3():
errorline[0] = next_line_num()
pass
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_never_received_value_expected.format(env=pp1),
'^File "{}/invalid_values_classes.py", line 8'.format(_utils),
mc_required_expected.format(attr='aa', env=pp1),
)
assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL3')
def test_attribute_mc_required_init_args_missing_previous_item(capsys):
errorline = [None]
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
errorline[0] = next_line_num()
McRequiredInInitL1()
McRequiredInInitL3()
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
config_error_never_received_value_expected.format(env=pp1),
'^File "{}/invalid_values_classes.py", line 8'.format(_utils),
mc_required_expected.format(attr='aa', env=pp1),
)
assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1')
def test_attribute_mc_required_init_assign_all_overridden():
class Requires(ConfigItem):
def __init__(self, aa=MC_REQUIRED):
super().__init__()
self.aa = aa
@mc_config(ef1_prod_pp, load_now=True)
def config(root):
Requires(aa=3)
cr = config(prod1)
assert cr.Requires.aa == 3
@mc_config(ef1_prod_pp, load_now=True)
def config(_):
with Requires() as rq:
rq.aa = 3
cr = config(prod1)
assert cr.Requires.aa == 3
_attribute_mc_required_env_in_init_expected_ex = """There were %(num_errors)s errors when defining item: {
"__class__": "MyRoot #as: 'MyRoot', id: 0000, not-frozen",
"env": {
"__class__": "Env",
"name": "pp"
},
"aa": "MC_REQUIRED",
"bb": "MC_REQUIRED"
}""" + already_printed_msg
def test_attribute_setattr_mc_required_force_in_init(capsys):
errorline = [None]
class MyRoot(ConfigItem):
def __init__(self):
super().__init__()
errorline[0] = next_line_num()
self.setattr('aa', default=MC_REQUIRED, mc_force=True)
self.setattr('bb', default=MC_REQUIRED, mc_force=True)
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(_):
MyRoot()
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_mc_required_expected.format(attr='aa', env=pp1),
config_error_mc_required_expected.format(attr='bb', env=pp1),
)
assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_env_in_init_expected_ex % dict(num_errors=2)
def test_multiple_attributes_mc_required_init_not_set(capsys):
errorline = [None]
class ItemWithAAABBCC(ConfigItem):
def __init__(self):
super().__init__()
self.aa = MC_REQUIRED
self.bb = MC_REQUIRED
self.cc = MC_REQUIRED
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(_):
with ConfigItem() as cr:
errorline[0] = next_line_num()
ItemWithAAABBCC()
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorline[0]),
config_error_mc_required_expected.format(attr='aa', env=pp1),
config_error_mc_required_expected.format(attr='bb', env=pp1),
config_error_mc_required_expected.format(attr='cc', env=pp1),
)
def test_multiple_attributes_mc_required_mc_init_not_set(capsys):
errorlines = [None, None]
class ItemWithAAABBCC(ConfigItem):
def __init__(self):
super().__init__()
self.aa = MC_REQUIRED
self.bb = MC_REQUIRED
self.cc = MC_REQUIRED
def mc_init(self):
super().__init__()
errorlines[0] = next_line_num()
self.setattr('aa', default=MC_REQUIRED)
self.setattr('bb', default=MC_REQUIRED, pp='Hello')
errorlines[1] = next_line_num()
self.cc = MC_REQUIRED
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(_):
with ConfigItem() as cr:
ItemWithAAABBCC()
_sout, serr = capsys.readouterr()
assert lines_in(
serr,
start_file_line(__file__, errorlines[0]),
config_error_mc_required_expected.format(attr='aa', env=pp1),
start_file_line(__file__, errorlines[1]),
config_error_mc_required_expected.format(attr='cc', env=pp1),
)
_multiple_attributes_mc_required_env_expected_ex = """There %(ww)s %(num_errors)s %(err)s when defining item: {
"__class__": "MyRoot #as: 'MyRoot', id: 0000, not-frozen",
"env": {
"__class__": "Env",
"name": "pp"
},
"aa": "hello",
"bb": "MC_REQUIRED"
}""" + already_printed_msg
def test_multiple_attributes_mc_required_env(capsys):
errorline = [None]
class MyRoot(ConfigItem):
def __init__(self):
super().__init__()
self.aa = MC_REQUIRED
self.bb = MC_REQUIRED
with raises(ConfigException) as exinfo:
@mc_config(ef1_prod_pp, load_now=True)
def config(_):
with MyRoot() as cr:
errorline[0] = next_line_num()
cr.setattr('aa', prod=MC_REQUIRED, pp="hello")
cr.setattr('bb', prod=1, pp=MC_REQUIRED)
_sout, serr = capsys.readouterr()
#assert ce(errorline[0], mc_required_expected.format(attr='aa', env=prod1)) in serr
assert ce(errorline[0] + 1, mc_required_expected.format(attr='bb', env=pp1)) in serr
assert replace_ids(str(exinfo.value), False) == _multiple_attributes_mc_required_env_expected_ex % dict(ww='was', num_errors=1, err='error')
| bsd-3-clause | -5,479,040,787,900,449,000 | 32.217993 | 146 | 0.60651 | false | 3.318928 | true | false | false |
embeddedarm/android_external_chromium_org | build/android/tombstones.py | 28 | 5953 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Find the most recent tombstone file(s) on all connected devices
# and prints their stacks.
#
# Assumes tombstone file was created with current symbols.
import datetime
import logging
import multiprocessing
import os
import subprocess
import sys
import optparse
from pylib import android_commands
def _ListTombstones(adb):
"""List the tombstone files on the device.
Args:
adb: An instance of AndroidCommands.
Yields:
Tuples of (tombstone filename, date time of file on device).
"""
lines = adb.RunShellCommand('TZ=UTC su -c ls -a -l /data/tombstones')
for line in lines:
if 'tombstone' in line and not 'No such file or directory' in line:
details = line.split()
t = datetime.datetime.strptime(details[-3] + ' ' + details[-2],
'%Y-%m-%d %H:%M')
yield details[-1], t
def _GetDeviceDateTime(adb):
"""Determine the date time on the device.
Args:
adb: An instance of AndroidCommands.
Returns:
A datetime instance.
"""
device_now_string = adb.RunShellCommand('TZ=UTC date')
return datetime.datetime.strptime(
device_now_string[0], '%a %b %d %H:%M:%S %Z %Y')
def _GetTombstoneData(adb, tombstone_file):
"""Retrieve the tombstone data from the device
Args:
    adb: An instance of AndroidCommands.
    tombstone_file: the tombstone to retrieve
Returns:
A list of lines
"""
return adb.GetProtectedFileContents('/data/tombstones/' + tombstone_file)
def _EraseTombstone(adb, tombstone_file):
"""Deletes a tombstone from the device.
Args:
tombstone_file: the tombstone to delete.
"""
return adb.RunShellCommandWithSU('rm /data/tombstones/' + tombstone_file)
def _ResolveSymbols(tombstone_data, include_stack):
"""Run the stack tool for given tombstone input.
Args:
tombstone_data: a list of strings of tombstone data.
include_stack: boolean whether to include stack data in output.
Yields:
A string for each line of resolved stack output.
"""
stack_tool = os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'android_platform', 'development',
'scripts', 'stack')
proc = subprocess.Popen(stack_tool, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
output = proc.communicate(input='\n'.join(tombstone_data))[0]
for line in output.split('\n'):
if not include_stack and 'Stack Data:' in line:
break
yield line
def _ResolveTombstone(tombstone):
lines = []
lines += [tombstone['file'] + ' created on ' + str(tombstone['time']) +
', about this long ago: ' +
(str(tombstone['device_now'] - tombstone['time']) +
' Device: ' + tombstone['serial'])]
print '\n'.join(lines)
print 'Resolving...'
lines += _ResolveSymbols(tombstone['data'], tombstone['stack'])
return lines
def _ResolveTombstones(jobs, tombstones):
"""Resolve a list of tombstones.
Args:
jobs: the number of jobs to use with multiprocess.
tombstones: a list of tombstones.
"""
if not tombstones:
print 'No device attached? Or no tombstones?'
return
if len(tombstones) == 1:
data = _ResolveTombstone(tombstones[0])
else:
pool = multiprocessing.Pool(processes=jobs)
data = pool.map(_ResolveTombstone, tombstones)
data = ['\n'.join(d) for d in data]
print '\n'.join(data)
def _GetTombstonesForDevice(adb, options):
"""Returns a list of tombstones on a given adb connection.
Args:
adb: An instance of Androidcommands.
options: command line arguments from OptParse
"""
ret = []
all_tombstones = list(_ListTombstones(adb))
if not all_tombstones:
print 'No device attached? Or no tombstones?'
return ret
# Sort the tombstones in date order, descending
all_tombstones.sort(cmp=lambda a, b: cmp(b[1], a[1]))
# Only resolve the most recent unless --all-tombstones given.
tombstones = all_tombstones if options.all_tombstones else [all_tombstones[0]]
device_now = _GetDeviceDateTime(adb)
for tombstone_file, tombstone_time in tombstones:
ret += [{'serial': adb.Adb().GetSerialNumber(),
'device_now': device_now,
'time': tombstone_time,
'file': tombstone_file,
'stack': options.stack,
'data': _GetTombstoneData(adb, tombstone_file)}]
# Erase all the tombstones if desired.
if options.wipe_tombstones:
for tombstone_file, _ in all_tombstones:
_EraseTombstone(adb, tombstone_file)
return ret
def main():
parser = optparse.OptionParser()
parser.add_option('--device',
help='The serial number of the device. If not specified '
'will use all devices.')
parser.add_option('-a', '--all-tombstones', action='store_true',
help="""Resolve symbols for all tombstones, rather than just
the most recent""")
parser.add_option('-s', '--stack', action='store_true',
help='Also include symbols for stack data')
parser.add_option('-w', '--wipe-tombstones', action='store_true',
help='Erase all tombstones from device after processing')
parser.add_option('-j', '--jobs', type='int',
default=4,
help='Number of jobs to use when processing multiple '
'crash stacks.')
options, args = parser.parse_args()
if options.device:
devices = [options.device]
else:
devices = android_commands.GetAttachedDevices()
tombstones = []
for device in devices:
adb = android_commands.AndroidCommands(device)
tombstones += _GetTombstonesForDevice(adb, options)
_ResolveTombstones(options.jobs, tombstones)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -4,382,173,006,879,530,000 | 29.528205 | 80 | 0.645389 | false | 3.640979 | false | false | false |
datalogics-robb/scons | src/engine/SCons/Tool/tar.py | 2 | 2320 | """SCons.Tool.tar
Tool-specific initialization for tar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
tars = ['tar', 'gtar']
TarAction = SCons.Action.Action('$TARCOM', '$TARCOMSTR')
TarBuilder = SCons.Builder.Builder(action = TarAction,
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$TARSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for tar to an Environment."""
try:
bld = env['BUILDERS']['Tar']
except KeyError:
bld = TarBuilder
env['BUILDERS']['Tar'] = bld
env['TAR'] = env.Detect(tars) or 'gtar'
env['TARFLAGS'] = SCons.Util.CLVar('-c')
env['TARCOM'] = '$TAR $TARFLAGS -f $TARGET $SOURCES'
env['TARSUFFIX'] = '.tar'
def exists(env):
return env.Detect(tars)
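# A minimal usage sketch, not part of the original tool file, assuming an
# SConstruct that loads this tool; the target and source names are
# illustrative assumptions.
#
#     env = Environment(tools=['default', 'tar'])
#     env.Tar('dist/archive.tar', ['src', 'README'])
#
# The command line is driven by $TARCOM, i.e. "$TAR $TARFLAGS -f $TARGET
# $SOURCES", with TARFLAGS defaulting to '-c'.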
| mit | 3,870,170,223,994,117,000 | 33.626866 | 78 | 0.676293 | false | 4.041812 | false | false | false |
GoogleCloudPlatform/DataflowTemplates | v2/common/src/test/resources/PythonTextTransformerTest/transform.py | 1 | 2207 | """
Copyright (C) 2020 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
"""
A good transform function.
@param {string} inJson
@return {string} outJson
"""
import copy
import json
import sys
import traceback
def transform(event):
""" Return a Dict or List of Dict Objects. Return None to discard """
event['new_key'] = 'new_value'
# event = event
return event
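# Illustrative note, not part of the original template: based on _handle_result
# and the __main__ block below, each input line is a JSON object of the form
# {"id": ..., "event": {...}}, and every printed result is a JSON object with
# "status", "id", "event" and "error_message" fields.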
def _handle_result(input_data):
event_id = copy.deepcopy(input_data['id'])
event = copy.deepcopy(input_data['event'])
try:
transformed_event = transform(event)
if isinstance(transformed_event, list):
for row in transformed_event:
payload = json.dumps({'status': 'SUCCESS',
'id': event_id,
'event': row,
'error_message': None})
print(payload)
else:
payload = json.dumps({'status': 'SUCCESS',
'id': event_id,
'event': transformed_event,
'error_message': None})
print(payload)
except Exception as e:
stack_trace = traceback.format_exc()
payload = json.dumps({'status': 'FAILED',
'id': event_id,
'event': event,
'error_message': stack_trace})
print(payload)
if __name__ == '__main__':
# TODO: How do we handle the case where there are no messages
file_name = sys.argv[1]
data = []
with open(file_name, "r") as data_file:
for line in data_file:
data.append(json.loads(line))
if isinstance(data, list):
for event in data:
_handle_result(event)
else:
event = data
_handle_result(event)
exit()
| apache-2.0 | -6,333,046,332,267,350,000 | 29.232877 | 77 | 0.608065 | false | 4.064457 | false | false | false |
zifeishan/deepdive | examples/tutorial_example/step3-more-data/experiment-reports/v00001/code/udf/ext_has_spouse_features.py | 60 | 1304 | #! /usr/bin/env python
import sys
import ddlib # DeepDive python utility
ARR_DELIM = '~^~'
# For each input tuple
for row in sys.stdin:
parts = row.strip().split('\t')
if len(parts) != 6:
print >>sys.stderr, 'Failed to parse row:', row
continue
# Get all fields from a row
words = parts[0].split(ARR_DELIM)
relation_id = parts[1]
p1_start, p1_length, p2_start, p2_length = [int(x) for x in parts[2:]]
# Unpack input into tuples.
span1 = ddlib.Span(begin_word_id=p1_start, length=p1_length)
span2 = ddlib.Span(begin_word_id=p2_start, length=p2_length)
# Features for this pair come in here
features = set()
# Feature 1: Bag of words between the two phrases
words_between = ddlib.tokens_between_spans(words, span1, span2)
for word in words_between.elements:
features.add("word_between=" + word)
# Feature 2: Number of words between the two phrases
features.add("num_words_between=%s" % len(words_between.elements))
# Feature 3: Does the last word (last name) match?
last_word_left = ddlib.materialize_span(words, span1)[-1]
last_word_right = ddlib.materialize_span(words, span2)[-1]
if (last_word_left == last_word_right):
features.add("potential_last_name_match")
for feature in features:
print str(relation_id) + '\t' + feature
| apache-2.0 | -7,309,405,940,012,458,000 | 30.047619 | 72 | 0.682515 | false | 3.053864 | false | false | false |
JT5D/Alfred-Popclip-Sublime | Sublime Text 2/Python PEP8 Autoformat/libs/lib2to3/fixes/fix_intern.py | 7 | 1362 | # Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name(u"sys"), Name(u"intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.prefix = node.prefix
touch_import(None, u'sys', node)
return new
| gpl-2.0 | 8,113,873,073,803,947,000 | 29.954545 | 75 | 0.483113 | false | 4.177914 | false | false | false |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/auth/management/__init__.py | 126 | 2854 | """
Creates permissions for all installed apps that need permissions.
"""
from django.contrib.auth import models as auth_app
from django.db.models import get_models, signals
def _get_permission_codename(action, opts):
return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
"Returns (codename, name) for all permissions in the given opts."
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw)))
return perms + list(opts.permissions)
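# Illustrative example, not part of the original module: for a model class
# named "Article", the generated default permissions are
#   ('add_article', u'Can add article'),
#   ('change_article', u'Can change article'),
#   ('delete_article', u'Can delete article'),
# followed by any custom entries declared in the model's Meta.permissions.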
def create_permissions(app, created_models, verbosity, **kwargs):
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
ctype = ContentType.objects.get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a context_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
all_perms = set(auth_app.Permission.objects.filter(
content_type__in=ctypes,
).values_list(
"content_type", "codename"
))
for ctype, (codename, name) in searched_perms:
# If the permissions exists, move on.
if (ctype.pk, codename) in all_perms:
continue
p = auth_app.Permission.objects.create(
codename=codename,
name=name,
content_type=ctype
)
if verbosity >= 2:
print "Adding permission '%s'" % p
def create_superuser(app, created_models, verbosity, **kwargs):
from django.core.management import call_command
if auth_app.User in created_models and kwargs.get('interactive', True):
msg = ("\nYou just installed Django's auth system, which means you "
"don't have any superusers defined.\nWould you like to create one "
"now? (yes/no): ")
confirm = raw_input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = raw_input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True)
break
signals.post_syncdb.connect(create_permissions,
dispatch_uid = "django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
| bsd-3-clause | 759,113,971,445,881,100 | 36.552632 | 110 | 0.644008 | false | 3.963889 | false | false | false |
gimite/personfinder | app/vendors/xlrd/compdoc.py | 27 | 21226 | # -*- coding: cp1252 -*-
##
# Implements the minimal functionality required
# to extract a "Workbook" or "Book" stream (as one big string)
# from an OLE2 Compound Document file.
# <p>Copyright � 2005-2012 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# No part of the content of this file was derived from the works of David Giffin.
# 2008-11-04 SJM Avoid assertion error when -1 used instead of -2 for first_SID of empty SCSS [Frank Hoffsuemmer]
# 2007-09-08 SJM Warning message if sector sizes are extremely large.
# 2007-05-07 SJM Meaningful exception instead of IndexError if a SAT (sector allocation table) is corrupted.
# 2007-04-22 SJM Missing "<" in a struct.unpack call => can't open files on bigendian platforms.
from __future__ import print_function
import sys
from struct import unpack
from .timemachine import *
import array
##
# Magic cookie that should appear in the first 8 bytes of the file.
SIGNATURE = b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1"
EOCSID = -2
FREESID = -1
SATSID = -3
MSATSID = -4
EVILSID = -5
class CompDocError(Exception):
pass
class DirNode(object):
def __init__(self, DID, dent, DEBUG=0, logfile=sys.stdout):
# dent is the 128-byte directory entry
self.DID = DID
self.logfile = logfile
(cbufsize, self.etype, self.colour, self.left_DID, self.right_DID,
self.root_DID) = \
unpack('<HBBiii', dent[64:80])
(self.first_SID, self.tot_size) = \
unpack('<ii', dent[116:124])
if cbufsize == 0:
self.name = UNICODE_LITERAL('')
else:
self.name = unicode(dent[0:cbufsize-2], 'utf_16_le') # omit the trailing U+0000
self.children = [] # filled in later
self.parent = -1 # indicates orphan; fixed up later
self.tsinfo = unpack('<IIII', dent[100:116])
if DEBUG:
self.dump(DEBUG)
def dump(self, DEBUG=1):
fprintf(
self.logfile,
"DID=%d name=%r etype=%d DIDs(left=%d right=%d root=%d parent=%d kids=%r) first_SID=%d tot_size=%d\n",
self.DID, self.name, self.etype, self.left_DID,
self.right_DID, self.root_DID, self.parent, self.children, self.first_SID, self.tot_size
)
if DEBUG == 2:
# cre_lo, cre_hi, mod_lo, mod_hi = tsinfo
print("timestamp info", self.tsinfo, file=self.logfile)
def _build_family_tree(dirlist, parent_DID, child_DID):
if child_DID < 0: return
_build_family_tree(dirlist, parent_DID, dirlist[child_DID].left_DID)
dirlist[parent_DID].children.append(child_DID)
dirlist[child_DID].parent = parent_DID
_build_family_tree(dirlist, parent_DID, dirlist[child_DID].right_DID)
if dirlist[child_DID].etype == 1: # storage
_build_family_tree(dirlist, child_DID, dirlist[child_DID].root_DID)
##
# Compound document handler.
# @param mem The raw contents of the file, as a string, or as an mmap.mmap() object. The
# only operation it needs to support is slicing.
class CompDoc(object):
def __init__(self, mem, logfile=sys.stdout, DEBUG=0):
self.logfile = logfile
self.DEBUG = DEBUG
if mem[0:8] != SIGNATURE:
raise CompDocError('Not an OLE2 compound document')
if mem[28:30] != b'\xFE\xFF':
raise CompDocError('Expected "little-endian" marker, found %r' % mem[28:30])
revision, version = unpack('<HH', mem[24:28])
if DEBUG:
print("\nCompDoc format: version=0x%04x revision=0x%04x" % (version, revision), file=logfile)
self.mem = mem
ssz, sssz = unpack('<HH', mem[30:34])
if ssz > 20: # allows for 2**20 bytes i.e. 1MB
print("WARNING: sector size (2**%d) is preposterous; assuming 512 and continuing ..." \
% ssz, file=logfile)
ssz = 9
if sssz > ssz:
print("WARNING: short stream sector size (2**%d) is preposterous; assuming 64 and continuing ..." \
% sssz, file=logfile)
sssz = 6
self.sec_size = sec_size = 1 << ssz
self.short_sec_size = 1 << sssz
if self.sec_size != 512 or self.short_sec_size != 64:
print("@@@@ sec_size=%d short_sec_size=%d" % (self.sec_size, self.short_sec_size), file=logfile)
(
SAT_tot_secs, self.dir_first_sec_sid, _unused, self.min_size_std_stream,
SSAT_first_sec_sid, SSAT_tot_secs,
MSATX_first_sec_sid, MSATX_tot_secs,
# ) = unpack('<ii4xiiiii', mem[44:76])
) = unpack('<iiiiiiii', mem[44:76])
mem_data_len = len(mem) - 512
mem_data_secs, left_over = divmod(mem_data_len, sec_size)
if left_over:
#### raise CompDocError("Not a whole number of sectors")
mem_data_secs += 1
print("WARNING *** file size (%d) not 512 + multiple of sector size (%d)" \
% (len(mem), sec_size), file=logfile)
self.mem_data_secs = mem_data_secs # use for checking later
self.mem_data_len = mem_data_len
seen = self.seen = array.array('B', [0]) * mem_data_secs
if DEBUG:
print('sec sizes', ssz, sssz, sec_size, self.short_sec_size, file=logfile)
print("mem data: %d bytes == %d sectors" % (mem_data_len, mem_data_secs), file=logfile)
print("SAT_tot_secs=%d, dir_first_sec_sid=%d, min_size_std_stream=%d" \
% (SAT_tot_secs, self.dir_first_sec_sid, self.min_size_std_stream,), file=logfile)
print("SSAT_first_sec_sid=%d, SSAT_tot_secs=%d" % (SSAT_first_sec_sid, SSAT_tot_secs,), file=logfile)
print("MSATX_first_sec_sid=%d, MSATX_tot_secs=%d" % (MSATX_first_sec_sid, MSATX_tot_secs,), file=logfile)
nent = sec_size // 4 # number of SID entries in a sector
fmt = "<%di" % nent
trunc_warned = 0
#
# === build the MSAT ===
#
MSAT = list(unpack('<109i', mem[76:512]))
SAT_sectors_reqd = (mem_data_secs + nent - 1) // nent
expected_MSATX_sectors = max(0, (SAT_sectors_reqd - 109 + nent - 2) // (nent - 1))
actual_MSATX_sectors = 0
if MSATX_tot_secs == 0 and MSATX_first_sec_sid in (EOCSID, FREESID, 0):
# Strictly, if there is no MSAT extension, then MSATX_first_sec_sid
# should be set to EOCSID ... FREESID and 0 have been met in the wild.
pass # Presuming no extension
else:
sid = MSATX_first_sec_sid
while sid not in (EOCSID, FREESID, MSATSID):
# Above should be only EOCSID according to MS & OOo docs
# but Excel doesn't complain about FREESID. Zero is a valid
# sector number, not a sentinel.
if DEBUG > 1:
print('MSATX: sid=%d (0x%08X)' % (sid, sid), file=logfile)
if sid >= mem_data_secs:
msg = "MSAT extension: accessing sector %d but only %d in file" % (sid, mem_data_secs)
if DEBUG > 1:
print(msg, file=logfile)
break
raise CompDocError(msg)
elif sid < 0:
raise CompDocError("MSAT extension: invalid sector id: %d" % sid)
if seen[sid]:
raise CompDocError("MSAT corruption: seen[%d] == %d" % (sid, seen[sid]))
seen[sid] = 1
actual_MSATX_sectors += 1
if DEBUG and actual_MSATX_sectors > expected_MSATX_sectors:
print("[1]===>>>", mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, file=logfile)
offset = 512 + sec_size * sid
MSAT.extend(unpack(fmt, mem[offset:offset+sec_size]))
sid = MSAT.pop() # last sector id is sid of next sector in the chain
if DEBUG and actual_MSATX_sectors != expected_MSATX_sectors:
print("[2]===>>>", mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, file=logfile)
if DEBUG:
print("MSAT: len =", len(MSAT), file=logfile)
dump_list(MSAT, 10, logfile)
#
# === build the SAT ===
#
self.SAT = []
actual_SAT_sectors = 0
dump_again = 0
for msidx in xrange(len(MSAT)):
msid = MSAT[msidx]
if msid in (FREESID, EOCSID):
# Specification: the MSAT array may be padded with trailing FREESID entries.
# Toleration: a FREESID or EOCSID entry anywhere in the MSAT array will be ignored.
continue
if msid >= mem_data_secs:
if not trunc_warned:
print("WARNING *** File is truncated, or OLE2 MSAT is corrupt!!", file=logfile)
print("INFO: Trying to access sector %d but only %d available" \
% (msid, mem_data_secs), file=logfile)
trunc_warned = 1
MSAT[msidx] = EVILSID
dump_again = 1
continue
elif msid < -2:
raise CompDocError("MSAT: invalid sector id: %d" % msid)
if seen[msid]:
raise CompDocError("MSAT extension corruption: seen[%d] == %d" % (msid, seen[msid]))
seen[msid] = 2
actual_SAT_sectors += 1
if DEBUG and actual_SAT_sectors > SAT_sectors_reqd:
print("[3]===>>>", mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, actual_SAT_sectors, msid, file=logfile)
offset = 512 + sec_size * msid
self.SAT.extend(unpack(fmt, mem[offset:offset+sec_size]))
if DEBUG:
print("SAT: len =", len(self.SAT), file=logfile)
dump_list(self.SAT, 10, logfile)
# print >> logfile, "SAT ",
# for i, s in enumerate(self.SAT):
# print >> logfile, "entry: %4d offset: %6d, next entry: %4d" % (i, 512 + sec_size * i, s)
# print >> logfile, "%d:%d " % (i, s),
print(file=logfile)
if DEBUG and dump_again:
print("MSAT: len =", len(MSAT), file=logfile)
dump_list(MSAT, 10, logfile)
for satx in xrange(mem_data_secs, len(self.SAT)):
self.SAT[satx] = EVILSID
print("SAT: len =", len(self.SAT), file=logfile)
dump_list(self.SAT, 10, logfile)
#
# === build the directory ===
#
dbytes = self._get_stream(
self.mem, 512, self.SAT, self.sec_size, self.dir_first_sec_sid,
name="directory", seen_id=3)
dirlist = []
did = -1
for pos in xrange(0, len(dbytes), 128):
did += 1
dirlist.append(DirNode(did, dbytes[pos:pos+128], 0, logfile))
self.dirlist = dirlist
_build_family_tree(dirlist, 0, dirlist[0].root_DID) # and stand well back ...
if DEBUG:
for d in dirlist:
d.dump(DEBUG)
#
# === get the SSCS ===
#
sscs_dir = self.dirlist[0]
assert sscs_dir.etype == 5 # root entry
if sscs_dir.first_SID < 0 or sscs_dir.tot_size == 0:
# Problem reported by Frank Hoffsuemmer: some software was
# writing -1 instead of -2 (EOCSID) for the first_SID
# when the SCCS was empty. Not having EOCSID caused assertion
# failure in _get_stream.
# Solution: avoid calling _get_stream in any case when the
# SCSS appears to be empty.
self.SSCS = ""
else:
self.SSCS = self._get_stream(
self.mem, 512, self.SAT, sec_size, sscs_dir.first_SID,
sscs_dir.tot_size, name="SSCS", seen_id=4)
# if DEBUG: print >> logfile, "SSCS", repr(self.SSCS)
#
# === build the SSAT ===
#
self.SSAT = []
if SSAT_tot_secs > 0 and sscs_dir.tot_size == 0:
print("WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero", file=logfile)
if sscs_dir.tot_size > 0:
sid = SSAT_first_sec_sid
nsecs = SSAT_tot_secs
while sid >= 0 and nsecs > 0:
if seen[sid]:
raise CompDocError("SSAT corruption: seen[%d] == %d" % (sid, seen[sid]))
seen[sid] = 5
nsecs -= 1
start_pos = 512 + sid * sec_size
news = list(unpack(fmt, mem[start_pos:start_pos+sec_size]))
self.SSAT.extend(news)
sid = self.SAT[sid]
if DEBUG: print("SSAT last sid %d; remaining sectors %d" % (sid, nsecs), file=logfile)
assert nsecs == 0 and sid == EOCSID
if DEBUG:
print("SSAT", file=logfile)
dump_list(self.SSAT, 10, logfile)
if DEBUG:
print("seen", file=logfile)
dump_list(seen, 20, logfile)
def _get_stream(self, mem, base, sat, sec_size, start_sid, size=None, name='', seen_id=None):
# print >> self.logfile, "_get_stream", base, sec_size, start_sid, size
sectors = []
s = start_sid
if size is None:
# nothing to check against
while s >= 0:
if seen_id is not None:
if self.seen[s]:
raise CompDocError("%s corruption: seen[%d] == %d" % (name, s, self.seen[s]))
self.seen[s] = seen_id
start_pos = base + s * sec_size
sectors.append(mem[start_pos:start_pos+sec_size])
try:
s = sat[s]
except IndexError:
raise CompDocError(
"OLE2 stream %r: sector allocation table invalid entry (%d)" %
(name, s)
)
assert s == EOCSID
else:
todo = size
while s >= 0:
if seen_id is not None:
if self.seen[s]:
raise CompDocError("%s corruption: seen[%d] == %d" % (name, s, self.seen[s]))
self.seen[s] = seen_id
start_pos = base + s * sec_size
grab = sec_size
if grab > todo:
grab = todo
todo -= grab
sectors.append(mem[start_pos:start_pos+grab])
try:
s = sat[s]
except IndexError:
raise CompDocError(
"OLE2 stream %r: sector allocation table invalid entry (%d)" %
(name, s)
)
assert s == EOCSID
if todo != 0:
fprintf(self.logfile,
"WARNING *** OLE2 stream %r: expected size %d, actual size %d\n",
name, size, size - todo)
return b''.join(sectors)
def _dir_search(self, path, storage_DID=0):
# Return matching DirNode instance, or None
head = path[0]
tail = path[1:]
dl = self.dirlist
for child in dl[storage_DID].children:
if dl[child].name.lower() == head.lower():
et = dl[child].etype
if et == 2:
return dl[child]
if et == 1:
if not tail:
raise CompDocError("Requested component is a 'storage'")
return self._dir_search(tail, child)
dl[child].dump(1)
raise CompDocError("Requested stream is not a 'user stream'")
return None
##
# Interrogate the compound document's directory; return the stream as a string if found, otherwise
# return None.
# @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.
def get_named_stream(self, qname):
d = self._dir_search(qname.split("/"))
if d is None:
return None
if d.tot_size >= self.min_size_std_stream:
return self._get_stream(
self.mem, 512, self.SAT, self.sec_size, d.first_SID,
d.tot_size, name=qname, seen_id=d.DID+6)
else:
return self._get_stream(
self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID,
d.tot_size, name=qname + " (from SSCS)", seen_id=None)
##
# Interrogate the compound document's directory.
# If the named stream is not found, (None, 0, 0) will be returned.
# If the named stream is found and is contiguous within the original byte sequence ("mem")
# used when the document was opened,
# then (mem, offset_to_start_of_stream, length_of_stream) is returned.
# Otherwise a new string is built from the fragments and (new_string, 0, length_of_stream) is returned.
# @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.
def locate_named_stream(self, qname):
d = self._dir_search(qname.split("/"))
if d is None:
return (None, 0, 0)
if d.tot_size > self.mem_data_len:
raise CompDocError("%r stream length (%d bytes) > file data size (%d bytes)"
% (qname, d.tot_size, self.mem_data_len))
if d.tot_size >= self.min_size_std_stream:
result = self._locate_stream(
self.mem, 512, self.SAT, self.sec_size, d.first_SID,
d.tot_size, qname, d.DID+6)
if self.DEBUG:
print("\nseen", file=self.logfile)
dump_list(self.seen, 20, self.logfile)
return result
else:
return (
self._get_stream(
self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID,
d.tot_size, qname + " (from SSCS)", None),
0,
d.tot_size
)
def _locate_stream(self, mem, base, sat, sec_size, start_sid, expected_stream_size, qname, seen_id):
# print >> self.logfile, "_locate_stream", base, sec_size, start_sid, expected_stream_size
s = start_sid
if s < 0:
raise CompDocError("_locate_stream: start_sid (%d) is -ve" % start_sid)
p = -99 # dummy previous SID
start_pos = -9999
end_pos = -8888
slices = []
tot_found = 0
found_limit = (expected_stream_size + sec_size - 1) // sec_size
while s >= 0:
if self.seen[s]:
print("_locate_stream(%s): seen" % qname, file=self.logfile); dump_list(self.seen, 20, self.logfile)
raise CompDocError("%s corruption: seen[%d] == %d" % (qname, s, self.seen[s]))
self.seen[s] = seen_id
tot_found += 1
if tot_found > found_limit:
raise CompDocError(
"%s: size exceeds expected %d bytes; corrupt?"
% (qname, found_limit * sec_size)
) # Note: expected size rounded up to higher sector
if s == p+1:
# contiguous sectors
end_pos += sec_size
else:
# start new slice
if p >= 0:
# not first time
slices.append((start_pos, end_pos))
start_pos = base + s * sec_size
end_pos = start_pos + sec_size
p = s
s = sat[s]
assert s == EOCSID
assert tot_found == found_limit
# print >> self.logfile, "_locate_stream(%s): seen" % qname; dump_list(self.seen, 20, self.logfile)
if not slices:
# The stream is contiguous ... just what we like!
return (mem, start_pos, expected_stream_size)
slices.append((start_pos, end_pos))
# print >> self.logfile, "+++>>> %d fragments" % len(slices)
return (b''.join([mem[start_pos:end_pos] for start_pos, end_pos in slices]), 0, expected_stream_size)
# ==========================================================================================
def x_dump_line(alist, stride, f, dpos, equal=0):
print("%5d%s" % (dpos, " ="[equal]), end=' ', file=f)
for value in alist[dpos:dpos + stride]:
print(str(value), end=' ', file=f)
print(file=f)
def dump_list(alist, stride, f=sys.stdout):
def _dump_line(dpos, equal=0):
print("%5d%s" % (dpos, " ="[equal]), end=' ', file=f)
for value in alist[dpos:dpos + stride]:
print(str(value), end=' ', file=f)
print(file=f)
pos = None
oldpos = None
for pos in xrange(0, len(alist), stride):
if oldpos is None:
_dump_line(pos)
oldpos = pos
elif alist[pos:pos+stride] != alist[oldpos:oldpos+stride]:
if pos - oldpos > stride:
_dump_line(pos - stride, equal=1)
_dump_line(pos)
oldpos = pos
if oldpos is not None and pos is not None and pos != oldpos:
_dump_line(pos, equal=1)
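# A minimal usage sketch, not part of the original module; the file name
# 'book.xls' and the stream name u'Workbook' are assumptions for illustration.
#
#     import sys
#     mem = open('book.xls', 'rb').read()
#     cd = CompDoc(mem, logfile=sys.stdout, DEBUG=0)
#     mem2, offset, size = cd.locate_named_stream(u'Workbook')
#     workbook_bytes = mem2[offset:offset + size]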
| apache-2.0 | -1,583,750,774,636,250,000 | 43.871036 | 159 | 0.53317 | false | 3.508099 | false | false | false |
Distrotech/pycairo | examples/cairo_snippets/snippets_pdf.py | 9 | 1523 | #!/usr/bin/env python
"""Python version of cairo-demo/cairo_snippets/cairo_snippets_pdf.c
create a file for each example rather than one large file for all examples
"""
from __future__ import division, print_function
from math import pi as M_PI # used by many snippets
import sys
import cairo
if not cairo.HAS_PDF_SURFACE:
raise SystemExit ('cairo was not compiled with PDF support')
from snippets import snip_list, snippet_normalize
width_in_inches, height_in_inches = 2, 2
width_in_points, height_in_points = width_in_inches * 72, height_in_inches * 72
width, height = width_in_points, height_in_points # used by snippet_normalize()
def do_snippet (snippet):
if verbose_mode:
print('processing %s' % snippet)
filename = 'snippets/%s.pdf' % snippet
surface = cairo.PDFSurface (filename, width_in_points, height_in_points)
cr = cairo.Context (surface)
cr.save()
try:
fName = 'snippets/%s.py' % snippet
code = open(fName).read()
exec (code, globals(), locals())
except:
exc_type, exc_value = sys.exc_info()[:2]
print(exc_type, exc_value, file=sys.stderr)
else:
cr.restore()
cr.show_page()
surface.finish()
    if verbose_mode:
        print()
if __name__ == '__main__':
verbose_mode = True
if len(sys.argv) > 1 and sys.argv[1] == '-s':
verbose_mode = False
del sys.argv[1]
if len(sys.argv) > 1: # do specified snippets
snippet_list = sys.argv[1:]
else: # do all snippets
snippet_list = snip_list
for s in snippet_list:
do_snippet (s)
| gpl-3.0 | 1,881,779,671,919,894,300 | 25.258621 | 79 | 0.665135 | false | 3.226695 | false | false | false |
cosmoharrigan/pylearn2 | pylearn2/costs/gated_autoencoder.py | 39 | 5793 | """
Definitions of the cost for the gated-autoencoder.
"""
from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin
from pylearn2.space import VectorSpace
class SymmetricCost(DefaultDataSpecsMixin, Cost):
"""
Summary (Class representing the symmetric cost).
Subclasses can define the type of data they will use.
Mean reconstruction error is used for real valued data
and cross-Entropy loss is used for binary.
See Also
--------
"Gradient-based learning of higher-order image features"
"""
@staticmethod
def cost(x, y, rx, ry):
"""
Symmetric reconstruction cost.
Parameters
----------
x : tensor_like
Theano symbolic representing the first input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
y : tensor_like
            Theano symbolic representing the second input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
rx : tensor_like
Reconstruction of the first minibatch by the model.
ry: tensor_like
Reconstruction of the second minibatch by the model.
Returns
-------
Cost: theano_like expression
Representation of the cost
"""
raise NotImplementedError
def expr(self, model, data, *args, **kwargs):
"""
Returns a theano expression for the cost function.
Returns a symbolic expression for a cost function applied to the
minibatch of data.
Optionally, may return None. This represents that the cost function
is intractable but may be optimized via the get_gradients method.
Parameters
----------
model : a pylearn2 Model instance
data : a batch in cost.get_data_specs() form
kwargs : dict
Optional extra arguments. Not used by the base class.
"""
self.get_data_specs(model)[0].validate(data)
x, y = data
input_space = model.get_input_space()
if not isinstance(input_space.components[0], VectorSpace):
conv = input_space.components[0]
vec = VectorSpace(conv.get_total_dimension())
x = conv.format_as(x, vec)
if not isinstance(input_space.components[1], VectorSpace):
conv = input_space.components[1]
vec = VectorSpace(conv.get_total_dimension())
y = conv.format_as(y, vec)
rx, ry = model.reconstructXY((x, y))
return self.cost(x, y, rx, ry)
class SymmetricMSRE(SymmetricCost):
"""
Summary (Symmetric cost for real valued data).
See Also
--------
"Gradient-based learning of higher-order image features"
"""
@staticmethod
def cost(x, y, rx, ry):
"""
Summary (Definition of the cost).
Mean squared reconstruction error.
Parameters
----------
x : tensor_like
Theano symbolic representing the first input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
y : tensor_like
            Theano symbolic representing the second input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
rx : tensor_like
Reconstruction of the first minibatch by the model.
ry: tensor_like
Reconstruction of the second minibatch by the model.
Returns
-------
Cost: theano_like expression
Representation of the cost
Notes
-----
Symmetric reconstruction cost as defined by Memisevic in:
"Gradient-based learning of higher-order image features".
This function only works with real valued data.
"""
return (
((0.5*((x - rx)**2)) + (0.5*((y - ry)**2)))).sum(axis=1).mean()
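# Worked illustration, not from the original file: for a toy batch with
# x = [[1., 0.]], rx = [[0., 0.]], y = [[2., 2.]], ry = [[2., 1.]], the
# per-example sum is 0.5*(1-0)**2 + 0.5*(0-0)**2 + 0.5*(2-2)**2 + 0.5*(2-1)**2
# = 1.0, and the mean over this single-example batch is therefore 1.0.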
class NormalizedSymmetricMSRE(SymmetricCost):
"""
Summary (Normalized Symmetric cost for real valued data).
Notes
-----
Value used to observe the percentage of reconstruction.
"""
@staticmethod
def cost(x, y, rx, ry):
"""
Summary (Definition of the cost).
Normalized Mean squared reconstruction error. Values
between 0 and 1.
Parameters
----------
x : tensor_like
Theano symbolic representing the first input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
y : tensor_like
            Theano symbolic representing the second input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
rx : tensor_like
Reconstruction of the first minibatch by the model.
ry: tensor_like
Reconstruction of the second minibatch by the model.
Returns
-------
Cost: theano_like expression
Representation of the cost
Notes
-----
Do not use this function to train, only to monitor the
average percentage of reconstruction achieved when training on
real valued data.
"""
num = (((0.5*((x - rx)**2)) + (0.5*((y - ry)**2)))).sum(axis=1).mean()
den = ((0.5*(x.norm(2, 1)**2)) + (0.5*(y.norm(2, 1)**2))).mean()
return num/den
| bsd-3-clause | -8,314,601,974,106,115,000 | 32.293103 | 78 | 0.599862 | false | 4.70211 | false | false | false |
Rudloff/youtube-dl | youtube_dl/extractor/expotv.py | 4 | 2907 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
class ExpoTVIE(InfoExtractor):
_VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
_TEST = {
'url': 'http://www.expotv.com/videos/reviews/3/40/NYX-Butter-lipstick/667916',
'md5': 'fe1d728c3a813ff78f595bc8b7a707a8',
'info_dict': {
'id': '667916',
'ext': 'mp4',
'title': 'NYX Butter Lipstick Little Susie',
'description': 'Goes on like butter, but looks better!',
'thumbnail': 're:^https?://.*\.jpg$',
'uploader': 'Stephanie S.',
'upload_date': '20150520',
'view_count': int,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player_key = self._search_regex(
r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
config = self._download_json(
'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key),
video_id, 'Downloading video configuration')
formats = []
for fcfg in config['sources']:
media_url = fcfg.get('file')
if not media_url:
continue
if fcfg.get('type') == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
else:
formats.append({
'url': media_url,
'height': int_or_none(fcfg.get('height')),
'format_id': fcfg.get('label'),
'ext': self._search_regex(
r'filename=.*\.([a-z0-9_A-Z]+)&', media_url,
'file extension', default=None) or fcfg.get('type'),
})
self._sort_formats(formats)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = config.get('image')
view_count = int_or_none(self._search_regex(
r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts'))
uploader = self._search_regex(
r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader',
fatal=False)
upload_date = unified_strdate(self._search_regex(
r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date',
fatal=False), day_first=False)
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'view_count': view_count,
'thumbnail': thumbnail,
'uploader': uploader,
'upload_date': upload_date,
}
| unlicense | 6,269,134,595,314,035,000 | 36.753247 | 93 | 0.506364 | false | 3.60223 | false | false | false |
thinksabin/lazy-devops | S3 bucket Maker/IdentityAccessManagement.py | 1 | 2418 | __author__ = 'gambit'
import boto
from boto.iam.connection import IAMConnection
from boto.s3.key import Key
import datetime
import time
import smtplib
import os
class IdentityAccessManagement():
admin_access_key = "XXXXXXXXXXXXXXXXXXXXXXX"
admin_secret_key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
def create_user(self, s3_user):
connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
user = connect.get_all_users()
users = user['list_users_response']['list_users_result']['users']
for user in users:
if s3_user in user['user_name']:
return False
connect.create_user(s3_user)
return True
def access_key(self, s3_user):
connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
key = connect.create_access_key(s3_user)
access_key = key['create_access_key_response'][u'create_access_key_result'][u'access_key'][u'access_key_id']
secret_key = key['create_access_key_response'][u'create_access_key_result'][u'access_key'][u'secret_access_key']
return s3_user, access_key, secret_key
def attach_policy(self, S3_User, bucket_name):
policy = '''{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:ListAllMyBuckets"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::*"
},
{
"Action": "s3:*",
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::%s*",
"arn:aws:s3:::%s*/*"
]
}
]
}''' % (bucket_name, bucket_name)
print policy
# # Attach Policy to acces s3 bucket
connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
connect.put_user_policy(S3_User, bucket_name, policy)
    def create_s3_bucket(self, bucket_name):
        s3 = boto.connect_s3(self.admin_access_key, self.admin_secret_key)
        all_bucket = s3.get_all_buckets()
        # only create the bucket when no existing bucket name already matches
        for bucket in all_bucket:
            if bucket_name in bucket.name:
                return False
        s3.create_bucket(bucket_name)
        return True
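# A minimal end-to-end sketch, not part of the original module; the user and
# bucket names are assumptions chosen for illustration.
#
#     iam = IdentityAccessManagement()
#     if iam.create_user('deploy-bot'):
#         iam.create_s3_bucket('deploy-bot-bucket')
#         iam.attach_policy('deploy-bot', 'deploy-bot-bucket')
#         user, access_key, secret_key = iam.access_key('deploy-bot')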
| apache-2.0 | 1,952,461,174,675,023,600 | 31.675676 | 120 | 0.535567 | false | 3.856459 | false | false | false |
c-o-m-m-a-n-d-e-r/CouchPotatoServer | libs/caper/result.py | 81 | 5904 | # Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from logr import Logr
GROUP_MATCHES = ['identifier']
class CaperNode(object):
def __init__(self, closure, parent=None, match=None):
"""
:type parent: CaperNode
:type weight: float
"""
#: :type: caper.objects.CaperClosure
self.closure = closure
#: :type: CaperNode
self.parent = parent
#: :type: CaptureMatch
self.match = match
#: :type: list of CaptureGroup
self.finished_groups = []
def next(self):
raise NotImplementedError()
def captured(self):
cur = self
if cur.match:
yield cur.match.tag, cur.match.result
while cur.parent:
cur = cur.parent
if cur.match:
yield cur.match.tag, cur.match.result
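# Illustrative note, not part of the original file: captured() walks from this
# node up through its parents and yields a (match.tag, match.result) pair for
# every node along that chain which holds a match.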
class CaperRootNode(CaperNode):
def __init__(self, closure):
"""
:type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
"""
super(CaperRootNode, self).__init__(closure)
def next(self):
return self.closure
class CaperClosureNode(CaperNode):
def __init__(self, closure, parent=None, match=None):
"""
:type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
"""
super(CaperClosureNode, self).__init__(closure, parent, match)
def next(self):
if not self.closure:
return None
if self.match:
# Jump to next closure if we have a match
return self.closure.right
elif len(self.closure.fragments) > 0:
# Otherwise parse the fragments
return self.closure.fragments[0]
return None
def __str__(self):
return "<CaperClosureNode match: %s>" % repr(self.match)
def __repr__(self):
return self.__str__()
class CaperFragmentNode(CaperNode):
def __init__(self, closure, fragments, parent=None, match=None):
"""
:type closure: caper.objects.CaperClosure
:type fragments: list of caper.objects.CaperFragment
"""
super(CaperFragmentNode, self).__init__(closure, parent, match)
#: :type: caper.objects.CaperFragment or list of caper.objects.CaperFragment
self.fragments = fragments
def next(self):
if len(self.fragments) > 0 and self.fragments[-1] and self.fragments[-1].right:
return self.fragments[-1].right
if self.closure.right:
return self.closure.right
return None
def __str__(self):
return "<CaperFragmentNode match: %s>" % repr(self.match)
def __repr__(self):
return self.__str__()
class CaperResult(object):
def __init__(self):
#: :type: list of CaperNode
self.heads = []
self.chains = []
def build(self):
max_matched = 0
for head in self.heads:
for chain in self.combine_chain(head):
if chain.num_matched > max_matched:
max_matched = chain.num_matched
self.chains.append(chain)
for chain in self.chains:
chain.weights.append(chain.num_matched / float(max_matched or chain.num_matched or 1))
chain.finish()
self.chains.sort(key=lambda chain: chain.weight, reverse=True)
for chain in self.chains:
Logr.debug("chain weight: %.02f", chain.weight)
Logr.debug("\tInfo: %s", chain.info)
Logr.debug("\tWeights: %s", chain.weights)
Logr.debug("\tNumber of Fragments Matched: %s", chain.num_matched)
def combine_chain(self, subject, chain=None):
nodes = subject if type(subject) is list else [subject]
if chain is None:
chain = CaperResultChain()
result = []
for x, node in enumerate(nodes):
node_chain = chain if x == len(nodes) - 1 else chain.copy()
if not node.parent:
result.append(node_chain)
continue
node_chain.update(node)
result.extend(self.combine_chain(node.parent, node_chain))
return result
class CaperResultChain(object):
def __init__(self):
#: :type: float
self.weight = None
self.info = {}
self.num_matched = 0
self.weights = []
def update(self, subject):
"""
:type subject: CaperFragmentNode
"""
if not subject.match or not subject.match.success:
return
# TODO this should support closure nodes
if type(subject) is CaperFragmentNode:
self.num_matched += len(subject.fragments) if subject.fragments is not None else 0
self.weights.append(subject.match.weight)
if subject.match:
if subject.match.tag not in self.info:
self.info[subject.match.tag] = []
self.info[subject.match.tag].insert(0, subject.match.result)
def finish(self):
self.weight = sum(self.weights) / len(self.weights)
def copy(self):
chain = CaperResultChain()
chain.weight = self.weight
chain.info = copy.deepcopy(self.info)
chain.num_matched = self.num_matched
chain.weights = copy.copy(self.weights)
return chain | gpl-3.0 | -1,945,484,610,735,691,800 | 26.723005 | 98 | 0.598747 | false | 4.005427 | false | false | false |
ToBeReplaced/ansible-modules-extras | notification/hall.py | 142 | 3619 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Billy Kimble <basslines@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
description:
- "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms."
version_added: "2.0"
author: Billy Kimble (@bkimble) <basslines@gmail.com>
options:
room_token:
description:
- "Room token provided to you by setting up the Ansible room integation on U(https://hall.com)"
required: true
msg:
description:
        - The message you wish to deliver as a notification
required: true
title:
description:
- The title of the message
required: true
picture:
description:
- "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)"
required: false
"""
EXAMPLES = """
- name: Send Hall notification
local_action:
module: hall
room_token: <hall room integration token>
title: Nginx
msg: Created virtual host file on {{ inventory_hostname }}
- name: Send Hall notification if EC2 servers were created.
when: ec2.instances|length > 0
local_action:
module: hall
room_token: <hall room integration token>
title: Server Creation
msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region."
with_items: ec2.instances
"""
HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
def send_request_to_hall(module, room_token, payload):
headers = {'Content-Type': 'application/json'}
payload=module.jsonify(payload)
api_endpoint = HALL_API_ENDPOINT % (room_token)
response, info = fetch_url(module, api_endpoint, data=payload, headers=headers)
if info['status'] != 200:
secure_url = HALL_API_ENDPOINT % ('[redacted]')
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, secure_url, info['msg']))
def main():
module = AnsibleModule(
argument_spec = dict(
room_token = dict(type='str', required=True),
msg = dict(type='str', required=True),
title = dict(type='str', required=True),
picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
)
)
room_token = module.params['room_token']
message = module.params['msg']
title = module.params['title']
picture = module.params['picture']
payload = {'title': title, 'message': message, 'picture': picture}
send_request_to_hall(module, room_token, payload)
module.exit_json(msg="OK")
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 | -5,157,466,764,462,421,000 | 36.309278 | 216 | 0.685548 | false | 3.483157 | false | false | false |
misdoro/python-ase | ase/calculators/jacapo/utils/bader.py | 2 | 6745 | from __future__ import print_function
import os, string, tempfile, shutil
from subprocess import Popen
from ase.io import write
from ase.units import Bohr
class Bader:
'''class for running bader analysis and extracting data from it.
The class runs bader, extracts the charge density and outputs it
to a cube file. Then you call different functions of the class to
extract the charges, volumes, etc...
ACF.dat contains the coordinates of each atom, the charge
associated with it according to Bader partitioning, percentage of
the whole according to Bader partitioning and the minimum distance
    to the surface. This distance should be compared to the maximum
cut-off radius for the core region if pseudo potentials have been
used.
BCF.dat contains the coordinates of each Bader maxima, the charge
within that volume, the nearest atom and the distance to that
atom.
AtomVolumes.dat contains the number of each volume that has been
assigned to each atom. These numbers correspond to the number of
the BvAtxxxx.dat files.
The options for the executable are::
bader [ -c bader | voronoi ]
[ -n bader | voronoi ]
[ -b neargrid | ongrid ]
[ -r refine_edge_iterations ]
[ -ref reference_charge ]
[ -p all_atom | all_bader ]
[ -p sel_atom | sel_bader ] [volume list]
[ -p atom_index | bader_index ]
[ -i cube | chgcar ]
[ -h ] [ -v ]
chargefile
References:
G. Henkelman, A. Arnaldsson, and H. Jonsson, A fast and robust
algorithm for Bader decomposition of charge density,
Comput. Mater. Sci. 36 254-360 (2006).
E. Sanville, S. D. Kenny, R. Smith, and G. Henkelman An improved
grid-based algorithm for Bader charge allocation,
J. Comp. Chem. 28 899-908 (2007).
W. Tang, E. Sanville, and G. Henkelman A grid-based Bader analysis
algorithm without lattice bias, J. Phys.: Condens. Matter 21
084204 (2009).
'''
def __init__(self, atoms):
'''
'''
self.atoms = atoms
#get density and write cube file
calc = atoms.get_calculator()
ncfile = calc.get_nc()
base, ext = os.path.splitext(ncfile)
x, y, z, density = calc.get_charge_density()
cubefile = base + '_charge_density.cube'
self.densityfile = cubefile
if not os.path.exists(cubefile):
write(cubefile, atoms, data=density * Bohr ** 3)
#cmd to run for bader analysis. check if output exists so we
#don't run this too often.
acf_file = base + '_ACF.dat'
if not os.path.exists(acf_file):
#mk tempdir
tempdir = tempfile.mkdtemp()
cwd = os.getcwd()
abscubefile = os.path.abspath(cubefile)
os.chdir(tempdir)
            cmd = 'bader %s' % abscubefile
            # run the external bader executable via the shell and wait for it
            process = Popen(cmd, shell=True)
            status = process.wait()
if status != 0:
print(process)
shutil.copy2('ACF.dat', os.path.join(cwd, acf_file))
os.chdir(cwd)
shutil.rmtree(tempdir)
self.charges = []
self.volumes = []
#now parse the output
f = open(acf_file, 'r')
#skip 2 lines
f.readline()
f.readline()
for i, atom in enumerate(self.atoms):
line = f.readline()
fields = line.split()
n = int(fields[0])
x = float(fields[1])
y = float(fields[2])
z = float(fields[3])
chg = float(fields[4])
mindist = float(fields[5])
vol = float(fields[6])
self.charges.append(chg)
self.volumes.append(vol)
f.close()
def get_bader_charges(self):
return self.charges
def get_bader_volumes(self):
'return volumes in Ang**3'
return [x * Bohr ** 3 for x in self.volumes]
def write_atom_volume(self, atomlist):
'''write bader atom volumes to cube files.
atomlist = [0,2] #for example
-p sel_atom Write the selected atomic volumes, read from the
subsequent list of volumes.
'''
alist = string.join([str(x) for x in atomlist], ' ')
cmd = 'bader -p sel_atom %s %s' % (alist, self.densityfile)
print(cmd)
os.system(cmd)
def write_bader_volume(self, atomlist):
"""write bader atom volumes to cube files.
::
atomlist = [0,2] # for example
-p sel_bader Write the selected Bader volumes, read from the
subsequent list of volumes.
"""
alist = string.join([str(x) for x in atomlist], ' ')
cmd = 'bader -p sel_bader %s %s' % (alist, self.densityfile)
print(cmd)
os.system(cmd)
def write_atom_index(self):
''' -p atom_index Write the atomic volume index to a charge
density file.
'''
cmd = 'bader -p atom_index %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_bader_index(self):
'''
-p bader_index Write the Bader volume index to a charge
density file.
'''
cmd = 'bader -p bader_index %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_all_atom(self):
'''
-p all_atom Combine all volumes associated with an atom and
write to file. This is done for all atoms and written to files
named BvAtxxxx.dat. The volumes associated with atoms are
those for which the maximum in charge density within the
volume is closest to the atom.
'''
cmd = 'bader -p all_atom %s' % (self.densityfile)
print(cmd)
os.system(cmd)
def write_all_bader(self):
'''
-p all_bader Write all Bader volumes (containing charge above
threshold of 0.0001) to a file. The charge distribution in
each volume is written to a separate file, named
Bvolxxxx.dat. It will either be of a CHGCAR format or a CUBE
file format, depending on the format of the initial charge
density file. These files can be quite large, so this option
should be used with caution.
'''
cmd = 'bader -p all_bader %s' % (self.densityfile)
print(cmd)
os.system(cmd)
if __name__ == '__main__':
from ase.calculators.jacapo import Jacapo
atoms = Jacapo.read_atoms('ethylene.nc')
b = Bader(atoms)
print(b.get_bader_charges())
print(b.get_bader_volumes())
b.write_atom_volume([3, 4])
| gpl-2.0 | -2,498,351,350,100,636,700 | 30.966825 | 70 | 0.577613 | false | 3.815045 | false | false | false |
playm2mboy/edx-platform | lms/djangoapps/open_ended_grading/staff_grading_service.py | 64 | 16269 | """
This module provides views that proxy to the staff grading backend service.
"""
import json
import logging
from django.conf import settings
from django.http import HttpResponse, Http404
from django.utils.translation import ugettext as _
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.grading_service_module import GradingService, GradingServiceError
from courseware.access import has_access
from edxmako.shortcuts import render_to_string
from student.models import unique_id_for_user
from open_ended_grading.utils import does_location_exist
import dogstats_wrapper as dog_stats_api
log = logging.getLogger(__name__)
STAFF_ERROR_MESSAGE = _(
u'Could not contact the external grading server. Please contact the '
u'development team at {email}.'
).format(
email=u'<a href="mailto:{tech_support_email}>{tech_support_email}</a>'.format(
tech_support_email=settings.TECH_SUPPORT_EMAIL
)
)
MAX_ALLOWED_FEEDBACK_LENGTH = 5000
class MockStaffGradingService(object):
"""
A simple mockup of a staff grading service, testing.
"""
def __init__(self):
self.cnt = 0
def get_next(self, course_id, location, grader_id):
self.cnt += 1
return {'success': True,
'submission_id': self.cnt,
'submission': 'Test submission {cnt}'.format(cnt=self.cnt),
'num_graded': 3,
'min_for_ml': 5,
'num_pending': 4,
'prompt': 'This is a fake prompt',
'ml_error_info': 'ML info',
'max_score': 2 + self.cnt % 3,
'rubric': 'A rubric'}
def get_problem_list(self, course_id, grader_id):
self.cnt += 1
return {
'success': True,
'problem_list': [
json.dumps({
'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1",
'num_graded': 3,
'num_pending': 5,
'min_for_ml': 10,
}),
json.dumps({
'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2",
'num_graded': 1,
'num_pending': 5,
'min_for_ml': 10,
}),
],
}
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
submission_flagged):
return self.get_next(course_id, 'fake location', grader_id)
class StaffGradingService(GradingService):
"""
Interface to staff grading backend.
"""
METRIC_NAME = 'edxapp.open_ended_grading.staff_grading_service'
def __init__(self, config):
config['render_template'] = render_to_string
super(StaffGradingService, self).__init__(config)
self.url = config['url'] + config['staff_grading']
self.login_url = self.url + '/login/'
self.get_next_url = self.url + '/get_next_submission/'
self.save_grade_url = self.url + '/save_grade/'
self.get_problem_list_url = self.url + '/get_problem_list/'
self.get_notifications_url = self.url + "/get_notifications/"
def get_problem_list(self, course_id, grader_id):
"""
Get the list of problems for a given course.
Args:
course_id: course id that we want the problems of
grader_id: who is grading this? The anonymous user_id of the grader.
Returns:
dict with the response from the service. (Deliberately not
writing out the fields here--see the docs on the staff_grading view
in the grading_controller repo)
Raises:
GradingServiceError: something went wrong with the connection.
"""
params = {'course_id': course_id.to_deprecated_string(), 'grader_id': grader_id}
result = self.get(self.get_problem_list_url, params)
tags = [u'course_id:{}'.format(course_id)]
self._record_result('get_problem_list', result, tags)
dog_stats_api.histogram(
self._metric_name('get_problem_list.result.length'),
len(result.get('problem_list', []))
)
return result
def get_next(self, course_id, location, grader_id):
"""
Get the next thing to grade.
Args:
course_id: the course that this problem belongs to
location: location of the problem that we are grading and would like the
next submission for
grader_id: who is grading this? The anonymous user_id of the grader.
Returns:
dict with the response from the service. (Deliberately not
writing out the fields here--see the docs on the staff_grading view
in the grading_controller repo)
Raises:
GradingServiceError: something went wrong with the connection.
"""
result = self._render_rubric(
self.get(
self.get_next_url,
params={
'location': location.to_deprecated_string(),
'grader_id': grader_id
}
)
)
tags = [u'course_id:{}'.format(course_id)]
self._record_result('get_next', result, tags)
return result
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
submission_flagged):
"""
Save a score and feedback for a submission.
Returns:
dict with keys
'success': bool
'error': error msg, if something went wrong.
Raises:
GradingServiceError if there's a problem connecting.
"""
data = {'course_id': course_id.to_deprecated_string(),
'submission_id': submission_id,
'score': score,
'feedback': feedback,
'grader_id': grader_id,
'skipped': skipped,
'rubric_scores': rubric_scores,
'rubric_scores_complete': True,
'submission_flagged': submission_flagged}
result = self._render_rubric(self.post(self.save_grade_url, data=data))
tags = [u'course_id:{}'.format(course_id)]
self._record_result('save_grade', result, tags)
return result
def get_notifications(self, course_id):
params = {'course_id': course_id.to_deprecated_string()}
result = self.get(self.get_notifications_url, params)
tags = [
u'course_id:{}'.format(course_id),
u'staff_needs_to_grade:{}'.format(result.get('staff_needs_to_grade'))
]
self._record_result('get_notifications', result, tags)
return result
# don't initialize until staff_grading_service() is called--means that just
# importing this file doesn't create objects that may not have the right config
_service = None
def staff_grading_service():
"""
Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True,
returns a mock one, otherwise a real one.
Caches the result, so changing the setting after the first call to this
function will have no effect.
"""
global _service
if _service is not None:
return _service
if settings.MOCK_STAFF_GRADING:
_service = MockStaffGradingService()
else:
_service = StaffGradingService(settings.OPEN_ENDED_GRADING_INTERFACE)
return _service
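# Illustrative sketch only (never called by this module): because the factory
# above caches its result, every caller shares a single service object for the
# lifetime of the process.
def _example_staff_grading_service_usage():
    service = staff_grading_service()
    # A second call returns the exact same cached instance.
    assert service is staff_grading_service()
    return service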
def _err_response(msg):
"""
Return a HttpResponse with a json dump with success=False, and the given error message.
"""
return HttpResponse(json.dumps({'success': False, 'error': msg}),
mimetype="application/json")
def _check_access(user, course_id):
"""
Raise 404 if user doesn't have staff access to course_id
"""
if not has_access(user, 'staff', course_id):
raise Http404
return
def get_next(request, course_id):
"""
Get the next thing to grade for course_id and with the location specified
in the request.
Returns a json dict with the following keys:
'success': bool
'submission_id': a unique identifier for the submission, to be passed back
with the grade.
'submission': the submission, rendered as read-only html for grading
'rubric': the rubric, also rendered as html.
'message': if there was no submission available, but nothing went wrong,
there will be a message field.
'error': if success is False, will have an error message with more info.
"""
assert isinstance(course_id, basestring)
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
_check_access(request.user, course_key)
required = set(['location'])
if request.method != 'POST':
raise Http404
actual = set(request.POST.keys())
missing = required - actual
if len(missing) > 0:
return _err_response('Missing required keys {0}'.format(
', '.join(missing)))
grader_id = unique_id_for_user(request.user)
p = request.POST
location = course_key.make_usage_key_from_deprecated_string(p['location'])
return HttpResponse(json.dumps(_get_next(course_key, grader_id, location)),
mimetype="application/json")
def get_problem_list(request, course_id):
"""
Get all the problems for the given course id
Returns a json dict with the following keys:
success: bool
problem_list: a list containing json dicts with the following keys:
each dict represents a different problem in the course
location: the location of the problem
problem_name: the name of the problem
num_graded: the number of responses that have been graded
num_pending: the number of responses that are sitting in the queue
min_for_ml: the number of responses that need to be graded before
the ml can be run
'error': if success is False, will have an error message with more info.
"""
assert isinstance(course_id, basestring)
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
_check_access(request.user, course_key)
try:
response = staff_grading_service().get_problem_list(course_key, unique_id_for_user(request.user))
# If 'problem_list' is in the response, then we got a list of problems from the ORA server.
# If it is not, then ORA could not find any problems.
if 'problem_list' in response:
problem_list = response['problem_list']
else:
problem_list = []
            # Make an error message to reflect that we could not find anything to grade.
response['error'] = _(
u'Cannot find any open response problems in this course. '
u'Have you submitted answers to any open response assessment questions? '
u'If not, please do so and return to this page.'
)
valid_problem_list = []
for i in xrange(len(problem_list)):
# Needed to ensure that the 'location' key can be accessed.
try:
problem_list[i] = json.loads(problem_list[i])
except Exception:
pass
if does_location_exist(course_key.make_usage_key_from_deprecated_string(problem_list[i]['location'])):
valid_problem_list.append(problem_list[i])
response['problem_list'] = valid_problem_list
response = json.dumps(response)
return HttpResponse(response,
mimetype="application/json")
except GradingServiceError:
#This is a dev_facing_error
log.exception(
"Error from staff grading service in open "
"ended grading. server url: {0}".format(staff_grading_service().url)
)
#This is a staff_facing_error
return HttpResponse(json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE}))
def _get_next(course_id, grader_id, location):
"""
Implementation of get_next (also called from save_grade) -- returns a json string
"""
try:
return staff_grading_service().get_next(course_id, location, grader_id)
except GradingServiceError:
#This is a dev facing error
log.exception(
"Error from staff grading service in open "
"ended grading. server url: {0}".format(staff_grading_service().url)
)
#This is a staff_facing_error
return json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE})
def save_grade(request, course_id):
"""
Save the grade and feedback for a submission, and, if all goes well, return
the next thing to grade.
Expects the following POST parameters:
'score': int
'feedback': string
'submission_id': int
Returns the same thing as get_next, except that additional error messages
are possible if something goes wrong with saving the grade.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
_check_access(request.user, course_key)
if request.method != 'POST':
raise Http404
p = request.POST
required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged'])
skipped = 'skipped' in p
#If the instructor has skipped grading the submission, then there will not be any rubric scores.
#Only add in the rubric scores if the instructor has not skipped.
if not skipped:
required.add('rubric_scores[]')
actual = set(p.keys())
missing = required - actual
if len(missing) > 0:
return _err_response('Missing required keys {0}'.format(
', '.join(missing)))
success, message = check_feedback_length(p)
if not success:
return _err_response(message)
grader_id = unique_id_for_user(request.user)
location = course_key.make_usage_key_from_deprecated_string(p['location'])
try:
result = staff_grading_service().save_grade(course_key,
grader_id,
p['submission_id'],
p['score'],
p['feedback'],
skipped,
p.getlist('rubric_scores[]'),
p['submission_flagged'])
except GradingServiceError:
#This is a dev_facing_error
log.exception(
"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(
request, course_id))
#This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE)
except ValueError:
#This is a dev_facing_error
        # ``result`` is not bound when the service returns broken json, so do
        # not try to include it in the log message.
        log.exception(
            "save_grade returned broken json in the staff grading interface in open ended grading.")
#This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE)
if not result.get('success', False):
#This is a dev_facing_error
log.warning(
            'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result))
return _err_response(STAFF_ERROR_MESSAGE)
# Ok, save_grade seemed to work. Get the next submission to grade.
return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)),
mimetype="application/json")
def check_feedback_length(data):
feedback = data.get("feedback")
if feedback and len(feedback) > MAX_ALLOWED_FEEDBACK_LENGTH:
return False, "Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
else:
return True, ""
| agpl-3.0 | -2,235,410,977,889,313,800 | 35.559551 | 123 | 0.595427 | false | 4.257786 | true | false | false |
batxes/4c2vhic | Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/Six_zebra_models29901.py | 2 | 13923 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((14969.5, 9355.22, 4884.95), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((15857.7, 8984.62, 4617.48), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((14048.8, 8305.7, 4500.15), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((11860.6, 7514.14, 4365.92), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((11168.8, 7253.91, 4353.22), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((12710.6, 5841.18, 3924.41), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((11163.9, 4708.08, 4115.8), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((11220.3, 3116.23, 3445.88), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((10024.2, 2278.2, 2811.32), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((8565.6, 1179.39, 2598.93), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((7241.9, 1843.25, 1632.03), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((7011.72, 1012.49, -305.89), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((6903.61, 98.5444, -2158.28), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((8201.9, 1002.62, -1834.4), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((7414.09, -129.02, -854.322), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((7122.12, -299.946, 714.323), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((7708.18, 230.276, 1947.9), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((8140.74, 861.511, 3347.95), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((9916.15, 691.375, 3641.12), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((10028.7, 610.495, 5184.02), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((9947.99, 101.655, 6937.75), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((10141.9, -1122.2, 8124.17), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((9427.43, 62.891, 7740), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((7792.4, 1113.02, 6928.08), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5839.87, 1426.25, 6096.11), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((4866.74, 1500.33, 5695.34), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((4653.24, 3936.53, 6840.6), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((3660.54, 5240.38, 7746.49), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((4273.05, 5438.27, 8823.37), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((4960.15, 5382.51, 11055.5), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((4538.04, 5433.08, 10570.7), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((4142.42, 6699.7, 10521.4), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3794.77, 8574.69, 11766.8), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((4229.33, 9771.99, 11046.9), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((5420.81, 10555.7, 10510.1), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((6615.4, 11834.4, 10784.8), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((8123.43, 12065.3, 11499), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((8008.57, 10546.9, 12037.1), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((6588.29, 10507.4, 12644.8), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((5333.97, 9838.61, 11243.6), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((4610.87, 10843.4, 11069.5), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((5113.26, 9718.7, 10484.3), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((5912.2, 10033, 10071.9), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((5196.91, 9912.15, 10527.5), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((4146.77, 8424.44, 10674.7), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((3517.66, 6984.69, 8251.86), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((2724.62, 7296.95, 6580.21), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((2710.77, 7987.75, 5648.79), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((1852.63, 9762.8, 5359.63), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((41.5662, 11727.3, 5413.74), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((412.316, 12008.6, 7020.04), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((304.441, 9258.71, 7961.56), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((1047.31, 9521.91, 7963.95), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((1974.71, 10863.2, 8973.95), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((3220.08, 11733.6, 9246.88), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((4736.09, 11001.1, 8495.51), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 | -4,076,574,980,432,056,300 | 45.721477 | 75 | 0.699777 | false | 2.623022 | false | false | false |
aostapenko/manila | manila/scheduler/chance.py | 2 | 2704 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Chance (Random) Scheduler implementation
"""
import random
from manila import exception
from manila.scheduler import driver
from oslo.config import cfg
CONF = cfg.CONF
class ChanceScheduler(driver.Scheduler):
"""Implements Scheduler as a random node selector."""
def _filter_hosts(self, request_spec, hosts, **kwargs):
"""Filter a list of hosts based on request_spec."""
filter_properties = kwargs.get('filter_properties', {})
ignore_hosts = filter_properties.get('ignore_hosts', [])
hosts = [host for host in hosts if host not in ignore_hosts]
return hosts
def _schedule(self, context, topic, request_spec, **kwargs):
"""Picks a host that is up at random."""
elevated = context.elevated()
hosts = self.hosts_up(elevated, topic)
if not hosts:
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)
hosts = self._filter_hosts(request_spec, hosts, **kwargs)
if not hosts:
msg = _("Could not find another host")
raise exception.NoValidHost(reason=msg)
return hosts[int(random.random() * len(hosts))]
def schedule_create_share(self, context, request_spec, filter_properties):
"""Picks a host that is up at random."""
topic = CONF.share_topic
host = self._schedule(context, topic, request_spec,
filter_properties=filter_properties)
share_id = request_spec['share_id']
snapshot_id = request_spec['snapshot_id']
updated_share = driver.share_update_db(context, share_id, host)
self.share_rpcapi.create_share(context, updated_share, host,
request_spec,
filter_properties,
snapshot_id)
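# Stand-alone sketch (not used by the scheduler) of the host selection step in
# _schedule() above; the expression is equivalent to random.choice() over the
# already-filtered hosts. Host names are invented for illustration.
def _example_random_host_pick(hosts=('host-a', 'host-b', 'host-c')):
    return hosts[int(random.random() * len(hosts))]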
| apache-2.0 | 1,732,781,051,139,034,600 | 35.540541 | 78 | 0.644601 | false | 4.271722 | false | false | false |
wujuguang/sentry | src/sentry/migrations/0098_auto__add_user__chg_field_team_owner__chg_field_activity_user__chg_fie.py | 36 | 28778 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, connections
class Migration(SchemaMigration):
def forwards(self, orm):
if 'auth_user' in connections['default'].introspection.table_names():
return
self.create_auth(orm)
def create_auth(self, orm):
# Adding model 'User'
db.create_table('auth_user', (
(u'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'auth', ['User'])
# Adding M2M table for field groups on 'User'
db.create_table('auth_user_groups', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'sentry.user'], null=False)),
('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'auth.group'], null=False))
))
db.create_unique('auth_user_groups', ['user_id', 'group_id'])
# Adding M2M table for field user_permissions on 'User'
db.create_table('auth_user_user_permissions', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'sentry.user'], null=False)),
('permission', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'auth.permission'], null=False))
))
db.create_unique('auth_user_user_permissions', ['user_id', 'permission_id'])
def backwards(self, orm):
pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'})
},
u'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
u'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
u'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"})
},
u'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
},
u'sentry.option': {
'Meta': {'object_name': 'Option'},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
},
u'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
},
u'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| bsd-3-clause | 3,045,616,489,184,792,600 | 82.414493 | 225 | 0.563764 | false | 3.738859 | false | false | false |
vanhonit/xmario_center | softwarecenter/ui/gtk3/widgets/description.py | 4 | 47888 | # Copyright (C) 2010 Matthew McGowan
#
# Authors:
# Matthew McGowan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk
from gi.repository import GObject
from gi.repository import Pango
from softwarecenter.utils import normalize_package_description
from softwarecenter.ui.gtk3.drawing import color_to_hex
from softwarecenter.ui.gtk3.utils import point_in
_PS = Pango.SCALE
class _SpecialCasePreParsers(object):
def preparse(self, k, desc):
if k is None:
return desc
func_name = '_%s_preparser' % k.lower().replace('-', '_')
if not hasattr(self, func_name):
return desc
f = getattr(self, func_name)
return f(desc)
# special case pre-parsers
def _skype_preparser(self, desc):
return desc.replace('. *', '.\n*')
def _texlive_fonts_extra_preparser(self, desc):
return desc.replace(')\n', ').\n').replace('--\n', '--\n\n')
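# Small sketch, not used by the widgets below: the dispatcher above turns a
# package key into a method name, so 'skype' resolves to _skype_preparser and
# its '. *' -> '.\n*' replacement. The sample text is made up.
def _example_preparse_dispatch():
    parsers = _SpecialCasePreParsers()
    return parsers.preparse('skype', 'First point. * second point')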
class EventHelper(dict):
# FIXME: workaround for broken event.copy()
class ButtonEvent(object):
def __init__(self, event):
self.x = event.x
self.y = event.y
self.type = event.type
self.button = event.button
VALID_KEYS = (
'event',
'layout',
'index',
'within-selection',
'drag-active',
'drag-context')
def __init__(self):
dict.__init__(self)
self.new_press(None, None, None, False)
def __setitem__(self, k, v):
if k not in EventHelper.VALID_KEYS:
            raise KeyError('"%s" is not a valid key' % k)
return dict.__setitem__(self, k, v)
def new_press(self, event, layout, index, within_sel):
if event is None:
self['event'] = None
else:
# this should be simply event.copy() but that appears broken
# currently(?)
self['event'] = EventHelper.ButtonEvent(event)
self['layout'] = layout
self['index'] = index
self['within-selection'] = within_sel
self['drag-active'] = False
self['drag-context'] = None
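# Hedged illustration (unused elsewhere): EventHelper only accepts the keys
# listed in VALID_KEYS and raises KeyError for anything else.
def _example_event_helper_validation():
    helper = EventHelper()
    helper['index'] = 42          # a valid key
    try:
        helper['bogus-key'] = 1   # not in VALID_KEYS
    except KeyError:
        return True
    return False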
class PangoLayoutProxy(object):
""" Because i couldn't figure out how to inherit from
pygi's Pango.Layout... """
def __init__(self, context):
self._layout = Pango.Layout.new(context)
def xy_to_index(self, x, y):
return self._layout.xy_to_index(x, y)
def index_to_pos(self, *args):
return self._layout.index_to_pos(*args)
# setter proxies
def set_attributes(self, attrs):
return self._layout.set_attributes(attrs)
def set_markup(self, markup):
return self._layout.set_markup(markup, -1)
def set_font_description(self, font_desc):
return self._layout.set_font_description(font_desc)
def set_wrap(self, wrap_mode):
return self._layout.set_wrap(wrap_mode)
def set_width(self, width):
return self._layout.set_width(width)
# getter proxies
def get_text(self):
return self._layout.get_text()
def get_pixel_extents(self):
return self._layout.get_pixel_extents()[1]
def get_cursor_pos(self, index):
return self._layout.get_cursor_pos(index)
def get_iter(self):
return self._layout.get_iter()
def get_extents(self):
return self._layout.get_extents()
class Layout(PangoLayoutProxy):
def __init__(self, widget, text=""):
PangoLayoutProxy.__init__(self, widget.get_pango_context())
self.widget = widget
self.length = 0
self.indent = 0
self.vspacing = None
self.is_bullet = False
self.index = 0
self.allocation = Gdk.Rectangle()
self._default_attrs = True
self.set_markup(text)
def __len__(self):
return self.length
def set_text(self, text):
PangoLayoutProxy.set_markup(self, text)
self.length = len(self.get_text())
def set_allocation(self, x, y, w, h):
a = self.allocation
a.x = x
a.y = y
a.width = w
a.height = h
def get_position(self):
return self.allocation.x, self.allocation.y
def cursor_up(self, cursor, target_x=-1):
layout = self.widget.order[cursor.paragraph]
pos = layout.index_to_pos(cursor.index)
x, y = pos.x, pos.y
if target_x >= 0:
x = target_x
y -= _PS * self.widget.line_height
return layout.xy_to_index(x, y), (x, y)
def cursor_down(self, cursor, target_x=-1):
layout = self.widget.order[cursor.paragraph]
pos = layout.index_to_pos(cursor.index)
x, y = pos.x, pos.y
if target_x >= 0:
x = target_x
y += _PS * self.widget.line_height
return layout.xy_to_index(x, y), (x, y)
def index_at(self, px, py):
#wa = self.widget.get_allocation()
x, y = self.get_position() # layout allocation
(_, index, k) = self.xy_to_index((px - x) * _PS, (py - y) * _PS)
return point_in(self.allocation, px, py), index + k
def reset_attrs(self):
#~ self.set_attributes(Pango.AttrList())
self.set_markup(self.get_text())
self._default_attrs = True
def highlight(self, start, end, bg, fg):
        # FIXME: AttrBackground doesn't seem to be exposed by gi yet??
#~ attrs = Pango.AttrList()
#~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, start,
#~ end))
#~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, start,
#~ end))
#~ self.set_attributes(attrs)
# XXX: workaround
text = self.get_text()
new_text = (text[:start] + '<span background="%s" foreground="%s">' %
(bg, fg))
new_text += text[start:end]
new_text += '</span>' + text[end:]
self.set_markup(new_text)
self._default_attrs = False
def highlight_all(self, bg, fg):
        # FIXME: AttrBackground doesn't seem to be exposed by gi yet??
#~ attrs = Pango.AttrList()
#~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, 0, -1))
#~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, 0, -1))
#~ self.set_attributes(attrs)
# XXX: workaround
text = self.get_text()
self.set_markup('<span background="%s" foreground="%s">%s</span>' %
(bg, fg, text))
self._default_attrs = False
class Cursor(object):
WORD_TERMINATORS = (' ',) # empty space. suggestions recommended...
def __init__(self, parent):
self.parent = parent
self.index = 0
self.paragraph = 0
def is_min(self, cursor):
return self.get_position() <= cursor.get_position()
def is_max(self, cursor):
return self.get_position() >= cursor.get_position()
def switch(self, cursor):
this_pos = self.get_position()
other_pos = cursor.get_position()
self.set_position(*other_pos)
cursor.set_position(*this_pos)
def same_line(self, cursor):
return self.get_current_line()[0] == cursor.get_current_line()[0]
def get_current_line(self):
keep_going = True
i, it = self.index, self.parent.order[self.paragraph].get_iter()
ln = 0
while keep_going:
l = it.get_line()
ls = l.start_index
le = ls + l.length
if i >= ls and i <= le:
if not it.at_last_line():
le -= 1
return (self.paragraph, ln), (ls, le)
ln += 1
keep_going = it.next_line()
        return None, None
def get_current_word(self):
keep_going = True
layout = self.parent.order[self.paragraph]
text = layout.get_text()
i, it = self.index, layout.get_iter()
start = 0
while keep_going:
j = it.get_index()
if j >= i and text[j] in self.WORD_TERMINATORS:
return self.paragraph, (start, j)
elif text[j] in self.WORD_TERMINATORS:
start = j + 1
keep_going = it.next_char()
return self.paragraph, (start, len(layout))
def set_position(self, paragraph, index):
self.index = index
self.paragraph = paragraph
def get_position(self):
return self.paragraph, self.index
class PrimaryCursor(Cursor):
def __init__(self, parent):
Cursor.__init__(self, parent)
def __repr__(self):
return 'Cursor: ' + str((self.paragraph, self.index))
def get_rectangle(self, layout, a):
if self.index < len(layout):
pos = layout.get_cursor_pos(self.index)[1]
else:
pos = layout.get_cursor_pos(len(layout))[1]
x = layout.allocation.x + pos.x / _PS
y = layout.allocation.y + pos.y / _PS
return x, y, 1, pos.height / _PS
def draw(self, cr, layout, a):
cr.set_source_rgb(0, 0, 0)
cr.rectangle(*self.get_rectangle(layout, a))
cr.fill()
def zero(self):
self.index = 0
self.paragraph = 0
class SelectionCursor(Cursor):
def __init__(self, cursor):
Cursor.__init__(self, cursor.parent)
self.cursor = cursor
self.target_x = None
self.target_x_indent = 0
self.restore_point = None
def __repr__(self):
return 'Selection: ' + str(self.get_range())
def __nonzero__(self):
c = self.cursor
return (self.paragraph, self.index) != (c.paragraph, c.index)
@property
def min(self):
c = self.cursor
return min((self.paragraph, self.index), (c.paragraph, c.index))
@property
def max(self):
c = self.cursor
return max((self.paragraph, self.index), (c.paragraph, c.index))
def clear(self, key=None):
self.index = self.cursor.index
self.paragraph = self.cursor.paragraph
self.restore_point = None
if key not in (Gdk.KEY_uparrow, Gdk.KEY_downarrow):
self.target_x = None
self.target_x_indent = 0
def set_target_x(self, x, indent):
self.target_x = x
self.target_x_indent = indent
def get_range(self):
return self.min, self.max
def within_selection(self, pos):
l = list(self.get_range())
l.append(pos)
l.sort()
# sort the list, see if pos is in between the extents of the selection
# range, if it is, pos is within the selection
if pos in l:
return l.index(pos) == 1
return False
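# Illustrative sketch (not part of the original module): within_selection() sorts the
# two selection extents together with the probed position, and the position lies
# inside the selection exactly when it ends up in the middle slot. The
# (paragraph, index) pairs below are made up for the example.
def _demo_within_selection_ordering():
    extents = [(0, 2), (1, 5)]            # selection spanning paragraph 0 to 1
    inside, outside = (0, 7), (1, 9)
    ordered_inside = sorted(extents + [inside])
    ordered_outside = sorted(extents + [outside])
    return ordered_inside.index(inside) == 1, ordered_outside.index(outside) == 1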
class TextBlock(Gtk.EventBox):
PAINT_PRIMARY_CURSOR = False
DEBUG_PAINT_BBOXES = False
BULLET_POINT = u' \u2022 '
def __init__(self):
Gtk.EventBox.__init__(self)
self.set_visible_window(False)
self.set_size_request(200, -1)
self.set_can_focus(True)
self.set_events(Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.POINTER_MOTION_MASK)
self._is_new = False
self.order = []
self.cursor = cur = PrimaryCursor(self)
self.selection = sel = SelectionCursor(self.cursor)
self.clipboard = None
#~ event_helper = EventHelper()
self._update_cached_layouts()
self._test_layout = self.create_pango_layout('')
#self._xterm = Gdk.Cursor.new(Gdk.XTERM)
# popup menu and menuitem's
self.copy_menuitem = Gtk.ImageMenuItem.new_from_stock(
Gtk.STOCK_COPY, None)
self.select_all_menuitem = Gtk.ImageMenuItem.new_from_stock(
Gtk.STOCK_SELECT_ALL, None)
self.menu = Gtk.Menu()
self.menu.attach_to_widget(self, None)
self.menu.append(self.copy_menuitem)
self.menu.append(self.select_all_menuitem)
self.menu.show_all()
self.copy_menuitem.connect('select', self._menu_do_copy, sel)
self.select_all_menuitem.connect('select', self._menu_do_select_all,
cur, sel)
#~ Gtk.drag_source_set(self, Gdk.ModifierType.BUTTON1_MASK,
#~ None, Gdk.DragAction.COPY)
#~ Gtk.drag_source_add_text_targets(self)
#~ self.connect('drag-begin', self._on_drag_begin)
#~ self.connect('drag-data-get', self._on_drag_data_get, sel)
event_helper = EventHelper()
self.connect('button-press-event', self._on_press, event_helper, cur,
sel)
self.connect('button-release-event', self._on_release, event_helper,
cur, sel)
self.connect('motion-notify-event', self._on_motion, event_helper,
cur, sel)
self.connect('key-press-event', self._on_key_press, cur, sel)
self.connect('key-release-event', self._on_key_release, cur, sel)
self.connect('focus-in-event', self._on_focus_in)
self.connect('focus-out-event', self._on_focus_out)
self.connect("size-allocate", self.on_size_allocate)
self.connect('style-updated', self._on_style_updated)
def on_size_allocate(self, *args):
allocation = self.get_allocation()
width = allocation.width
x = y = 0
for layout in self.order:
layout.set_width(_PS * (width - layout.indent))
if layout.index > 0:
y += (layout.vspacing or self.line_height)
e = layout.get_pixel_extents()
if self.get_direction() != Gtk.TextDirection.RTL:
layout.set_allocation(e.x + layout.indent, y + e.y,
width - layout.indent, e.height)
else:
layout.set_allocation(x + width - e.x - e.width -
layout.indent - 1, y + e.y, width - layout.indent,
e.height)
y += e.y + e.height
# overrides
def do_get_request_mode(self):
return Gtk.SizeRequestMode.HEIGHT_FOR_WIDTH
def do_get_preferred_height_for_width(self, width):
height = 0
layout = self._test_layout
for l in self.order:
layout.set_text(l.get_text(), -1)
layout.set_width(_PS * (width - l.indent))
lh = layout.get_pixel_extents()[1].height
height += lh + (l.vspacing or self.line_height)
height = max(50, height)
return height, height
def do_draw(self, cr):
self.render(self, cr)
def _config_colors(self):
context = self.get_style_context()
context.save()
context.add_class(Gtk.STYLE_CLASS_HIGHLIGHT)
state = self.get_state_flags()
if self.has_focus():
state |= Gtk.StateFlags.FOCUSED
context.set_state(state)
self._bg = color_to_hex(context.get_background_color(state))
self._fg = color_to_hex(context.get_color(state))
context.restore()
def _on_style_updated(self, widget):
self._config_colors()
self._update_cached_layouts()
# def _on_drag_begin(self, widgets, context, event_helper):
# print 'drag: begin'
def _on_drag_data_get(self, widget, context, selection, info, timestamp,
sel):
# print 'drag: get data'
text = self.get_selected_text(sel)
selection.set_text(text, -1)
def _on_focus_in(self, widget, event):
self._config_colors()
def _on_focus_out(self, widget, event):
self._config_colors()
def _on_motion(self, widget, event, event_helper, cur, sel):
if not (event.state == Gdk.ModifierType.BUTTON1_MASK):
# or not self.has_focus():
return
# check if we have moved enough to count as a drag
press = event_helper['event']
# mvo: how can this be?
if not press:
return
start_x, start_y = int(press.x), int(press.y)
cur_x, cur_y = int(event.x), int(event.y)
if (not event_helper['drag-active'] and
self.drag_check_threshold(start_x, start_y, cur_x, cur_y)):
event_helper['drag-active'] = True
if not event_helper['drag-active']:
return
#~ if (event_helper['within-selection'] and
#~ not event_helper['drag-context']):
#~ target_list = Gtk.TargetList()
#~ target_list.add_text_targets(80)
#~ ctx = self.drag_begin(target_list, # target list
#~ Gdk.DragAction.COPY, # action
#~ 1, # initiating button
#~ event) # event
#~
#~ event_helper['drag-context'] = ctx
#~ return
for layout in self.order:
point_in, index = layout.index_at(cur_x, cur_y)
if point_in:
cur.set_position(layout.index, index)
self.queue_draw()
break
def _on_press(self, widget, event, event_helper, cur, sel):
if sel and not self.has_focus():
self.grab_focus()
return # spot the difference
if not self.has_focus():
self.grab_focus()
if event.button == 3:
self._button3_action(cur, sel, event)
return
elif event.button != 1:
return
for layout in self.order:
x, y = int(event.x), int(event.y)
point_in, index = layout.index_at(x, y)
if point_in:
within_sel = False
#~ within_sel = sel.within_selection((layout.index, index))
if not within_sel:
cur.set_position(layout.index, index)
sel.clear()
#~ event_helper.new_press(event.copy(), layout, index,
#~ within_sel)
event_helper.new_press(event, layout, index, within_sel)
break
def _on_release(self, widget, event, event_helper, cur, sel):
if not event_helper['event']:
return
# check if a drag occurred
if event_helper['drag-active']:
# if so, do not handle release
return
# else, handle release, do click
cur.set_position(event_helper['layout'].index,
event_helper['index'])
sel.clear()
press = event_helper['event']
if (press.type == Gdk.EventType._2BUTTON_PRESS):
self._2click_select(cur, sel)
elif (press.type == Gdk.EventType._3BUTTON_PRESS):
self._3click_select(cur, sel)
self.queue_draw()
def _menu_do_copy(self, item, sel):
self._copy_text(sel)
def _menu_do_select_all(self, item, cur, sel):
self._select_all(cur, sel)
def _button3_action(self, cur, sel, event):
start, end = sel.get_range()
self.copy_menuitem.set_sensitive(True)
self.select_all_menuitem.set_sensitive(True)
if not sel:
self.copy_menuitem.set_sensitive(False)
elif start == (0, 0) and \
end == (len(self.order) - 1, len(self.order[-1])):
self.select_all_menuitem.set_sensitive(False)
self.menu.popup(None, # parent_menu_shell,
None, # parent_menu_item,
None, # GtkMenuPositionFunc func,
None, # data,
event.button,
event.time)
def _on_key_press(self, widget, event, cur, sel):
kv = event.keyval
s, i = cur.paragraph, cur.index
handled_keys = True
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0
shift = (event.state & Gdk.ModifierType.SHIFT_MASK) > 0
if not self.PAINT_PRIMARY_CURSOR and \
kv in (Gdk.KEY_uparrow, Gdk.KEY_downarrow) and not sel:
return False
if kv == Gdk.KEY_Tab:
handled_keys = False
elif kv == Gdk.KEY_Left:
if ctrl:
self._select_left_word(cur, sel, s, i)
else:
self._select_left(cur, sel, s, i, shift)
if shift:
layout = self._get_cursor_layout()
pos = layout.index_to_pos(cur.index)
sel.set_target_x(pos.x, layout.indent)
elif kv == Gdk.KEY_Right:
if ctrl:
self._select_right_word(cur, sel, s, i)
else:
self._select_right(cur, sel, s, i, shift)
if shift:
layout = self._get_cursor_layout()
pos = layout.index_to_pos(cur.index)
sel.set_target_x(pos.x, layout.indent)
elif kv == Gdk.KEY_Up:
if ctrl:
if i == 0:
if s > 0:
cur.paragraph -= 1
cur.set_position(cur.paragraph, 0)
elif sel and not shift:
cur.set_position(*sel.min)
else:
self._select_up(cur, sel)
elif kv == Gdk.KEY_Down:
if ctrl:
if i == len(self._get_layout(cur)):
if s + 1 < len(self.order):
cur.paragraph += 1
i = len(self._get_layout(cur))
cur.set_position(cur.paragraph, i)
elif sel and not shift:
cur.set_position(*sel.max)
else:
self._select_down(cur, sel)
elif kv == Gdk.KEY_Home:
if shift:
self._select_home(cur, sel, self.order[cur.paragraph])
else:
cur.set_position(0, 0)
elif kv == Gdk.KEY_End:
if shift:
self._select_end(cur, sel, self.order[cur.paragraph])
else:
cur.paragraph = len(self.order) - 1
cur.index = len(self._get_layout(cur))
else:
handled_keys = False
if not shift and handled_keys:
sel.clear(kv)
self.queue_draw()
return handled_keys
def _on_key_release(self, widget, event, cur, sel):
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0
if ctrl:
if event.keyval == Gdk.KEY_a:
self._select_all(cur, sel)
elif event.keyval == Gdk.KEY_c:
self._copy_text(sel)
self.queue_draw()
def _select_up(self, cur, sel):
#~ if sel and not cur.is_min(sel) and cur.same_line(sel):
#~ cur.switch(sel)
s = cur.paragraph
layout = self._get_layout(cur)
if sel.target_x:
x = sel.target_x
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
(_, j, k), (x, y) = layout.cursor_up(cur, x)
j += k
else:
(_, j, k), (x, y) = layout.cursor_up(cur)
j += k
sel.set_target_x(x, layout.indent)
if (s, j) != cur.get_position():
cur.set_position(s, j)
elif s > 0:
cur.paragraph = s - 1
layout = self._get_layout(cur)
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
y = layout.get_extents()[0].height
(_, j, k) = layout.xy_to_index(x, y)
cur.set_position(s - 1, j + k)
else:
return False
return True
def _select_down(self, cur, sel):
#~ if sel and not cur.is_max(sel) and cur.same_line(sel):
#~ cur.switch(sel)
s = cur.paragraph
layout = self._get_layout(cur)
if sel.target_x:
x = sel.target_x
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
(_, j, k), (x, y) = layout.cursor_down(cur, x)
j += k
else:
(_, j, k), (x, y) = layout.cursor_down(cur)
j += k
sel.set_target_x(x, layout.indent)
if (s, j) != cur.get_position():
cur.set_position(s, j)
elif s < len(self.order) - 1:
cur.paragraph = s + 1
layout = self._get_layout(cur)
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
y = 0
(_, j, k) = layout.xy_to_index(x, y)
cur.set_position(s + 1, j + k)
else:
return False
return True
def _2click_select(self, cursor, sel):
self._select_word(cursor, sel)
def _3click_select(self, cursor, sel):
# XXX:
# _select_line seems to expose the following Pango issue:
# (description.py:3892): Pango-CRITICAL **:
# pango_layout_line_unref: assertion `private->ref_count > 0'
# failed
# ... which can result in a segfault
#~ self._select_line(cursor, sel)
self._select_all(cursor, sel)
def _copy_text(self, sel):
text = self.get_selected_text(sel)
if not self.clipboard:
display = Gdk.Display.get_default()
selection = Gdk.Atom.intern("CLIPBOARD", False)
self.clipboard = Gtk.Clipboard.get_for_display(display, selection)
self.clipboard.clear()
self.clipboard.set_text(text.strip(), -1)
def _select_end(self, cur, sel, layout):
if not cur.is_max(sel):
cur.switch(sel)
        n, r = cur.get_current_line()
cur_pos = cur.get_position()
if cur_pos == (len(self.order) - 1, len(self.order[-1])): # abs end
if sel.restore_point:
# reinstate restore point
cur.set_position(*sel.restore_point)
else:
# reselect the line end
                n, r = sel.get_current_line()
cur.set_position(n[0], r[1])
elif cur_pos[1] == len(self.order[n[0]]): # para end
# select abs end
cur.set_position(len(self.order) - 1, len(self.order[-1]))
elif cur_pos == (n[0], r[1]): # line end
# select para end
cur.set_position(n[0], len(self.order[n[0]]))
else: # not at any end, within line somewhere
# select line end
if sel:
sel.restore_point = cur_pos
cur.set_position(n[0], r[1])
def _select_home(self, cur, sel, layout):
if not cur.is_min(sel):
cur.switch(sel)
        n, r = cur.get_current_line()
cur_pos = cur.get_position()
if cur_pos == (0, 0): # absolute home
if sel.restore_point:
cur.set_position(*sel.restore_point)
else:
                n, r = sel.get_current_line()
cur.set_position(n[0], r[0])
elif cur_pos[1] == 0: # para home
cur.set_position(0, 0)
elif cur_pos == (n[0], r[0]): # line home
cur.set_position(n[0], 0)
else: # not at any home, within line somewhere
if sel:
sel.restore_point = cur_pos
cur.set_position(n[0], r[0])
def _select_left(self, cur, sel, s, i, shift):
if not shift and not cur.is_min(sel):
cur.switch(sel)
return
if i > 0:
cur.set_position(s, i - 1)
elif cur.paragraph > 0:
cur.paragraph -= 1
cur.set_position(s - 1, len(self._get_layout(cur)))
def _select_right(self, cur, sel, s, i, shift):
if not shift and not cur.is_max(sel):
cur.switch(sel)
return
if i < len(self._get_layout(cur)):
cur.set_position(s, i + 1)
elif s < len(self.order) - 1:
cur.set_position(s + 1, 0)
def _select_left_word(self, cur, sel, s, i):
if i > 0:
cur.index -= 1
elif s > 0:
cur.paragraph -= 1
cur.index = len(self._get_layout(cur))
paragraph, word = cur.get_current_word()
if not word:
return
cur.set_position(paragraph, max(0, word[0] - 1))
def _select_right_word(self, cur, sel, s, i):
ll = len(self._get_layout(cur))
if i < ll:
cur.index += 1
elif s + 1 < len(self.order):
cur.paragraph += 1
cur.index = 0
paragraph, word = cur.get_current_word()
if not word:
return
cur.set_position(paragraph, min(word[1] + 1, ll))
def _select_word(self, cursor, sel):
paragraph, word = cursor.get_current_word()
if word:
cursor.set_position(paragraph, word[1] + 1)
sel.set_position(paragraph, word[0])
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
def _select_line(self, cursor, sel):
n, r = self.cursor.get_current_line()
sel.set_position(n[0], r[0])
cursor.set_position(n[0], r[1])
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
def _select_all(self, cursor, sel):
layout = self.order[-1]
sel.set_position(0, 0)
cursor.set_position(layout.index, len(layout))
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
def _selection_copy(self, layout, sel, new_para=True):
i = layout.index
start, end = sel.get_range()
if new_para:
text = '\n\n'
else:
text = ''
if sel and i >= start[0] and i <= end[0]:
if i == start[0]:
if end[0] > i:
return text + layout.get_text()[start[1]: len(layout)]
else:
return text + layout.get_text()[start[1]: end[1]]
elif i == end[0]:
if start[0] < i:
return text + layout.get_text()[0: end[1]]
else:
return text + layout.get_text()[start[1]: end[1]]
else:
return text + layout.get_text()
return ''
def _new_layout(self, text=''):
layout = Layout(self, text)
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
return layout
def _update_cached_layouts(self):
self._bullet = self._new_layout()
self._bullet.set_markup(self.BULLET_POINT)
font_desc = Pango.FontDescription()
font_desc.set_weight(Pango.Weight.BOLD)
self._bullet.set_font_description(font_desc)
e = self._bullet.get_pixel_extents()
self.indent, self.line_height = e.width, e.height
def _selection_highlight(self, layout, sel, bg, fg):
i = layout.index
start, end = sel.get_range()
if sel and i >= start[0] and i <= end[0]:
if i == start[0]:
if end[0] > i:
layout.highlight(start[1], len(layout), bg, fg)
else:
layout.highlight(start[1], end[1], bg, fg)
elif i == end[0]:
if start[0] < i:
layout.highlight(0, end[1], bg, fg)
else:
layout.highlight(start[1], end[1], bg, fg)
else:
layout.highlight_all(bg, fg)
elif not layout._default_attrs:
layout.reset_attrs()
def _paint_bullet_point(self, cr, x, y):
# draw the layout
Gtk.render_layout(self.get_style_context(),
cr, # state
x, # x coord
y, # y coord
self._bullet._layout) # a Pango.Layout()
def _get_layout(self, cursor):
return self.order[cursor.paragraph]
def _get_cursor_layout(self):
return self.order[self.cursor.paragraph]
def _get_selection_layout(self):
return self.order[self.selection.paragraph]
def render(self, widget, cr):
if not self.order:
return
a = self.get_allocation()
for layout in self.order:
lx, ly = layout.get_position()
self._selection_highlight(layout,
self.selection,
self._bg, self._fg)
if layout.is_bullet:
if self.get_direction() != Gtk.TextDirection.RTL:
indent = layout.indent - self.indent
else:
indent = a.width - layout.indent
self._paint_bullet_point(cr, indent, ly)
if self.DEBUG_PAINT_BBOXES:
la = layout.allocation
cr.rectangle(la.x, la.y, la.width, la.height)
cr.set_source_rgb(1, 0, 0)
cr.stroke()
# draw the layout
Gtk.render_layout(self.get_style_context(),
cr,
lx, # x coord
ly, # y coord
layout._layout) # a Pango.Layout()
# draw the cursor
if self.PAINT_PRIMARY_CURSOR and self.has_focus():
self.cursor.draw(cr, self._get_layout(self.cursor), a)
def append_paragraph(self, p, vspacing=None):
l = self._new_layout()
l.index = len(self.order)
l.vspacing = vspacing
l.set_text(p)
self.order.append(l)
def append_bullet(self, point, indent_level, vspacing=None):
l = self._new_layout()
l.index = len(self.order)
l.indent = self.indent * (indent_level + 1)
l.vspacing = vspacing
l.is_bullet = True
l.set_text(point)
self.order.append(l)
def copy_clipboard(self):
self._copy_text(self.selection)
def get_selected_text(self, sel=None):
text = ''
if not sel:
sel = self.selection
for layout in self.order:
text += self._selection_copy(layout, sel, (layout.index > 0))
return text
def select_all(self):
self._select_all(self.cursor, self.selection)
self.queue_draw()
def finished(self):
self.queue_resize()
def clear(self, key=None):
self.cursor.zero()
self.selection.clear(key)
self.order = []
class AppDescription(Gtk.VBox):
TYPE_PARAGRAPH = 0
TYPE_BULLET = 1
_preparser = _SpecialCasePreParsers()
def __init__(self):
Gtk.VBox.__init__(self)
self.description = TextBlock()
self.pack_start(self.description, False, False, 0)
self._prev_type = None
def _part_is_bullet(self, part):
# normalize_description() ensures that we only have "* " bullets
i = part.find("* ")
return i > -1, i
def _parse_desc(self, desc, pkgname):
""" Attempt to maintain original fixed width layout, while
reconstructing the description into text blocks
(either paragraphs or bullets) which are line-wrap friendly.
"""
        # pre-parse description if a special case exists for the given pkgname
desc = self._preparser.preparse(pkgname, desc)
parts = normalize_package_description(desc).split('\n')
for part in parts:
if not part:
continue
is_bullet, indent = self._part_is_bullet(part)
if is_bullet:
self.append_bullet(part, indent)
else:
self.append_paragraph(part)
self.description.finished()
def clear(self):
self.description.clear()
def append_paragraph(self, p):
vspacing = self.description.line_height
self.description.append_paragraph(p.strip(), vspacing)
self._prev_type = self.TYPE_PARAGRAPH
def append_bullet(self, point, indent_level):
if self._prev_type == self.TYPE_BULLET:
vspacing = int(0.4 * self.description.line_height)
else:
vspacing = self.description.line_height
self.description.append_bullet(
point[indent_level + 2:], indent_level, vspacing)
self._prev_type = self.TYPE_BULLET
def set_description(self, raw_desc, pkgname):
self.clear()
if type(raw_desc) == str:
encoded_desc = unicode(raw_desc, 'utf8').encode('utf8')
else:
encoded_desc = raw_desc.encode('utf8')
self._text = GObject.markup_escape_text(encoded_desc)
self._parse_desc(self._text, pkgname)
self.show_all()
# easy access to some TextBlock methods
def copy_clipboard(self):
return TextBlock.copy_clipboard(self.description)
def get_selected_text(self):
return TextBlock.get_selected_text(self.description)
def select_all(self):
return TextBlock.select_all(self.description)
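# Illustrative sketch (not part of the original module): mirrors the classification
# step of AppDescription._parse_desc() without any GTK widgets, assuming the text has
# already been through normalize_package_description(). The sample string is made up;
# the real code hands each part to append_bullet() or append_paragraph().
def _demo_classify_description_parts():
    parts = "A short summary.\n* first bullet\n  * indented bullet\n".split('\n')
    classified = []
    for part in parts:
        if not part:
            continue
        indent = part.find("* ")  # same check as _part_is_bullet()
        classified.append(('bullet', indent) if indent > -1 else ('paragraph', -1))
    return classified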
def get_test_description_window():
EXAMPLE0 = """p7zip is the Unix port of 7-Zip, a file archiver that \
archives with very high compression ratios.
p7zip-full provides:
- /usr/bin/7za a standalone version of the 7-zip tool that handles
7z archives (implementation of the LZMA compression algorithm) and some \
other formats.
- /usr/bin/7z not only does it handle 7z but also ZIP, Zip64, CAB, RAR, \
ARJ, GZIP,
BZIP2, TAR, CPIO, RPM, ISO and DEB archives. 7z compression is 30-50% \
better than ZIP compression.
p7zip provides 7zr, a light version of 7za, and p7zip a gzip like wrapper \
around 7zr."""
EXAMPLE1 = """Transmageddon supports almost any format as its input and \
can generate a very large host of output files. The goal of the application \
was to help people to create the files they need to be able to play on their \
mobile devices and for people not hugely experienced with multimedia to \
generate a multimedia file without having to resort to command line tools \
with ungainly syntaxes.
The currently supported codecs are:
* Containers:
- Ogg
- Matroska
- AVI
- MPEG TS
- flv
- QuickTime
- MPEG4
- 3GPP
- MXT
* Audio encoders:
- Vorbis
- FLAC
- MP3
- AAC
- AC3
- Speex
- Celt
* Video encoders:
- Theora
- Dirac
- H264
- MPEG2
- MPEG4/DivX5
- xvid
- DNxHD
It also provide the support for the GStreamer's plugins auto-search."""
EXAMPLE2 = """File-roller is an archive manager for the GNOME \
environment. It allows you to:
* Create and modify archives.
* View the content of an archive.
* View a file contained in an archive.
* Extract files from the archive.
File-roller supports the following formats:
* Tar (.tar) archives, including those compressed with
gzip (.tar.gz, .tgz), bzip (.tar.bz, .tbz), bzip2 (.tar.bz2, .tbz2),
compress (.tar.Z, .taz), lzip (.tar.lz, .tlz), lzop (.tar.lzo, .tzo),
lzma (.tar.lzma) and xz (.tar.xz)
* Zip archives (.zip)
* Jar archives (.jar, .ear, .war)
* 7z archives (.7z)
* iso9660 CD images (.iso)
* Lha archives (.lzh)
* Single files compressed with gzip (.gz), bzip (.bz), bzip2 (.bz2),
compress (.Z), lzip (.lz), lzop (.lzo), lzma (.lzma) and xz (.xz)
File-roller doesn't perform archive operations by itself, but relies on \
standard tools for this."""
EXAMPLE3 = """This package includes the following CTAN packages:
Asana-Math -- A font to typeset maths in Xe(La)TeX.
albertus --
allrunes -- Fonts and LaTeX package for almost all runes.
antiqua -- the URW Antiqua Condensed Font.
antp -- Antykwa Poltawskiego: a Type 1 family of Polish traditional type.
antt -- Antykwa Torunska: a Type 1 family of a Polish traditional type.
apl -- Fonts for typesetting APL programs.
ar -- Capital A and capital R ligature for Apsect Ratio.
archaic -- A collection of archaic fonts.
arev -- Fonts and LaTeX support files for Arev Sans.
ascii -- Support for IBM "standard ASCII" font.
astro -- Astronomical (planetary) symbols.
atqolive --
augie -- Calligraphic font for typesetting handwriting.
auncial-new -- Artificial Uncial font and LaTeX support macros.
aurical -- Calligraphic fonts for use with LaTeX in T1 encoding.
barcodes -- Fonts for making barcodes.
bayer -- Herbert Bayers Universal Font For Metafont.
bbding -- A symbol (dingbat) font and LaTeX macros for its use.
bbm -- "Blackboard-style" cm fonts.
bbm-macros -- LaTeX support for "blackboard-style" cm fonts.
bbold -- Sans serif blackboard bold.
belleek -- Free replacement for basic MathTime fonts.
bera -- Bera fonts.
blacklettert1 -- T1-encoded versions of Haralambous old German fonts.
boisik -- A font inspired by Baskerville design.
bookhands -- A collection of book-hand fonts.
braille -- Support for braille.
brushscr -- A handwriting script font.
calligra -- Calligraphic font.
carolmin-ps -- Adobe Type 1 format of Carolingian Minuscule fonts.
cherokee -- A font for the Cherokee script.
clarendo --
cm-lgc -- Type 1 CM-based fonts for Latin, Greek and Cyrillic.
cmbright -- Computer Modern Bright fonts.
cmll -- Symbols for linear logic.
cmpica -- A Computer Modern Pica variant.
coronet --
courier-scaled -- Provides a scaled Courier font.
cryst -- Font for graphical symbols used in crystallography.
cyklop -- The Cyclop typeface.
dancers -- Font for Conan Doyle's "The Dancing Men".
dice -- A font for die faces.
dictsym -- DictSym font and macro package
dingbat -- Two dingbat symbol fonts.
doublestroke -- Typeset mathematical double stroke symbols.
dozenal -- Typeset documents using base twelve numbering (also called
"dozenal")
duerer -- Computer Duerer fonts.
duerer-latex -- LaTeX support for the Duerer fonts.
ean -- Macros for making EAN barcodes.
ecc -- Sources for the European Concrete fonts.
eco -- Oldstyle numerals using EC fonts.
eiad -- Traditional style Irish fonts.
eiad-ltx -- LaTeX support for the eiad font.
elvish -- Fonts for typesetting Tolkien Elvish scripts.
epigrafica -- A Greek and Latin font.
epsdice -- A scalable dice "font".
esvect -- Vector arrows.
eulervm -- Euler virtual math fonts.
euxm --
feyn -- A font for in-text Feynman diagrams.
fge -- A font for Frege's Grundgesetze der Arithmetik.
foekfont -- The title font of the Mads Fok magazine.
fonetika -- Support for the danish "Dania" phonetic system.
fourier -- Using Utopia fonts in LaTeX documents.
fouriernc -- Use New Century Schoolbook text with Fourier maths fonts.
frcursive -- French cursive hand fonts.
garamond --
genealogy -- A compilation genealogy font.
gfsartemisia -- A modern Greek font design.
gfsbodoni -- A Greek and Latin font based on Bodoni.
gfscomplutum -- A Greek font with a long history.
gfsdidot -- A Greek font based on Didot's work.
gfsneohellenic -- A Greek font in the Neo-Hellenic style.
gfssolomos -- A Greek-alphabet font.
gothic -- A collection of old German-style fonts.
greenpoint -- The Green Point logo.
groff --
grotesq -- the URW Grotesk Bold Font.
hands -- Pointing hand font.
hfbright -- The hfbright fonts.
hfoldsty -- Old style numerals with EC fonts.
ifsym -- A collection of symbols.
inconsolata -- A monospaced font, with support files for use with TeX.
initials -- Adobe Type 1 decorative initial fonts.
iwona -- A two-element sans-serif font.
junicode -- A TrueType font for mediaevalists.
kixfont -- A font for KIX codes.
knuthotherfonts --
kpfonts -- A complete set of fonts for text and mathematics.
kurier -- A two-element sans-serif typeface.
lettrgth --
lfb -- A Greek font with normal and bold variants.
libertine -- Use the font Libertine with LaTeX.
libris -- Libris ADF fonts, with LaTeX support.
linearA -- Linear A script fonts.
logic -- A font for electronic logic design.
lxfonts -- Set of slide fonts based on CM.
ly1 -- Support for LY1 LaTeX encoding.
marigold --
mathabx -- Three series of mathematical symbols.
mathdesign -- Mathematical fonts to fit with particular text fonts.
mnsymbol -- Mathematical symbol font for Adobe MinionPro.
nkarta -- A "new" version of the karta cartographic fonts.
ocherokee -- LaTeX Support for the Cherokee language.
ogham -- Fonts for typesetting Ogham script.
oinuit -- LaTeX Support for the Inuktitut Language.
optima --
orkhun -- A font for orkhun script.
osmanian -- Osmanian font for writing Somali.
pacioli -- Fonts designed by Fra Luca de Pacioli in 1497.
pclnfss -- Font support for current PCL printers.
phaistos -- Disk of Phaistos font.
phonetic -- MetaFont Phonetic fonts, based on Computer Modern.
pigpen -- A font for the pigpen (or masonic) cipher.
psafm --
punk -- Donald Knuth's punk font.
recycle -- A font providing the "recyclable" logo.
sauter -- Wide range of design sizes for CM fonts.
sauterfonts -- Use sauter fonts in LaTeX.
semaphor -- Semaphore alphabet font.
simpsons -- MetaFont source for Simpsons characters.
skull -- A font to draw a skull.
staves -- Typeset Icelandic staves and runic letters.
tapir -- A simple geometrical font.
tengwarscript -- LaTeX support for using Tengwar fonts.
trajan -- Fonts from the Trajan column in Rome.
umtypewriter -- Fonts to typeset with the xgreek package.
univers --
universa -- Herbert Bayer's 'universal' font.
venturisadf -- Venturis ADF fonts collection.
wsuipa -- International Phonetic Alphabet fonts.
yfonts -- Support for old German fonts.
zefonts -- Virtual fonts to provide T1 encoding from existing fonts."""
EXAMPLE4 = """Arista is a simple multimedia transcoder, it focuses on \
being easy to use by making complex task of encoding for various devices \
simple.
Users should pick an input and a target device, choose a file to save to and \
go. Features:
* Presets for iPod, computer, DVD player, PSP, Playstation 3, and more.
* Live preview to see encoded quality.
* Automatically discover available DVD media and Video 4 Linux (v4l) devices.
* Rip straight from DVD media easily (requires libdvdcss).
* Rip straight from v4l devices.
* Simple terminal client for scripting.
* Automatic preset updating."""
def on_clicked(widget, desc_widget, descs):
widget.position += 1
if widget.position >= len(descs):
widget.position = 0
desc_widget.set_description(*descs[widget.position])
descs = ((EXAMPLE0, ''),
(EXAMPLE1, ''),
(EXAMPLE2, ''),
(EXAMPLE3, 'texlive-fonts-extra'),
(EXAMPLE4, ''))
win = Gtk.Window()
win.set_default_size(300, 400)
win.set_has_resize_grip(True)
vb = Gtk.VBox()
win.add(vb)
b = Gtk.Button('Next test description >>')
b.position = 0
vb.pack_start(b, False, False, 0)
scroll = Gtk.ScrolledWindow()
vb.add(scroll)
d = AppDescription()
#~ d.description.DEBUG_PAINT_BBOXES = True
d.set_description(EXAMPLE0, pkgname='')
scroll.add_with_viewport(d)
win.show_all()
b.connect("clicked", on_clicked, d, descs)
win.connect('destroy', lambda x: Gtk.main_quit())
return win
if __name__ == '__main__':
win = get_test_description_window()
win.show_all()
Gtk.main()
| gpl-3.0 | -5,847,206,272,166,974,000 | 32.071823 | 79 | 0.571584 | false | 3.605481 | false | false | false |
teddym6/qualitybots | src/appengine/handlers/machine_pool.py | 26 | 5651 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for assisting with the machine install process."""
# Disable 'Import not at top of file' lint error.
# pylint: disable-msg=C6204, C6205, W0611
import logging
from django.utils import simplejson
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from common import ec2_manager
from common import enum
from handlers import base
from handlers import launch_tasks
from models import client_machine
INIT_START = '/init/start'
INSTALL_FAILED = '/init/install_failed'
INSTALL_SUCCEEDED = '/init/install_succeeded'
class InitializationStart(base.BaseHandler):
"""Handler to acknowledge a machine starting initialization."""
# Disable 'Invalid method name' lint error.
# pylint: disable-msg=C6409
def get(self):
"""Updates the status of a machine starting initialization."""
instance_id = self.GetRequiredParameter('instance_id')
instance = db.GqlQuery('SELECT * FROM ClientMachine WHERE client_id = :1',
instance_id).get()
if not instance:
logging.error('The given instance id "%s" does not match any machines.',
instance_id)
self.error(500)
return
if instance.status != enum.MACHINE_STATUS.PROVISIONED:
logging.error('The machine with instance id "%s" was in an unexpected '
'state for initialization: "%s"', instance_id,
enum.MACHINE_STATUS.LookupKey(instance.status))
instance.status = enum.MACHINE_STATUS.INITIALIZING
instance.put()
self.response.out.write('Initialization acknowledged.')
class InstallFailed(base.BaseHandler):
"""Handler to deal with a machine that fails to properly setup and install."""
# Disable 'Invalid method name' lint error.
# pylint: disable-msg=C6409
def post(self):
"""Updates the status of a machine that failed with initialization."""
instance_id = self.GetRequiredParameter('instance_id')
log = self.GetOptionalParameter('log', None)
old_instance = db.GqlQuery(
'SELECT * FROM ClientMachine WHERE client_id = :1',
instance_id).get()
if not old_instance:
logging.error('The given instance id "%s" does not match any machines.',
instance_id)
self.error(500)
return
if old_instance.status != enum.MACHINE_STATUS.INITIALIZING:
logging.error('The machine with instance id "%s" was in an unexpected '
'state for initialization: "%s"', instance_id,
enum.MACHINE_STATUS.LookupKey(old_instance.status))
old_instance.status = enum.MACHINE_STATUS.FAILED
if log:
old_instance.initialization_log = log
old_instance.put()
if old_instance.retry_count >= client_machine.MAX_RETRIES:
logging.error('Reached the maximum number of retries for starting this '
'machine: %s.', str(old_instance.key()))
logging.info('Terminating the failed instance.')
deferred.defer(launch_tasks.TerminateFailedMachine, instance_id,
_countdown=launch_tasks.DEFAULT_COUNTDOWN,
_queue=launch_tasks.DEFAULT_QUEUE)
self.error(500)
return
logging.info('Rebooting the failed instance.')
deferred.defer(launch_tasks.RebootMachine, instance_id,
_countdown=launch_tasks.DEFAULT_COUNTDOWN,
_queue=launch_tasks.DEFAULT_QUEUE)
self.response.out.write('Initialization failure acknowledged.')
class InstallSucceeded(base.BaseHandler):
"""Handler to deal with a machine that installs successfully."""
# Disable 'Invalid method name' lint error.
# pylint: disable-msg=C6409
def post(self):
"""Updates the status of a machine that succeeded with initialization."""
instance_id = self.GetRequiredParameter('instance_id')
log = self.GetOptionalParameter('log', None)
instance = db.GqlQuery('SELECT * FROM ClientMachine WHERE client_id = :1',
instance_id).get()
if not instance:
logging.error('The given instance id "%s" does not match any machines.',
instance_id)
self.error(500)
return
if instance.status != enum.MACHINE_STATUS.INITIALIZING:
logging.error('The machine with instance id "%s" was in an unexpected '
'state for initialization: "%s"', instance_id,
enum.MACHINE_STATUS.LookupKey(instance.status))
instance.status = enum.MACHINE_STATUS.RUNNING
if log:
instance.initialization_log = log
instance.put()
self.response.out.write('Initialization success acknowledged.')
application = webapp.WSGIApplication(
[(INIT_START, InitializationStart),
(INSTALL_FAILED, InstallFailed),
     (INSTALL_SUCCEEDED, InstallSucceeded)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| apache-2.0 | 5,529,648,068,327,806,000 | 32.636905 | 80 | 0.682357 | false | 4.146001 | false | false | false |
aspiers/pacemaker | cts/CM_ais.py | 15 | 5946 | '''CTS: Cluster Testing System: AIS dependent modules...
'''
__copyright__ = '''
Copyright (C) 2007 Andrew Beekhof <andrew@suse.de>
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
from cts.CTSvars import *
from cts.CM_lha import crm_lha
from cts.CTS import Process
from cts.patterns import PatternSelector
#######################################################################
#
# LinuxHA v2 dependent modules
#
#######################################################################
class crm_ais(crm_lha):
'''
The crm version 3 cluster manager class.
It implements the things we need to talk to and manipulate
crm clusters running on top of openais
'''
def __init__(self, Environment, randseed=None, name=None):
if not name: name="crm-ais"
crm_lha.__init__(self, Environment, randseed=randseed, name=name)
self.fullcomplist = {}
self.templates = PatternSelector(self.name)
def NodeUUID(self, node):
return node
def ais_components(self, extra={}):
complist = []
if not len(self.fullcomplist.keys()):
for c in ["cib", "lrmd", "crmd", "attrd" ]:
self.fullcomplist[c] = Process(
self, c,
pats = self.templates.get_component(self.name, c),
badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c),
common_ignore = self.templates.get_component(self.name, "common-ignore"))
# pengine uses dc_pats instead of pats
self.fullcomplist["pengine"] = Process(
self, "pengine",
dc_pats = self.templates.get_component(self.name, "pengine"),
badnews_ignore = self.templates.get_component(self.name, "pengine-ignore"),
common_ignore = self.templates.get_component(self.name, "common-ignore"))
# stonith-ng's process name is different from its component name
self.fullcomplist["stonith-ng"] = Process(
self, "stonith-ng", process="stonithd",
pats = self.templates.get_component(self.name, "stonith"),
badnews_ignore = self.templates.get_component(self.name, "stonith-ignore"),
common_ignore = self.templates.get_component(self.name, "common-ignore"))
# add (or replace) any extra components passed in
self.fullcomplist.update(extra)
# Processes running under valgrind can't be shot with "killall -9 processname",
# so don't include them in the returned list
vgrind = self.Env["valgrind-procs"].split()
for key in list(self.fullcomplist.keys()):
if self.Env["valgrind-tests"]:
if key in vgrind:
self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
continue
if key == "stonith-ng" and not self.Env["DoFencing"]:
continue
complist.append(self.fullcomplist[key])
return complist
class crm_cs_v0(crm_ais):
'''
The crm version 3 cluster manager class.
It implements the things we need to talk to and manipulate
crm clusters running against version 0 of our plugin
'''
def __init__(self, Environment, randseed=None, name=None):
if not name: name="crm-plugin-v0"
crm_ais.__init__(self, Environment, randseed=randseed, name=name)
def Components(self):
extra = {}
extra["corosync"] = Process(
self, "corosync",
pats = self.templates.get_component(self.name, "corosync"),
badnews_ignore = self.templates.get_component(self.name, "corosync-ignore"),
common_ignore = self.templates.get_component(self.name, "common-ignore")
)
return self.ais_components(extra=extra)
class crm_cs_v1(crm_cs_v0):
'''
The crm version 3 cluster manager class.
It implements the things we need to talk to and manipulate
crm clusters running on top of version 1 of our plugin
'''
def __init__(self, Environment, randseed=None, name=None):
if not name: name="crm-plugin-v1"
crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)
class crm_mcp(crm_cs_v0):
'''
The crm version 4 cluster manager class.
It implements the things we need to talk to and manipulate
crm clusters running on top of native corosync (no plugins)
'''
def __init__(self, Environment, randseed=None, name=None):
if not name: name="crm-mcp"
crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)
if self.Env["have_systemd"]:
self.update({
# When systemd is in use, we can look for this instead
"Pat:We_stopped" : "%s.*Corosync Cluster Engine exiting normally",
})
class crm_cman(crm_cs_v0):
'''
The crm version 3 cluster manager class.
It implements the things we need to talk to and manipulate
crm clusters running on top of openais
'''
def __init__(self, Environment, randseed=None, name=None):
if not name: name="crm-cman"
crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)
| gpl-2.0 | 2,421,423,930,139,020,000 | 37.61039 | 110 | 0.61554 | false | 3.868575 | false | false | false |
jlguardi/yowsup | yowsup/layers/protocol_media/protocolentities/builder_message_media_downloadable.py | 17 | 1886 | # from yowsup.layers.protocol_media import mediacipher
import tempfile
import os
class DownloadableMediaMessageBuilder(object):
def __init__(self, downloadbleMediaMessageClass, jid, filepath):
self.jid = jid
self.filepath = filepath
self.encryptedFilepath = None
self.cls = downloadbleMediaMessageClass
self.mediaKey = None
self.attributes = {}
self.mediaType = self.cls.__name__.split("DownloadableMediaMessageProtocolEntity")[0].lower() #ugly ?
# def encrypt(self):
# fd, encpath = tempfile.mkstemp()
# mediaKey = os.urandom(112)
# keys = mediacipher.getDerivedKeys(mediaKey)
# out = mediacipher.encryptImage(self.filepath, keys)
# with open(encImagePath, 'w') as outF:
# outF.write(out)
#
# self.mediaKey = mediaKey
# self.encryptedFilepath = encpath
# def decrypt(self):
# self.mediaKey = None
# self.encryptedFilePath = None
def setEncryptionData(self, mediaKey, encryptedFilepath):
self.mediaKey = mediaKey
self.encryptedFilepath = encryptedFilepath
def isEncrypted(self):
return self.encryptedFilepath is not None
def getFilepath(self):
return self.encryptedFilepath or self.filepath
def getOriginalFilepath(self):
return self.filepath
def set(self, key, val):
self.attributes[key] = val
def get(self, key, default = None):
if key in self.attributes and self.attributes[key] is not None:
return self.attributes[key]
return default
def getOrSet(self, key, func):
if not self.get(key):
self.set(key, func())
def build(self, url = None, ip = None):
if url:
self.set("url", url)
if ip:
self.set("ip", ip)
return self.cls.fromBuilder(self)
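# Illustrative usage sketch (not part of the original yowsup module): the stub entity
# class, JID and file path below are hypothetical stand-ins; a real caller would pass
# a concrete DownloadableMediaMessageProtocolEntity subclass from this package.
class ImageDownloadableMediaMessageProtocolEntityStub(object):
    @classmethod
    def fromBuilder(cls, builder):
        # a real entity would also read builder.get("ip"), the media key, and so on
        return (builder.jid, builder.mediaType, builder.get("url"))
def _demo_media_builder():
    # the stub class name yields mediaType == "image" via the split() in __init__ above
    builder = DownloadableMediaMessageBuilder(
        ImageDownloadableMediaMessageProtocolEntityStub,
        "demo@s.whatsapp.net", "/tmp/photo.jpg")
    builder.set("caption", "holiday picture")
    return builder.build(url="https://example.invalid/media/abc")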
| gpl-3.0 | 8,093,133,399,139,990,000 | 29.918033 | 109 | 0.627253 | false | 3.896694 | false | false | false |
damorim/compilers-cin | 2020_3/projeto2/antlr4-python3-runtime-4.7.2/src/antlr4/atn/ATNDeserializer.py | 9 | 22186 | # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
from uuid import UUID
from io import StringIO
from typing import Callable
from antlr4.Token import Token
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNType import ATNType
from antlr4.atn.ATNState import *
from antlr4.atn.Transition import *
from antlr4.atn.LexerAction import *
from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions
# This is the earliest supported serialized UUID.
BASE_SERIALIZED_UUID = UUID("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E")
# This UUID indicates the serialized ATN contains two sets of
# IntervalSets, where the second set's values are encoded as
# 32-bit integers to support the full Unicode SMP range up to U+10FFFF.
ADDED_UNICODE_SMP = UUID("59627784-3BE5-417A-B9EB-8131A7286089")
# This list contains all of the currently supported UUIDs, ordered by when
# the feature first appeared in this branch.
SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ]
SERIALIZED_VERSION = 3
# This is the current serialized UUID.
SERIALIZED_UUID = ADDED_UNICODE_SMP
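# Illustrative sketch (not part of the ANTLR runtime): serialized ATN data arrives as a
# Python string whose character values were shifted by the code generator; the adjust()
# closure in ATNDeserializer.reset() below undoes that shift for everything except the
# leading version number. The encode() helper here is only inferred from that decoder,
# to show the round trip on a few made-up values.
def _demo_serialized_value_adjustment():
    def encode(value):
        return chr((value + 2) % 0xFFFF)  # inferred inverse of adjust()
    def decode(c):
        v = ord(c)
        return v - 2 if v > 1 else v + 65533
    samples = [0, 1, 3, 0xFFFD, 0xFFFE]
    return [decode(encode(v)) for v in samples] == samples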
class ATNDeserializer (object):
def __init__(self, options : ATNDeserializationOptions = None):
if options is None:
options = ATNDeserializationOptions.defaultOptions
self.deserializationOptions = options
# Determines if a particular serialized representation of an ATN supports
# a particular feature, identified by the {@link UUID} used for serializing
# the ATN at the time the feature was first introduced.
#
# @param feature The {@link UUID} marking the first time the feature was
# supported in the serialized ATN.
# @param actualUuid The {@link UUID} of the actual serialized ATN which is
# currently being deserialized.
# @return {@code true} if the {@code actualUuid} value represents a
# serialized ATN at or after the feature identified by {@code feature} was
# introduced; otherwise, {@code false}.
    def isFeatureSupported(self, feature : UUID , actualUuid : UUID ):
        # list.index() raises ValueError instead of returning -1, so use membership
        # checks to honour the documented "otherwise, false" behaviour.
        if feature not in SUPPORTED_UUIDS or actualUuid not in SUPPORTED_UUIDS:
            return False
        idx1 = SUPPORTED_UUIDS.index(feature)
        idx2 = SUPPORTED_UUIDS.index(actualUuid)
        return idx2 >= idx1
def deserialize(self, data : str):
self.reset(data)
self.checkVersion()
self.checkUUID()
atn = self.readATN()
self.readStates(atn)
self.readRules(atn)
self.readModes(atn)
sets = []
# First, read all sets with 16-bit Unicode code points <= U+FFFF.
self.readSets(atn, sets, self.readInt)
# Next, if the ATN was serialized with the Unicode SMP feature,
# deserialize sets with 32-bit arguments <= U+10FFFF.
if self.isFeatureSupported(ADDED_UNICODE_SMP, self.uuid):
self.readSets(atn, sets, self.readInt32)
self.readEdges(atn, sets)
self.readDecisions(atn)
self.readLexerActions(atn)
self.markPrecedenceDecisions(atn)
self.verifyATN(atn)
if self.deserializationOptions.generateRuleBypassTransitions \
and atn.grammarType == ATNType.PARSER:
self.generateRuleBypassTransitions(atn)
# re-verify after modification
self.verifyATN(atn)
return atn
def reset(self, data:str):
def adjust(c):
v = ord(c)
return v-2 if v>1 else v + 65533
temp = [ adjust(c) for c in data ]
# don't adjust the first value since that's the version number
temp[0] = ord(data[0])
self.data = temp
self.pos = 0
def checkVersion(self):
version = self.readInt()
if version != SERIALIZED_VERSION:
raise Exception("Could not deserialize ATN with version " + str(version) + " (expected " + str(SERIALIZED_VERSION) + ").")
def checkUUID(self):
uuid = self.readUUID()
if not uuid in SUPPORTED_UUIDS:
raise Exception("Could not deserialize ATN with UUID: " + str(uuid) + \
" (expected " + str(SERIALIZED_UUID) + " or a legacy UUID).", uuid, SERIALIZED_UUID)
self.uuid = uuid
def readATN(self):
idx = self.readInt()
grammarType = ATNType.fromOrdinal(idx)
maxTokenType = self.readInt()
return ATN(grammarType, maxTokenType)
def readStates(self, atn:ATN):
loopBackStateNumbers = []
endStateNumbers = []
nstates = self.readInt()
for i in range(0, nstates):
stype = self.readInt()
# ignore bad type of states
if stype==ATNState.INVALID_TYPE:
atn.addState(None)
continue
ruleIndex = self.readInt()
if ruleIndex == 0xFFFF:
ruleIndex = -1
s = self.stateFactory(stype, ruleIndex)
if stype == ATNState.LOOP_END: # special case
loopBackStateNumber = self.readInt()
loopBackStateNumbers.append((s, loopBackStateNumber))
elif isinstance(s, BlockStartState):
endStateNumber = self.readInt()
endStateNumbers.append((s, endStateNumber))
atn.addState(s)
# delay the assignment of loop back and end states until we know all the state instances have been initialized
for pair in loopBackStateNumbers:
pair[0].loopBackState = atn.states[pair[1]]
for pair in endStateNumbers:
pair[0].endState = atn.states[pair[1]]
numNonGreedyStates = self.readInt()
for i in range(0, numNonGreedyStates):
stateNumber = self.readInt()
atn.states[stateNumber].nonGreedy = True
numPrecedenceStates = self.readInt()
for i in range(0, numPrecedenceStates):
stateNumber = self.readInt()
atn.states[stateNumber].isPrecedenceRule = True
def readRules(self, atn:ATN):
nrules = self.readInt()
if atn.grammarType == ATNType.LEXER:
atn.ruleToTokenType = [0] * nrules
atn.ruleToStartState = [0] * nrules
for i in range(0, nrules):
s = self.readInt()
startState = atn.states[s]
atn.ruleToStartState[i] = startState
if atn.grammarType == ATNType.LEXER:
tokenType = self.readInt()
if tokenType == 0xFFFF:
tokenType = Token.EOF
atn.ruleToTokenType[i] = tokenType
atn.ruleToStopState = [0] * nrules
for state in atn.states:
if not isinstance(state, RuleStopState):
continue
atn.ruleToStopState[state.ruleIndex] = state
atn.ruleToStartState[state.ruleIndex].stopState = state
def readModes(self, atn:ATN):
nmodes = self.readInt()
for i in range(0, nmodes):
s = self.readInt()
atn.modeToStartState.append(atn.states[s])
def readSets(self, atn:ATN, sets:list, readUnicode:Callable[[], int]):
m = self.readInt()
for i in range(0, m):
iset = IntervalSet()
sets.append(iset)
n = self.readInt()
containsEof = self.readInt()
if containsEof!=0:
iset.addOne(-1)
for j in range(0, n):
i1 = readUnicode()
i2 = readUnicode()
iset.addRange(range(i1, i2 + 1)) # range upper limit is exclusive
def readEdges(self, atn:ATN, sets:list):
nedges = self.readInt()
for i in range(0, nedges):
src = self.readInt()
trg = self.readInt()
ttype = self.readInt()
arg1 = self.readInt()
arg2 = self.readInt()
arg3 = self.readInt()
trans = self.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
srcState = atn.states[src]
srcState.addTransition(trans)
# edges for rule stop states can be derived, so they aren't serialized
for state in atn.states:
for i in range(0, len(state.transitions)):
t = state.transitions[i]
if not isinstance(t, RuleTransition):
continue
outermostPrecedenceReturn = -1
if atn.ruleToStartState[t.target.ruleIndex].isPrecedenceRule:
if t.precedence == 0:
outermostPrecedenceReturn = t.target.ruleIndex
trans = EpsilonTransition(t.followState, outermostPrecedenceReturn)
atn.ruleToStopState[t.target.ruleIndex].addTransition(trans)
for state in atn.states:
if isinstance(state, BlockStartState):
# we need to know the end state to set its start state
if state.endState is None:
raise Exception("IllegalState")
# block end states can only be associated to a single block start state
if state.endState.startState is not None:
raise Exception("IllegalState")
state.endState.startState = state
if isinstance(state, PlusLoopbackState):
for i in range(0, len(state.transitions)):
target = state.transitions[i].target
if isinstance(target, PlusBlockStartState):
target.loopBackState = state
elif isinstance(state, StarLoopbackState):
for i in range(0, len(state.transitions)):
target = state.transitions[i].target
if isinstance(target, StarLoopEntryState):
target.loopBackState = state
def readDecisions(self, atn:ATN):
ndecisions = self.readInt()
for i in range(0, ndecisions):
s = self.readInt()
decState = atn.states[s]
atn.decisionToState.append(decState)
decState.decision = i
def readLexerActions(self, atn:ATN):
if atn.grammarType == ATNType.LEXER:
count = self.readInt()
atn.lexerActions = [ None ] * count
for i in range(0, count):
actionType = self.readInt()
data1 = self.readInt()
if data1 == 0xFFFF:
data1 = -1
data2 = self.readInt()
if data2 == 0xFFFF:
data2 = -1
lexerAction = self.lexerActionFactory(actionType, data1, data2)
atn.lexerActions[i] = lexerAction
def generateRuleBypassTransitions(self, atn:ATN):
count = len(atn.ruleToStartState)
atn.ruleToTokenType = [ 0 ] * count
for i in range(0, count):
atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
for i in range(0, count):
self.generateRuleBypassTransition(atn, i)
def generateRuleBypassTransition(self, atn:ATN, idx:int):
bypassStart = BasicBlockStartState()
bypassStart.ruleIndex = idx
atn.addState(bypassStart)
bypassStop = BlockEndState()
bypassStop.ruleIndex = idx
atn.addState(bypassStop)
bypassStart.endState = bypassStop
atn.defineDecisionState(bypassStart)
bypassStop.startState = bypassStart
excludeTransition = None
if atn.ruleToStartState[idx].isPrecedenceRule:
# wrap from the beginning of the rule to the StarLoopEntryState
endState = None
for state in atn.states:
if self.stateIsEndStateFor(state, idx):
endState = state
excludeTransition = state.loopBackState.transitions[0]
break
if excludeTransition is None:
raise Exception("Couldn't identify final state of the precedence rule prefix section.")
else:
endState = atn.ruleToStopState[idx]
# all non-excluded transitions that currently target end state need to target blockEnd instead
for state in atn.states:
for transition in state.transitions:
if transition == excludeTransition:
continue
if transition.target == endState:
transition.target = bypassStop
# all transitions leaving the rule start state need to leave blockStart instead
ruleToStartState = atn.ruleToStartState[idx]
count = len(ruleToStartState.transitions)
while count > 0:
bypassStart.addTransition(ruleToStartState.transitions[count-1])
del ruleToStartState.transitions[-1]
# link the new states
atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart))
bypassStop.addTransition(EpsilonTransition(endState))
matchState = BasicState()
atn.addState(matchState)
matchState.addTransition(AtomTransition(bypassStop, atn.ruleToTokenType[idx]))
bypassStart.addTransition(EpsilonTransition(matchState))
def stateIsEndStateFor(self, state:ATNState, idx:int):
if state.ruleIndex != idx:
return None
if not isinstance(state, StarLoopEntryState):
return None
maybeLoopEndState = state.transitions[len(state.transitions) - 1].target
if not isinstance(maybeLoopEndState, LoopEndState):
return None
if maybeLoopEndState.epsilonOnlyTransitions and \
isinstance(maybeLoopEndState.transitions[0].target, RuleStopState):
return state
else:
return None
#
# Analyze the {@link StarLoopEntryState} states in the specified ATN to set
# the {@link StarLoopEntryState#isPrecedenceDecision} field to the
# correct value.
#
# @param atn The ATN.
#
def markPrecedenceDecisions(self, atn:ATN):
for state in atn.states:
if not isinstance(state, StarLoopEntryState):
continue
# We analyze the ATN to determine if this ATN decision state is the
# decision for the closure block that determines whether a
# precedence rule should continue or complete.
#
if atn.ruleToStartState[state.ruleIndex].isPrecedenceRule:
maybeLoopEndState = state.transitions[len(state.transitions) - 1].target
if isinstance(maybeLoopEndState, LoopEndState):
if maybeLoopEndState.epsilonOnlyTransitions and \
isinstance(maybeLoopEndState.transitions[0].target, RuleStopState):
state.isPrecedenceDecision = True
def verifyATN(self, atn:ATN):
if not self.deserializationOptions.verifyATN:
return
# verify assumptions
for state in atn.states:
if state is None:
continue
self.checkCondition(state.epsilonOnlyTransitions or len(state.transitions) <= 1)
if isinstance(state, PlusBlockStartState):
self.checkCondition(state.loopBackState is not None)
if isinstance(state, StarLoopEntryState):
self.checkCondition(state.loopBackState is not None)
self.checkCondition(len(state.transitions) == 2)
if isinstance(state.transitions[0].target, StarBlockStartState):
self.checkCondition(isinstance(state.transitions[1].target, LoopEndState))
self.checkCondition(not state.nonGreedy)
elif isinstance(state.transitions[0].target, LoopEndState):
self.checkCondition(isinstance(state.transitions[1].target, StarBlockStartState))
self.checkCondition(state.nonGreedy)
else:
raise Exception("IllegalState")
if isinstance(state, StarLoopbackState):
self.checkCondition(len(state.transitions) == 1)
self.checkCondition(isinstance(state.transitions[0].target, StarLoopEntryState))
if isinstance(state, LoopEndState):
self.checkCondition(state.loopBackState is not None)
if isinstance(state, RuleStartState):
self.checkCondition(state.stopState is not None)
if isinstance(state, BlockStartState):
self.checkCondition(state.endState is not None)
if isinstance(state, BlockEndState):
self.checkCondition(state.startState is not None)
if isinstance(state, DecisionState):
self.checkCondition(len(state.transitions) <= 1 or state.decision >= 0)
else:
self.checkCondition(len(state.transitions) <= 1 or isinstance(state, RuleStopState))
def checkCondition(self, condition:bool, message=None):
if not condition:
if message is None:
message = "IllegalState"
raise Exception(message)
def readInt(self):
i = self.data[self.pos]
self.pos += 1
return i
def readInt32(self):
low = self.readInt()
high = self.readInt()
return low | (high << 16)
def readLong(self):
low = self.readInt32()
high = self.readInt32()
return (low & 0x00000000FFFFFFFF) | (high << 32)
def readUUID(self):
low = self.readLong()
high = self.readLong()
allBits = (low & 0xFFFFFFFFFFFFFFFF) | (high << 64)
return UUID(int=allBits)
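    # Illustrative note (not part of the original runtime): the serialized ATN is a
    # stream of 16-bit words, so the readers above rebuild wider values low-half first.
    # With hypothetical words low=0x1234 and high=0x5678, readInt32 returns
    # 0x1234 | (0x5678 << 16) == 0x56781234; readLong and readUUID chain the same
    # scheme over two 32-bit halves and two 64-bit halves respectively.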
edgeFactories = [ lambda args : None,
lambda atn, src, trg, arg1, arg2, arg3, sets, target : EpsilonTransition(target),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
RangeTransition(target, Token.EOF, arg2) if arg3 != 0 else RangeTransition(target, arg1, arg2),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
RuleTransition(atn.states[arg1], arg2, arg3, target),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
PredicateTransition(target, arg1, arg2, arg3 != 0),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
AtomTransition(target, Token.EOF) if arg3 != 0 else AtomTransition(target, arg1),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
ActionTransition(target, arg1, arg2, arg3 != 0),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
SetTransition(target, sets[arg1]),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
NotSetTransition(target, sets[arg1]),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
WildcardTransition(target),
lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
PrecedencePredicateTransition(target, arg1)
]
def edgeFactory(self, atn:ATN, type:int, src:int, trg:int, arg1:int, arg2:int, arg3:int, sets:list):
target = atn.states[trg]
if type > len(self.edgeFactories) or self.edgeFactories[type] is None:
raise Exception("The specified transition type: " + str(type) + " is not valid.")
else:
return self.edgeFactories[type](atn, src, trg, arg1, arg2, arg3, sets, target)
stateFactories = [ lambda : None,
lambda : BasicState(),
lambda : RuleStartState(),
lambda : BasicBlockStartState(),
lambda : PlusBlockStartState(),
lambda : StarBlockStartState(),
lambda : TokensStartState(),
lambda : RuleStopState(),
lambda : BlockEndState(),
lambda : StarLoopbackState(),
lambda : StarLoopEntryState(),
lambda : PlusLoopbackState(),
lambda : LoopEndState()
]
def stateFactory(self, type:int, ruleIndex:int):
        if type > len(self.stateFactories) or self.stateFactories[type] is None:
raise Exception("The specified state type " + str(type) + " is not valid.")
else:
s = self.stateFactories[type]()
if s is not None:
s.ruleIndex = ruleIndex
return s
CHANNEL = 0 #The type of a {@link LexerChannelAction} action.
CUSTOM = 1 #The type of a {@link LexerCustomAction} action.
MODE = 2 #The type of a {@link LexerModeAction} action.
MORE = 3 #The type of a {@link LexerMoreAction} action.
POP_MODE = 4 #The type of a {@link LexerPopModeAction} action.
PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action.
SKIP = 6 #The type of a {@link LexerSkipAction} action.
TYPE = 7 #The type of a {@link LexerTypeAction} action.
actionFactories = [ lambda data1, data2: LexerChannelAction(data1),
lambda data1, data2: LexerCustomAction(data1, data2),
lambda data1, data2: LexerModeAction(data1),
lambda data1, data2: LexerMoreAction.INSTANCE,
lambda data1, data2: LexerPopModeAction.INSTANCE,
lambda data1, data2: LexerPushModeAction(data1),
lambda data1, data2: LexerSkipAction.INSTANCE,
lambda data1, data2: LexerTypeAction(data1)
]
def lexerActionFactory(self, type:int, data1:int, data2:int):
if type > len(self.actionFactories) or self.actionFactories[type] is None:
raise Exception("The specified lexer action type " + str(type) + " is not valid.")
else:
return self.actionFactories[type](data1, data2)
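    # Illustrative sketch (assumed serialized values, not part of the original runtime):
    # the (actionType, data1, data2) triples read in readLexerActions() are turned into
    # LexerAction objects here, e.g.
    #   self.lexerActionFactory(0, 2, 0)  # CHANNEL -> LexerChannelAction(2)
    #   self.lexerActionFactory(6, 0, 0)  # SKIP    -> LexerSkipAction.INSTANCE
    # using the CHANNEL..TYPE constants defined above as the index into actionFactories.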
| mit | -1,459,583,157,344,531,000 | 41.018939 | 134 | 0.594204 | false | 4.102441 | false | false | false |
dvliman/jaikuengine | .google_appengine/lib/django-0.96/django/newforms/extras/widgets.py | 32 | 2008 | """
Extra HTML Widget classes
"""
from django.newforms.widgets import Widget, Select
from django.utils.dates import MONTHS
import datetime
__all__ = ('SelectDateWidget',)
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
def __init__(self, attrs=None, years=None):
# years is an optional list/tuple of years to use in the "year" select box.
self.attrs = attrs or {}
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year+10)
def render(self, name, value, attrs=None):
try:
value = datetime.date(*map(int, value.split('-')))
year_val, month_val, day_val = value.year, value.month, value.day
except (AttributeError, TypeError, ValueError):
year_val = month_val = day_val = None
output = []
month_choices = MONTHS.items()
month_choices.sort()
select_html = Select(choices=month_choices).render(self.month_field % name, month_val)
output.append(select_html)
day_choices = [(i, i) for i in range(1, 32)]
select_html = Select(choices=day_choices).render(self.day_field % name, day_val)
output.append(select_html)
year_choices = [(i, i) for i in self.years]
select_html = Select(choices=year_choices).render(self.year_field % name, year_val)
output.append(select_html)
return u'\n'.join(output)
def value_from_datadict(self, data, name):
y, m, d = data.get(self.year_field % name), data.get(self.month_field % name), data.get(self.day_field % name)
if y and m and d:
return '%s-%s-%s' % (y, m, d)
return None
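# Illustrative usage sketch (assumed field name 'birth', not part of the original module):
# render() emits three <select> elements named birth_month, birth_day and birth_year, and
# value_from_datadict() stitches the posted values back into a single string:
#   SelectDateWidget().value_from_datadict(
#       {'birth_year': '1990', 'birth_month': '7', 'birth_day': '4'}, 'birth')  # -> '1990-7-4'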
| apache-2.0 | 8,298,886,374,793,652,000 | 33.033898 | 118 | 0.609562 | false | 3.572954 | false | false | false |
alextruberg/custom_django | django/contrib/gis/db/backends/mysql/introspection.py | 624 | 1426 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
# Updating the data_types_reverse dictionary with the appropriate
# type for Geometry fields.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# In order to get the specific geometry type of the field,
# we introspect on the table definition using `DESCRIBE`.
cursor.execute('DESCRIBE %s' %
self.connection.ops.quote_name(table_name))
# Increment over description info until we get to the geometry
# column.
for column, typ, null, key, default, extra in cursor.fetchall():
if column == geo_col:
# Using OGRGeomType to convert from OGC name to Django field.
# MySQL does not support 3D or SRIDs, so the field params
# are empty.
field_type = OGRGeomType(typ).django
field_params = {}
break
finally:
cursor.close()
return field_type, field_params
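    # Illustrative note (assumed table layout, not part of the original module): for a
    # column declared as `geom point`, DESCRIBE returns a row like
    # ('geom', 'point', 'YES', 'MUL', None, ''), from which OGRGeomType('point').django
    # yields 'PointField'; field_params stays empty because MySQL keeps no SRID or
    # dimension information for geometry columns.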
| bsd-3-clause | -8,190,172,623,721,746,000 | 43.5625 | 81 | 0.620617 | false | 4.706271 | false | false | false |
desarrollosimagos/svidb | administrativo/perfil/models.py | 1 | 12346 | #!/usr/bin/python -u
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
from django.contrib.auth.models import User
from mapas.models import *
from actores.models import *
class PerfilPublico(models.Model):
user = models.OneToOneField(User,verbose_name='Usuario')
persona = models.OneToOneField(Directorios)
class Meta:
db_table = u'perfilpublico'
verbose_name_plural='Perfil Público'
verbose_name='Perfil Público'
unique_together=('user','persona')
#app_label = 'Sistematizacion_de_modulos_publicos'
def __unicode__(self):
return u"%s" %(self.persona.nombre)
class SeccionesPanelPublico(models.Model):
panel = models.CharField(max_length=180,verbose_name='Modulo')
descripcion = models.TextField()
# modulos = models.ManyToManyField(ModulosPublicos,related_name='Modulos Principales',verbose_name='Modulos',blank=True)
activo = models.BooleanField(verbose_name="Activo")
is_admmin = models.BooleanField(verbose_name="Solo para Administradores")
posicion = models.IntegerField(verbose_name="Posicion")
class Meta:
verbose_name_plural='Secciones del Panel Publico'
verbose_name='Secciones del Panel Publico'
def __unicode__(self):
return u"%s" %(self.panel)
class ModulosPublicos(models.Model):
paneles = models.ForeignKey(SeccionesPanelPublico)
modulo = models.CharField(max_length=180,verbose_name='Modulo')
url = models.CharField(max_length=180,verbose_name='URL',blank=True,null=True)
boton = models.ImageField(upload_to='modulos')
# submodulos = models.ManyToManyField(SubModulosPublicos,related_name='Submodulos',verbose_name='Sub Modulos',blank=True)
descripcion = models.TextField()
is_admmin = models.BooleanField(verbose_name="Solo para Administradores")
activo = models.BooleanField(verbose_name="Activo")
posicion = models.IntegerField(verbose_name="Posicion")
target = models.CharField(max_length=40,choices=(('_blank',u'Abre el documento vinculado en una nueva ventana o pestaña'),('_self',u'Abre el documento vinculado en el mismo marco que se ha hecho clic'),('_parent',u'Abre el documento vinculado en el marco padre'),('_top',u'Abre el documento vinculado en el pleno de la ventana')),verbose_name='Target del Vinculo')
class Meta:
verbose_name_plural='Módulos Públicos'
verbose_name='Módulos Públicos'
#app_label = 'Sistematizacion_de_modulos_publicos'
def __unicode__(self):
return u"%s - %s" %(self.paneles.panel, self.modulo)
def logo(self):
logo = ""
if self.boton:
esta = "<img src='" + self.boton.url +"' alt='Activo' height='150px'>"
else:
esta = "<img src='/media/imgs/icon-pendiente.gif' alt='Pendiente'> sin imagen"
return u"%s"%(esta)
logo.allow_tags = True
class SubModulosPublicos(models.Model):
modulos = models.ForeignKey(ModulosPublicos)
titulo = models.CharField(max_length=180,verbose_name='Modulo')
url = models.CharField(max_length=180,verbose_name='URL',blank=True,null=True)
boton = models.ImageField(upload_to='modulos')
descripcion = models.TextField()
is_admmin = models.BooleanField(verbose_name="Solo para Administradores")
activo = models.BooleanField(verbose_name="Activo")
posicion = models.IntegerField(verbose_name="Posicion")
target = models.CharField(max_length=40,choices=(('_blank',u'Abre el documento vinculado en una nueva ventana o pestaña'),('_self',u'Abre el documento vinculado en el mismo marco que se ha hecho clic'),('_parent',u'Abre el documento vinculado en el marco padre'),('_top',u'Abre el documento vinculado en el pleno de la ventana')),verbose_name='Target del Vinculo')
class Meta:
verbose_name_plural='Sub Módulos Públicos'
verbose_name='Sub Módulos Públicos'
def __unicode__(self):
return u"%s %s %s" %(self.modulos.paneles.panel, self.modulos.modulo,self.titulo)
def logo(self):
logo = ""
if self.boton:
esta = "<img src='" + self.boton.url +"' alt='Activo' height='150px'>"
else:
esta = "<img src='/media/imgs/icon-pendiente.gif' alt='Pendiente'> sin imagen"
return u"%s"%(esta)
logo.allow_tags = True
class PerfilModulos(models.Model):
perfil = models.ForeignKey(PerfilPublico)
modulos = models.ForeignKey(ModulosPublicos,verbose_name='Modulos')
ver = models.BooleanField(verbose_name="Ver")
add = models.BooleanField(verbose_name="Agregar")
edit = models.BooleanField(verbose_name="Modificar")
activo = models.BooleanField(verbose_name="Activo")
class Meta:
db_table = u'perfilmodulos'
verbose_name_plural='Permisos Perfiles Módulos'
unique_together=('perfil','modulos','activo')
verbose_name='Permisos Perfiles Módulos'
#app_label = 'Sistematizacion_de_modulos_publicos'
def __unicode__(self):
return u"%s %s" %(self.perfil.persona.nombre,self.modulos.modulo)
class PerfilSubModulos(models.Model):
perfil = models.ForeignKey(PerfilPublico)
submodulos = models.ForeignKey(SubModulosPublicos,verbose_name='SubModulos')
ver = models.BooleanField(verbose_name="Ver")
add = models.BooleanField(verbose_name="Agregar")
edit = models.BooleanField(verbose_name="Modificar")
activo = models.BooleanField(verbose_name="Activo")
class Meta:
verbose_name_plural='Permisos Perfiles Sub Módulos'
verbose_name='Permisos Perfil Sub Módulos'
unique_together=('perfil','submodulos','activo')
#app_label = 'Sistematizacion_de_modulos_publicos'
def __unicode__(self):
return u"%s %s" %(self.perfil.persona.nombre,self.submodulos.titulo)
#class PerfilPaneles(models.Model):
# perfil = models.ForeignKey(PerfilPublico)
# modulos = models.ManyToManyField(SeccionesPanelPublico,verbose_name='Paneles')
# class Meta:
# verbose_name_plural='Perfil Paneles'
# verbose_name='Perfil Paneles'
# def __unicode__(self):
# return u"%s %s" %(self.perfil.persona.nombre,self.perfil.persona.documentoidentidad)
class TipoSolicitud(models.Model):
tipo = models.CharField(max_length=180,verbose_name='Tipo')
descripcion = models.TextField()
class Meta:
verbose_name_plural='Tipo de Solicitud'
verbose_name='Tipo de Solicitud'
def __unicode__(self):
return u"%s" %(self.tipo)
class SistemaSolicitudes(models.Model):
remi = models.ForeignKey(Directorios,verbose_name='Remitente')
tipoSolicitud = models.ForeignKey(TipoSolicitud,verbose_name='Tipo de Solicitud',blank=True, null = True)
destino = models.ManyToManyField(Directorios, related_name='destinodirect',verbose_name='Destinatarios',blank=True, null = True)
destinoinst = models.ManyToManyField(Actores, related_name='destinoactor',verbose_name='Destinatarios Instituciones',blank=True, null = True)
asunto = models.CharField(max_length=120,blank=True,null=True)
mensaje = models.TextField(blank=True,null=True)
    fecha = models.DateTimeField(default=datetime.now, editable=False)  # callable default: timestamp taken at save time, not at import
fechainicio = models.DateTimeField(verbose_name='Fecha de Inicio',blank=True,null=True)
fechaentrega = models.DateTimeField(verbose_name='Fecha de Entrega',blank=True,null=True)
fechaculminacion = models.DateTimeField(verbose_name='Fecha de Culminación',blank=True,null=True)
fechaprorroga = models.DateTimeField(verbose_name='Prorroga',blank=True,null=True)
proyect = models.BooleanField(verbose_name='Es Proyectable?')
estrucorg = models.TextField(verbose_name='Recursos', blank=True, null=True)
personasinvol = models.ManyToManyField(Directorios, related_name='persoinvol',verbose_name='Personas Involucradas',blank=True, null = True)
personasinvoltext = models.TextField(verbose_name='Personas Involucradas, no registradas', blank=True, null=True)
instituinvol = models.ManyToManyField(Actores, related_name='instiinvol',verbose_name='Instituciones Involucradas',blank=True, null = True)
instituinvoltext = models.TextField(verbose_name='Institutos Involucrados, no registrados', blank=True, null=True)
especies = models.ManyToManyField(Taxon, related_name='tax',verbose_name='Especies Involucradas',blank=True, null = True)
especiestext = models.TextField(verbose_name='Especies Involucradas, no registradas', blank=True, null=True)
areas = models.ManyToManyField(Areas, related_name='ar',verbose_name='Areas Involucradas',blank=True, null = True)
areastext = models.TextField(verbose_name='Areas Involucradas, no registradas', blank=True, null=True)
datos = models.FileField(upload_to='solicitudes',verbose_name='Datos Adjuntos',blank=True,null=True)
prioridad = models.IntegerField(choices=((0,'Urgente'),(1,'Normal'),(2,'Especial')),verbose_name='Prioridad',null=True,blank=True)
estatu = models.IntegerField(choices=((0,'Abierto'),(1,'Cerrado'),(2,'Pausado')),verbose_name='Estatus',null=True,blank=True,db_column='estatu_id')
class Meta:
verbose_name_plural='Sistema de Solicitudes'
#app_label = 'Datos_Transversales'
verbose_name = 'Sistema de Solicitudes'
def __unicode__(self):
return u" %s %s"%(self.remi,self.estatu)
# def VerEspecies(self):
# try:
# espe = Taxon.objects.get(detalletaxon=self)
# except Taxon.DoesNotExist:
# espe = None
# return u"<a href='/manager/especies/taxon/%s'>Ver Taxon</a>"%(tax.id)
# VerTaxon.allow_tags = True
class Seguimiento(models.Model):
solicitud = models.ForeignKey(SistemaSolicitudes,verbose_name='Solicitud',blank=True, null = True)
persona = models.ForeignKey(Directorios,verbose_name='Persona',blank=True, null = True,editable = False)
mensaje = models.TextField()
    fecha = models.DateTimeField(default=datetime.now, editable=False)  # callable default: timestamp taken at save time, not at import
class Meta:
verbose_name_plural='Seguimiento'
verbose_name='Seguimiento'
def __unicode__(self):
return u"%s" %(self.solicitud)
class validaciones(models.Model):
usuario = models.ForeignKey(PerfilPublico,verbose_name='Usuario')
codigo = models.CharField(max_length=120)
estatu = models.IntegerField(choices=((0,'Validacion'),(1,'Recuperacion'),(2,'Eliminacion')),verbose_name='Tipo',null=True,blank=True)
    fecha = models.DateTimeField(default=datetime.now, editable=False)  # callable default: timestamp taken at save time, not at import
estado = models.BooleanField(verbose_name="Activo")
class Meta:
verbose_name_plural='Validacion de Cuentas'
#app_label = 'Datos_Transversales'
verbose_name = 'Validacion de Cuentas'
def __unicode__(self):
return u" %s %s"%(self.usuario,self.estatu)
class GruposPermisos(models.Model):
nombre = models.CharField(max_length=120)
estado = models.BooleanField(verbose_name="Activo")
class Meta:
verbose_name_plural='Grupos de Permisos de Perfil'
verbose_name = 'Grupos de Permisos de Perfil'
def __unicode__(self):
return u" %s %s"%(self.nombre,self.estado)
class DetalleGruposPermisos(models.Model):
grupo = models.ForeignKey(GruposPermisos,verbose_name='Grupo')
seccion = models.ForeignKey(SeccionesPanelPublico,verbose_name='Panel')
modulo = ChainedForeignKey(ModulosPublicos,chained_field="seccion",chained_model_field="paneles",show_all=False,auto_choose=True,verbose_name='Modulo',null=True,blank=True)
#modulo = models.ForeignKey(ModulosPublicos,verbose_name='Modulo')
submodulo = ChainedForeignKey(SubModulosPublicos,chained_field="modulo",chained_model_field="modulos",show_all=False,auto_choose=True,verbose_name='SubModulo',null=True,blank=True)
#submodulo = models.ForeignKey(SubModulosPublicos,verbose_name='SubModulo')
estado = models.BooleanField(verbose_name="Activo")
class Meta:
verbose_name_plural='Detalle Grupos de Permisos de Perfil'
verbose_name = 'Detalle Grupos de Permisos de Perfil'
def __unicode__(self):
return u" %s %s"%(self.grupo,self.estado)
| gpl-3.0 | 7,892,154,210,437,557,000 | 52.074561 | 368 | 0.693649 | false | 3.18826 | false | false | false |
Stanford-Online/edx-platform | lms/djangoapps/courseware/tests/test_middleware.py | 19 | 1491 | """
Tests for courseware middleware
"""
from django.http import Http404
from django.test.client import RequestFactory
from nose.plugins.attrib import attr
from lms.djangoapps.courseware.exceptions import Redirect
from lms.djangoapps.courseware.middleware import RedirectMiddleware
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
class CoursewareMiddlewareTestCase(SharedModuleStoreTestCase):
"""Tests that courseware middleware is correctly redirected"""
@classmethod
def setUpClass(cls):
super(CoursewareMiddlewareTestCase, cls).setUpClass()
cls.course = CourseFactory.create()
def test_process_404(self):
"""A 404 should not trigger anything"""
request = RequestFactory().get("dummy_url")
response = RedirectMiddleware().process_exception(
request, Http404()
)
self.assertIsNone(response)
def test_redirect_exceptions(self):
"""
Unit tests for handling of Redirect exceptions.
"""
request = RequestFactory().get("dummy_url")
test_url = '/test_url'
exception = Redirect(test_url)
response = RedirectMiddleware().process_exception(
request, exception
)
self.assertEqual(response.status_code, 302)
target_url = response._headers['location'][1]
self.assertTrue(target_url.endswith(test_url))
| agpl-3.0 | -1,999,649,033,062,407,700 | 32.886364 | 76 | 0.701543 | false | 4.825243 | true | false | false |
lscheinkman/nupic | src/nupic/data/dict_utils.py | 49 | 5295 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
# TODO: Note that the function 'rUpdate' is duplicated in
# the swarming.hypersearch.utils.py module
class DictObj(dict):
"""Dictionary that allows attribute-like access to its elements.
Attributes are read-only."""
def __getattr__(self, name):
if name == '__deepcopy__':
return super(DictObj, self).__getattribute__("__deepcopy__")
return self[name]
def __setstate__(self, state):
for k, v in state.items():
self[k] = v
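# Illustrative usage (not part of the original module):
#   d = DictObj(a=1, b=2)
#   d.a       # -> 1, attribute-style read of the 'a' key
#   d['b']    # -> 2, normal dict access still works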
def rUpdate(original, updates):
"""Recursively updates the values in original with the values from updates."""
# Keep a list of the sub-dictionaries that need to be updated to avoid having
  # to use recursion (which could fail for dictionaries with a lot of nesting).
dictPairs = [(original, updates)]
while len(dictPairs) > 0:
original, updates = dictPairs.pop()
for k, v in updates.iteritems():
if k in original and isinstance(original[k], dict) and isinstance(v, dict):
dictPairs.append((original[k], v))
else:
original[k] = v
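# Illustrative example (not part of the original module): nested dicts are merged
# key by key rather than replaced wholesale, e.g.
#   original = {'a': {'x': 1, 'y': 2}, 'b': 3}
#   rUpdate(original, {'a': {'y': 20}, 'c': 4})
#   # original is now {'a': {'x': 1, 'y': 20}, 'b': 3, 'c': 4}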
def rApply(d, f):
"""Recursively applies f to the values in dict d.
Args:
d: The dict to recurse over.
f: A function to apply to values in d that takes the value and a list of
keys from the root of the dict to the value.
"""
remainingDicts = [(d, ())]
while len(remainingDicts) > 0:
current, prevKeys = remainingDicts.pop()
for k, v in current.iteritems():
keys = prevKeys + (k,)
if isinstance(v, dict):
remainingDicts.insert(0, (v, keys))
else:
f(v, keys)
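# Illustrative example (not part of the original module): f receives each leaf value
# together with the tuple of keys leading to it, e.g.
#   leaves = []
#   rApply({'a': {'b': 1}, 'c': 2}, lambda v, keys: leaves.append((keys, v)))
#   # leaves == [(('c',), 2), (('a', 'b'), 1)]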
def find(d, target):
remainingDicts = [d]
while len(remainingDicts) > 0:
current = remainingDicts.pop()
for k, v in current.iteritems():
if k == target:
return v
if isinstance(v, dict):
remainingDicts.insert(0, v)
return None
def get(d, keys):
for key in keys:
d = d[key]
return d
def set(d, keys, value):
for key in keys[:-1]:
d = d[key]
d[keys[-1]] = value
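# Illustrative example (not part of the original module): find/get/set address values
# by key or key path, e.g.
#   d = {'a': {'b': {'c': 1}}}
#   find(d, 'c')                  # -> 1 (first match anywhere in the nesting)
#   get(d, ('a', 'b', 'c'))       # -> 1
#   set(d, ('a', 'b', 'c'), 5)    # d['a']['b']['c'] is now 5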
def dictDiffAndReport(da, db):
""" Compares two python dictionaries at the top level and report differences,
if any, to stdout
da: first dictionary
db: second dictionary
Returns: The same value as returned by dictDiff() for the given args
"""
differences = dictDiff(da, db)
if not differences:
return differences
if differences['inAButNotInB']:
print ">>> inAButNotInB: %s" % differences['inAButNotInB']
if differences['inBButNotInA']:
print ">>> inBButNotInA: %s" % differences['inBButNotInA']
for key in differences['differentValues']:
print ">>> da[%s] != db[%s]" % (key, key)
print "da[%s] = %r" % (key, da[key])
print "db[%s] = %r" % (key, db[key])
return differences
def dictDiff(da, db):
""" Compares two python dictionaries at the top level and return differences
da: first dictionary
db: second dictionary
Returns: None if dictionaries test equal; otherwise returns a
dictionary as follows:
{
'inAButNotInB':
<sequence of keys that are in da but not in db>
'inBButNotInA':
<sequence of keys that are in db but not in da>
'differentValues':
<sequence of keys whose corresponding values differ
between da and db>
}
"""
different = False
resultDict = dict()
resultDict['inAButNotInB'] = set(da) - set(db)
if resultDict['inAButNotInB']:
different = True
resultDict['inBButNotInA'] = set(db) - set(da)
if resultDict['inBButNotInA']:
different = True
resultDict['differentValues'] = []
for key in (set(da) - resultDict['inAButNotInB']):
comparisonResult = da[key] == db[key]
if isinstance(comparisonResult, bool):
isEqual = comparisonResult
else:
# This handles numpy arrays (but only at the top level)
isEqual = comparisonResult.all()
if not isEqual:
resultDict['differentValues'].append(key)
different = True
assert (((resultDict['inAButNotInB'] or resultDict['inBButNotInA'] or
resultDict['differentValues']) and different) or not different)
return resultDict if different else None
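# Illustrative example (not part of the original module):
#   dictDiff({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   # -> {'inAButNotInB': set(['a']),
#   #     'inBButNotInA': set(['c']),
#   #     'differentValues': ['b']}
#   dictDiff({'a': 1}, {'a': 1})  # -> None when the dicts test equal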
| agpl-3.0 | -6,650,219,823,586,886,000 | 29.606936 | 81 | 0.62474 | false | 3.954444 | false | false | false |
mfherbst/spack | var/spack/repos/builtin/packages/sw4lite/package.py | 2 | 3776 | ##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
class Sw4lite(MakefilePackage):
"""Sw4lite is a bare bone version of SW4 intended for testing
performance optimizations in a few important numerical kernels of SW4."""
tags = ['proxy-app', 'ecp-proxy-app']
homepage = "https://geodynamics.org/cig/software/sw4"
url = "https://github.com/geodynamics/sw4lite/archive/v1.0.zip"
git = "https://github.com/geodynamics/sw4lite.git"
version('develop', branch='master')
version('1.0', '3d911165f4f2ff6d5f9c1bd56ab6723f')
variant('openmp', default=True, description='Build with OpenMP support')
variant('precision', default='double', values=('float', 'double'),
multi=False, description='Floating point precision')
variant('ckernel', default=False, description='C or Fortran kernel')
depends_on('blas')
depends_on('lapack')
depends_on('mpi')
parallel = False
@property
def build_targets(self):
targets = []
spec = self.spec
if spec.variants['precision'].value == 'double':
cxxflags = ['-I../src', '-I../src/double']
else:
cxxflags = ['-I../src', '-I../src/float']
cflags = []
fflags = []
if '+openmp' in self.spec:
cflags.append('-DSW4_OPENMP')
cflags.append(self.compiler.openmp_flag)
cxxflags.append('-DSW4_OPENMP')
cxxflags.append(self.compiler.openmp_flag)
fflags.append(self.compiler.openmp_flag)
if spec.variants['ckernel'].value is True:
cxxflags.append('-DSW4_CROUTINES')
targets.append('ckernel=yes')
targets.append('FC=' + spec['mpi'].mpifc)
targets.append('CXX=' + spec['mpi'].mpicxx)
targets.append('CFLAGS={0}'.format(' '.join(cflags)))
targets.append('CXXFLAGS={0}'.format(' '.join(cxxflags)))
targets.append('FFLAGS={0}'.format(' '.join(fflags)))
targets.append('EXTRA_CXX_FLAGS=')
targets.append('EXTRA_FORT_FLAGS=')
lapack_blas = spec['lapack'].libs + spec['blas'].libs
if spec.satisfies('%gcc'):
targets.append('EXTRA_LINK_FLAGS={0} -lgfortran'
.format(lapack_blas.ld_flags))
else:
targets.append('EXTRA_LINK_FLAGS={0}'.format(lapack_blas.ld_flags))
return targets
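    # Illustrative note (assumed spec, not part of the original package): for a spec such
    # as `sw4lite +openmp precision=double %gcc ^openmpi`, the list built above resolves
    # to make variables roughly of the form
    #   ['FC=mpif90', 'CXX=mpicxx',
    #    'CFLAGS=-DSW4_OPENMP -fopenmp',
    #    'CXXFLAGS=-I../src -I../src/double -DSW4_OPENMP -fopenmp', ...,
    #    'EXTRA_LINK_FLAGS=<lapack/blas ld flags> -lgfortran']
    # which MakefilePackage then passes straight to `make`.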
def install(self, spec, prefix):
mkdir(prefix.bin)
exe_name = glob.glob('*/sw4lite')[0]
install(exe_name, prefix.bin)
install_tree('tests', prefix.tests)
| lgpl-2.1 | -5,484,572,795,504,979,000 | 37.530612 | 79 | 0.620233 | false | 3.806452 | false | false | false |
servo-automation/highfive | tests/api_provider_tests.py | 2 | 7671 | from highfive.runner import Configuration, Response
from highfive.api_provider.interface import APIProvider, CONTRIBUTORS_STORE_KEY, DEFAULTS
from handler_tests import TestStore
from datetime import datetime
from dateutil.parser import parse as datetime_parse
from unittest import TestCase
def create_config():
config = Configuration()
config.name = 'test_app'
config.imgur_client_id = None
return config
class APIProviderTests(TestCase):
def test_api_init(self):
'''The default interface will only initialize the app name and payload.'''
config = Configuration()
config.name = 'test_app'
api = APIProvider(config=config, payload={})
self.assertEqual(api.name, 'test_app')
self.assertEqual(api.payload, {})
self.assertEqual(api.config, config)
for attr in DEFAULTS:
self.assertTrue(getattr(api, attr) is None)
def test_api_issue_payload(self):
'''
        If the payload is related to an issue (or to a comment on an issue/PR),
        then the commonly used issue-related attributes should be initialized.
'''
payload = {
'issue': {
'user': {
'login': 'Foobar'
},
'state': 'open',
'labels': [
{ 'name': 'Foo' },
{ 'name': 'Bar' }
],
'number': 200,
'updated_at': '1970-01-01T00:00:00Z'
},
}
api = APIProvider(config=create_config(), payload=payload)
self.assertEqual(api.payload, payload)
self.assertFalse(api.is_pull)
self.assertTrue(api.is_open)
self.assertEqual(api.creator, 'foobar')
self.assertEqual(api.last_updated, payload['issue']['updated_at'])
self.assertEqual(api.number, '200')
self.assertTrue(api.pull_url is None)
self.assertEqual(api.labels, ['foo', 'bar'])
def test_api_pr_payload(self):
'''
If the payload is related to a PR, then the commonly used PR attributes
should've been initialized.
'''
payload = {
'pull_request': {
'user': {
'login': 'Foobar'
},
'assignee': {
'login': 'Baz'
},
'state': 'open',
'number': 50,
'url': 'some url',
'updated_at': '1970-01-01T00:00:00Z'
}
}
api = APIProvider(config=create_config(), payload=payload)
self.assertEqual(api.payload, payload)
self.assertTrue(api.is_open)
self.assertTrue(api.is_pull)
self.assertEqual(api.creator, 'foobar')
self.assertEqual(api.assignee, 'baz')
self.assertEqual(api.last_updated, payload['pull_request']['updated_at'])
self.assertEqual(api.number, '50')
self.assertEqual(api.pull_url, 'some url')
def test_api_other_events(self):
'''Test for payload belonging to other events such as comment, label, etc.'''
payload = { # This is a hypothetical payload just for tests
'sender': {
'login': 'Someone'
},
'label': {
'name': 'Label'
},
'repository': {
'owner': {
'login': 'foo'
},
'name': 'bar'
},
'comment': {
'body': 'Hello, world!',
},
'issue': {
'pull_request': {},
'labels': [],
'user': {
'login': 'Foobar'
},
'state': 'open',
'number': 200,
}
}
api = APIProvider(config=create_config(), payload=payload)
self.assertTrue(api.is_pull)
self.assertEqual(api.sender, 'someone')
self.assertEqual(api.comment, 'Hello, world!')
self.assertEqual(api.current_label, 'label')
self.assertEqual(api.owner, 'foo')
self.assertEqual(api.repo, 'bar')
def test_api_imgur_upload(self):
'''Test Imgur API upload'''
config = create_config()
api = APIProvider(config=config, payload={})
resp = api.post_image_to_imgur('some data')
self.assertTrue(resp is None) # No client ID - returns None
config.imgur_client_id = 'foobar'
def test_valid_request(method, url, data, headers):
self.assertEqual(headers['Authorization'], 'Client-ID foobar')
self.assertEqual(method, 'POST')
self.assertEqual(url, 'https://api.imgur.com/3/image')
self.assertEqual(data, {'image': 'some data'})
return Response(data={'data': {'link': 'hello'}})
tests = [
(test_valid_request, 'hello'),
(lambda method, url, data, headers: Response(data='', code=400), None),
(lambda method, url, data, headers: Response(data=''), None)
]
for func, expected in tests:
resp = api.post_image_to_imgur('some data', json_request=func)
self.assertEqual(resp, expected)
def test_contributors_update(self):
'''
        The contributors list (cache) lives only for an hour (by default). Once it's outdated,
the next call to `get_contributors` calls `fetch_contributors`, writes it to the store
and returns the list. Any calls within the next hour will return the existing contributors
without calling the API.
'''
class TestAPI(APIProvider):
fetched = False
def fetch_contributors(self):
self.fetched = True
return []
config = create_config()
api = TestAPI(config=config, payload={}, store=None)
self.assertFalse(api.fetched)
api.get_contributors()
# No store. This will always call the API.
self.assertTrue(api.fetched)
store = TestStore()
api = TestAPI(config=config, payload={}, store=store)
self.assertFalse(api.fetched)
now = datetime.now()
api.get_contributors()
data = store.get_object(CONTRIBUTORS_STORE_KEY)
updated_time = datetime_parse(data['last_update_time'])
# Store doesn't have contributors. It's been updated for the first time.
self.assertTrue(updated_time >= now)
self.assertTrue(api.fetched)
store = TestStore()
store.write_object(CONTRIBUTORS_STORE_KEY,
{ 'last_update_time': str(now), 'list': ['booya'] })
api = TestAPI(config=config, payload={}, store=store)
self.assertFalse(api.fetched)
api.get_contributors()
data = store.get_object(CONTRIBUTORS_STORE_KEY)
updated_time = datetime_parse(data['last_update_time'])
# Called within a cycle - no fetch occurs.
self.assertEqual(updated_time, now)
self.assertFalse(api.fetched)
store = TestStore()
store.write_object(CONTRIBUTORS_STORE_KEY,
{ 'last_update_time': str(now), 'list': ['booya'] })
api = TestAPI(config=config, payload={}, store=store)
self.assertFalse(api.fetched)
api.get_contributors(fetch=True)
# When `fetch` is enabled, API is called regardless.
self.assertTrue(api.fetched)
data = store.get_object(CONTRIBUTORS_STORE_KEY)
updated_time = datetime_parse(data['last_update_time'])
self.assertTrue(updated_time > now)
| mpl-2.0 | 4,291,612,290,385,657,300 | 35.014085 | 98 | 0.551688 | false | 4.224119 | true | false | false |
djeraseit/PredictionIO | examples/experimental/scala-local-friend-recommendation/file_random.py | 48 | 4883 | import sys
import random
read_file = open("data/user_profile.txt", 'r')
write_file = open("data/mini_user_profile.txt", 'w')
number_of_lines = int(sys.argv[1])
number_of_items = int(sys.argv[2])
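# Illustrative usage (assumed invocation, not part of the original script):
#   python file_random.py 500 100
# samples roughly 500 users and 100 items (duplicates are dropped, so the final counts
# may be slightly smaller) and writes the mini_* files under data/.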
#record number of lines
count = 0
random_num_list = []
# loop through the file to get number of lines in the file
for line in read_file:
count += 1
print "generating random numbers"
# generating a list of random lines to read from
for i in range(0, number_of_lines):
random_num_list.append(random.randint(0, count))
#get rid of any duplicates
no_duplicate_list = list(set(random_num_list))
#sort the list
no_duplicate_list.sort()
#print no_duplicate_list
#go to file beginning
read_file.seek(0)
count = 0
index = 0
user_id_list = []
print "getting lines from user_profile"
for line in read_file:
if count == no_duplicate_list[index]:
write_file.write(line)
index += 1
user_id_list.append(int(line.split()[0]))
if index == len(no_duplicate_list):
break
count += 1
#user_id_list is sorted
user_id_list = map(str, user_id_list)
user_id_list.sort()
#print user_id_list
print "user_id finished"
print "getting lines from item"
read_file = open("data/item.txt", 'r')
write_file = open("data/mini_item.txt", 'w')
count = 0
random_num_list = []
for line in read_file:
count += 1
for i in range(0, number_of_items):
random_num_list.append(random.randint(0, count))
#no duplicate
random_num_list = list(set(random_num_list))
random_num_list.sort()
read_file.seek(0)
count = 0
index = 0
item_id_list = []
for line in read_file:
if count == random_num_list[index]:
write_file.write(line)
index += 1
item_id_list.append(int(line.split()[0]))
if index == len(random_num_list):
break
count += 1
print "item finished"
print "getting mini user_key_word"
read_file = open("data/user_key_word.txt", 'r')
write_file = open("data/mini_user_key_word.txt", 'w')
#record number of lines
count = 0
index = 0
# loop through the file to get number of lines in the file
for line in read_file:
if line.split()[0] == user_id_list[index]:
write_file.write(line)
index += 1
if index == len(user_id_list):
#print "break"
break
print "user keyword finished"
#go to file beginning
#getting the user_sns_small
print "getting user sns"
#print user_id_list
read_file = open("data/user_sns.txt", 'r')
#write_file = open("data/mini_user_sns_small.txt", 'w')
user_sns_list = []
index = 0
met = False
count = 0
for line in read_file:
count += 1
#print count
    #the same user can appear on several consecutive lines (one per followee)
if met:
if line.split()[0] != user_id_list[index]:
index += 1
met = False
if index == len(user_id_list):
break
if line.split()[0] == user_id_list[index]:
#print "here"
user_sns_list.append(line)
met = True
    # if the current line's user id is already greater than the id we are looking for,
    # that user has no follow records in this file, so move on to the next user
if line.split()[0] > user_id_list[index]:
index += 1
if index == len(user_id_list):
break
#print user_sns_list
write_file = open("data/mini_user_sns.txt",'w')
for line in user_sns_list:
for user_id in user_id_list:
if line.split()[1] == user_id:
write_file.write(line)
break
print "sns got"
print "getting user action"
#for line in write_file:
read_file = open("data/user_action.txt", 'r')
user_action_list = []
index = 0
met = False
count = 0
for line in read_file:
count += 1
#print count
if met:
if line.split()[0] != user_id_list[index]:
index += 1
met = False
if index == len(user_id_list):
break
if line.split()[0] == user_id_list[index]:
#print "here"
user_action_list.append(line)
met = True
if line.split()[0] > user_id_list[index]:
index += 1
if index == len(user_id_list):
break
#print user_action_list
write_file = open("data/mini_user_action.txt",'w')
for line in user_action_list:
for user_id in user_id_list:
if line.split()[1] == user_id:
write_file.write(line)
break
print "user action got"
print "getting rec_log_train"
user_set = set(user_id_list)
item_set = set(item_id_list)
read_file = open("data/rec_log_train.txt", 'r')
write_file = open("data/mini_rec_log_train.txt",'w')
count = 0
#for item in item_set:
# print type(item)
#for user in user_set:
# print type(user)
for line in read_file:
words = line.split()
# if words[0] in user_set and (words[1] in user_set or words[1] in item_set):
if words[0] in user_set and words[1] in item_set:
write_file.write(line)
print count
count += 1
print "Done"
| apache-2.0 | -7,507,922,459,721,964,000 | 24.7 | 80 | 0.618063 | false | 3.057608 | false | false | false |
Softmotions/edx-platform | common/test/acceptance/pages/lms/discussion.py | 36 | 25473 | from contextlib import contextmanager
from bok_choy.javascript import wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, Promise
from .course_page import CoursePage
class DiscussionPageMixin(object):
def is_ajax_finished(self):
return self.browser.execute_script("return jQuery.active") == 0
class DiscussionThreadPage(PageObject, DiscussionPageMixin):
url = None
def __init__(self, browser, thread_selector):
super(DiscussionThreadPage, self).__init__(browser)
self.thread_selector = thread_selector
def _find_within(self, selector):
"""
Returns a query corresponding to the given CSS selector within the scope
of this thread page
"""
return self.q(css=self.thread_selector + " " + selector)
def is_browser_on_page(self):
return self.q(css=self.thread_selector).present
def _get_element_text(self, selector):
"""
Returns the text of the first element matching the given selector, or
None if no such element exists
"""
text_list = self._find_within(selector).text
return text_list[0] if text_list else None
def _is_element_visible(self, selector):
query = self._find_within(selector)
return query.present and query.visible
@contextmanager
def _secondary_action_menu_open(self, ancestor_selector):
"""
Given the selector for an ancestor of a secondary menu, return a context
manager that will open and close the menu
"""
self._find_within(ancestor_selector + " .action-more").click()
EmptyPromise(
lambda: self._is_element_visible(ancestor_selector + " .actions-dropdown"),
"Secondary action menu opened"
).fulfill()
yield
if self._is_element_visible(ancestor_selector + " .actions-dropdown"):
self._find_within(ancestor_selector + " .action-more").click()
EmptyPromise(
lambda: not self._is_element_visible(ancestor_selector + " .actions-dropdown"),
"Secondary action menu closed"
).fulfill()
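    # Illustrative usage (assumed response id, not part of the original page object):
    #   with self._secondary_action_menu_open(".response_12 .discussion-response"):
    #       self._find_within(".response_12 .discussion-response .action-edit").first.click()
    # the dropdown is opened before the body runs and closed again afterwards if still open.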
def get_group_visibility_label(self):
"""
Returns the group visibility label shown for the thread.
"""
return self._get_element_text(".group-visibility-label")
def get_response_total_text(self):
"""Returns the response count text, or None if not present"""
return self._get_element_text(".response-count")
def get_num_displayed_responses(self):
"""Returns the number of responses actually rendered"""
return len(self._find_within(".discussion-response"))
def get_shown_responses_text(self):
"""Returns the shown response count text, or None if not present"""
return self._get_element_text(".response-display-count")
def get_load_responses_button_text(self):
"""Returns the load more responses button text, or None if not present"""
return self._get_element_text(".load-response-button")
def load_more_responses(self):
"""Clicks the load more responses button and waits for responses to load"""
self._find_within(".load-response-button").click()
EmptyPromise(
self.is_ajax_finished,
"Loading more Responses"
).fulfill()
def has_add_response_button(self):
"""Returns true if the add response button is visible, false otherwise"""
return self._is_element_visible(".add-response-btn")
def click_add_response_button(self):
"""
Clicks the add response button and ensures that the response text
field receives focus
"""
self._find_within(".add-response-btn").first.click()
EmptyPromise(
lambda: self._find_within(".discussion-reply-new textarea:focus").present,
"Response field received focus"
).fulfill()
@wait_for_js
def is_response_editor_visible(self, response_id):
"""Returns true if the response editor is present, false otherwise"""
return self._is_element_visible(".response_{} .edit-post-body".format(response_id))
@wait_for_js
def is_discussion_body_visible(self):
return self._is_element_visible(".post-body")
def is_mathjax_preview_available(self):
return self.q(css=".MathJax_Preview").text[0] == ""
def is_mathjax_rendered(self):
return self._is_element_visible(".MathJax")
def is_response_visible(self, comment_id):
"""Returns true if the response is viewable onscreen"""
return self._is_element_visible(".response_{} .response-body".format(comment_id))
def is_response_editable(self, response_id):
"""Returns true if the edit response button is present, false otherwise"""
with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
return self._is_element_visible(".response_{} .discussion-response .action-edit".format(response_id))
def get_response_body(self, response_id):
return self._get_element_text(".response_{} .response-body".format(response_id))
def start_response_edit(self, response_id):
"""Click the edit button for the response, loading the editing view"""
with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
self._find_within(".response_{} .discussion-response .action-edit".format(response_id)).first.click()
EmptyPromise(
lambda: self.is_response_editor_visible(response_id),
"Response edit started"
).fulfill()
def get_link_href(self):
"""Extracts href attribute of the referenced link"""
link_href = self._find_within(".post-body p a").attrs('href')
return link_href[0] if link_href else None
def get_response_vote_count(self, response_id):
return self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))
def vote_response(self, response_id):
current_count = self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))
self._find_within(".response_{} .discussion-response .action-vote".format(response_id)).first.click()
self.wait_for_ajax()
EmptyPromise(
lambda: current_count != self.get_response_vote_count(response_id),
"Response is voted"
).fulfill()
def is_response_reported(self, response_id):
return self._is_element_visible(".response_{} .discussion-response .post-label-reported".format(response_id))
def report_response(self, response_id):
with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
self._find_within(".response_{} .discussion-response .action-report".format(response_id)).first.click()
self.wait_for_ajax()
EmptyPromise(
lambda: self.is_response_reported(response_id),
"Response is reported"
).fulfill()
def is_response_endorsed(self, response_id):
return "endorsed" in self._get_element_text(".response_{} .discussion-response .posted-details".format(response_id))
def endorse_response(self, response_id):
self._find_within(".response_{} .discussion-response .action-endorse".format(response_id)).first.click()
self.wait_for_ajax()
EmptyPromise(
lambda: self.is_response_endorsed(response_id),
"Response edit started"
).fulfill()
def set_response_editor_value(self, response_id, new_body):
"""Replace the contents of the response editor"""
self._find_within(".response_{} .discussion-response .wmd-input".format(response_id)).fill(new_body)
def submit_response_edit(self, response_id, new_response_body):
"""Click the submit button on the response editor"""
self._find_within(".response_{} .discussion-response .post-update".format(response_id)).first.click()
EmptyPromise(
lambda: (
not self.is_response_editor_visible(response_id) and
self.is_response_visible(response_id) and
self.get_response_body(response_id) == new_response_body
),
"Comment edit succeeded"
).fulfill()
def is_show_comments_visible(self, response_id):
"""Returns true if the "show comments" link is visible for a response"""
return self._is_element_visible(".response_{} .action-show-comments".format(response_id))
def show_comments(self, response_id):
"""Click the "show comments" link for a response"""
self._find_within(".response_{} .action-show-comments".format(response_id)).first.click()
EmptyPromise(
lambda: self._is_element_visible(".response_{} .comments".format(response_id)),
"Comments shown"
).fulfill()
def is_add_comment_visible(self, response_id):
"""Returns true if the "add comment" form is visible for a response"""
return self._is_element_visible("#wmd-input-comment-body-{}".format(response_id))
def is_comment_visible(self, comment_id):
"""Returns true if the comment is viewable onscreen"""
return self._is_element_visible("#comment_{} .response-body".format(comment_id))
def get_comment_body(self, comment_id):
return self._get_element_text("#comment_{} .response-body".format(comment_id))
def is_comment_deletable(self, comment_id):
"""Returns true if the delete comment button is present, false otherwise"""
with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
return self._is_element_visible("#comment_{} .action-delete".format(comment_id))
def delete_comment(self, comment_id):
with self.handle_alert():
with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
self._find_within("#comment_{} .action-delete".format(comment_id)).first.click()
EmptyPromise(
lambda: not self.is_comment_visible(comment_id),
"Deleted comment was removed"
).fulfill()
def is_comment_editable(self, comment_id):
"""Returns true if the edit comment button is present, false otherwise"""
with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
return self._is_element_visible("#comment_{} .action-edit".format(comment_id))
def is_comment_editor_visible(self, comment_id):
"""Returns true if the comment editor is present, false otherwise"""
return self._is_element_visible(".edit-comment-body[data-id='{}']".format(comment_id))
def _get_comment_editor_value(self, comment_id):
return self._find_within("#wmd-input-edit-comment-body-{}".format(comment_id)).text[0]
def start_comment_edit(self, comment_id):
"""Click the edit button for the comment, loading the editing view"""
old_body = self.get_comment_body(comment_id)
with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
self._find_within("#comment_{} .action-edit".format(comment_id)).first.click()
EmptyPromise(
lambda: (
self.is_comment_editor_visible(comment_id) and
not self.is_comment_visible(comment_id) and
self._get_comment_editor_value(comment_id) == old_body
),
"Comment edit started"
).fulfill()
def set_comment_editor_value(self, comment_id, new_body):
"""Replace the contents of the comment editor"""
self._find_within("#comment_{} .wmd-input".format(comment_id)).fill(new_body)
def submit_comment_edit(self, comment_id, new_comment_body):
"""Click the submit button on the comment editor"""
self._find_within("#comment_{} .post-update".format(comment_id)).first.click()
EmptyPromise(
lambda: (
not self.is_comment_editor_visible(comment_id) and
self.is_comment_visible(comment_id) and
self.get_comment_body(comment_id) == new_comment_body
),
"Comment edit succeeded"
).fulfill()
def cancel_comment_edit(self, comment_id, original_body):
"""Click the cancel button on the comment editor"""
self._find_within("#comment_{} .post-cancel".format(comment_id)).first.click()
EmptyPromise(
lambda: (
not self.is_comment_editor_visible(comment_id) and
self.is_comment_visible(comment_id) and
self.get_comment_body(comment_id) == original_body
),
"Comment edit was canceled"
).fulfill()
class DiscussionSortPreferencePage(CoursePage):
"""
Page that contain the discussion board with sorting options
"""
def __init__(self, browser, course_id):
super(DiscussionSortPreferencePage, self).__init__(browser, course_id)
self.url_path = "discussion/forum"
def is_browser_on_page(self):
"""
        Return True if the browser is on the right page, else False.
"""
return self.q(css="body.discussion .forum-nav-sort-control").present
def get_selected_sort_preference(self):
"""
Return the text of option that is selected for sorting.
"""
options = self.q(css="body.discussion .forum-nav-sort-control option")
return options.filter(lambda el: el.is_selected())[0].get_attribute("value")
def change_sort_preference(self, sort_by):
"""
        Change the sort order by selecting the given option.
"""
self.q(css="body.discussion .forum-nav-sort-control option[value='{0}']".format(sort_by)).click()
def refresh_page(self):
"""
Reload the page.
"""
self.browser.refresh()
class DiscussionTabSingleThreadPage(CoursePage):
def __init__(self, browser, course_id, discussion_id, thread_id):
super(DiscussionTabSingleThreadPage, self).__init__(browser, course_id)
self.thread_page = DiscussionThreadPage(
browser,
"body.discussion .discussion-article[data-id='{thread_id}']".format(thread_id=thread_id)
)
self.url_path = "discussion/forum/{discussion_id}/threads/{thread_id}".format(
discussion_id=discussion_id, thread_id=thread_id
)
def is_browser_on_page(self):
return self.thread_page.is_browser_on_page()
def __getattr__(self, name):
return getattr(self.thread_page, name)
def close_open_thread(self):
with self.thread_page._secondary_action_menu_open(".forum-thread-main-wrapper"):
self._find_within(".forum-thread-main-wrapper .action-close").first.click()
@wait_for_js
def is_window_on_top(self):
"""
Check if window's scroll is at top
"""
return self.browser.execute_script("return $('html, body').offset().top") == 0
def _thread_is_rendered_successfully(self, thread_id):
return self.q(css=".discussion-article[data-id='{}']".format(thread_id)).visible
def click_and_open_thread(self, thread_id):
"""
        Click the specified thread in the list.
"""
thread_selector = "li[data-id='{}']".format(thread_id)
self.q(css=thread_selector).first.click()
EmptyPromise(
lambda: self._thread_is_rendered_successfully(thread_id),
"Thread has been rendered"
).fulfill()
def check_threads_rendered_successfully(self, thread_count):
"""
        Check that the expected number of threads is rendered on the page.
"""
return len(self.q(css=".forum-nav-thread").results) == thread_count
def check_window_is_on_top(self):
"""
Check window is on top of the page
"""
EmptyPromise(
self.is_window_on_top,
"Window is on top"
).fulfill()
class InlineDiscussionPage(PageObject):
url = None
def __init__(self, browser, discussion_id):
super(InlineDiscussionPage, self).__init__(browser)
self._discussion_selector = (
".discussion-module[data-discussion-id='{discussion_id}'] ".format(
discussion_id=discussion_id
)
)
def _find_within(self, selector):
"""
Returns a query corresponding to the given CSS selector within the scope
of this discussion page
"""
return self.q(css=self._discussion_selector + " " + selector)
def is_browser_on_page(self):
self.wait_for_ajax()
return self.q(css=self._discussion_selector).present
def is_discussion_expanded(self):
return self._find_within(".discussion").present
def expand_discussion(self):
"""Click the link to expand the discussion"""
self._find_within(".discussion-show").first.click()
EmptyPromise(
self.is_discussion_expanded,
"Discussion expanded"
).fulfill()
def get_num_displayed_threads(self):
return len(self._find_within(".discussion-thread"))
def has_thread(self, thread_id):
"""Returns true if this page is showing the thread with the specified id."""
return self._find_within('.discussion-thread#thread_{}'.format(thread_id)).present
def element_exists(self, selector):
return self.q(css=self._discussion_selector + " " + selector).present
def is_new_post_opened(self):
return self._find_within(".new-post-article").visible
def click_element(self, selector):
self.wait_for_element_presence(
"{discussion} {selector}".format(discussion=self._discussion_selector, selector=selector),
"{selector} is visible".format(selector=selector)
)
self._find_within(selector).click()
def click_cancel_new_post(self):
self.click_element(".cancel")
EmptyPromise(
lambda: not self.is_new_post_opened(),
"New post closed"
).fulfill()
def click_new_post_button(self):
self.click_element(".new-post-btn")
EmptyPromise(
self.is_new_post_opened,
"New post opened"
).fulfill()
@wait_for_js
def _is_element_visible(self, selector):
query = self._find_within(selector)
return query.present and query.visible
class InlineDiscussionThreadPage(DiscussionThreadPage):
def __init__(self, browser, thread_id):
super(InlineDiscussionThreadPage, self).__init__(
browser,
"body.courseware .discussion-module #thread_{thread_id}".format(thread_id=thread_id)
)
def expand(self):
"""Clicks the link to expand the thread"""
self._find_within(".forum-thread-expand").first.click()
EmptyPromise(
lambda: bool(self.get_response_total_text()),
"Thread expanded"
).fulfill()
def is_thread_anonymous(self):
return not self.q(css=".posted-details > .username").present
@wait_for_js
def check_if_selector_is_focused(self, selector):
"""
Check if selector is focused
"""
return self.browser.execute_script("return $('{}').is(':focus')".format(selector))
class DiscussionUserProfilePage(CoursePage):
TEXT_NEXT = u'Next >'
TEXT_PREV = u'< Previous'
PAGING_SELECTOR = "a.discussion-pagination[data-page-number]"
def __init__(self, browser, course_id, user_id, username, page=1):
super(DiscussionUserProfilePage, self).__init__(browser, course_id)
self.url_path = "discussion/forum/dummy/users/{}?page={}".format(user_id, page)
self.username = username
def is_browser_on_page(self):
return (
self.q(css='section.discussion-user-threads[data-course-id="{}"]'.format(self.course_id)).present
and
self.q(css='section.user-profile a.learner-profile-link').present
and
self.q(css='section.user-profile a.learner-profile-link').text[0] == self.username
)
@wait_for_js
def is_window_on_top(self):
return self.browser.execute_script("return $('html, body').offset().top") == 0
def get_shown_thread_ids(self):
elems = self.q(css="article.discussion-thread")
return [elem.get_attribute("id")[7:] for elem in elems]
def get_current_page(self):
def check_func():
try:
current_page = int(self.q(css="nav.discussion-paginator li.current-page").text[0])
except:
return False, None
return True, current_page
return Promise(
check_func, 'discussion-paginator current page has text', timeout=5,
).fulfill()
def _check_pager(self, text, page_number=None):
"""
returns True if 'text' matches the text in any of the pagination elements. If
page_number is provided, only return True if the element points to that result
page.
"""
elems = self.q(css=self.PAGING_SELECTOR).filter(lambda elem: elem.text == text)
if page_number:
elems = elems.filter(lambda elem: int(elem.get_attribute('data-page-number')) == page_number)
return elems.present
def get_clickable_pages(self):
return sorted([
int(elem.get_attribute('data-page-number'))
for elem in self.q(css=self.PAGING_SELECTOR)
if str(elem.text).isdigit()
])
def is_prev_button_shown(self, page_number=None):
return self._check_pager(self.TEXT_PREV, page_number)
def is_next_button_shown(self, page_number=None):
return self._check_pager(self.TEXT_NEXT, page_number)
def _click_pager_with_text(self, text, page_number):
"""
Click the first pagination element whose text is `text` and ensure
the resulting page number matches `page_number`.
"""
targets = [elem for elem in self.q(css=self.PAGING_SELECTOR) if elem.text == text]
targets[0].click()
EmptyPromise(
lambda: self.get_current_page() == page_number,
"navigated to desired page"
).fulfill()
def click_prev_page(self):
self._click_pager_with_text(self.TEXT_PREV, self.get_current_page() - 1)
EmptyPromise(
self.is_window_on_top,
"Window is on top"
).fulfill()
def click_next_page(self):
self._click_pager_with_text(self.TEXT_NEXT, self.get_current_page() + 1)
EmptyPromise(
self.is_window_on_top,
"Window is on top"
).fulfill()
def click_on_page(self, page_number):
self._click_pager_with_text(unicode(page_number), page_number)
EmptyPromise(
self.is_window_on_top,
"Window is on top"
).fulfill()
def click_on_sidebar_username(self):
self.wait_for_page()
self.q(css='.learner-profile-link').first.click()
class DiscussionTabHomePage(CoursePage, DiscussionPageMixin):
ALERT_SELECTOR = ".discussion-body .forum-nav .search-alert"
def __init__(self, browser, course_id):
super(DiscussionTabHomePage, self).__init__(browser, course_id)
self.url_path = "discussion/forum/"
def is_browser_on_page(self):
return self.q(css=".discussion-body section.home-header").present
def perform_search(self, text="dummy"):
self.q(css=".forum-nav-search-input").fill(text + chr(10))
EmptyPromise(
self.is_ajax_finished,
"waiting for server to return result"
).fulfill()
def get_search_alert_messages(self):
return self.q(css=self.ALERT_SELECTOR + " .message").text
def get_search_alert_links(self):
return self.q(css=self.ALERT_SELECTOR + " .link-jump")
def dismiss_alert_message(self, text):
"""
dismiss any search alert message containing the specified text.
"""
def _match_messages(text):
return self.q(css=".search-alert").filter(lambda elem: text in elem.text)
for alert_id in _match_messages(text).attrs("id"):
self.q(css="{}#{} a.dismiss".format(self.ALERT_SELECTOR, alert_id)).click()
EmptyPromise(
lambda: _match_messages(text).results == [],
"waiting for dismissed alerts to disappear"
).fulfill()
def click_new_post_button(self):
"""
Clicks the 'New Post' button.
"""
self.new_post_button.click()
EmptyPromise(
lambda: (
self.new_post_form
),
"New post action succeeded"
).fulfill()
@property
def new_post_button(self):
"""
Returns the new post button.
"""
elements = self.q(css="ol.course-tabs .new-post-btn")
return elements.first if elements.visible and len(elements) == 1 else None
@property
def new_post_form(self):
"""
Returns the new post form.
"""
elements = self.q(css=".forum-new-post-form")
return elements[0] if elements.visible and len(elements) == 1 else None
| agpl-3.0 | 1,997,688,661,498,393,600 | 38.129032 | 128 | 0.620147 | false | 3.991382 | false | false | false |
Balannen/LSMASOMM | atom3/Kernel/ColoredText/configHandler.py | 1 | 27398 | """Provides access to stored IDLE configuration information.
Refer to the comments at the beginning of config-main.def for a description of
the available configuration files and the design implemented to update user
configuration information. In particular, user configuration choices which
duplicate the defaults will be removed from the user's configuration files,
and if a file becomes empty, it will be deleted.
The contents of the user files may be altered using the Options/Configure IDLE
menu to access the configuration GUI (configDialog.py), or manually.
Throughout this module there is an emphasis on returning useable defaults
when a problem occurs in returning a requested configuration value back to
idle. This is to allow IDLE to continue to function in spite of errors in
the retrieval of config information. When a default is returned instead of
a requested config value, a message is printed to stderr to aid in
configuration problem notification and resolution.
"""
import os
import sys
import string
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
class InvalidConfigType(Exception): pass
class InvalidConfigSet(Exception): pass
class InvalidFgBg(Exception): pass
class InvalidTheme(Exception): pass
class IdleConfParser(ConfigParser):
"""
A ConfigParser specialised for idle configuration file handling
"""
def __init__(self, cfgFile, cfgDefaults=None):
"""
cfgFile - string, fully specified configuration file name
"""
self.file=cfgFile
ConfigParser.__init__(self,defaults=cfgDefaults)
def Get(self, section, option, type=None, default=None):
"""
Get an option value for given section/option or return default.
If type is specified, return as type.
"""
if type=='bool':
getVal=self.getboolean
elif type=='int':
getVal=self.getint
else:
getVal=self.get
if self.has_option(section,option):
#return getVal(section, option, raw, vars, default)
return getVal(section, option)
else:
return default
def GetOptionList(self,section):
"""
Get an option list for given section
"""
if self.has_section(section):
return self.options(section)
else: #return a default value
return []
def Load(self):
"""
Load the configuration file from disk
"""
self.read(self.file)
class IdleUserConfParser(IdleConfParser):
"""
IdleConfigParser specialised for user configuration handling.
"""
def AddSection(self,section):
"""
if section doesn't exist, add it
"""
if not self.has_section(section):
self.add_section(section)
def RemoveEmptySections(self):
"""
remove any sections that have no options
"""
for section in self.sections():
if not self.GetOptionList(section):
self.remove_section(section)
def IsEmpty(self):
"""
Remove empty sections and then return 1 if parser has no sections
left, else return 0.
"""
self.RemoveEmptySections()
if self.sections():
return 0
else:
return 1
def RemoveOption(self,section,option):
"""
If section/option exists, remove it.
Returns 1 if option was removed, 0 otherwise.
"""
if self.has_section(section):
return self.remove_option(section,option)
def SetOption(self,section,option,value):
"""
Sets option to value, adding section if required.
Returns 1 if option was added or changed, otherwise 0.
"""
if self.has_option(section,option):
if self.get(section,option)==value:
return 0
else:
self.set(section,option,value)
return 1
else:
if not self.has_section(section):
self.add_section(section)
self.set(section,option,value)
return 1
def RemoveFile(self):
"""
Removes the user config file from disk if it exists.
"""
if os.path.exists(self.file):
os.remove(self.file)
def Save(self):
"""Update user configuration file.
Remove empty sections. If resulting config isn't empty, write the file
to disk. If config is empty, remove the file from disk if it exists.
"""
if not self.IsEmpty():
cfgFile=open(self.file,'w')
self.write(cfgFile)
else:
self.RemoveFile()
class IdleConf:
"""
holds config parsers for all idle config files:
default config files
(idle install dir)/config-main.def
(idle install dir)/config-extensions.def
(idle install dir)/config-highlight.def
(idle install dir)/config-keys.def
user config files
(user home dir)/.idlerc/config-main.cfg
(user home dir)/.idlerc/config-extensions.cfg
(user home dir)/.idlerc/config-highlight.cfg
(user home dir)/.idlerc/config-keys.cfg
"""
def __init__(self):
self.defaultCfg={}
self.userCfg={}
self.cfg={}
self.CreateConfigHandlers()
self.LoadCfgFiles()
#self.LoadCfg()
def CreateConfigHandlers(self):
"""
set up a dictionary of config parsers for default and user
configurations respectively
"""
#build idle install path
if __name__ != '__main__': # we were imported
idleDir=os.path.dirname(__file__)
else: # we were exec'ed (for testing only)
idleDir=os.path.abspath(sys.path[0])
userDir=self.GetUserCfgDir()
configTypes=('main','extensions','highlight','keys')
defCfgFiles={}
usrCfgFiles={}
for cfgType in configTypes: #build config file names
defCfgFiles[cfgType]=os.path.join(idleDir,'config-'+cfgType+'.def')
usrCfgFiles[cfgType]=os.path.join(userDir,'config-'+cfgType+'.cfg')
for cfgType in configTypes: #create config parsers
self.defaultCfg[cfgType]=IdleConfParser(defCfgFiles[cfgType])
self.userCfg[cfgType]=IdleUserConfParser(usrCfgFiles[cfgType])
def GetUserCfgDir(self):
"""
Creates (if required) and returns a filesystem directory for storing
user config files.
"""
cfgDir='.idlerc'
userDir=os.path.expanduser('~')
if userDir != '~': #'HOME' exists as a key in os.environ
if not os.path.exists(userDir):
warn=('\n Warning: HOME environment variable points to\n '+
userDir+'\n but the path does not exist.\n')
sys.stderr.write(warn)
userDir='~'
if userDir=='~': #we still don't have a home directory
#traditionally idle has defaulted to os.getcwd(), is this adequate?
userDir = os.getcwd() #hack for no real homedir
userDir=os.path.join(userDir,cfgDir)
if not os.path.exists(userDir):
try: #make the config dir if it doesn't exist yet
os.mkdir(userDir)
except (IOError, OSError):
warn=('\n Warning: unable to create user config directory\n '+
userDir+'\n')
sys.stderr.write(warn)
return userDir
def GetOption(self, configType, section, option, default=None, type=None):
"""
Get an option value for given config type and given general
configuration section/option or return a default. If type is specified,
return as type. Firstly the user configuration is checked, with a
fallback to the default configuration, and a final 'catch all'
fallback to a useable passed-in default if the option isn't present in
either the user or the default configuration.
configType must be one of ('main','extensions','highlight','keys')
If a default is returned a warning is printed to stderr.
"""
if self.userCfg[configType].has_option(section,option):
return self.userCfg[configType].Get(section, option, type=type)
elif self.defaultCfg[configType].has_option(section,option):
return self.defaultCfg[configType].Get(section, option, type=type)
else: #returning default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetOption -\n'+
' problem retrieving configuration option '+`option`+'\n'+
' from section '+`section`+'.\n'+
' returning default value: '+`default`+'\n')
sys.stderr.write(warning)
return default
def GetSectionList(self, configSet, configType):
"""
Get a list of sections from either the user or default config for
the given config type.
configSet must be either 'user' or 'default'
configType must be one of ('main','extensions','highlight','keys')
"""
if not (configType in ('main','extensions','highlight','keys')):
raise InvalidConfigType, 'Invalid configType specified'
if configSet == 'user':
cfgParser=self.userCfg[configType]
elif configSet == 'default':
cfgParser=self.defaultCfg[configType]
else:
raise InvalidConfigSet, 'Invalid configSet specified'
return cfgParser.sections()
def GetHighlight(self, theme, element, fgBg=None):
"""
return individual highlighting theme elements.
fgBg - string ('fg' or 'bg') or None; if None, return a dictionary
containing fg and bg colours (appropriate for passing to Tkinter in,
e.g., a tag_config call), otherwise fg or bg colour only as specified.
"""
if self.defaultCfg['highlight'].has_section(theme):
themeDict=self.GetThemeDict('default',theme)
else:
themeDict=self.GetThemeDict('user',theme)
fore=themeDict[element+'-foreground']
if element=='cursor': #there is no config value for cursor bg
back=themeDict['normal-background']
else:
back=themeDict[element+'-background']
highlight={"foreground": fore,"background": back}
if not fgBg: #return dict of both colours
return highlight
else: #return specified colour only
if fgBg == 'fg':
return highlight["foreground"]
if fgBg == 'bg':
return highlight["background"]
else:
raise InvalidFgBg, 'Invalid fgBg specified'
def GetThemeDict(self,type,themeName):
"""
type - string, 'default' or 'user' theme type
themeName - string, theme name
Returns a dictionary which holds {option:value} for each element
in the specified theme. Values are loaded over a set of ultimate last
fallback defaults to guarantee that all theme elements are present in
a newly created theme.
"""
if type == 'user':
cfgParser=self.userCfg['highlight']
elif type == 'default':
cfgParser=self.defaultCfg['highlight']
else:
raise InvalidTheme, 'Invalid theme type specified'
#foreground and background values are provided for each theme element
#(apart from cursor) even though all these values are not yet used
#by idle, to allow for their use in the future. Default values are
#generally black and white.
theme={ 'normal-foreground':'#000000',
'normal-background':'#ffffff',
'keyword-foreground':'#000000',
'keyword-background':'#ffffff',
'comment-foreground':'#000000',
'comment-background':'#ffffff',
'string-foreground':'#000000',
'string-background':'#ffffff',
'definition-foreground':'#000000',
'definition-background':'#ffffff',
'hilite-foreground':'#000000',
'hilite-background':'gray',
'break-foreground':'#ffffff',
'break-background':'#000000',
'hit-foreground':'#ffffff',
'hit-background':'#000000',
'error-foreground':'#ffffff',
'error-background':'#000000',
#cursor (only foreground can be set)
'cursor-foreground':'#000000',
#shell window
'stdout-foreground':'#000000',
'stdout-background':'#ffffff',
'stderr-foreground':'#000000',
'stderr-background':'#ffffff',
'console-foreground':'#000000',
'console-background':'#ffffff' }
for element in theme.keys():
if not cfgParser.has_option(themeName,element):
#we are going to return a default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict'+
' -\n problem retrieving theme element '+`element`+
'\n from theme '+`themeName`+'.\n'+
' returning default value: '+`theme[element]`+'\n')
sys.stderr.write(warning)
colour=cfgParser.Get(themeName,element,default=theme[element])
theme[element]=colour
return theme
def CurrentTheme(self):
"""
Returns the name of the currently active theme
"""
return self.GetOption('main','Theme','name',default='')
def CurrentKeys(self):
"""
Returns the name of the currently active key set
"""
return self.GetOption('main','Keys','name',default='')
def GetExtensions(self, activeOnly=1):
"""
Gets a list of all idle extensions declared in the config files.
activeOnly - boolean, if true only return active (enabled) extensions
"""
extns=self.RemoveKeyBindNames(
self.GetSectionList('default','extensions'))
userExtns=self.RemoveKeyBindNames(
self.GetSectionList('user','extensions'))
for extn in userExtns:
if extn not in extns: #user has added own extension
extns.append(extn)
if activeOnly:
activeExtns=[]
for extn in extns:
if self.GetOption('extensions',extn,'enable',default=1,
type='bool'):
#the extension is enabled
activeExtns.append(extn)
return activeExtns
else:
return extns
def RemoveKeyBindNames(self,extnNameList):
#get rid of keybinding section names
names=extnNameList
kbNameIndicies=[]
for name in names:
if name.endswith('_bindings') or name.endswith('_cfgBindings'):
kbNameIndicies.append(names.index(name))
kbNameIndicies.sort()
kbNameIndicies.reverse()
for index in kbNameIndicies: #delete each keybinding section name
del(names[index])
return names
def GetExtnNameForEvent(self,virtualEvent):
"""
Returns the name of the extension that virtualEvent is bound in, or
None if not bound in any extension.
virtualEvent - string, name of the virtual event to test for, without
the enclosing '<< >>'
"""
extName=None
vEvent='<<'+virtualEvent+'>>'
for extn in self.GetExtensions(activeOnly=0):
for event in self.GetExtensionKeys(extn).keys():
if event == vEvent:
extName=extn
return extName
def GetExtensionKeys(self,extensionName):
"""
returns a dictionary of the configurable keybindings for a particular
extension, as they exist in the dictionary returned by GetCurrentKeySet;
that is, where previously used bindings are disabled.
"""
keysName=extensionName+'_cfgBindings'
activeKeys=self.GetCurrentKeySet()
extKeys={}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
event='<<'+eventName+'>>'
binding=activeKeys[event]
extKeys[event]=binding
return extKeys
def __GetRawExtensionKeys(self,extensionName):
"""
returns a dictionary of the configurable keybindings for a particular
extension, as defined in the configuration files, or an empty dictionary
if no bindings are found
"""
keysName=extensionName+'_cfgBindings'
extKeys={}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
binding=self.GetOption('extensions',keysName,
eventName,default='').split()
event='<<'+eventName+'>>'
extKeys[event]=binding
return extKeys
def GetExtensionBindings(self,extensionName):
"""
Returns a dictionary of all the event bindings for a particular
extension. The configurable keybindings are returned as they exist in
the dictionary returned by GetCurrentKeySet; that is, where re-used
keybindings are disabled.
"""
bindsName=extensionName+'_bindings'
extBinds=self.GetExtensionKeys(extensionName)
#add the non-configurable bindings
if self.defaultCfg['extensions'].has_section(bindsName):
eventNames=self.defaultCfg['extensions'].GetOptionList(bindsName)
for eventName in eventNames:
binding=self.GetOption('extensions',bindsName,
eventName,default='').split()
event='<<'+eventName+'>>'
extBinds[event]=binding
return extBinds
def GetKeyBinding(self, keySetName, eventStr):
"""
returns the keybinding for a specific event.
keySetName - string, name of key binding set
eventStr - string, the virtual event we want the binding for,
represented as a string, eg. '<<event>>'
"""
eventName=eventStr[2:-2] #trim off the angle brackets
binding=self.GetOption('keys',keySetName,eventName,default='').split()
return binding
def GetCurrentKeySet(self):
return self.GetKeySet(self.CurrentKeys())
def GetKeySet(self,keySetName):
"""
Returns a dictionary of: all requested core keybindings, plus the
keybindings for all currently active extensions. If a binding defined
in an extension is already in use, that binding is disabled.
"""
keySet=self.GetCoreKeys(keySetName)
activeExtns=self.GetExtensions(activeOnly=1)
for extn in activeExtns:
extKeys=self.__GetRawExtensionKeys(extn)
if extKeys: #the extension defines keybindings
for event in extKeys.keys():
if extKeys[event] in keySet.values():
#the binding is already in use
extKeys[event]='' #disable this binding
keySet[event]=extKeys[event] #add binding
return keySet
def IsCoreBinding(self,virtualEvent):
"""
returns true if the virtual event is bound in the core idle keybindings.
virtualEvent - string, name of the virtual event to test for, without
the enclosing '<< >>'
"""
return ('<<'+virtualEvent+'>>') in self.GetCoreKeys().keys()
def GetCoreKeys(self, keySetName=None):
"""
returns the requested set of core keybindings, with fallbacks if
required.
Keybindings loaded from the config file(s) are loaded _over_ these
defaults, so if there is a problem getting any core binding there will
be an 'ultimate last resort fallback' to the CUA-ish bindings
defined here.
"""
keyBindings={
'<<copy>>': ['<Control-c>', '<Control-C>'],
'<<cut>>': ['<Control-x>', '<Control-X>'],
'<<paste>>': ['<Control-v>', '<Control-V>'],
'<<beginning-of-line>>': ['<Control-a>', '<Home>'],
'<<center-insert>>': ['<Control-l>'],
'<<close-all-windows>>': ['<Control-q>'],
'<<close-window>>': ['<Alt-F4>'],
'<<do-nothing>>': ['<Control-x>'],
'<<end-of-file>>': ['<Control-d>'],
'<<python-docs>>': ['<F1>'],
'<<python-context-help>>': ['<Shift-F1>'],
'<<history-next>>': ['<Alt-n>'],
'<<history-previous>>': ['<Alt-p>'],
'<<interrupt-execution>>': ['<Control-c>'],
'<<view-restart>>': ['<F6>'],
'<<restart-shell>>': ['<Control-F6>'],
'<<open-class-browser>>': ['<Alt-c>'],
'<<open-module>>': ['<Alt-m>'],
'<<open-new-window>>': ['<Control-n>'],
'<<open-window-from-file>>': ['<Control-o>'],
'<<plain-newline-and-indent>>': ['<Control-j>'],
'<<print-window>>': ['<Control-p>'],
'<<redo>>': ['<Control-y>'],
'<<remove-selection>>': ['<Escape>'],
'<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
'<<save-window-as-file>>': ['<Alt-s>'],
'<<save-window>>': ['<Control-s>'],
'<<select-all>>': ['<Alt-a>'],
'<<toggle-auto-coloring>>': ['<Control-slash>'],
'<<undo>>': ['<Control-z>'],
'<<find-again>>': ['<Control-g>', '<F3>'],
'<<find-in-files>>': ['<Alt-F3>'],
'<<find-selection>>': ['<Control-F3>'],
'<<find>>': ['<Control-f>'],
'<<replace>>': ['<Control-h>'],
'<<goto-line>>': ['<Alt-g>'],
'<<smart-backspace>>': ['<Key-BackSpace>'],
'<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
'<<smart-indent>>': ['<Key-Tab>'],
'<<indent-region>>': ['<Control-Key-bracketright>'],
'<<dedent-region>>': ['<Control-Key-bracketleft>'],
'<<comment-region>>': ['<Alt-Key-3>'],
'<<uncomment-region>>': ['<Alt-Key-4>'],
'<<tabify-region>>': ['<Alt-Key-5>'],
'<<untabify-region>>': ['<Alt-Key-6>'],
'<<toggle-tabs>>': ['<Alt-Key-t>'],
'<<change-indentwidth>>': ['<Alt-Key-u>']
}
if keySetName:
for event in keyBindings.keys():
binding=self.GetKeyBinding(keySetName,event)
if binding:
keyBindings[event]=binding
else: #we are going to return a default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys'+
' -\n problem retrieving key binding for event '+
`event`+'\n from key set '+`keySetName`+'.\n'+
' returning default value: '+`keyBindings[event]`+'\n')
sys.stderr.write(warning)
return keyBindings
def GetExtraHelpSourceList(self,configSet):
"""Fetch list of extra help sources from a given configSet.
Valid configSets are 'user' or 'default'. Return a list of tuples of
the form (menu_item , path_to_help_file , option), or return the empty
list. 'option' is the sequence number of the help resource. 'option'
values determine the position of the menu items on the Help menu,
therefore the returned list must be sorted by 'option'.
"""
helpSources=[]
if configSet=='user':
cfgParser=self.userCfg['main']
elif configSet=='default':
cfgParser=self.defaultCfg['main']
else:
raise InvalidConfigSet, 'Invalid configSet specified'
options=cfgParser.GetOptionList('HelpFiles')
for option in options:
value=cfgParser.Get('HelpFiles',option,default=';')
if value.find(';')==-1: #malformed config entry with no ';'
menuItem='' #make these empty
helpPath='' #so value won't be added to list
else: #config entry contains ';' as expected
value=string.split(value,';')
menuItem=value[0].strip()
helpPath=value[1].strip()
if menuItem and helpPath: #neither are empty strings
helpSources.append( (menuItem,helpPath,option) )
helpSources.sort(self.__helpsort)
return helpSources
def __helpsort(self, h1, h2):
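# Order help source tuples by their numeric 'option' value (third tuple element).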
if int(h1[2]) < int(h2[2]):
return -1
elif int(h1[2]) > int(h2[2]):
return 1
else:
return 0
def GetAllExtraHelpSourcesList(self):
"""
Returns a list of tuples containing the details of all additional help
sources configured, or an empty list if there are none. Tuples are of
the format returned by GetExtraHelpSourceList.
"""
allHelpSources=( self.GetExtraHelpSourceList('default')+
self.GetExtraHelpSourceList('user') )
return allHelpSources
def LoadCfgFiles(self):
"""
load all configuration files.
"""
for key in self.defaultCfg.keys():
self.defaultCfg[key].Load()
self.userCfg[key].Load() #same keys
def SaveUserCfgFiles(self):
"""
write all loaded user configuration files back to disk
"""
for key in self.userCfg.keys():
self.userCfg[key].Save()
idleConf=IdleConf()
### module test
if __name__ == '__main__':
def dumpCfg(cfg):
print '\n',cfg,'\n'
for key in cfg.keys():
sections=cfg[key].sections()
print key
print sections
for section in sections:
options=cfg[key].options(section)
print section
print options
for option in options:
print option, '=', cfg[key].Get(section,option)
dumpCfg(idleConf.defaultCfg)
dumpCfg(idleConf.userCfg)
print idleConf.userCfg['main'].Get('Theme','name')
#print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
| gpl-3.0 | 1,339,416,341,385,229,300 | 39.829008 | 86 | 0.563764 | false | 4.563291 | true | false | false |
nddsg/TreeDecomps | xplodnTree/tdec/b2CliqueTreeRules.py | 1 | 3569 | #!/usr/bin/env python
__author__ = 'saguinag' + '@' + 'nd.edu'
__version__ = "0.1.0"
##
## fname "b2CliqueTreeRules.py"
##
## TODO: some todo list
## VersionLog:
import net_metrics as metrics
import pandas as pd
import argparse, traceback
import os, sys
import networkx as nx
import re
from collections import deque, defaultdict, Counter
import tree_decomposition as td
import PHRG as phrg
import probabilistic_cfg as pcfg
import exact_phrg as xphrg
import a1_hrg_cliq_tree as nfld
from a1_hrg_cliq_tree import load_edgelist
DEBUG = False
def get_parser ():
parser = argparse.ArgumentParser(description='b2CliqueTreeRules.py: given a tree derive grammar rules')
parser.add_argument('-t', '--treedecomp', required=True, help='input tree decomposition (dimacs file format)')
parser.add_argument('--version', action='version', version=__version__)
return parser
def dimacs_td_ct (tdfname):
""" tree decomp to clique-tree """
print '... input file:', tdfname
fname = tdfname
graph_name = os.path.basename(fname)
gname = graph_name.split('.')[0]
gfname = "datasets/out." + gname
tdh = os.path.basename(fname).split('.')[1] # tree decomp heuristic
tfname = gname+"."+tdh
G = load_edgelist(gfname)
if DEBUG: print nx.info(G)
print
with open(fname, 'r') as f: # read tree decomp from inddgo
lines = f.readlines()
lines = [x.rstrip('\r\n') for x in lines]
cbags = {}
bags = [x.split() for x in lines if x.startswith('B')]
for b in bags:
cbags[int(b[1])] = [int(x) for x in b[3:]] # what to do with bag size?
edges = [x.split()[1:] for x in lines if x.startswith('e')]
edges = [[int(k) for k in x] for x in edges]
tree = defaultdict(set)
for s, t in edges:
tree[frozenset(cbags[s])].add(frozenset(cbags[t]))
if DEBUG: print '.. # of keys in `tree`:', len(tree.keys())
if DEBUG: print tree.keys()
root = list(tree)[0]
if DEBUG: print '.. Root:', root
root = frozenset(cbags[1])
if DEBUG: print '.. Root:', root
T = td.make_rooted(tree, root)
if DEBUG: print '.. T rooted:', len(T)
# nfld.unfold_2wide_tuple(T) # lets me display the tree's frozen sets
T = phrg.binarize(T)
prod_rules = {}
td.new_visit(T, G, prod_rules)
if DEBUG: print "--------------------"
if DEBUG: print "- Production Rules -"
if DEBUG: print "--------------------"
for k in prod_rules.iterkeys():
if DEBUG: print k
s = 0
for d in prod_rules[k]:
s += prod_rules[k][d]
for d in prod_rules[k]:
prod_rules[k][d] = float(prod_rules[k][d]) / float(s) # normalization step to create probs not counts.
if DEBUG: print '\t -> ', d, prod_rules[k][d]
rules = []
id = 0
for k, v in prod_rules.iteritems():
sid = 0
for x in prod_rules[k]:
rhs = re.findall("[^()]+", x)
rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
if DEBUG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
sid += 1
id += 1
df = pd.DataFrame(rules)
outdf_fname = "./ProdRules/"+tfname+".prules"
if not os.path.isfile(outdf_fname+".bz2"):
print '...',outdf_fname, "written"
df.to_csv(outdf_fname+".bz2", compression="bz2")
else:
print '...', outdf_fname, "file exists"
return
def main ():
parser = get_parser()
args = vars(parser.parse_args())
dimacs_td_ct(args['treedecomp']) # gen synth graph
if __name__ == '__main__':
try:
main()
except Exception, e:
print str(e)
traceback.print_exc()
sys.exit(1)
sys.exit(0)
| mit | 1,961,374,472,834,969,000 | 26.037879 | 112 | 0.612496 | false | 2.984114 | false | false | false |
ar4s/django | django/db/models/sql/expressions.py | 3 | 4373 | import copy
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
class SQLEvaluator(object):
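# Translates a Django expression tree (e.g. F() expressions) into SQL, resolving
# field references to (table alias, column) pairs via the query's join machinery.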
def __init__(self, expression, query, allow_joins=True, reuse=None):
self.expression = expression
self.opts = query.get_meta()
self.reuse = reuse
self.cols = []
self.expression.prepare(self, query, allow_joins)
def relabeled_clone(self, change_map):
clone = copy.copy(self)
clone.cols = []
for node, col in self.cols:
if hasattr(col, 'relabeled_clone'):
clone.cols.append((node, col.relabeled_clone(change_map)))
else:
clone.cols.append((node,
(change_map.get(col[0], col[0]), col[1])))
return clone
def get_cols(self):
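# Collect the (alias, column) pairs gathered during prepare(), recursing into child nodes.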
cols = []
for node, col in self.cols:
if hasattr(node, 'get_cols'):
cols.extend(node.get_cols())
elif isinstance(col, tuple):
cols.append(col)
return cols
def prepare(self):
return self
def as_sql(self, qn, connection):
return self.expression.evaluate(self, qn, connection)
######################################################
# Visitor methods for initial expression preparation #
######################################################
def prepare_node(self, node, query, allow_joins):
for child in node.children:
if hasattr(child, 'prepare'):
child.prepare(self, query, allow_joins)
def prepare_leaf(self, node, query, allow_joins):
if not allow_joins and LOOKUP_SEP in node.name:
raise FieldError("Joined field references are not permitted in this query")
field_list = node.name.split(LOOKUP_SEP)
if node.name in query.aggregates:
self.cols.append((node, query.aggregate_select[node.name]))
else:
try:
field, sources, opts, join_list, path = query.setup_joins(
field_list, query.get_meta(),
query.get_initial_alias(), self.reuse)
targets, _, join_list = query.trim_joins(sources, join_list, path)
if self.reuse is not None:
self.reuse.update(join_list)
for t in targets:
self.cols.append((node, (join_list[-1], t.column)))
except FieldDoesNotExist:
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (node.name,
[f.name for f in self.opts.fields]))
###################################################
# Visitor methods for final expression evaluation #
###################################################
def evaluate_node(self, node, qn, connection):
expressions = []
expression_params = []
for child in node.children:
if hasattr(child, 'evaluate'):
sql, params = child.evaluate(self, qn, connection)
else:
sql, params = '%s', (child,)
if len(getattr(child, 'children', [])) > 1:
format = '(%s)'
else:
format = '%s'
if sql:
expressions.append(format % sql)
expression_params.extend(params)
return connection.ops.combine_expression(node.connector, expressions), expression_params
def evaluate_leaf(self, node, qn, connection):
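# Emit SQL for a single leaf node using the column resolved for it in prepare_leaf().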
col = None
for n, c in self.cols:
if n is node:
col = c
break
if col is None:
raise ValueError("Given node not found")
if hasattr(col, 'as_sql'):
return col.as_sql(qn, connection)
else:
return '%s.%s' % (qn(col[0]), qn(col[1])), []
def evaluate_date_modifier_node(self, node, qn, connection):
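# Render "expression <connector> timedelta" using the backend's date interval SQL.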
timedelta = node.children.pop()
sql, params = self.evaluate_node(node, qn, connection)
if (timedelta.days == timedelta.seconds == timedelta.microseconds == 0):
return sql, params
return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
| bsd-3-clause | -2,947,435,316,157,446,000 | 36.376068 | 96 | 0.52504 | false | 4.462245 | false | false | false |
iodoom-gitorious/enhanced-iodoom3 | neo/sys/linux/runner/runner_lib.py | 61 | 6759 | # run doom process on a series of maps
# can be used for regression testing, or to fetch media
# keeps a log of each run ( see getLogfile )
# currently uses a basic stdout activity timeout to decide when to move on
# using a periodic check of /proc/<pid>/status SleepAVG
# when the sleep average is reaching 0, issue a 'quit' to stdout
# keeps serialized run status in runner.pickle
# NOTE: can be used to initiate runs on failed maps only for instance etc.
# TODO: use the serialized and not the logs to sort the run order
# TODO: better logging. Use idLogger?
# TODO: configurable event when the process is found interactive
# instead of emitting a quit, perform some warning action?
import sys, os, commands, string, time, traceback, pickle
from twisted.application import internet, service
from twisted.internet import protocol, reactor, utils, defer
from twisted.internet.task import LoopingCall
class doomClientProtocol( protocol.ProcessProtocol ):
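# Process protocol that mirrors the child's stdout/stderr into a per-map logfile
# and fires a deferred once the process ends.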
# ProcessProtocol API
def connectionMade( self ):
self.logfile.write( 'connectionMade\n' )
def outReceived( self, data ):
print data
self.logfile.write( data )
def errReceived( self, data ):
print 'stderr: ' + data
self.logfile.write( 'stderr: ' + data )
def inConnectionLost( self ):
self.logfile.write( 'inConnectionLost\n' )
def outConnectionLost( self ):
self.logfile.write( 'outConnectionLost\n' )
def errConnectionLost( self ):
self.logfile.write( 'errConnectionLost\n' )
def processEnded( self, status_object ):
self.logfile.write( 'processEnded %s\n' % repr( status_object ) )
self.logfile.write( time.strftime( '%H:%M:%S', time.localtime( time.time() ) ) + '\n' )
self.logfile.close()
self.deferred.callback( None )
# mac management
def __init__( self, logfilename, deferred ):
self.logfilename = logfilename
self.logfile = open( logfilename, 'a' )
self.logfile.write( time.strftime( '%H:%M:%S', time.localtime( time.time() ) ) + '\n' )
self.deferred = deferred
class doomService( service.Service ):
# current monitoring state
# 0: nothing running
# 1: we have a process running, we're monitoring it's CPU usage
# 2: we issued a 'quit' to the process's stdin
# either going to get a processEnded, or a timeout
# 3: we forced a kill because of error, timeout etc.
state = 0
# load check period
check_period = 10
# pickled status file
pickle_file = 'runner.pickle'
# stores status indexed by filename
# { 'mapname' : ( state, last_update ), .. }
status = {}
# start the maps as multiplayer server
multiplayer = 0
def __init__( self, bin, cmdline, maps, sort = 0, multiplayer = 0, blank_run = 0 ):
self.p_transport = None
self.multiplayer = multiplayer
self.blank_run = blank_run
if ( self.multiplayer ):
print 'Operate in multiplayer mode'
self.bin = os.path.abspath( bin )
if ( type( cmdline ) is type( '' ) ):
self.cmdline = string.split( cmdline, ' ' )
else:
self.cmdline = cmdline
self.maps = maps
if ( os.path.exists( self.pickle_file ) ):
print 'Loading pickled status %s' % self.pickle_file
handle = open( self.pickle_file, 'r' )
self.status = pickle.load( handle )
handle.close()
if ( sort ):
print 'Sorting maps oldest runs first'
maps_sorted = [ ]
for i in self.maps:
i_log = self.getLogfile( i )
if ( os.path.exists( i_log ) ):
maps_sorted.append( ( i, os.path.getmtime( i_log ) ) )
else:
maps_sorted.append( ( i, 0 ) )
maps_sorted.sort( lambda x,y : cmp( x[1], y[1] ) )
self.maps = [ ]
if ( blank_run ):
self.maps.append( 'blankrun' )
for i in maps_sorted:
self.maps.append( i[ 0 ] )
print 'Sorted as: %s\n' % repr( self.maps )
def getLogfile( self, name ):
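# Build the per-map logfile path, replacing '/' in map names so they form flat filenames.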
return 'logs/' + string.translate( name, string.maketrans( '/', '-' ) ) + '.log'
# deferred call when child process dies
def processEnded( self, val ):
print 'child has died - state %d' % self.state
self.status[ self.maps[ self.i_map ] ] = ( self.state, time.time() )
self.i_map += 1
if ( self.i_map >= len( self.maps ) ):
reactor.stop()
else:
self.nextMap()
def processTimeout( self ):
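# Hard timeout: forcibly kill the child process.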
self.p_transport.signalProcess( "KILL" )
def sleepAVGReply( self, val ):
try:
s = val[10:][:-2]
print 'sleepAVGReply %s%%' % s
if ( s == '0' ):
# need twice in a row
if ( self.state == 2 ):
print 'child process is interactive'
self.p_transport.write( 'quit\n' )
else:
self.state = 2
else:
self.state = 1
# else:
# reactor.callLater( self.check_period, self.checkCPU )
except:
print traceback.format_tb( sys.exc_info()[2] )
print sys.exc_info()[0]
print 'exception raised in sleepAVGReply - killing process'
self.state = 3
self.p_transport.signalProcess( 'KILL' )
def sleepAVGTimeout( self ):
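# The SleepAVG query itself timed out, so assume the child is stuck and kill it.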
print 'sleepAVGTimeout - killing process'
self.state = 3
self.p_transport.signalProcess( 'KILL' )
# called at regular intervals to monitor the sleep average of the child process
# when sleep reaches 0, it means the map is loaded and interactive
def checkCPU( self ):
if ( self.state == 0 or self.p_transport is None or self.p_transport.pid is None ):
print 'checkCPU: no child process atm'
return
defer = utils.getProcessOutput( '/bin/bash', [ '-c', 'cat /proc/%d/status | grep SleepAVG' % self.p_transport.pid ] )
defer.addCallback( self.sleepAVGReply )
defer.setTimeout( 2, self.sleepAVGTimeout )
def nextMap( self ):
self.state = 0
name = self.maps[ self.i_map ]
print 'Starting map: ' + name
logfile = self.getLogfile( name )
print 'Logging to: ' + logfile
if ( self.multiplayer ):
cmdline = [ self.bin ] + self.cmdline + [ '+set', 'si_map', name ]
if ( name != 'blankrun' ):
cmdline.append( '+spawnServer' )
else:
cmdline = [ self.bin ] + self.cmdline
if ( name != 'blankrun' ):
cmdline += [ '+devmap', name ]
print 'Command line: ' + repr( cmdline )
self.deferred = defer.Deferred()
self.deferred.addCallback( self.processEnded )
self.p_transport = reactor.spawnProcess( doomClientProtocol( logfile, self.deferred ), self.bin, cmdline , path = os.path.dirname( self.bin ), env = os.environ )
self.state = 1
# # setup the CPU usage loop
# reactor.callLater( self.check_period, self.checkCPU )
def startService( self ):
print 'doomService startService'
loop = LoopingCall( self.checkCPU )
loop.start( self.check_period )
self.i_map = 0
self.nextMap()
def stopService( self ):
print 'doomService stopService'
if ( not self.p_transport.pid is None ):
self.p_transport.signalProcess( 'KILL' )
# serialize
print 'saving status to %s' % self.pickle_file
handle = open( self.pickle_file, 'w+' )
pickle.dump( self.status, handle )
handle.close()
| gpl-3.0 | 291,736,784,438,179,700 | 31.339713 | 163 | 0.669182 | false | 3.110446 | false | false | false |
igel-kun/pyload | module/plugins/hooks/CloudFlareDdos.py | 1 | 11909 | # -*- coding: utf-8 -*-
import inspect
import re
import urlparse
from module.network.HTTPRequest import BadHeader
from ..captcha.ReCaptcha import ReCaptcha
from ..internal.Addon import Addon
from ..internal.misc import parse_html_header
def plugin_id(plugin):
return ("<%(plugintype)s %(pluginname)s%(id)s>" %
{'plugintype': plugin.__type__.upper(),
'pluginname': plugin.__name__,
'id': "[%s]" % plugin.pyfile.id if plugin.pyfile else ""})
def is_simple_plugin(obj):
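# True if the object's class hierarchy includes SimpleHoster or SimpleCrypter.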
return any(k.__name__ in ("SimpleHoster", "SimpleCrypter")
for k in inspect.getmro(type(obj)))
def get_plugin_last_header(plugin):
# @NOTE: req can be a HTTPRequest or a Browser object
return plugin.req.http.header if hasattr(plugin.req, "http") else plugin.req.header
class CloudFlare(object):
@staticmethod
def handle_function(addon_plugin, owner_plugin, func_name, orig_func, args):
addon_plugin.log_debug("Calling %s() of %s" % (func_name, plugin_id(owner_plugin)))
try:
data = orig_func(*args[0], **args[1])
addon_plugin.log_debug("%s() returned successfully" % func_name)
return data
except BadHeader, e:
addon_plugin.log_debug("%s(): got BadHeader exception %s" % (func_name, e.code))
header = parse_html_header(e.header)
if "cloudflare" in header.get('server', ""):
if e.code == 403:
data = CloudFlare._solve_cf_security_check(addon_plugin, owner_plugin, e.content)
elif e.code == 503:
for _i in range(3):
try:
data = CloudFlare._solve_cf_ddos_challenge(addon_plugin, owner_plugin, e.content)
break
except BadHeader, e: #: Possibly we got another ddos challenge
addon_plugin.log_debug("%s(): got BadHeader exception %s" % (func_name, e.code))
header = parse_html_header(e.header)
if e.code == 503 and "cloudflare" in header.get('server', ""):
continue #: Yes, it's a ddos challenge again..
else:
data = None # Tell the exception handler to re-throw the exception
break
else:
addon_plugin.log_error("%s(): Max solve retries reached" % func_name)
data = None # Tell the exception handler to re-throw the exception
else:
addon_plugin.log_warning(_("Unknown CloudFlare response code %s") % e.code)
raise
if data is None:
raise e
else:
return data
else:
raise
@staticmethod
def _solve_cf_ddos_challenge(addon_plugin, owner_plugin, data):
try:
addon_plugin.log_info(_("Detected CloudFlare's DDoS protection page"))
# Cloudflare requires a delay before solving the challenge
wait_time = (int(re.search('submit\(\);\r?\n\s*},\s*([0-9]+)', data).group(1)) + 999) / 1000
owner_plugin.set_wait(wait_time)
last_url = owner_plugin.req.lastEffectiveURL
urlp = urlparse.urlparse(last_url)
domain = urlp.netloc
submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (urlp.scheme, domain)
get_params = {}
try:
get_params['jschl_vc'] = re.search(r'name="jschl_vc" value="(\w+)"', data).group(1)
get_params['pass'] = re.search(r'name="pass" value="(.+?)"', data).group(1)
get_params['s'] = re.search(r'name="s" value="(.+?)"', data).group(1)
# Extract the arithmetic operation
js = re.search(r'setTimeout\(function\(\){\s+(var s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n',
data).group(1)
js = re.sub(r'a\.value = (.+\.toFixed\(10\);).+', r'\1', js)
solution_name = re.search(r's,t,o,p,b,r,e,a,k,i,n,g,f,\s*(.+)\s*=', js).group(1)
g = re.search(r'(.*};)\n\s*(t\s*=(.+))\n\s*(;%s.*)' % (solution_name), js, re.M | re.I | re.S).groups()
js = g[0] + g[-1]
js = re.sub(r"[\n\\']", "", js)
except Exception:
# Something is wrong with the page.
# This may indicate CloudFlare has changed their anti-bot
# technique.
owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page"))
return None # Tell the exception handler to re-throw the exception
if "toFixed" not in js:
owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page"))
return None # Tell the exception handler to re-throw the exception
atob = 'var atob = function(str) {return Buffer.from(str, "base64").toString("binary");}'
try:
k = re.search(r'k\s*=\s*\'(.+?)\';', data).group(1)
v = re.search(r'<div(?:.*)id="%s"(?:.*)>(.*)</div>' % k, data).group(1)
doc = 'var document= {getElementById: function(x) { return {innerHTML:"%s"};}}' % v
except (AttributeError, IndexError):
doc = ''
js = '%s;%s;var t="%s";%s' % (doc, atob, domain, js)
# Safely evaluate the Javascript expression
res = owner_plugin.js.eval(js)
try:
get_params['jschl_answer'] = str(float(res))
except ValueError:
owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page"))
return None # Tell the exception handler to re-throw the exception
owner_plugin.wait() # Do the actual wait
return owner_plugin.load(submit_url,
get=get_params,
ref=last_url)
except BadHeader, e:
raise e #: Huston, we have a BadHeader!
except Exception, e:
addon_plugin.log_error(e)
return None # Tell the exception handler to re-throw the exception
@staticmethod
def _solve_cf_security_check(addon_plugin, owner_plugin, data):
try:
last_url = owner_plugin.req.lastEffectiveURL
captcha = ReCaptcha(owner_plugin.pyfile)
captcha_key = captcha.detect_key(data)
if captcha_key:
addon_plugin.log_info(_("Detected CloudFlare's security check page"))
response, challenge = captcha.challenge(captcha_key, data)
return owner_plugin.load(owner_plugin.fixurl("/cdn-cgi/l/chk_captcha"),
get={'g-recaptcha-response': response},
ref=last_url)
else:
addon_plugin.log_warning(_("Got unexpected CloudFlare html page"))
return None # Tell the exception handler to re-throw the exception
except Exception, e:
addon_plugin.log_error(e)
return None # Tell the exception handler to re-throw the exception
class PreloadStub(object):
def __init__(self, addon_plugin, owner_plugin):
self.addon_plugin = addon_plugin
self.owner_plugin = owner_plugin
self.old_preload = owner_plugin._preload
def my_preload(self, *args, **kwargs):
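# Run the original _preload() through the CloudFlare handler; keep any page data it recovers.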
data = CloudFlare.handle_function(self.addon_plugin, self.owner_plugin, "_preload", self.old_preload, (args, kwargs))
if data is not None:
self.owner_plugin.data = data
def __repr__(self):
return "<PreloadStub object at %s>" % hex(id(self))
class CloudFlareDdos(Addon):
__name__ = "CloudFlareDdos"
__type__ = "hook"
__version__ = "0.16"
__status__ = "testing"
__config__ = [("activated", "bool", "Activated", False)]
__description__ = """CloudFlare DDoS protection support"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
def activate(self):
self.stubs = {}
self._override_get_url()
def deactivate(self):
while len(self.stubs):
stub = next(self.stubs.itervalues())
self._unoverride_preload(stub.owner_plugin)
self._unoverride_get_url()
def _unoverride_preload(self, plugin):
if id(plugin) in self.stubs:
self.log_debug("Unoverriding _preload() for %s" % plugin_id(plugin))
stub = self.stubs.pop(id(plugin))
stub.owner_plugin._preload = stub.old_preload
else:
self.log_warning(_("No _preload() override found for %s, cannot un-override>") %
plugin_id(plugin))
def _override_preload(self, plugin):
if id(plugin) not in self.stubs:
stub = PreloadStub(self, plugin)
self.stubs[id(plugin)] = stub
self.log_debug("Overriding _preload() for %s" % plugin_id(plugin))
plugin._preload = stub.my_preload
else:
self.log_warning(_("Already overrided _preload() for %s") % plugin_id(plugin))
def _override_get_url(self):
self.log_debug("Overriding get_url()")
self.old_get_url = self.pyload.requestFactory.getURL
self.pyload.requestFactory.getURL = self.my_get_url
def _unoverride_get_url(self):
self.log_debug("Unoverriding get_url()")
self.pyload.requestFactory.getURL = self.old_get_url
def _find_owner_plugin(self):
"""
Walk the call stack until we find a SimpleHoster- or SimpleCrypter-based plugin.
Dirty but works.
"""
f = frame = inspect.currentframe()
try:
while True:
if f is None:
return None
elif 'self' in f.f_locals and is_simple_plugin(f.f_locals['self']):
return f.f_locals['self']
else:
f = f.f_back
finally:
del frame
def download_preparing(self, pyfile):
#: Only SimpleHoster and SimpleCrypter based plugins are supported
if not is_simple_plugin(pyfile.plugin):
self.log_debug("Skipping plugin %s" % plugin_id(pyfile.plugin))
return
attr = getattr(pyfile.plugin, "_preload", None)
if not attr and not callable(attr):
self.log_error(_("%s is missing _preload() function, cannot override!") % plugin_id(pyfile.plugin))
return
self._override_preload(pyfile.plugin)
def download_processed(self, pyfile):
if id(pyfile.plugin) in self.stubs:
self._unoverride_preload(pyfile.plugin)
def my_get_url(self, *args, **kwargs):
owner_plugin = self._find_owner_plugin()
if owner_plugin is None:
self.log_warning(_("Owner plugin not found, cannot process"))
return self.old_get_url(*args, **kwargs)
else:
#@NOTE: Better use owner_plugin.load() instead of get_url() so cookies are saved and so captcha credits
#@NOTE: Also that way we can use 'owner_plugin.req.header' to get the headers, otherwise we cannot get them
res = CloudFlare.handle_function(self, owner_plugin, "get_url", owner_plugin.load, (args, kwargs))
if kwargs.get('just_header', False):
# @NOTE: SimpleHoster/SimpleCrypter returns a dict while get_url() returns raw headers string,
# make sure we return a string for get_url('just_header'=True)
res = get_plugin_last_header(owner_plugin)
return res
| gpl-3.0 | -8,680,382,063,904,714,000 | 37.665584 | 128 | 0.546393 | false | 3.888018 | false | false | false |
einaru/luma | luma/plugins/browser/AddAttributeWizard.py | 3 | 8525 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2011
# Per Ove Ringdal
#
# Copyright (C) 2004
# Wido Depping, <widod@users.sourceforge.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
import os.path
import copy
import PyQt4
from PyQt4.QtCore import QString, pyqtSlot
from PyQt4.QtGui import QWizard
from .gui.AddAttributeWizardDesign import Ui_AddAttributeWizardDesign
from base.backend.ObjectClassAttributeInfo import ObjectClassAttributeInfo
from base.util.IconTheme import pixmapFromTheme
class AddAttributeWizard(QWizard, Ui_AddAttributeWizardDesign):
def __init__(self, parent = None, flags = PyQt4.QtCore.Qt.Widget):
QWizard.__init__(self, parent, flags)
self.setupUi(self)
# need to initialize the pages before connecting signals
self.restart()
attributePixmap = pixmapFromTheme(
"addattribute", ":/icons/64/add-attribute")
objectclassPixmap = pixmapFromTheme(
"objectclass", ":/icons/64/objectclass")
self.imageLabel.setPixmap(attributePixmap)
self.objectclassLabel.setPixmap(objectclassPixmap)
self.enableAllBox.toggled.connect(self.initAttributeBox)
self.attributeBox.activated[str].connect(self.newSelection)
self.classBox.itemSelectionChanged.connect(self.classSelection)
# attribute values of the current ldap object
self.OBJECTVALUES = None
# schema information for the ldap server
self.SCHEMAINFO = None
# set of attributes which are possible with the current objectclasses
self.possibleAttributes = None
# set of all attributes which are supported by the server
self.allPossibleAttributes = None
###############################################################################
def setData(self, smartObject):
""" Sets the current object data, schema information and initializes
the attribute box and wizard buttons.
"""
self.smartObject = smartObject
self.SCHEMAINFO = ObjectClassAttributeInfo(self.smartObject.getServerMeta())
self.processData()
self.initAttributeBox()
currentPageWidget = self.page(0)
#self.button(QWizard.FinishButton).setDisabled(False)
#self.button(QWizard.NextButton).setDisabled(True)
###############################################################################
def processData(self):
""" Compute all attributes which can be added according to the data of
the object. Single values which are already given are sorted out.
"""
possibleMust, possibleMay = self.smartObject.getPossibleAttributes()
# attributes used by the current objectClass
#usedAttributes = set(objectAttributes).difference(set(['objectClass']))
usedAttributes = self.smartObject.getAttributeList()
# set of attribute which are used and have to be single
singleAttributes = set(filter(self.SCHEMAINFO.isSingle, usedAttributes))
# create a set of attributes which may be added
self.possibleAttributes = (possibleMust.union(possibleMay)).difference(singleAttributes)
self.possibleAttributes = map(lambda x: x.lower(), self.possibleAttributes)
# create a set of attributes which are supported by the server
self.allPossibleAttributes = set(self.SCHEMAINFO.attributeDict.keys()).difference(singleAttributes)
###############################################################################
def initAttributeBox(self):
self.attributeBox.clear()
currentPageWidget = self.currentPage()
showAll = self.enableAllBox.isChecked()
currentPageWidget.setFinalPage(True)
currentPageWidget.setCommitPage(False)
#self.button(QWizard.FinishButton).setDisabled(False)
tmpList = None
if showAll:
tmpList = copy.deepcopy(self.allPossibleAttributes)
else:
tmpList = copy.deepcopy(self.possibleAttributes)
structuralClass = self.smartObject.getStructuralClasses()
# only show attributes whose objectclass combinations don't violate
# the objectclass chain (not two structural classes)
if len(structuralClass) > 0:
classList = filter(lambda x: not self.SCHEMAINFO.isStructural(x), self.SCHEMAINFO.getObjectClasses())
for x in structuralClass:
classList += self.SCHEMAINFO.getParents(x)
for x in self.smartObject.getObjectClasses():
if not (x in classList):
classList.append(x)
mustAttributes, mayAttributes = self.SCHEMAINFO.getAllAttributes(classList)
attributeList = mustAttributes.union(mayAttributes)
cleanList = filter(lambda x: x.lower() in tmpList, attributeList)
tmpList = cleanList
else:
self.enableAllBox.setChecked(True)
self.enableAllBox.setEnabled(False)
tmpList = sorted(self.allPossibleAttributes)
tmpList.sort()
tmpList = filter(lambda x: not (x.lower() == "objectclass"), tmpList)
map(self.attributeBox.addItem, tmpList)
self.newSelection(self.attributeBox.currentText())
###############################################################################
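# NOTE: the int-slot stub below is shadowed by the QString overload that follows it.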
@pyqtSlot(int)
def newSelection(self, attribute):
pass
@pyqtSlot("QString")
def newSelection(self, attribute):
attribute = str(attribute).lower()
currentPageWidget = self.currentPage()
mustSet, maySet = self.SCHEMAINFO.getAllObjectclassesForAttr(attribute)
tmpSet = mustSet.union(maySet)
if (attribute in self.possibleAttributes) or (len(tmpSet) == 0):
currentPageWidget.setFinalPage(True)
#self.button(QWizard.FinishButton).setDisabled(False)
self.button(QWizard.NextButton).setDisabled(True)
else:
currentPageWidget.setFinalPage(False)
#self.button(QWizard.FinishButton).setDisabled(True)
self.button(QWizard.NextButton).setDisabled(False)
###############################################################################
def initClassPage(self):
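# Fill the objectclass list with classes that can carry the selected attribute without
# introducing a second structural class.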
currentPageWidget = self.currentPage()
#self.button(QWizard.FinishButton).setDisabled(True)
self.classBox.clear()
self.mustAttributeBox.clear()
attribute = str(self.attributeBox.currentText())
mustSet, maySet = self.SCHEMAINFO.getAllObjectclassesForAttr(attribute)
classList = mustSet.union(maySet)
if self.smartObject.hasStructuralClass():
structList = filter(lambda x: self.SCHEMAINFO.isStructural(x), classList)
classList = filter(lambda x: not self.SCHEMAINFO.isStructural(x), classList)
for x in structList:
for y in self.smartObject.getObjectClasses():
if self.SCHEMAINFO.sameObjectClassChain(x, y):
classList.append(x)
else:
classList = sorted(classList)
classList.sort()
map(self.classBox.addItem, classList)
self.classBox.setCurrentRow(0)
###############################################################################
def classSelection(self):
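# Show the extra MUST attributes the selected objectclass would pull in.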
self.mustAttributeBox.clear()
objectclass = str(self.classBox.currentItem().text())
mustAttributes = self.SCHEMAINFO.getAllMusts([objectclass])
attribute = set([str(self.attributeBox.currentText())])
map(self.mustAttributeBox.addItem, mustAttributes.difference(attribute))
currentPageWidget = self.currentPage()
#self.button(QWizard.FinishButton).setDisabled(False)
###############################################################################
def initializePage(self, id):
if id == 1:
self.initClassPage()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| gpl-2.0 | -6,101,912,037,820,293,000 | 36.888889 | 113 | 0.637654 | false | 4.522546 | false | false | false |
leiferikb/bitpop | depot_tools/third_party/boto/mashups/interactive.py | 119 | 2737 | # Copyright (C) 2003-2007 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import socket
import sys
# windows does not have termios...
try:
import termios
import tty
has_termios = True
except ImportError:
has_termios = False
def interactive_shell(chan):
if has_termios:
posix_shell(chan)
else:
windows_shell(chan)
def posix_shell(chan):
import select
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = chan.recv(1024)
if len(x) == 0:
print '\r\n*** EOF\r\n',
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# thanks to Mike Looijmans for this code
def windows_shell(chan):
import threading
sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")
def writeall(sock):
while True:
data = sock.recv(256)
if not data:
sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
sys.stdout.flush()
break
sys.stdout.write(data)
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(chan,))
writer.start()
try:
while True:
d = sys.stdin.read(1)
if not d:
break
chan.send(d)
except EOFError:
# user hit ^Z or F6
pass
| gpl-3.0 | 4,704,446,714,513,671,000 | 27.216495 | 93 | 0.565583 | false | 3.960926 | false | false | false |
eeshangarg/zulip | zilencer/management/commands/add_new_realm.py | 6 | 1137 | from typing import Any
from zerver.lib.actions import bulk_add_subscriptions, do_create_realm, do_create_user
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.onboarding import send_initial_realm_messages
from zerver.models import Realm, UserProfile
class Command(ZulipBaseCommand):
help = """Add a new realm and initial user for manual testing of the onboarding process."""
def handle(self, **options: Any) -> None:
string_id = "realm{:02}".format(Realm.objects.filter(string_id__startswith="realm").count())
realm = do_create_realm(string_id, string_id)
name = "{:02}-user".format(UserProfile.objects.filter(email__contains="user@").count())
user = do_create_user(
f"{name}@{string_id}.zulip.com",
"password",
realm,
name,
role=UserProfile.ROLE_REALM_ADMINISTRATOR,
acting_user=None,
)
assert realm.signup_notifications_stream is not None
bulk_add_subscriptions(realm, [realm.signup_notifications_stream], [user], acting_user=None)
send_initial_realm_messages(realm)
| apache-2.0 | 8,829,127,599,415,805,000 | 39.607143 | 100 | 0.671944 | false | 3.893836 | false | false | false |
jbzdak/edx-platform | lms/djangoapps/shoppingcart/processors/tests/test_CyberSource2.py | 164 | 18446 | # -*- coding: utf-8 -*-
"""
Tests for the newer CyberSource API implementation.
"""
from mock import patch
from django.test import TestCase
from django.conf import settings
import ddt
from student.tests.factories import UserFactory
from shoppingcart.models import Order, OrderItem
from shoppingcart.processors.CyberSource2 import (
processor_hash,
process_postpay_callback,
render_purchase_form_html,
get_signed_purchase_params,
_get_processor_exception_html
)
from shoppingcart.processors.exceptions import (
CCProcessorSignatureException,
CCProcessorDataException,
CCProcessorWrongAmountException
)
@ddt.ddt
class CyberSource2Test(TestCase):
"""
Test the CyberSource API implementation. As much as possible,
this test case should use ONLY the public processor interface
(defined in shoppingcart.processors.__init__.py).
Some of the tests in this suite rely on Django settings
to be configured a certain way.
"""
COST = "10.00"
CALLBACK_URL = "/test_callback_url"
FAILED_DECISIONS = ["DECLINE", "CANCEL", "ERROR"]
def setUp(self):
""" Create a user and an order. """
super(CyberSource2Test, self).setUp()
self.user = UserFactory()
self.order = Order.get_cart_for_user(self.user)
self.order_item = OrderItem.objects.create(
order=self.order,
user=self.user,
unit_cost=self.COST,
line_cost=self.COST
)
def assert_dump_recorded(self, order):
"""
Verify that this order does have a dump of information from the
payment processor.
"""
self.assertNotEqual(order.processor_reply_dump, '')
def test_render_purchase_form_html(self):
# Verify that the HTML form renders with the payment URL specified
# in the test settings.
# This does NOT test that all the form parameters are correct;
# we verify that by testing `get_signed_purchase_params()` directly.
html = render_purchase_form_html(self.order, callback_url=self.CALLBACK_URL)
self.assertIn('<form action="/shoppingcart/payment_fake" method="post">', html)
self.assertIn('transaction_uuid', html)
self.assertIn('signature', html)
self.assertIn(self.CALLBACK_URL, html)
def test_get_signed_purchase_params(self):
params = get_signed_purchase_params(self.order, callback_url=self.CALLBACK_URL)
# Check the callback URL override
self.assertEqual(params['override_custom_receipt_page'], self.CALLBACK_URL)
# Parameters determined by the order model
self.assertEqual(params['amount'], '10.00')
self.assertEqual(params['currency'], 'usd')
self.assertEqual(params['orderNumber'], 'OrderId: {order_id}'.format(order_id=self.order.id))
self.assertEqual(params['reference_number'], self.order.id)
# Parameters determined by the Django (test) settings
self.assertEqual(params['access_key'], '0123456789012345678901')
self.assertEqual(params['profile_id'], 'edx')
# Some fields will change depending on when the test runs,
# so we just check that they're set to a non-empty string
self.assertGreater(len(params['signed_date_time']), 0)
self.assertGreater(len(params['transaction_uuid']), 0)
# Constant parameters
self.assertEqual(params['transaction_type'], 'sale')
self.assertEqual(params['locale'], 'en')
self.assertEqual(params['payment_method'], 'card')
self.assertEqual(
params['signed_field_names'],
",".join([
'amount',
'currency',
'orderNumber',
'access_key',
'profile_id',
'reference_number',
'transaction_type',
'locale',
'signed_date_time',
'signed_field_names',
'unsigned_field_names',
'transaction_uuid',
'payment_method',
'override_custom_receipt_page',
'override_custom_cancel_page',
])
)
self.assertEqual(params['unsigned_field_names'], '')
# Check the signature
self.assertEqual(params['signature'], self._signature(params))
# We patch the purchased callback because
# we're using the OrderItem base class, which throws an exception
    # when an item does not have a course id associated
@patch.object(OrderItem, 'purchased_callback')
def test_process_payment_raises_exception(self, purchased_callback): # pylint: disable=unused-argument
self.order.clear()
OrderItem.objects.create(
order=self.order,
user=self.user,
unit_cost=self.COST,
line_cost=self.COST,
)
params = self._signed_callback_params(self.order.id, self.COST, self.COST)
process_postpay_callback(params)
# We patch the purchased callback because
# (a) we're using the OrderItem base class, which doesn't implement this method, and
# (b) we want to verify that the method gets called on success.
@patch.object(OrderItem, 'purchased_callback')
@patch.object(OrderItem, 'pdf_receipt_display_name')
def test_process_payment_success(self, pdf_receipt_display_name, purchased_callback): # pylint: disable=unused-argument
# Simulate a callback from CyberSource indicating that payment was successful
params = self._signed_callback_params(self.order.id, self.COST, self.COST)
result = process_postpay_callback(params)
# Expect that we processed the payment successfully
self.assertTrue(
result['success'],
msg="Payment was not successful: {error}".format(error=result.get('error_html'))
)
self.assertEqual(result['error_html'], '')
# Expect that the item's purchased callback was invoked
purchased_callback.assert_called_with()
# Expect that the order has been marked as purchased
self.assertEqual(result['order'].status, 'purchased')
self.assert_dump_recorded(result['order'])
def test_process_payment_rejected(self):
# Simulate a callback from CyberSource indicating that the payment was rejected
params = self._signed_callback_params(self.order.id, self.COST, self.COST, decision='REJECT')
result = process_postpay_callback(params)
# Expect that we get an error message
self.assertFalse(result['success'])
self.assertIn(u"did not accept your payment", result['error_html'])
self.assert_dump_recorded(result['order'])
def test_process_payment_invalid_signature(self):
# Simulate a callback from CyberSource indicating that the payment was rejected
params = self._signed_callback_params(self.order.id, self.COST, self.COST, signature="invalid!")
result = process_postpay_callback(params)
# Expect that we get an error message
self.assertFalse(result['success'])
self.assertIn(u"corrupted message regarding your charge", result['error_html'])
def test_process_payment_invalid_order(self):
# Use an invalid order ID
params = self._signed_callback_params("98272", self.COST, self.COST)
result = process_postpay_callback(params)
# Expect an error
self.assertFalse(result['success'])
self.assertIn(u"inconsistent data", result['error_html'])
def test_process_invalid_payment_amount(self):
# Change the payment amount (no longer matches the database order record)
params = self._signed_callback_params(self.order.id, "145.00", "145.00")
result = process_postpay_callback(params)
# Expect an error
self.assertFalse(result['success'])
self.assertIn(u"different amount than the order total", result['error_html'])
# refresh data for current order
order = Order.objects.get(id=self.order.id)
self.assert_dump_recorded(order)
def test_process_amount_paid_not_decimal(self):
# Change the payment amount to a non-decimal
params = self._signed_callback_params(self.order.id, self.COST, "abcd")
result = process_postpay_callback(params)
# Expect an error
self.assertFalse(result['success'])
self.assertIn(u"badly-typed value", result['error_html'])
def test_process_user_cancelled(self):
        # Simulate a transaction cancelled by the user (the decision is overridden to CANCEL below)
params = self._signed_callback_params(self.order.id, self.COST, "abcd")
params['decision'] = u'CANCEL'
result = process_postpay_callback(params)
# Expect an error
self.assertFalse(result['success'])
self.assertIn(u"you have cancelled this transaction", result['error_html'])
@patch.object(OrderItem, 'purchased_callback')
@patch.object(OrderItem, 'pdf_receipt_display_name')
def test_process_no_credit_card_digits(self, pdf_receipt_display_name, purchased_callback): # pylint: disable=unused-argument
# Use a credit card number with no digits provided
params = self._signed_callback_params(
self.order.id, self.COST, self.COST,
card_number='nodigits'
)
result = process_postpay_callback(params)
# Expect that we processed the payment successfully
self.assertTrue(
result['success'],
msg="Payment was not successful: {error}".format(error=result.get('error_html'))
)
self.assertEqual(result['error_html'], '')
self.assert_dump_recorded(result['order'])
# Expect that the order has placeholders for the missing credit card digits
self.assertEqual(result['order'].bill_to_ccnum, '####')
@ddt.data('req_reference_number', 'req_currency', 'decision', 'auth_amount')
def test_process_missing_parameters(self, missing_param):
# Remove a required parameter
params = self._signed_callback_params(self.order.id, self.COST, self.COST)
del params[missing_param]
# Recalculate the signature with no signed fields so we can get past
# signature validation.
params['signed_field_names'] = 'reason_code,message'
params['signature'] = self._signature(params)
result = process_postpay_callback(params)
# Expect an error
self.assertFalse(result['success'])
self.assertIn(u"did not return a required parameter", result['error_html'])
@patch.object(OrderItem, 'purchased_callback')
@patch.object(OrderItem, 'pdf_receipt_display_name')
def test_sign_then_verify_unicode(self, pdf_receipt_display_name, purchased_callback): # pylint: disable=unused-argument
params = self._signed_callback_params(
self.order.id, self.COST, self.COST,
first_name=u'\u2699'
)
# Verify that this executes without a unicode error
result = process_postpay_callback(params)
self.assertTrue(result['success'])
self.assert_dump_recorded(result['order'])
@ddt.data('string', u'üñîçø∂é')
def test_get_processor_exception_html(self, error_string):
"""
Tests the processor exception html message
"""
for exception_type in [CCProcessorSignatureException, CCProcessorWrongAmountException, CCProcessorDataException]:
error_msg = error_string
exception = exception_type(error_msg)
html = _get_processor_exception_html(exception)
self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, html)
self.assertIn('Sorry!', html)
self.assertIn(error_msg, html)
def _signed_callback_params(
self, order_id, order_amount, paid_amount,
decision='ACCEPT', signature=None, card_number='xxxxxxxxxxxx1111',
first_name='John'
):
"""
Construct parameters that could be returned from CyberSource
to our payment callback.
Some values can be overridden to simulate different test scenarios,
but most are fake values captured from interactions with
a CyberSource test account.
Args:
order_id (string or int): The ID of the `Order` model.
order_amount (string): The cost of the order.
paid_amount (string): The amount the user paid using CyberSource.
Keyword Args:
            decision (string): Whether the payment was accepted, rejected, or declined.
            signature (string): If provided, use this value instead of calculating the signature.
            card_number (string): If provided, use this value instead of the default credit card number.
first_name (string): If provided, the first name of the user.
Returns:
dict
"""
# Parameters sent from CyberSource to our callback implementation
# These were captured from the CC test server.
signed_field_names = ["transaction_id",
"decision",
"req_access_key",
"req_profile_id",
"req_transaction_uuid",
"req_transaction_type",
"req_reference_number",
"req_amount",
"req_currency",
"req_locale",
"req_payment_method",
"req_override_custom_receipt_page",
"req_bill_to_forename",
"req_bill_to_surname",
"req_bill_to_email",
"req_bill_to_address_line1",
"req_bill_to_address_city",
"req_bill_to_address_state",
"req_bill_to_address_country",
"req_bill_to_address_postal_code",
"req_card_number",
"req_card_type",
"req_card_expiry_date",
"message",
"reason_code",
"auth_avs_code",
"auth_avs_code_raw",
"auth_response",
"auth_amount",
"auth_code",
"auth_trans_ref_no",
"auth_time",
"bill_trans_ref_no",
"signed_field_names",
"signed_date_time"]
# if decision is in FAILED_DECISIONS list then remove auth_amount from
# signed_field_names list.
if decision in self.FAILED_DECISIONS:
signed_field_names.remove("auth_amount")
params = {
# Parameters that change based on the test
"decision": decision,
"req_reference_number": str(order_id),
"req_amount": order_amount,
"auth_amount": paid_amount,
"req_card_number": card_number,
# Stub values
"utf8": u"✓",
"req_bill_to_address_country": "US",
"auth_avs_code": "X",
"req_card_expiry_date": "01-2018",
"bill_trans_ref_no": "85080648RYI23S6I",
"req_bill_to_address_state": "MA",
"signed_field_names": ",".join(signed_field_names),
"req_payment_method": "card",
"req_transaction_type": "sale",
"auth_code": "888888",
"req_locale": "en",
"reason_code": "100",
"req_bill_to_address_postal_code": "02139",
"req_bill_to_address_line1": "123 Fake Street",
"req_card_type": "001",
"req_bill_to_address_city": "Boston",
"signed_date_time": "2014-08-18T14:07:10Z",
"req_currency": "usd",
"auth_avs_code_raw": "I1",
"transaction_id": "4083708299660176195663",
"auth_time": "2014-08-18T140710Z",
"message": "Request was processed successfully.",
"auth_response": "100",
"req_profile_id": "0000001",
"req_transaction_uuid": "ddd9935b82dd403f9aa4ba6ecf021b1f",
"auth_trans_ref_no": "85080648RYI23S6I",
"req_bill_to_surname": "Doe",
"req_bill_to_forename": first_name,
"req_bill_to_email": "john@example.com",
"req_override_custom_receipt_page": "http://localhost:8000/shoppingcart/postpay_callback/",
"req_access_key": "abcd12345",
}
# if decision is in FAILED_DECISIONS list then remove the auth_amount from params dict
if decision in self.FAILED_DECISIONS:
del params["auth_amount"]
# Calculate the signature
params['signature'] = signature if signature is not None else self._signature(params)
return params
def _signature(self, params):
"""
Calculate the signature from a dictionary of params.
NOTE: This method uses the processor's hashing method. That method
is a thin wrapper of standard library calls, and it seemed overly complex
to rewrite that code in the test suite.
Args:
params (dict): Dictionary with a key 'signed_field_names',
which is a comma-separated list of keys in the dictionary
to include in the signature.
Returns:
string
"""
return processor_hash(
",".join([
u"{0}={1}".format(signed_field, params[signed_field])
for signed_field
in params['signed_field_names'].split(u",")
])
)
def test_process_payment_declined(self):
# Simulate a callback from CyberSource indicating that the payment was declined
params = self._signed_callback_params(self.order.id, self.COST, self.COST, decision='DECLINE')
result = process_postpay_callback(params)
# Expect that we get an error message
self.assertFalse(result['success'])
self.assertIn(u"payment was declined", result['error_html'])
| agpl-3.0 | 8,347,593,669,894,794,000 | 40.804989 | 130 | 0.599371 | false | 4.250865 | true | false | false |
SOKP/kernel_motorola_msm8226 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 | -7,861,997,407,647,542,000 | 28.407609 | 158 | 0.679357 | false | 2.69338 | false | false | false |
vponomaryov/rally | rally/plugins/openstack/context/existing_users.py | 1 | 2614 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark import context
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils as rutils
from rally import objects
from rally import osclients
LOG = logging.getLogger(__name__)
# NOTE(boris-42): This context should be hidden for now and used only by
# benchmark engine. In future during various refactoring of
# validation system and rally CI testing we will make it public
@context.context(name="existing_users", order=99, hidden=True)
class ExistingUsers(context.Context):
"""This context supports using existing users in Rally.
It uses information about deployment to properly
initialize context["users"] and context["tenants"]
So there won't be big difference between usage of "users" and
"existing_users" context.
"""
# NOTE(boris-42): We don't need to check config schema because
# this is used only by benchmark engine
CONFIG_SCHEMA = {}
def __init__(self, ctx):
super(ExistingUsers, self).__init__(ctx)
self.context["users"] = []
self.context["tenants"] = {}
@rutils.log_task_wrapper(LOG.info, _("Enter context: `existing_users`"))
def setup(self):
for user in self.config:
user_endpoint = objects.Endpoint(**user)
user_kclient = osclients.Clients(user_endpoint).keystone()
if user_kclient.tenant_id not in self.context["tenants"]:
self.context["tenants"][user_kclient.tenant_id] = {
"id": user_kclient.tenant_id,
"name": user_kclient.tenant_name
}
self.context["users"].append({
"endpoint": user_endpoint,
"id": user_kclient.user_id,
"tenant_id": user_kclient.tenant_id
})
@rutils.log_task_wrapper(LOG.info, _("Exit context: `existing_users`"))
def cleanup(self):
"""These users are not managed by Rally, so don't touch them."""
| apache-2.0 | -8,003,209,098,813,009,000 | 36.342857 | 79 | 0.65264 | false | 4.097179 | false | false | false |
robwebset/screensaver.weather | resources/lib/settings.py | 1 | 1308 | # -*- coding: utf-8 -*-
import xbmc
import xbmcaddon
ADDON = xbmcaddon.Addon(id='screensaver.weather')
ADDON_ID = ADDON.getAddonInfo('id')
# Common logging module
def log(txt, loglevel=xbmc.LOGDEBUG):
if (ADDON.getSetting("logEnabled") == "true") or (loglevel != xbmc.LOGDEBUG):
if isinstance(txt, str):
txt = txt.decode("utf-8")
message = u'%s: %s' % (ADDON_ID, txt)
xbmc.log(msg=message.encode("utf-8"), level=loglevel)
##############################
# Stores Various Settings
##############################
class Settings():
DIM_LEVEL = (
'00000000',
'11000000',
'22000000',
'33000000',
'44000000',
'55000000',
'66000000',
'77000000',
'88000000',
'99000000',
'AA000000',
'BB000000',
'CC000000',
'DD000000',
'EE000000'
)
@staticmethod
def getDimValue():
        # The actual dim level (hex) is one of the DIM_LEVEL values above.
        # 00000000 leaves the screen unchanged.
        # That gives a total of 15 different options.
        # FF000000 would be completely black, so we do not use that one.
if ADDON.getSetting("dimLevel"):
return Settings.DIM_LEVEL[int(ADDON.getSetting("dimLevel"))]
else:
return '00000000'
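# Illustrative example (not from the original add-on): a "dimLevel" setting of 3
# maps to DIM_LEVEL[3] == '33000000', i.e. an ARGB overlay with alpha 0x33,
# roughly a 20% black dim.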
| gpl-2.0 | -4,987,328,565,707,168,000 | 25.693878 | 81 | 0.542049 | false | 3.824561 | false | false | false |
niekas/dakis | dakis/website/migrations/openid/0001_initial.py | 5 | 1240 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='OpenIDNonce',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('server_url', models.CharField(max_length=255)),
('timestamp', models.IntegerField()),
('salt', models.CharField(max_length=255)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='OpenIDStore',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('server_url', models.CharField(max_length=255)),
('handle', models.CharField(max_length=255)),
('secret', models.TextField()),
('issued', models.IntegerField()),
('lifetime', models.IntegerField()),
('assoc_type', models.TextField()),
],
),
]
| agpl-3.0 | -1,867,539,125,413,394,700 | 34.428571 | 114 | 0.533871 | false | 4.644195 | false | false | false |
Jonbean/DSSM | classification/utils.py | 8 | 5542 | '''
Author: Jon Tsai
Created: May 29 2016
'''
import numpy as np
import theano
from time import sleep
import sys
def progress_bar(percent, speed):
i = int(percent)/2
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("[%-50s] %d%% %f instances/s" % ('='*i, percent, speed))
sys.stdout.flush()
def combine_sents(sent_set):
'''
parameter: sent_set ==> 2D sentences set
==> type: list[list[list]]
return: sents1D ==> 1D sentences set
==> type: list[list]
This function will combine 2D sentence set
into 1D sentence set.
e.g.
[
[[sent1], [sent2], [sent3], ..., [sentn]]
...
[[sent1], [sent2], [sent3], ..., [sentn]]
]
==>
[
[sentences1],
...
[sentencesn]
]
'''
sents1D = []
for doc in sent_set:
combine_sent = np.array([])
for sent in doc:
combine_sent = np.concatenate((combine_sent,sent))
sents1D.append(combine_sent)
return sents1D
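# Example (illustrative only, with toy word-index lists; not part of the original module):
# >>> combine_sents([[[1, 2], [3]], [[4], [5, 6]]])
# [array([1., 2., 3.]), array([4., 5., 6.])]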
def shuffle_index(length_of_indices_ls):
'''
----------
parameter:
----------
length_of_indices_ls: type = int
----------
return:
----------
a shuffled numpy array of indices
'''
ls = np.arange(length_of_indices_ls)
np.random.shuffle(ls)
return ls
def padding(batch_input_list):
'''
----------
parameter:
----------
batch_input_list: type = list(list)
----------
return:
----------
numpy.ndarray: shape == (n_batch, max_time_step)
'''
n_batch = len(batch_input_list)
max_time_step = max([len(batch_input_list[i]) for i in range(n_batch)])
padding_result = np.zeros((n_batch, max_time_step))
for batch in range(n_batch):
padding_result[batch] = np.concatenate((np.asarray(batch_input_list[batch]),
np.zeros(max_time_step - len(batch_input_list[batch]))))
return padding_result.astype('int64')
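# Example (illustrative only): shorter instances are zero-padded on the right
# up to the longest instance in the batch.
# >>> padding([[1, 2, 3], [4, 5]])
# array([[1, 2, 3],
#        [4, 5, 0]])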
def mask_generator(indices_matrix):
'''
----------
parameter:
----------
indices_matrix: type = list[list]
----------
return:
----------
mask : type = np.ndarray
a mask matrix of a batch of varied length instances
'''
n_batch = len(indices_matrix)
len_ls = [len(sent) for sent in indices_matrix]
max_len = max(len_ls)
mask = np.zeros((n_batch, max_len))
for i in range(n_batch):
for j in range(len(indices_matrix[i])):
mask[i][j] = 1
return mask
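# Example (illustrative only): the mask marks real tokens with 1 and padding with 0.
# >>> mask_generator([[1, 2, 3], [4, 5]])
# array([[1., 1., 1.],
#        [1., 1., 0.]])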
def mlp_mask_generator(indices_matrix, wemb_size):
'''
----------
parameter:
----------
indices_matrix: type = list[list]
----------
return:
----------
mask : type = np.ndarray
mask.shape = (n_batch, wemb_size)
'''
n_batch = len(indices_matrix)
len_ls = [len(sent) for sent in indices_matrix]
mask = np.ones((n_batch, wemb_size))
for i in range(n_batch):
mask[i] = mask[i] * len_ls[i]
return mask
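# Example (illustrative only): every row is filled with that instance's length.
# >>> mlp_mask_generator([[1, 2, 3], [4, 5]], 4)
# array([[3., 3., 3., 3.],
#        [2., 2., 2., 2.]])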
def fake_input_generator(max_index, batch_number, length_range):
'''
----------
parameter:
----------
max_index: type = int
batch_number: type = int
length_range: tuple(int), len(length_range) = 2
e.g. (50, 70)
----------
return:
----------
fake_data: type = list[list]
format: fake_data.shape[0] = batch_number
length_range[0] <= len(fake_data[i]) <= length_range[1]
0 <= fake_data[i][j] <= max_index
'''
max_time_step = length_range[0] + np.random.randint(length_range[1] - length_range[0] + 1)
fake_data = np.zeros((batch_number, max_time_step))
mask = np.zeros((batch_number, max_time_step)).astype(theano.config.floatX)
len_range = max_time_step - length_range[0]
assert len_range >= 0
#pick a row to be the max length row
row = np.random.randint(batch_number)
fake_data[row] = np.random.randint(max_index+1, size = (max_time_step,))
mask[row] = np.ones(max_time_step)
for batch in range(batch_number):
if batch == row:
continue
length = length_range[0]+np.random.randint(len_range)
fake_data[batch] = np.concatenate((np.random.randint(max_index+1 ,size = (length,)),
np.zeros(max_time_step - length)))
mask[batch] = np.concatenate((np.ones(length), np.zeros(max_time_step - length)))
return (fake_data.astype('int32'), mask)
def fake_data(max_index, batch_number, max_time_step, min_time_step):
fake_data = np.zeros((batch_number, max_time_step))
mask = np.zeros((batch_number, max_time_step)).astype(theano.config.floatX)
len_range = max_time_step - min_time_step
assert len_range >= 0
#pick a row to be the max length row
row = np.random.randint(batch_number)
fake_data[row] = np.random.randint(max_index+1, size = (max_time_step,))
mask[row] = np.ones(max_time_step)
for batch in range(batch_number):
if batch == row:
continue
length = min_time_step+np.random.randint(len_range)
fake_data[batch] = np.concatenate((np.random.randint(max_index+1 ,size = (length,)),
np.zeros(max_time_step - length)))
mask[batch] = np.concatenate((np.ones(length), np.zeros(max_time_step - length)))
    return (fake_data.astype('int32'), mask)
| gpl-3.0 | -28,399,149,772,886,012 | 26.305419 | 104 | 0.544388 | false | 3.459426 | false | false | false |
Treeki/NewerSMBW | Koopatlas/src/editorui/objects.py | 1 | 4434 | from common import *
from editorcommon import *
import weakref
class KPEditorObject(KPEditorItem):
SNAP_TO = (24,24)
def __init__(self, obj, layer):
KPEditorItem.__init__(self)
obj.qtItem = self
self._objRef = weakref.ref(obj)
self._layerRef = weakref.ref(layer)
self._updatePosition()
self._updateSize()
self.setAcceptHoverEvents(True)
self.resizing = None
if not hasattr(KPEditorObject, 'SELECTION_PEN'):
KPEditorObject.SELECTION_PEN = QtGui.QPen(Qt.green, 1, Qt.DotLine)
# I don't bother setting the ZValue because it doesn't quite matter:
# only one layer's objects are ever clickable, and drawBackground takes
# care of the layered drawing
def _updatePosition(self):
self.ignoreMovement = True
x,y = self._objRef().position
self.setPos(x*24, y*24)
self.ignoreMovement = False
def _updateSize(self):
self.prepareGeometryChange()
obj = self._objRef()
w,h = obj.size
self._boundingRect = QtCore.QRectF(0, 0, w*24, h*24)
self._selectionRect = QtCore.QRectF(0, 0, w*24-1, h*24-1)
self._resizerEndXY = (w*24-5, h*24-5)
def paint(self, painter, option, widget):
if self.isSelected():
painter.setPen(self.SELECTION_PEN)
painter.drawRect(self._selectionRect)
def hoverMoveEvent(self, event):
if self._layerRef() != KP.mapScene.currentLayer:
self.setCursor(Qt.ArrowCursor)
return
pos = event.pos()
bit = self.resizerPortionAt(pos.x(), pos.y())
if bit == 1 or bit == 4:
self.setCursor(Qt.SizeFDiagCursor)
elif bit == 2 or bit == 3:
self.setCursor(Qt.SizeBDiagCursor)
elif bit == 7 or bit == 8:
self.setCursor(Qt.SizeHorCursor)
elif bit == 5 or bit == 6:
self.setCursor(Qt.SizeVerCursor)
else:
self.setCursor(Qt.ArrowCursor)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
pos = event.pos()
bit = self.resizerPortionAt(pos.x(), pos.y())
if self._layerRef() == KP.mapScene.currentLayer and bit:
# if bit:
event.accept()
x, xSide, y, ySide = False, None, False, None
if bit == 1 or bit == 7 or bit == 3:
x, xSide = True, 1
elif bit == 2 or bit == 4 or bit == 8:
x, xSide = True, 0
if bit == 1 or bit == 2 or bit == 5:
y, ySide = True, 1
elif bit == 3 or bit == 4 or bit == 6:
y, ySide = True, 0
self.resizing = (x, xSide, y, ySide)
return
KPEditorItem.mousePressEvent(self, event)
def _tryAndResize(self, obj, axisIndex, mousePosition, stationarySide):
objPosition = obj.position[axisIndex]
objSize = obj.size[axisIndex]
if stationarySide == 0:
# Resize the right/bottom side
relativeMousePosition = mousePosition - objPosition
newSize = relativeMousePosition + 1
if newSize == objSize or newSize < 1:
return False
if axisIndex == 1:
obj.size = (obj.size[0], newSize)
else:
obj.size = (newSize, obj.size[1])
else:
# Resize the left/top side
rightSide = objPosition + objSize - 1
newLeftSide = mousePosition
newPosition = newLeftSide
newSize = rightSide - newLeftSide + 1
if newSize < 1:
return False
if newPosition == objPosition and newSize == objSize:
return False
if axisIndex == 1:
obj.position = (obj.position[0], newPosition)
obj.size = (obj.size[0], newSize)
else:
obj.position = (newPosition, obj.position[1])
obj.size = (newSize, obj.size[1])
return True
def mouseMoveEvent(self, event):
if self.resizing:
obj = self._objRef()
scenePos = event.scenePos()
hasChanged = False
resizeX, xSide, resizeY, ySide = self.resizing
if resizeX:
hasChanged |= self._tryAndResize(obj, 0, int(scenePos.x() / 24), xSide)
if resizeY:
hasChanged |= self._tryAndResize(obj, 1, int(scenePos.y() / 24), ySide)
if hasChanged:
obj.updateCache()
self._layerRef().updateCache()
self._updatePosition()
self._updateSize()
else:
KPEditorItem.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
if self.resizing and event.button() == Qt.LeftButton:
self.resizing = None
else:
KPEditorItem.mouseReleaseEvent(self, event)
def _itemMoved(self, oldX, oldY, newX, newY):
obj = self._objRef()
obj.position = (newX/24, newY/24)
self._layerRef().updateCache()
def remove(self, withItem=False):
obj = self._objRef()
layer = self._layerRef()
layer.objects.remove(obj)
layer.updateCache()
if withItem:
self.scene().removeItem(self)
| mit | -8,735,065,201,513,281,000 | 22.967568 | 75 | 0.662382 | false | 2.871762 | false | false | false |
eestay/edx-ora2 | scripts/render_templates.py | 7 | 3912 | #!/usr/bin/env python
"""
Render Django templates.
Useful for generating fixtures for the JavaScript unit test suite.
Usage:
python render_templates.py path/to/templates.json
where "templates.json" is a JSON file of the form:
[
{
"template": "openassessmentblock/oa_base.html",
"context": {
"title": "Lorem",
"question": "Ipsum?"
},
"output": "oa_base.html"
},
...
]
The rendered templates are saved to "output" relative to the
templates.json file's directory.
"""
import sys
import os.path
import json
import re
import dateutil.parser
import pytz
# This is a bit of a hack to ensure that the root repo directory
# is in the Python path, so Django can find the settings module.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from django.template.context import Context
from django.template.loader import get_template
USAGE = u"{prog} TEMPLATE_DESC"
DATETIME_REGEX = re.compile("^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}$")
def parse_dates(context):
"""
Transform datetime strings into Python datetime objects.
JSON does not provide a standard way to serialize datetime objects,
but some of the templates expect that the context contains
Python datetime objects.
This (somewhat hacky) solution recursively searches the context
for formatted datetime strings of the form "2014-01-02T12:34"
and converts them to Python datetime objects with the timezone
set to UTC.
Args:
context (JSON-serializable): The context (or part of the context)
that will be passed to the template. Dictionaries and lists
will be recursively searched and transformed.
Returns:
JSON-serializable of the same type as the `context` argument.
"""
if isinstance(context, dict):
return {
key: parse_dates(value)
for key, value in context.iteritems()
}
elif isinstance(context, list):
return [
parse_dates(item)
for item in context
]
elif isinstance(context, basestring):
if DATETIME_REGEX.match(context) is not None:
return dateutil.parser.parse(context).replace(tzinfo=pytz.utc)
return context
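# Example (illustrative only, with made-up context keys): nested datetime strings
# become timezone-aware datetimes, everything else is left untouched.
# >>> parse_dates({"due": "2014-01-02T12:34", "title": "Lorem"})
# {'due': datetime.datetime(2014, 1, 2, 12, 34, tzinfo=<UTC>), 'title': 'Lorem'}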
def render_templates(root_dir, template_json):
"""
Create rendered templates.
Args:
root_dir (str): The directory in which to write the rendered templates.
template_json (dict): Description of which templates to render. Must be a list
of dicts, each containing keys "template" (str), "context" (dict), and "output" (str).
Returns:
None
"""
for template_dict in template_json:
template = get_template(template_dict['template'])
context = parse_dates(template_dict['context'])
rendered = template.render(Context(context))
output_path = os.path.join(root_dir, template_dict['output'])
try:
with open(output_path, 'w') as output_file:
output_file.write(rendered.encode('utf-8'))
except IOError:
print "Could not write rendered template to file: {}".format(output_path)
sys.exit(1)
def main():
"""
Main entry point for the script.
"""
if len(sys.argv) < 2:
print USAGE.format(sys.argv[0])
sys.exit(1)
try:
with open(sys.argv[1]) as template_json:
root_dir = os.path.dirname(sys.argv[1])
render_templates(root_dir, json.load(template_json))
except IOError as ex:
print u"Could not open template description file: {}".format(sys.argv[1])
print(ex)
sys.exit(1)
except ValueError as ex:
print u"Could not parse template description as JSON: {}".format(sys.argv[1])
print(ex)
sys.exit(1)
if __name__ == '__main__':
main()
| agpl-3.0 | 2,144,937,601,752,186,000 | 28.413534 | 98 | 0.629601 | false | 4.041322 | false | false | false |
scalient/ebsmount | cmd_manual.py | 2 | 2801 | #!/usr/bin/python
# Copyright (c) 2010 Alon Swartz <alon@turnkeylinux.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""EBS Mount - manually mount EBS device (simulates udev add trigger)
Arguments:
device EBS device to mount (e.g., /dev/xvdf, /dev/vda)
Options:
--format=FS Format device prior to mount (e.g., --format=ext3)
"""
import re
import os
import sys
import getopt
import ebsmount
import executil
from utils import config, is_mounted
def usage(e=None):
if e:
print >> sys.stderr, "error: " + str(e)
print >> sys.stderr, "Syntax: %s [-opts] <device>" % sys.argv[0]
print >> sys.stderr, __doc__.strip()
sys.exit(1)
def fatal(s):
print >> sys.stderr, "error: " + str(s)
sys.exit(1)
def _expected_devpath(devname, devpaths):
"""ugly hack to test expected structure of devpath"""
raw_output = executil.getoutput('udevadm info -a -n %s' % devname)
for line in raw_output.splitlines():
line = line.strip()
m = re.match("^looking at parent device '(.*)':", line)
if m:
devpath = m.group(1)
for pattern in devpaths:
if re.search(pattern, devpath):
return True
return False
def main():
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'h', ['format='])
except getopt.GetoptError, e:
usage(e)
filesystem = None
for opt, val in opts:
if opt == '-h':
usage()
if opt == '--format':
filesystem = val
if not len(args) == 1:
usage()
devname = args[0]
if not os.path.exists(devname):
fatal("%s does not exist" % devname)
if not _expected_devpath(devname, config.devpaths.split()):
fatal("devpath not of expected structure, or failed lookup")
if filesystem:
if is_mounted(devname):
fatal("%s is mounted" % devname)
if not filesystem in config.filesystems.split():
fatal("%s is not supported in %s" % (filesystem, config.CONF_FILE))
executil.system("mkfs." + filesystem, "-q", devname)
ebsmount.ebsmount_add(devname, config.mountdir)
if __name__=="__main__":
main()
| gpl-2.0 | 4,128,131,317,433,193,500 | 26.732673 | 79 | 0.625848 | false | 3.572704 | false | false | false |
vicky2135/lucious | lucious/lib/python2.7/site-packages/pip/_vendor/progress/__init__.py | 916 | 3023 | # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time
__version__ = '1.2'
class Infinite(object):
file = stderr
sma_window = 10
def __init__(self, *args, **kwargs):
self.index = 0
self.start_ts = time()
self._ts = self.start_ts
self._dt = deque(maxlen=self.sma_window)
for key, val in kwargs.items():
setattr(self, key, val)
def __getitem__(self, key):
if key.startswith('_'):
return None
return getattr(self, key, None)
@property
def avg(self):
return sum(self._dt) / len(self._dt) if self._dt else 0
@property
def elapsed(self):
return int(time() - self.start_ts)
@property
def elapsed_td(self):
return timedelta(seconds=self.elapsed)
def update(self):
pass
def start(self):
pass
def finish(self):
pass
def next(self, n=1):
if n > 0:
now = time()
dt = (now - self._ts) / n
self._dt.append(dt)
self._ts = now
self.index = self.index + n
self.update()
def iter(self, it):
for x in it:
yield x
self.next()
self.finish()
class Progress(Infinite):
def __init__(self, *args, **kwargs):
super(Progress, self).__init__(*args, **kwargs)
self.max = kwargs.get('max', 100)
@property
def eta(self):
return int(ceil(self.avg * self.remaining))
@property
def eta_td(self):
return timedelta(seconds=self.eta)
@property
def percent(self):
return self.progress * 100
@property
def progress(self):
return min(1, self.index / self.max)
@property
def remaining(self):
return max(self.max - self.index, 0)
def start(self):
self.update()
def goto(self, index):
incr = index - self.index
self.next(incr)
def iter(self, it):
try:
self.max = len(it)
except TypeError:
pass
for x in it:
yield x
self.next()
self.finish()
| bsd-3-clause | 8,970,014,737,514,017,000 | 23.577236 | 74 | 0.600397 | false | 3.936198 | false | false | false |