code | repo_name | path | language | license | size |
---|---|---|---|---|---|
#!/usr/bin/python
from app import app
if __name__ == "__main__":
app.run(debug=True, port=5001, host='0.0.0.0')
| jelly/leetnews | run.py | Python | mit | 118 |
#!/usr/bin/env python
"""
=====
Atlas
=====
Atlas of all graphs of 6 nodes or less.
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import random
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or pydot.")
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
from networkx.generators.atlas import graph_atlas_g
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas = graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U = nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree = [n for n in G if G.degree(n) == 0]
for n in zerodegree:
G.remove_node(n)
U = nx.disjoint_union(U, G)
# iterator of graphs of all connected components
C = (U.subgraph(c) for c in nx.connected_components(U))
UU = nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist = [] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G, nlist):
nlist.append(G)
UU = nx.disjoint_union(UU, G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1, G2):
return True
return False
if __name__ == '__main__':
G = atlas6()
print("graph has %d nodes with %d edges"
% (nx.number_of_nodes(G), nx.number_of_edges(G)))
print(nx.number_connected_components(G), "connected components")
plt.figure(1, figsize=(8, 8))
# layout graphs with positions using graphviz neato
pos = graphviz_layout(G, prog="neato")
# color nodes the same in each connected subgraph
C = (G.subgraph(c) for c in nx.connected_components(G))
for g in C:
c = [random.random()] * nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.show()
| sserrot/champion_relationships | venv/share/doc/networkx-2.4/examples/drawing/plot_atlas.py | Python | mit | 2,796 |
# coding: utf-8
# Copyright 2017 video++ Project, SJTU MediaLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vpp.config import CONF
class StorageDriver(object):
def __init__(self):
pass
def upload(self, src_file, dst_file):
""" upload file to storage server
        :src_file: abs path of local file
        :dst_file: abs path (based on base work dir) of remote file
"""
pass
def download(self, src, dst):
""" download file from storage server
"""
pass
def mkdir(self, dirname):
""" mkdir on storage server
:dirname: dir path from driver's base work dir. e.g. "dirname=a/b/c"
will be "<base work dir>/a/b/c" in effect
"""
pass
def rmdir(self, dirname):
""" rmdir on storage server
"""
pass
def rmtree(self, jobcxt):
""" rmtree on storage server
"""
pass
def rm(self, jobcxt):
""" rm a file on storage server
"""
pass
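# --- Illustrative sketch (not part of the original vpp module) ---
# A hypothetical local-filesystem driver showing how the interface above is meant
# to be used: every remote path is resolved against a driver-level base work dir,
# so mkdir("a/b/c") creates "<base work dir>/a/b/c".  The class name and the
# base_dir default below are assumptions made purely for illustration.
import os
import shutil
class LocalStorageDriver(StorageDriver):
    def __init__(self, base_dir="/tmp/vpp-storage"):
        super(LocalStorageDriver, self).__init__()
        self._base = base_dir
    def _abs(self, path):
        # resolve a path relative to the base work dir
        return os.path.join(self._base, path.lstrip("/"))
    def mkdir(self, dirname):
        target = self._abs(dirname)
        if not os.path.isdir(target):
            os.makedirs(target)
    def upload(self, src_file, dst_file):
        # ensure the destination directory exists, then copy the local file up
        self.mkdir(os.path.dirname(dst_file))
        shutil.copy(src_file, self._abs(dst_file))
    def download(self, src, dst):
        shutil.copy(self._abs(src), dst)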
| ArthurChiao/videoplusplus | vpp/storage/driver.py | Python | apache-2.0 | 1,521 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from superdesk.io.rfc822 import rfc822Parser
from superdesk.tests import TestCase
from superdesk.tests import setup
class rfc822TestCase(TestCase):
filename = 'simple_email.txt'
def setUp(self):
setup(context=self)
with self.app.app_context():
provider = {'name': 'Test'}
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.join(dirname, 'fixtures', self.filename)
with open(fixture, mode='rb') as f:
bytes = f.read()
parser = rfc822Parser()
self.items = parser.parse_email([(1, bytes)], provider)
def test_headline(self):
self.assertEqual(self.items[0]['headline'], 'Test message 1234')
def test_body(self):
self.assertEquals(self.items[0]['body_html'].strip(), '<div>body text<br/><div>\n</div></div>')
class rfc822ComplexTestCase(TestCase):
filename = 'composite_email.txt'
def setUp(self):
setup(context=self)
with self.app.app_context():
provider = {'name': 'Test'}
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.join(dirname, 'fixtures', self.filename)
with open(fixture, mode='rb') as f:
bytes = f.read()
parser = rfc822Parser()
self.items = parser.parse_email([(1, bytes)], provider)
def test_composite(self):
self.assertEqual(len(self.items), 3)
for item in self.items:
self.assertIn('versioncreated', item)
class rfc822OddCharSet(TestCase):
filename = 'odd_charset_email.txt'
def setUp(self):
setup(context=self)
with self.app.app_context():
provider = {'name': 'Test'}
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.join(dirname, 'fixtures', self.filename)
with open(fixture, mode='rb') as f:
bytes = f.read()
parser = rfc822Parser()
self.items = parser.parse_email([(1, bytes)], provider)
def test_headline(self):
# This tests a subject that fails to decode but we just try a string conversion
self.assertEqual(self.items[0]['headline'], '=?windows-1252?Q?TravTalk���s_Special_for_TAAI_convention?=')
def test_body(self):
self.assertRegex(self.items[0]['body_html'], '<span>')
class rfc822CharSetInSubject(TestCase):
filename = 'charset_in_subject_email.txt'
def setUp(self):
setup(context=self)
with self.app.app_context():
provider = {'name': 'Test'}
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.join(dirname, 'fixtures', self.filename)
with open(fixture, mode='rb') as f:
bytes = f.read()
parser = rfc822Parser()
self.items = parser.parse_email([(1, bytes)], provider)
def test_headline(self):
        # This tests a subject that has a charset that decodes correctly
self.assertEqual(self.items[0]['headline'], 'Google Apps News March 2015')
| vied12/superdesk | server/superdesk/io/rfc822_parser_test.py | Python | agpl-3.0 | 3,444 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# mail_receiver.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
MailReceiver service definition
If there's a user facing problem when processing an email, it will be
bounced back to the sender.
User facing problems could be:
- Unknown user (missing uuid)
- Public key not found
Any other problem is a bug, which will be logged. Until the bug is
fixed, the email will stay in there waiting.
"""
import os
import uuid as pyuuid
import json
import email.utils
import socket
from email import message_from_string
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate
from email.header import decode_header
from twisted.application.service import Service
from twisted.internet import inotify, defer, task, reactor
from twisted.python import filepath, log
from leap.soledad.common.crypto import (
EncryptionSchemes,
ENC_JSON_KEY,
ENC_SCHEME_KEY,
)
from leap.soledad.common.couch import CouchDatabase, CouchDocument
from leap.keymanager import openpgp
BOUNCE_TEMPLATE = """
Delivery to the following recipient failed:
{0}
Reasons:
{1}
Original message:
{2}
""".strip()
from twisted.internet import protocol
from twisted.internet.error import ProcessDone
class BouncerSubprocessProtocol(protocol.ProcessProtocol):
"""
Bouncer subprocess protocol that will feed the msg contents to be
bounced through stdin
"""
def __init__(self, msg):
"""
Constructor for the BouncerSubprocessProtocol
:param msg: Message to send to stdin when the process has
launched
:type msg: str
"""
self._msg = msg
self._outBuffer = ""
self._errBuffer = ""
self._d = None
def connectionMade(self):
self._d = defer.Deferred()
self.transport.write(self._msg)
self.transport.closeStdin()
def outReceived(self, data):
self._outBuffer += data
def errReceived(self, data):
self._errBuffer += data
def processEnded(self, reason):
if reason.check(ProcessDone):
self._d.callback(self._outBuffer)
else:
self._d.errback(reason)
def async_check_output(args, msg):
"""
Async spawn a process and return a defer to be able to check the
output with a callback/errback
:param args: the command to execute along with the params for it
:type args: list of str
:param msg: string that will be send to stdin of the process once
it's spawned
:type msg: str
:rtype: defer.Deferred
"""
pprotocol = BouncerSubprocessProtocol(msg)
reactor.spawnProcess(pprotocol, args[0], args)
    return pprotocol._d  # the Deferred created in connectionMade
class MailReceiver(Service):
"""
Service that monitors incoming email and processes it
"""
INCOMING_KEY = 'incoming'
ERROR_DECRYPTING_KEY = "errdecr"
def __init__(self, mail_couch_url, users_cdb, directories, bounce_from,
bounce_subject):
"""
Constructor
:param mail_couch_url: URL prefix for the couchdb where mail
should be stored
:type mail_couch_url: str
:param users_cdb: CouchDB instance from where to get the uuid
and pubkey for a user
:type users_cdb: ConnectedCouchDB
:param directories: list of directories to monitor
:type directories: list of tuples (path: str, recursive: bool)
:param bounce_from: Email address of the bouncer
:type bounce_from: str
:param bounce_subject: Subject line used in the bounced mail
:type bounce_subject: str
"""
# Service doesn't define an __init__
self._mail_couch_url = mail_couch_url
self._users_cdb = users_cdb
self._directories = directories
self._domain = socket.gethostbyaddr(socket.gethostname())[0]
self._processing_skipped = False
self._bounce_from = bounce_from
self._bounce_subject = bounce_subject
def startService(self):
"""
Starts the MailReceiver service
"""
Service.startService(self)
self.wm = inotify.INotify()
self.wm.startReading()
mask = inotify.IN_CREATE
for directory, recursive in self._directories:
log.msg("Watching %r --- Recursive: %r" % (directory, recursive))
self.wm.watch(filepath.FilePath(directory), mask,
callbacks=[self._process_incoming_email],
recursive=recursive)
self._lcall = task.LoopingCall(self._process_skipped)
# Run once every half an hour, but don't start right now
self._lcall.start(interval=60*30, now=False)
def _encrypt_message(self, pubkey, message):
"""
Given a public key and a message, it encrypts the message to
that public key.
The address is needed in order to build the OpenPGPKey object.
:param pubkey: public key for the owner of the message
:type pubkey: str
:param message: message contents
:type message: email.message.Message
:return: doc to sync with Soledad or None, None if something
went wrong.
:rtype: CouchDocument
"""
if pubkey is None or len(pubkey) == 0:
log.msg("_encrypt_message: Something went wrong, here's all "
"I know: %r" % (pubkey,))
return None
# find message's encoding
message_as_string = message.as_string()
doc = CouchDocument(doc_id=str(pyuuid.uuid4()))
# store plain text if pubkey is not available
data = {'incoming': True, 'content': message_as_string}
if pubkey is None or len(pubkey) == 0:
doc.content = {
self.INCOMING_KEY: True,
self.ERROR_DECRYPTING_KEY: False,
ENC_SCHEME_KEY: EncryptionSchemes.NONE,
ENC_JSON_KEY: json.dumps(data,
ensure_ascii=False)
}
return doc
# otherwise, encrypt
with openpgp.TempGPGWrapper(gpgbinary='/usr/bin/gpg') as gpg:
gpg.import_keys(pubkey)
key = gpg.list_keys().pop()
# We don't care about the actual address, so we use a
# dummy one, we just care about the import of the pubkey
openpgp_key = openpgp._build_key_from_gpg("dummy@mail.com",
key, pubkey)
# add X-Leap-Provenance header if message is not encrypted
if message.get_content_type() != 'multipart/encrypted' and \
'-----BEGIN PGP MESSAGE-----' not in \
message_as_string:
message.add_header(
'X-Leap-Provenance',
email.utils.formatdate(),
pubkey=openpgp_key.key_id)
data = {'incoming': True, 'content': message.as_string()}
doc.content = {
self.INCOMING_KEY: True,
self.ERROR_DECRYPTING_KEY: False,
ENC_SCHEME_KEY: EncryptionSchemes.PUBKEY,
ENC_JSON_KEY: str(gpg.encrypt(
json.dumps(data, ensure_ascii=False),
openpgp_key.fingerprint,
symmetric=False))
}
return doc
def _export_message(self, uuid, doc):
"""
Given a UUID and a CouchDocument, it saves it directly in the
couchdb that serves as a backend for Soledad, in a db
accessible to the recipient of the mail.
:param uuid: the mail owner's uuid
:type uuid: str
:param doc: CouchDocument that represents the email
:type doc: CouchDocument
:return: True if it's ok to remove the message, False
otherwise
:rtype: bool
"""
if uuid is None or doc is None:
log.msg("_export_message: Something went wrong, here's all "
"I know: %r | %r" % (uuid, doc))
return False
log.msg("Exporting message for %s" % (uuid,))
db = CouchDatabase(self._mail_couch_url, "user-%s" % (uuid,))
db.put_doc(doc)
log.msg("Done exporting")
return True
def _conditional_remove(self, do_remove, filepath):
"""
Removes the message if do_remove is True.
:param do_remove: True if the message should be removed, False
otherwise
:type do_remove: bool
:param filepath: path to the mail
:type filepath: twisted.python.filepath.FilePath
"""
if do_remove:
# remove the original mail
try:
log.msg("Removing %r" % (filepath.path,))
filepath.remove()
log.msg("Done removing")
except Exception:
log.err()
else:
log.msg("Not removing %r" % (filepath.path,))
def _get_owner(self, mail):
"""
Given an email, returns the uuid of the owner.
:param mail: mail to analyze
:type mail: email.message.Message
:returns: uuid
:rtype: str or None
"""
uuid = None
delivereds = mail.get_all("Delivered-To")
if delivereds is None:
return None
for to in delivereds:
name, addr = email.utils.parseaddr(to)
parts = addr.split("@")
if len(parts) > 1 and parts[1] == self._domain:
uuid = parts[0]
break
return uuid
@defer.inlineCallbacks
def _bounce_mail(self, orig_msg, filepath, reason):
"""
        Bounces the email contained in orig_msg to its sender and
removes it from the queue.
:param orig_msg: Message that is going to be bounced
:type orig_msg: email.message.Message
:param filepath: Path for that message
:type filepath: twisted.python.filepath.FilePath
:param reason: Brief explanation about why it's being bounced
:type reason: str
"""
to = orig_msg.get("From")
msg = MIMEMultipart()
msg['From'] = self._bounce_from
msg['To'] = to
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = self._bounce_subject
decoded_to = " ".join([x[0] for x in decode_header(to)])
text = BOUNCE_TEMPLATE.format(decoded_to,
reason,
orig_msg.as_string())
msg.attach(MIMEText(text))
yield async_check_output(["/usr/sbin/sendmail", "-t"], msg.as_string())
yield self._conditional_remove(True, filepath)
def sleep(self, secs):
"""
Async sleep for a defer. Use this when you want to wait for
another (non atomic) defer to finish.
:param secs: seconds to wait (not really accurate, it depends
on the event queue)
:type secs: int
:rtype: twisted.internet.defer.Deferred
"""
from twisted.internet import reactor
d = defer.Deferred()
reactor.callLater(secs, d.callback, None)
return d
@defer.inlineCallbacks
def _process_skipped(self):
"""
Recursively or not (depending on the configuration) process
all the watched directories for unprocessed mail and try to
process it.
"""
if self._processing_skipped:
defer.returnValue(None)
self._processing_skipped = True
try:
log.msg("Starting processing skipped mail...")
log.msg("-"*50)
for directory, recursive in self._directories:
for root, dirs, files in os.walk(directory):
for fname in files:
try:
fullpath = os.path.join(root, fname)
fpath = filepath.FilePath(fullpath)
yield self._step_process_mail_backend(fpath)
except Exception:
log.msg("Error processing skipped mail: %r" % \
(fullpath,))
log.err()
if not recursive:
break
except Exception:
log.msg("Error processing skipped mail")
log.err()
finally:
self._processing_skipped = False
log.msg("+"*50)
log.msg("Done processing skipped mail")
@defer.inlineCallbacks
def _step_process_mail_backend(self, filepath):
"""
        Processes the email pointed to by filepath in an async
        fashion. Yield this method in another inlineCallbacks method
        or return it for it to be run.
:param filepath: Path of the file that changed
:type filepath: twisted.python.filepath.FilePath
"""
log.msg("Processing new mail at %r" % (filepath.path,))
with filepath.open("r") as f:
mail_data = f.read()
msg = message_from_string(mail_data)
uuid = self._get_owner(msg)
if uuid is None:
log.msg("Don't know how to deliver mail %r, skipping..." %
(filepath.path,))
bounce_reason = "Missing UUID: There was a problem " \
"locating the user in our database."
yield self._bounce_mail(msg, filepath, bounce_reason)
defer.returnValue(None)
log.msg("Mail owner: %s" % (uuid,))
if uuid is None:
log.msg("BUG: There was no uuid!")
defer.returnValue(None)
pubkey = yield self._users_cdb.getPubKey(uuid)
if pubkey is None or len(pubkey) == 0:
log.msg("No public key, stopping the processing chain")
bounce_reason = "Missing PubKey: There was a problem " \
"locating the user's public key in our " \
"database."
yield self._bounce_mail(msg, filepath, bounce_reason)
defer.returnValue(None)
log.msg("Encrypting message to %s's pubkey" % (uuid,))
doc = yield self._encrypt_message(pubkey, msg)
do_remove = yield self._export_message(uuid, doc)
yield self._conditional_remove(do_remove, filepath)
@defer.inlineCallbacks
def _process_incoming_email(self, otherself, filepath, mask):
"""
Callback that processes incoming email.
:param otherself: Watch object for the current callback from
inotify.
:type otherself: twisted.internet.inotify._Watch
:param filepath: Path of the file that changed
:type filepath: twisted.python.filepath.FilePath
:param mask: identifier for the type of change that triggered
this callback
:type mask: int
"""
try:
while self._processing_skipped:
log.msg("Waiting for the process of skipped mail to be done...")
yield self.sleep(10) # NO-OP
if os.path.split(filepath.dirname())[-1] == "new":
yield self._step_process_mail_backend(filepath)
except Exception as e:
log.msg("Something went wrong while processing {0!r}: {1!r}"
.format(filepath, e))
log.err()
| meskio/leap_mx | src/leap/mx/mail_receiver.py | Python | agpl-3.0 | 16,282 |
"""
File: <Sin2_plus_cos2>
Copyright (c) 2016 <Lauren Graziani>
License: MIT
<debugging a program>
"""
"""
# a
from math import sin, cos #need to import pi from math
x = pi/4
1_val = math.sin^2(x) + math.cos^2(x) #can't start a variable with a number, powers are written by **
print 1_VAL
"""
# a debugged
from math import sin, cos, pi
x = pi / 4
val1 = sin(x) ** 2 + cos(x) ** 2
print val1
"""
# b
v0 = 3 m/s #get rid of m/s
t = 1 s #get rid of s
a = 2 m/s**2 # **2 should come right after 2, get rid of m/s
s = v0.t + 0,5.a.t**2 #v0.t should be v0*t, change comma to period and periods to *
print s
"""
# b debugged
v0 = 3
t = 1
a = 2 ** 2
s = v0*t + 0.5*a*t**2
print s
#c
"""
a = 3,3 b = 5,3
a2 = a**2
b2 = b**2
eq1_sum = a2 + 2ab + b2
eq2_sum = a2 - (2ab + b2
eq1_pow = (a+b)**2
eq2_pow = (a-b)**2
print 'First equation: %g = %g', % (eq1_sum, eq1_pow)
print 'Second equation: %h = %h', % (eq2_pow, eq2_pow)
# c debugged (confused???)
a = 3,3
b=5,3
a2 = a**2
b2 = b**2
eq1_sum = a2 + (2*a*b) + b2
eq2_sum = a2 - (2*a*b) + b2
eq1_pow = (a+b)**2
eq2_pow = (a-b)**2
print "First equation: %g = %g" % (eq1_sum, eq1_pow)
print "Second equation: %h = %h" % (eq2_pow, eq2_pow)
"""
| chapman-cpsc-230/hw1-grazi102 | sin2_plus_cos2.py | Python | mit | 1,198 |
#!/usr/bin/env python
import os
import re
import sys
import codecs
from setuptools import setup, find_packages
# When creating the sdist, make sure the django.mo file also exists:
if 'sdist' in sys.argv or 'develop' in sys.argv:
os.chdir('seo')
try:
from django.core import management
management.call_command('compilemessages', stdout=sys.stderr, verbosity=1)
except ImportError:
if 'sdist' in sys.argv:
raise
finally:
os.chdir('..')
def read(*parts):
file_path = os.path.join(os.path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name="django-easy-seo",
version=find_version('seo', '__init__.py'),
license="GPLv3 License",
install_requires=[
'django-classy-tags',
],
requires=[
'Django (>=1.4)',
],
description="Adds generic SEO fields for objects in your site",
long_description=read('README.rst'),
author="Alexander Ivanov",
author_email="alexander.ivanov@redsolution.ru",
maintainer='Basil Shubin',
maintainer_email='basil.shubin@gmail.com',
url='https://github.com/bashu/django-easy-seo',
download_url='https://github.com/bashu/django-easy-seo/zipball/master',
packages=find_packages(exclude=('example*',)),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| bashu/django-easy-seo | setup.py | Python | gpl-3.0 | 2,428 |
import abc
class Defense(abc.ABC):  # inherit from ABC so the abstractmethod decorators are enforced
@abc.abstractmethod
def name(self):
pass
@abc.abstractmethod
def description(self):
pass
@abc.abstractmethod
def attacker_message(self):
pass
@abc.abstractmethod
def observer_message(self):
pass
@abc.abstractmethod
def evaluate(self, attack_result):
pass
@abc.abstractmethod
def execute(self, attack_result):
pass
| ChrisLR/Python-Roguelike-Template | combat/defenses/base.py | Python | mit | 451 |
#!/usr/bin/env python
# jhbuild - a tool to ease building collections of source packages
# Copyright (C) 2001-2006 James Henstridge
#
# changecvsroot.py: script to alter the CVS root of a working copy
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
def changecvsroot(oldroot, newroot, *dirs):
def handle((oldroot, newroot), dirname, fnames):
if os.path.basename(dirname) == 'CVS' and 'Root' in fnames:
r = open(os.path.join(dirname, 'Root'), 'r').read().strip()
if r == oldroot:
fp = open(os.path.join(dirname, 'Root'), 'w')
fp.write('%s\n' % newroot)
fp.close()
for dir in dirs:
os.path.walk(dir, handle, (oldroot, newroot))
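# Example invocation of the function above (the CVS roots and checkout path are
# illustrative only):
#   ./changecvsroot.py :pserver:anoncvs@cvs.example.org:/cvs \
#                      :pserver:anoncvs@mirror.example.org:/cvs  path/to/checkout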
if __name__ == '__main__':
import sys
if len(sys.argv) < 4:
sys.stderr.write('usage: changecvsroot.py oldroot newroot dirs ...\n')
sys.exit(1)
    changecvsroot(sys.argv[1], sys.argv[2], *sys.argv[3:])
| ahmeier/jhbuild | scripts/changecvsroot.py | Python | gpl-2.0 | 1,618 |
# Time: O(1.189), counted by statistics, limit would be O(log10/log7) = O(1.183)
# Space: O(1)
# Given a function rand7 which generates a uniform random integer in the range 1 to 7,
# write a function rand10 which generates a uniform random integer in the range 1 to 10.
#
# Do NOT use system's Math.random().
#
# Example 1:
#
# Input: 1
# Output: [7]
# Example 2:
#
# Input: 2
# Output: [8,4]
# Example 3:
#
# Input: 3
# Output: [8,1,10]
#
# Note:
#
# rand7 is predefined.
# Each testcase has one argument: n, the number of times that rand10 is called.
#
# Follow up:
# - What is the expected value for the number of calls to rand7() function?
# - Could you minimize the number of calls to rand7()?
#
# The rand7() API is already defined for you.
import random
def rand7():
return random.randint(1, 7)
# Reference: https://leetcode.com/problems/implement-rand10-using-rand7/discuss/151567/C++JavaPython-Average-1.199-Call-rand7-Per-rand10
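# How the cached approach below works (explanatory note, not part of the original
# solution): generate() pools n=32 calls to rand7() into a single integer `curr`
# that is uniform over [0, 7**32).  Whenever `curr` is smaller than the largest
# multiple of 10 not exceeding the current range, its last decimal digit is uniform
# over 0..9, so that digit is cached as one rand10() answer and both `curr` and the
# range are divided by 10; the loop stops once `curr` falls into the biased tail.
# Amortised over many calls this uses roughly 1.19 rand7() calls per rand10().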
class Solution(object):
def __init__(self):
self.__cache = []
def rand10(self):
"""
:rtype: int
"""
def generate(cache):
n = 32
curr = sum((rand7()-1) * (7**i) for i in xrange(n))
rang = 7**n
while curr < rang//10*10:
cache.append(curr%10+1)
curr /= 10
rang /= 10
while not self.__cache:
generate(self.__cache)
return self.__cache.pop()
# Time: O(2 * (1 + (9/49) + (9/49)^2 + ...)) = O(2/(1-(9/49)) = O(2.45)
# Space: O(1)
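# A short derivation of the 2.45 figure (added note, not part of the original
# solution): each attempt below makes exactly 2 calls to rand7() and produces x
# uniform over 0..48, which is accepted when x < 40, i.e. with probability 40/49.
# The expected number of attempts is therefore 49/40, giving 2 * 49/40 = 2.45
# expected rand7() calls, which is the closed form of the series above.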
class Solution2(object):
def rand10(self):
"""
:rtype: int
"""
while True:
x = (rand7()-1)*7 + (rand7()-1)
if x < 40:
return x%10 + 1
| tudennis/LeetCode---kamyu104-11-24-2015 | Python/implement-rand10-using-rand7.py | Python | mit | 1,761 |
# -*- coding:utf-8 -*-
import unittest, sys, os, re, random, string, time, subprocess
sys.path[:0] = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from datetime import datetime
from saklient.util import Util
from saklient.cloud.api import API
from saklient.cloud.resources.routerplan import RouterPlan
from saklient.cloud.resources.swytch import Swytch
from saklient.cloud.resources.server import Server
from saklient.cloud.resources.iface import Iface
from saklient.cloud.resources.ipv6net import Ipv6Net
from saklient.cloud.resources.ipv4net import Ipv4Net
class TestRouter(unittest.TestCase):
def test_should_be_cruded(self):
# load config file
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_ok_file = root + '/testok'
if not os.path.exists(test_ok_file):
print("詳細テストを行うには " + test_ok_file + " をtouchしてください。")
sys.exit(0)
config_file = root + '/config.sh'
        self.assertTrue(os.path.exists(config_file))  # please create config_file first
config = {}
fh = open(config_file, "r")
for line in fh:
m = re.search("^\\s*export\\s+(\\w+)\\s*=\\s*(.+?)\\s*$", line)
if m is None: continue
key = m.group(1)
value = m.group(2)
value = re.sub("'([^']*)'|\"([^\"]*)\"|\\\\(.)|(.)", lambda m: m.group(1) or m.group(2) or m.group(3) or m.group(4), value)
config[key] = value
fh.close()
self.assertIn('SACLOUD_TOKEN', config)
self.assertIn('SACLOUD_SECRET', config)
self.assertIn('SACLOUD_ZONE', config)
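        # A hypothetical config.sh accepted by the parser above (all values are
        # placeholders; use the credentials and zone of your own Sakura Cloud account):
        #   export SACLOUD_TOKEN='your-api-token'
        #   export SACLOUD_SECRET='your-api-secret'
        #   export SACLOUD_ZONE='is1b'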
# authorize
api = API.authorize(config['SACLOUD_TOKEN'], config['SACLOUD_SECRET'], config['SACLOUD_ZONE'])
self.assertIsInstance(api, API)
# should be CRUDed
name = '!python_test-' + datetime.now().strftime('%Y%m%d_%H%M%S') + '-' + ''.join([random.choice(string.ascii_letters + string.digits) for i in range(8)])
description = 'This instance was created by saklient.python test'
tag = 'saklient-test'
mask_len = 28
mask_len_cnt = 1<<32-mask_len
sroute_mask_len = 28
sroute_mask_len_cnt = 1<<32-sroute_mask_len
#
swytch = None
if True:
            print('Searching for router+switch bandwidth plans...')
plans = api.product.router.find()
min_mbps = 0x7FFFFFFF
for plan in plans:
self.assertIsInstance(plan, RouterPlan)
self.assertTrue(0 < plan.band_width_mbps)
min_mbps = min(plan.band_width_mbps, min_mbps)
            print('Creating a router+switch...')
router = api.router.create()
router.name = name
router.description = description
router.band_width_mbps = min_mbps
router.network_mask_len = mask_len
router.save()
            print('Waiting for the router+switch creation to complete...')
            if not router.sleep_while_creating(): self.fail('The router was not created successfully')
swytch = router.get_swytch()
else:
            print('Fetching an existing router+switch...')
swytches = api.swytch.with_name_like('saklient-static-1').limit(1).find()
self.assertEqual(len(swytches), 1)
swytch = swytches[0]
self.assertIsInstance(swytch, Swytch)
self.assertEqual(len(swytch.ipv4_nets), 1)
self.assertIsInstance(swytch.ipv4_nets[0], Ipv4Net)
self.assertEqual(len(swytch.ipv4_nets[0].range.as_array), mask_len_cnt-5)
self.assertEqual(len(swytch.collect_used_ipv4_addresses()), 0)
self.assertEqual(len(swytch.collect_unused_ipv4_addresses()), mask_len_cnt-5)
#
        print('Creating a server...')
server = api.server.create()
self.assertIsInstance(server, Server)
server.name = name
server.description = description
server.plan = api.product.server.get_by_spec(1, 1)
server.save()
self.assertTrue(0 < int(server.id))
#
        print('Adding an interface to the server...')
iface = server.add_iface()
self.assertIsInstance(iface, Iface)
self.assertTrue(0 < int(iface.id))
#
        print('Connecting the interface to the router+switch...')
iface.connect_to_swytch(swytch)
#
        print('Assigning an IP address to the interface...')
iface.user_ip_address = swytch.ipv4_nets[0].range.as_array[1]
iface.save()
self.assertEqual(len(swytch.collect_used_ipv4_addresses()), 1)
self.assertEqual(len(swytch.collect_unused_ipv4_addresses()), mask_len_cnt-6)
#
        print('Changing the router+switch bandwidth plan...')
router_id_before = swytch.router.id
swytch.change_plan(500 if swytch.router.band_width_mbps==100 else 100)
self.assertNotEqual(swytch.router.id, router_id_before)
#
        print('Assigning an IPv6 network to the router+switch...')
v6net = swytch.add_ipv6_net()
self.assertIsInstance(v6net, Ipv6Net)
self.assertEqual(len(swytch.ipv6_nets), 1)
#
        print('Adding a static route to the router+switch...')
net0 = swytch.ipv4_nets[0]
next_hop = Util.long2ip(Util.ip2long(net0.address) + 4)
sroute = swytch.add_static_route(28, next_hop)
self.assertIsInstance(sroute, Ipv4Net)
self.assertEqual(len(swytch.ipv4_nets), 2)
self.assertEqual(len(swytch.ipv4_nets[1].range.as_array), sroute_mask_len_cnt)
#
for i in range(len(swytch.ipv4_nets) - 1, 0, -1):
            print('Removing the static route from the router+switch...')
net = swytch.ipv4_nets[i]
swytch.remove_static_route(net)
#
if 0 < len(swytch.ipv6_nets):
            print('Removing the IPv6 network from the router+switch...')
swytch.remove_ipv6_net()
#
        print('Deleting the server...')
server.destroy()
if __name__ == '__main__':
unittest.main()
| sakura-internet/saklient.python | tests/test_router.py | Python | mit | 6,717 |
#!/usr/bin/env python
from xml.dom import minidom
import sys
doc = minidom.parse(sys.argv[1])
filters = doc.getElementsByTagName('pattern')
print "char * stringlst = ["
for filter in filters:
id = filter.getAttribute('id')
stockid = filter.getAttribute('inkscape:stockid')
print "N_(\"" + stockid + "\"),"
print "];"
| Huluzai/DoonSketch | inkscape-0.48.5/share/patterns/i18n.py | Python | gpl-2.0 | 327 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import shutil
import tempfile
import unittest
import configparser
from click.testing import CliRunner
from qiime2 import Artifact
from qiime2.core.testing.type import IntSequence1
from qiime2.core.testing.util import get_dummy_plugin
import q2cli
import q2cli.util
import q2cli.builtin.info
import q2cli.builtin.tools
from q2cli.commands import RootCommand
from q2cli.core.config import CLIConfig
class TestOption(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
self.runner = CliRunner()
self.tempdir = tempfile.mkdtemp(prefix='qiime2-q2cli-test-temp-')
self.parser = configparser.ConfigParser()
self.path = os.path.join(q2cli.util.get_app_dir(), 'cli-colors.theme')
def tearDown(self):
shutil.rmtree(self.tempdir)
def _assertRepeatedOptionError(self, result, option):
self.assertEqual(result.exit_code, 1)
self.assertTrue(result.output.startswith('Usage:'))
self.assertRegex(result.output, '.*%s.* was specified multiple times'
% option)
def test_repeated_eager_option_with_callback(self):
result = self.runner.invoke(
q2cli.builtin.tools.tools,
['import', '--show-importable-types', '--show-importable-types'])
self._assertRepeatedOptionError(result, '--show-importable-types')
def test_repeated_builtin_flag(self):
result = self.runner.invoke(
q2cli.builtin.tools.tools,
['import', '--input-path', 'a', '--input-path', 'b'])
self._assertRepeatedOptionError(result, '--input-path')
def test_repeated_action_flag(self):
qiime_cli = RootCommand()
command = qiime_cli.get_command(ctx=None, name='dummy-plugin')
out_path = os.path.join(self.tempdir, 'out.qza')
result = self.runner.invoke(
command, ['no-input-method', '--o-out', out_path,
'--verbose', '--verbose'])
self._assertRepeatedOptionError(result, '--verbose')
def test_repeated_builtin_option(self):
input_path = os.path.join(self.tempdir, 'ints.txt')
with open(input_path, 'w') as f:
f.write('42\n43\n44\n')
output_path = os.path.join(self.tempdir, 'out.qza')
result = self.runner.invoke(
q2cli.builtin.tools.tools,
['import', '--input-path', input_path,
'--output-path', output_path, '--type', 'IntSequence1',
'--type', 'IntSequence1'])
self._assertRepeatedOptionError(result, '--type')
def test_repeated_action_option(self):
qiime_cli = RootCommand()
command = qiime_cli.get_command(ctx=None, name='dummy-plugin')
out_path = os.path.join(self.tempdir, 'out.qza')
result = self.runner.invoke(
command, ['no-input-method', '--o-out', out_path,
'--o-out', out_path])
self._assertRepeatedOptionError(result, '--o-out')
def test_repeated_multiple_option(self):
input_path = os.path.join(self.tempdir, 'ints.qza')
artifact = Artifact.import_data(IntSequence1, [0, 42, 43], list)
artifact.save(input_path)
metadata_path1 = os.path.join(self.tempdir, 'metadata1.tsv')
with open(metadata_path1, 'w') as f:
f.write('id\tcol1\nid1\tfoo\nid2\tbar\n')
metadata_path2 = os.path.join(self.tempdir, 'metadata2.tsv')
with open(metadata_path2, 'w') as f:
f.write('id\tcol2\nid1\tbaz\nid2\tbaa\n')
output_path = os.path.join(self.tempdir, 'out.qza')
qiime_cli = RootCommand()
command = qiime_cli.get_command(ctx=None, name='dummy-plugin')
result = self.runner.invoke(
command, ['identity-with-metadata', '--i-ints', input_path,
'--o-out', output_path, '--m-metadata-file',
metadata_path1, '--m-metadata-file', metadata_path2,
'--verbose'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(os.path.exists(output_path))
self.assertEqual(Artifact.load(output_path).view(list), [0, 42, 43])
def test_config_expected(self):
self.parser['type'] = {'underline': 't'}
with open(self.path, 'w') as fh:
self.parser.write(fh)
config = CLIConfig()
config.parse_file(self.path)
self.assertEqual(
config.styles['type'], {'underline': True})
def test_config_bad_selector(self):
self.parser['tye'] = {'underline': 't'}
with open(self.path, 'w') as fh:
self.parser.write(fh)
config = CLIConfig()
with self.assertRaisesRegex(
configparser.Error, 'tye.*valid selector.*valid selectors'):
config.parse_file(self.path)
def test_config_bad_styling(self):
self.parser['type'] = {'underlined': 't'}
with open(self.path, 'w') as fh:
self.parser.write(fh)
config = CLIConfig()
with self.assertRaisesRegex(
configparser.Error, 'underlined.*valid styling.*valid '
'stylings'):
config.parse_file(self.path)
def test_config_bad_color(self):
self.parser['type'] = {'fg': 'purple'}
with open(self.path, 'w') as fh:
self.parser.write(fh)
config = CLIConfig()
with self.assertRaisesRegex(
configparser.Error, 'purple.*valid color.*valid colors'):
config.parse_file(self.path)
def test_config_bad_boolean(self):
self.parser['type'] = {'underline': 'g'}
with open(self.path, 'w') as fh:
self.parser.write(fh)
config = CLIConfig()
with self.assertRaisesRegex(
configparser.Error, 'g.*valid boolean.*valid booleans'):
config.parse_file(self.path)
def test_no_file(self):
config = CLIConfig()
with self.assertRaisesRegex(
configparser.Error, "'Path' is not a valid filepath."):
config.parse_file('Path')
if __name__ == "__main__":
unittest.main()
| jakereps/q2cli | q2cli/tests/test_core.py | Python | bsd-3-clause | 6,485 |
#! ../env/bin/python
#
# Add all your necessary imports here. Typically, __init__.py should only contain imports and not code
#
# The package version
__version__ = '0.0.1'
# Defines the list of module names to be imported, when "from package import *" is encountered
__all__ = []
| kirang89/bootstrapy | mypackage/__init__.py | Python | bsd-2-clause | 283 |
"""Details about printers which are connected to CUPS."""
import importlib
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_MARKER_TYPE = "marker_type"
ATTR_MARKER_LOW_LEVEL = "marker_low_level"
ATTR_MARKER_HIGH_LEVEL = "marker_high_level"
ATTR_PRINTER_NAME = "printer_name"
ATTR_DEVICE_URI = "device_uri"
ATTR_PRINTER_INFO = "printer_info"
ATTR_PRINTER_IS_SHARED = "printer_is_shared"
ATTR_PRINTER_LOCATION = "printer_location"
ATTR_PRINTER_MODEL = "printer_model"
ATTR_PRINTER_STATE_MESSAGE = "printer_state_message"
ATTR_PRINTER_STATE_REASON = "printer_state_reason"
ATTR_PRINTER_TYPE = "printer_type"
ATTR_PRINTER_URI_SUPPORTED = "printer_uri_supported"
CONF_PRINTERS = "printers"
CONF_IS_CUPS_SERVER = "is_cups_server"
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 631
DEFAULT_IS_CUPS_SERVER = True
ICON_PRINTER = "mdi:printer"
ICON_MARKER = "mdi:water"
SCAN_INTERVAL = timedelta(minutes=1)
PRINTER_STATES = {3: "idle", 4: "printing", 5: "stopped"}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PRINTERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_IS_CUPS_SERVER, default=DEFAULT_IS_CUPS_SERVER): cv.boolean,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
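# A hypothetical configuration.yaml entry that this schema accepts (host, port and
# printer names are illustrative only):
#
#   sensor:
#     - platform: cups
#       host: 127.0.0.1
#       port: 631
#       printers:
#         - EPSON_XP-6000_Series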
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the CUPS sensor."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
printers = config.get(CONF_PRINTERS)
is_cups = config.get(CONF_IS_CUPS_SERVER)
if is_cups:
data = CupsData(host, port, None)
data.update()
if data.available is False:
_LOGGER.error("Unable to connect to CUPS server: %s:%s", host, port)
raise PlatformNotReady()
dev = []
for printer in printers:
if printer not in data.printers:
_LOGGER.error("Printer is not present: %s", printer)
continue
dev.append(CupsSensor(data, printer))
if "marker-names" in data.attributes[printer]:
for marker in data.attributes[printer]["marker-names"]:
dev.append(MarkerSensor(data, printer, marker, True))
add_entities(dev, True)
return
data = CupsData(host, port, printers)
data.update()
if data.available is False:
_LOGGER.error("Unable to connect to IPP printer: %s:%s", host, port)
raise PlatformNotReady()
dev = []
for printer in printers:
dev.append(IPPSensor(data, printer))
if "marker-names" in data.attributes[printer]:
for marker in data.attributes[printer]["marker-names"]:
dev.append(MarkerSensor(data, printer, marker, False))
add_entities(dev, True)
class CupsSensor(Entity):
"""Representation of a CUPS sensor."""
def __init__(self, data, printer):
"""Initialize the CUPS sensor."""
self.data = data
self._name = printer
self._printer = None
self._available = False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._printer is None:
return None
key = self._printer["printer-state"]
return PRINTER_STATES.get(key, key)
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON_PRINTER
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
if self._printer is None:
return None
return {
ATTR_DEVICE_URI: self._printer["device-uri"],
ATTR_PRINTER_INFO: self._printer["printer-info"],
ATTR_PRINTER_IS_SHARED: self._printer["printer-is-shared"],
ATTR_PRINTER_LOCATION: self._printer["printer-location"],
ATTR_PRINTER_MODEL: self._printer["printer-make-and-model"],
ATTR_PRINTER_STATE_MESSAGE: self._printer["printer-state-message"],
ATTR_PRINTER_STATE_REASON: self._printer["printer-state-reasons"],
ATTR_PRINTER_TYPE: self._printer["printer-type"],
ATTR_PRINTER_URI_SUPPORTED: self._printer["printer-uri-supported"],
}
def update(self):
"""Get the latest data and updates the states."""
self.data.update()
self._printer = self.data.printers.get(self._name)
self._available = self.data.available
class IPPSensor(Entity):
"""Implementation of the IPPSensor.
This sensor represents the status of the printer.
"""
def __init__(self, data, name):
"""Initialize the sensor."""
self.data = data
self._name = name
self._attributes = None
self._available = False
@property
def name(self):
"""Return the name of the sensor."""
return self._attributes["printer-make-and-model"]
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON_PRINTER
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def state(self):
"""Return the state of the sensor."""
if self._attributes is None:
return None
key = self._attributes["printer-state"]
return PRINTER_STATES.get(key, key)
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
if self._attributes is None:
return None
state_attributes = {}
if "printer-info" in self._attributes:
state_attributes[ATTR_PRINTER_INFO] = self._attributes["printer-info"]
if "printer-location" in self._attributes:
state_attributes[ATTR_PRINTER_LOCATION] = self._attributes[
"printer-location"
]
if "printer-state-message" in self._attributes:
state_attributes[ATTR_PRINTER_STATE_MESSAGE] = self._attributes[
"printer-state-message"
]
if "printer-state-reasons" in self._attributes:
state_attributes[ATTR_PRINTER_STATE_REASON] = self._attributes[
"printer-state-reasons"
]
if "printer-uri-supported" in self._attributes:
state_attributes[ATTR_PRINTER_URI_SUPPORTED] = self._attributes[
"printer-uri-supported"
]
return state_attributes
def update(self):
"""Fetch new state data for the sensor."""
self.data.update()
self._attributes = self.data.attributes.get(self._name)
self._available = self.data.available
class MarkerSensor(Entity):
"""Implementation of the MarkerSensor.
This sensor represents the percentage of ink or toner.
"""
def __init__(self, data, printer, name, is_cups):
"""Initialize the sensor."""
self.data = data
self._name = name
self._printer = printer
self._index = data.attributes[printer]["marker-names"].index(name)
self._is_cups = is_cups
self._attributes = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON_MARKER
@property
def state(self):
"""Return the state of the sensor."""
if self._attributes is None:
return None
return self._attributes[self._printer]["marker-levels"][self._index]
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "%"
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
if self._attributes is None:
return None
high_level = self._attributes[self._printer].get("marker-high-levels")
if isinstance(high_level, list):
high_level = high_level[self._index]
low_level = self._attributes[self._printer].get("marker-low-levels")
if isinstance(low_level, list):
low_level = low_level[self._index]
marker_types = self._attributes[self._printer]["marker-types"]
if isinstance(marker_types, list):
marker_types = marker_types[self._index]
if self._is_cups:
printer_name = self._printer
else:
printer_name = self._attributes[self._printer]["printer-make-and-model"]
return {
ATTR_MARKER_HIGH_LEVEL: high_level,
ATTR_MARKER_LOW_LEVEL: low_level,
ATTR_MARKER_TYPE: marker_types,
ATTR_PRINTER_NAME: printer_name,
}
def update(self):
"""Update the state of the sensor."""
# Data fetching is done by CupsSensor/IPPSensor
self._attributes = self.data.attributes
class CupsData:
"""Get the latest data from CUPS and update the state."""
def __init__(self, host, port, ipp_printers):
"""Initialize the data object."""
self._host = host
self._port = port
self._ipp_printers = ipp_printers
self.is_cups = ipp_printers is None
self.printers = None
self.attributes = {}
self.available = False
def update(self):
"""Get the latest data from CUPS."""
cups = importlib.import_module("cups")
try:
conn = cups.Connection(host=self._host, port=self._port)
if self.is_cups:
self.printers = conn.getPrinters()
for printer in self.printers:
self.attributes[printer] = conn.getPrinterAttributes(name=printer)
else:
for ipp_printer in self._ipp_printers:
self.attributes[ipp_printer] = conn.getPrinterAttributes(
uri=f"ipp://{self._host}:{self._port}/{ipp_printer}"
)
self.available = True
except RuntimeError:
self.available = False
| joopert/home-assistant | homeassistant/components/cups/sensor.py | Python | apache-2.0 | 10,683 |
import os.path
from django.test import TestCase
from django.test.client import RequestFactory
from django.db import models
# from edc.testing.tests.factories import TestModelFactory
from ..classes import ModelLabel
from ..models import LabelPrinter
from .factories import LabelPrinterFactory, ClientFactory, ZplTemplateFactory
import factory
from edc.base.model.models import BaseUuidModel
from edc.base.model.tests.factories import BaseUuidModelFactory
from edc.testing.models import TestModel
class TestModel(BaseUuidModel):
f1 = models.CharField(max_length=10)
f2 = models.CharField(max_length=10)
f3 = models.CharField(max_length=10, null=True, blank=False)
f4 = models.CharField(max_length=10, null=True, blank=False)
f5 = models.CharField(max_length=10)
def barcode_value(self):
return self.f1
class Meta:
app_label = 'labeling'
class TestModelFactory(BaseUuidModelFactory):
FACTORY_FOR = TestModel
f1 = factory.Sequence(lambda n: 'F1{0}'.format(n))
f2 = factory.Sequence(lambda n: 'F2{0}'.format(n))
f3 = factory.Sequence(lambda n: 'F3{0}'.format(n))
f4 = factory.Sequence(lambda n: 'F4{0}'.format(n))
f5 = factory.Sequence(lambda n: 'F5{0}'.format(n))
class ModelLabelTests(TestCase):
def setUp(self):
self.label_printer_default = LabelPrinterFactory(cups_server_ip='127.0.0.1', default=True)
self.label_printer_client_10 = LabelPrinterFactory(cups_server_ip='127.0.0.3', default=True)
self.label_printer_client_11_default = LabelPrinterFactory(cups_server_ip='127.0.0.4', default=True)
self.label_printer_client_11 = LabelPrinterFactory(cups_server_ip='127.0.0.5', default=False)
ClientFactory(ip='127.0.0.10', name='', label_printer=self.label_printer_client_10)
ClientFactory(ip='127.0.0.11', name='', label_printer=self.label_printer_client_11)
ClientFactory(ip='127.0.0.11', name='', label_printer=self.label_printer_client_11_default)
self.default_zpl_template_string = ("""^XA
^FO325,15^A0N,15,20^FDBHHRL^FS
^FO310,30^BY2,3^BCN,75,N,N,N\n
^BY^FD${barcode_value}^FS
^FO320,110^A0N,15,20^FD${f1}^FS
^FO320,110^A0N,15,20^FD${f2}^FS
^FO320,110^A0N,15,20^FD${f3}^FS
^FO320,110^A0N,15,20^FD${f4}^FS
^FO325,130^A0N,15,20^FDCD4^FS
^FO325,150^A0N,20^FD${created}^FS
^XZ""")
self.zpl_template = ZplTemplateFactory(
name='Default template',
default=True,
template=self.default_zpl_template_string)
def test_zpl_template_not_set(self):
"""Assert fails if template not set."""
label = ModelLabel()
test_model = TestModelFactory()
request = None
self.assertRaises(TypeError, label.print_label, request, test_model, update_messages=False, client_addr='127.0.0.1')
def test_zpl_template(self):
"""Assert fails if template not set."""
label = ModelLabel()
test_model = TestModelFactory()
label.zpl_template = self.zpl_template
def test_print_label(self):
"""Assert sends error message if printer is not known to lpr."""
test_model = TestModelFactory()
label = ModelLabel()
label.zpl_template = self.zpl_template
label.default_label_printer = self.label_printer_default
request = None
label.print_label(request, test_model, update_messages=False, client_addr='127.0.0.1')
self.assertIn('printer or class does not exist', label.error_message)
def test_label_printer(self):
"""Assert fails if template not set."""
label = ModelLabel()
label.zpl_template = self.zpl_template
self.assertIsNotNone(label.label_printer)
self.assertTrue(isinstance(label.label_printer, LabelPrinter))
def test_label_printer1(self):
"""Assert fails if template not set."""
label = ModelLabel()
test_model = TestModelFactory()
label.zpl_template = self.zpl_template
# self.assertRaises(TypeError, label.print_label, request, test_model, update_messages=False, client_addr='127.0.0.1')
def test_label_context(self):
"""Assert can refresh the label context with the model instance."""
label = ModelLabel()
label.zpl_template = self.zpl_template
test_model = TestModelFactory()
label.model_instance = test_model
self.assertTrue(label.refresh_label_context())
def test_formatted_label_string(self):
"""Assert model values are in formatted label string."""
label = ModelLabel()
label.zpl_template = self.zpl_template
test_model = TestModelFactory()
label.model_instance = test_model
label.refresh_label_context()
self.assertIn(label.label_context.get('f1'), label.formatted_label_string)
self.assertIn(label.label_context.get('f2'), label.formatted_label_string)
self.assertIn(label.label_context.get('f3'), label.formatted_label_string)
self.assertIn(label.label_context.get('f4'), label.formatted_label_string)
self.assertIn(label.label_context.get('created'), label.formatted_label_string)
self.assertIn(label.label_context.get('barcode_value'), label.formatted_label_string)
| botswana-harvard/getresults-label | getresults_label/tests/model_label_tests.py | Python | gpl-2.0 | 5,259 |
# -*- encoding: utf-8 -*-
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
import pecan
from ironic.api import acl
from ironic.api import config
from ironic.api.controllers.base import Version
from ironic.api import hooks
from ironic.api import middleware
from ironic.common.i18n import _
api_opts = [
cfg.StrOpt(
'auth_strategy',
default='keystone',
help=_('Authentication strategy used by ironic-api: one of "keystone" '
'or "noauth". "noauth" should not be used in a production '
'environment because all authentication will be disabled.')),
cfg.BoolOpt('pecan_debug',
default=False,
help=_('Enable pecan debug mode. WARNING: this is insecure '
'and should not be used in a production environment.')),
]
CONF = cfg.CONF
CONF.register_opts(api_opts)
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None, extra_hooks=None):
app_hooks = [hooks.ConfigHook(),
hooks.DBHook(),
hooks.ContextHook(pecan_config.app.acl_public_routes),
hooks.RPCHook(),
hooks.NoExceptionTracebackHook(),
hooks.PublicUrlHook()]
if extra_hooks:
app_hooks.extend(extra_hooks)
if not pecan_config:
pecan_config = get_pecan_config()
if pecan_config.app.enable_acl:
app_hooks.append(hooks.TrustedCallHook())
pecan.configuration.set_config(dict(pecan_config), overwrite=True)
app = pecan.make_app(
pecan_config.app.root,
static_root=pecan_config.app.static_root,
debug=CONF.pecan_debug,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
if pecan_config.app.enable_acl:
app = acl.install(app, cfg.CONF, pecan_config.app.acl_public_routes)
# Create a CORS wrapper, and attach ironic-specific defaults that must be
# included in all CORS responses.
app = cors_middleware.CORS(app, CONF)
app.set_latent(
allow_headers=[Version.max_string, Version.min_string, Version.string],
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=[Version.max_string, Version.min_string, Version.string]
)
return app
class VersionSelectorApplication(object):
def __init__(self):
pc = get_pecan_config()
pc.app.enable_acl = (CONF.auth_strategy == 'keystone')
self.v1 = setup_app(pecan_config=pc)
def __call__(self, environ, start_response):
return self.v1(environ, start_response)
| hpproliant/ironic | ironic/api/app.py | Python | apache-2.0 | 3,477 |
#!/usr/bin/env python3
#
# This file is part of sarracenia.
# The sarracenia suite is Free and is proudly provided by the Government of Canada
# Copyright (C) Her Majesty The Queen in Right of Canada, Environment Canada, 2008-2015
#
# Questions or bugs report: dps-client@ec.gc.ca
# sarracenia repository: git://git.code.sf.net/p/metpx/git
# Documentation: http://metpx.sourceforge.net/#SarraDocumentation
#
# sr_sarra.py : python3 program allowing users to listen and download product from
# another sarracenia server or from user posting (sr_post/sr_watch)
# and reannounce the product on the current server
#
# Code contributed by:
# Michel Grenier - Shared Services Canada
# Last Changed : Mon Sep 25 20:45 UTC 2017
# code rewritten : sr_sarra is an instantiation of sr_subscribe
#
########################################################################
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#============================================================
# usage example
#
# sr_sarra [options] [config] [foreground|start|stop|restart|reload|status|cleanup|setup]
#
# sr_sarra consumes messages; for each message it downloads the product
# and reannounces it. One usage of sarra is to acquire data from sources
# that announce their products. The other usage is to disseminate products
# from other brokers/pumps.
#
# condition 1: from a source
# broker = where sarra is running (manager)
# exchange = xs_source_user
# product = downloaded under directory (option document_root)
# = subdirectory from mirror option OR
# message.headers['rename'] ...
# can be trimmed by option strip
#
# post_broker = where sarra is running (manager)
# post_exchange = xpublic
# post_message = same as incoming message
# message.headers['source'] is set from source_user
# message.headers['cluster'] is set from option cluster from default.conf
# message is built from url option give
#
# report_exchange = xreport
#
#
# condition 2: from another broker/pump
# broker = the remote broker...
# exchange = xpublic
# product = usually the product placement is mirrored
# option document_root needs to be set
# post_broker = where sarra is running (manager)
# post_exchange = xpublic
# post_message = same as incoming message
# message.headers['source'] left as is
# message.headers['cluster'] left as is
# option url : gives new url announcement for this product
# report_exchange = xs_"remoteBrokerUser"
#
#
#============================================================
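#
# illustrative sketch (not from the original project docs): a minimal config
# for condition 1 could look like the lines below; exact option names and
# values depend on the local deployment.
#
#   broker        amqp://source_user@localhost/
#   exchange      xs_source_user
#   document_root /var/www/data
#   post_broker   amqp://manager@localhost/
#   post_exchange xpublic
#   url           http://localhost/data/
#   accept        .*
#
#============================================================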
import os,sys,time
try :
from sr_subscribe import *
print( "Using local module definitions, not system ones")
except :
from sarra.sr_subscribe import *
class sr_sarra(sr_subscribe):
def check(self):
if self.config_name == None : return
if self.broker == None :
self.logger.error("no broker given")
self.help()
sys.exit(1)
if self.post_broker == None : self.post_broker = self.broker
# exchanges suffix process if needed
if self.exchange == None and self.exchange_suffix :
self.exchange = 'xs_%s' % self.broker.username + self.exchange_suffix
if self.post_exchange == None and self.post_exchange_suffix :
self.post_exchange = 'xs_%s' % self.post_broker.username + self.post_exchange_suffix
# verify exchange
if self.exchange == None :
self.logger.error("no exchange given")
self.help()
sys.exit(1)
# verify post_base_dir
if self.post_base_dir == None :
if self.post_document_root != None :
self.post_base_dir = self.post_document_root
self.logger.warning("use post_base_dir instead of post_document_root")
elif self.document_root != None :
self.post_base_dir = self.document_root
self.logger.warning("use post_base_dir instead of document_root")
# bindings should be defined
if self.bindings == [] :
key = self.topic_prefix + '.#'
self.bindings.append( (self.exchange,key) )
self.logger.debug("*** BINDINGS %s"% self.bindings)
# default queue name if not given
if self.queue_name == None :
self.queue_name = 'q_' + self.broker.username + '.'
self.queue_name += self.program_name + '.' + self.config_name
# ===========================================================
# some sr_subscribe options reset to match sr_sarra behavior
# ===========================================================
# currentDir is post_document_root if unset
if self.currentDir == None :
self.currentDir = self.post_document_root
# always download ...
if self.notify_only :
self.logger.error("sarra notify_only True")
os._exit(1)
        # we don't save or restore
if self.save or self.restore :
self.logger.error("sarra no save/restore support")
sys.exit(1)
        # we don't discard
if self.discard :
self.logger.error("sarra discard True")
sys.exit(1)
# retry_ttl setup.
if self.retry_ttl == None:
self.retry_ttl = self.expire
if self.retry_ttl == 0:
self.retry_ttl = None
if self.retry_mode :
self.execfile("plugin",'hb_retry')
# default reportback if unset
if self.reportback == None : self.reportback = False
# do_task should have doit_download for now... make it a plugin later
# and the download is the first thing that should be done
if not self.doit_download in self.do_task_list :
self.do_task_list.insert(0,self.doit_download)
# MG FIXME : I dont think I forgot anything but if some options need
# to be specifically set for sr_sarra put them HERE
def overwrite_defaults(self):
# overwrite defaults
# the default settings in most cases :
# sarra receives directly from sources onto itself
# or it consumes message from another pump
# we cannot define a default broker exchange
# since it can be xreport or xs_remotepumpUsername ?
# default broker and exchange None
# in most cases, sarra downloads and repost for itself.
self.inflight = None
# default post_broker and post_exchange are
self.post_exchange = 'xpublic'
if hasattr(self,'manager'):
self.post_broker = self.manager
# most of the time we want to mirror product directory and share queue
self.mirror = True
# no directory if not provided
self.currentDir = None
# ===========================================================
# some sr_subscribe options reset to understand user sr_sarra setup
# ===========================================================
self.discard = False
self.notify_only = False
self.restore = False
self.save = False
self.reportback = None
self.accept_unmatch = True
# ===================================
# MAIN
# ===================================
def main():
args,action,config,old = startup_args(sys.argv)
sarra = sr_sarra(config,args,action)
sarra.exec_action(action,old)
os._exit(0)
# =========================================
# direct invocation
# =========================================
if __name__=="__main__":
main()
| petersilva/metpx-sarracenia | sarra/sr_sarra.py | Python | gpl-2.0 | 8,710 |
import LinkList
class Queue(LinkList.LinkList):
def showqueue(self):
self.showlist()
def enqueue(self,data):
self.addattail(data)
def dequeue(self):
print self.Head.get_data(),"is the dequeued data"
print "Rest of the Queue:"
self.delhead()
def main():
pass
if __name__ == '__main__':
main()
Queuell = Queue()
Queuell.enqueue(10)
Queuell.enqueue(20)
Queuell.enqueue(45)
Queuell.showqueue()
Queuell.dequeue()
Queuell.showqueue()
| ketand114/beginnerpyfuncoding | QueueLinkList.py | Python | gpl-2.0 | 523 |
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="orientation",
parent_name="layout.coloraxis.colorbar",
**kwargs
):
super(OrientationValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["h", "v"]),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/coloraxis/colorbar/_orientation.py | Python | mit | 526 |
#!/usr/bin/env python3
"Load data, scale, train a linear model, output predictions"
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler
from sklearn.preprocessing import Normalizer, MaxAbsScaler, StandardScaler, RobustScaler
from sklearn.linear_model import LogisticRegression as LR
import os
import sys
eps = sys.float_info.epsilon
train_file = os.getenv('TRAINING')
test_file = os.getenv('TESTING')
output_file = os.getenv('PREDICTING')
#
train = pd.read_csv(train_file, header=0)
test = pd.read_csv(test_file, header=0)
features = [f for f in list(train) if 'feature' in f]
#
y_train = train.target.values
x_train = train[features]
x_test = test[features]
transformers = [ MaxAbsScaler(), MinMaxScaler(), RobustScaler(), StandardScaler(),
Normalizer( norm = 'l1' ), Normalizer( norm = 'l2' ), Normalizer( norm = 'max' ) ]
#poly_scaled = Pipeline([('poly', PolynomialFeatures()), ('scaler', MinMaxScaler())])
#transformers.extend([PolynomialFeatures(), poly_scaled])
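# SELECTING (environment variable) picks one transformer from the list above by 1-based index; 0 disables scaling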
selecting = int(os.getenv('SELECTING'))
if selecting != 0:
transformer = transformers[selecting - 1]
x_train = transformer.fit_transform(x_train)
x_test = transformer.transform(x_test)
print("training...")
lr = LR(n_jobs=-1)
lr.fit( x_train, y_train )
print("predicting...")
p = lr.predict_proba( x_test )
print("saving...")
test['probability'] = np.clip(p[:,1], 0.0 + eps, 1.0 - eps)
test.to_csv(output_file, columns=('id', 'probability'), index=None, float_format='%.16f')
# 0.69101 public
| altermarkive/Resurrecting-JimFleming-Numerai | src/ml-zygmuntz--numer.ai/march/predict_lr.py | Python | mit | 1,584 |
# ########################## Copyrights and License #############################
# #
# Copyright 2016 Yang Fang <yangfangscu@gmail.com> #
# #
# This file is part of PhySpeTree. #
# https://xiaofeiyangyang.github.io/physpetools/ #
# #
# PhySpeTree is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PhySpeTree is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PhySpeTree. If not, see <http://www.gnu.org/licenses/>. #
# #
# ###############################################################################
"""
Combine tree module: use this module can combine more tree to a consensus tree.
"""
import os
import subprocess
from physpetool.softwares.path import getlocalpath
def docontree(input, output, rule):
"""
    Combine trees into a consensus tree with RAxML.
    :param input: file containing the input trees
    :param output: output directory
    :param rule: consensus rule string passed to RAxML's -J option
"""
# get raxml path
raxmlpath = getlocalpath()
# run
# prepare a dir store result
if not os.path.exists(output):
os.mkdir(output)
consensuseCmd = raxmlpath + "/raxmlHPC-PTHREADS-AVX2 " + " -J " + rule + " -m GTRCAT -z " + input + " -w " + output + " -n T1"
subprocess.call(consensuseCmd, shell=True)
def do_astral(input,output):
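    """
    Combine gene trees into a species tree with the bundled ASTRAL jar (astral.5.6.3.jar).
    :param input: file containing the input trees
    :param output: output directory
    """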
astralpath = getlocalpath()
if not os.path.exists(output):
os.mkdir(output)
out_name = "combine.tree"
consensuseCmd = "java -jar " + astralpath + "/astral.5.6.3.jar" + " -i " + input+" -o " + output +"/"+ out_name
subprocess.call(consensuseCmd, shell=True)
def do_supertree(input, output):
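    """
    Combine trees into a supertree with the bundled spr_supertree binary.
    :param input: file containing the input trees
    :param output: output directory
    """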
supertreepath = getlocalpath()
if not os.path.exists(output):
os.mkdir(output)
out_name = "spr_supertree.tree"
consensuseCmd = supertreepath + "/spr_supertree" + " < " + input+" > " + output +"/"+ out_name
subprocess.call(consensuseCmd, shell=True) | xiaofeiyangyang/physpetools | physpetool/phylotree/consensustree.py | Python | gpl-3.0 | 2,984 |
import unittest
from benchmarker.benchmarker import run
class MiscTests(unittest.TestCase):
def test_no_framework(self):
with self.assertRaises(Exception):
run([])
def test_no_problem(self):
with self.assertRaises(Exception):
run(["--framework=pytorch"])
def test_bad_mode(self):
with self.assertRaises(AssertionError):
args = [
"--framework=pytorch",
"--problem=conv1d",
"--problem_size=4,4,4",
"--batch_size=4",
"--mode=depeche",
]
run(args)
| undertherain/benchmarker | test/test_misc.py | Python | mpl-2.0 | 625 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='MediaCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, editable=False, db_index=True)),
('modified', models.DateTimeField(default=django.utils.timezone.now, editable=False, db_index=True)),
('name', models.CharField(unique=True, max_length=255)),
],
options={
'verbose_name_plural': 'Media categories',
},
),
]
| ic-labs/django-icekit | icekit/migrations/0001_initial.py | Python | mit | 862 |
import math
import random
import copy
import pickle
"""
multilayer neural network.
This version is designed for efficiency so is less object oriented.
The bias input is implemented by adding an extra weight to each set of inputs.
The weights are stored as a 3-dimensional array [layers][neurons_in_layer][weights_into_neuron]
e.g. 3 inputs 2 hidden neurons 1 output
The weights might be
          in-hidden              hidden-out
weights=[ [[1,2,3,0],[3,4,5,-1]] , [[8,9,2]] ]
"""
def step(x):
"""
Threshold step function
"""
if x>0:
return 1
return 0
def sigmoid(x):
"""
transfer function with an output range 0 to 1
"""
if x < -100.0: # avoid math.exp(x) blowing up
return 0.0
return 1.0 / (1.0 + math.exp(-x))
def atan(x):
"""
transfer function with an output range -1 to 1
"""
if x < -100.:
return -1.0
if x > 100.:
return 1.0
ee=math.exp(-x)
return (1.0 -ee) / (1.0 + ee)
def randomSeed():
"""
Random number between -0.5 and 0.5
"""
return 0.5 - random.random()
class FeedForwardBrain:
"""
Basic feedforward neural net
"""
def __init__(self,size=None,func=None,weight=None):
"""
Create a multi layer network
Number of nodes in each layer is define by size
size[0] is the number of inputs
size[n] number of neurons in layer n
func is an array of activation functions (for each layer)
if this is None the activation defaults to sigmoid.
weight can be used to initialize the weights of the NN
in this case size can be None
otherwise if weight is None random values are assigned to the weights using size
Example for 2 inputs 2 hidden and 1 output
size=[2,2,1]
"""
self.layer_size = []
if weight != None:
self.weight=weight
self.layer_size.append(len(weight[0][0])-1)
for i in range(len(self.weight)):
self.layer_size.append(len(weight[i]))
else:
for i in range(len(size)):
self.layer_size.append(size[i])
# print self.layer_size
self.num_layer=len(self.layer_size)
if func == None:
func=[]
for _ in range(self.num_layer):
func.append(sigmoid)
self.func=func
#// allocate memory for output of each neuron
self.out = [] # new float[num_layer][];
for i in range(self.num_layer):
self.out.append([]);
a=self.out[i]
for _ in range(self.layer_size[i]):
a.append(0.0)
if weight == None:
self.weight=[]
for i in range(self.num_layer-1):
layer=[]
for _ in range(self.layer_size[i+1]):
w=[]
for _ in range(self.layer_size[i]):
w.append(randomSeed())
w.append(randomSeed())
layer.append(w)
self.weight.append(layer)
def ffwd(self,x):
"""
input: x list of input values
returns: list of output values.
"""
# assign content to input layer
for i in range(self.layer_size[0]):
self.out[0][i] = x[i] # output_from_neuron(layer,j) Jth neuron in Ith Layer
# assign output(activation) value
# to each neuron using sigmoid func
for layer in range(self.num_layer-1): # For each layer
for j in range(self.layer_size[layer+1]): # For each neuron in current layer
sum = 0.0;
for k in range(self.layer_size[layer]): # For input from each neuron in preceeding layer
sum += self.out[layer][k] * self.weight[layer][j][k]; # Apply weight to inputs and add to sum
sum += self.weight[layer][j][self.layer_size[layer]]; # Apply bias
self.out[layer+1][j] = self.func[layer](sum); # Apply transfer function
return self.out[self.num_layer - 1];
def copyWeights(self):
"""
Return a copy of the weights
"""
        return copy.deepcopy(self.weight)
def clone(self):
"""
Create a new brain which is the same as this one.
"""
clone=FeedForwardBrain(self.layer_size,self.func,self.weight)
return clone
#------------------ More advanced functionality (you can ignore this) ---------------------------------------------------
def resize_inputs(self,nIn):
"""
Add extra inputs to the network
"""
assert nIn > self.layer_size[0]
for _ in range(nIn-self.layer_size[0]):
self.out[0].append(0.0)
        for a in self.weight[0]:   # weight[0] holds the weights fed by the inputs
wLast=a.pop()
a.append(0.0)
for _ in range(nIn-self.layer_size[0]-1):
a.append(0.0)
a.append(wLast)
if self.layer_size[0]<nIn:
self.layer_size[0]=nIn
def mutate1(brain,amount):
"""
mutate *all* the weights by a random amount.
amount: range of mutation is amount*0.5
"""
# for all layers with inputs
for i in range(1,brain.num_layer):
        a=brain.weight[i-1]   # weight[i-1] holds the weights feeding layer i
# for all neurons in the layer
for j in range(brain.layer_size[i]):
r=a[j]
for k in range(brain.layer_size[i-1]+1):
r[k]=r[k]+randomSeed()*amount
def dist(brain1,brain2):
"""
WARNING UNTESTED --- might be useful for implement Niches
WARNING NO ERROR CHECKING (brains must be same topology)
sqrt(sum of diff weights squared)
compares brain1 and brain2 by calculating Euclidean distance between weight vectors
"""
sum=0.0
# for all layers with inputs
for i in range(1,brain1.num_layer):
        a=brain1.weight[i-1]
        b=brain2.weight[i-1]
# for all neurons in the layer
for j in range(brain1.layer_size[i]):
ra=a[j]
rb=b[j]
for k in range(brain1.layer_size[i-1]+1):
sum += (ra[k]-rb[k])**2
return math.sqrt(sum)
def mutate2(brain,amount):
"""
Only mutate a random section of each set of inputs
"""
for i in range(1,brain.num_layer):
        a=brain.weight[i-1]
for j in range(brain.layer_size[i]):
r=a[j]
k=random.randint(0, brain.layer_size[i-1]+1)
z=random.randint(0, (brain.layer_size[i-1]+1))
while k < z:
r[k]=r[k]+randomSeed()*amount
k=k+1
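# --- illustrative usage sketch (not part of the original module) ------------
# Minimal example assuming a 2-input, 2-hidden, 1-output network; the input
# values below are arbitrary.
if __name__ == "__main__":
    net = FeedForwardBrain(size=[2, 2, 1])
    print(net.ffwd([0.5, -0.25]))    # forward pass -> list with one output value
    other = net.clone()
    mutate1(other, 0.1)              # perturb every weight by a small random amount
    print(dist(net, other))          # Euclidean distance between the two weight sets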
| pauljohnleonard/pod-world | CI_2014/ATTIC/OLD_SIMULATION_CODE/CartWithIP/brain.py | Python | gpl-2.0 | 7,638 |
# Solfege - free ear training software
# Copyright (C) 2007, 2008, 2011 Tom Cato Amundsen
# License is GPL, see file COPYING
import os
import unittest
from solfege.soundcard.exporter import MidiExporter
from solfege.mpd.track import Track
from solfege.testlib import outdir
class TestMidiExporter(unittest.TestCase):
def test_empty(self):
m = MidiExporter()
m.start_export(os.path.join(outdir, "a.mid"))
m.end_export()
# We don't generate a file if no music has been played
# since start_export()
self.assertFalse(os.path.exists(os.path.join(outdir, "a.mid")))
def test_export_track(self):
t = Track()
t.start_note(50, 120)
m = MidiExporter()
m.start_export(os.path.join(outdir, "a.mid"))
m.play_track(t)
m.end_export()
os.remove(os.path.join(outdir, "a.mid"))
suite = unittest.makeSuite(TestMidiExporter)
| yuanyelele/solfege | solfege/soundcard/tests/test_exporter.py | Python | gpl-3.0 | 922 |
import datetime
from calendar import timegm
from jwt import MalformedJWT, JWT
from openid.consumer.consumer import Consumer, SUCCESS, CANCEL, FAILURE
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import sreg, ax, pape
from social.utils import url_add_parameters
from social.backends.base import BaseAuth
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthException, AuthFailed, AuthCanceled, \
AuthUnknownError, AuthMissingParameter, \
AuthTokenError
# OpenID configuration
OLD_AX_ATTRS = [
('http://schema.openid.net/contact/email', 'old_email'),
('http://schema.openid.net/namePerson', 'old_fullname'),
('http://schema.openid.net/namePerson/friendly', 'old_nickname')
]
AX_SCHEMA_ATTRS = [
# Request both the full name and first/last components since some
# providers offer one but not the other.
('http://axschema.org/contact/email', 'email'),
('http://axschema.org/namePerson', 'fullname'),
('http://axschema.org/namePerson/first', 'first_name'),
('http://axschema.org/namePerson/last', 'last_name'),
('http://axschema.org/namePerson/friendly', 'nickname'),
]
SREG_ATTR = [
('email', 'email'),
('fullname', 'fullname'),
('nickname', 'nickname')
]
OPENID_ID_FIELD = 'openid_identifier'
SESSION_NAME = 'openid'
class OpenIdAuth(BaseAuth):
"""Generic OpenID authentication backend"""
name = 'openid'
URL = None
USERNAME_KEY = 'username'
def get_user_id(self, details, response):
"""Return user unique id provided by service"""
return response.identity_url
def get_ax_attributes(self):
attrs = self.setting('AX_SCHEMA_ATTRS', [])
if attrs and self.setting('IGNORE_DEFAULT_AX_ATTRS', True):
return attrs
return attrs + AX_SCHEMA_ATTRS + OLD_AX_ATTRS
def get_sreg_attributes(self):
return self.setting('SREG_ATTR') or SREG_ATTR
def values_from_response(self, response, sreg_names=None, ax_names=None):
"""Return values from SimpleRegistration response or
AttributeExchange response if present.
@sreg_names and @ax_names must be a list of name and aliases
for such name. The alias will be used as mapping key.
"""
values = {}
# Use Simple Registration attributes if provided
if sreg_names:
resp = sreg.SRegResponse.fromSuccessResponse(response)
if resp:
values.update((alias, resp.get(name) or '')
for name, alias in sreg_names)
# Use Attribute Exchange attributes if provided
if ax_names:
resp = ax.FetchResponse.fromSuccessResponse(response)
if resp:
for src, alias in ax_names:
name = alias.replace('old_', '')
values[name] = resp.getSingle(src, '') or values.get(name)
return values
def get_user_details(self, response):
"""Return user details from an OpenID request"""
values = {'username': '', 'email': '', 'fullname': '',
'first_name': '', 'last_name': ''}
# update values using SimpleRegistration or AttributeExchange
# values
values.update(self.values_from_response(
response, self.get_sreg_attributes(), self.get_ax_attributes()
))
fullname = values.get('fullname') or ''
first_name = values.get('first_name') or ''
last_name = values.get('last_name') or ''
email = values.get('email') or ''
if not fullname and first_name and last_name:
fullname = first_name + ' ' + last_name
elif fullname:
try:
first_name, last_name = fullname.rsplit(' ', 1)
except ValueError:
last_name = fullname
username_key = self.setting('USERNAME_KEY') or self.USERNAME_KEY
values.update({'fullname': fullname, 'first_name': first_name,
'last_name': last_name,
'username': values.get(username_key) or
(first_name.title() + last_name.title()),
'email': email})
return values
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
"""Return defined extra data names to store in extra_data field.
Settings will be inspected to get more values names that should be
stored on extra_data field. Setting name is created from current
backend name (all uppercase) plus _SREG_EXTRA_DATA and
_AX_EXTRA_DATA because values can be returned by SimpleRegistration
or AttributeExchange schemas.
Both list must be a value name and an alias mapping similar to
SREG_ATTR, OLD_AX_ATTRS or AX_SCHEMA_ATTRS
"""
sreg_names = self.setting('SREG_EXTRA_DATA')
ax_names = self.setting('AX_EXTRA_DATA')
values = self.values_from_response(response, sreg_names, ax_names)
from_details = super(OpenIdAuth, self).extra_data(
user, uid, {}, details, *args, **kwargs
)
values.update(from_details)
return values
def auth_url(self):
"""Return auth URL returned by service"""
openid_request = self.setup_request(self.auth_extra_arguments())
# Construct completion URL, including page we should redirect to
return_to = self.strategy.absolute_uri(self.redirect_uri)
return openid_request.redirectURL(self.trust_root(), return_to)
def auth_html(self):
"""Return auth HTML returned by service"""
openid_request = self.setup_request(self.auth_extra_arguments())
return_to = self.strategy.absolute_uri(self.redirect_uri)
form_tag = {'id': 'openid_message'}
return openid_request.htmlMarkup(self.trust_root(), return_to,
form_tag_attrs=form_tag)
def trust_root(self):
"""Return trust-root option"""
return self.setting('OPENID_TRUST_ROOT') or \
self.strategy.absolute_uri('/')
def continue_pipeline(self, *args, **kwargs):
"""Continue previous halted pipeline"""
response = self.consumer().complete(dict(self.data.items()),
self.strategy.absolute_uri(
self.redirect_uri
))
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def auth_complete(self, *args, **kwargs):
"""Complete auth process"""
response = self.consumer().complete(dict(self.data.items()),
self.strategy.absolute_uri(
self.redirect_uri
))
self.process_error(response)
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def process_error(self, data):
if not data:
raise AuthException(self, 'OpenID relying party endpoint')
elif data.status == FAILURE:
raise AuthFailed(self, data.message)
elif data.status == CANCEL:
raise AuthCanceled(self)
elif data.status != SUCCESS:
raise AuthUnknownError(self, data.status)
def setup_request(self, params=None):
"""Setup request"""
request = self.openid_request(params)
# Request some user details. Use attribute exchange if provider
# advertises support.
if request.endpoint.supportsType(ax.AXMessage.ns_uri):
fetch_request = ax.FetchRequest()
# Mark all attributes as required, Google ignores optional ones
for attr, alias in self.get_ax_attributes():
fetch_request.add(ax.AttrInfo(attr, alias=alias,
required=True))
else:
fetch_request = sreg.SRegRequest(
optional=list(dict(self.get_sreg_attributes()).keys())
)
request.addExtension(fetch_request)
# Add PAPE Extension for if configured
preferred_policies = self.setting(
'OPENID_PAPE_PREFERRED_AUTH_POLICIES'
)
preferred_level_types = self.setting(
'OPENID_PAPE_PREFERRED_AUTH_LEVEL_TYPES'
)
max_age = self.setting('OPENID_PAPE_MAX_AUTH_AGE')
if max_age is not None:
try:
max_age = int(max_age)
except (ValueError, TypeError):
max_age = None
if max_age is not None or preferred_policies or preferred_level_types:
pape_request = pape.Request(
max_auth_age=max_age,
preferred_auth_policies=preferred_policies,
preferred_auth_level_types=preferred_level_types
)
request.addExtension(pape_request)
return request
def consumer(self):
"""Create an OpenID Consumer object for the given Django request."""
if not hasattr(self, '_consumer'):
self._consumer = self.create_consumer(self.strategy.openid_store())
return self._consumer
def create_consumer(self, store=None):
return Consumer(self.strategy.openid_session_dict(SESSION_NAME), store)
def uses_redirect(self):
"""Return true if openid request will be handled with redirect or
HTML content will be returned.
"""
return self.openid_request().shouldSendRedirect()
def openid_request(self, params=None):
"""Return openid request"""
try:
return self.consumer().begin(url_add_parameters(self.openid_url(),
params))
except DiscoveryFailure as err:
raise AuthException(self, 'OpenID discovery error: {0}'.format(
err
))
def openid_url(self):
"""Return service provider URL.
This base class is generic accepting a POST parameter that specifies
provider URL."""
if self.URL:
return self.URL
elif OPENID_ID_FIELD in self.data:
return self.data[OPENID_ID_FIELD]
else:
raise AuthMissingParameter(self, OPENID_ID_FIELD)
class OpenIdConnectAssociation(object):
""" Use Association model to save the nonce by force. """
def __init__(self, handle, secret='', issued=0, lifetime=0, assoc_type=''):
self.handle = handle # as nonce
self.secret = secret.encode() # not use
self.issued = issued # not use
self.lifetime = lifetime # not use
self.assoc_type = assoc_type # as state
class OpenIdConnectAuth(BaseOAuth2):
"""
Base class for Open ID Connect backends.
Currently only the code response type is supported.
"""
ID_TOKEN_ISSUER = None
ID_TOKEN_MAX_AGE = 600
DEFAULT_SCOPE = ['openid']
EXTRA_DATA = ['id_token', 'refresh_token', ('sub', 'id')]
# Set after access_token is retrieved
id_token = None
def auth_params(self, state=None):
"""Return extra arguments needed on auth process."""
params = super(OpenIdConnectAuth, self).auth_params(state)
params['nonce'] = self.get_and_store_nonce(
self.AUTHORIZATION_URL, state
)
return params
def auth_complete_params(self, state=None):
params = super(OpenIdConnectAuth, self).auth_complete_params(state)
        # Add a nonce to the request to help counter CSRF
params['nonce'] = self.get_and_store_nonce(
self.ACCESS_TOKEN_URL, state
)
return params
def get_and_store_nonce(self, url, state):
# Create a nonce
nonce = self.strategy.random_string(64)
# Store the nonce
association = OpenIdConnectAssociation(nonce, assoc_type=state)
self.strategy.storage.association.store(url, association)
return nonce
def get_nonce(self, nonce):
try:
return self.strategy.storage.association.get(
server_url=self.ACCESS_TOKEN_URL,
handle=nonce
)[0]
except IndexError:
pass
def remove_nonce(self, nonce_id):
self.strategy.storage.association.remove([nonce_id])
def validate_and_return_id_token(self, id_token):
"""
Validates the id_token according to the steps at
http://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation.
"""
client_id, _client_secret = self.get_key_and_secret()
decode_kwargs = {
'algorithms': ['HS256'],
'audience': client_id,
'issuer': self.ID_TOKEN_ISSUER,
'key': self.setting('ID_TOKEN_DECRYPTION_KEY'),
'options': {
'verify_signature': True,
'verify_exp': True,
'verify_iat': True,
'verify_aud': True,
'verify_iss': True,
'require_exp': True,
'require_iat': True,
},
}
decode_kwargs.update(self.setting('ID_TOKEN_JWT_DECODE_KWARGS', {}))
try:
# Decode the JWT and raise an error if the secret is invalid or
# the response has expired.
id_token = JWT.decode(id_token, **decode_kwargs)
except MalformedJWT as err:
raise AuthTokenError(self, err)
# Verify the token was issued within a specified amount of time
iat_leeway = self.setting('ID_TOKEN_MAX_AGE', self.ID_TOKEN_MAX_AGE)
utc_timestamp = timegm(datetime.datetime.utcnow().utctimetuple())
if id_token['iat'] < (utc_timestamp - iat_leeway):
raise AuthTokenError(self, 'Incorrect id_token: iat')
# Validate the nonce to ensure the request was not modified
nonce = id_token.get('nonce')
if not nonce:
raise AuthTokenError(self, 'Incorrect id_token: nonce')
nonce_obj = self.get_nonce(nonce)
if nonce_obj:
self.remove_nonce(nonce_obj.id)
else:
raise AuthTokenError(self, 'Incorrect id_token: nonce')
return id_token
def request_access_token(self, *args, **kwargs):
"""
Retrieve the access token. Also, validate the id_token and
store it (temporarily).
"""
response = self.get_json(*args, **kwargs)
self.id_token = self.validate_and_return_id_token(response['id_token'])
return response
| merutak/python-social-auth | social/backends/open_id.py | Python | bsd-3-clause | 14,789 |
"""
SNMPMixin for objects that can be accessed with SNMP
"""
import clusto
from clusto.drivers.resourcemanagers import IPManager
# Get rid of pesky errors about missing routes and tcpdump
import logging
runtime = logging.getLogger('scapy.runtime')
runtime.setLevel(logging.ERROR)
loading = logging.getLogger('scapy.loading')
loading.setLevel(logging.ERROR)
from scapy.all import SNMP, SNMPget, SNMPset, SNMPnext, SNMPvarbind
from socket import socket, AF_INET, SOCK_DGRAM
class SNMPMixin:
"""Provide SNMP capabilities to devices
"""
def _snmp_connect(self, port=161):
ip = IPManager.get_ips(self)
if not ip:
raise ValueError('Device %s does not have an IP' % self.name)
ip = ip[0]
community = self.attr_values(key='snmp', subkey='community', merge_container_attrs=True)
if not community:
raise ValueError('Device %s does not have an SNMP community attribute' % self.name)
sock = socket(AF_INET, SOCK_DGRAM)
sock.connect((ip, port))
return (str(community[0]), sock)
def _snmp_get(self, oid):
community, sock = self._snmp_connect()
pdu = SNMPget(varbindlist=[SNMPvarbind(oid=str(oid))])
p = SNMP(community=community, PDU=pdu)
sock.sendall(p.build())
r = SNMP(sock.recv(4096))
return r.PDU.varbindlist[0].value.val
def _snmp_set(self, oid, value):
community, sock = self._snmp_connect()
pdu = SNMPset(varbindlist=[SNMPvarbind(oid=str(oid), value=value)])
p = SNMP(community=community, PDU=pdu)
sock.sendall(p.build())
r = SNMP(sock.recv(4096))
return r
def _snmp_walk(self, oid_prefix):
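        """Yield (oid, value) pairs for every OID under oid_prefix using SNMP GETNEXT requests."""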
community, sock = self._snmp_connect()
nextoid = oid_prefix
while True:
p = SNMP(community=community, PDU=SNMPnext(varbindlist=[SNMPvarbind(oid=nextoid)]))
sock.sendall(p.build())
r = SNMP(sock.recv(4096))
oid = r.PDU.varbindlist[0].oid.val
if oid.startswith(oid_prefix):
yield (oid, r.PDU.varbindlist[0].value.val)
else:
break
nextoid = oid
sock.close()
| rongoro/clusto | src/clusto/drivers/devices/common/snmpmixin.py | Python | bsd-3-clause | 2,220 |
import os
import shutil
import json
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from rest_framework import status
from hs_core import hydroshare
from hs_core.models import BaseResource, ResourceFile
from hs_core.views import create_resource
from hs_core.testing import MockIRODSTestCaseMixin, ViewTestCase
class TestCreateResourceViewFunctions(MockIRODSTestCaseMixin, ViewTestCase):
def setUp(self):
super(TestCreateResourceViewFunctions, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.username = 'john'
self.password = 'jhmypassword'
self.user = hydroshare.create_account(
'john@gmail.com',
username=self.username,
first_name='John',
last_name='Clarson',
superuser=False,
password=self.password,
groups=[]
)
self.odm2_sqlite_file_name = 'ODM2_Multi_Site_One_Variable.sqlite'
self.odm2_sqlite_file = 'hs_app_timeseries/tests/{}'.format(self.odm2_sqlite_file_name)
target_temp_sqlite_file = os.path.join(self.temp_dir, self.odm2_sqlite_file_name)
shutil.copy(self.odm2_sqlite_file, target_temp_sqlite_file)
self.odm2_sqlite_file_obj = open(target_temp_sqlite_file, 'r')
self.odm2_sqlite_invalid_file_name = 'ODM2_invalid.sqlite'
self.odm2_sqlite_invalid_file = 'hs_app_timeseries/tests/{}'.format(
self.odm2_sqlite_invalid_file_name)
target_temp_sqlite_invalid_file = os.path.join(self.temp_dir,
self.odm2_sqlite_invalid_file_name)
shutil.copy(self.odm2_sqlite_invalid_file, target_temp_sqlite_invalid_file)
self.odm2_sqlite_invalid_file_obj = open(target_temp_sqlite_invalid_file, 'r')
def tearDown(self):
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
super(TestCreateResourceViewFunctions, self).tearDown()
def test_create_resource(self):
# here we are testing the create_resource view function
# test with no file upload
post_data = {'resource-type': 'TimeSeriesResource',
'title': 'Test Time Series Resource Creation',
'irods_federated': 'true'
}
url = reverse('create_resource')
request = self.factory.post(url, data=post_data)
request.user = self.user
self.set_request_message_attributes(request)
self.add_session_to_request(request)
response = create_resource(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_content = json.loads(response.content)
self.assertEqual(json_content['status'], 'success')
res_id = json_content['resource_url'].split('/')[3]
self.assertEqual(BaseResource.objects.filter(short_id=res_id).exists(), True)
hydroshare.delete_resource(res_id)
self.assertEqual(BaseResource.objects.count(), 0)
# test with file upload
self.assertEqual(ResourceFile.objects.count(), 0)
post_data = {'resource-type': 'TimeSeriesResource',
'title': 'Test Time Series Resource Creation',
'irods_federated': 'true',
'files': (self.odm2_sqlite_file_name, open(self.odm2_sqlite_file))
}
url = reverse('create_resource')
request = self.factory.post(url, data=post_data)
request.user = self.user
self.set_request_message_attributes(request)
self.add_session_to_request(request)
response = create_resource(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_content = json.loads(response.content)
self.assertEqual(json_content['status'], 'success')
self.assertEqual(json_content['file_upload_status'], 'success')
res_id = json_content['resource_url'].split('/')[3]
self.assertEqual(BaseResource.objects.filter(short_id=res_id).exists(), True)
self.assertEqual(ResourceFile.objects.count(), 1)
ts_resource = BaseResource.objects.filter(short_id=res_id).first()
# check that the resource title got updated due to metadata extraction as part of resource
# creation
self.assertEqual(ts_resource.metadata.title.value,
"Water temperature data from the Little Bear River, UT")
hydroshare.delete_resource(res_id)
def test_create_resource_with_invalid_file(self):
# here we are testing the create_resource view function
self.assertEqual(BaseResource.objects.count(), 0)
self.assertEqual(ResourceFile.objects.count(), 0)
# test with bad sqlite file - this file should not be uploaded
post_data = {'resource-type': 'TimeSeriesResource',
'title': 'Test Time Series Resource Creation',
'irods_federated': 'true',
'files': (self.odm2_sqlite_invalid_file_name,
open(self.odm2_sqlite_invalid_file))
}
url = reverse('create_resource')
request = self.factory.post(url, data=post_data)
request.user = self.user
self.set_request_message_attributes(request)
self.add_session_to_request(request)
response = create_resource(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_content = json.loads(response.content)
self.assertEqual(json_content['status'], 'success')
self.assertEqual(json_content['file_upload_status'], 'error')
res_id = json_content['resource_url'].split('/')[3]
self.assertEqual(BaseResource.objects.filter(short_id=res_id).exists(), True)
# that bad sqlite file was not uploaded
self.assertEqual(ResourceFile.objects.count(), 0)
hydroshare.delete_resource(res_id)
| ResearchSoftwareInstitute/MyHPOM | hs_app_timeseries/tests/views/test_create_timeseries_resource.py | Python | bsd-3-clause | 6,012 |
# -*- coding: utf-8 -*-
from navmazing import NavigateToSibling
from widgetastic.widget import Checkbox, Text, View
from widgetastic_manageiq import Accordion, ManageIQTree
from widgetastic_patternfly import Button, Dropdown, Input
from cfme.base import Server
from cfme.base.login import BaseLoggedInPage
from cfme.base.ui import automate_menu_name
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep
class AutomateCustomizationView(BaseLoggedInPage):
# TODO re-model this so it can be nested as a sidebar instead of inherited
@property
def in_customization(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == automate_menu_name(
self.context['object'].appliance) + ['Customization'])
@property
def is_displayed(self):
return self.in_customization and self.configuration.is_displayed
@View.nested
class provisioning_dialogs(Accordion): # noqa
ACCORDION_NAME = 'Provisioning Dialogs'
tree = ManageIQTree()
@View.nested
class service_dialogs(Accordion): # noqa
ACCORDION_NAME = 'Service Dialogs'
tree = ManageIQTree()
@View.nested
class buttons(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class import_export(Accordion): # noqa
ACCORDION_NAME = 'Import/Export'
tree = ManageIQTree()
configuration = Dropdown('Configuration')
@navigator.register(Server)
class AutomateCustomization(CFMENavigateStep):
VIEW = AutomateCustomizationView
prerequisite = NavigateToSibling('LoggedIn')
def step(self):
self.view.navigation.select(*automate_menu_name(self.obj.appliance) + ['Customization'])
class DialogForm(AutomateCustomizationView):
title = Text('#explorer_title_text')
plus_btn = Dropdown('Add')
label = Input(name='label')
description = Input(name="description")
submit_button = Checkbox(name='chkbx_submit')
cancel_button = Checkbox(name='chkbx_cancel')
class AddDialogView(DialogForm):
add_button = Button("Add")
plus_btn = Dropdown('Add')
@property
def is_displayed(self):
return (
self.in_customization and self.service_dialogs.is_opened and
self.title.text == "Adding a new Dialog [Dialog Information]"
)
class EditDialogView(DialogForm):
element_tree = ManageIQTree('dialog_edit_treebox')
save_button = Button('Save')
reset_button = Button('Reset')
@property
def is_displayed(self):
return (
self.in_customization and self.service_dialogs.is_opened and
self.title.text == "Editing Dialog {}".format(self.label)
)
class TabForm(AddDialogView):
tab_label = Input(name='tab_label')
tab_desc = Input(name="tab_description")
class AddTabView(TabForm):
plus_btn = Dropdown('Add')
@property
def is_displayed(self):
return (
self.in_customization and self.service_dialogs.is_opened and
self.title.text == "Adding a new Dialog [Tab Information]"
)
class BoxForm(AddTabView):
box_label = Input(name='group_label')
box_desc = Input(name="group_description")
class AddBoxView(BoxForm):
"""AddBox View."""
plus_btn = Dropdown('Add')
@property
def is_displayed(self):
return (
self.in_customization and self.service_dialogs.is_opened and
self.title.text == "Adding a new Dialog [Box Information]"
)
| jkandasa/integration_tests | cfme/automate/__init__.py | Python | gpl-2.0 | 3,556 |
import os, sys
from pdb import set_trace
import pandas as pd
import fnmatch
def recursive_glob(treeroot, pattern):
results = []
for base, dirs, files in os.walk(treeroot):
goodfiles = fnmatch.filter(files, pattern)
results.extend(os.path.join(base, f) for f in goodfiles)
return results
def change_class_to_bool(fname):
old = pd.read_csv(fname)
old.loc[old[old.columns[-1]] > 0, old.columns[-1]] = "T"
old.loc[old[old.columns[-1]] <= 0, old.columns[-1]] = "F"
old.to_csv(fname, index=False)
if __name__ == "__main__":
res = recursive_glob(".", "*.csv")
for fname in res:
change_class_to_bool(fname)
set_trace()
| bellwethers-in-se/defects | src/data/mccabe/renamehdr.py | Python | mit | 682 |
""" Views for the layout application """
from django.shortcuts import render, HttpResponseRedirect
import django
def home(request):
""" Default view for the root """
djangoversion = django.get_version()
return render(request, 'layout/home.html',{'djangoversion':djangoversion })
def profile(request):
return render(request, "user/profile.html" )
| allox/django-base-template | layout/views.py | Python | bsd-3-clause | 370 |
import time
from datetime import timedelta
from typing import List
from treeherder.config import settings
from treeherder.perf.sheriffing_criteria import (
EngineerTractionFormula,
FixRatioFormula,
CriteriaTracker,
TotalAlertsFormula,
)
from treeherder.perf.sheriffing_criteria import criteria_tracking
from mo_times import Duration
from django.core.management.base import BaseCommand
def pretty_enumerated(formulas: List[str]) -> str:
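    """Render the names as e.g. 'a, b & c' -- comma-separated with '&' before the last item."""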
comma = ', '
return ' & '.join(comma.join(formulas).rsplit(comma, maxsplit=1))
class Command(BaseCommand):
ENGINEER_TRACTION = 'engineer traction'
FIX_RATIO = 'fix ratio'
FORMULAS = [ENGINEER_TRACTION, FIX_RATIO] # register new formulas here
help = f'''
Compute the {pretty_enumerated(FORMULAS)} for multiple framework/suite combinations,
according to the Perf Sheriffing Criteria specification.\nRequires "{criteria_tracking.CRITERIA_FILENAME}" to be provided for both program input & output.
'''
INITIAL_PROMPT_MSG = 'Computing Perf Sheriffing Criteria... (may take some time)'
PRECISION = '.1f'
def add_arguments(self, parser):
parser.add_argument(
'--quantifying-period',
'-qp',
default=settings.QUANTIFYING_PERIOD,
type=self.parse_time_interval,
help='''How far back to look for gathering formula's input data, from now.
Expressed in a humanized form.
Examples: 1year, 6month, 2weeks etc.
More details about accepted forms: https://github.com/mozilla/ActiveData/blob/dev/docs/jx_time.md#duration''',
metavar='QUANTIFYING_PERIOD',
)
parser.add_argument(
'--bug-cooldown',
'-bc',
default=settings.BUG_COOLDOWN_TIME,
type=self.parse_time_interval,
help='''How old Bugzilla bugs should be to be taken into consideration.
Expressed in a humanized form.
Examples: 1year, 6month, 2weeks etc.
More details about accepted forms: https://github.com/mozilla/ActiveData/blob/dev/docs/jx_time.md#duration''',
metavar='BUG_COOLDOWN',
)
parser.add_argument(
'--multiprocessing',
'-mp',
action='store_true',
help='''Experimental! Whether to use a process pool instead of a thread pool''',
)
subparser = parser.add_subparsers(dest='individually')
individual_parser = subparser.add_parser(
'individually',
help='Compute perf sheriffing criteria for individual framework/suite combo (no CSV file required)',
)
individual_parser.add_argument('framework', action='store')
individual_parser.add_argument('suite', action='store')
individual_parser.add_argument('--test', default=None)
def handle(self, *args, **options):
if options.get('individually'):
return self._handle_individually(options)
quant_period = options['quantifying_period']
bug_cooldown = options['bug_cooldown']
multiprocessed = options['multiprocessing']
init_params = (None, quant_period, bug_cooldown)
formula_map = {
'EngineerTraction': EngineerTractionFormula(*init_params),
'FixRatio': FixRatioFormula(*init_params),
'TotalAlerts': TotalAlertsFormula(quant_period),
}
tracker = CriteriaTracker(formula_map, multiprocessed=multiprocessed)
tracker.load_records()
start = time.time()
tracker.update_records()
duration = time.time() - start
print(f'{self.INITIAL_PROMPT_MSG}', end='')
for record in tracker:
print(record)
print(f"Took {duration:.1f} seconds")
def _handle_individually(self, options):
framework = options['framework']
suite = options['suite']
test = options['test']
quant_period = options['quantifying_period']
bug_cooldown = options['bug_cooldown']
init_params = (None, quant_period, bug_cooldown)
targetted_test = (framework, suite, test)
engineer_traction = EngineerTractionFormula(*init_params)
fix_ratio = FixRatioFormula(*init_params)
print(f'\r{self.INITIAL_PROMPT_MSG}', end='')
compute_start = time.time()
eng_traction_result = engineer_traction(*targetted_test)
fix_ratio_result = fix_ratio(*targetted_test)
compute_duration = time.time() - compute_start
# turn into regular percentages
eng_traction_result *= 100
fix_ratio_result *= 100
# display results (inline)
test_moniker = ' '.join(filter(None, (suite, test)))
title = f'Perf Sheriffing Criteria for {framework} - {test_moniker}'
big_underline = '-' * len(title)
# & results headers
eng_traction_head = self.ENGINEER_TRACTION.capitalize()
fix_ratio_head = self.FIX_RATIO.capitalize()
justify_head = self.__get_head_justification(eng_traction_head, fix_ratio_head)
# let's update 1st prompt line
print(f"\r{' ' * len(self.INITIAL_PROMPT_MSG)}", end='')
print(
f"\rComputing Perf Sheriffing Criteria... (took {compute_duration:{self.PRECISION}} seconds)"
)
# display title
print(big_underline)
print(title)
print(big_underline)
# & actual results
print(f'{eng_traction_head:<{justify_head}}: {eng_traction_result:{self.PRECISION}}%')
print(f'{fix_ratio_head:<{justify_head}}: {fix_ratio_result:{self.PRECISION}}%')
print(big_underline)
def __get_head_justification(self, *result_heads):
return max([len(head) for head in result_heads]) + 1
def parse_time_interval(self, interval: str) -> timedelta:
duration = Duration(interval)
return timedelta(seconds=duration.total_seconds())
| jmaher/treeherder | treeherder/perf/management/commands/compute_criteria_formulas.py | Python | mpl-2.0 | 5,956 |
import RPi.GPIO as GPIO
class raspio:
def __init__(self, setup):
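        # `setup` maps item names to dicts of the form
        # {'pin': <board pin number>, 'ES': GPIO.IN or GPIO.OUT, 'initial': <optional initial level>}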
# 5v 5v GN 14 15 18 GN 23 24 GN 25 08 07 DC GN 12 GN 16 21 21
#-----------------------------------------------------------------#
# 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38 40 #
# 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35 37 39 #
#-----------------------------------------------------------------#
# 3.3v 02 03 04 GN 17 27 22 3.3v 10 09 11 GN DC 05 06 13 19 26 GN
self.mPins = {}
self.mPins['pins'] = [8,10,12,16,18,22,24,26,32,36,38,40,37,35,33,31,29,23,21,19,15,13,11,7,5,3]
self.mPins['ground'] = [6, 14, 20, 30, 34 , 9, 25, 39]
self.mPins['DNC'] = [27, 28]
# GPIO.BOARD || GPIO.BCM
self.mBoard = GPIO.BOARD
self.mSetup = setup
self.setup()
def setup(self):
GPIO.setmode(self.mBoard)
for item in self.mSetup.keys():
pin = self.mSetup[item]['pin']
es = self.mSetup[item]['ES']
print "Setting pin",pin, "ES", es
if 'initial' in self.mSetup[item].keys():
initial = self.mSetup[item]['initial']
GPIO.setup(pin, es, initial=initial)
else:
GPIO.setup(pin, es)
def getPin(self, item):
pin = self.mSetup[item]['pin']
return GPIO.input(pin)
| parnedo/rasp-home | temperature_node/raspio.py | Python | mit | 1,414 |
__author__ = 'bptripp'
from cnn_stimuli import get_image_file_list
import cPickle as pickle
import time
import numpy as np
import matplotlib.pyplot as plt
from alexnet import preprocess, load_net, load_vgg
def excess_kurtosis(columns):
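    """Return the excess kurtosis of each column of `columns` (zero for a Gaussian distribution)."""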
m = np.mean(columns, axis=0)
sd = np.std(columns, axis=0)
result = np.zeros(columns.shape[1])
for i in range(columns.shape[1]):
column = columns[:,i]
d = column - m[i]
result[i] = np.sum(d**4) / columns.shape[0] / sd[i]**4 - 3
return result
# Copying these two functions from Salman's repo,
# ObjectSelectivity/sparseness.py and ObjectSelectivity/kurtosis_selectivity_profile.py
def calculate_kurtosis(rates_per_object):
"""
Given an array of firing rates of the neuron to objects, return the sparseness metric
Kurtosis (actually excess kurtosis) of the neuron as defined in:
[1] Lehky, S. R., Kiani, R., Esteky, H., & Tanaka, K. (2011). Statistics of
visual responses in primate inferotemporal cortex to object stimuli.
Journal of Neurophysiology, 106(3), 1097-117.
Kurtosis = (sum (Ri - Rmean)**4 / (n*sigma**4)) - 3
:param rates_per_object: array of firing rates of the neuron to multiple objects.
:return: kurtosis sparseness.
This is defined outside the class as it is used by other selectivity profiles.
"""
n = np.float(rates_per_object.shape[0])
rates_mean = np.mean(rates_per_object)
rates_sigma = np.std(rates_per_object)
kurtosis = np.sum((rates_per_object - rates_mean)**4) / (n * rates_sigma**4) - 3
# kurtosis2= np.sum((rates_per_object - rates_mean)**4) / n \
# / (np.sum((rates_per_object - rates_mean)**2) / n)** 2 - 3
return kurtosis
def activity_fraction(rates_per_object):
R = rates_per_object
n = len(rates_per_object)
return n/(n-1) * ( 1 - np.sum(R/n)**2 / np.sum(R**2/n) )
# num = 1 - (np.sum(R)/n)**2 / np.sum(R**2)/n
# den = 1 - 1/n
# return num / den
def plot_selectivity_and_sparseness(r_mat, font_size=10):
# plt.figure(figsize=)
# fig = plt.figure()
# print(fig.get_size_inches())
# f, ax_arr = plt.subplots(2, 1, sharex=True, figsize=(3.5,5))
f, ax_arr = plt.subplots(2, 1, sharex=False, figsize=(3,5))
# Single Neuron selectivities
n_neurons = r_mat.shape[0]
n_objs = r_mat.shape[1]
selectivities = np.zeros(n_neurons)
sparsenesses = np.zeros(n_objs)
for n_idx in np.arange(n_neurons):
rates = r_mat[n_idx, :]
selectivities[n_idx] = calculate_kurtosis(rates)
for o_idx in np.arange(n_objs):
rates = r_mat[:, o_idx]
sparsenesses[o_idx] = calculate_kurtosis(rates)
print(np.mean(selectivities))
print(np.mean(sparsenesses))
print('min selectivity: ' + str(np.min(selectivities)))
print('max selectivity: ' + str(np.max(selectivities)))
# Plot selectivities ------------------------------------------------
ax_arr[0].hist(np.clip(selectivities, -10, 25), bins=np.arange(-5, 850, step=1), color='red')
ax_arr[0].set_ylabel('frequency', fontsize=font_size)
ax_arr[0].set_xlabel('kurtosis', fontsize=font_size)
ax_arr[0].tick_params(axis='x', labelsize=font_size)
ax_arr[0].tick_params(axis='y', labelsize=font_size)
# ax_arr[0].set_xlim([0.1, 850])
ax_arr[0].annotate('mean=%0.2f' % np.mean(selectivities),
xy=(0.55, 0.98),
xycoords='axes fraction',
fontsize=font_size,
horizontalalignment='left',
verticalalignment='top')
ax_arr[0].annotate('med.=%0.2f' % np.median(selectivities),
xy=(0.55, 0.88),
xycoords='axes fraction',
fontsize=font_size,
horizontalalignment='left',
verticalalignment='top')
ax_arr[0].annotate('n=%d' % len(selectivities),
xy=(0.55, 0.78),
xycoords='axes fraction',
fontsize=font_size,
horizontalalignment='left',
verticalalignment='top')
ax_arr[0].annotate('single-neuron',
xy=(0.01, 0.98),
xycoords='axes fraction',
fontsize=font_size,
horizontalalignment='left',
verticalalignment='top')
# ax_arr[0].set_ylim([0, 40])
# ax_arr[0].set_xlim([0, 200])
# ax_arr[0].set_ylim([0, 130])
# ax_arr[0].set_xscale('log')
# Plot sparsenesses ------------------------------------------------
ax_arr[1].hist(np.clip(sparsenesses, -10, 60), bins=np.arange(-5, 850, step=3))
ax_arr[1].set_ylabel('frequency', fontsize=font_size)
ax_arr[1].set_xlabel('kurtosis', fontsize=font_size)
ax_arr[1].tick_params(axis='x', labelsize=font_size)
ax_arr[1].tick_params(axis='y', labelsize=font_size)
ax_arr[1].annotate('mean=%0.2f' % np.mean(sparsenesses),
xy=(0.55, 0.98),
xycoords='axes fraction',
fontsize=font_size,
horizontalalignment='left',
verticalalignment='top')
ax_arr[1].annotate('med.=%0.2f' % np.median(sparsenesses),
xy=(0.55, 0.88),
xycoords='axes fraction',
fontsize=font_size,
horizontalalignment='left',
verticalalignment='top')
ax_arr[1].annotate('n=%d' % len(sparsenesses),
xy=(0.55, 0.78),
xycoords='axes fraction',
fontsize=font_size,
horizontalalignment='left',
verticalalignment='top')
ax_arr[1].annotate('population',
xy=(0.01, 0.98),
xycoords='axes fraction',
fontsize=font_size,
horizontalalignment='left',
verticalalignment='top')
ax_arr[0].set_xlim([-2, 26])
ax_arr[1].set_xlim([-2, 62])
# ax_arr[1].set_ylim([0, 300])
plt.tight_layout()
# ax_arr[1].set_xscale('log')
if False:
with open('face-preference-alexnet-0.pkl', 'rb') as file:
alexnet0 = pickle.load(file)
with open('face-preference-alexnet-1.pkl', 'rb') as file:
alexnet1 = pickle.load(file)
with open('face-preference-alexnet-2.pkl', 'rb') as file:
alexnet2 = pickle.load(file)
with open('face-preference-vgg-0.pkl', 'rb') as file:
vgg0 = pickle.load(file)
with open('face-preference-vgg-1.pkl', 'rb') as file:
vgg1 = pickle.load(file)
with open('face-preference-vgg-2.pkl', 'rb') as file:
vgg2 = pickle.load(file)
edges = np.linspace(-5, 5, 21)
plt.figure(figsize=(8,4.5))
plt.subplot(2,3,1)
plt.hist(alexnet2, edges)
plt.ylabel('AlexNet Unit Count', fontsize=16)
plt.title('output-2', fontsize=16)
plt.subplot(2,3,2)
plt.hist(alexnet1, edges)
plt.title('output-1', fontsize=16)
plt.subplot(2,3,3)
plt.hist(alexnet0, edges)
plt.title('output', fontsize=16)
plt.subplot(2,3,4)
plt.hist(vgg2, edges, color='g')
plt.ylabel('VGG Unit Count', fontsize=16)
plt.subplot(2,3,5)
plt.hist(vgg1, edges, color='g')
plt.xlabel('Preference for Face Images', fontsize=16)
plt.subplot(2,3,6)
plt.hist(vgg0, edges, color='g')
plt.tight_layout(pad=0.05)
plt.savefig('../figures/selectivity-faces.eps')
plt.show()
if False:
use_vgg = True
remove_level = 2
if use_vgg:
model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_level)
else:
model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_level)
image_files = get_image_file_list('./images/lehky-processed/', 'png', with_path=True)
im = preprocess(image_files, use_vgg=use_vgg)
print(image_files)
mainly_faces = [197]
mainly_faces.extend(range(170, 178))
mainly_faces.extend(range(181, 196))
mainly_faces.extend(range(203, 214))
mainly_faces.extend(range(216, 224))
faces_major = [141, 142, 165, 169, 179, 196, 214, 215, 271]
faces_major.extend(range(144, 147))
faces_major.extend(range(157, 159))
faces_present = [131, 143, 178, 180, 198, 230, 233, 234, 305, 306, 316, 372, 470]
faces_present.extend(range(134, 141))
faces_present.extend(range(147, 150))
faces_present.extend(range(155, 157))
faces_present.extend(range(161, 165))
faces_present.extend(range(365, 369))
faces_present.extend(faces_major)
faces_present.extend(mainly_faces)
faces_ind = []
for i in range(len(image_files)):
for j in range(len(mainly_faces)):
if str(mainly_faces[j]) + '.' in image_files[i]:
faces_ind.append(i)
no_faces_ind = []
for i in range(len(image_files)):
has_face = False
for j in range(len(faces_present)):
if str(faces_present[j]) + '.' in image_files[i]:
has_face = True
if not has_face:
no_faces_ind.append(i)
# print(faces_ind)
# print(no_faces_ind)
start_time = time.time()
out = model.predict(im)
print(out.shape)
f = out[faces_ind,:]
nf = out[no_faces_ind,:]
print(f.shape)
print(nf.shape)
face_preference = np.mean(f, axis=0) - np.mean(nf, axis=0)
vf = np.var(f, axis=0) + 1e-3 # small constant in case zero variance due to lack of response
vnf = np.var(nf, axis=0) + 1e-3
d_prime = face_preference / np.sqrt((vf + vnf)/2)
network_name = 'vgg' if use_vgg else 'alexnet'
with open('face-preference-' + network_name + '-' + str(remove_level) + '.pkl', 'wb') as file:
pickle.dump(d_prime, file)
print(d_prime)
plt.hist(d_prime)
plt.show()
if True:
use_vgg = False
remove_level = 1
if use_vgg:
model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_level)
else:
model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_level)
# model = load_net(weights_path='../weights/alexnet_weights.h5')
image_files = get_image_file_list('./images/lehky-processed/', 'png', with_path=True)
im = preprocess(image_files, use_vgg=use_vgg)
start_time = time.time()
out = model.predict(im)
print('prediction time: ' + str(time.time() - start_time))
# with open('lehky.pkl', 'wb') as file:
# pickle.dump(out, file)
# with open('lehky.pkl', 'rb') as file:
# out = pickle.load(file)
n = 674
# use first n or n with greatest responses
if False:
rect = np.maximum(0, out[:,:n])
else:
maxima = np.max(out, axis=0)
ind = np.zeros(n, dtype=int)
c = 0
i = 0
while c < n:
if maxima[i] > 2:
ind[c] = i
c = c + 1
i = i + 1
# ind = (-maxima).argsort()[:n]
rect = np.maximum(0, out[:,ind])
selectivity = excess_kurtosis(rect)
sparseness = excess_kurtosis(rect.T)
print(np.mean(selectivity))
print(np.mean(sparseness))
print(np.max(selectivity))
print(np.max(sparseness))
plot_selectivity_and_sparseness(rect.T, 11)
network_name = 'vgg' if use_vgg else 'alexnet'
plt.savefig('../figures/selectivity-' + network_name + '-' + str(remove_level) + '-talk.eps')
plt.show()
if False:
plt.figure(figsize=(4,3.8))
plt.scatter(3.5, 12.51, c='k', marker='x', s=40, label='IT') # from Lehky et al. Fig 4A and 4B
selectivity_alexnet = [10.53, 28.59, 31.44]
sparseness_alexnet = [4.04, 8.85, 6.61]
selectivity_vgg = [26.79, 14.44, 34.65]
sparseness_vgg = [6.59, 3.40, 3.54]
plt.scatter([10.53, 28.59, 31.44], [4.04, 8.85, 6.61], c='b', marker='o', s=30, label='Alexnet')
plt.scatter([26.79, 14.44, 34.65], [6.59, 3.40, 3.54], c='g', marker='s', s=45, label='VGG-16')
plt.plot([0, 40], [0, 40], 'k')
plt.xlim([0,38])
plt.ylim([0,38])
gap = 0.4
plt.text(3.5+gap, 9.61+gap+.05, 'IT')
plt.text(selectivity_alexnet[0]+gap, sparseness_alexnet[0]+gap, 'out')
plt.text(selectivity_alexnet[1]+gap, sparseness_alexnet[1]+gap, 'out-1')
plt.text(selectivity_alexnet[2]+gap, sparseness_alexnet[2]+gap, 'out-2')
plt.text(selectivity_vgg[0]+gap, sparseness_vgg[0]+gap, 'out')
plt.text(selectivity_vgg[1]+gap, sparseness_vgg[1]+gap, 'out-1')
plt.text(selectivity_vgg[2]+gap, sparseness_vgg[2]+gap, 'out-2')
plt.xlabel('Selectivity')
plt.ylabel('Sparseness')
plt.tight_layout()
plt.savefig('../figures/cnn-selectivity.eps')
plt.show()
if False:
r_mat = rect.T
n_neurons = r_mat.shape[0]
activity_fractions = np.zeros(n_neurons)
for n_idx in np.arange(n_neurons):
rates = r_mat[n_idx, :]
activity_fractions[n_idx] = activity_fraction(rates)
print(activity_fractions)
plt.plot(activity_fractions)
plt.show()
rate = np.mean(rect,0)
# with open('activity-fraction.pkl', 'wb') as file:
# pickle.dump((ind, activity_fractions), file)
# bins = np.linspace(0, 1000, 501)
# plt.figure()
# plt.subplot(2,1,1)
# plt.hist(selectivity, bins)
# # plt.xlim([0, 100])
# # plt.ylim([0, 100])
# plt.subplot(2,1,2)
# plt.hist(sparseness, bins)
# # plt.xlim([0, 100])
# # plt.ylim([0, 100])
# plt.show()
#
    # note: there is a NaN because one unit's kurtosis is much less than the Gaussian value
# print(np.corrcoef(np.mean(rect,0), np.log(selectivity+1)))
# plt.figure()
# plt.scatter(np.mean(rect,0), np.log(selectivity+1))
# plt.gca().set_xscale('log')
# plt.gca().set_yscale('log')
# plt.show()
#
# rate = np.mean(rect,0)
# with open('rate-vs-selectivity.pkl', 'wb') as file:
# pickle.dump((ind, rate, selectivity), file)
| bptripp/it-cnn | tuning/selectivity.py | Python | mit | 13,980 |
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!!
VERSION = '1.25.0.dev0'
| pszemus/grpc | src/python/grpcio_testing/grpc_version.py | Python | apache-2.0 | 702 |
#!/usr/bin/env python
import sys
import math
import string
import numpy
import scipy.io
from matplotlib.pylab import *
from fractionS import *
N_A = 6.0221367e23
E2 = 5
V = 1e-15
def plot_theory( K ):
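    """Plot the theoretical fraction_Sp(E1, E2, K) curve against E1/E2 on a log axis (intent inferred from the code; fraction_Sp is defined in fractionS)."""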
N = 1000
minE1 = 0.1
maxE1 = 100.
e1array = numpy.mgrid[minE1:maxE1:(maxE1-minE1)/N]
farray = [ fraction_Sp( E1, E2, K ) for E1 in e1array ]
farray = numpy.array( farray )
#print farray
semilogx( e1array/E2, farray, label='K = %f' % K )
def file_mean( filename, skip ):
ycolumns = [2,]
#ycolumns = [2,6]
#ycolumns = [3,5]
#ycolumns = [2,6,3,5]
data = load( filename )
x = data[:,0]
y = data[:,ycolumns[0]]
start = x.searchsorted( skip )
if len(x)<=start:
return None
x = x[start:]
y = y[start:]
#print x[-1]
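    # time-weighted average: weight each value by the interval until the next sample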
xdiff = x[1:] - x[:-1]
yscaled = y[:-1] * xdiff
yscaledmean = yscaled.sum() / ( x[-1] - x[0] )
print yscaledmean, y.mean()
#return y.mean()
return yscaledmean
import glob
import os
S_tot = 300.0
model = 'pushpull'
Keq_str = '0.05'
#Keq_str = '5'
#koff_ratio_str = '0.1'
#koff_ratio_str = '0.5'
koff_ratio_str = '0.9'
#koff_ratio_str = '0'
N_P = 10
V = '1e-14'
T = '300'
#mode = 'normal'
#mode = 'localized'
mode = 'single'
skip = float(T) *0.9
dir = sys.argv[1]
outdir = sys.argv[2]
#pattern = sys.argv[2]
#globpattern = pattern.replace('ALL','*') + '_*.dat'
for N_K in range( 40 ):
globpattern = \
string.join( ( model, Keq_str, koff_ratio_str, str(N_K),
str(N_P), V, mode, '*' ), '_' )
print globpattern
filelist = glob.glob( dir + os.sep + globpattern )
if not filelist:
continue
data = []
for file in filelist:
print file
res = file_mean( file, skip )
if res:
data.append( res )
data = numpy.array( data )
print data
data /= S_tot
mean = data.mean()
std_err = data.std()/math.sqrt(len(data))
print mean, std_err
errorbar( float(N_K)/N_P, mean, yerr=std_err, fmt='+' )
plot_theory( float( Keq_str ) )
figtitle = string.join( ( model, Keq_str, koff_ratio_str, 'ALL',
str(N_P), V, mode ),
'_' )
title( figtitle )
show()
#savefig( outdir + '/' + figtitle + '.png', dpi=80 )
| gfrd/gfrd | samples/pushpull/plot.py | Python | gpl-2.0 | 2,295 |
import os
from subprocess import check_output
import imageio
import numpy as np
import PIL.Image
import utils.exif_helper as ehelp
from utils.exposure_helper import calculate_ev100_from_metadata
########## Slightly modified version of LLFF data loading code
########## see https://github.com/Fyusion/LLFF for original
def _minify(basedir, factors=[], resolutions=[]):
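    """Create downsampled copies of the images/ and masks/ folders (images_{N}, masks_{N}, ...) with ImageMagick's mogrify, skipping the work if they already exist."""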
needtoload = False
for r in factors:
imgdir = os.path.join(basedir, "images_{}".format(r))
maskdir = os.path.join(basedir, "masks_{}".format(r))
if not os.path.exists(imgdir) and not os.path.exists(maskdir):
needtoload = True
for r in resolutions:
imgdir = os.path.join(basedir, "images_{}x{}".format(r[1], r[0]))
maskdir = os.path.join(basedir, "masks_{}x{}".format(r[1], r[0]))
if not os.path.exists(imgdir) and not os.path.exists(maskdir):
needtoload = True
if not needtoload:
return
print("Minify needed...")
imgdir = os.path.join(basedir, "images")
maskdir = os.path.join(basedir, "masks")
imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
masks = [os.path.join(maskdir, f) for f in sorted(os.listdir(maskdir))]
imgs = [
f
for f in imgs
if any([f.endswith(ex) for ex in ["JPG", "jpg", "png", "jpeg", "PNG"]])
]
masks = [
f
for f in masks
if any([f.endswith(ex) for ex in ["JPG", "jpg", "png", "jpeg", "PNG"]])
]
imgdir_orig = imgdir
maskdir_orig = maskdir
wd = os.getcwd()
for r in factors + resolutions:
if isinstance(r, int):
nameImg = "images_{}".format(r)
nameMask = "masks_{}".format(r)
resizearg = "{}%".format(100.0 / r)
else:
nameImg = "images_{}x{}".format(r[1], r[0])
nameMask = "masks_{}x{}".format(r[1], r[0])
resizearg = "{}x{}".format(r[1], r[0])
imgdir = os.path.join(basedir, nameImg)
maskdir = os.path.join(basedir, nameMask)
if os.path.exists(imgdir) and os.path.exists(maskdir):
continue
print("Minifying", r, basedir)
os.makedirs(imgdir)
os.makedirs(maskdir)
check_output("cp {}/* {}".format(imgdir_orig, imgdir), shell=True)
check_output("cp {}/* {}".format(maskdir_orig, maskdir), shell=True)
extImg = imgs[0].split(".")[-1]
extMask = masks[0].split(".")[-1]
argsImg = " ".join(
["mogrify", "-resize", resizearg, "-format", "png", "*.{}".format(extImg)]
)
argsMask = " ".join(
["mogrify", "-resize", resizearg, "-format", "png", "*.{}".format(extMask)]
)
os.chdir(imgdir)
check_output(argsImg, shell=True)
os.chdir(wd)
os.chdir(maskdir)
check_output(argsMask, shell=True)
os.chdir(wd)
if extImg != "png":
check_output("rm {}/*.{}".format(imgdir, extImg), shell=True)
print("Removed duplicates")
if extMask != "png":
check_output("rm {}/*.{}".format(maskdir, extMask), shell=True)
print("Removed duplicates")
print("Done")
def ensureNoChannel(x: np.ndarray) -> np.ndarray:
ret = x
if len(x.shape) > 2:
ret = ret[:, :, 0]
return ret
def _load_data(basedir, factor=None, width=None, height=None, load_imgs=True):
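    """Load poses and bounds from poses_bounds.npy plus the (optionally downsampled) images, masks and per-image EV100 exposure values."""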
poses_arr = np.load(os.path.join(basedir, "poses_bounds.npy"))
poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1, 2, 0])
bds = poses_arr[:, -2:].transpose([1, 0])
img0 = [
os.path.join(basedir, "images", f)
for f in sorted(os.listdir(os.path.join(basedir, "images")))
if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
][0]
sh = imageio.imread(img0).shape
ev100s = handle_exif(basedir)
sfx = ""
if factor is not None:
print("Apply factor", factor)
sfx = "_{}".format(factor)
_minify(basedir, factors=[factor])
factor = factor
elif height is not None:
factor = sh[0] / float(height)
width = int(sh[1] / factor)
_minify(basedir, resolutions=[[height, width]])
sfx = "_{}x{}".format(width, height)
elif width is not None:
factor = sh[1] / float(width)
height = int(sh[0] / factor)
_minify(basedir, resolutions=[[height, width]])
sfx = "_{}x{}".format(width, height)
else:
factor = 1
imgdir = os.path.join(basedir, "images" + sfx)
maskdir = os.path.join(basedir, "masks" + sfx)
if not os.path.exists(imgdir):
print(imgdir, "does not exist, returning")
return
if not os.path.exists(maskdir):
print(maskdir, "does not exist, returning")
return
imgfiles = [
os.path.join(imgdir, f)
for f in sorted(os.listdir(imgdir))
if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
]
masksfiles = [
os.path.join(maskdir, f)
for f in sorted(os.listdir(maskdir))
if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
]
    if poses.shape[-1] != len(imgfiles) or len(imgfiles) != len(masksfiles):
print(
"Mismatch between imgs {}, masks {} and poses {} !!!!".format(
len(imgfiles), len(masksfiles), poses.shape[-1]
)
)
return
sh = imageio.imread(imgfiles[0]).shape
poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
poses[2, 4, :] = poses[2, 4, :] * 1.0 / factor
if not load_imgs:
return poses, bds
def imread(f):
if f.endswith("png"):
return imageio.imread(f, ignoregamma=True)
else:
return imageio.imread(f)
imgs = [imread(f)[..., :3] / 255.0 for f in imgfiles]
masks = [ensureNoChannel(imread(f) / 255.0) for f in masksfiles]
imgs = np.stack(imgs, -1)
masks = np.stack(masks, -1)
print("Loaded image data", imgs.shape, masks.shape, poses[:, -1, 0])
return poses, bds, imgs, masks, ev100s
def handle_exif(basedir):
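    """Compute an EV100 exposure value per image from JPEG EXIF data (ISO, aperture, exposure time), falling back to 8 when EXIF is unreadable."""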
files = [
os.path.join(basedir, "images", f)
for f in sorted(os.listdir(os.path.join(basedir, "images")))
if f.endswith("JPG") or f.endswith("jpg")
]
if len(files) == 0:
raise Exception(
"Only jpegs can be used to gain EXIF data needed to handle tonemapping"
)
ret = []
for f in files:
try:
img = PIL.Image.open(f)
exif_data = ehelp.formatted_exif_data(img)
iso = int(exif_data["ISOSpeedRatings"])
aperture = float(exif_data["FNumber"])
exposureTime = ehelp.getExposureTime(exif_data)
ev100 = calculate_ev100_from_metadata(aperture, exposureTime, iso)
        except Exception:
            # fall back to a default EV100 when EXIF data is missing or unreadable
            ev100 = 8
ret.append(ev100)
if len(files) != len(ret):
raise Exception("Not all images are valid!")
return np.stack(ret, 0).astype(np.float32)
def normalize(x):
return x / np.linalg.norm(x)
def viewmatrix(z, up, pos):
vec2 = normalize(z)
vec1_avg = up
vec0 = normalize(np.cross(vec1_avg, vec2))
vec1 = normalize(np.cross(vec2, vec0))
m = np.stack([vec0, vec1, vec2, pos], 1)
return m
def ptstocam(pts, c2w):
tt = np.matmul(c2w[:3, :3].T, (pts - c2w[:3, 3])[..., np.newaxis])[..., 0]
return tt
def poses_avg(poses):
hwf = poses[0, :3, -1:]
center = poses[:, :3, 3].mean(0)
vec2 = normalize(poses[:, :3, 2].sum(0))
up = poses[:, :3, 1].sum(0)
c2w = np.concatenate([viewmatrix(vec2, up, center), hwf], 1)
return c2w
def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, rots, N):
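    """Generate camera-to-world poses along a spiral around the average pose, for rendering a fly-through."""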
render_poses = []
rads = np.array(list(rads) + [1.0])
hwf = c2w[:, 4:5]
for theta in np.linspace(0.0, 2.0 * np.pi * rots, N + 1)[:-1]:
c = np.dot(
c2w[:3, :4],
np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0])
* rads,
)
z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.0])))
render_poses.append(np.concatenate([viewmatrix(z, up, c), hwf], 1))
return render_poses
def recenter_poses(poses):
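    """Re-express all poses relative to their average, so the average camera sits at the world origin."""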
poses_ = poses + 0
bottom = np.reshape([0, 0, 0, 1.0], [1, 4])
c2w = poses_avg(poses)
c2w = np.concatenate([c2w[:3, :4], bottom], -2)
bottom = np.tile(np.reshape(bottom, [1, 1, 4]), [poses.shape[0], 1, 1])
poses = np.concatenate([poses[:, :3, :4], bottom], -2)
poses = np.linalg.inv(c2w) @ poses
poses_[:, :3, :4] = poses[:, :3, :4]
poses = poses_
return poses
#####################
def spherify_poses(poses, bds):
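    """Recenter and rescale the poses so the cameras lie roughly on a sphere around the scene, and build a circular render path."""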
p34_to_44 = lambda p: np.concatenate(
[p, np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])], 1
)
rays_d = poses[:, :3, 2:3]
rays_o = poses[:, :3, 3:4]
def min_line_dist(rays_o, rays_d):
A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1])
b_i = -A_i @ rays_o
pt_mindist = np.squeeze(
-np.linalg.inv((np.transpose(A_i, [0, 2, 1]) @ A_i).mean(0)) @ (b_i).mean(0)
)
return pt_mindist
pt_mindist = min_line_dist(rays_o, rays_d)
center = pt_mindist
up = (poses[:, :3, 3] - center).mean(0)
vec0 = normalize(up)
vec1 = normalize(np.cross([0.1, 0.2, 0.3], vec0))
vec2 = normalize(np.cross(vec0, vec1))
pos = center
c2w = np.stack([vec1, vec2, vec0, pos], 1)
poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:, :3, :4])
rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:, :3, 3]), -1)))
sc = 1.0 / rad
poses_reset[:, :3, 3] *= sc
bds *= sc
rad *= sc
centroid = np.mean(poses_reset[:, :3, 3], 0)
zh = centroid[2]
radcircle = np.sqrt(rad ** 2 - zh ** 2)
new_poses = []
for th in np.linspace(0.0, 2.0 * np.pi, 120):
camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
up = np.array([0, 0, -1.0])
vec2 = normalize(camorigin)
vec0 = normalize(np.cross(vec2, up))
vec1 = normalize(np.cross(vec2, vec0))
pos = camorigin
p = np.stack([vec0, vec1, vec2, pos], 1)
new_poses.append(p)
new_poses = np.stack(new_poses, 0)
new_poses = np.concatenate(
[new_poses, np.broadcast_to(poses[0, :3, -1:], new_poses[:, :3, -1:].shape)], -1
)
poses_reset = np.concatenate(
[
poses_reset[:, :3, :4],
np.broadcast_to(poses[0, :3, -1:], poses_reset[:, :3, -1:].shape),
],
-1,
)
return poses_reset, new_poses, bds
def load_llff_data(
basedir,
factor=8,
recenter=True,
bd_factor=0.75,
spherify=False,
path_zflat=False,
):
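    """Load an LLFF-style real-world capture: images, masks, EV100s, poses, bounds and a render path, optionally recentered or spherified."""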
poses, bds, imgs, msks, ev100s = _load_data(
basedir,
factor=factor,
) # factor=8 downsamples original imgs by 8x
print("Loaded", basedir, bds.min(), bds.max())
# Correct rotation matrix ordering and move variable dim to axis 0
poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
poses = np.moveaxis(poses, -1, 0).astype(np.float32)
imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)
msks = np.expand_dims(np.moveaxis(msks, -1, 0), -1).astype(np.float32)
images = imgs
masks = msks
bds = np.moveaxis(bds, -1, 0).astype(np.float32)
# Rescale if bd_factor is provided
sc = 1.0 if bd_factor is None else 1.0 / (bds.min() * bd_factor)
poses[:, :3, 3] *= sc
bds *= sc
if recenter:
poses = recenter_poses(poses)
if spherify:
poses, render_poses, bds = spherify_poses(poses, bds)
else:
c2w = poses_avg(poses)
print("recentered", c2w.shape)
print(c2w[:3, :4])
## Get spiral
# Get average pose
up = normalize(poses[:, :3, 1].sum(0))
# Find a reasonable "focus depth" for this dataset
close_depth, inf_depth = bds.min() * 0.9, bds.max() * 5.0
dt = 0.75
mean_dz = 1.0 / (((1.0 - dt) / close_depth + dt / inf_depth))
focal = mean_dz
# Get radii for spiral path
zdelta = close_depth * 0.2
tt = poses[:, :3, 3] # ptstocam(poses[:3,3,:].T, c2w).T
rads = np.percentile(np.abs(tt), 90, 0)
c2w_path = c2w
N_views = 120
N_rots = 2
if path_zflat:
# zloc = np.percentile(tt, 10, 0)[2]
zloc = -close_depth * 0.1
c2w_path[:3, 3] = c2w_path[:3, 3] + zloc * c2w_path[:3, 2]
rads[2] = 0.0
N_rots = 1
            N_views = N_views // 2
# Generate poses for spiral path
render_poses = render_path_spiral(
c2w_path, up, rads, focal, zdelta, zrate=0.5, rots=N_rots, N=N_views
)
render_poses = np.array(render_poses).astype(np.float32)
c2w = poses_avg(poses)
print("Data:")
print(poses.shape, images.shape, masks.shape, bds.shape)
dists = np.sum(np.square(c2w[:3, 3] - poses[:, :3, 3]), -1)
i_test = np.argmin(dists)
print("HOLDOUT view is", i_test)
images = images.astype(np.float32)
masks = masks.astype(np.float32)
poses = poses.astype(np.float32)
return images, masks, ev100s, poses, bds, render_poses, i_test
| cgtuebingen/Neural-PIL | dataflow/nerd/load_real_world.py | Python | mit | 13,336 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import os
DEBUG = True
TESTING = True
PRODUCTION = False
FLASK_DEBUGTOOLBAR = False
HOST = '0.0.0.0'
SQLALCHEMY_DATABASE_URI = os.environ.get(
'GGRC_DATABASE_URI',
'mysql+mysqldb://root:root@127.0.0.1/ggrcdev?charset=utf8')
FULLTEXT_INDEXER = 'ggrc.fulltext.mysql.MysqlIndexer'
LOGIN_MANAGER = 'ggrc.login.noop'
# SQLALCHEMY_ECHO = True
SQLALCHEMY_RECORD_QUERIES = 'slow'
AUTOBUILD_ASSETS = True
ENABLE_JASMINE = True
# DEBUG_ASSETS = True
USE_APP_ENGINE_ASSETS_SUBDOMAIN = False
MEMCACHE_MECHANISM = False
APPENGINE_EMAIL = "user@example.com"
LOGGING_FORMATTER = {
"()": "colorlog.ColoredFormatter",
"format": "%(log_color)s%(levelname)-8s %(asctime)s %(name)s %(message)s",
}
| j0gurt/ggrc-core | src/ggrc/settings/development.py | Python | apache-2.0 | 809 |
# This file is part of trc.me.
#
# trc.me is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# trc.me is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# trc.me. If not, see <http://www.gnu.org/licenses/>.
#
try:
from cjson import encode as to_json
except ImportError:
from django.utils.simplejson import dumps as to_json
import logging
import re
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from django.contrib.auth.models import User
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext, Context, loader, Template
from django.views.decorators.csrf import csrf_protect
from tagging.models import Tag as Hashtag, TaggedItem as HashtaggedItem
from trc_me.core.models import Flag, Notification, Tag
from trc_me.core.utils import base62_decode, is_mobile_browser
from trc_me.accounts.forms import PrettyAuthenticationForm, PrettyPasswordChangeForm, ProfileForm, RegistrationForm
from trc_me.web.forms import FlagForm, PositionForm, TagForm
from trc_me.web.models import Page
@csrf_protect
def index(request):
"""Landing page
"""
authform = PrettyAuthenticationForm()
regform = RegistrationForm()
# Featured flags
flags = Flag.objects.filter(is_featured=True).all()
#if request.device is not None \
# and request.device['resolution_width'] < 1024:
template = 'web/index.html' if not is_mobile_browser(request) else 'mobile/index.html'
next = reverse(index)
return render_to_response(template,
{'authform': authform,
'regform': regform,
'flags': flags,
'next': next},
context_instance=RequestContext(request))
def search(request):
if 'q' not in request.GET:
# User came directly here with no search query
return HttpResponseRedirect(reverse(index))
q = request.GET['q'].replace('#', '')
flags = HashtaggedItem.objects.get_by_model(Flag.objects.get_visible_to(request.user), q)
tags = HashtaggedItem.objects.get_by_model(Tag.objects.get_visible_to(request.user), q)
# TODO: Find users whose full name is like q
# TODO: Allow users to set their full names!
#users = ... CONCAT(first_name, ' ', last_name) LIKE '%q%';
template = 'web/search_results.html' if not is_mobile_browser(request) else 'mobile/search_results.html'
return render_to_response(template,
{'tags': tags,
'flags': flags,
'q': request.GET['q']},
context_instance=RequestContext(request))
@login_required
@csrf_protect
def create_tag(request):
"""Create a new tag
"""
if request.method == 'POST':
form = TagForm(request.POST, request.FILES)
if form.is_valid():
tag = form.save(commit=False)
tag.user = request.user
tag.save()
# Get hashtags from tag.description or from POST['hashtags']
hashtags = tag.get_hashtags('description', request.POST['hashtags'])
Hashtag.objects.update_tags(tag, hashtags)
# Subscribe tag owner to tag
try:
track_tag(request, tag.id, silent=True)
except Exception as ex: # TODO: Catch a real exception
logging.debug(
"Exception raised when subscribing tag owner '%s' to tag '%s': %s"
% (request.user, tag, ex))
return HttpResponseRedirect(reverse(view_tag, kwargs={'id': tag.id}))
else:
form = TagForm()
template = 'web/tag_form.html' if not is_mobile_browser(request) else 'mobile/tag_form.html'
return render_to_response(template,
{'form': form},
context_instance=RequestContext(request))
@login_required
@csrf_protect
def edit_tag(request, id):
"""Edit a tag's details
"""
tag = get_object_or_404(Tag, pk=id)
if tag.is_deleted:
raise Http404
if request.method == 'POST':
form = TagForm(request.POST, request.FILES, instance=tag)
if form.is_valid():
tag = form.save()
Hashtag.objects.update_tags(tag, request.POST['hashtags'].replace('#', ''))
return HttpResponseRedirect(reverse(view_tag, kwargs={'id': tag.id}))
else:
form = TagForm(instance=tag)
hashtags = ' '.join([u'#%s' % (t, ) for t in Hashtag.objects.get_for_object(tag)])
template = 'web/tag_form.html' if not is_mobile_browser(request) else 'mobile/tag_form.html'
return render_to_response(template,
{'form': form,
'hashtags': hashtags},
context_instance=RequestContext(request))
@csrf_protect
def view_tag(request, id):
"""View a tag's details
The creator of a tag can see everything about it. If a tag is public,
anyone can see it and all its public flags. If a tag is not public, a
user can see it only if they have flagged it, and then they can only see
their flags.
"""
from subhub.models import Subscription, SubscriptionTask
tag = get_object_or_404(Tag, pk=id)
if tag.is_deleted:
raise Http404
if not tag.is_visible_to(request.user):
#return HttpResponseForbidden
raise Http404
flags = tag.flag_set.get_visible_to(request.user)
if request.user == tag.user and len(flags) == 0:
offer_initial_flag = True
else:
offer_initial_flag = False
form = TagForm(instance=tag)
# Find out whether user is tracking tag
if request.user.is_authenticated():
callback = request.build_absolute_uri(
reverse(track_tag_callback,
kwargs={'username': request.user.username,
'tag_id': tag.id}))
is_tracking = (
Subscription.objects.filter(callback=callback).count() \
+ SubscriptionTask.objects.filter(callback=callback).filter(mode='subscribe').count() \
- SubscriptionTask.objects.filter(callback=callback).filter(mode='unsubscribe').count()) > 0
else:
is_tracking = False
template = 'web/tag.html' if not is_mobile_browser(request) else 'mobile/tag.html'
return render_to_response(template,
{'tag': tag,
'form': form,
'flags': flags,
'initial': offer_initial_flag,
'is_tracking': is_tracking},
context_instance=RequestContext(request))
@csrf_protect
def update_tag(request, code):
"""Flag a tag
"""
tag = get_object_or_404(Tag, pk=base62_decode(code.upper()))
if tag.is_deleted:
raise Http404
if request.method == 'POST':
flag_form = FlagForm(request.POST, request.FILES)
position_form = PositionForm(request.POST)
if flag_form.is_valid() and position_form.is_valid():
# Save position
position = position_form.save()
# Save flag
flag = flag_form.save(commit=False)
flag.tag = tag
flag.position = position
if request.user.is_authenticated():
flag.user = request.user
else:
# Flags of unauthenticated users are always public
flag.visibility = 'pub'
flag.points = flag.calc_points()
flag.save()
# Get hashtags from flag.note or from POST['hashtags']
hashtags = flag.get_hashtags('note', request.POST['hashtags'])
Hashtag.objects.update_tags(flag, hashtags)
if request.user.is_authenticated():
if flag.points > 0:
profile = request.user.get_profile()
profile.points = profile.points + flag.points
profile.save()
# If logged in, redirect to the user's profile, or to the tag's redirect URL
url = tag.redirect_url if tag.redirect_url else reverse(view_user,
kwargs={'username': request.user.username})
return HttpResponseRedirect(url)
# Otherwise view the tag (or go to its redirect URL)
url = tag.redirect_url if tag.redirect_url else reverse(view_tag, kwargs={'id': tag.id})
return HttpResponseRedirect(url)
else:
flag_form = FlagForm()
position_form = PositionForm()
authform = PrettyAuthenticationForm(request)
regform = RegistrationForm()
next = reverse('trc_me.web.views.update_tag', kwargs={'code': code})
template = 'web/tag_update.html' if not is_mobile_browser(request) else 'mobile/tag_update.html'
return render_to_response(template,
{'tag': tag,
'authform': authform,
'regform': regform,
'flagform': flag_form,
'positionform': position_form,
'next': next},
context_instance=RequestContext(request))
@csrf_protect
def mobile_update_tag(request):
"""Handles update tag requests from the mobile index page
(Web index page does this asynchronously.)
"""
if 'code' not in request.GET:
        return HttpResponseRedirect(reverse(index))
code = request.GET['code']
# If user gave hostname too, strip it
    pattern = r'\/(\w{6,})\/?$'
result = re.search(pattern, code)
if result is not None:
code = result.groups()[0]
tag = get_object_or_404(Tag, pk=base62_decode(code.upper()))
if tag.is_deleted:
raise Http404
flag_form = FlagForm()
position_form = PositionForm()
authform = PrettyAuthenticationForm(request)
next = reverse(update_tag, kwargs={'code': code})
return render_to_response('mobile/tag_update.html',
{'tag': tag,
'authform': authform,
'flagform': flag_form,
'positionform': position_form,
'next': next},
context_instance=RequestContext(request))
@login_required
def track_tag(request, id, mode='subscribe', silent=False):
"""Subscribe to tag's feed, and be notified of new flags
mode in ('subscribe', 'unsubscribe')
if silent do not return HttpResponse()
"""
import urllib
import urllib2
import hashlib
from django.conf import settings
from django.core.mail import send_mail
tag = get_object_or_404(Tag, pk=id)
if not tag.is_visible_to(request.user):
raise Http404
h = hashlib.sha1('%s tracking tag %d at %s' % (request.user.username,
tag.id,
settings.SECRET_KEY))
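    # PubSubHubbub subscription parameters; hub.verify_token lets our callback verify the hub's verification request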
data = {
'hub.callback': request.build_absolute_uri(
reverse(track_tag_callback,
kwargs={'username': request.user.username,
'tag_id': tag.id})),
'hub.mode': mode,
'hub.topic': request.build_absolute_uri(
reverse('tag_feed', kwargs={'id': tag.id})),
'hub.verify': 'async',
'hub.verify_token': h.hexdigest()}
response = urllib2.urlopen(
request.build_absolute_uri(reverse('subhub-hub')),
urllib.urlencode(data))
headers = response.headers
body = response.read()
response.close()
if silent:
if headers.status == '' and body == 'Subscription queued':
return
return 'Error %s: %s' % (headers.status, body)
if headers.status == '' and body == 'Subscription queued':
if mode == 'subscribe':
# Notify tag owner of new tracker
# TODO: Decent multipart message body
message = '%s [ %s ] is tracking your tag "%s".' % (
request.user.username,
request.build_absolute_uri(reverse(view_user, kwargs={'username': request.user.username})),
tag.description)
            send_mail('%s is tracking your tag' % request.user.username,
message,
settings.DEFAULT_FROM_EMAIL,
[tag.user.email])
return render_to_response(
'web/tag_untrack_button.html',
{'tag': tag},
context_instance=RequestContext(request))
elif mode == 'unsubscribe':
# Notify owner of untrack
message = '%s [ %s ] stopped tracking your tag "%s".' % (
request.user.username,
request.build_absolute_uri(reverse(view_user, kwargs={'username': request.user.username})),
tag.description)
            send_mail('%s stopped tracking your tag' % request.user.username,
message,
settings.DEFAULT_FROM_EMAIL,
[tag.user.email])
return render_to_response(
'web/tag_track_button.html',
{'tag': tag},
context_instance=RequestContext(request))
return HttpResponse('Error %s: %s' % (headers.status, body))
def track_tag_callback(request, username, tag_id):
"""PuSH will send pings to this URL when a tag's feed is updated
"""
import hashlib
from django.conf import settings
tag = get_object_or_404(Tag, pk=tag_id)
user = User.objects.get(username=username)
if 'hub.mode' in request.GET and request.GET['hub.mode'] == 'subscribe':
# Confirm subscription
h = hashlib.sha1(('%s tracking tag %d at %s' % (user.username,
tag.id,
settings.SECRET_KEY)))
if request.GET['hub.topic'] == request.build_absolute_uri(reverse('tag_feed', kwargs={'id': tag.id})) \
and request.GET['hub.verify_token'] == h.hexdigest():
# TODO: Consider using the hub.secret mechanism for authentication
# All OK
return HttpResponse(request.GET['hub.challenge'])
else:
#if request.GET['hub.topic'] != request.build_absolute_uri(reverse('tag_feed', kwargs={'id': tag.id})):
# return HttpResponse(
# 'Error: Topic "%s" does not match feed URL "%s"' % (
# request.GET['hub.topic'],
# request.build_absolute_uri(
# reverse('tag_feed', kwargs={'id': tag.id}))))
#if request.GET['hub.verify_token'] != h.hexdigest():
# return HttpResponse('Error: Tokens do not match "%s tracking tag %d"' % (user.username,
# tag.id))
return HttpResponse('Error confirming topic and token')
# Create notification
n = Notification.objects.create(
user=user,
subject=tag.description if len(tag.description) <= 30 else '%s...' % (tag.description[:27], ),
subject_url=reverse(view_tag, kwargs={'id': tag.id}),
verb='was flagged')
# TODO: Fetch and parse feed item
#object = models.CharField(max_length=30)
#object_url = models.URLField()
# Notify user if necessary
if user.get_profile().notify_by_email:
t = loader.get_template('web/email_notification.txt')
c = Context({'n': n})
body_text = t.render(c)
message = EmailMultiAlternatives('Update from trc.me',
body_text,
settings.DEFAULT_FROM_EMAIL,
[user.email])
if hasattr(settings, 'EMAIL_ADDRESS_BOUNCE'):
message.headers = {'Return-Path': settings.EMAIL_ADDRESS_BOUNCE}
t = loader.get_template('web/email_notification.html')
body_html = t.render(c)
message.attach_alternative(body_html, 'text/html')
message.send()
return HttpResponse('OK')
@login_required
def print_tag(request, id):
tag = get_object_or_404(Tag, pk=id)
if tag.is_deleted:
raise Http404
if not tag.user == request.user:
return HttpResponseForbidden
return render_to_response('web/tag_print.html',
{'tag': tag},
context_instance=RequestContext(request))
@login_required
def delete_tag(request, id):
tag = get_object_or_404(Tag, pk=id)
if tag.is_deleted:
raise Http404
if not tag.user == request.user:
return HttpResponseForbidden
tag.is_deleted = True
tag.save()
return HttpResponseRedirect(reverse(view_user, kwargs={'username': request.user.username}))
@csrf_protect
def view_flag(request, id):
"""Open the tag page, and populate panel with flag
"""
flag = get_object_or_404(Flag, pk=id)
if not flag.is_visible_to(request.user):
raise Http404
template = 'web/flag.html' if not is_mobile_browser(request) else 'mobile/flag.html'
return render_to_response(template,
{'flag': flag,
'tag': flag.tag},
context_instance=RequestContext(request))
@csrf_protect
def view_user(request, username=None):
"""Show account details
"""
from subhub.models import Subscription, SubscriptionTask
# TODO: Add mobile
if username == None:
# Redirect to authenticated user's profile
if request.user.is_authenticated():
return HttpResponseRedirect(
reverse(view_user,
kwargs={'username': request.user.username}))
# User isn't authenticated. Redirect to landing page
return HttpResponseRedirect(reverse(index))
if request.user.is_authenticated() and request.user.username == username:
# It's the user's own profile
u = request.user
tags = Tag.objects.filter(user=request.user).filter(is_deleted=False)
flags = Flag.objects.filter(user=request.user).order_by('-created_at')
notis = Notification.objects.filter(user=request.user).order_by('-created_at')[:10]
else:
u = User.objects.get(username=username)
tags = Tag.objects.get_visible_to(request.user).filter(user=u)
flags = Flag.objects.get_visible_to(request.user).filter(user=u)
notis = []
# Find out whether request.user is following user
if request.user.is_authenticated():
callback = request.build_absolute_uri(
reverse(follow_user_callback,
kwargs={'username': request.user.username,
'tracked_username': u.username}))
is_following = (
Subscription.objects.filter(callback=callback).count() \
+ SubscriptionTask.objects.filter(callback=callback).filter(mode='subscribe').count() \
- SubscriptionTask.objects.filter(callback=callback).filter(mode='unsubscribe').count()) > 0
else:
is_following = False
profile = u.get_profile()
profileform = ProfileForm(instance=profile)
passwordform = PrettyPasswordChangeForm(user=u)
template = 'web/user_home.html' if not is_mobile_browser(request) else 'mobile/user_home.html'
return render_to_response(template,
{'u': u,
'profile': profile,
'is_following': is_following,
'profileform': profileform,
'passwordform': passwordform,
'tags': tags,
'flags': flags,
'notis': notis},
context_instance=RequestContext(request))
@login_required
def follow_user(request, username, mode='subscribe'):
"""Subscribe to user's feed, and be notified of new tags and flags
"""
# Send PuSH subscription request to hub
    import urllib
    import urllib2
import hashlib
from django.conf import settings
from django.core.mail import send_mail
user = User.objects.get(username=username)
h = hashlib.sha1(('%s tracking user %s at %s' % (request.user.username,
user.username,
settings.SECRET_KEY)))
data = {
'hub.callback': request.build_absolute_uri(
reverse(follow_user_callback,
kwargs={'username': request.user.username,
'tracked_username': user.username})),
'hub.mode': mode,
'hub.topic': request.build_absolute_uri(
reverse('user_feed', kwargs={'username': user.username})),
'hub.verify': 'async',
'hub.verify_token': h.hexdigest()}
    response = urllib2.urlopen(
        request.build_absolute_uri(reverse('subhub-hub')),
        urllib.urlencode(data))
headers = response.headers
body = response.read()
response.close()
if headers.status == '' and body == 'Subscription queued':
# Notify user of new tracker
# TODO: Decent multipart message body
if mode == 'subscribe':
message = '%s [ %s ] is following you.' % (
request.user.username,
request.build_absolute_uri(reverse(view_user, kwargs={'username': request.user.username})))
            send_mail('%s is following you' % request.user.username,
message,
settings.DEFAULT_FROM_EMAIL,
[user.email])
return render_to_response(
'web/user_unfollow_button.html',
{'user': user},
context_instance=RequestContext(request))
elif mode == 'unsubscribe':
# Uncomment to notify user on unfollow:
#message = '%s [ %s ] stopped following you.' % (
# request.user.username,
# request.build_absolute_uri(reverse(view_user, kwargs={'username': request.user.username})))
#send_mail('%s stopped following you',
# message,
# settings.DEFAULT_FROM_EMAIL,
# [user.email])
return render_to_response(
'web/user_follow_button.html',
{'user': user},
context_instance=RequestContext(request))
return HttpResponse('Error %s: %s' % (headers.status, body))
def follow_user_callback(request, username, tracked_username):
"""PuSH will send pings to this URL when a user's feed is updated
"""
import hashlib
from django.conf import settings
    tracked_user = get_object_or_404(User, username=tracked_username)
user = User.objects.get(username=username)
if 'hub.mode' in request.GET and request.GET['hub.mode'] == 'subscribe':
# Confirm subscription
        h = hashlib.sha1(('%s tracking user %s at %s' % (user.username,
                                                          tracked_user.username,
                                                          settings.SECRET_KEY)))
        if request.GET['hub.topic'] == request.build_absolute_uri(
                reverse('user_feed', kwargs={'username': tracked_username})) \
                and request.GET['hub.verify_token'] == h.hexdigest():
# TODO: Consider using the hub.secret mechanism for authentication
# All OK
return HttpResponse(request.GET['hub.challenge'])
else:
return HttpResponse('Error confirming topic and token')
# Create notification
n = Notification.objects.create(
user=user,
subject=tracked_user.username,
subject_url=reverse(view_user, kwargs={'username': tracked_user.username}))
# TODO: Fetch and parse feed item
# Notify user if necessary
if user.get_profile().notify_by_email:
t = loader.get_template('web/email_notification.txt')
c = Context({'n': n})
body_text = t.render(c)
message = EmailMultiAlternatives('Update from trc.me',
body_text,
settings.DEFAULT_FROM_EMAIL,
[user.email])
if hasattr(settings, 'EMAIL_ADDRESS_BOUNCE'):
message.headers = {'Return-Path': settings.EMAIL_ADDRESS_BOUNCE}
t = loader.get_template('web/email_notification.html')
body_html = t.render(c)
message.attach_alternative(body_html, 'text/html')
message.send()
return HttpResponse('OK')
def mobile_view_page(request, slug):
"""Return the page identified by the slug
"""
# TODO: Use a generic view for this.
page = get_object_or_404(Page, slug=slug)
return render_to_response('mobile/page.html',
{'page': page},
context_instance=RequestContext(request))
@csrf_protect
def ajax_index(request):
authform = AuthenticationForm()
reg_form = RegistrationForm()
return render_to_response('web/index_dialog.html',
{'authform': authform,
'regform': reg_form},
context_instance=RequestContext(request))
@login_required
@csrf_protect
def ajax_create_tag(request):
form = TagForm()
return render_to_response('web/tag_form_dialog.html',
{'form': form},
context_instance=RequestContext(request))
@csrf_protect
def ajax_update_tag(request, id):
"""Return HTML for the Update Tag dialog
"""
tag = get_object_or_404(Tag, pk=base62_decode(id.upper()))
flag_form = FlagForm()
authform = AuthenticationForm()
position_form = PositionForm()
return render_to_response('web/tag_update_dialog.html',
{'tag': tag,
'flagform': flag_form,
'authform': authform,
'positionform': position_form},
context_instance=RequestContext(request))
@csrf_protect
def ajax_view_flag(request, id):
flag = get_object_or_404(Flag, pk=id)
if not flag.is_visible_to(request.user):
raise Http404
return render_to_response('web/flag_panel.html',
{'flag': flag,
'tag': flag.tag},
context_instance=RequestContext(request))
def ajax_view_page(request, slug):
"""Return the page identified by the slug
"""
# TODO: Use a generic view for this.
page = get_object_or_404(Page, slug=slug)
return render_to_response('web/page.html',
{'page': page},
context_instance=RequestContext(request))
| kaapstorm/trc_me | src/trc_me/web/views.py | Python | agpl-3.0 | 28,044 |
from random import random
from random import uniform
from datetime import datetime
from django.utils import timezone
from datetime import timedelta
from django.forms import model_to_dict
from seshdash.data.db.influx import Influx
from seshdash.models import BoM_Data_Point as Data_Point
from seshdash.utils.time_utils import get_time_interval_array
# models
from seshdash.models import Sesh_Site, Daily_Data_Point
# weather
from seshdash.tasks import get_weather_data
def get_random_int():
val = random() * 100
return int(val)
def get_random_float():
val = random() * 100
return val
def get_random_binary():
val = get_random_int()
if val > 50:
return 1
return 0
def get_random_interval(cieling,floor):
return uniform(cieling,floor)
def generate_date_array(start=None, end = 'now', naive=False, interval=5, units='minutes'):
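    """Return a list of datetimes between start and end at the given interval (defaults to the last 24 hours in 5-minute steps)."""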
if end == 'now':
end = timezone.now()
if naive:
end = datetime.now()
if not start:
start = end - timedelta(hours=24)
time_arr = get_time_interval_array(interval, units,start, end)
return time_arr
def create_test_data(site, start=None, end="now", interval=5, units='minutes' , val=50, db='test_db', data={}):
"""
    data = {'R1':[0,0,0,..],'R2':[0,0,123,12,...]...} uses the given fixed data sets instead of generating data
    if val is not set, random data will be generated for any field missing from data
"""
_influx_db_name = db
i = Influx(database=_influx_db_name)
data_point_dates = generate_date_array(start=start, end=end, interval=interval, units=units)
voltage_in = 220
voltage_out = 220
soc = val
R1 = val
R2 = val
R3 = val
R4 = val
R5 = val
count = 0
print "creating %s test data points"%len(data_point_dates)
print "between %s and %s "%(data_point_dates[0],data_point_dates[len(data_point_dates)-1:])
# Simulate Grid outage
for time_val in data_point_dates:
if not val:
try:
soc = data.get('soc',[])[count]
except:
soc = get_random_int()
try:
R1 = data.get('R1',[])[count]
except:
R1 = voltage_in * get_random_binary()
try:
R2 = data.get('R2',[])[count]
except:
R2 = get_random_interval(100,500)
try:
R3 = data.get('R3',[])[count]
except:
R3 = get_random_interval(22,28)
try:
R4 = data.get('R4',[])[count]
except:
R4 = get_random_interval(100,500)
try:
R5 = data.get('R5',[])[count]
except:
R5 = get_random_interval(100,500)
dp = Data_Point.objects.create(
site=site,
soc = soc ,
battery_voltage = R3,
time=time_val,
AC_Voltage_in = R1,
AC_Voltage_out = voltage_out,
AC_input = R4,
AC_output = R5,
AC_output_absolute = R2,
AC_Load_in = R2,
AC_Load_out = R4,
pv_production = R5)
        # Also send to influx
dp_dict = model_to_dict(dp)
dp_dict.pop('time')
dp_dict.pop('inverter_state')
dp_dict.pop('id')
i.send_object_measurements(dp_dict,timestamp=time_val.isoformat(),tags={"site_name":site.site_name})
count = count + 1
    # Return the number of data points created
    return len(data_point_dates)
def create_test_data_daily_points (site_id, number_days=355):
""" Generates test data for daily data points for a given site for a given number of days """
start_date = timezone.now() - timedelta(days=number_days)
data_point_dates = generate_date_array(start=start_date, interval=24, units='hours')
total_items = len(data_point_dates)
count = 0
done = 0
increment = 100
site = Sesh_Site.objects.filter(pk=site_id).first()
    # Create daily data points
    print "creating %s test data points" % total_items
for time_val in data_point_dates:
dp = Daily_Data_Point.objects.create(
site=site,
daily_battery_charge=get_random_float(),
daily_grid_outage_n=get_random_float(),
daily_grid_outage_t=get_random_float(),
daily_grid_usage=get_random_float(),
daily_no_of_alerts=get_random_float(),
daily_power_cons_pv=get_random_float(),
daily_power_consumption_total=get_random_float(),
daily_pv_yield=get_random_float(),
date=time_val
)
dp.save()
count = count + 1
done = done + 1
if count % increment == 0:
print "100 items Done, %s to go" % (total_items - done )
return len(data_point_dates)
def generate_test_data_daily_points():
""" Generates Daily Data Point for all sites """
sites = Sesh_Site.objects.all()
for site in sites:
create_test_data_daily_points(site.id)
def create_weather_data():
"""
creating weather_data for next 5 days
"""
get_weather_data()
| GreatLakesEnergy/sesh-dash-beta | seshdash/tests/data_generation.py | Python | mit | 6,141 |
#!/usr/bin/env python
import ast
import re
from setuptools import setup, find_packages
INSTALL_REQUIRES = [
'boto3>=1.4.2',
'botocore>=1.4.85',
'virtualenv',
]
STYLE_REQUIRES = [
'flake8>=2.5.4',
'pylint>=1.5.5',
]
TEST_REQUIRES = [
'coverage>=4.0.3',
'pytest>=2.9.1',
'moto>=0.4.23',
'mock',
'nose',
]
EXTRAS_REQUIRE = {
'test': TEST_REQUIRES,
'style': STYLE_REQUIRES,
# alias
'lint': STYLE_REQUIRES,
'test-requirements': TEST_REQUIRES + STYLE_REQUIRES,
}
def package_meta():
"""Read __init__.py for global package metadata.
Do this without importing the package.
"""
_version_re = re.compile(r'__version__\s+=\s+(.*)')
_url_re = re.compile(r'__url__\s+=\s+(.*)')
_license_re = re.compile(r'__license__\s+=\s+(.*)')
with open('lambda_uploader/__init__.py', 'rb') as ffinit:
initcontent = ffinit.read()
version = str(ast.literal_eval(_version_re.search(
initcontent.decode('utf-8')).group(1)))
url = str(ast.literal_eval(_url_re.search(
initcontent.decode('utf-8')).group(1)))
licencia = str(ast.literal_eval(_license_re.search(
initcontent.decode('utf-8')).group(1)))
return {
'version': version,
'license': licencia,
'url': url,
}
_lu_meta = package_meta()
setup(
name='lambda-uploader',
description='AWS Python Lambda Packager',
keywords='aws lambda',
version=_lu_meta['version'],
extras_require=EXTRAS_REQUIRE,
tests_require=TEST_REQUIRES + STYLE_REQUIRES,
install_requires=INSTALL_REQUIRES,
packages=find_packages(exclude=['tests']),
test_suite='tests',
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
license=_lu_meta['license'],
author="Rackers",
maintainer_email="jim.rosser@rackspace.com",
url=_lu_meta['url'],
entry_points={
'console_scripts': [
'lambda-uploader=lambda_uploader.shell:main'
]
},
)
| rackerlabs/lambda-uploader | setup.py | Python | apache-2.0 | 2,168 |
# -*- coding: utf-8 -*-
"""Tests for exceptions."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 9fa8041db90dd43d14dcaea36accce58c715edde $'
import pywikibot
from tests.aspects import unittest, DeprecationTestCase
class TestDeprecatedExceptions(DeprecationTestCase):
"""Test usage of deprecation in library code."""
net = False
def test_UploadWarning(self):
"""Test exceptions.UploadWarning is deprecated only."""
# Accessing from the main package should work fine.
cls = pywikibot.UploadWarning
self.assertNoDeprecation()
e = cls('foo', 'bar')
self.assertIsInstance(e, pywikibot.Error)
self.assertNoDeprecation()
self._reset_messages()
        # But it shouldn't be accessed from the exceptions module.
cls = pywikibot.exceptions.UploadWarning
self.assertDeprecation(
'pywikibot.exceptions.UploadWarning is deprecated, '
'use pywikibot.data.api.UploadWarning instead.')
self._reset_messages()
e = cls('foo', 'bar')
self.assertIsInstance(e, pywikibot.Error)
self.assertNoDeprecation()
def test_PageNotFound(self):
"""Test PageNotFound is deprecated from the package."""
cls = pywikibot.PageNotFound
self.assertDeprecation(
'pywikibot.PageNotFound is deprecated, and no longer '
'used by pywikibot; use http.fetch() instead.')
self._reset_messages()
e = cls('foo')
self.assertIsInstance(e, pywikibot.Error)
self.assertDeprecation(
'pywikibot.exceptions.DeprecatedPageNotFoundError is deprecated.')
self._reset_messages()
cls = pywikibot.exceptions.PageNotFound
self.assertDeprecation(
'pywikibot.exceptions.PageNotFound is deprecated, and no longer '
'used by pywikibot; use http.fetch() instead.')
self._reset_messages()
e = cls('foo')
self.assertIsInstance(e, pywikibot.Error)
self.assertDeprecation(
'pywikibot.exceptions.DeprecatedPageNotFoundError is deprecated.')
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| hperala/kontuwikibot | tests/exceptions_tests.py | Python | mit | 2,321 |
import json
import os
import os.path
import shutil
from datetime import datetime
from subprocess import check_output
from pkgpanda.util import write_json, write_string
dcos_image_commit = os.getenv('DCOS_IMAGE_COMMIT', None)
if dcos_image_commit is None:
dcos_image_commit = check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
if dcos_image_commit is None:
raise ValueError("Unable to set dcos_image_commit from environment or git.")
template_generation_date = str(datetime.utcnow())
def try_makedirs(path):
try:
os.makedirs(path)
except FileExistsError:
pass
def copy_makedirs(src, dest):
try_makedirs(os.path.dirname(dest))
shutil.copy(src, dest)
def do_bundle_onprem(extra_files, gen_out, output_dir):
# We are only being called via dcos_generate_config.sh with an output_dir
assert output_dir is not None
assert output_dir
assert output_dir[-1] != '/'
output_dir = output_dir + '/'
# Copy the extra_files
for filename in extra_files:
copy_makedirs(filename, output_dir + filename)
# Copy the config packages
for package_name in json.loads(gen_out.arguments['config_package_names']):
filename = gen_out.cluster_packages[package_name]['filename']
copy_makedirs(filename, output_dir + filename)
# Write an index of the cluster packages
write_json(output_dir + 'cluster-package-info.json', gen_out.cluster_packages)
# Write the bootstrap id
write_string(output_dir + 'bootstrap.latest', gen_out.arguments['bootstrap_id'])
def variant_str(variant):
"""Return a string representation of variant."""
if variant is None:
return ''
return variant
def variant_name(variant):
"""Return a human-readable string representation of variant."""
if variant is None:
return '<default>'
return variant
def variant_prefix(variant):
"""Return a filename prefix for variant."""
if variant is None:
return ''
return variant + '.'
| BenWhitehead/dcos | gen/build_deploy/util.py | Python | apache-2.0 | 2,023 |
# flake8: noqa
import copy
from collections import OrderedDict, deque
import numpy as np
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
def describe_location(location, locations):
if location.can_describe:
final_location = locations.get(location.pk)
if final_location is not None:
location = final_location
result = location.serialize(include_type=True, detailed=False, simple_geometry=True)
if hasattr(location, 'serialize_position'):
result.update(location.serialize_position())
return result
class Route:
def __init__(self, router, origin, destination, path_nodes, options,
origin_addition, destination_addition, origin_xyz, destination_xyz):
self.router = router
self.origin = origin
self.destination = destination
self.path_nodes = path_nodes
self.options = options
self.origin_addition = origin_addition
self.destination_addition = destination_addition
self.origin_xyz = origin_xyz
self.destination_xyz = destination_xyz
def serialize(self, locations):
nodes = [[node, None] for node in self.path_nodes]
if self.origin_addition and any(self.origin_addition):
nodes.insert(0, (self.origin_addition[0], None))
nodes[1][1] = self.origin_addition[1]
if self.destination_addition and any(self.destination_addition):
nodes.append(self.destination_addition)
if self.origin_xyz is not None:
node = nodes[0][0]
if not hasattr(node, 'xyz'):
node = self.router.nodes[node]
origin_distance = np.linalg.norm(node.xyz - self.origin_xyz)
else:
origin_distance = 0
if self.destination_xyz is not None:
node = nodes[-1][0]
if not hasattr(node, 'xyz'):
node = self.router.nodes[node]
destination_distance = np.linalg.norm(node.xyz - self.destination_xyz)
else:
destination_distance = 0
items = deque()
last_node = None
last_item = None
walk_factor = self.options.walk_factor
distance = origin_distance
duration = origin_distance * walk_factor
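        # walk the node sequence, resolving the edge between consecutive nodes and accumulating distance and duration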
for i, (node, edge) in enumerate(nodes):
if edge is None:
edge = self.router.edges[last_node, node] if last_node else None
node_obj = self.router.nodes[node] if isinstance(node, (int, np.int32, np.int64)) else node
item = RouteItem(self, node_obj, edge, last_item)
if edge:
distance += edge.distance
duration += item.router_waytype.get_duration(edge, walk_factor)
items.append(item)
last_item = item
last_node = node
distance += destination_distance
duration += destination_distance * walk_factor
# descriptions for waytypes
next_item = None
last_primary_level = None
for item in reversed(items):
icon = 'arrow'
if not item.level.on_top_of_id:
last_primary_level = item.level
if item.waytype:
icon = item.waytype.icon_name or 'arrow'
if item.waytype.join_edges and next_item and next_item.waytype == item.waytype:
continue
if item.waytype.icon_name:
icon = item.waytype.icon_name
if item.waytype.up_separate:
icon += '-up' if item.edge.rise > 0 else '-down'
icon += '.svg'
description = item.waytype.description
if item.waytype.up_separate and item.edge.rise > 0:
description = item.waytype.description_up
if (item.waytype.level_change_description != False and last_primary_level and
((item.last_item and item.level != item.last_item.level) or
item.level.on_top_of_id)): # != False because it's lazy
level_change_description = (
str(item.waytype.level_change_description).replace('{level}', str(last_primary_level.title))
)
description = str(description).replace(
'{level_change_description}', ' ' + level_change_description + ' '
                ).replace('  ', ' ').replace(' .', '.')
last_primary_level = None
else:
description = description.replace('{level_change_description}', '')
item.descriptions.append((icon, description))
next_item = item
# add space transfer descriptions
last_space = None
current_space = None
for item in items:
if item.new_space:
next_space = item.space
if item.last_item and not item.descriptions:
description = None
if last_space:
description = current_space.cross_descriptions.get((last_space.pk, next_space.pk), None)
if description is None:
description = current_space.leave_descriptions.get(next_space.pk, None)
if description is None:
description = item.space.enter_description
if description == None: # could be a lazy None
description = _('Go to %(space_title)s.') % {'space_title': item.space.title}
item.descriptions.append(('more_vert', description))
last_space = current_space
current_space = next_space
# add description for last space
remaining_distance = destination_distance
for item in reversed(items):
if item.descriptions:
break
if item.edge:
remaining_distance += item.edge.distance
if remaining_distance:
item.descriptions.append(('more_vert', _('%d m remaining to your destination.') % max(remaining_distance, 1)))
items[-1].descriptions.append(('done', _('You have reached your destination.')))
duration = round(duration)
seconds = int(duration) % 60
minutes = int(duration/60)
if minutes:
duration_str = '%d min %d s' % (minutes, seconds)
else:
duration_str = '%d s' % seconds
distance = round(distance, 1)
distance_str = '%d m' % distance
summary = '%s (%s)' % (duration_str, distance_str)
options_summary = [
{
'fastest': _('fastest route'),
'shortest': _('shortest route')
}[self.options['mode']],
]
waytypes_excluded = sum((name.startswith('waytype_') and value != 'allow')
for name, value in self.options.items())
if waytypes_excluded:
options_summary.append(_('some path types avoided'))
else:
options_summary.append(_('default options'))
options_summary = ', '.join(str(s) for s in options_summary)
return OrderedDict((
('origin', describe_location(self.origin, locations)),
('destination', describe_location(self.destination, locations)),
('distance', round(distance, 2)),
('duration', round(duration)),
('distance_str', distance_str),
('duration_str', duration_str),
('summary', summary),
('options_summary', options_summary),
('items', tuple(item.serialize(locations=locations) for item in items)),
))
class RouteItem:
def __init__(self, route, node, edge, last_item):
self.route = route
self.node = node
self.edge = edge
self.last_item = last_item
self.descriptions = []
@cached_property
def waytype(self):
if self.edge and self.edge.waytype:
return self.route.router.waytypes[self.edge.waytype]
@cached_property
def router_waytype(self):
if self.edge:
return self.route.router.waytypes[self.edge.waytype]
@cached_property
def space(self):
return self.route.router.spaces[self.node.space]
@cached_property
def level(self):
return self.route.router.levels[self.space.level_id]
@cached_property
def new_space(self):
return not self.last_item or self.space.pk != self.last_item.space.pk
@cached_property
def new_level(self):
return not self.last_item or self.level.pk != self.last_item.level.pk
def serialize(self, locations):
result = OrderedDict((
('id', self.node.pk),
('coordinates', (self.node.x, self.node.y, self.node.altitude)),
('waytype', (self.route.router.waytypes[self.edge.waytype].serialize(detailed=False)
if self.edge and self.edge.waytype else None)),
))
if self.waytype:
result['waytype'] = self.waytype.serialize(detailed=False)
if self.new_space:
result['space'] = describe_location(self.space, locations)
if self.new_level:
result['level'] = describe_location(self.level, locations)
result['descriptions'] = self.descriptions
return result
class NoRoute:
distance = np.inf
| c3nav/c3nav | src/c3nav/routing/route.py | Python | apache-2.0 | 9,539 |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import proxy
def entry_point() -> None:
with proxy.Proxy(
enable_web_server=True,
port=9000,
# NOTE: Pass plugins via *args if you define custom flags.
# Currently plugins passed via **kwargs are not discovered for
# custom flags by proxy.py
#
# See https://github.com/abhinavsingh/proxy.py/issues/871
plugins=[
'app.plugins.MyWebServerPlugin',
'app.plugins.MyProxyPlugin',
],
) as _:
proxy.sleep_loop()
| abhinavsingh/proxy.py | skeleton/app/app.py | Python | bsd-3-clause | 911 |
from PyQt4 import QtGui
import os
import overlayDialogBase
import ilastik.core.overlayMgr as overlayMgr
from ilastik.core import dataImpex
from ilastik.gui import stackloader
#*******************************************************************************
# S t a c k O v e r l a y D i a l o g *
#*******************************************************************************
class StackOverlayDialog(overlayDialogBase.OverlayDialogBase):
configuresClass = "ilastik.core.overlays.stackOverlayDialog.StackOverlayDialog"
name = "Add Stack Overlay"
author = "C. N. S."
homepage = "hci"
description = "add a new overlays from image stack"
def __init__(self, ilastik, instance = None):
self.ilastik = ilastik
def okClicked(self):
if len(self.overlayItem.dsets) >= 2:
self.accept()
else:
            QtGui.QMessageBox.warning(self, "Error", "Please select more than one overlay for thresholding - either more than one foreground overlay, or one foreground and one background overlay!")
def exec_(self):
activeItem = self.ilastik.project.dataMgr[self.ilastik._activeImageNumber]
ovm = self.ilastik.project.dataMgr[self.ilastik._activeImageNumber].overlayMgr
sl = stackloader.StackLoader(self.ilastik)
#imageData = sl.exec_()
path, fileList, options = sl.exec_()
if path is None:
return
theDataItem = None
try:
theDataItem = dataImpex.DataImpex.importDataItem(fileList, options)
except MemoryError:
QtGui.QErrorMessage.qtHandler().showMessage("Not enough memory !")
if theDataItem is not None:
# file name
dirname = os.path.basename(os.path.dirname(path))
offsetstr = '(' + str(options.offsets[0]) + ', ' + str(options.offsets[1]) + ', ' + str(options.offsets[2]) + ')'
theDataItem._name = dirname + ' ' + offsetstr
theDataItem.fileName = path
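            # only accept the stack if its non-channel dimensions match the active image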
if theDataItem.shape[0:-1] == activeItem.shape[0:-1]:
data = theDataItem[:,:,:,:,:]
ov = overlayMgr.OverlayItem(data, color = long(65535 << 16), alpha = 1.0, colorTable = None, autoAdd = True, autoVisible = True, min = 0, max = 255)
return ov
else:
print "Cannot add " + theDataItem.fileName + " due to dimensionality mismatch"
return None
| ilastik/ilastik-0.5 | ilastik/gui/overlayDialogs/stackOverlayDialog.py | Python | bsd-2-clause | 2,586 |
# coding=utf-8
__version__ = '1.2'
| interhui/py-text | text/__init__.py | Python | apache-2.0 | 36 |
import os
import redis
from rq import Worker, Queue, Connection
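# Queues are worked in the order listed, so jobs on 'high' are picked up before
# 'default' and 'low'.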
listen = ['high', 'default', 'low']
redis_url = os.getenv('REDIS_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(list(map(Queue, listen)))
worker.work()
| noisy/steemprojects.com | worker.py | Python | mit | 325 |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_feature
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosFeatureModule(TestNxosModule):
module = nxos_feature
def setUp(self):
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_feature.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_feature.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_run_commands.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
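        # Replay canned device output: each command issued by the module is
        # mapped to a fixture file named after the command text.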
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = '%s.txt' % str(command).replace(' ', '_')
output.append(load_fixture('nxos_feature', filename))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = None
def test_nxos_feature_enable(self):
set_module_args(dict(feature='nve', state='enabled'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['feature nv overlay'])
def test_nxos_feature_disable(self):
set_module_args(dict(feature='ospf', state='disabled'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['no feature ospf'])
| qrkourier/ansible | test/units/modules/network/nxos/test_nxos_feature.py | Python | gpl-3.0 | 2,591 |
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark(object):
def __init__(self, name, index, line, column, buffer, pointer):
self.name = name
self.index = index
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
def get_snippet(self, indent=4, max_length=75):
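        # Build a one-line excerpt of the buffer around the error position:
        # scan out to the surrounding line breaks, truncate each side to roughly
        # max_length/2 (marking cuts with ' ... '), and append a caret line that
        # points at the offending column.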
if self.buffer is None:
return None
head = ''
start = self.pointer
while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
start -= 1
if self.pointer-start > max_length/2-1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
end += 1
if end-self.pointer > max_length/2-1:
tail = ' ... '
end -= 5
break
snippet = self.buffer[start:end].encode('utf-8')
return ' '*indent + head + snippet + tail + '\n' \
+ ' '*(indent+self.pointer-start+len(head)) + '^'
def __str__(self):
snippet = self.get_snippet()
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line+1, self.column+1)
if snippet is not None:
where += ":\n"+snippet
return where
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None):
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
def __str__(self):
lines = []
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None:
lines.append(self.note)
return '\n'.join(lines)
| cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/yaml/error.py | Python | mit | 2,559 |
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb
from twisted.internet import reactor
import sys
class ErrorHandler:
def __init__(self,fileName):
self.fileName = fileName
self.handler = open(self.fileName, "a")
def write(self, string):
self.handler = open(self.fileName, "a")
self.handler.write(string)
self.handler.close()
# not sure if these are necessary:
def close(self):
pass
def flush(self):
pass
class Client(pb.Referenceable):
def __init__(self,id):
self.id=id
def remote_id(self):
id = self.id
return id
    # kwargs must contain 'templatePath': the import path of a module that provides a run(kwargs) function
def remote_start(self,kwargs):
        template = __import__(kwargs['templatePath'], fromlist=["run"])
        resp = template.run(kwargs)
d=self.server.callRemote("spiderResponse",resp)
def s(a):
global connect
connect.disconnect()
exit()
d.addCallback(s)
def connect(self,server):
self.server=server
server.callRemote("spiderConnected",self)
def main():
f=open('teste.txt','w')
f.write(str(sys.argv))
f.close()
id=sys.argv[1]
#sys.stderr = ErrorHandler('err.txt')
#sys.stdout = ErrorHandler('out.txt')
#id=0
factory = pb.PBClientFactory()
c = Client(id)
global connect
connect = reactor.connectTCP("localhost", 8800, factory)
server = factory.getRootObject()
server.addCallback(c.connect)
reactor.run()
main() | DSerejo/python-webscraping | client.py | Python | gpl-2.0 | 1,638 |
import pytest
import salt.renderers.tomlmod
import salt.serializers.toml
@pytest.mark.skipif(
salt.serializers.toml.HAS_TOML is False, reason="The 'toml' library is missing"
)
def test_toml_render_string():
data = """[[user-sshkey."ssh_auth.present"]]
user = "username"
[[user-sshkey."ssh_auth.present"]]
config = "%h/.ssh/authorized_keys"
[[user-sshkey."ssh_auth.present"]]
names = [
"hereismykey",
"anotherkey"
]
"""
expected_result = {
"user-sshkey": {
"ssh_auth.present": [
{"user": "username"},
{"config": "%h/.ssh/authorized_keys"},
{"names": ["hereismykey", "anotherkey"]},
]
}
}
result = salt.renderers.tomlmod.render(data)
assert result == expected_result
| saltstack/salt | tests/pytests/unit/renderers/test_toml.py | Python | apache-2.0 | 924 |
# Base classes for Hubs.
#
# Copyright (C) 2011-2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
# Modification(s):
# No.1
# Author(s): Xia Lei <lei.xia@cs2c.com.cn>
# Descriptions: - set StorageSpoke to be indirect, and set CustomPartitioningSpoke
# be direct.
# - set default selected disks to be all disks.
# - show correctly the storage partitioned messages on the hub.
# Modificated file(s):pyanaconda/ui/gui/spoke/storage.py,
# pyanaconda/ui/gui/spoke/custom.py,
# pyanaconda/ui/gui/hub/__init__.py
# keywords: indirect and direct; default selected disks; show correctly messages
import os
# pylint: disable-msg=E0611
from gi.repository import GLib
from pyanaconda.flags import flags
from pyanaconda.i18n import _
from pyanaconda.product import distributionText
from pyanaconda.ui import common
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.categories import collect_categories
from pyanaconda.ui.gui.spokes import StandaloneSpoke, collect_spokes
from pyanaconda.ui.gui.utils import gtk_call_once
from pyanaconda.constants import ANACONDA_ENVIRON
import logging
log = logging.getLogger("anaconda")
class Hub(GUIObject, common.Hub):
"""A Hub is an overview UI screen. A Hub consists of one or more grids of
configuration options that the user may choose from. Each grid is
provided by a SpokeCategory, and each option is provided by a Spoke.
When the user dives down into a Spoke and is finished interacting with
it, they are returned to the Hub.
Some Spokes are required. The user must interact with all required
Spokes before they are allowed to proceed to the next stage of
installation.
From a layout perspective, a Hub is the entirety of the screen, though
the screen itself can be roughly divided into thirds. The top third is
some basic navigation information (where you are, what you're
installing). The middle third is the grid of Spokes. The bottom third
is an action area providing additional buttons (quit, continue) or
progress information (during package installation).
Installation may consist of multiple chained Hubs, or Hubs with
additional standalone screens either before or after them.
"""
def __init__(self, data, storage, payload, instclass):
"""Create a new Hub instance.
        The arguments this base class accepts define the API that Hubs
        have to work with. A Hub does not get free rein over everything
        in the anaconda class, as that would be a big mess. Instead, a
Hub may count on the following:
ksdata -- An instance of a pykickstart Handler object. The
Hub uses this to populate its UI with defaults
and to pass results back after it has run.
storage -- An instance of storage.Storage. This is useful for
determining what storage devices are present and how
they are configured.
payload -- An instance of a packaging.Payload subclass. This
is useful for displaying and selecting packages to
install, and in carrying out the actual installation.
instclass -- An instance of a BaseInstallClass subclass. This
is useful for determining distribution-specific
installation information like default package
selections and default partitioning.
"""
GUIObject.__init__(self, data)
common.Hub.__init__(self, data, storage, payload, instclass)
# enable the autoContinue feature if we are in kickstart
# mode, but if the user interacts with the hub, it will be
# disabled again
self._autoContinue = flags.automatedInstall
self._incompleteSpokes = []
self._inSpoke = False
self._notReadySpokes = []
self._spokes = {}
self._checker = None
def _runSpoke(self, action):
from gi.repository import Gtk
# This duplicates code in widgets/src/BaseWindow.c, but we want to make sure
# maximize gets called every time a spoke is displayed to prevent the 25%
# UI from showing up.
action.window.maximize()
action.window.set_property("expand", True)
action.refresh()
action.window.set_beta(self.window.get_beta())
#nkwin7 add by yuwan
#action.window.set_property("distribution", distributionText().upper())
action.window.set_property("distribution", distributionText())
#nkwin7 done
action.window.set_transient_for(self.window)
action.window.show_all()
# Start a recursive main loop for this spoke, which will prevent
# signals from going to the underlying (but still displayed) Hub and
# prevent the user from switching away. It's up to the spoke's back
# button handler to kill its own layer of main loop.
Gtk.main()
action.window.set_transient_for(None)
action._visitedSinceApplied = True
# Don't take _visitedSinceApplied into account here. It will always be
# True from the line above.
if action.changed and (not action.skipTo or (action.skipTo and action.applyOnSkip)):
action.apply()
action.execute()
action._visitedSinceApplied = False
def _collectCategoriesAndSpokes(self):
"""collects categories and spokes to be displayed on this Hub
:return: dictionary mapping category class to list of spoke classes
:rtype: dictionary[category class] -> [ list of spoke classes ]
"""
ret = {}
# Collect all the categories this hub displays, then collect all the
# spokes belonging to all those categories.
categories = sorted(filter(lambda c: c.displayOnHub == self.__class__, collect_categories(self.paths["categories"])),
key=lambda c: c.title)
for c in categories:
ret[c] = collect_spokes(self.paths["spokes"], c.__name__)
return ret
def _createBox(self):
from gi.repository import Gtk, AnacondaWidgets
from pyanaconda.ui.gui.utils import setViewportBackground
cats_and_spokes = self._collectCategoriesAndSpokes()
categories = cats_and_spokes.keys()
grid = Gtk.Grid()
grid.set_row_spacing(6)
grid.set_column_spacing(6)
grid.set_column_homogeneous(True)
grid.set_margin_bottom(12)
row = 0
for c in sorted(categories, key=lambda c: c.title):
obj = c()
selectors = []
for spokeClass in sorted(cats_and_spokes[c], key=lambda s: s.title):
# Check if this spoke is to be shown in anaconda
if not spokeClass.should_run(ANACONDA_ENVIRON, self.data):
continue
# Create the new spoke and populate its UI with whatever data.
# From here on, this Spoke will always exist.
spoke = spokeClass(self.data, self.storage, self.payload, self.instclass)
# nkwin6 add begin
spokeName = spoke.__class__.__name__
if spokeName == "StorageSpoke":
storageSpokeClass = spoke
if spokeName == "CustomPartitioningSpoke":
storageSpokeClass.hubClass = self
storageSpokeClass.spokeClass = spoke
# If a spoke is not showable, it is unreachable in the UI. We
# might as well get rid of it.
#
# NOTE: Any kind of spoke can be unshowable.
if not spoke.showable:
del(spoke)
continue
# This allows being able to jump between two spokes without
# having to directly involve the hub.
self._spokes[spokeClass.__name__] = spoke
# If a spoke is indirect, it is reachable but not directly from
# a hub. This is for things like the custom partitioning spoke,
# which you can only get to after going through the initial
# storage configuration spoke.
#
# NOTE: This only makes sense for NormalSpokes. Other kinds
# of spokes do not involve a hub.
if spoke.indirect:
spoke.initialize()
continue
spoke.selector = AnacondaWidgets.SpokeSelector(_(spoke.title), spoke.icon)
# Set all selectors to insensitive before initialize runs. The call to
# _updateCompleteness later will take care of setting it straight.
spoke.selector.set_sensitive(False)
spoke.initialize()
if not spoke.ready:
self._notReadySpokes.append(spoke)
# Set some default values on the associated selector that
# affect its display on the hub.
self._updateCompleteness(spoke)
spoke.selector.connect("button-press-event", self._on_spoke_clicked, spoke)
spoke.selector.connect("key-release-event", self._on_spoke_clicked, spoke)
# If this is a kickstart install, attempt to execute any provided ksdata now.
if flags.automatedInstall and spoke.ready and spoke.changed and \
spoke._visitedSinceApplied:
spoke.execute()
spoke._visitedSinceApplied = False
selectors.append(spoke.selector)
if not selectors:
continue
# nkwin7 add by yuwan
if obj.title == "USER SETTINGS":
from gi.repository import Gdk
if Gdk.Screen.height() >= 1000:
pix_gif = Gtk.Image.new_from_file("/usr/share/anaconda/pixmaps/install-big.gif")
elif Gdk.Screen.height() >= 900:
pix_gif = Gtk.Image.new_from_file("/usr/share/anaconda/pixmaps/install-middle.gif")
else:
pix_gif = Gtk.Image.new_from_file("/usr/share/anaconda/pixmaps/install.gif")
pix_gif.show()
grid.attach(pix_gif, 0, 1, 3, 2)
continue
# nkwin7 done
label = Gtk.Label("<span font-desc=\"Sans 14\">%s</span>" % _(obj.title))
label.set_use_markup(True)
label.set_halign(Gtk.Align.START)
label.set_margin_top(12)
label.set_margin_bottom(12)
grid.attach(label, 0, row, 2, 1)
row += 1
col = 0
for selector in selectors:
selector.set_margin_left(12)
grid.attach(selector, col, row, 1, 1)
col = int(not col)
if col == 0:
row += 1
# If this category contains an odd number of selectors, the above
# row += 1 will not have run for the last row, which puts the next
# category's title in the wrong place.
if len(selectors) % 2:
row += 1
spokeArea = self.window.get_spoke_area()
viewport = Gtk.Viewport()
viewport.add(grid)
spokeArea.add(viewport)
setViewportBackground(viewport)
# nkwin7 add begin
# keywords: indirect and direct; default selected disks; show correctly message
# show correctly storage partitioned messages on the hub
def tips_text(self, spoke):
if spoke.status == (_("Not enough free space on disks")):
msg = _("Not enough free space on disks, please click "
"custom partition page to delete used partition "
"and create new partition.")
spoke.selector.set_tooltip_markup(msg)
elif spoke.status == (_("Error checking storage configuration")):
msg = _("Error storage partition, please click custom "
"partition page to look over details.")
spoke.selector.set_tooltip_markup(msg)
elif spoke.status == (_("Warning checking storage configuration")):
msg = _("Warning storage partition, ignoring warning or "
"please click custom partition page to look over "
"details.")
spoke.selector.set_tooltip_markup(msg)
elif spoke.status == (_("Automatic partitioning selected")):
msg = _("Automatic partitioning for you, start to "
"install or click custom partition page to look over "
"details.")
spoke.selector.set_tooltip_markup(msg)
else:
spoke.selector.set_tooltip_markup(spoke.status)
# nkwin7 end
def _updateCompleteness(self, spoke):
spoke.selector.set_sensitive(spoke.ready)
spoke.selector.set_property("status", spoke.status)
# nkwin7 add begin
# keywords: indirect and direct; default selected disks; show correctly message
# show correctly storage partitioned messages on the hub
spokeName = spoke.__class__.__name__
if spokeName == "CustomPartitioningSpoke":
self.tips_text(spoke)
else:
spoke.selector.set_tooltip_markup(spoke.status)
# nkwin7 end
spoke.selector.set_incomplete(not spoke.completed and spoke.mandatory)
self._handleCompleteness(spoke)
def _handleCompleteness(self, spoke):
# Add the spoke to the incomplete list if it's now incomplete, and make
# sure it's not on the list if it's now complete. Then show the box if
# it's needed and hide it if it's not.
if not spoke.mandatory or spoke.completed:
if spoke in self._incompleteSpokes:
self._incompleteSpokes.remove(spoke)
else:
if spoke not in self._incompleteSpokes:
self._incompleteSpokes.append(spoke)
self.clear_info()
if len(self._incompleteSpokes) == 0:
if self._checker and not self._checker.check():
self.set_warning(self._checker.error_message)
self.window.show_all()
else:
# nkwin7 add begin
# keywords: indirect and direct; default selected disks; show correctly message
# show correctly storage partitioned messages on the hub
msg = _("Please look over tips upon the mouse before continuing to the next step.")
# nkwin7 end
self.set_warning(msg)
self.window.show_all()
self._updateContinueButton()
@property
def continuePossible(self):
return len(self._incompleteSpokes) == 0 and len(self._notReadySpokes) == 0 and getattr(self._checker, "success", True)
def _updateContinueButton(self):
self.continueButton.set_sensitive(self.continuePossible)
def _update_spokes(self):
from pyanaconda.ui.communication import hubQ
import Queue
q = hubQ.q
if not self._spokes:
# no spokes, move on
gtk_call_once(self.continueButton.emit, "clicked")
# Grab all messages that may have appeared since last time this method ran.
while True:
try:
(code, args) = q.get(False)
except Queue.Empty:
break
# The first argument to all codes is the name of the spoke we are
# acting on. If no such spoke exists, throw the message away.
spoke = self._spokes.get(args[0], None)
if not spoke:
q.task_done()
continue
if code == hubQ.HUB_CODE_NOT_READY:
self._updateCompleteness(spoke)
if spoke not in self._notReadySpokes:
self._notReadySpokes.append(spoke)
self._updateContinueButton()
log.info("spoke is not ready: %s" % spoke)
elif code == hubQ.HUB_CODE_READY:
self._updateCompleteness(spoke)
if spoke in self._notReadySpokes:
self._notReadySpokes.remove(spoke)
self._updateContinueButton()
log.info("spoke is ready: %s" % spoke)
# If this is a real kickstart install (the kind with an input ks file)
# and all spokes are now completed, we should skip ahead to the next
# hub automatically. Take into account the possibility the user is
# viewing a spoke right now, though.
if flags.automatedInstall:
# Spokes that were not initially ready got the execute call in
# _createBox skipped. Now that it's become ready, do it. Note
# that we also provide a way to skip this processing (see comments
                # in communication.py) to prevent getting caught in a loop.
if not args[1] and spoke.changed and spoke._visitedSinceApplied:
spoke.execute()
spoke._visitedSinceApplied = False
if self.continuePossible:
if self._inSpoke:
self._autoContinue = False
elif self._autoContinue and q.empty():
# enqueue the emit to the Gtk message queue
gtk_call_once(self.continueButton.emit, "clicked")
elif code == hubQ.HUB_CODE_MESSAGE:
spoke.selector.set_property("status", args[1])
log.info("setting %s status to: %s" % (spoke, args[1]))
q.task_done()
return True
def refresh(self):
GUIObject.refresh(self)
self._createBox()
self._update_spoke_id = GLib.timeout_add(100, self._update_spokes)
### SIGNAL HANDLERS
def register_event_cb(self, event, cb):
if event == "continue" and hasattr(self, "continueButton"):
self.continueButton.connect("clicked", lambda *args: cb())
elif event == "quit" and hasattr(self, "quitButton"):
self.quitButton.connect("clicked", lambda *args: cb())
def _on_spoke_clicked(self, selector, event, spoke):
from gi.repository import Gdk
# This handler only runs for these two kinds of events, and only for
# activate-type keys (space, enter) in the latter event's case.
if event and not event.type in [Gdk.EventType.BUTTON_PRESS, Gdk.EventType.KEY_RELEASE]:
return
if event and event.type == Gdk.EventType.KEY_RELEASE and \
event.keyval not in [Gdk.KEY_space, Gdk.KEY_Return, Gdk.KEY_ISO_Enter, Gdk.KEY_KP_Enter, Gdk.KEY_KP_Space]:
return
if selector:
selector.grab_focus()
# On automated kickstart installs, our desired behavior is to display
# the hub while background processes work, then skip to the progress
# hub immediately after everything's done.
# However if the user proves his intent to change the kickstarted
# values by entering any of the spokes, we need to disable the
# autoContinue feature and wait for the user to explicitly state
# that he is done configuring by pressing the continue button.
self._autoContinue = False
self._inSpoke = True
self._runSpoke(spoke)
self._inSpoke = False
# Now update the selector with the current status and completeness.
for sp in self._spokes.itervalues():
if not sp.indirect:
self._updateCompleteness(sp)
# And then if that spoke wants us to jump straight to another one,
# handle that now.
if spoke.skipTo and spoke.skipTo in self._spokes:
dest = spoke.skipTo
# Clear out the skipTo setting so we don't cycle endlessly.
spoke.skipTo = None
self._on_spoke_clicked(self._spokes[dest].selector, None, self._spokes[dest])
| cs2c-zhangchao/nkwin1.0-anaconda | pyanaconda/ui/gui/hubs/__init__.py | Python | gpl-2.0 | 21,352 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.template import Context, Template
from django.core.urlresolvers import resolve
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.shortcuts import HttpResponse
from .models import Html
import tempfile
import time
import os
import subprocess
class InstanceError(Exception):
pass
class WkHtmlToPdfError(Exception):
pass
class WKHtmlToPDFGenerator(object):
"""
    Wkhtmltopdf generator: takes a context and an Html object and converts them to pdf.
"""
def __init__(self, **kwargs):
"""
        By default it looks for the wkhtmltopdf command in the django core
        settings; if it is not configured there, the plain 'wkhtmltopdf'
        command is used later on.
        :param kwargs:
        May contain the html and context: the html object is the model,
        the context belongs to the view.
:type kwargs: Dictionary
:returns: None
"""
tempfile.tempdir = '/tmp/'
self.tmp_dir = tempfile.gettempdir()
self.html = kwargs.get('html')
self.context = kwargs.get('context')
self.command = []
self.code_format = "utf-8"
if hasattr(settings, "WKHTML_TO_PDF_CMD") and isinstance(settings.WKHTML_TO_PDF_CMD, str):
self.command = [settings.WKHTML_TO_PDF_CMD]
@property
def _get_options(self):
""" Providing external options for wkhtmltopdf from settings
and HtmlHeaderFooter model
"""
if self.html.htmlheader.quiet:
self.command.append('--quiet')
if self.html.htmlheader.zoom:
self.command.extend(['--zoom', str(self.html.htmlheader.zoom)])
# default to UTF-8 encoding. Use <meta charset="latin-1"> to override.
self.command.extend(['--encoding', self.html.htmlheader.encode_type])
options = getattr(settings, 'WKHTML_OPTIONS', None)
if options is not None:
if not isinstance(options, dict):
raise InstanceError("WKHTML_OPTIONS not a dictionary")
for key, value in options.iteritems():
if value is None:
self.command.append(str(key))
else:
self.command.extend([str(key), str(value)])
def _write_data_into_file(self, content, name, file_to_del, css=False):
"""It will creates the temp file in temporary folder
:param: content: context of view
:type: content: Dictionary
:param: name:
html or css file suffix
:type: name: str or unicode
:param: file_to_del:
it will holds the temp files for after delete those
files from temp folder when pdf generate complete.
:type: file_to_del: list
:param: css:
By default it is False, If it provides then this suffix is css.
:type: css: bool
:returns: temp file object
"""
encoded_content = content.encode(self.code_format)
if not css:
_sanitize_string = self._sanitize_html(encoded_content)
else:
_sanitize_string = encoded_content
render_str = self._render_string(_sanitize_string)
com_file = file(
os.path.join(self.tmp_dir, str(time.time()) + name), 'w'
)
com_file.write(render_str.encode(self.code_format))
file_to_del.append(com_file.name)
return com_file
@property
def generate_pdf_file(self):
"""This method will generates the Pdf object from html
:return: pdf object
:raise: RuntimeError: run time
:raise: WkHtmlToPdfError: html error
:raise: OSError, IOError: none
"""
out_filename = tempfile.mktemp(suffix=".pdf", prefix="webkit.tmp.")
file_to_del = [out_filename]
if not self.command:
self.command = ['wkhtmltopdf']
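        # NB: _get_options is a property evaluated purely for its side effect of
        # appending the configured wkhtmltopdf flags to self.command.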
self._get_options
if self.html.htmlheader.header:
head_file = self._write_data_into_file(
str(self.html.htmlheader.header),
'.head.html',
file_to_del
)
file_to_del.append(head_file.name)
self.command.extend(['--header-html', head_file.name])
head_file.close()
if self.html.htmlheader.footer:
foot_file = self._write_data_into_file(
self.html.htmlheader.footer,
'.foot.html',
file_to_del
)
self.command.extend(['--footer-html', foot_file.name])
file_to_del.append(foot_file.name)
foot_file.close()
if self.html.htmlheader.css:
css_file = self._write_data_into_file(
                self.html.htmlheader.css,
'.style.css',
file_to_del,
css=True
)
file_to_del.append(css_file.name)
self.command.extend(['--user-style-sheet', css_file.name])
css_file.close()
if self.html.htmlheader.margin_top:
self.command.extend([
'--margin-top',
str(self.html.htmlheader.margin_top).replace(',', '.')
])
if self.html.htmlheader.margin_bottom:
self.command.extend([
'--margin-bottom',
str(self.html.htmlheader.margin_bottom).replace(',', '.')
])
if self.html.htmlheader.margin_left:
self.command.extend([
'--margin-left',
str(self.html.htmlheader.margin_left).replace(',', '.')
])
if self.html.htmlheader.margin_right:
self.command.extend([
'--margin-right',
str(self.html.htmlheader.margin_right).replace(',', '.')
])
if self.html.htmlheader.orientation:
self.command.extend([
'--orientation',
str(self.html.htmlheader.orientation).replace(',', '.')
])
if self.html.htmlheader.page_size:
self.command.extend([
'--page-size',
str(self.html.htmlheader.page_size).replace(',', '.')
])
count = 0
for body in self.html.htmlbody.all():
html_file = self._write_data_into_file(
body.body,
'.%s.body.html' % body.id,
file_to_del
)
self.command.append(html_file.name)
count += 1
html_file.close()
self.command.append(out_filename)
seder_fd, seder_path = tempfile.mkstemp(text=True)
file_to_del.append(seder_path)
try:
status = subprocess.call(self.command, stderr=seder_fd)
os.close(seder_fd) # ensure flush before reading
seder_fd = None # avoid closing again in finally block
file_obj = open(seder_path, 'r')
message = file_obj.read()
file_obj.close()
if not message:
error_message = 'No diagnosis message was provided'
else:
error_message = '''The following diagnosis message was provided:\n''' + message
if status:
raise RuntimeError("""
Webkit error The command 'wkhtmltopdf'
failed with error
code = %s. Message: %s""" %
(status, error_message))
pdf_file = open(out_filename, 'rb')
pdf = pdf_file.read()
pdf_file.close()
except Exception as e:
if subprocess.call(['which', self.command[0]]):
raise WkHtmlToPdfError("make sure wkhtmltopdf installed in your instance \
or check wkhtmltopdf path is given correctly")
if "does not support more then one input document" in (e.message):
raise WkHtmlToPdfError("""This Wkhtmltopdf doesn't support please follow this link
http://stackoverflow.com/questions/18758589/wkhtmltopdf-installation-error-on-ubuntu""")
finally:
if seder_fd is not None:
os.close(seder_fd)
for f_to_del in file_to_del:
try:
os.unlink(f_to_del)
except (OSError, IOError):
# print("cannot remove file %s: %s" % (f_to_del, exc))
pass
return pdf
def _render_string(self, html):
"""Render the context in html
:param html: html data
:type html: str or unicode
        :returns: the html rendered with the context
"""
temp = Template(html)
return temp.render(Context(self.context))
@staticmethod
def _sanitize_html(html):
"""wkhtmltopdf expects the html page to declare a doctype.
:param html: html document
:type html: str or unicode
:returns: html document
"""
input_html = html
if input_html and input_html[:9].upper() != "<!DOCTYPE":
html = "<!DOCTYPE html>\n" + input_html
return html
def convert_html_to_pdf(**kwargs):
"""It is Api call for converting Html to Pdf.
It Creates the WKHtmlToPDFGenerator instance.
:param request: view client request
:type request: WSGIRequest
:param context: rendering template with this context --> optional
:type context: Dictionary
:param name: pdf name --> optional
:type name: str or unicode or int
:returns: Sends the HttpResponse to view
:raises: DoesNotExist
:raises: InstanceError
"""
if 'request' not in kwargs:
raise KeyError('request param not in kwargs')
request = kwargs.get('request')
if not isinstance(request, WSGIRequest):
raise InstanceError("request is not instance of WSGIRequest")
url_match = resolve(request.path)
view = url_match.func.__module__ + "." + url_match.func.__name__
try:
html = Html.objects.get(view=view)
except Html.DoesNotExist:
raise Html.DoesNotExist("The provided view does not match in the Html model, view={}\
".format(view))
webkit = WKHtmlToPDFGenerator(context=kwargs.get('context'), html=html)
disposition = 'attachment;'
if not html.attachment:
disposition = ''
if 'name' in kwargs and len(str(kwargs.get('name'))) > 0:
disposition += " filename={}.pdf".format(str(kwargs.get('name')))
else:
disposition += " filename={}.pdf".format(html.name)
response = HttpResponse(
webkit.generate_pdf_file,
content_type='application/pdf')
response['Content-Disposition'] = disposition
webkit.command = []
return response
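# Rough usage sketch (hypothetical view; the Invoice model and the matching Html
# record pointing at this view are assumptions, not part of this module):
#
#     def invoice_pdf(request, pk):
#         context = {'invoice': Invoice.objects.get(pk=pk)}
#         return convert_html_to_pdf(request=request, context=context, name=pk)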
| dhanababum/dj-wkhtmltopdf | djwkhtmltopdf/utils.py | Python | bsd-2-clause | 10,805 |
MAJOR = 0
MINOR = 0
PATCH = 8
VERSION = (MAJOR, MINOR, PATCH)
| openwsn-berkeley/coap | coap/coapVersion.py | Python | bsd-3-clause | 68 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-07 18:00
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("accounts", "0003_auto_20170607_1800"),
("studies", "0001_initial"),
]
operations = [
migrations.RemoveField(model_name="response", name="profile"),
migrations.AddField(
model_name="response",
name="child",
field=models.ForeignKey(
default=1,
on_delete=django.db.models.deletion.DO_NOTHING,
to="accounts.Child",
),
preserve_default=False,
),
]
| CenterForOpenScience/lookit-api | studies/migrations/0002_auto_20170607_1800.py | Python | apache-2.0 | 757 |
# -*- coding: utf-8 -*-
#
# PyMC documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 29 09:34:37 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyMC'
copyright = u'2013, John Salvatier and Christopher Fonnesbeck'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
release = '3.0a1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyMCdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyMC.tex', u'PyMC Documentation',
u'John Salvatier and Christopher Fonnesbeck', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymc', u'PyMC Documentation',
[u'John Salvatier and Christopher Fonnesbeck'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyMC', u'PyMC Documentation',
u'John Salvatier and Christopher Fonnesbeck', 'PyMC', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'PyMC'
epub_author = u'John Salvatier and Christopher Fonnesbeck'
epub_publisher = u'John Salvatier and Christopher Fonnesbeck'
epub_copyright = u'2013, John Salvatier and Christopher Fonnesbeck'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| LoLab-VU/pymc | docs/conf.py | Python | apache-2.0 | 9,265 |
# -*- coding: utf-8 -*-
# Copyright © 2012-2017 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Nikola plugin categories."""
import sys
import os
import io
from yapsy.IPlugin import IPlugin
from doit.cmd_base import Command as DoitCommand
from .utils import LOGGER, first_line, req_missing
__all__ = (
'Command',
'LateTask',
'PageCompiler',
'RestExtension',
'MarkdownExtension',
'MetadataExtractor',
'Task',
'TaskMultiplier',
'TemplateSystem',
'SignalHandler',
'ConfigPlugin',
'PostScanner',
'Taxonomy',
)
class BasePlugin(IPlugin):
"""Base plugin class."""
def set_site(self, site):
"""Set site, which is a Nikola instance."""
self.site = site
self.inject_templates()
def inject_templates(self):
"""Inject 'templates/<engine>' (if exists) very early in the theme chain."""
try:
# Sorry, found no other way to get this
mod_path = sys.modules[self.__class__.__module__].__file__
mod_dir = os.path.dirname(mod_path)
tmpl_dir = os.path.join(
mod_dir, 'templates', self.site.template_system.name
)
if os.path.isdir(tmpl_dir):
# Inject tmpl_dir low in the theme chain
self.site.template_system.inject_directory(tmpl_dir)
except AttributeError:
# In some cases, __builtin__ becomes the module of a plugin.
            # We couldn’t reproduce that, nor really find the reason for it,
# so let’s just ignore it and be done with it.
pass
def inject_dependency(self, target, dependency):
"""Add 'dependency' to the target task's task_deps."""
self.site.injected_deps[target].append(dependency)
def get_deps(self, filename):
"""Find the dependencies for a file."""
return []
class PostScanner(BasePlugin):
"""The scan method of these plugins is called by Nikola.scan_posts."""
def scan(self):
"""Create a list of posts from some source. Returns a list of Post objects."""
raise NotImplementedError()
def supported_extensions(self):
"""Return a list of supported file extensions, or None if such a list isn't known beforehand."""
return None
class Command(BasePlugin, DoitCommand):
"""Doit command implementation."""
name = "dummy_command"
doc_purpose = "A short explanation."
doc_usage = ""
doc_description = None # None value will completely omit line from doc
# see http://python-doit.sourceforge.net/cmd_run.html#parameters
cmd_options = ()
needs_config = True
def __init__(self, *args, **kwargs):
"""Initialize a command."""
BasePlugin.__init__(self, *args, **kwargs)
DoitCommand.__init__(self)
def __call__(self, config=None, **kwargs):
"""Reset doit arguments (workaround)."""
self._doitargs = kwargs
DoitCommand.__init__(self, config, **kwargs)
return self
def execute(self, options=None, args=None):
"""Check if the command can run in the current environment, fail if needed, or call _execute."""
options = options or {}
args = args or []
if self.needs_config and not self.site.configured:
LOGGER.error("This command needs to run inside an existing Nikola site.")
return False
return self._execute(options, args)
def _execute(self, options, args):
"""Do whatever this command does.
@param options (dict) with values from cmd_options
@param args (list) list of positional arguments
"""
raise NotImplementedError()
def help(self):
"""Return help text for a command."""
text = []
text.append("Purpose: %s" % self.doc_purpose)
text.append("Usage: nikola %s %s" % (self.name, self.doc_usage))
text.append('')
text.append("Options:")
for opt in self.cmdparser.options:
text.extend(opt.help_doc())
if self.doc_description is not None:
text.append("")
text.append("Description:")
text.append(self.doc_description)
return "\n".join(text)
DoitCommand.help = help
class BaseTask(BasePlugin):
"""Base for task generators."""
name = "dummy_task"
# default tasks are executed by default.
    # the others have to be specified on the command line.
is_default = True
def gen_tasks(self):
"""Generate tasks."""
raise NotImplementedError()
def group_task(self):
"""Return dict for group task."""
return {
'basename': self.name,
'name': None,
'doc': first_line(self.__doc__),
}
class Task(BaseTask):
"""Task generator."""
name = "dummy_task"
class LateTask(BaseTask):
"""Late task generator (plugin executed after all Task plugins)."""
name = "dummy_latetask"
class TemplateSystem(BasePlugin):
"""Provide support for templating systems."""
name = "dummy_templates"
def set_directories(self, directories, cache_folder):
"""Set the list of folders where templates are located and cache."""
raise NotImplementedError()
def template_deps(self, template_name):
"""Return filenames which are dependencies for a template."""
raise NotImplementedError()
def get_deps(self, filename):
"""Return paths to dependencies for the template loaded from filename."""
raise NotImplementedError()
def get_string_deps(self, text):
"""Find dependencies for a template string."""
raise NotImplementedError()
def render_template(self, template_name, output_name, context):
"""Render template to a file using context.
This must save the data to output_name *and* return it
so that the caller may do additional processing.
"""
raise NotImplementedError()
def render_template_to_string(self, template, context):
"""Render template to a string using context."""
raise NotImplementedError()
def inject_directory(self, directory):
"""Inject the directory with the lowest priority in the template search mechanism."""
raise NotImplementedError()
def get_template_path(self, template_name):
"""Get the path to a template or return None."""
raise NotImplementedError()
class TaskMultiplier(BasePlugin):
"""Take a task and return *more* tasks."""
name = "dummy multiplier"
def process(self, task):
"""Examine task and create more tasks. Returns extra tasks only."""
return []
class PageCompiler(BasePlugin):
"""Compile text files into HTML."""
name = "dummy_compiler"
friendly_name = ''
demote_headers = False
supports_onefile = True
use_dep_file = True # If set to false, the .dep file is never written and not automatically added as a target
supports_metadata = False
metadata_conditions = []
default_metadata = {
'title': '',
'slug': '',
'date': '',
'tags': '',
'category': '',
'link': '',
'description': '',
'type': 'text',
}
config_dependencies = []
def get_dep_filename(self, post, lang):
"""Return the .dep file's name for the given post and language."""
return post.translated_base_path(lang) + '.dep'
def _read_extra_deps(self, post, lang):
"""Read contents of .dep file and return them as a list."""
dep_path = self.get_dep_filename(post, lang)
if os.path.isfile(dep_path):
with io.open(dep_path, 'r+', encoding='utf8') as depf:
deps = [l.strip() for l in depf.readlines()]
return deps
return []
def register_extra_dependencies(self, post):
"""Add dependency to post object to check .dep file."""
def create_lambda(lang):
# We create a lambda like this so we can pass `lang` to it, because if we didn’t
# add that function, `lang` would always be the last language in TRANSLATIONS.
# (See http://docs.python-guide.org/en/latest/writing/gotchas/#late-binding-closures)
return lambda: self._read_extra_deps(post, lang)
for lang in self.site.config['TRANSLATIONS']:
post.add_dependency(create_lambda(lang), 'fragment', lang=lang)
def get_extra_targets(self, post, lang, dest):
"""Return a list of extra targets for the render_posts task when compiling the post for the specified language."""
if self.use_dep_file:
return [self.get_dep_filename(post, lang)]
else:
return []
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
raise NotImplementedError()
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML strings (with shortcode support).
Returns a tuple of at least two elements: HTML string [0] and shortcode dependencies [last].
"""
# This function used to have some different APIs in different places.
raise NotImplementedError()
def create_post(self, path, content=None, onefile=False, is_page=False, **kw):
"""Create post file with optional metadata."""
raise NotImplementedError()
def extension(self):
"""Return the preferred extension for the output of this compiler."""
return ".html"
def read_metadata(self, post, lang=None):
"""Read the metadata from a post, and return a metadata dict."""
return {}
def split_metadata(self, data, post=None, lang=None):
"""Split data from metadata in the raw post content."""
if lang and post:
extractor = post.used_extractor[lang]
else:
import nikola.metadata_extractors
extractor = nikola.metadata_extractors.DEFAULT_EXTRACTOR
if isinstance(extractor, MetadataExtractor):
return extractor.split_metadata_from_text(data)
else:
return data, data
def get_compiler_extensions(self):
"""Activate all the compiler extension plugins for a given compiler and return them."""
plugins = []
for plugin_info in self.site.compiler_extensions:
if plugin_info.plugin_object.compiler_name == self.name:
plugins.append(plugin_info)
return plugins
class CompilerExtension(BasePlugin):
"""An extension for a Nikola compiler.
If you intend to implement those in your own compiler, you can:
(a) create a new plugin class for them; or
(b) use this class and filter them yourself.
    If you choose (b), you should add the compiler name to the .plugin
file in the Nikola/Compiler section and filter all plugins of
this category, getting the compiler name with:
p.details.get('Nikola', 'Compiler')
Note that not all compiler plugins have this option and you might
need to catch configparser.NoOptionError exceptions.
"""
name = "dummy_compiler_extension"
compiler_name = "dummy_compiler"
class RestExtension(CompilerExtension):
"""Extensions for reStructuredText."""
name = "dummy_rest_extension"
compiler_name = "rest"
class MarkdownExtension(CompilerExtension):
"""Extensions for Markdown."""
name = "dummy_markdown_extension"
compiler_name = "markdown"
class MetadataExtractor(BasePlugin):
"""Plugins that can extract meta information from post files."""
# Name of the extractor. (required)
name = "unknown"
# Where to get metadata from. (MetaSource; required)
source = None
# Priority of extractor. (MetaPriority; required)
priority = None
# List of tuples (MetaCondition, arg) with conditions used to select this extractor.
conditions = []
# Regular expression used for splitting metadata, or None if not applicable.
split_metadata_re = None
# List of tuples (import name, pip name, friendly name) of Python packages required for this extractor.
requirements = []
# Name of METADATA_MAPPING to use, if any.
map_from = None
# Whether or not the extractor supports writing metadata.
supports_write = False
def _extract_metadata_from_text(self, source_text: str) -> dict:
"""Extract metadata from text."""
raise NotImplementedError()
def split_metadata_from_text(self, source_text: str) -> (str, str):
"""Split text into metadata and content (both strings).
If splitting fails (there is no match), return source_text as both metadata and content.
(This behavior is required for 2-file posts.)
"""
if self.split_metadata_re is None:
return source_text, source_text
else:
split_result = self.split_metadata_re.split(source_text.lstrip(), maxsplit=1)
if len(split_result) == 1:
return source_text, source_text
else:
return split_result
def extract_text(self, source_text: str) -> dict:
"""Extract metadata from text (also calls ``split_metadata_from_text``)."""
split = self.split_metadata_from_text(source_text)
meta = self._extract_metadata_from_text(split[0])
return meta
def extract_filename(self, filename: str, lang: str) -> dict:
"""Extract metadata from filename."""
return {}
def write_metadata(self, metadata: dict, comment_wrap=False) -> str:
"""Write metadata in this extractor’s format.
``comment_wrap`` is either True, False, or a 2-tuple of comments to use for wrapping, if necessary.
If it’s set to True, defaulting to ``('<!--', '-->')`` is recommended.
This function should insert comment markers (if applicable) and must insert trailing newlines.
"""
raise NotImplementedError()
def check_requirements(self):
"""Check if requirements for an extractor are satisfied."""
for import_name, pip_name, friendly_name in self.requirements:
try:
__import__(import_name)
except ImportError:
req_missing([pip_name], "use {0} metadata".format(friendly_name), python=True, optional=False)
class SignalHandler(BasePlugin):
"""Signal handlers."""
name = "dummy_signal_handler"
class ConfigPlugin(BasePlugin):
"""A plugin that can edit config (or modify the site) on-the-fly."""
name = "dummy_config_plugin"
class ShortcodePlugin(BasePlugin):
"""A plugin that adds a shortcode."""
name = "dummy_shortcode_plugin"
def set_site(self, site):
"""Set Nikola site."""
self.site = site
site.register_shortcode(self.name, self.handler)
return super(ShortcodePlugin, self).set_site(site)
class Importer(Command):
"""Basic structure for importing data into Nikola.
The flow is:
read_data
preprocess_data
parse_data
generate_base_site
populate_context
create_config
filter_data
process_data
process_data can branch into:
import_story (may use import_file and save_post)
import_post (may use import_file and save_post)
import_attachment (may use import_file)
Finally:
write_urlmap
"""
name = "dummy_importer"
def _execute(self, options={}, args=[]):
"""Import the data into Nikola."""
raise NotImplementedError()
def generate_base_site(self, path):
"""Create the base site."""
raise NotImplementedError()
def populate_context(self):
"""Use data to fill context for configuration."""
raise NotImplementedError()
def create_config(self):
"""Use the context to create configuration."""
raise NotImplementedError()
def read_data(self, source):
"""Fetch data into self.data."""
raise NotImplementedError()
def preprocess_data(self):
"""Modify data if needed."""
pass
def parse_data(self):
"""Convert self.data into self.items."""
raise NotImplementedError()
def filter_data(self):
"""Remove data that's not to be imported."""
pass
def process_data(self):
"""Go through self.items and save them."""
def import_story(self):
"""Create a page."""
raise NotImplementedError()
def import_post(self):
"""Create a post."""
raise NotImplementedError()
def import_attachment(self):
"""Create an attachment."""
raise NotImplementedError()
def import_file(self):
"""Import a file."""
raise NotImplementedError()
def save_post(self):
"""Save a post to disk."""
raise NotImplementedError()
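# --- Illustrative sketch (not part of the upstream code above) --------------
# A skeleton that simply wires together the flow listed in the Importer
# docstring above, so the intended call order is visible in code.  A real
# importer implements each step for its source format; the plugin name, the
# option keys and the item dispatch below are invented for illustration.
class _ExampleImporterSkeleton(Importer):
    """Skeleton importer following the documented read/parse/write flow."""
    name = "dummy_importer_skeleton"
    def _execute(self, options={}, args=[]):
        source = args[0] if args else options.get("source")
        self.read_data(source)
        self.preprocess_data()
        self.parse_data()
        self.generate_base_site(options.get("output", "new_site"))
        self.populate_context()
        self.create_config()
        self.filter_data()
        self.process_data()
        # Finally, a real importer would also write its URL map (see above).
    def process_data(self):
        # Dispatch as described above: pages, posts and attachments take
        # different paths but may share import_file() and save_post().
        for item in getattr(self, "items", []):
            kind = item.get("kind")
            if kind == "story":
                self.import_story()
            elif kind == "post":
                self.import_post()
            else:
                self.import_attachment()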
class Taxonomy(BasePlugin):
"""Taxonomy for posts.
    A taxonomy plugin allows posts to be classified (see #2107) by
    classification strings. Classification plugins must adjust
    a set of options that determine how the classification behaves.
The following options are class attributes with their default
values. These variables should be set in the class definition,
in the constructor or latest in the `set_site` function.
classification_name = "taxonomy":
The classification name to be used for path handlers.
Must be overridden!
overview_page_items_variable_name = "items":
When rendering the overview page, its template will have a list
of pairs
(friendly_name, link)
for the classifications available in a variable by this name.
The template will also have a list
(friendly_name, link, post_count)
for the classifications available in a variable by the name
`overview_page_items_variable_name + '_with_postcount'`.
overview_page_variable_name = "taxonomy":
When rendering the overview page, its template will have a list
of classifications available in a variable by this name.
overview_page_hierarchy_variable_name = "taxonomy_hierarchy":
When rendering the overview page, its template will have a list
of tuples
(friendly_name, classification, classification_path, link,
indent_levels, indent_change_before, indent_change_after)
available in a variable by this name. These tuples can be used
to render the hierarchy as a tree.
The template will also have a list
(friendly_name, classification, classification_path, link,
indent_levels, indent_change_before, indent_change_after,
number_of_children, post_count)
available in the variable by the name
`overview_page_hierarchy_variable_name + '_with_postcount'`.
more_than_one_classifications_per_post = False:
If True, there can be more than one classification per post; in that case,
the classification data in the metadata is stored as a list. If False,
the classification data in the metadata is stored as a string, or None
when no classification is given.
has_hierarchy = False:
Whether the classification has a hierarchy.
include_posts_from_subhierarchies = False:
If True, the post list for a classification includes all posts with a
sub-classification (in case has_hierarchy is True).
include_posts_into_hierarchy_root = False:
If True, include_posts_from_subhierarchies == True will also insert
posts into the post list for the empty hierarchy [].
show_list_as_subcategories_list = False:
If True, for every classification which has at least one
subclassification, create a list of subcategories instead of a list/index
of posts. This is only used when has_hierarchy = True. The template
specified in subcategories_list_template will be used. If this is set
to True, it is recommended to set include_posts_from_subhierarchies to
True to get correct post counts.
show_list_as_index = False:
Whether to show the posts for one classification as an index or
as a post list.
subcategories_list_template = "taxonomy_list.tmpl":
The template to use for the subcategories list when
show_list_as_subcategories_list is True.
generate_atom_feeds_for_post_lists = False:
Whether to generate Atom feeds for post lists in case GENERATE_ATOM is set.
template_for_single_list = "tagindex.tmpl":
The template to use for the post list for one classification.
template_for_classification_overview = "list.tmpl":
The template to use for the classification overview page.
Set to None to avoid generating overviews.
always_disable_rss = False:
Whether to always disable RSS feed generation
apply_to_posts = True:
Whether this classification applies to posts.
apply_to_pages = False:
Whether this classification applies to pages.
minimum_post_count_per_classification_in_overview = 1:
The minimum number of posts a classification must have to be listed in
the overview.
omit_empty_classifications = False:
        Whether to omit post lists and indexes for empty
        classifications.
add_other_languages_variable = False:
In case this is `True`, each classification page will get a list
of triples `(other_lang, other_classification, title)` of classifications
in other languages which should be linked. The list will be stored in the
variable `other_languages`.
path_handler_docstrings:
        A dictionary of docstrings for path handlers. See e.g. nikola.py for
examples. Must be overridden, keys are "taxonomy_index", "taxonomy",
"taxonomy_atom", "taxonomy_rss" (but using classification_name instead
of "taxonomy"). If one of the values is False, the corresponding path
handler will not be created.
"""
name = "dummy_taxonomy"
# Adjust the following values in your plugin!
classification_name = "taxonomy"
overview_page_variable_name = "taxonomy"
overview_page_items_variable_name = "items"
overview_page_hierarchy_variable_name = "taxonomy_hierarchy"
more_than_one_classifications_per_post = False
has_hierarchy = False
include_posts_from_subhierarchies = False
include_posts_into_hierarchy_root = False
show_list_as_subcategories_list = False
show_list_as_index = False
subcategories_list_template = "taxonomy_list.tmpl"
generate_atom_feeds_for_post_lists = False
template_for_single_list = "tagindex.tmpl"
template_for_classification_overview = "list.tmpl"
always_disable_rss = False
apply_to_posts = True
apply_to_pages = False
minimum_post_count_per_classification_in_overview = 1
omit_empty_classifications = False
add_other_languages_variable = False
path_handler_docstrings = {
'taxonomy_index': '',
'taxonomy': '',
'taxonomy_atom': '',
'taxonomy_rss': '',
}
def is_enabled(self, lang=None):
"""Return True if this taxonomy is enabled, or False otherwise.
        If lang is None, this determines whether the classification is
made at all. If lang is not None, this determines whether the
overview page and the classification lists are created for this
language.
"""
return True
def get_implicit_classifications(self, lang):
"""Return a list of classification strings which should always appear in posts_per_classification."""
return []
def classify(self, post, lang):
"""Classify the given post for the given language.
Must return a list or tuple of strings.
"""
raise NotImplementedError()
def sort_posts(self, posts, classification, lang):
"""Sort the given list of posts.
Allows the plugin to order the posts per classification as it wants.
The posts will be ordered by date (latest first) before calling
this function. This function must sort in-place.
"""
pass
def sort_classifications(self, classifications, lang, level=None):
"""Sort the given list of classification strings.
Allows the plugin to order the classifications as it wants. The
classifications will be ordered by `natsort` before calling this
function. This function must sort in-place.
For hierarchical taxonomies, the elements of the list are a single
path element of the path returned by `extract_hierarchy()`. The index
of the path element in the path will be provided in `level`.
"""
pass
def get_classification_friendly_name(self, classification, lang, only_last_component=False):
"""Extract a friendly name from the classification.
The result of this function is usually displayed to the user, instead
of using the classification string.
The argument `only_last_component` is only relevant to hierarchical
taxonomies. If it is set, the printable name should only describe the
last component of `classification` if possible.
"""
raise NotImplementedError()
def get_overview_path(self, lang, dest_type='page'):
"""Return path for classification overview.
This path handler for the classification overview must return one or
two values (in this order):
* a list or tuple of strings: the path relative to OUTPUT_DIRECTORY;
* a string with values 'auto', 'always' or 'never', indicating whether
INDEX_FILE should be added or not.
Note that this function must always return a list or tuple of strings;
the other return value is optional with default value `'auto'`.
In case INDEX_FILE should potentially be added, the last element in the
returned path must have no extension, and the PRETTY_URLS config must
be ignored by this handler. The return value will be modified based on
the PRETTY_URLS and INDEX_FILE settings.
`dest_type` can be either 'page', 'feed' (for Atom feed) or 'rss'.
"""
raise NotImplementedError()
def get_path(self, classification, lang, dest_type='page'):
"""Return path to the classification page.
This path handler for the given classification must return one to
three values (in this order):
* a list or tuple of strings: the path relative to OUTPUT_DIRECTORY;
* a string with values 'auto', 'always' or 'never', indicating whether
INDEX_FILE should be added or not;
* an integer if a specific page of the index is to be targeted (will be
ignored for post lists), or `None` if the most current page is targeted.
Note that this function must always return a list or tuple of strings;
the other two return values are optional with default values `'auto'` and
`None`.
In case INDEX_FILE should potentially be added, the last element in the
returned path must have no extension, and the PRETTY_URLS config must
be ignored by this handler. The return value will be modified based on
the PRETTY_URLS and INDEX_FILE settings.
`dest_type` can be either 'page', 'feed' (for Atom feed) or 'rss'.
For hierarchical taxonomies, the result of extract_hierarchy is provided
as `classification`. For non-hierarchical taxonomies, the classification
string itself is provided as `classification`.
"""
raise NotImplementedError()
def extract_hierarchy(self, classification):
"""Given a classification, return a list of parts in the hierarchy.
For non-hierarchical taxonomies, it usually suffices to return
`[classification]`.
"""
return [classification]
def recombine_classification_from_hierarchy(self, hierarchy):
"""Given a list of parts in the hierarchy, return the classification string.
For non-hierarchical taxonomies, it usually suffices to return hierarchy[0].
"""
return hierarchy[0]
def provide_overview_context_and_uptodate(self, lang):
"""Provide data for the context and the uptodate list for the classifiation overview.
Must return a tuple of two dicts. The first is merged into the page's context,
the second will be put into the uptodate list of all generated tasks.
Context must contain `title`.
"""
raise NotImplementedError()
def provide_context_and_uptodate(self, classification, lang, node=None):
"""Provide data for the context and the uptodate list for the list of the given classifiation.
Must return a tuple of two dicts. The first is merged into the page's context,
the second will be put into the uptodate list of all generated tasks.
For hierarchical taxonomies, node is the `hierarchy_utils.TreeNode` element
corresponding to the classification.
Context must contain `title`, which should be something like 'Posts about <classification>'.
"""
raise NotImplementedError()
def should_generate_classification_page(self, classification, post_list, lang):
"""Only generates list of posts for classification if this function returns True."""
return True
def should_generate_rss_for_classification_page(self, classification, post_list, lang):
"""Only generates RSS feed for list of posts for classification if this function returns True."""
return self.should_generate_classification_page(classification, post_list, lang)
def postprocess_posts_per_classification(self, posts_per_classification_per_language, flat_hierarchy_per_lang=None, hierarchy_lookup_per_lang=None):
"""Rearrange, modify or otherwise use the list of posts per classification and per language.
For compatibility reasons, the list could be stored somewhere else as well.
In case `has_hierarchy` is `True`, `flat_hierarchy_per_lang` is the flat
hierarchy consisting of `hierarchy_utils.TreeNode` elements, and
`hierarchy_lookup_per_lang` is the corresponding hierarchy lookup mapping
classification strings to `hierarchy_utils.TreeNode` objects.
"""
pass
def get_other_language_variants(self, classification, lang, classifications_per_language):
"""Return a list of variants of the same classification in other languages.
Given a `classification` in a language `lang`, return a list of pairs
`(other_lang, other_classification)` with `lang != other_lang` such that
`classification` should be linked to `other_classification`.
Classifications where links to other language versions makes no sense
should simply return an empty list.
Provided is a set of classifications per language (`classifications_per_language`).
"""
return []
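# --- Illustrative sketch (not part of the upstream code above) --------------
# A minimal non-hierarchical Taxonomy that groups posts by the first letter
# of their title, using only the options and methods documented above.  The
# path layout, titles and plugin name are invented for illustration, and the
# overview page is skipped (template_for_classification_overview = None) to
# keep the required surface small.
class _ExampleInitialLetterTaxonomy(Taxonomy):
    """Group posts by the first letter of their title."""
    name = "dummy_initial_letter_taxonomy"
    classification_name = "initial"
    template_for_classification_overview = None
    path_handler_docstrings = {
        'initial_index': False,
        'initial': 'Link to the list of posts for one initial letter.',
        'initial_atom': '',
        'initial_rss': '',
    }
    def classify(self, post, lang):
        # Assumption: post objects expose title(lang), as Nikola posts do.
        title = post.title(lang) or ""
        return [title[:1].upper()] if title else []
    def get_classification_friendly_name(self, classification, lang, only_last_component=False):
        return classification
    def get_path(self, classification, lang, dest_type='page'):
        # Posts for "A" end up under initials/a/; INDEX_FILE handling is left
        # to Nikola ('auto', as documented above).
        return ["initials", classification.lower()], 'auto'
    def provide_context_and_uptodate(self, classification, lang, node=None):
        context = {"title": "Posts starting with {0}".format(classification)}
        return context, context.copy()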
| xuhdev/nikola | nikola/plugin_categories.py | Python | mit | 32,244 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.db.models import Q
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from zerver.lib import bugdown
from zerver.decorator import JsonableError
from zerver.lib.test_runner import slow
from zilencer.models import Deployment
from zerver.lib.test_helpers import (
ZulipTestCase,
get_user_messages,
make_client,
message_ids, message_stream_count,
most_recent_message,
queries_captured,
)
from zerver.models import (
MAX_MESSAGE_LENGTH, MAX_SUBJECT_LENGTH,
Message, Realm, Recipient, Stream, UserMessage, UserProfile, Attachment,
get_realm, get_stream, get_user_profile_by_email,
)
from zerver.lib.actions import (
check_message, check_send_message,
create_stream_if_needed,
do_add_subscription, do_create_user,
get_client,
)
from zerver.lib.upload import create_attachment
from zerver.views.messages import create_mirrored_message_users
import datetime
import DNS
import mock
import time
import ujson
from six import text_type
from six.moves import range
from typing import Any, List, Optional
class TestCrossRealmPMs(ZulipTestCase):
def setUp(self):
# type: () -> None
settings.CROSS_REALM_BOT_EMAILS.add('test-og-bot@zulip.com')
dep = Deployment()
dep.base_api_url = "https://zulip.com/api/"
dep.base_site_url = "https://zulip.com/"
# We need to save the object before we can access
# the many-to-many relationship 'realms'
dep.save()
dep.realms = [get_realm("zulip.com")]
dep.save()
def create_user(self, email):
# type: (text_type) -> UserProfile
username, domain = email.split('@')
self.register(username, 'test', domain=domain)
return get_user_profile_by_email(email)
def test_same_realm(self):
# type: () -> None
"""Users on the same realm can PM each other"""
r1 = Realm.objects.create(domain='1.example.com')
deployment = Deployment.objects.filter()[0]
deployment.realms.add(r1)
user1_email = 'user1@1.example.com'
user1 = self.create_user(user1_email)
user2_email = 'user2@1.example.com'
user2 = self.create_user(user2_email)
self.send_message(user1_email, user2_email, Recipient.PERSONAL)
messages = get_user_messages(user2)
self.assertEqual(len(messages), 1)
self.assertEquals(messages[0].sender.pk, user1.pk)
def test_different_realms(self):
# type: () -> None
"""Users on the different realms can not PM each other"""
r1 = Realm.objects.create(domain='1.example.com')
r2 = Realm.objects.create(domain='2.example.com')
deployment = Deployment.objects.filter()[0]
deployment.realms.add(r1)
deployment.realms.add(r2)
user1_email = 'user1@1.example.com'
self.create_user(user1_email)
user2_email = 'user2@2.example.com'
self.create_user(user2_email)
with self.assertRaisesRegexp(JsonableError,
'You can\'t send private messages outside of your organization.'):
self.send_message(user1_email, user2_email, Recipient.PERSONAL)
def test_three_different_realms(self):
# type: () -> None
"""Users on three different realms can not PM each other"""
r1 = Realm.objects.create(domain='1.example.com')
r2 = Realm.objects.create(domain='2.example.com')
r3 = Realm.objects.create(domain='3.example.com')
deployment = Deployment.objects.filter()[0]
deployment.realms.add(r1)
deployment.realms.add(r2)
deployment.realms.add(r3)
user1_email = 'user1@1.example.com'
self.create_user(user1_email)
user2_email = 'user2@2.example.com'
self.create_user(user2_email)
user3_email = 'user3@2.example.com'
self.create_user(user3_email)
with self.assertRaisesRegexp(JsonableError,
'You can\'t send private messages outside of your organization.'):
self.send_message(user1_email, [user2_email, user3_email], Recipient.PERSONAL)
def test_from_zulip_realm(self):
# type: () -> None
"""OG Users in the zulip.com realm can PM any realm"""
r1 = Realm.objects.create(domain='1.example.com')
deployment = Deployment.objects.filter()[0]
deployment.realms.add(r1)
user1_email = 'test-og-bot@zulip.com'
user1 = self.create_user(user1_email)
user2_email = 'user2@1.example.com'
user2 = self.create_user(user2_email)
self.send_message(user1_email, user2_email, Recipient.PERSONAL)
messages = get_user_messages(user2)
self.assertEqual(len(messages), 1)
self.assertEquals(messages[0].sender.pk, user1.pk)
def test_to_zulip_realm(self):
# type: () -> None
"""All users can PM users in the zulip.com realm"""
r1 = Realm.objects.create(domain='1.example.com')
deployment = Deployment.objects.filter()[0]
deployment.realms.add(r1)
user1_email = 'user1@1.example.com'
user1 = self.create_user(user1_email)
user2_email = 'test-og-bot@zulip.com'
user2 = self.create_user(user2_email)
self.send_message(user1_email, user2_email, Recipient.PERSONAL)
messages = get_user_messages(user2)
self.assertEqual(len(messages), 1)
self.assertEquals(messages[0].sender.pk, user1.pk)
def test_zulip_realm_can_not_join_realms(self):
# type: () -> None
"""Adding a zulip.com user to a PM will not let you cross realms"""
r1 = Realm.objects.create(domain='1.example.com')
r2 = Realm.objects.create(domain='2.example.com')
deployment = Deployment.objects.filter()[0]
deployment.realms.add(r1)
deployment.realms.add(r2)
user1_email = 'user1@1.example.com'
self.create_user(user1_email)
user2_email = 'user2@2.example.com'
self.create_user(user2_email)
user3_email = 'test-og-bot@zulip.com'
self.create_user(user3_email)
with self.assertRaisesRegexp(JsonableError,
'You can\'t send private messages outside of your organization.'):
self.send_message(user1_email, [user2_email, user3_email],
Recipient.PERSONAL)
class PersonalMessagesTest(ZulipTestCase):
def test_auto_subbed_to_personals(self):
# type: () -> None
"""
Newly created users are auto-subbed to the ability to receive
personals.
"""
self.register("test", "test")
user_profile = get_user_profile_by_email('test@zulip.com')
old_messages_count = message_stream_count(user_profile)
self.send_message("test@zulip.com", "test@zulip.com", Recipient.PERSONAL)
new_messages_count = message_stream_count(user_profile)
self.assertEqual(new_messages_count, old_messages_count + 1)
recipient = Recipient.objects.get(type_id=user_profile.id,
type=Recipient.PERSONAL)
self.assertEqual(most_recent_message(user_profile).recipient, recipient)
@slow("checks several profiles")
def test_personal_to_self(self):
# type: () -> None
"""
If you send a personal to yourself, only you see it.
"""
old_user_profiles = list(UserProfile.objects.all())
self.register("test1", "test1")
old_messages = []
for user_profile in old_user_profiles:
old_messages.append(message_stream_count(user_profile))
self.send_message("test1@zulip.com", "test1@zulip.com", Recipient.PERSONAL)
new_messages = []
for user_profile in old_user_profiles:
new_messages.append(message_stream_count(user_profile))
self.assertEqual(old_messages, new_messages)
user_profile = get_user_profile_by_email("test1@zulip.com")
recipient = Recipient.objects.get(type_id=user_profile.id, type=Recipient.PERSONAL)
self.assertEqual(most_recent_message(user_profile).recipient, recipient)
def assert_personal(self, sender_email, receiver_email, content="test content"):
# type: (text_type, text_type, text_type) -> None
"""
Send a private message from `sender_email` to `receiver_email` and check
that only those two parties actually received the message.
"""
sender = get_user_profile_by_email(sender_email)
receiver = get_user_profile_by_email(receiver_email)
sender_messages = message_stream_count(sender)
receiver_messages = message_stream_count(receiver)
other_user_profiles = UserProfile.objects.filter(~Q(email=sender_email) &
~Q(email=receiver_email))
old_other_messages = []
for user_profile in other_user_profiles:
old_other_messages.append(message_stream_count(user_profile))
self.send_message(sender_email, receiver_email, Recipient.PERSONAL, content)
# Users outside the conversation don't get the message.
new_other_messages = []
for user_profile in other_user_profiles:
new_other_messages.append(message_stream_count(user_profile))
self.assertEqual(old_other_messages, new_other_messages)
# The personal message is in the streams of both the sender and receiver.
self.assertEqual(message_stream_count(sender),
sender_messages + 1)
self.assertEqual(message_stream_count(receiver),
receiver_messages + 1)
recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
self.assertEqual(most_recent_message(sender).recipient, recipient)
self.assertEqual(most_recent_message(receiver).recipient, recipient)
@slow("assert_personal checks several profiles")
def test_personal(self):
# type: () -> None
"""
If you send a personal, only you and the recipient see it.
"""
self.login("hamlet@zulip.com")
self.assert_personal("hamlet@zulip.com", "othello@zulip.com")
@slow("assert_personal checks several profiles")
def test_non_ascii_personal(self):
# type: () -> None
"""
Sending a PM containing non-ASCII characters succeeds.
"""
self.login("hamlet@zulip.com")
self.assert_personal("hamlet@zulip.com", "othello@zulip.com", u"hümbüǵ")
class StreamMessagesTest(ZulipTestCase):
def assert_stream_message(self, stream_name, subject="test subject",
content="test content"):
# type: (text_type, text_type, text_type) -> None
"""
Check that messages sent to a stream reach all subscribers to that stream.
"""
subscribers = self.users_subscribed_to_stream(stream_name, "zulip.com")
old_subscriber_messages = []
for subscriber in subscribers:
old_subscriber_messages.append(message_stream_count(subscriber))
non_subscribers = [user_profile for user_profile in UserProfile.objects.all()
if user_profile not in subscribers]
old_non_subscriber_messages = []
for non_subscriber in non_subscribers:
old_non_subscriber_messages.append(message_stream_count(non_subscriber))
non_bot_subscribers = [user_profile for user_profile in subscribers
if not user_profile.is_bot]
a_subscriber_email = non_bot_subscribers[0].email
self.login(a_subscriber_email)
self.send_message(a_subscriber_email, stream_name, Recipient.STREAM,
subject, content)
# Did all of the subscribers get the message?
new_subscriber_messages = []
for subscriber in subscribers:
new_subscriber_messages.append(message_stream_count(subscriber))
# Did non-subscribers not get the message?
new_non_subscriber_messages = []
for non_subscriber in non_subscribers:
new_non_subscriber_messages.append(message_stream_count(non_subscriber))
self.assertEqual(old_non_subscriber_messages, new_non_subscriber_messages)
self.assertEqual(new_subscriber_messages, [elt + 1 for elt in old_subscriber_messages])
def test_not_too_many_queries(self):
# type: () -> None
recipient_list = ['hamlet@zulip.com', 'iago@zulip.com', 'cordelia@zulip.com', 'othello@zulip.com']
for email in recipient_list:
self.subscribe_to_stream(email, "Denmark")
sender_email = 'hamlet@zulip.com'
sender = get_user_profile_by_email(sender_email)
message_type_name = "stream"
sending_client = make_client(name="test suite")
stream = 'Denmark'
subject = 'foo'
content = 'whatever'
realm = sender.realm
def send_message():
# type: () -> None
check_send_message(sender, sending_client, message_type_name, [stream],
subject, content, forwarder_user_profile=sender, realm=realm)
send_message() # prime the caches
with queries_captured() as queries:
send_message()
self.assert_length(queries, 7)
def test_message_mentions(self):
# type: () -> None
user_profile = get_user_profile_by_email("iago@zulip.com")
self.subscribe_to_stream(user_profile.email, "Denmark")
self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
content="test @**Iago** rules")
message = most_recent_message(user_profile)
assert(UserMessage.objects.get(user_profile=user_profile, message=message).flags.mentioned.is_set)
def test_stream_message_mirroring(self):
# type: () -> None
from zerver.lib.actions import do_change_is_admin
user_profile = get_user_profile_by_email("iago@zulip.com")
do_change_is_admin(user_profile, True, 'api_super_user')
result = self.client_post("/api/v1/send_message", {"type": "stream",
"to": "Verona",
"sender": "cordelia@zulip.com",
"client": "test suite",
"subject": "announcement",
"content": "Everyone knows Iago rules",
"forged": "true",
"email": user_profile.email,
"api-key": user_profile.api_key})
self.assert_json_success(result)
do_change_is_admin(user_profile, False, 'api_super_user')
result = self.client_post("/api/v1/send_message", {"type": "stream",
"to": "Verona",
"sender": "cordelia@zulip.com",
"client": "test suite",
"subject": "announcement",
"content": "Everyone knows Iago rules",
"forged": "true",
"email": user_profile.email,
"api-key": user_profile.api_key})
self.assert_json_error(result, "User not authorized for this query")
@slow('checks all users')
def test_message_to_stream(self):
# type: () -> None
"""
        If you send a message to a stream, everyone subscribed to the stream
        receives the message.
"""
self.assert_stream_message("Scotland")
@slow('checks all users')
def test_non_ascii_stream_message(self):
# type: () -> None
"""
Sending a stream message containing non-ASCII characters in the stream
name, subject, or message body succeeds.
"""
self.login("hamlet@zulip.com")
# Subscribe everyone to a stream with non-ASCII characters.
non_ascii_stream_name = u"hümbüǵ"
realm = get_realm("zulip.com")
stream, _ = create_stream_if_needed(realm, non_ascii_stream_name)
for user_profile in UserProfile.objects.filter(realm=realm):
do_add_subscription(user_profile, stream, no_log=True)
self.assert_stream_message(non_ascii_stream_name, subject=u"hümbüǵ",
content=u"hümbüǵ")
class MessageDictTest(ZulipTestCase):
@slow('builds lots of messages')
def test_bulk_message_fetching(self):
# type: () -> None
realm = get_realm("zulip.com")
sender = get_user_profile_by_email('othello@zulip.com')
receiver = get_user_profile_by_email('hamlet@zulip.com')
pm_recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
stream, _ = create_stream_if_needed(realm, 'devel')
stream_recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
sending_client = make_client(name="test suite")
for i in range(300):
for recipient in [pm_recipient, stream_recipient]:
message = Message(
sender=sender,
recipient=recipient,
subject='whatever',
content='whatever %d' % i,
pub_date=datetime.datetime.now(),
sending_client=sending_client,
last_edit_time=datetime.datetime.now(),
edit_history='[]'
)
message.save()
ids = [row['id'] for row in Message.objects.all().values('id')]
num_ids = len(ids)
self.assertTrue(num_ids >= 600)
t = time.time()
with queries_captured() as queries:
rows = list(Message.get_raw_db_rows(ids))
for row in rows:
Message.build_dict_from_raw_db_row(row, False)
delay = time.time() - t
# Make sure we don't take longer than 1ms per message to extract messages.
self.assertTrue(delay < 0.001 * num_ids)
self.assert_length(queries, 7)
self.assertEqual(len(rows), num_ids)
def test_applying_markdown(self):
# type: () -> None
sender = get_user_profile_by_email('othello@zulip.com')
receiver = get_user_profile_by_email('hamlet@zulip.com')
recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
sending_client = make_client(name="test suite")
message = Message(
sender=sender,
recipient=recipient,
subject='whatever',
content='hello **world**',
pub_date=datetime.datetime.now(),
sending_client=sending_client,
last_edit_time=datetime.datetime.now(),
edit_history='[]'
)
message.save()
# An important part of this test is to get the message through this exact code path,
# because there is an ugly hack we need to cover. So don't just say "row = message".
row = Message.get_raw_db_rows([message.id])[0]
dct = Message.build_dict_from_raw_db_row(row, apply_markdown=True)
expected_content = '<p>hello <strong>world</strong></p>'
self.assertEqual(dct['content'], expected_content)
message = Message.objects.get(id=message.id)
self.assertEqual(message.rendered_content, expected_content)
self.assertEqual(message.rendered_content_version, bugdown.version)
class MessagePOSTTest(ZulipTestCase):
def test_message_to_self(self):
# type: () -> None
"""
Sending a message to a stream to which you are subscribed is
successful.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject"})
self.assert_json_success(result)
def test_api_message_to_self(self):
# type: () -> None
"""
Same as above, but for the API view
"""
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
result = self.client_post("/api/v1/send_message", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject",
"email": email,
"api-key": api_key})
self.assert_json_success(result)
def test_api_message_with_default_to(self):
# type: () -> None
"""
Sending messages without a to field should be sent to the default
stream for the user_profile.
"""
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
user_profile = get_user_profile_by_email("hamlet@zulip.com")
user_profile.default_sending_stream = get_stream('Verona', user_profile.realm)
user_profile.save()
result = self.client_post("/api/v1/send_message", {"type": "stream",
"client": "test suite",
"content": "Test message no to",
"subject": "Test subject",
"email": email,
"api-key": api_key})
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.content, "Test message no to")
def test_message_to_nonexistent_stream(self):
# type: () -> None
"""
Sending a message to a nonexistent stream fails.
"""
self.login("hamlet@zulip.com")
self.assertFalse(Stream.objects.filter(name="nonexistent_stream"))
result = self.client_post("/json/messages", {"type": "stream",
"to": "nonexistent_stream",
"client": "test suite",
"content": "Test message",
"subject": "Test subject"})
self.assert_json_error(result, "Stream does not exist")
def test_personal_message(self):
# type: () -> None
"""
Sending a personal message to a valid username is successful.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_success(result)
def test_personal_message_to_nonexistent_user(self):
# type: () -> None
"""
Sending a personal message to an invalid email returns error JSON.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": "nonexistent"})
self.assert_json_error(result, "Invalid email 'nonexistent'")
def test_invalid_type(self):
# type: () -> None
"""
Sending a message of unknown type returns error JSON.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "invalid type",
"content": "Test message",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_error(result, "Invalid message type")
def test_empty_message(self):
# type: () -> None
"""
Sending a message that is empty or only whitespace should fail
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "private",
"content": " ",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_error(result, "Message must not be empty")
def test_mirrored_huddle(self):
# type: () -> None
"""
Sending a mirrored huddle message works
"""
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": ujson.dumps(["starnine@mit.edu",
"espuser@mit.edu"])})
self.assert_json_success(result)
def test_mirrored_personal(self):
# type: () -> None
"""
Sending a mirrored personal message works
"""
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_success(result)
def test_duplicated_mirrored_huddle(self):
# type: () -> None
"""
Sending two mirrored huddles in the row return the same ID
"""
msg = {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": ujson.dumps(["espuser@mit.edu",
"starnine@mit.edu"])}
with mock.patch('DNS.dnslookup', return_value=[['starnine:*:84233:101:Athena Consulting Exchange User,,,:/mit/starnine:/bin/bash']]):
self.login("starnine@mit.edu")
result1 = self.client_post("/json/messages", msg)
with mock.patch('DNS.dnslookup', return_value=[['espuser:*:95494:101:Esp Classroom,,,:/mit/espuser:/bin/athena/bash']]):
self.login("espuser@mit.edu")
result2 = self.client_post("/json/messages", msg)
self.assertEqual(ujson.loads(result1.content)['id'],
ujson.loads(result2.content)['id'])
def test_long_message(self):
# type: () -> None
"""
Sending a message longer than the maximum message length succeeds but is
truncated.
"""
self.login("hamlet@zulip.com")
long_message = "A" * (MAX_MESSAGE_LENGTH + 1)
post_data = {"type": "stream", "to": "Verona", "client": "test suite",
"content": long_message, "subject": "Test subject"}
result = self.client_post("/json/messages", post_data)
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEquals(sent_message.content,
"A" * (MAX_MESSAGE_LENGTH - 3) + "...")
def test_long_topic(self):
# type: () -> None
"""
Sending a message with a topic longer than the maximum topic length
succeeds, but the topic is truncated.
"""
self.login("hamlet@zulip.com")
long_topic = "A" * (MAX_SUBJECT_LENGTH + 1)
post_data = {"type": "stream", "to": "Verona", "client": "test suite",
"content": "test content", "subject": long_topic}
result = self.client_post("/json/messages", post_data)
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEquals(sent_message.topic_name(),
"A" * (MAX_SUBJECT_LENGTH - 3) + "...")
def test_send_forged_message_as_not_superuser(self):
# type: () -> None
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject",
"forged": True})
self.assert_json_error(result, "User not authorized for this query")
def test_send_message_as_not_superuser_to_different_domain(self):
# type: () -> None
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject",
"domain": "mit.edu"})
self.assert_json_error(result, "User not authorized for this query")
def test_send_message_as_superuser_to_domain_that_dont_exist(self):
# type: () -> None
email = "emailgateway@zulip.com"
user = get_user_profile_by_email(email)
password = "test_password"
user.set_password(password)
user.is_api_super_user = True
user.save()
self.login(email, password)
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject",
"domain": "non-existing"})
user.is_api_super_user = False
user.save()
self.assert_json_error(result, "Unknown domain non-existing")
def test_send_message_when_sender_is_not_set(self):
# type: () -> None
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_error(result, "Missing sender")
def test_send_message_as_not_superuser_when_type_is_not_private(self):
# type: () -> None
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "not-private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_error(result, "User not authorized for this query")
@mock.patch("zerver.views.messages.create_mirrored_message_users")
def test_send_message_create_mirrored_message_user_returns_invalid_input(self, create_mirrored_message_users_mock):
# type: (Any) -> None
create_mirrored_message_users_mock.return_value = (False, True)
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_error(result, "Invalid mirrored message")
@mock.patch("zerver.views.messages.create_mirrored_message_users")
def test_send_message_when_client_is_zephyr_mirror_but_domain_is_not_mit_edu(self, create_mirrored_message_users_mock):
# type: (Any) -> None
create_mirrored_message_users_mock.return_value = (True, True)
email = "starnine@mit.edu"
user = get_user_profile_by_email(email)
domain = user.realm.domain
user.realm.domain = 'not_mit.edu'
user.realm.save()
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"}, name='gownooo')
self.assert_json_error(result, "Invalid mirrored realm")
user.realm.domain = domain
user.realm.save()
class EditMessageTest(ZulipTestCase):
def check_message(self, msg_id, subject=None, content=None):
# type: (int, Optional[text_type], Optional[text_type]) -> Message
msg = Message.objects.get(id=msg_id)
cached = msg.to_dict(False)
uncached = msg.to_dict_uncached_helper(False)
self.assertEqual(cached, uncached)
if subject:
self.assertEqual(msg.topic_name(), subject)
if content:
self.assertEqual(msg.content, content)
return msg
def test_save_message(self):
# type: () -> None
"""This is also tested by a client test, but here we can verify
the cache against the database"""
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="before edit")
result = self.client_post("/json/update_message", {
'message_id': msg_id,
'content': 'after edit'
})
self.assert_json_success(result)
self.check_message(msg_id, content="after edit")
result = self.client_post("/json/update_message", {
'message_id': msg_id,
'subject': 'edited'
})
self.assert_json_success(result)
self.check_message(msg_id, subject="edited")
def test_fetch_raw_message(self):
# type: () -> None
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="**before** edit")
result = self.client_post('/json/fetch_raw_message', dict(message_id=msg_id))
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEquals(data['raw_content'], '**before** edit')
# Test error cases
result = self.client_post('/json/fetch_raw_message', dict(message_id=999999))
self.assert_json_error(result, 'No such message')
self.login("cordelia@zulip.com")
result = self.client_post('/json/fetch_raw_message', dict(message_id=msg_id))
self.assert_json_error(result, 'Message was not sent by you')
def test_edit_message_no_changes(self):
# type: () -> None
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="before edit")
result = self.client_post("/json/update_message", {
'message_id': msg_id,
})
self.assert_json_error(result, "Nothing to change")
def test_edit_message_no_topic(self):
# type: () -> None
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="before edit")
result = self.client_post("/json/update_message", {
'message_id': msg_id,
'subject': ' '
})
self.assert_json_error(result, "Topic can't be empty")
def test_edit_message_no_content(self):
# type: () -> None
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="before edit")
result = self.client_post("/json/update_message", {
'message_id': msg_id,
'content': ' '
})
self.assert_json_error(result, "Content can't be empty")
def test_edit_message_content_limit(self):
# type: () -> None
def set_message_editing_params(allow_message_editing,
message_content_edit_limit_seconds):
# type: (bool, int) -> None
result = self.client_patch("/json/realm", {
'allow_message_editing': ujson.dumps(allow_message_editing),
'message_content_edit_limit_seconds': message_content_edit_limit_seconds
})
self.assert_json_success(result)
def do_edit_message_assert_success(id_, unique_str, topic_only = False):
# type: (int, text_type, bool) -> None
new_subject = 'subject' + unique_str
new_content = 'content' + unique_str
params_dict = { 'message_id': id_, 'subject': new_subject }
if not topic_only:
params_dict['content'] = new_content
result = self.client_post("/json/update_message", params_dict)
self.assert_json_success(result)
if topic_only:
self.check_message(id_, subject=new_subject)
else:
self.check_message(id_, subject=new_subject, content=new_content)
def do_edit_message_assert_error(id_, unique_str, error, topic_only = False):
# type: (int, text_type, text_type, bool) -> None
message = Message.objects.get(id=id_)
old_subject = message.topic_name()
old_content = message.content
new_subject = 'subject' + unique_str
new_content = 'content' + unique_str
params_dict = { 'message_id': id_, 'subject': new_subject }
if not topic_only:
params_dict['content'] = new_content
result = self.client_post("/json/update_message", params_dict)
message = Message.objects.get(id=id_)
self.assert_json_error(result, error)
self.check_message(id_, subject=old_subject, content=old_content)
self.login("iago@zulip.com")
# send a message in the past
id_ = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
content="content", subject="subject")
message = Message.objects.get(id=id_)
message.pub_date = message.pub_date - datetime.timedelta(seconds=180)
message.save()
# test the various possible message editing settings
# high enough time limit, all edits allowed
set_message_editing_params(True, 240)
do_edit_message_assert_success(id_, 'A')
# out of time, only topic editing allowed
set_message_editing_params(True, 120)
do_edit_message_assert_success(id_, 'B', True)
do_edit_message_assert_error(id_, 'C', "The time limit for editing this message has past")
# infinite time, all edits allowed
set_message_editing_params(True, 0)
do_edit_message_assert_success(id_, 'D')
# without allow_message_editing, nothing is allowed
set_message_editing_params(False, 240)
do_edit_message_assert_error(id_, 'E', "Your organization has turned off message editing.", True)
set_message_editing_params(False, 120)
do_edit_message_assert_error(id_, 'F', "Your organization has turned off message editing.", True)
set_message_editing_params(False, 0)
do_edit_message_assert_error(id_, 'G', "Your organization has turned off message editing.", True)
def test_propagate_topic_forward(self):
# type: () -> None
self.login("hamlet@zulip.com")
id1 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id2 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id3 = self.send_message("iago@zulip.com", "Rome", Recipient.STREAM,
subject="topic1")
id4 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic2")
id5 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
result = self.client_post("/json/update_message", {
'message_id': id1,
'subject': 'edited',
'propagate_mode': 'change_later'
})
self.assert_json_success(result)
self.check_message(id1, subject="edited")
self.check_message(id2, subject="edited")
self.check_message(id3, subject="topic1")
self.check_message(id4, subject="topic2")
self.check_message(id5, subject="edited")
def test_propagate_all_topics(self):
# type: () -> None
self.login("hamlet@zulip.com")
id1 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id2 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id3 = self.send_message("iago@zulip.com", "Rome", Recipient.STREAM,
subject="topic1")
id4 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic2")
id5 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id6 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
subject="topic3")
result = self.client_post("/json/update_message", {
'message_id': id2,
'subject': 'edited',
'propagate_mode': 'change_all'
})
self.assert_json_success(result)
self.check_message(id1, subject="edited")
self.check_message(id2, subject="edited")
self.check_message(id3, subject="topic1")
self.check_message(id4, subject="topic2")
self.check_message(id5, subject="edited")
self.check_message(id6, subject="topic3")
class MirroredMessageUsersTest(TestCase):
class Request(object):
pass
def test_invalid_sender(self):
# type: () -> None
user = get_user_profile_by_email('hamlet@zulip.com')
recipients = [] # type: List[text_type]
request = self.Request()
request.POST = dict() # no sender
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, False)
self.assertEqual(mirror_sender, None)
def test_invalid_client(self):
# type: () -> None
client = get_client(name='banned_mirror') # Invalid!!!
user = get_user_profile_by_email('hamlet@zulip.com')
sender = user
recipients = [] # type: List[text_type]
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, False)
self.assertEqual(mirror_sender, None)
def test_invalid_email(self):
# type: () -> None
invalid_email = 'alice AT example.com'
recipients = [invalid_email]
# We use an MIT user here to maximize code coverage
user = get_user_profile_by_email('starnine@mit.edu')
sender = user
for client_name in ['zephyr_mirror', 'irc_mirror', 'jabber_mirror']:
client = get_client(name=client_name)
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, False)
self.assertEqual(mirror_sender, None)
def test_zephyr_mirror_new_recipient(self):
# type: () -> None
"""Test mirror dummy user creation for PM recipients"""
client = get_client(name='zephyr_mirror')
user = get_user_profile_by_email('starnine@mit.edu')
sender = get_user_profile_by_email('sipbtest@mit.edu')
new_user_email = 'bob_the_new_user@mit.edu'
recipients = [user.email, new_user_email]
# Now make the request.
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertTrue(valid_input)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn(user.email, realm_emails)
self.assertIn(new_user_email, realm_emails)
bob = get_user_profile_by_email(new_user_email)
self.assertTrue(bob.is_mirror_dummy)
def test_zephyr_mirror_new_sender(self):
# type: () -> None
"""Test mirror dummy user creation for sender when sending to stream"""
client = get_client(name='zephyr_mirror')
user = get_user_profile_by_email('starnine@mit.edu')
sender_email = 'new_sender@mit.edu'
recipients = ['stream_name']
# Now make the request.
request = self.Request()
request.POST = dict(
sender=sender_email,
type='stream')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertTrue(valid_input)
self.assertEqual(mirror_sender.email, sender_email)
self.assertTrue(mirror_sender.is_mirror_dummy)
def test_irc_mirror(self):
# type: () -> None
client = get_client(name='irc_mirror')
sender = get_user_profile_by_email('hamlet@zulip.com')
user = sender
recipients = ['alice@zulip.com', 'bob@irc.zulip.com', 'cordelia@zulip.com']
# Now make the request.
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, True)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn('alice@zulip.com', realm_emails)
self.assertIn('bob@irc.zulip.com', realm_emails)
bob = get_user_profile_by_email('bob@irc.zulip.com')
self.assertTrue(bob.is_mirror_dummy)
def test_jabber_mirror(self):
# type: () -> None
client = get_client(name='jabber_mirror')
sender = get_user_profile_by_email('hamlet@zulip.com')
user = sender
recipients = ['alice@zulip.com', 'bob@zulip.com', 'cordelia@zulip.com']
# Now make the request.
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, True)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn('alice@zulip.com', realm_emails)
self.assertIn('bob@zulip.com', realm_emails)
bob = get_user_profile_by_email('bob@zulip.com')
self.assertTrue(bob.is_mirror_dummy)
class StarTests(ZulipTestCase):
def change_star(self, messages, add=True):
# type: (List[int], bool) -> HttpResponse
return self.client_post("/json/messages/flags",
{"messages": ujson.dumps(messages),
"op": "add" if add else "remove",
"flag": "starred"})
def test_change_star(self):
# type: () -> None
"""
You can set a message as starred/un-starred through
POST /json/messages/flags.
"""
self.login("hamlet@zulip.com")
message_ids = [self.send_message("hamlet@zulip.com", "hamlet@zulip.com",
Recipient.PERSONAL, "test")]
# Star a message.
result = self.change_star(message_ids)
self.assert_json_success(result)
for msg in self.get_old_messages():
if msg['id'] in message_ids:
self.assertEqual(msg['flags'], ['starred'])
else:
self.assertEqual(msg['flags'], ['read'])
result = self.change_star(message_ids, False)
self.assert_json_success(result)
# Remove the stars.
for msg in self.get_old_messages():
if msg['id'] in message_ids:
self.assertEqual(msg['flags'], [])
def test_new_message(self):
# type: () -> None
"""
New messages aren't starred.
"""
test_email = "hamlet@zulip.com"
self.login(test_email)
content = "Test message for star"
self.send_message(test_email, "Verona", Recipient.STREAM,
content=content)
sent_message = UserMessage.objects.filter(
user_profile=get_user_profile_by_email(test_email)
).order_by("id").reverse()[0]
self.assertEqual(sent_message.message.content, content)
self.assertFalse(sent_message.flags.starred)
class AttachmentTest(ZulipTestCase):
def test_basics(self):
# type: () -> None
self.assertFalse(Message.content_has_attachment('whatever'))
self.assertFalse(Message.content_has_attachment('yo http://foo.com'))
self.assertTrue(Message.content_has_attachment('yo\n https://staging.zulip.com/user_uploads/'))
self.assertTrue(Message.content_has_attachment('yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.jpg foo'))
self.assertFalse(Message.content_has_image('whatever'))
self.assertFalse(Message.content_has_image('yo http://foo.com'))
self.assertFalse(Message.content_has_image('yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.pdf foo'))
for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp", ".JPG"]:
content = 'yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.%s foo' % (ext,)
self.assertTrue(Message.content_has_image(content))
self.assertFalse(Message.content_has_link('whatever'))
self.assertTrue(Message.content_has_link('yo\n http://foo.com'))
self.assertTrue(Message.content_has_link('yo\n https://example.com?spam=1&eggs=2'))
self.assertTrue(Message.content_has_link('yo /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.pdf foo'))
def test_claim_attachment(self):
# type: () -> None
# Create dummy DB entry
sender_email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email(sender_email)
dummy_files = [
('zulip.txt', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt'),
('temp_file.py', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py'),
('abc.py', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py')
]
for file_name, path_id in dummy_files:
create_attachment(file_name, path_id, user_profile)
# Send message referring the attachment
self.subscribe_to_stream(sender_email, "Denmark")
body = "Some files here ...[zulip.txt](http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)" + \
"http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py.... Some more...." + \
"http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py"
self.send_message(sender_email, "Denmark", Recipient.STREAM, body, "test")
for file_name, path_id in dummy_files:
attachment = Attachment.objects.get(path_id=path_id)
self.assertTrue(attachment.is_claimed())
class LogDictTest(ZulipTestCase):
def test_to_log_dict(self):
# type: () -> None
email = 'hamlet@zulip.com'
stream_name = 'Denmark'
topic_name = 'Copenhagen'
content = 'find me some good coffee shops'
# self.login("hamlet@zulip.com")
message_id = self.send_message(email, stream_name,
message_type=Recipient.STREAM,
subject=topic_name,
content=content)
message = Message.objects.get(id=message_id)
dct = message.to_log_dict()
self.assertTrue('timestamp' in dct)
self.assertEqual(dct['content'], 'find me some good coffee shops')
self.assertEqual(dct['id'], message.id)
self.assertEqual(dct['recipient'], 'Denmark')
self.assertEqual(dct['sender_domain'], 'zulip.com')
self.assertEqual(dct['sender_email'], 'hamlet@zulip.com')
self.assertEqual(dct['sender_full_name'], 'King Hamlet')
self.assertEqual(dct['sender_id'], get_user_profile_by_email(email).id)
self.assertEqual(dct['sender_short_name'], 'hamlet')
self.assertEqual(dct['sending_client'], 'test suite')
self.assertEqual(dct['subject'], 'Copenhagen')
self.assertEqual(dct['type'], 'stream')
class CheckMessageTest(ZulipTestCase):
def test_basic_check_message_call(self):
# type: () -> None
sender = get_user_profile_by_email('othello@zulip.com')
client = make_client(name="test suite")
stream_name = 'integration'
stream, _ = create_stream_if_needed(get_realm("zulip.com"), stream_name)
message_type_name = 'stream'
message_to = None
message_to = [stream_name]
subject_name = 'issue'
message_content = 'whatever'
ret = check_message(sender, client, message_type_name, message_to,
subject_name, message_content)
self.assertEqual(ret['message'].sender.email, 'othello@zulip.com')
def test_bot_pm_feature(self):
# type: () -> None
"""We send a PM to a bot's owner if their bot sends a message to
an unsubscribed stream"""
parent = get_user_profile_by_email('othello@zulip.com')
bot = do_create_user(
email='othello-bot@zulip.com',
password='',
realm=parent.realm,
full_name='',
short_name='',
active=True,
bot_type=UserProfile.DEFAULT_BOT,
bot_owner=parent
)
bot.last_reminder = None
sender = bot
client = make_client(name="test suite")
stream_name = 'integration'
stream, _ = create_stream_if_needed(get_realm("zulip.com"), stream_name)
message_type_name = 'stream'
message_to = None
message_to = [stream_name]
subject_name = 'issue'
message_content = 'whatever'
old_count = message_stream_count(parent)
ret = check_message(sender, client, message_type_name, message_to,
subject_name, message_content)
new_count = message_stream_count(parent)
self.assertEqual(new_count, old_count + 1)
self.assertEqual(ret['message'].sender.email, 'othello-bot@zulip.com')
| ahmadassaf/zulip | zerver/tests/test_messages.py | Python | apache-2.0 | 58,072 |
from matplotlib import pyplot as plt
import numpy as np
def plot_surface(clf, X, y,
xlim=(-10, 10), ylim=(-10, 10), n_steps=250,
subplot=None, show=True):
if subplot is None:
fig = plt.figure()
else:
plt.subplot(*subplot)
xx, yy = np.meshgrid(np.linspace(*xlim, n_steps),
np.linspace(*ylim, n_steps))
if hasattr(clf, "decision_function"):
z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
z = z.reshape(xx.shape)
plt.contourf(xx, yy, z, alpha=0.8, cmap=plt.cm.RdBu_r)
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.xlim(*xlim)
plt.ylim(*ylim)
if show:
plt.show()
def plot_histogram(clf, X, y, subplot=None, show=True):
if subplot is None:
fig = plt.figure()
else:
plt.subplot(*subplot)
if hasattr(clf, "decision_function"):
d = clf.decision_function(X)
else:
d = clf.predict_proba(X)[:, 1]
plt.hist(d[y == "b"], bins=50, normed=True, color="b", alpha=0.5)
plt.hist(d[y == "r"], bins=50, normed=True, color="r", alpha=0.5)
if show:
plt.show()
def plot_clf(clf, X, y):
plt.figure(figsize=(16, 8))
plot_surface(clf, X, y, subplot=(1, 2, 1), show=False)
plot_histogram(clf, X, y, subplot=(1, 2, 2), show=True)
| bcouturi/TestNotebooks | tutorial.py | Python | gpl-2.0 | 1,442 |
# encoding: utf-8
from __future__ import print_function
def hello():
print("Hello world!")
def myhelpfunc(obj, spec):
print("Custom help text.")
if __name__ == '__main__':
import sys
from marrow.script import Parser
sys.exit(Parser(hello, help=myhelpfunc)(sys.argv[1:]))
| marrow/script | example/simple/custom-help.py | Python | mit | 298 |
import logging
from typing import List
from . import SuperdeskMediaStorage
from .desk_media_storage import SuperdeskGridFSMediaStorage
from .amazon_media_storage import AmazonMediaStorage
logger = logging.getLogger(__name__)
def log_missing_media(id_or_filename, resource=None):
logger.error(
"Media item not found",
extra=dict(
id=id_or_filename,
resource=resource,
),
)
class MissingMediaError(ValueError):
pass
class ProxyMediaStorage(SuperdeskMediaStorage):
_storage: List[SuperdeskMediaStorage]
def __init__(self, app):
super().__init__(app)
self._storage = [SuperdeskGridFSMediaStorage(app)]
if app.config.get("AMAZON_CONTAINER_NAME"):
# make amazon first if configured, so it will be the default
self._storage.insert(0, AmazonMediaStorage(app))
def storage(self, id_or_filename=None, resource=None, fallback=False) -> SuperdeskMediaStorage:
if id_or_filename:
for storage in self._storage:
if storage.exists(id_or_filename, resource):
logger.debug("got media from storage id=%s storage=%s", id_or_filename, storage)
return storage
if not fallback:
log_missing_media(id_or_filename, resource)
raise MissingMediaError
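        # No id/filename given (e.g. for a new upload) or fallback requested:
        # use the first backend, which is Amazon when configured and GridFS otherwise.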
return self._storage[0]
def get(self, id_or_filename, resource=None):
try:
return self.storage(id_or_filename, resource).get(id_or_filename, resource=resource)
except MissingMediaError:
return
def delete(self, id_or_filename, resource=None):
try:
return self.storage(id_or_filename, resource).delete(id_or_filename, resource=resource)
except MissingMediaError:
return True
def exists(self, id_or_filename, resource=None):
try:
return self.storage(id_or_filename, resource).exists(id_or_filename, resource=resource)
except MissingMediaError:
return False
def put(self, content, filename=None, content_type=None, metadata=None, resource=None, **kwargs):
return self.storage(None, resource).put(
content, filename=filename, content_type=content_type, metadata=metadata, resource=resource, **kwargs
)
def url_for_media(self, media_id, content_type=None):
return self.storage(media_id, fallback=True).url_for_media(media_id, content_type=content_type)
def url_for_download(self, media_id, content_type=None):
return self.storage(media_id, fallback=True).url_for_download(media_id, content_type=content_type)
def url_for_external(self, media_id: str, resource: str = None) -> str:
return self.storage(media_id, resource=resource, fallback=True).url_for_external(media_id, resource)
def fetch_rendition(self, rendition):
if rendition.get("media"):
return self.get(rendition["media"])
for storage in self._storage:
media = storage.fetch_rendition(rendition)
if media:
return media
def get_by_filename(self, filename):
for storage in self._storage:
media = storage.get_by_filename(filename)
if media:
return media
| petrjasek/superdesk-core | superdesk/storage/proxy.py | Python | agpl-3.0 | 3,326 |
# -*- coding: utf-8 -*-
"""----------------------------------------------------------------------------
Author:
Huang Quanyong (wo1fSea)
quanyongh@foxmail.com
Date:
2016/10/19
Description:
GuillotinePacker.py
----------------------------------------------------------------------------"""
from ..MaxRectsPacker.MaxRectsPacker import MaxRectsPacker
from ..GuillotinePacker.GuillotineAtlas import GuillotineAtlas
class GuillotinePacker(MaxRectsPacker):
"""
    a bin packer using the guillotine algorithm
"""
ATLAS_TYPE = GuillotineAtlas
| wo1fsea/PyTexturePacker | PyTexturePacker/GuillotinePacker/GuillotinePacker.py | Python | mit | 564 |
"""
Sampling from HMM
-----------------
This script shows how to sample points from a Hidden Markov Model (HMM):
we use a 4-component model with specified means and covariances.
The plot shows the sequence of observations generated, with the transitions
between them. We can see that, as specified by our transition matrix,
there are no transitions between components 1 and 3.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from hmmlearn import hmm
##############################################################
# Prepare parameters for a 4-components HMM
# Initial population probability
startprob = np.array([0.6, 0.3, 0.1, 0.0])
# The transition matrix, note that there are no transitions possible
# between component 1 and 3
transmat = np.array([[0.7, 0.2, 0.0, 0.1],
[0.3, 0.5, 0.2, 0.0],
[0.0, 0.3, 0.5, 0.2],
[0.2, 0.0, 0.2, 0.6]])
# The means of each component
means = np.array([[0.0, 0.0],
[0.0, 11.0],
[9.0, 10.0],
[11.0, -1.0]])
# The covariance of each component
covars = .5 * np.tile(np.identity(2), (4, 1, 1))
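# One 2x2 diagonal covariance matrix (0.5 * identity) per component; shape (4, 2, 2).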
# Build an HMM instance and set parameters
model = hmm.GaussianHMM(n_components=4, covariance_type="full")
# Instead of fitting it from the data, we directly set the estimated
# parameters, the means and covariance of the components
model.startprob_ = startprob
model.transmat_ = transmat
model.means_ = means
model.covars_ = covars
###############################################################
# Generate samples
X, Z = model.sample(500)
print(Z)
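# X holds the 500 sampled 2-D observations; Z is the corresponding hidden-state sequence.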
# Plot the sampled data
plt.plot(X[:, 0], X[:, 1], ".-", label="observations", ms=6,
mfc="orange", alpha=0.7)
# Indicate the component numbers
for i, m in enumerate(means):
plt.text(m[0], m[1], 'Component %i' % (i + 1),
size=17, horizontalalignment='center',
bbox=dict(alpha=.7, facecolor='w'))
plt.legend(loc='best')
plt.show()
| patrickjrock/c2finder | plot_hmm_sampling.py | Python | gpl-3.0 | 1,980 |
#############################################################################
# Tree modules -- used by tree.py
# Gregg Thomas
# August 2017
#############################################################################
import core, sys, os, subprocess, treeparse as tp, re
from collections import defaultdict
#############################################################################
def treeSep(infile, out_prefix, outdir):
# This function takes an input file and separates all trees in it to their own files.
if not out_prefix:
out_prefix = os.path.splitext(os.path.basename(infile))[0];
# If no prefix is specified, one is chosen based on the input file name.
pathfile = open(os.path.join(outdir, "tree-sep-paths.txt"), "w");
# Creates a file with a list of paths to each gene tree file. Thought I would use this for Notung, but maybe not.
num_lines, num_trees, line_skip = 0,0,[];
for line in open(infile):
num_lines += 1;
tree = line.strip();
try:
td, t, r = tp.treeParse(line);
except:
line_skip.append(str(num_lines-1));
continue;
num_trees += 1;
# Check to make sure each line can be read as a Newick string.
outfilename = os.path.join(outdir, out_prefix + "-" + str(num_lines) + ".tre");
pathfile.write(os.path.abspath(outfilename) + "\n");
with open(outfilename, "w") as treefile:
treefile.write(tree);
# Write the valid Newick string to its own output file.
pathfile.close();
print("\n" + core.getTime() + " Done!");
print("-----");
print(str(num_lines) + " lines in file.");
if line_skip != []:
print("The following " + str(len(line_skip)) + " line(s) were skipped because they couldn't be read as Newick formatted trees: " + ",".join(line_skip));
print(str(num_trees) + " trees separated!");
print("=======================================================================");
#############################################################################
def treeJoin(infiles, outfilename):
# This function takes an input directory and reads all files. It puts any Newick strings
# it finds into the output file.
num_files, num_read, num_trees, tre_skip, parse_skip = 0,0,0,[],[];
with open(outfilename, "w") as treefile:
for infile in infiles:
num_files += 1;
for line in open(infile):
line = line.strip();
try:
td, t, r = tp.treeParse(line);
except:
if infile not in parse_skip:
parse_skip.append(infile);
continue;
num_trees += 1;
treefile.write(line + "\n");
# Check if each line in the current file is a Newick string and, if so, write it to
# the output file.
print("\n" + core.getTime() + " Done!");
print("-----");
print(str(num_files) + " total files.");
if tre_skip != []:
print("The following " + str(len(tre_skip)) + " file(s) were skipped because they couldn't be read as tree files: " + ",".join([os.path.basename(f) for f in tre_skip]));
print(str(num_trees) + " trees joined.");
if parse_skip != []:
print("The following " + str(len(parse_skip)) + " file(s) had lines that couldn't be read as trees and were skipped: " + ",".join([os.path.basename(f) for f in parse_skip]));
print("=======================================================================");
#############################################################################
def labelTree(infiles, tree_flag, outfilename):
# This function takes either a tree string or a file with multiple trees and
# simply puts integer labels on the internal nodes in the format <#>
if tree_flag:
td, tree, r = infiles;
labeled_tree = tp.addBranchLength(tree, td);
print("\n" + labeled_tree + "\n");
sys.exit();
# For a tree string, just print the labeled tree to the screen
infile = infiles[0];
num_lines, num_trees, line_skip = 0,0,[];
with open(outfilename, "w") as treefile:
for line in open(infile):
num_lines += 1;
try:
td, tree, r = tp.treeParse(line);
except:
line_skip.append(str(num_lines));
continue;
num_trees += 1;
# for each line in the file, check to make sure it is a Newick string.
labeled_tree = tp.addBranchLength(tree, td);
treefile.write(labeled_tree + "\n");
# Label the tree and write to the output file.
print("\n" + core.getTime() + " Done!");
print("-----");
print(str(num_lines) + " total lines.");
print(str(num_trees) + " trees labeled.");
if line_skip != []:
print("The following " + str(len(line_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join([os.path.basename(f) for f in line_skip]));
print("=======================================================================");
#############################################################################
def rootCheck(infiles, tree_flag, outfilename):
# This function takes an input Newick string or file and checks if the trees are rooted or not.
if tree_flag:
td, tree, r = infiles;
rooted = tp.rootedOrNot(td);
if rooted == 0:
print("\nUnrooted!\n")
elif rooted == 1:
print("\nRooted!\n");
else:
print("\n???\n");
sys.exit();
# If the input is a Newick string, simply print the result to the screen.
infile = infiles[0];
num_lines, num_trees, line_skip, num_unroot, num_rooted, num_undet = 0,0,[],0,0,0;
with open(outfilename, "w") as treefile:
for line in open(infile):
num_lines += 1;
try:
td, tree, r = tp.treeParse(line);
except:
line_skip.append(str(num_lines));
treefile.write("**Skipped\n");
continue;
num_trees += 1;
# Check to make sure each line in the file can be read as a Newick string. If not, skip it.
rooted = tp.rootedOrNot(td);
# This function works on the basis that an unrooted tree will be read as a trifurcation at the root by my
            # tree parser. If that is the case, it will have one fewer internal node than expected.
# This means that this will only work on trees that have only bifurcating internal nodes.
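            # Illustrative (hypothetical) example: the quartet "(a,b,(c,d));" parses with a
            # trifurcating root and is reported as unrooted, while "((a,b),(c,d));" parses
            # with a bifurcating root and is reported as rooted.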
if rooted == 0:
num_unroot += 1;
treefile.write("Unrooted!\n")
if rooted == 1:
num_rooted += 1;
treefile.write("Rooted!\n");
else:
num_undet += 1;
treefile.write("???\n");
print("\n" + core.getTime() + " Done!");
print("-----");
print(str(num_lines) + " total lines.");
if line_skip != []:
print("The following " + str(len(line_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(line_skip));
print(str(num_trees) + " trees checked.");
print(str(num_rooted) + " rooted trees.");
print(str(num_unroot) + " unrooted trees.");
print(str(num_undet) + " undetermined trees.");
print("=======================================================================");
#############################################################################
def rootTrees(infiles, tree_flag, outgroup, outfilename):
# This function relies on Newick Utilities (NU) to root trees at a specified outgroup.
import re
tmpfilename = "tmp9825xyz-t-m-p.tmp";
# NU can only read trees from a file, so I make a temporary one.
try:
outgroup = outgroup.split(",");
except:
sys.exit(core.errorOut(26, "-outgroup entered incorrectly! Should be comma delimited list of tip labels."));
# Check to make sure the outgroups were entered correctly.
if tree_flag:
td, tree, r, tree_string = infiles;
lca, monophyletic = tp.LCA(outgroup, td);
if monophyletic == 0:
sys.exit(core.errorOut(27, "Your outgroup labels (-outgroup) must be monophyletic!"));
# Specified outgroups must be monophyletic.
with open(tmpfilename, "w") as tmpfile:
tmpfile.write(tree_string);
nw_cmd = "nw_reroot " + tmpfilename + " " + " ".join(outgroup);
print("\n----Rooted tree----");
os.system(nw_cmd);
print()
# The NU call with nw_reroot.
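        # Illustrative (hypothetical) call: with -outgroup "speciesA,speciesB" this runs
        # "nw_reroot tmp9825xyz-t-m-p.tmp speciesA speciesB" and prints the rooted tree.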
sys.exit();
# If the input is a Newick string, just print the output to the screen.
tmpfilename_2 = "tmp25xzgz-t-m-p.tmp"
# To retrieve output from NU I use another temporary file.
infile = infiles[0];
num_lines, num_trees, non_mono, line_skip = 0,0,[],[];
with open(outfilename, "w") as treefile:
for line in open(infile):
num_lines += 1;
tid = False;
if len(line.split("\t")) > 1:
tid, line = line.strip().split("\t");
try:
td, tree, r = tp.treeParse(line);
except:
line_skip.append(str(num_lines));
treefile.write("**Skipped - couldn't read as Newick string\n");
continue;
if not all(s in td for s in outgroup):
line_skip.append(str(num_lines));
treefile.write("**Skipped - not all outgroups in tree.\n");
continue;
lca, monophyletic = tp.LCA(outgroup, td);
if monophyletic == 0:
non_mono.append(str(num_lines));
continue;
num_trees += 1;
# Check to make sure each line is a Newick string and that the outgroups are monophyletic in that tree.
with open(tmpfilename, "w") as tmpfile:
tmpfile.write(line);
nw_cmd = "nw_reroot " + tmpfilename + " " + " ".join(outgroup) + " > " + tmpfilename_2;
os.system(nw_cmd);
# The NU call with nw_reroot.
rooted_tree = open(tmpfilename_2, "r").read().strip();
if tid:
rooted_tree = tid + "\t" + rooted_tree;
treefile.write(rooted_tree + "\n");
# Getting the output from the tmp file.
os.system("rm " + tmpfilename);
os.system("rm " + tmpfilename_2);
# Remove the tmp files.
print("\n" + core.getTime() + " Done!");
print("-----");
print(str(num_lines) + " total lines.");
if line_skip != []:
print("The following " + str(len(line_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(line_skip));
if non_mono != []:
print("The following " + str(len(non_mono)) + " lines did not contain monophyletic outgroups: " + ",".join(non_mono));
print(str(num_trees) + " trees rooted.");
print("=======================================================================");
#############################################################################
def rootTreesBest(infiles, tree_flag, outgroup, outfilename):
# This function relies on Newick Utilities (NU) to root trees at a specified outgroup.
import re
tmpfilename = "tmp9825xyz-t-m-p.tmp";
# NU can only read trees from a file, so I make a temporary one.
try:
outgroup = outgroup.split(",");
except:
sys.exit(core.errorOut(26, "-outgroup entered incorrectly! Should be comma delimited list of tip labels."));
# Check to make sure the outgroups were entered correctly.
tmpfilename_2 = "tmp25xzgz-t-m-p.tmp"
# To retrieve output from NU I use another temporary file.
infile = infiles[0];
num_lines, num_trees, non_mono, line_skip = 0,0,[],[];
with open(outfilename, "w") as treefile:
for line in open(infile):
alnfilename, line = line.strip().split("\t");
num_lines += 1;
try:
td, tree, r = tp.treeParse(line);
except:
line_skip.append(str(num_lines));
treefile.write("**Skipped - couldn't read as Newick string\n");
continue;
if not all(s in td for s in outgroup):
line_skip.append(str(num_lines));
treefile.write("**Skipped - not all outgroups in tree.\n");
continue;
lca, monophyletic = tp.LCA(outgroup, td);
if monophyletic == 0:
non_mono.append(str(num_lines));
continue;
num_trees += 1;
# Check to make sure each line is a Newick string and that the outgroups are monophyletic in that tree.
with open(tmpfilename, "w") as tmpfile:
tmpfile.write(line);
nw_cmd = "nw_reroot " + tmpfilename + " " + " ".join(outgroup) + " > " + tmpfilename_2;
os.system(nw_cmd);
# The NU call with nw_reroot.
rooted_tree = open(tmpfilename_2, "r").read().strip();
treefile.write(alnfilename + "\t" + rooted_tree + "\n");
# Getting the output from the tmp file.
os.system("rm " + tmpfilename);
os.system("rm " + tmpfilename_2);
# Remove the tmp files.
print("\n" + core.getTime() + " Done!");
print("-----");
print(str(num_lines) + " total lines.");
if line_skip != []:
print("The following " + str(len(line_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(line_skip));
if non_mono != []:
print("The following " + str(len(non_mono)) + " lines did not contain monophyletic outgroups: " + ",".join(non_mono));
print(str(num_trees) + " trees rooted.");
print("=======================================================================");
#############################################################################
def flightOfTheConcordance(infiles, tree_flag, genefilename, count_tops):
# This function calculates concordance factors for each node in a species tree given a
    # set of single-copy gene trees.
if tree_flag:
sinfo, stree, sroot, sfilename = infiles;
else:
try:
sinfo, stree, sroot = tp.treeParse(open(infiles[0], "r").read());
except:
sys.exit(core.errorOut(28, "Could not read species tree (-s) as a Newick tree!"));
# If the input species tree was a file check to make sure it contains a valid Newick tree.
stips = [node for node in sinfo if sinfo[node][1] == 'tip'];
sclades = { node : set(tp.getClade(node, sinfo)) for node in sinfo if sinfo[node][2] != 'tip' };
# Get the tips and clades (tip nodes) for each internal node in the species tree.
#node_counts = defaultdict(float);
node_counts = { node : 0.0 for node in sinfo if sinfo[node][2] != 'tip' };
num_lines, tre_skip, sc_skip = 0, [], [];
total_trees = 0.0;
if count_tops:
tops, top_counts, top_trees = [], [], [];
for line in open(genefilename):
num_lines += 1;
try:
ginfo, gtree, groot = tp.treeParse(line);
except:
tre_skip.append(str(num_lines));
continue;
# Check if each line in the genetrees file is a Newick string.
if count_tops:
gclade = set([frozenset(tp.getClade(node, ginfo)) for node in ginfo if ginfo[node][2] != 'tip']);
if gclade in tops:
topind = tops.index(gclade);
top_counts[topind] += 1;
else:
tops.append(gclade);
top_counts.append(1);
top_trees.append(gtree);
gtips = [node for node in ginfo if ginfo[node][1] == 'tip'];
if set(gtips) != set(stips) or len(gtips) != len(stips):
sc_skip.append(str(num_lines));
continue;
# Check to make sure the tips are identical in the current gene tree and the species tree.
gclades = [ set(tp.getClade(node, ginfo)) for node in ginfo if ginfo[node][2] != 'tip' ];
# Get the clades (tip nodes) for each internal node in the current gene tree.
for node in sclades:
if sclades[node] in gclades:
node_counts[node] += 1.0;
if node == sroot and sclades[node] not in gclades:
print(len(ginfo));
print(gtree);
# Check if each species tree clade is in the gene tree.
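        # Illustrative (hypothetical) example: if the clade {human, chimp} appears in 60 of
        # 100 gene trees, the concordance factor reported for that node below is 0.6.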
total_trees += 1.0;
print("\n" + core.getTime() + " Done!");
if count_tops:
print("\n----Topology counts----");
tops_dict = {};
for x in range(len(tops)):
tops_dict[top_trees[x]] = top_counts[x];
for item in sorted(list(tops_dict.items()), key=lambda x: x[1], reverse=True):
print(item[0], item[1]);
print(len(tops_dict), "total topologies found");
print("\n----Concordance factor nodes----");
stree = tp.addBranchLength(stree, sinfo);
for node in node_counts:
cf = round(node_counts[node]/total_trees,2)
print(node, cf);
if sinfo[node][3] not in ["NA",'']:
stree = stree.replace(node, node + "_" + str(cf));
else:
stree = stree.replace(node, node+ "_" + str(cf));
if stree[-1] == "_":
stree = stree[:-1];
print("\n----Concordance factor tree----");
print(stree);
print()
print("-----");
print(str(num_lines) + " total lines in gene tree file.");
if tre_skip != []:
print("The following " + str(len(tre_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(tre_skip));
if sc_skip != []:
print("The following " + str(len(sc_skip)) + " lines were skipped because they didn't have the same number of nodes as the species tree: " + ",".join(sc_skip));
print(str(total_trees) + " trees read.");
print("=======================================================================");
#############################################################################
def countTips(infile):
# This function counts all unique tip labels given a set of trees.
num_lines, num_trees, tre_skip = 0, 0, [];
tips = defaultdict(int);
for line in open(infile):
num_lines += 1;
try:
td, tree, root = tp.treeParse(line);
except:
tre_skip.append(str(num_lines));
continue;
num_trees += 1;
# Check if each line in the genetrees file is a Newick string.
for node in td:
if td[node][2] == 'tip':
tips[node] += 1;
# Iterate the dictionary for each tip in the current tree.
maxlen = 0;
for tip in tips:
if len(tip) > maxlen:
maxlen = len(tip);
# Get the length of the tip labels for some nicer printing.
print("\n" + core.getTime() + " Done!");
print("\n----Tip counts----");
pad = maxlen + 2;
for tip in tips:
print(core.spacedOut(tip, pad), tips[tip]);
# Print the tip labels and counts.
print("\n-----");
print(str(num_lines) + " total lines in input file.");
if tre_skip != []:
print("The following " + str(len(tre_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(tre_skip));
print(str(num_trees) + " trees read.");
print("=======================================================================");
#############################################################################
def countClade(infile, clade):
    # This function counts how many trees in the input file contain a given clade,
    # where the clade is passed in as a set of tip labels.
num_lines, num_trees, tre_skip, clade_count = 0, 0, [], 0;
for line in open(infile):
num_lines += 1;
try:
td, tree, root = tp.treeParse(line);
except:
tre_skip.append(str(num_lines));
continue;
num_trees += 1;
# Check if each line in the genetrees file is a Newick string.
for node in td:
if td[node][2] != 'tip':
cur_clade = set(tp.getClade(node, td));
if cur_clade == clade:
clade_count += 1;
# Iterate the dictionary for the clade in the current tree.
print("\n" + core.getTime() + " Done!");
print("\n----Clade counts----");
print("# of trees with clade:\t", clade_count);
# Print the # of trees containing the clade.
print("\n-----");
print(str(num_lines) + " total lines in input file.");
if tre_skip != []:
print("The following " + str(len(tre_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(tre_skip));
print(str(num_trees) + " trees read.");
print("=======================================================================");
#############################################################################
def relabelTips(infile, labels, mode, delim, output):
# This function takes a file with many trees and searches the tip labels to match
# strings for replacements.
try:
labels = {l.split(",")[0] : l.split(",")[1] for l in labels.split(" ")};
except:
sys.exit(core.errorOut(29, "-labels was not input correctly! Format 'oldlabel1,newlabel1 oldlabel2,newlabel2'"));
# Check to make sure the labels were input properly by the user.
if delim == 'space':
delim = ' ';
pad = 20;
print("\n---------Relabeling tips---------");
print(core.spacedOut("Old label contains", pad), "| New label");
print("---------------------------------");
for old in labels:
print(core.spacedOut(old, pad), "| " + labels[old]);
# Some nice printing of the labels.
num_lines, num_trees, tre_skip = 0, 0, [];
with open(output, "w") as outfile:
for line in open(infile):
line = line.strip();
num_lines += 1;
try:
td, tree, root = tp.treeParse(line);
except:
tre_skip.append(str(num_lines));
continue;
num_trees += 1;
# Check if each line in the genetrees file is a Newick string.
for node in td:
for old in labels:
if old in node:
if mode == 1:
line = line.replace(node, labels[old]);
if mode == 2:
line = line.replace(node, labels[old] + delim + node);
if mode == 3:
line = line.replace(node, node + delim + labels[old]);
# Check and relabel every tip in the current tree.
# mode == 1 : replace old label with new label
# mode == 2 : put new label on beginning of old label
# mode == 3 : put new label at end of old label
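                    # Illustrative (hypothetical) example with old label 'mm10', new label
                    # 'mouse', and delim '_': mode 1 -> 'mouse', mode 2 -> 'mouse_mm10',
                    # mode 3 -> 'mm10_mouse'.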
outfile.write(line + "\n");
# For each node in the tree, check if it contains the text of one of the labels to replace.
# If so, replace it.
print("---------------------------------");
print("\n" + core.getTime() + " Done!");
print(str(num_lines) + " total lines in input file.");
if tre_skip != []:
print("The following " + str(len(tre_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(tre_skip));
print(str(num_trees) + " trees read.");
print("=======================================================================");
#############################################################################
def rmLabel(infile, mode, outfilename, best_flag=False):
# Takes a file with many trees and removes internal node labels and/or branch lengths (depending on mode).
num_lines, num_trees, tre_skip, parse_skip = 0,0,[],[];
with open(outfilename, "w") as treefile:
for line in open(infile):
num_lines +=1;
line = line.strip();
if best_flag:
title, line = line.split("\t");
try:
td, out_tree, r = tp.treeParse(line);
except:
                tre_skip.append(str(num_lines));
continue;
num_trees += 1;
# Check if each line in the genetrees file is a Newick string.
if mode == 1:
out_tree = re.sub('\)[\d\w]+:', '):', line);
if mode == 2:
out_tree = re.sub(':[\d.eE-]+', '', line);
if mode == 3:
out_tree = re.sub('<[\d]+>', '', out_tree);
# mode == 1 : remove internal node labels only
# mode == 2 : remove branch lengths only
# mode == 3 : remove both internal node labels and branch lengths
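            # Illustrative (hypothetical) examples on "((a:0.1,b:0.2)n1:0.3,c:0.4);":
            #   mode 1 -> "((a:0.1,b:0.2):0.3,c:0.4);"
            #   mode 2 -> "((a,b)n1,c);"
            #   mode 3 strips the <#> labels that treeparse adds, leaving the bare topology.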
if best_flag:
treefile.write(title + "\t" + out_tree + "\n");
else:
treefile.write(out_tree + "\n");
# Write the edited tre to the output file.
print("\n-----");
print("\n" + core.getTime() + " Done!");
print(str(num_lines) + " total lines in input file.");
if tre_skip != []:
print("The following " + str(len(tre_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(tre_skip));
print(str(num_trees) + " trees read.");
print("=======================================================================");
#############################################################################
def scaleBL(infile, op, factor, outfilename):
    # Takes a file with many trees and scales every branch length by the given factor using the given operation (+, -, *, /).
num_lines, num_trees, tre_skip, parse_skip = 0,0,[],[];
with open(outfilename, "w") as treefile:
for line in open(infile):
num_lines +=1;
line = line.strip();
try:
td, out_tree, r = tp.treeParse(line);
except:
                tre_skip.append(str(num_lines));
continue;
num_trees += 1;
# Check if each line in the genetrees file is a Newick string.
for node in td:
if node == r:
continue;
old_bl = float(td[node][0]);
if op == "/":
new_bl = old_bl / factor;
elif op == "*":
new_bl = old_bl * factor;
elif op == "+":
new_bl = old_bl + factor;
elif op == "-":
new_bl = old_bl - factor;
td[node][0] = str(new_bl);
out_tree = tp.addBranchLength(out_tree, td);
treefile.write(out_tree + "\n");
# Write the edited tre to the output file.
print("\n-----");
print("\n" + core.getTime() + " Done!");
print(str(num_lines) + " total lines in input file.");
if tre_skip != []:
print("The following " + str(len(tre_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(tre_skip));
print(str(num_trees) + " trees read.");
print("=======================================================================");
#############################################################################
def robF(infiles, tree_flag, genefilename, raxpath, outfile):
    # This function calls RAxML to calculate Robinson-Foulds distances between each gene tree and the species tree.
import re
raxlog = os.path.splitext(outfile)[0] + ".raxlog";
if tree_flag:
sinfo, stree, sroot, sfilename = infiles;
else:
try:
sinfo, stree, sroot = tp.treeParse(open(infiles[0], "r").read());
except:
sys.exit(core.errorOut(30, "Could not read species tree (-s) as a Newick tree!"));
# If the input species tree was a file check to make sure it contains a valid Newick tree.
stips = [node for node in sinfo if sinfo[node][1] == 'tip'];
# Get the tips in the species tree.
stree = re.sub("<[\d]+>", "", stree);
# Remove node labels from species tree.
num_lines, tre_skip, sc_skip, raxfail = 0, [], [], [];
rfs, wrfs = [], [];
total_trees = 0.0;
tmpfile = "tmp74ghr2.tmp";
rfoutfile = "RAxML_RF-Distances.RFtmp7f";
with open(outfile, "w") as out:
for line in open(genefilename):
num_lines += 1;
try:
ginfo, gtree, groot = tp.treeParse(line);
except:
out.write("Couldn't read as Newick string -- Skipping.\n");
tre_skip.append(str(num_lines));
continue;
# Check if each line in the genetrees file is a Newick string.
gtips = [node for node in ginfo if ginfo[node][1] == 'tip'];
if set(gtips) != set(stips) or len(gtips) != len(stips):
out.write("Tip labels not identical to species tree -- Skipping.\n");
sc_skip.append(str(num_lines));
continue;
# Check to make sure the tips are identical in the current gene tree and the species tree.
gtree = re.sub("<[\d]+>", "", gtree);
# Remove node labels from gene tree.
with open(tmpfile, "w") as tfile:
tfile.write(stree + ";\n");
tfile.write(gtree + ";");
raxcall = raxpath + " -m PROTGAMMAJTTF -z " + tmpfile + " -f r -n RFtmp7f >> " + raxlog;
#print num_lines, raxcall;
os.system(raxcall);
if os.path.exists(rfoutfile):
curout = open(rfoutfile, "r").read().strip().split(" ");
cur_rf, cur_wrf = curout[2], curout[3];
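                # This assumes RAxML's pairwise RF output format, e.g. a (hypothetical) line
                # "0 1: 4 0.123456", so fields 2 and 3 hold the plain and weighted RF distances.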
rfs.append(float(cur_rf));
wrfs.append(float(cur_wrf));
out.write(cur_rf + " " + cur_wrf + "\n");
else:
out.write("RAxML failed -- Skipping.\n");
raxfail.append(str(num_lines));
os.system("rm " + rfoutfile);
os.system("rm RAxML_info.RFtmp7f");
total_trees += 1.0;
print("-----");
print("Average RF distance for all gene trees:", float(sum(rfs)) / float(len(rfs)));
print("Average weighted RF distance for all gene trees:", round(float(sum(wrfs)) / float(len(wrfs)),3));
os.system("rm " + tmpfile);
print("-----");
print(str(num_lines) + " total lines in gene tree file.");
if tre_skip != []:
print("The following " + str(len(tre_skip)) + " lines couldn't be read as trees and were skipped: " + ",".join(tre_skip));
if sc_skip != []:
print("The following " + str(len(sc_skip)) + " lines were skipped because they didn't have the same number of nodes as the species tree: " + ",".join(sc_skip));
print(str(total_trees) + " trees successfully read and calculated RF distances.");
print("======================================================================="); | gwct/core | python/lib/treelib.py | Python | gpl-3.0 | 27,018 |
#!/usr/bin/env python
from horton import *
###############################################################################
## Set up molecule, define basis set ##########################################
###############################################################################
# get the XYZ file from HORTON's test data directory
fn_xyz = context.get_fn('test/h2.xyz')
mol = IOData.from_file(fn_xyz)
obasis = get_gobasis(mol.coordinates, mol.numbers, '4-31G')
###############################################################################
## Define Occupation model, expansion coefficients and overlap ################
###############################################################################
lf = DenseLinalgFactory(obasis.nbasis)
occ_model = AufbauOccModel(1)
orb = lf.create_expansion(obasis.nbasis)
olp = obasis.compute_overlap(lf)
###############################################################################
## Construct Hamiltonian ######################################################
###############################################################################
kin = obasis.compute_kinetic(lf)
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
er = obasis.compute_electron_repulsion(lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RExchangeTerm(er, 'x_hf'),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
###############################################################################
## Perform initial guess ######################################################
###############################################################################
guess_core_hamiltonian(olp, kin, na, orb)
###############################################################################
## Do a Hartree-Fock calculation ##############################################
###############################################################################
scf_solver = PlainSCFSolver(1e-6)
scf_solver(ham, lf, olp, occ_model, orb)
###############################################################################
## Get Hartree-Fock energy ####################################################
###############################################################################
ehf = ham.compute_energy()
###############################################################################
## Combine one-electron integrals to single Hamiltonian #######################
###############################################################################
one = kin.copy()
one.iadd(na)
###############################################################################
## Do RMP2 calculation ########################################################
###############################################################################
mp2 = RMP2(lf, occ_model)
emp2, tmp2 = mp2(one, er, orb, **{'eref': ehf})
| eustislab/horton | data/examples/perturbation_theory/mp2_h2_4-31g.py | Python | gpl-3.0 | 2,933 |
from sympy import symbols, Symbol, sinh, nan, oo, zoo, pi, asinh, acosh, log, sqrt, \
coth, I, cot, E, tanh, tan, cosh, cos, S, sin, Rational, atanh, acoth, \
Integer, O, exp, sech, sec, csch
from sympy.utilities.pytest import raises
def test_sinh():
x, y = symbols('x,y')
k = Symbol('k', integer=True)
assert sinh(nan) == nan
assert sinh(zoo) == nan
assert sinh(oo) == oo
assert sinh(-oo) == -oo
assert sinh(0) == 0
assert sinh(1) == sinh(1)
assert sinh(-1) == -sinh(1)
assert sinh(x) == sinh(x)
assert sinh(-x) == -sinh(x)
assert sinh(pi) == sinh(pi)
assert sinh(-pi) == -sinh(pi)
assert sinh(2**1024 * E) == sinh(2**1024 * E)
assert sinh(-2**1024 * E) == -sinh(2**1024 * E)
assert sinh(pi*I) == 0
assert sinh(-pi*I) == 0
assert sinh(2*pi*I) == 0
assert sinh(-2*pi*I) == 0
assert sinh(-3*10**73*pi*I) == 0
assert sinh(7*10**103*pi*I) == 0
assert sinh(pi*I/2) == I
assert sinh(-pi*I/2) == -I
assert sinh(5*pi*I/2) == I
assert sinh(7*pi*I/2) == -I
assert sinh(pi*I/3) == S.Half*sqrt(3)*I
assert sinh(-2*pi*I/3) == -S.Half*sqrt(3)*I
assert sinh(pi*I/4) == S.Half*sqrt(2)*I
assert sinh(-pi*I/4) == -S.Half*sqrt(2)*I
assert sinh(17*pi*I/4) == S.Half*sqrt(2)*I
assert sinh(-3*pi*I/4) == -S.Half*sqrt(2)*I
assert sinh(pi*I/6) == S.Half*I
assert sinh(-pi*I/6) == -S.Half*I
assert sinh(7*pi*I/6) == -S.Half*I
assert sinh(-5*pi*I/6) == -S.Half*I
assert sinh(pi*I/105) == sin(pi/105)*I
assert sinh(-pi*I/105) == -sin(pi/105)*I
assert sinh(2 + 3*I) == sinh(2 + 3*I)
assert sinh(x*I) == sin(x)*I
assert sinh(k*pi*I) == 0
assert sinh(17*k*pi*I) == 0
assert sinh(k*pi*I/2) == sin(k*pi/2)*I
def test_sinh_series():
x = Symbol('x')
assert sinh(x).series(x, 0, 10) == \
x + x**3/6 + x**5/120 + x**7/5040 + x**9/362880 + O(x**10)
def test_cosh():
x, y = symbols('x,y')
k = Symbol('k', integer=True)
assert cosh(nan) == nan
assert cosh(zoo) == nan
assert cosh(oo) == oo
assert cosh(-oo) == oo
assert cosh(0) == 1
assert cosh(1) == cosh(1)
assert cosh(-1) == cosh(1)
assert cosh(x) == cosh(x)
assert cosh(-x) == cosh(x)
assert cosh(pi*I) == cos(pi)
assert cosh(-pi*I) == cos(pi)
assert cosh(2**1024 * E) == cosh(2**1024 * E)
assert cosh(-2**1024 * E) == cosh(2**1024 * E)
assert cosh(pi*I/2) == 0
assert cosh(-pi*I/2) == 0
assert cosh((-3*10**73 + 1)*pi*I/2) == 0
assert cosh((7*10**103 + 1)*pi*I/2) == 0
assert cosh(pi*I) == -1
assert cosh(-pi*I) == -1
assert cosh(5*pi*I) == -1
assert cosh(8*pi*I) == 1
assert cosh(pi*I/3) == S.Half
assert cosh(-2*pi*I/3) == -S.Half
assert cosh(pi*I/4) == S.Half*sqrt(2)
assert cosh(-pi*I/4) == S.Half*sqrt(2)
assert cosh(11*pi*I/4) == -S.Half*sqrt(2)
assert cosh(-3*pi*I/4) == -S.Half*sqrt(2)
assert cosh(pi*I/6) == S.Half*sqrt(3)
assert cosh(-pi*I/6) == S.Half*sqrt(3)
assert cosh(7*pi*I/6) == -S.Half*sqrt(3)
assert cosh(-5*pi*I/6) == -S.Half*sqrt(3)
assert cosh(pi*I/105) == cos(pi/105)
assert cosh(-pi*I/105) == cos(pi/105)
assert cosh(2 + 3*I) == cosh(2 + 3*I)
assert cosh(x*I) == cos(x)
assert cosh(k*pi*I) == cos(k*pi)
assert cosh(17*k*pi*I) == cos(17*k*pi)
assert cosh(k*pi) == cosh(k*pi)
def test_cosh_series():
x = Symbol('x')
assert cosh(x).series(x, 0, 10) == \
1 + x**2/2 + x**4/24 + x**6/720 + x**8/40320 + O(x**10)
def test_tanh():
x, y = symbols('x,y')
k = Symbol('k', integer=True)
assert tanh(nan) == nan
assert tanh(zoo) == nan
assert tanh(oo) == 1
assert tanh(-oo) == -1
assert tanh(0) == 0
assert tanh(1) == tanh(1)
assert tanh(-1) == -tanh(1)
assert tanh(x) == tanh(x)
assert tanh(-x) == -tanh(x)
assert tanh(pi) == tanh(pi)
assert tanh(-pi) == -tanh(pi)
assert tanh(2**1024 * E) == tanh(2**1024 * E)
assert tanh(-2**1024 * E) == -tanh(2**1024 * E)
assert tanh(pi*I) == 0
assert tanh(-pi*I) == 0
assert tanh(2*pi*I) == 0
assert tanh(-2*pi*I) == 0
assert tanh(-3*10**73*pi*I) == 0
assert tanh(7*10**103*pi*I) == 0
assert tanh(pi*I/2) == tanh(pi*I/2)
assert tanh(-pi*I/2) == -tanh(pi*I/2)
assert tanh(5*pi*I/2) == tanh(5*pi*I/2)
assert tanh(7*pi*I/2) == tanh(7*pi*I/2)
assert tanh(pi*I/3) == sqrt(3)*I
assert tanh(-2*pi*I/3) == sqrt(3)*I
assert tanh(pi*I/4) == I
assert tanh(-pi*I/4) == -I
assert tanh(17*pi*I/4) == I
assert tanh(-3*pi*I/4) == I
assert tanh(pi*I/6) == I/sqrt(3)
assert tanh(-pi*I/6) == -I/sqrt(3)
assert tanh(7*pi*I/6) == I/sqrt(3)
assert tanh(-5*pi*I/6) == I/sqrt(3)
assert tanh(pi*I/105) == tan(pi/105)*I
assert tanh(-pi*I/105) == -tan(pi/105)*I
assert tanh(2 + 3*I) == tanh(2 + 3*I)
assert tanh(x*I) == tan(x)*I
assert tanh(k*pi*I) == 0
assert tanh(17*k*pi*I) == 0
assert tanh(k*pi*I/2) == tan(k*pi/2)*I
def test_tanh_series():
x = Symbol('x')
assert tanh(x).series(x, 0, 10) == \
x - x**3/3 + 2*x**5/15 - 17*x**7/315 + 62*x**9/2835 + O(x**10)
def test_coth():
x, y = symbols('x,y')
k = Symbol('k', integer=True)
assert coth(nan) == nan
assert coth(zoo) == nan
assert coth(oo) == 1
assert coth(-oo) == -1
assert coth(0) == coth(0)
assert coth(0) == zoo
assert coth(1) == coth(1)
assert coth(-1) == -coth(1)
assert coth(x) == coth(x)
assert coth(-x) == -coth(x)
assert coth(pi*I) == -I*cot(pi)
assert coth(-pi*I) == cot(pi)*I
assert coth(2**1024 * E) == coth(2**1024 * E)
assert coth(-2**1024 * E) == -coth(2**1024 * E)
assert coth(pi*I) == -I*cot(pi)
assert coth(-pi*I) == I*cot(pi)
assert coth(2*pi*I) == -I*cot(2*pi)
assert coth(-2*pi*I) == I*cot(2*pi)
assert coth(-3*10**73*pi*I) == I*cot(3*10**73*pi)
assert coth(7*10**103*pi*I) == -I*cot(7*10**103*pi)
assert coth(pi*I/2) == 0
assert coth(-pi*I/2) == 0
assert coth(5*pi*I/2) == 0
assert coth(7*pi*I/2) == 0
assert coth(pi*I/3) == -I/sqrt(3)
assert coth(-2*pi*I/3) == -I/sqrt(3)
assert coth(pi*I/4) == -I
assert coth(-pi*I/4) == I
assert coth(17*pi*I/4) == -I
assert coth(-3*pi*I/4) == -I
assert coth(pi*I/6) == -sqrt(3)*I
assert coth(-pi*I/6) == sqrt(3)*I
assert coth(7*pi*I/6) == -sqrt(3)*I
assert coth(-5*pi*I/6) == -sqrt(3)*I
assert coth(pi*I/105) == -cot(pi/105)*I
assert coth(-pi*I/105) == cot(pi/105)*I
assert coth(2 + 3*I) == coth(2 + 3*I)
assert coth(x*I) == -cot(x)*I
assert coth(k*pi*I) == -cot(k*pi)*I
assert coth(17*k*pi*I) == -cot(17*k*pi)*I
assert coth(k*pi*I) == -cot(k*pi)*I
def test_coth_series():
x = Symbol('x')
assert coth(x).series(x, 0, 8) == \
1/x + x/3 - x**3/45 + 2*x**5/945 - x**7/4725 + O(x**8)
def test_csch():
x, y = symbols('x,y')
k = Symbol('k', integer=True)
assert csch(nan) == nan
assert csch(zoo) == nan
assert csch(oo) == 0
assert csch(-oo) == 0
assert csch(0) == zoo
assert csch(-1) == -csch(1)
assert csch(-x) == -csch(x)
assert csch(-pi) == -csch(pi)
assert csch(-2**1024 * E) == -csch(2**1024 * E)
assert csch(pi*I) == zoo
assert csch(-pi*I) == zoo
assert csch(2*pi*I) == zoo
assert csch(-2*pi*I) == zoo
assert csch(-3*10**73*pi*I) == zoo
assert csch(7*10**103*pi*I) == zoo
assert csch(pi*I/2) == -I
assert csch(-pi*I/2) == I
assert csch(5*pi*I/2) == -I
assert csch(7*pi*I/2) == I
assert csch(pi*I/3) == -2/sqrt(3)*I
assert csch(-2*pi*I/3) == 2/sqrt(3)*I
assert csch(pi*I/4) == -sqrt(2)*I
assert csch(-pi*I/4) == sqrt(2)*I
assert csch(7*pi*I/4) == sqrt(2)*I
assert csch(-3*pi*I/4) == sqrt(2)*I
assert csch(pi*I/6) == -2*I
assert csch(-pi*I/6) == 2*I
assert csch(7*pi*I/6) == 2*I
assert csch(-7*pi*I/6) == -2*I
assert csch(-5*pi*I/6) == 2*I
assert csch(pi*I/105) == -1/sin(pi/105)*I
assert csch(-pi*I/105) == 1/sin(pi/105)*I
assert csch(x*I) == -1/sin(x)*I
assert csch(k*pi*I) == zoo
assert csch(17*k*pi*I) == zoo
assert csch(k*pi*I/2) == -1/sin(k*pi/2)*I
def test_csch_series():
x = Symbol('x')
assert csch(x).series(x, 0, 10) == \
1/ x - x/6 + 7*x**3/360 - 31*x**5/15120 + 127*x**7/604800 \
- 73*x**9/3421440 + O(x**10)
def test_sech():
x, y = symbols('x, y')
k = Symbol('k', integer=True)
assert sech(nan) == nan
assert sech(zoo) == nan
assert sech(oo) == 0
assert sech(-oo) == 0
assert sech(0) == 1
assert sech(-1) == sech(1)
assert sech(-x) == sech(x)
assert sech(pi*I) == sec(pi)
assert sech(-pi*I) == sec(pi)
assert sech(-2**1024 * E) == sech(2**1024 * E)
assert sech(pi*I/2) == zoo
assert sech(-pi*I/2) == zoo
assert sech((-3*10**73 + 1)*pi*I/2) == zoo
assert sech((7*10**103 + 1)*pi*I/2) == zoo
assert sech(pi*I) == -1
assert sech(-pi*I) == -1
assert sech(5*pi*I) == -1
assert sech(8*pi*I) == 1
assert sech(pi*I/3) == 2
assert sech(-2*pi*I/3) == -2
assert sech(pi*I/4) == sqrt(2)
assert sech(-pi*I/4) == sqrt(2)
assert sech(5*pi*I/4) == -sqrt(2)
assert sech(-5*pi*I/4) == -sqrt(2)
assert sech(pi*I/6) == 2/sqrt(3)
assert sech(-pi*I/6) == 2/sqrt(3)
assert sech(7*pi*I/6) == -2/sqrt(3)
assert sech(-5*pi*I/6) == -2/sqrt(3)
assert sech(pi*I/105) == 1/cos(pi/105)
assert sech(-pi*I/105) == 1/cos(pi/105)
assert sech(x*I) == 1/cos(x)
assert sech(k*pi*I) == 1/cos(k*pi)
assert sech(17*k*pi*I) == 1/cos(17*k*pi)
def test_sech_series():
x = Symbol('x')
assert sech(x).series(x, 0, 10) == \
1 - x**2/2 + 5*x**4/24 - 61*x**6/720 + 277*x**8/8064 + O(x**10)
def test_asinh():
x, y = symbols('x,y')
assert asinh(x) == asinh(x)
assert asinh(-x) == -asinh(x)
assert asinh(nan) == nan
assert asinh( 0) == 0
assert asinh(+1) == log(sqrt(2) + 1)
assert asinh(-1) == log(sqrt(2) - 1)
assert asinh(I) == pi*I/2
assert asinh(-I) == -pi*I/2
assert asinh(I/2) == pi*I/6
assert asinh(-I/2) == -pi*I/6
assert asinh(oo) == oo
assert asinh(-oo) == -oo
assert asinh(I*oo) == oo
assert asinh(-I *oo) == -oo
assert asinh(zoo) == zoo
assert asinh(I *(sqrt(3) - 1)/(2**(S(3)/2))) == pi*I/12
assert asinh(-I *(sqrt(3) - 1)/(2**(S(3)/2))) == -pi*I/12
assert asinh(I*(sqrt(5) - 1)/4) == pi*I/10
assert asinh(-I*(sqrt(5) - 1)/4) == -pi*I/10
assert asinh(I*(sqrt(5) + 1)/4) == 3*pi*I/10
assert asinh(-I*(sqrt(5) + 1)/4) == -3*pi*I/10
def test_asinh_series():
x = Symbol('x')
assert asinh(x).series(x, 0, 8) == \
x - x**3/6 + 3*x**5/40 - 5*x**7/112 + O(x**8)
t5 = asinh(x).taylor_term(5, x)
assert t5 == 3*x**5/40
assert asinh(x).taylor_term(7, x, t5, 0) == -5*x**7/112
def test_acosh():
# TODO please write more tests -- see issue 3751
# From http://functions.wolfram.com/ElementaryFunctions/ArcCosh/03/01/
# at specific points
x = Symbol('x')
assert acosh(-x) == acosh(-x)
assert acosh(1) == 0
assert acosh(-1) == pi*I
assert acosh(0) == I*pi/2
assert acosh(Rational(1, 2)) == I*pi/3
assert acosh(Rational(-1, 2)) == 2*pi*I/3
assert acosh(zoo) == oo
assert acosh(I) == log(I*(1 + sqrt(2)))
assert acosh(-I) == log(-I*(1 + sqrt(2)))
assert acosh((sqrt(3) - 1)/(2*sqrt(2))) == 5*pi*I/12
assert acosh(-(sqrt(3) - 1)/(2*sqrt(2))) == 7*pi*I/12
assert acosh(sqrt(2)/2) == I*pi/4
assert acosh(-sqrt(2)/2) == 3*I*pi/4
assert acosh(sqrt(3)/2) == I*pi/6
assert acosh(-sqrt(3)/2) == 5*I*pi/6
assert acosh(sqrt(2 + sqrt(2))/2) == I*pi/8
assert acosh(-sqrt(2 + sqrt(2))/2) == 7*I*pi/8
assert acosh(sqrt(2 - sqrt(2))/2) == 3*I*pi/8
assert acosh(-sqrt(2 - sqrt(2))/2) == 5*I*pi/8
assert acosh((1 + sqrt(3))/(2*sqrt(2))) == I*pi/12
assert acosh(-(1 + sqrt(3))/(2*sqrt(2))) == 11*I*pi/12
assert acosh((sqrt(5) + 1)/4) == I*pi/5
assert acosh(-(sqrt(5) + 1)/4) == 4*I*pi/5
def test_acosh_infinities():
assert acosh(oo) == oo
assert acosh(-oo) == oo
assert acosh(I*oo) == oo
assert acosh(-I*oo) == oo
def test_acosh_series():
x = Symbol('x')
assert acosh(x).series(x, 0, 8) == \
-I*x + pi*I/2 - I*x**3/6 - 3*I*x**5/40 - 5*I*x**7/112 + O(x**8)
t5 = acosh(x).taylor_term(5, x)
assert t5 == - 3*I*x**5/40
assert acosh(x).taylor_term(7, x, t5, 0) == - 5*I*x**7/112
# TODO please write more tests -- see issue 3751
def test_atanh():
# TODO please write more tests -- see issue 3751
# From http://functions.wolfram.com/ElementaryFunctions/ArcTanh/03/01/
# at specific points
x = Symbol('x')
#at specific points
assert atanh(0) == 0
assert atanh(I) == I*pi/4
assert atanh(-I) == -I*pi/4
assert atanh(1) == oo
assert atanh(-1) == -oo
# at infinites
assert atanh(I*oo) == I*pi/2
assert atanh(-I*oo) == -I*pi/2
assert atanh(zoo) == nan
#properties
assert atanh(-x) == -atanh(x)
assert atanh(I/sqrt(3)) == I*pi/6
assert atanh(-I/sqrt(3)) == -I*pi/6
assert atanh(I*sqrt(3)) == I*pi/3
assert atanh(-I*sqrt(3)) == -I*pi/3
assert atanh(I*(1 + sqrt(2))) == 3*pi*I/8
assert atanh(I*(sqrt(2) - 1)) == pi*I/8
assert atanh(I*(1 - sqrt(2))) == -pi*I/8
assert atanh(-I*(1 + sqrt(2))) == -3*pi*I/8
assert atanh(I*sqrt(5 + 2*sqrt(5))) == 2*I*pi/5
assert atanh(-I*sqrt(5 + 2*sqrt(5))) == -2*I*pi/5
assert atanh(I*(2 - sqrt(3))) == pi*I/12
assert atanh(I*(sqrt(3) - 2)) == -pi*I/12
assert atanh(oo) == -I*pi/2
def test_atanh_series():
x = Symbol('x')
assert atanh(x).series(x, 0, 10) == \
x + x**3/3 + x**5/5 + x**7/7 + x**9/9 + O(x**10)
def test_atanh_infinities():
assert atanh(oo) == -I*pi/2
assert atanh(-oo) == I*pi/2
# TODO please write more tests -- see issue 3751
def test_acoth():
# TODO please write more tests -- see issue 3751
# From http://functions.wolfram.com/ElementaryFunctions/ArcCoth/03/01/
# at specific points
x = Symbol('x')
#at specific points
assert acoth(0) == I*pi/2
assert acoth(I) == -I*pi/4
assert acoth(-I) == I*pi/4
assert acoth(1) == oo
assert acoth(-1) == -oo
# at infinites
assert acoth(oo) == 0
assert acoth(-oo) == 0
assert acoth(I*oo) == 0
assert acoth(-I*oo) == 0
assert acoth(zoo) == 0
#properties
assert acoth(-x) == -acoth(x)
assert acoth(I/sqrt(3)) == -I*pi/3
assert acoth(-I/sqrt(3)) == I*pi/3
assert acoth(I*sqrt(3)) == -I*pi/6
assert acoth(-I*sqrt(3)) == I*pi/6
assert acoth(I*(1 + sqrt(2))) == -pi*I/8
assert acoth(-I*(sqrt(2) + 1)) == pi*I/8
assert acoth(I*(1 - sqrt(2))) == 3*pi*I/8
assert acoth(I*(sqrt(2) - 1)) == -3*pi*I/8
assert acoth(I*sqrt(5 + 2*sqrt(5))) == -I*pi/10
assert acoth(-I*sqrt(5 + 2*sqrt(5))) == I*pi/10
assert acoth(I*(2 + sqrt(3))) == -pi*I/12
assert acoth(-I*(2 + sqrt(3))) == pi*I/12
assert acoth(I*(2 - sqrt(3))) == -5*pi*I/12
assert acoth(I*(sqrt(3) - 2)) == 5*pi*I/12
def test_acoth_series():
x = Symbol('x')
assert acoth(x).series(x, 0, 10) == \
I*pi/2 + x + x**3/3 + x**5/5 + x**7/7 + x**9/9 + O(x**10)
def test_inverses():
x = Symbol('x')
assert sinh(x).inverse() == asinh
raises(AttributeError, lambda: cosh(x).inverse())
assert tanh(x).inverse() == atanh
assert coth(x).inverse() == acoth
assert asinh(x).inverse() == sinh
assert acosh(x).inverse() == cosh
assert atanh(x).inverse() == tanh
assert acoth(x).inverse() == coth
def test_leading_term():
x = Symbol('x')
assert cosh(x).as_leading_term(x) == 1
assert coth(x).as_leading_term(x) == 1/x
assert acosh(x).as_leading_term(x) == I*pi/2
assert acoth(x).as_leading_term(x) == I*pi/2
for func in [sinh, tanh, asinh, atanh]:
assert func(x).as_leading_term(x) == x
for func in [sinh, cosh, tanh, coth, asinh, acosh, atanh, acoth]:
for arg in (1/x, S.Half):
eq = func(arg)
assert eq.as_leading_term(x) == eq
for func in [csch, sech]:
eq = func(S.Half)
assert eq.as_leading_term(x) == eq
def test_complex():
a, b = symbols('a,b', real=True)
z = a + b*I
for func in [sinh, cosh, tanh, coth, sech, csch]:
assert func(z).conjugate() == func(a - b*I)
for deep in [True, False]:
assert sinh(z).expand(
complex=True, deep=deep) == sinh(a)*cos(b) + I*cosh(a)*sin(b)
assert cosh(z).expand(
complex=True, deep=deep) == cosh(a)*cos(b) + I*sinh(a)*sin(b)
assert tanh(z).expand(complex=True, deep=deep) == sinh(a)*cosh(
a)/(cos(b)**2 + sinh(a)**2) + I*sin(b)*cos(b)/(cos(b)**2 + sinh(a)**2)
assert coth(z).expand(complex=True, deep=deep) == sinh(a)*cosh(
a)/(sin(b)**2 + sinh(a)**2) - I*sin(b)*cos(b)/(sin(b)**2 + sinh(a)**2)
assert csch(z).expand(complex=True, deep=deep) == cos(b) * sinh(a) / (sin(b)**2\
*cosh(a)**2 + cos(b)**2 * sinh(a)**2) - I*sin(b) * cosh(a) / (sin(b)**2\
*cosh(a)**2 + cos(b)**2 * sinh(a)**2)
assert sech(z).expand(complex=True, deep=deep) == cos(b) * cosh(a) / (sin(b)**2\
*sinh(a)**2 + cos(b)**2 * cosh(a)**2) - I*sin(b) * sinh(a) / (sin(b)**2\
*sinh(a)**2 + cos(b)**2 * cosh(a)**2)
def test_complex_2899():
a, b = symbols('a,b', real=True)
for deep in [True, False]:
for func in [sinh, cosh, tanh, coth]:
assert func(a).expand(complex=True, deep=deep) == func(a)
def test_simplifications():
x = Symbol('x')
assert sinh(asinh(x)) == x
assert sinh(acosh(x)) == sqrt(x - 1) * sqrt(x + 1)
assert sinh(atanh(x)) == x/sqrt(1 - x**2)
assert sinh(acoth(x)) == 1/(sqrt(x - 1) * sqrt(x + 1))
assert cosh(asinh(x)) == sqrt(1 + x**2)
assert cosh(acosh(x)) == x
assert cosh(atanh(x)) == 1/sqrt(1 - x**2)
assert cosh(acoth(x)) == x/(sqrt(x - 1) * sqrt(x + 1))
assert tanh(asinh(x)) == x/sqrt(1 + x**2)
assert tanh(acosh(x)) == sqrt(x - 1) * sqrt(x + 1) / x
assert tanh(atanh(x)) == x
assert tanh(acoth(x)) == 1/x
assert coth(asinh(x)) == sqrt(1 + x**2)/x
assert coth(acosh(x)) == x/(sqrt(x - 1) * sqrt(x + 1))
assert coth(atanh(x)) == 1/x
assert coth(acoth(x)) == x
assert csch(asinh(x)) == 1/x
assert csch(acosh(x)) == 1/(sqrt(x - 1) * sqrt(x + 1))
assert csch(atanh(x)) == sqrt(1 - x**2)/x
assert csch(acoth(x)) == sqrt(x - 1) * sqrt(x + 1)
assert sech(asinh(x)) == 1/sqrt(1 + x**2)
assert sech(acosh(x)) == 1/x
assert sech(atanh(x)) == sqrt(1 - x**2)
assert sech(acoth(x)) == sqrt(x - 1) * sqrt(x + 1)/x
def test_issue_4136():
assert cosh(asinh(Integer(3)/2)) == sqrt(Integer(13)/4)
def test_sinh_rewrite():
x = Symbol('x')
assert sinh(x).rewrite(exp) == (exp(x) - exp(-x))/2 \
== sinh(x).rewrite('tractable')
assert sinh(x).rewrite(cosh) == -I*cosh(x + I*pi/2)
tanh_half = tanh(S.Half*x)
assert sinh(x).rewrite(tanh) == 2*tanh_half/(1 - tanh_half**2)
coth_half = coth(S.Half*x)
assert sinh(x).rewrite(coth) == 2*coth_half/(coth_half**2 - 1)
def test_cosh_rewrite():
x = Symbol('x')
assert cosh(x).rewrite(exp) == (exp(x) + exp(-x))/2 \
== cosh(x).rewrite('tractable')
assert cosh(x).rewrite(sinh) == -I*sinh(x + I*pi/2)
tanh_half = tanh(S.Half*x)**2
assert cosh(x).rewrite(tanh) == (1 + tanh_half)/(1 - tanh_half)
coth_half = coth(S.Half*x)**2
assert cosh(x).rewrite(coth) == (coth_half + 1)/(coth_half - 1)
def test_tanh_rewrite():
x = Symbol('x')
assert tanh(x).rewrite(exp) == (exp(x) - exp(-x))/(exp(x) + exp(-x)) \
== tanh(x).rewrite('tractable')
assert tanh(x).rewrite(sinh) == I*sinh(x)/sinh(I*pi/2 - x)
assert tanh(x).rewrite(cosh) == I*cosh(I*pi/2 - x)/cosh(x)
assert tanh(x).rewrite(coth) == 1/coth(x)
def test_coth_rewrite():
x = Symbol('x')
assert coth(x).rewrite(exp) == (exp(x) + exp(-x))/(exp(x) - exp(-x)) \
== coth(x).rewrite('tractable')
assert coth(x).rewrite(sinh) == -I*sinh(I*pi/2 - x)/sinh(x)
assert coth(x).rewrite(cosh) == -I*cosh(x)/cosh(I*pi/2 - x)
assert coth(x).rewrite(tanh) == 1/tanh(x)
def test_csch_rewrite():
x = Symbol('x')
assert csch(x).rewrite(exp) == 1 / (exp(x)/2 - exp(-x)/2) \
== csch(x).rewrite('tractable')
assert csch(x).rewrite(cosh) == I/cosh(x + I*pi/2)
tanh_half = tanh(S.Half*x)
assert csch(x).rewrite(tanh) == (1 - tanh_half**2)/(2*tanh_half)
coth_half = coth(S.Half*x)
assert csch(x).rewrite(coth) == (coth_half**2 - 1)/(2*coth_half)
def test_sech_rewrite():
x = Symbol('x')
assert sech(x).rewrite(exp) == 1 / (exp(x)/2 + exp(-x)/2) \
== sech(x).rewrite('tractable')
assert sech(x).rewrite(sinh) == I/sinh(x + I*pi/2)
tanh_half = tanh(S.Half*x)**2
assert sech(x).rewrite(tanh) == (1 - tanh_half)/(1 + tanh_half)
coth_half = coth(S.Half*x)**2
assert sech(x).rewrite(coth) == (coth_half - 1)/(coth_half + 1)
def test_derivs():
x = Symbol('x')
assert coth(x).diff(x) == -sinh(x)**(-2)
assert sinh(x).diff(x) == cosh(x)
assert cosh(x).diff(x) == sinh(x)
assert tanh(x).diff(x) == -tanh(x)**2 + 1
assert csch(x).diff(x) == -coth(x)*csch(x)
assert sech(x).diff(x) == -tanh(x)*sech(x)
assert acoth(x).diff(x) == 1/(-x**2 + 1)
assert asinh(x).diff(x) == 1/sqrt(x**2 + 1)
assert acosh(x).diff(x) == 1/sqrt(x**2 - 1)
assert atanh(x).diff(x) == 1/(-x**2 + 1)
def test_sinh_expansion():
x,y = symbols('x,y')
assert sinh(x+y).expand(trig=True) == sinh(x)*cosh(y) + cosh(x)*sinh(y)
assert sinh(2*x).expand(trig=True) == 2*sinh(x)*cosh(x)
assert sinh(3*x).expand(trig=True).expand() == \
sinh(x)**3 + 3*sinh(x)*cosh(x)**2
def test_cosh_expansion():
x,y = symbols('x,y')
assert cosh(x+y).expand(trig=True) == cosh(x)*cosh(y) + sinh(x)*sinh(y)
assert cosh(2*x).expand(trig=True) == cosh(x)**2 + sinh(x)**2
assert cosh(3*x).expand(trig=True).expand() == \
3*sinh(x)**2*cosh(x) + cosh(x)**3
| Mitchkoens/sympy | sympy/functions/elementary/tests/test_hyperbolic.py | Python | bsd-3-clause | 22,379 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'MplsIpTtlPropagateDisableEnum' : _MetaInfoEnum('MplsIpTtlPropagateDisableEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg',
{
'all':'ALL',
'forward':'FORWARD',
'local':'LOCAL',
}, 'Cisco-IOS-XR-mpls-lsd-cfg', _yang_ns._namespaces['Cisco-IOS-XR-mpls-lsd-cfg']),
'MplsLsd.LabelDatabases.LabelDatabase.LabelRange' : {
'meta_info' : _MetaInfoClass('MplsLsd.LabelDatabases.LabelDatabase.LabelRange',
False,
[
_MetaInfoClassMember('max-static-value', ATTRIBUTE, 'int' , None, None,
[(0, 1048575)], [],
''' Maximum static label value
''',
'max_static_value',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
_MetaInfoClassMember('max-value', ATTRIBUTE, 'int' , None, None,
[(16000, 1048575)], [],
''' Maximum label value
''',
'max_value',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
_MetaInfoClassMember('min-static-value', ATTRIBUTE, 'int' , None, None,
[(0, 1048575)], [],
''' Minimum static label value
''',
'min_static_value',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
_MetaInfoClassMember('minvalue', ATTRIBUTE, 'int' , None, None,
[(16000, 1048575)], [],
''' Minimum label value
''',
'minvalue',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
],
'Cisco-IOS-XR-mpls-lsd-cfg',
'label-range',
_yang_ns._namespaces['Cisco-IOS-XR-mpls-lsd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg'
),
},
'MplsLsd.LabelDatabases.LabelDatabase' : {
'meta_info' : _MetaInfoClass('MplsLsd.LabelDatabases.LabelDatabase',
False,
[
_MetaInfoClassMember('label-database-id', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Label database identifier
''',
'label_database_id',
'Cisco-IOS-XR-mpls-lsd-cfg', True),
_MetaInfoClassMember('label-range', REFERENCE_CLASS, 'LabelRange' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg', 'MplsLsd.LabelDatabases.LabelDatabase.LabelRange',
[], [],
''' Label range
''',
'label_range',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
],
'Cisco-IOS-XR-mpls-lsd-cfg',
'label-database',
_yang_ns._namespaces['Cisco-IOS-XR-mpls-lsd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg'
),
},
'MplsLsd.LabelDatabases' : {
'meta_info' : _MetaInfoClass('MplsLsd.LabelDatabases',
False,
[
_MetaInfoClassMember('label-database', REFERENCE_LIST, 'LabelDatabase' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg', 'MplsLsd.LabelDatabases.LabelDatabase',
[], [],
''' A label database
''',
'label_database',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
],
'Cisco-IOS-XR-mpls-lsd-cfg',
'label-databases',
_yang_ns._namespaces['Cisco-IOS-XR-mpls-lsd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg'
),
},
'MplsLsd' : {
'meta_info' : _MetaInfoClass('MplsLsd',
False,
[
_MetaInfoClassMember('app-reg-delay-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable LSD application reg delay
''',
'app_reg_delay_disable',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
_MetaInfoClassMember('label-databases', REFERENCE_CLASS, 'LabelDatabases' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg', 'MplsLsd.LabelDatabases',
[], [],
''' Table of label databases
''',
'label_databases',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
_MetaInfoClassMember('mpls-entropy-label', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable MPLS Entropy Label
''',
'mpls_entropy_label',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
_MetaInfoClassMember('mpls-ip-ttl-expiration-pop', ATTRIBUTE, 'int' , None, None,
[(1, 10)], [],
''' Number of labels to pop upon MPLS TTL expiry
''',
'mpls_ip_ttl_expiration_pop',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
_MetaInfoClassMember('mpls-ip-ttl-propagate-disable', REFERENCE_ENUM_CLASS, 'MplsIpTtlPropagateDisableEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg', 'MplsIpTtlPropagateDisableEnum',
[], [],
''' Disable Propagation of IP TTL onto the label
stack
''',
'mpls_ip_ttl_propagate_disable',
'Cisco-IOS-XR-mpls-lsd-cfg', False),
],
'Cisco-IOS-XR-mpls-lsd-cfg',
'mpls-lsd',
_yang_ns._namespaces['Cisco-IOS-XR-mpls-lsd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_lsd_cfg'
),
},
}
_meta_table['MplsLsd.LabelDatabases.LabelDatabase.LabelRange']['meta_info'].parent =_meta_table['MplsLsd.LabelDatabases.LabelDatabase']['meta_info']
_meta_table['MplsLsd.LabelDatabases.LabelDatabase']['meta_info'].parent =_meta_table['MplsLsd.LabelDatabases']['meta_info']
_meta_table['MplsLsd.LabelDatabases']['meta_info'].parent =_meta_table['MplsLsd']['meta_info']
| abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_mpls_lsd_cfg.py | Python | apache-2.0 | 6,582 |
# -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2016 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
from bliss.controllers.tango_attr_as_counter import TangoAttrCounter
class tango_fe(TangoAttrCounter):
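    """Thin wrapper around TangoAttrCounter; behaviour is fully inherited."""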
def __init__(self, name, config):
TangoAttrCounter.__init__(self, name, config)
| tiagocoutinho/bliss | bliss/controllers/tango_fe.py | Python | lgpl-3.0 | 379 |
# Copyright (C) 2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
"""action element for error (from Cuckoo 2.0-rc2 to 2.0.0)
Revision ID: 181be2111077
Revises: ef1ecf216392
Create Date: 2017-02-23 15:11:39.711902
"""
# Revision identifiers, used by Alembic.
revision = "181be2111077"
down_revision = "ef1ecf216392"
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
"errors", sa.Column("action", sa.String(length=64), nullable=True)
)
def downgrade():
pass
| cuckoobox/cuckoo | cuckoo/private/db_migration/versions/from_20c2_to_200_error_action.py | Python | mit | 607 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-23 13:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('vital_records', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='birthnote',
name='applicant',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='vital_records.ApplicantInfo'),
preserve_default=False,
),
]
| cl0ne/vital-records-registry | registry/vital_records/migrations/0002_birthnote_applicant.py | Python | gpl-3.0 | 594 |
#!/usr/bin/python
# (c) James Laska
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rhn_register
short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
description:
- Manage registration to the Red Hat Network.
version_added: "1.2"
author: James Laska
notes:
- In order to register a system, rhnreg_ks requires either a username and password, or an activationkey.
requirements:
- rhnreg_ks
- either libxml2 or lxml
options:
state:
description:
- whether to register (C(present)), or unregister (C(absent)) a system
choices: [ "present", "absent" ]
default: "present"
username:
description:
- Red Hat Network username
password:
description:
- Red Hat Network password
server_url:
description:
- Specify an alternative Red Hat Network server URL
default: Current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date) is the default
activationkey:
description:
- supply an activation key for use with registration
profilename:
description:
- supply an profilename for use with registration
version_added: "2.0"
sslcacert:
description:
- supply a custom ssl CA certificate file for use with registration
version_added: "2.1"
systemorgid:
description:
- supply an organizational id for use with registration
version_added: "2.1"
channels:
description:
- Optionally specify a list of comma-separated channels to subscribe to upon successful registration.
default: []
enable_eus:
description:
      - If C(yes), extended update support will be requested.
type: bool
default: 'no'
nopackages:
description:
- If C(yes), the registered node will not upload its installed packages information to Satellite server
type: bool
default: 'no'
version_added: "2.5"
'''
EXAMPLES = '''
# Unregister system from RHN.
- rhn_register:
state: absent
username: joe_user
password: somepass
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
- rhn_register:
state: present
username: joe_user
password: somepass
# Register with activationkey (1-222333444) and enable extended update support.
- rhn_register:
state: present
activationkey: 1-222333444
enable_eus: true
# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname.
- rhn_register:
state: present
activationkey: 1-222333444
profilename: host.example.com.custom
# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
- rhn_register:
state: present
username: joe_user
    password: somepass
server_url: https://xmlrpc.my.satellite/XMLRPC
# Register as user (joe_user) with password (somepass) and enable
# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1).
- rhn_register:
state: present
username: joe_user
password: somepass
channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
'''
RETURN = '''
# Default return values
'''
import os
import sys
# Attempt to import rhn client tools
sys.path.insert(0, '/usr/share/rhn')
try:
import up2date_client
import up2date_client.config
HAS_UP2DATE_CLIENT = True
except ImportError:
HAS_UP2DATE_CLIENT = False
# INSERT REDHAT SNIPPETS
from ansible.module_utils import redhat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import urllib, xmlrpc_client
class Rhn(redhat.RegistrationBase):
def __init__(self, module=None, username=None, password=None):
redhat.RegistrationBase.__init__(self, module, username, password)
self.config = self.load_config()
self.server = None
self.session = None
def logout(self):
if self.session is not None:
self.server.auth.logout(self.session)
def load_config(self):
'''
Read configuration from /etc/sysconfig/rhn/up2date
'''
if not HAS_UP2DATE_CLIENT:
return None
config = up2date_client.config.initUp2dateConfig()
return config
@property
def server_url(self):
return self.config['serverURL']
@property
def hostname(self):
'''
Return the non-xmlrpc RHN hostname. This is a convenience method
used for displaying a more readable RHN hostname.
Returns: str
'''
url = urllib.parse.urlparse(self.server_url)
return url[1].replace('xmlrpc.', '')
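    # Illustrative example (hypothetical values, not taken from a real system):
    #   serverURL = "https://xmlrpc.rhn.example.com/XMLRPC"
    #   hostname -> "rhn.example.com"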
@property
def systemid(self):
systemid = None
xpath_str = "//member[name='system_id']/value/string"
if os.path.isfile(self.config['systemIdPath']):
fd = open(self.config['systemIdPath'], 'r')
xml_data = fd.read()
fd.close()
# Ugh, xml parsing time ...
# First, try parsing with libxml2 ...
if systemid is None:
try:
import libxml2
doc = libxml2.parseDoc(xml_data)
ctxt = doc.xpathNewContext()
systemid = ctxt.xpathEval(xpath_str)[0].content
doc.freeDoc()
ctxt.xpathFreeContext()
except ImportError:
pass
# m-kay, let's try with lxml now ...
if systemid is None:
try:
from lxml import etree
root = etree.fromstring(xml_data)
systemid = root.xpath(xpath_str)[0].text
except ImportError:
raise Exception('"libxml2" or "lxml" is required for this module.')
# Strip the 'ID-' prefix
if systemid is not None and systemid.startswith('ID-'):
systemid = systemid[3:]
return int(systemid)
@property
def is_registered(self):
'''
Determine whether the current system is registered.
Returns: True|False
'''
return os.path.isfile(self.config['systemIdPath'])
def configure_server_url(self, server_url):
'''
Configure server_url for registration
'''
self.config.set('serverURL', server_url)
self.config.save()
def enable(self):
'''
Prepare the system for RHN registration. This includes ...
* enabling the rhnplugin yum plugin
* disabling the subscription-manager yum plugin
'''
redhat.RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', True)
self.update_plugin_conf('subscription-manager', False)
def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False):
'''
Register system to RHN. If enable_eus=True, extended update
support will be requested.
'''
register_cmd = ['/usr/sbin/rhnreg_ks', '--force']
if self.username:
register_cmd.extend(['--username', self.username, '--password', self.password])
if self.server_url:
register_cmd.extend(['--serverUrl', self.server_url])
if enable_eus:
register_cmd.append('--use-eus-channel')
if nopackages:
register_cmd.append('--nopackages')
if activationkey is not None:
register_cmd.extend(['--activationkey', activationkey])
if profilename is not None:
register_cmd.extend(['--profilename', profilename])
if sslcacert is not None:
register_cmd.extend(['--sslCACert', sslcacert])
if systemorgid is not None:
register_cmd.extend(['--systemorgid', systemorgid])
rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)
def api(self, method, *args):
'''
Convenience RPC wrapper
'''
if self.server is None:
if self.hostname != 'rhn.redhat.com':
url = "https://%s/rpc/api" % self.hostname
else:
url = "https://xmlrpc.%s/rpc/api" % self.hostname
self.server = xmlrpc_client.ServerProxy(url)
self.session = self.server.auth.login(self.username, self.password)
func = getattr(self.server, method)
return func(self.session, *args)
def unregister(self):
'''
Unregister a previously registered system
'''
# Initiate RPC connection
self.api('system.deleteSystems', [self.systemid])
# Remove systemid file
os.unlink(self.config['systemIdPath'])
def subscribe(self, channels):
if self._is_hosted():
current_channels = self.api('channel.software.listSystemChannels', self.systemid)
new_channels = [item['channel_label'] for item in current_channels]
new_channels.extend(channels)
return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels))
else:
current_channels = self.api('channel.software.listSystemChannels', self.systemid)
current_channels = [item['label'] for item in current_channels]
new_base = None
new_childs = []
for ch in channels:
if ch in current_channels:
continue
if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '':
new_base = ch
else:
if ch not in new_childs:
new_childs.append(ch)
out_base = 0
out_childs = 0
if new_base:
out_base = self.api('system.setBaseChannel', self.systemid, new_base)
if new_childs:
out_childs = self.api('system.setChildChannels', self.systemid, new_childs)
return out_base and out_childs
def _is_hosted(self):
'''
Return True if we are running against Hosted (rhn.redhat.com) or
False otherwise (when running against Satellite or Spacewalk)
'''
return 'rhn.redhat.com' in self.hostname
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent']),
username=dict(default=None, required=False),
password=dict(default=None, required=False, no_log=True),
server_url=dict(default=None, required=False),
activationkey=dict(default=None, required=False, no_log=True),
profilename=dict(default=None, required=False),
sslcacert=dict(default=None, required=False, type='path'),
systemorgid=dict(default=None, required=False),
enable_eus=dict(default=False, type='bool'),
nopackages=dict(default=False, type='bool'),
channels=dict(default=[], type='list'),
)
)
if not HAS_UP2DATE_CLIENT:
module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?")
server_url = module.params['server_url']
username = module.params['username']
password = module.params['password']
state = module.params['state']
activationkey = module.params['activationkey']
profilename = module.params['profilename']
sslcacert = module.params['sslcacert']
systemorgid = module.params['systemorgid']
channels = module.params['channels']
enable_eus = module.params['enable_eus']
nopackages = module.params['nopackages']
rhn = Rhn(module=module, username=username, password=password)
# use the provided server url and persist it to the rhn config.
if server_url:
rhn.configure_server_url(server_url)
if not rhn.server_url:
module.fail_json(
msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)"
)
# Ensure system is registered
if state == 'present':
# Check for missing parameters ...
if not (activationkey or rhn.username or rhn.password):
module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username,
rhn.password))
if not activationkey and not (rhn.username and rhn.password):
module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
# Register system
if rhn.is_registered:
module.exit_json(changed=False, msg="System already registered.")
try:
rhn.enable()
rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages)
rhn.subscribe(channels)
except Exception as exc:
module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc))
finally:
rhn.logout()
module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
# Ensure system is *not* registered
if state == 'absent':
if not rhn.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
try:
rhn.unregister()
except Exception as exc:
module.fail_json(msg="Failed to unregister: %s" % exc)
finally:
rhn.logout()
module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/packaging/os/rhn_register.py | Python | gpl-3.0 | 14,356 |
#!/bin/env python3
import urllib.request
import json
URL = 'http://api.linux-statt-windows.org/infos.json'
def callback():
return '/lsw', get_forum
def get_forum(inp):
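    """Fetch the forum metadata from the JSON API and format it as a reply."""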
rqst = urllib.request.urlopen(URL)
data = json.loads(rqst.read().decode('utf-8'))
forum = data[0]['forum']
return forum['name']\
+ '\n\nLink: ' + forum['long_url'] \
+ '\nKurzlink: ' + forum['short_url'] \
+ '\nFAQ: ' + forum['faq_url'] \
+ '\nRegeln: ' + forum['rules_url']
def get_help():
return '\n/lsw: Bekomme den Link zu unserer Homepage.'
if __name__ == '__main__':
print(get_forum())
| Linux-statt-Windows/BOLT | src/modules/forum.py | Python | gpl-3.0 | 645 |
from django.conf.urls import patterns, url
urlpatterns = patterns('robots.views',
url(r'^$', 'rules_list', name='robots_rule_list'),
)
| philippeowagner/django-robots | robots/urls.py | Python | bsd-3-clause | 140 |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Linux specific functions.
"""
from __future__ import with_statement
import sys,os,struct,socket,time
from select import select
from fcntl import ioctl
import scapy.utils
import scapy.utils6
from scapy.config import conf
from scapy.data import *
from scapy.supersocket import SuperSocket
import scapy.arch
from scapy.error import warning, Scapy_Exception, log_loading, log_interactive
# From bits/ioctls.h
SIOCGIFHWADDR = 0x8927 # Get hardware address
SIOCGIFADDR = 0x8915 # get PA address
SIOCGIFNETMASK = 0x891b # get network PA mask
SIOCGIFNAME = 0x8910 # get iface name
SIOCSIFLINK = 0x8911 # set iface channel
SIOCGIFCONF = 0x8912 # get iface list
SIOCGIFFLAGS = 0x8913 # get flags
SIOCSIFFLAGS = 0x8914 # set flags
SIOCGIFINDEX = 0x8933 # name -> if_index mapping
SIOCGIFCOUNT = 0x8938 # get number of devices
SIOCGSTAMP = 0x8906 # get packet timestamp (as a timeval)
# From if.h
IFF_UP = 0x1 # Interface is up.
IFF_BROADCAST = 0x2 # Broadcast address valid.
IFF_DEBUG = 0x4 # Turn on debugging.
IFF_LOOPBACK = 0x8 # Is a loopback net.
IFF_POINTOPOINT = 0x10 # Interface is point-to-point link.
IFF_NOTRAILERS = 0x20 # Avoid use of trailers.
IFF_RUNNING = 0x40 # Resources allocated.
IFF_NOARP = 0x80 # No address resolution protocol.
IFF_PROMISC = 0x100 # Receive all packets.
# From netpacket/packet.h
PACKET_ADD_MEMBERSHIP = 1
PACKET_DROP_MEMBERSHIP = 2
PACKET_RECV_OUTPUT = 3
PACKET_RX_RING = 5
PACKET_STATISTICS = 6
PACKET_MR_MULTICAST = 0
PACKET_MR_PROMISC = 1
PACKET_MR_ALLMULTI = 2
# From bits/socket.h
SOL_PACKET = 263
# From asm/socket.h
SO_ATTACH_FILTER = 26
SOL_SOCKET = 1
# From net/route.h
RTF_UP = 0x0001 # Route usable
RTF_REJECT = 0x0200
LOOPBACK_NAME="lo"
with os.popen("tcpdump -V 2> /dev/null") as _f:
    _ret = _f.close()
    # popen().close() returns None when the command exits with status 0
    if _ret is not None and _ret >> 8 == 0x7f:
        log_loading.warning("Failed to execute tcpdump. Check it is installed and in the PATH")
        TCPDUMP=0
    else:
        TCPDUMP=1
del(_f)
def get_if_raw_hwaddr(iff):
return struct.unpack("16xh6s8x",get_if(iff,SIOCGIFHWADDR))
def get_if_raw_addr(iff):
try:
return get_if(iff, SIOCGIFADDR)[20:24]
except IOError:
return "\0\0\0\0"
def get_if_list():
try:
f=open("/proc/net/dev","r")
except IOError:
warning("Can't open /proc/net/dev !")
return []
lst = []
f.readline()
f.readline()
for l in f:
lst.append(l.split(":")[0].strip())
return lst
def get_working_if():
for i in get_if_list():
if i == LOOPBACK_NAME:
continue
ifflags = struct.unpack("16xH14x",get_if(i,SIOCGIFFLAGS))[0]
if ifflags & IFF_UP:
return i
return LOOPBACK_NAME
def attach_filter(s, filter):
    # XXX We generate the filter on the interface conf.iface
    # because tcpdump opens the "any" interface and ppp interfaces
    # in cooked mode. As we use them in raw mode, the filter will not
    # work... one solution could be to use the "any" interface and translate
    # the filter from cooked mode to raw mode
if not TCPDUMP:
return
try:
f = os.popen("%s -i %s -ddd -s 1600 '%s'" % (conf.prog.tcpdump,conf.iface,filter))
except OSError,msg:
        log_interactive.warning("Failed to execute tcpdump: (%s)" % msg)
return
lines = f.readlines()
if f.close():
raise Scapy_Exception("Filter parse error")
nb = int(lines[0])
bpf = ""
for l in lines[1:]:
bpf += struct.pack("HBBI",*map(long,l.split()))
# XXX. Argl! We need to give the kernel a pointer on the BPF,
# python object header seems to be 20 bytes. 36 bytes for x86 64bits arch.
if scapy.arch.X86_64:
bpfh = struct.pack("HL", nb, id(bpf)+36)
else:
bpfh = struct.pack("HI", nb, id(bpf)+20)
s.setsockopt(SOL_SOCKET, SO_ATTACH_FILTER, bpfh)
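# Illustrative sketch of the input attach_filter() parses (hypothetical tcpdump
# output, e.g. for the filter "arp"): "tcpdump -ddd" prints the instruction
# count followed by one "code jt jf k" quadruple per line:
#   4
#   40 0 0 12
#   21 0 1 2054
#   6 0 0 65535
#   6 0 0 0
# Each quadruple is packed into a struct sock_filter via struct.pack("HBBI", ...).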
def set_promisc(s,iff,val=1):
mreq = struct.pack("IHH8s", get_if_index(iff), PACKET_MR_PROMISC, 0, "")
if val:
cmd = PACKET_ADD_MEMBERSHIP
else:
cmd = PACKET_DROP_MEMBERSHIP
s.setsockopt(SOL_PACKET, cmd, mreq)
def read_routes():
try:
f=open("/proc/net/route","r")
except IOError:
warning("Can't open /proc/net/route !")
return []
routes = []
s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifreq = ioctl(s, SIOCGIFADDR,struct.pack("16s16x",LOOPBACK_NAME))
addrfamily = struct.unpack("h",ifreq[16:18])[0]
if addrfamily == socket.AF_INET:
ifreq2 = ioctl(s, SIOCGIFNETMASK,struct.pack("16s16x",LOOPBACK_NAME))
msk = socket.ntohl(struct.unpack("I",ifreq2[20:24])[0])
dst = socket.ntohl(struct.unpack("I",ifreq[20:24])[0]) & msk
ifaddr = scapy.utils.inet_ntoa(ifreq[20:24])
routes.append((dst, msk, "0.0.0.0", LOOPBACK_NAME, ifaddr))
else:
warning("Interface lo: unkown address family (%i)"% addrfamily)
for l in f.readlines()[1:]:
iff,dst,gw,flags,x,x,x,msk,x,x,x = l.split()
flags = int(flags,16)
if flags & RTF_UP == 0:
continue
if flags & RTF_REJECT:
continue
try:
ifreq = ioctl(s, SIOCGIFADDR,struct.pack("16s16x",iff))
except IOError: # interface is present in routing tables but does not have any assigned IP
ifaddr="0.0.0.0"
else:
addrfamily = struct.unpack("h",ifreq[16:18])[0]
if addrfamily == socket.AF_INET:
ifaddr = scapy.utils.inet_ntoa(ifreq[20:24])
else:
warning("Interface %s: unkown address family (%i)"%(iff, addrfamily))
continue
routes.append((socket.htonl(long(dst,16))&0xffffffffL,
socket.htonl(long(msk,16))&0xffffffffL,
scapy.utils.inet_ntoa(struct.pack("I",long(gw,16))),
iff, ifaddr))
f.close()
return routes
############
### IPv6 ###
############
def in6_getifaddr():
"""
Returns a list of 3-tuples of the form (addr, scope, iface) where
'addr' is the address of scope 'scope' associated to the interface
'ifcace'.
This is the list of all addresses of all interfaces available on
the system.
"""
ret = []
try:
f = open("/proc/net/if_inet6","r")
except IOError, err:
return ret
l = f.readlines()
for i in l:
# addr, index, plen, scope, flags, ifname
tmp = i.split()
addr = struct.unpack('4s4s4s4s4s4s4s4s', tmp[0])
addr = scapy.utils6.in6_ptop(':'.join(addr))
ret.append((addr, int(tmp[3], 16), tmp[5])) # (addr, scope, iface)
return ret
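# Illustrative return value of in6_getifaddr() (hypothetical addresses):
#   [('::1', 16, 'lo'), ('fe80::1', 32, 'eth0')]
# where the second element is the scope value read from /proc/net/if_inet6.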
def read_routes6():
try:
f = open("/proc/net/ipv6_route","r")
except IOError, err:
return []
# 1. destination network
# 2. destination prefix length
# 3. source network displayed
# 4. source prefix length
# 5. next hop
# 6. metric
# 7. reference counter (?!?)
# 8. use counter (?!?)
# 9. flags
# 10. device name
routes = []
def proc2r(p):
ret = struct.unpack('4s4s4s4s4s4s4s4s', p)
ret = ':'.join(ret)
return scapy.utils6.in6_ptop(ret)
lifaddr = in6_getifaddr()
for l in f.readlines():
d,dp,s,sp,nh,m,rc,us,fl,dev = l.split()
fl = int(fl, 16)
if fl & RTF_UP == 0:
continue
if fl & RTF_REJECT:
continue
d = proc2r(d) ; dp = int(dp, 16)
s = proc2r(s) ; sp = int(sp, 16)
nh = proc2r(nh)
cset = [] # candidate set (possible source addresses)
if dev == LOOPBACK_NAME:
if d == '::':
continue
cset = ['::1']
else:
devaddrs = filter(lambda x: x[2] == dev, lifaddr)
cset = scapy.utils6.construct_source_candidate_set(d, dp, devaddrs, LOOPBACK_NAME)
if len(cset) != 0:
routes.append((d, dp, nh, dev, cset))
f.close()
return routes
def get_if(iff,cmd):
s=socket.socket()
ifreq = ioctl(s, cmd, struct.pack("16s16x",iff))
s.close()
return ifreq
def get_if_index(iff):
return int(struct.unpack("I",get_if(iff, SIOCGIFINDEX)[16:20])[0])
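# SIOCGSTAMP fills a struct timeval; its two fields are unpacked as 64-bit
# values on x86_64 ("QQ") and as 32-bit values elsewhere ("II") below.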
if os.uname()[4] == 'x86_64':
def get_last_packet_timestamp(sock):
ts = ioctl(sock, SIOCGSTAMP, "1234567890123456")
s,us = struct.unpack("QQ",ts)
return s+us/1000000.0
else:
def get_last_packet_timestamp(sock):
ts = ioctl(sock, SIOCGSTAMP, "12345678")
s,us = struct.unpack("II",ts)
return s+us/1000000.0
def _flush_fd(fd):
if type(fd) is not int:
fd = fd.fileno()
while 1:
r,w,e = select([fd],[],[],0)
if r:
os.read(fd,MTU)
else:
break
class L3PacketSocket(SuperSocket):
desc = "read/write packets at layer 3 using Linux PF_PACKET sockets"
def __init__(self, type = ETH_P_ALL, filter=None, promisc=None, iface=None, nofilter=0):
self.type = type
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0)
if iface:
self.ins.bind((iface, type))
if not nofilter:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if filter is not None:
attach_filter(self.ins, filter)
_flush_fd(self.ins)
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
self.outs = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
self.outs.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2**30)
if promisc is None:
promisc = conf.promisc
self.promisc = promisc
if self.promisc:
if iface is None:
self.iff = get_if_list()
else:
if iface.__class__ is list:
self.iff = iface
else:
self.iff = [iface]
for i in self.iff:
set_promisc(self.ins, i)
def close(self):
if self.closed:
return
self.closed=1
if self.promisc:
for i in self.iff:
set_promisc(self.ins, i, 0)
SuperSocket.close(self)
def recv(self, x=MTU):
pkt, sa_ll = self.ins.recvfrom(x)
if sa_ll[2] == socket.PACKET_OUTGOING:
return None
if sa_ll[3] in conf.l2types:
cls = conf.l2types[sa_ll[3]]
lvl = 2
elif sa_ll[1] in conf.l3types:
cls = conf.l3types[sa_ll[1]]
lvl = 3
else:
cls = conf.default_l2
warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s" % (sa_ll[0],sa_ll[1],sa_ll[3],cls.name))
lvl = 2
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
if lvl == 2:
pkt = pkt.payload
if pkt is not None:
pkt.time = get_last_packet_timestamp(self.ins)
return pkt
def send(self, x):
iff,a,gw = x.route()
if iff is None:
iff = conf.iface
sdto = (iff, self.type)
self.outs.bind(sdto)
sn = self.outs.getsockname()
ll = lambda x:x
if type(x) in conf.l3types:
sdto = (iff, conf.l3types[type(x)])
if sn[3] in conf.l2types:
ll = lambda x:conf.l2types[sn[3]]()/x
try:
sx = str(ll(x))
x.sent_time = time.time()
self.outs.sendto(sx, sdto)
except socket.error,msg:
x.sent_time = time.time() # bad approximation
if conf.auto_fragment and msg[0] == 90:
for p in x.fragment():
self.outs.sendto(str(ll(p)), sdto)
else:
raise
class L2Socket(SuperSocket):
desc = "read/write packets at layer 2 using Linux PF_PACKET sockets"
def __init__(self, iface = None, type = ETH_P_ALL, filter=None, nofilter=0):
if iface is None:
iface = conf.iface
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0)
if not nofilter:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if filter is not None:
attach_filter(self.ins, filter)
self.ins.bind((iface, type))
_flush_fd(self.ins)
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
self.outs = self.ins
self.outs.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2**30)
sa_ll = self.outs.getsockname()
if sa_ll[3] in conf.l2types:
self.LL = conf.l2types[sa_ll[3]]
elif sa_ll[1] in conf.l3types:
self.LL = conf.l3types[sa_ll[1]]
else:
self.LL = conf.default_l2
warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s" % (sa_ll[0],sa_ll[1],sa_ll[3],self.LL.name))
def recv(self, x=MTU):
pkt, sa_ll = self.ins.recvfrom(x)
if sa_ll[2] == socket.PACKET_OUTGOING:
return None
try:
q = self.LL(pkt)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
q = conf.raw_layer(pkt)
q.time = get_last_packet_timestamp(self.ins)
return q
class L2ListenSocket(SuperSocket):
desc = "read packets at layer 2 using Linux PF_PACKET sockets"
def __init__(self, iface = None, type = ETH_P_ALL, promisc=None, filter=None, nofilter=0):
self.type = type
self.outs = None
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0)
if iface is not None:
self.ins.bind((iface, type))
if not nofilter:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if filter is not None:
attach_filter(self.ins, filter)
if promisc is None:
promisc = conf.sniff_promisc
self.promisc = promisc
if iface is None:
self.iff = get_if_list()
else:
if iface.__class__ is list:
self.iff = iface
else:
self.iff = [iface]
if self.promisc:
for i in self.iff:
set_promisc(self.ins, i)
_flush_fd(self.ins)
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
def close(self):
if self.promisc:
for i in self.iff:
set_promisc(self.ins, i, 0)
SuperSocket.close(self)
def recv(self, x=MTU):
pkt, sa_ll = self.ins.recvfrom(x)
if sa_ll[3] in conf.l2types :
cls = conf.l2types[sa_ll[3]]
elif sa_ll[1] in conf.l3types:
cls = conf.l3types[sa_ll[1]]
else:
cls = conf.default_l2
warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s" % (sa_ll[0],sa_ll[1],sa_ll[3],cls.name))
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
pkt.time = get_last_packet_timestamp(self.ins)
return pkt
def send(self, x):
raise Scapy_Exception("Can't send anything with L2ListenSocket")
conf.L3socket = L3PacketSocket
conf.L2socket = L2Socket
conf.L2listen = L2ListenSocket
conf.iface = get_working_if()
| lthurlow/python-tcpsnoop | scapy/arch/linux.py | Python | mit | 16,814 |
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangode.settings')
from django.core.wsgi import get_wsgi_application # noqa
application = get_wsgi_application()
| django-de/django-de-v4 | djangode/wsgi.py | Python | mit | 177 |
#***************************************************************************
#* *
#* Copyright (c) 2015 - Przemo Firszt <przemo@firszt.eu> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD
from PySide import QtCore
class FemTools(QtCore.QRunnable, QtCore.QObject):
finished = QtCore.Signal(int)
known_analysis_types = ["static", "frequency"]
## The constructor
# @param analysis - analysis object to be used as the core object.
    # @param test_mode - True indicates that no real calculations will take place, so the ccx binary is not required. Used by the test module.
    # "__init__" tries to use the current active analysis if analysis is left empty.
    # Raises an exception if analysis is not set and there is no active analysis
def __init__(self, analysis=None, test_mode=False):
QtCore.QRunnable.__init__(self)
QtCore.QObject.__init__(self)
if analysis:
## @var analysis
# FEM analysis - the core object. Has to be present.
# It's set to analysis passed in "__init__" or set to current active analysis by default if nothing has been passed to "__init__".
self.analysis = analysis
else:
import FemGui
self.analysis = FemGui.getActiveAnalysis()
if self.analysis:
self.update_objects()
self.set_analysis_type()
self.set_eigenmode_parameters()
## @var base_name
# base name of .inp/.frd file (without extension). It is used to construct .inp file path that is passed to CalculiX ccx
self.base_name = ""
## @var results_present
# boolean variable indicating if there are calculation results ready for use
self.results_present = False
self.setup_working_dir()
if test_mode:
self.ccx_binary_present = True
else:
self.ccx_binary_present = False
self.setup_ccx()
self.result_object = None
else:
raise Exception('FEM: No active analysis found!')
## Removes all result objects
# @param self The python object self
def purge_results(self):
for m in self.analysis.Member:
if (m.isDerivedFrom('Fem::FemResultObject')):
self.analysis.Document.removeObject(m.Name)
self.results_present = False
## Resets mesh deformation
# @param self The python object self
def reset_mesh_deformation(self):
if self.mesh:
self.mesh.ViewObject.applyDisplacement(0.0)
## Resets mesh color
# @param self The python object self
def reset_mesh_color(self):
if self.mesh:
self.mesh.ViewObject.NodeColor = {}
self.mesh.ViewObject.ElementColor = {}
self.mesh.ViewObject.setNodeColorByScalars()
## Resets mesh color, deformation and removes all result objects
# @param self The python object self
def reset_all(self):
self.purge_results()
self.reset_mesh_color()
self.reset_mesh_deformation()
## Sets mesh color using selected type of results (Sabs by default)
# @param self The python object self
# @param result_type Type of FEM result, allowed are:
# - U1, U2, U3 - deformation
# - Uabs - absolute deformation
# - Sabs - Von Mises stress
    # @param limit cutoff value. All values over the limit are treated as equal to the limit. Useful for filtering out hot spots.
def show_result(self, result_type="Sabs", limit=None):
self.update_objects()
if result_type == "None":
self.reset_mesh_color()
return
if self.result_object:
if result_type == "Sabs":
values = self.result_object.StressValues
elif result_type == "Uabs":
values = self.result_object.DisplacementLengths
else:
match = {"U1": 0, "U2": 1, "U3": 2}
d = zip(*self.result_object.DisplacementVectors)
values = list(d[match[result_type]])
self.show_color_by_scalar_with_cutoff(values, limit)
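    # Illustrative call (hypothetical; assumes results have already been loaded):
    #   fem_tools.show_result(result_type="Sabs", limit=250.0)
    # colors the mesh by Von Mises stress, clamping values above 250.0.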
## Sets mesh color using list of values. Internally used by show_result function.
# @param self The python object self
# @param values list of values
    # @param limit cutoff value. All values over the limit are treated as equal to the limit. Useful for filtering out hot spots.
def show_color_by_scalar_with_cutoff(self, values, limit=None):
if limit:
filtered_values = []
for v in values:
if v > limit:
filtered_values.append(limit)
else:
filtered_values.append(v)
else:
filtered_values = values
self.mesh.ViewObject.setNodeColorByScalars(self.result_object.NodeNumbers, filtered_values)
def show_displacement(self, displacement_factor=0.0):
self.mesh.ViewObject.setNodeDisplacementByVectors(self.result_object.NodeNumbers,
self.result_object.DisplacementVectors)
self.mesh.ViewObject.applyDisplacement(displacement_factor)
def update_objects(self):
# [{'Object':material}, {}, ...]
# [{'Object':fixed_constraints, 'NodeSupports':bool}, {}, ...]
# [{'Object':force_constraints, 'NodeLoad':value}, {}, ...
# [{'Object':pressure_constraints, 'xxxxxxxx':value}, {}, ...]
# [{'Object':beam_sections, 'xxxxxxxx':value}, {}, ...]
# [{'Object':shell_thicknesses, 'xxxxxxxx':value}, {}, ...]
## @var mesh
# mesh of the analysis. Used to generate .inp file and to show results
self.mesh = None
self.material = []
## @var fixed_constraints
# set of fixed constraints from the analysis. Updated with update_objects
# Individual constraints are "Fem::ConstraintFixed" type
self.fixed_constraints = []
## @var force_constraints
# set of force constraints from the analysis. Updated with update_objects
# Individual constraints are "Fem::ConstraintForce" type
self.force_constraints = []
## @var pressure_constraints
# set of pressure constraints from the analysis. Updated with update_objects
# Individual constraints are "Fem::ConstraintPressure" type
self.pressure_constraints = []
self.beam_sections = []
self.shell_thicknesses = []
for m in self.analysis.Member:
if m.isDerivedFrom("Fem::FemMeshObject"):
self.mesh = m
elif m.isDerivedFrom("App::MaterialObjectPython"):
material_dict = {}
material_dict['Object'] = m
self.material.append(material_dict)
elif m.isDerivedFrom("Fem::ConstraintFixed"):
fixed_constraint_dict = {}
fixed_constraint_dict['Object'] = m
self.fixed_constraints.append(fixed_constraint_dict)
elif m.isDerivedFrom("Fem::ConstraintForce"):
force_constraint_dict = {}
force_constraint_dict['Object'] = m
self.force_constraints.append(force_constraint_dict)
elif m.isDerivedFrom("Fem::ConstraintPressure"):
PressureObjectDict = {}
PressureObjectDict['Object'] = m
self.pressure_constraints.append(PressureObjectDict)
elif hasattr(m, "Proxy") and m.Proxy.Type == 'FemBeamSection':
beam_section_dict = {}
beam_section_dict['Object'] = m
self.beam_sections.append(beam_section_dict)
elif hasattr(m, "Proxy") and m.Proxy.Type == 'FemShellThickness':
shell_thickness_dict = {}
shell_thickness_dict['Object'] = m
self.shell_thicknesses.append(shell_thickness_dict)
def check_prerequisites(self):
message = ""
if not self.analysis:
message += "No active Analysis\n"
if self.analysis_type not in self.known_analysis_types:
message += "Unknown analysis type: {}\n".format(self.analysis_type)
if not self.working_dir:
message += "Working directory not set\n"
import os
if not (os.path.isdir(self.working_dir)):
message += "Working directory \'{}\' doesn't exist.".format(self.working_dir)
if not self.mesh:
message += "No mesh object in the Analysis\n"
if not self.material:
message += "No material object in the Analysis\n"
if not self.fixed_constraints:
message += "No fixed-constraint nodes defined in the Analysis\n"
if self.analysis_type == "static":
if not (self.force_constraints or self.pressure_constraints):
message += "No force-constraint or pressure-constraint defined in the Analysis\n"
if self.beam_sections:
has_no_references = False
for b in self.beam_sections:
if len(b['Object'].References) == 0:
if has_no_references is True:
message += "More than one BeamSection has empty References list (Only one empty References list is allowed!).\n"
has_no_references = True
if self.shell_thicknesses:
has_no_references = False
for s in self.shell_thicknesses:
if len(s['Object'].References) == 0:
if has_no_references is True:
message += "More than one ShellThickness has empty References list (Only one empty References list is allowed!).\n"
has_no_references = True
return message
def write_inp_file(self):
import ccxInpWriter as iw
import sys
self.inp_file_name = ""
try:
inp_writer = iw.inp_writer(self.analysis, self.mesh, self.material,
self.fixed_constraints,
self.force_constraints, self.pressure_constraints,
self.beam_sections, self.shell_thicknesses,
self.analysis_type, self.eigenmode_parameters,
self.working_dir)
self.inp_file_name = inp_writer.write_calculix_input_file()
except:
print("Unexpected error when writing CalculiX input file:", sys.exc_info()[0])
raise
def start_ccx(self):
import multiprocessing
import os
import subprocess
self.ccx_stdout = ""
self.ccx_stderr = ""
if self.inp_file_name != "" and self.ccx_binary_present:
ont_backup = os.environ.get('OMP_NUM_THREADS')
if not ont_backup:
ont_backup = ""
_env = os.putenv('OMP_NUM_THREADS', str(multiprocessing.cpu_count()))
# change cwd because ccx may crash if directory has no write permission
# there is also a limit of the length of file names so jump to the document directory
cwd = QtCore.QDir.currentPath()
f = QtCore.QFileInfo(self.inp_file_name)
QtCore.QDir.setCurrent(f.path())
p = subprocess.Popen([self.ccx_binary, "-i ", f.baseName()],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, env=_env)
self.ccx_stdout, self.ccx_stderr = p.communicate()
os.putenv('OMP_NUM_THREADS', ont_backup)
QtCore.QDir.setCurrent(cwd)
return p.returncode
return -1
## Sets eigenmode parameters for CalculiX frequency analysis
# @param self The python object self
    # @param number number of eigenmodes that will be calculated, default read from FEM prefs or 10 if not set in the FEM prefs
    # @param limit_low lower value of requested eigenfrequency range, default read from FEM prefs or 0.0 if not set in the FEM prefs
    # @param limit_high higher value of requested eigenfrequency range, default read from FEM prefs or 1000000.0 if not set in the FEM prefs
def set_eigenmode_parameters(self, number=None, limit_low=None, limit_high=None):
self.fem_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem")
if number is not None:
_number = number
else:
try:
_number = self.analysis.NumberOfEigenmodes
except:
#Not yet in prefs, so it will always default to 10
_number = self.fem_prefs.GetInteger("NumberOfEigenmodes", 10)
if _number < 1:
_number = 1
if limit_low is not None:
_limit_low = limit_low
else:
try:
_limit_low = self.analysis.EigenmodeLowLimit
except:
#Not yet in prefs, so it will always default to 0.0
_limit_low = self.fem_prefs.GetFloat("EigenmodeLowLimit", 0.0)
if limit_high is not None:
_limit_high = limit_high
else:
try:
_limit_high = self.analysis.EigenmodeHighLimit
except:
#Not yet in prefs, so it will always default to 1000000.0
_limit_high = self.fem_prefs.GetFloat("EigenmodeHighLimit", 1000000.0)
self.eigenmode_parameters = (_number, _limit_low, _limit_high)
## Sets base_name
# @param self The python object self
# @param base_name base name of .inp/.frd file (without extension). It is used to construct .inp file path that is passed to CalculiX ccx
def set_base_name(self, base_name=None):
if base_name is None:
self.base_name = ""
else:
self.base_name = base_name
# Update inp file name
self.set_inp_file_name()
## Sets inp file name that is used to determine location and name of frd result file.
# Normally inp file name is set set by write_inp_file
# Can be used to read mock calculations file
# @param self The python object self
# @inp_file_name .inp file name. If empty the .inp file path is constructed from working_dir, base_name and string ".inp"
def set_inp_file_name(self, inp_file_name=None):
if inp_file_name is not None:
self.inp_file_name = inp_file_name
else:
self.inp_file_name = self.working_dir + '/' + self.base_name + '.inp'
## Sets analysis type.
# @param self The python object self
# @param analysis_type type of the analysis. Allowed values are:
# - static
# - frequency
def set_analysis_type(self, analysis_type=None):
if analysis_type is not None:
self.analysis_type = analysis_type
else:
try:
self.analysis_type = self.analysis.AnalysisType
except:
self.fem_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem")
self.analysis_type = self.fem_prefs.GetString("AnalysisType", "static")
## Sets working dir for ccx execution. Called with no working_dir uses WorkingDir from FEM preferences
# @param self The python object self
# @working_dir directory to be used for writing .inp file and executing CalculiX ccx
def setup_working_dir(self, working_dir=None):
import os
if working_dir is not None:
self.working_dir = working_dir
else:
try:
self.working_dir = self.analysis.WorkingDir
except:
FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem").GetString("WorkingDir")
self.working_dir = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem").GetString("WorkingDir")
if not (os.path.isdir(self.working_dir)):
try:
os.makedirs(self.working_dir)
except:
print("Dir \'{}\' doesn't exist and cannot be created.".format(self.working_dir))
import tempfile
self.working_dir = tempfile.gettempdir()
print("Dir \'{}\' will be used instead.".format(self.working_dir))
# Update inp file name
self.set_inp_file_name()
    ## Sets CalculiX ccx binary path and validates if the binary can be executed
# @param self The python object self
# @ccx_binary path to ccx binary, default is guessed: "bin/ccx" windows, "ccx" for other systems
# @ccx_binary_sig expected output form ccx when run empty. Default value is "CalculiX.exe -i jobname"
def setup_ccx(self, ccx_binary=None, ccx_binary_sig="CalculiX"):
if not ccx_binary:
self.fem_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem")
ccx_binary = self.fem_prefs.GetString("ccxBinaryPath", "")
if not ccx_binary:
from platform import system
if system() == "Linux":
ccx_binary = "ccx"
elif system() == "Windows":
ccx_binary = FreeCAD.getHomePath() + "bin/ccx.exe"
else:
ccx_binary = "ccx"
self.ccx_binary = ccx_binary
import subprocess
try:
p = subprocess.Popen([self.ccx_binary], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False)
ccx_stdout, ccx_stderr = p.communicate()
if ccx_binary_sig in ccx_stdout:
self.ccx_binary_present = True
except OSError, e:
FreeCAD.Console.PrintError(e.message)
if e.errno == 2:
raise Exception("FEM: CalculiX binary ccx \'{}\' not found. Please set it in FEM preferences.".format(ccx_binary))
except Exception as e:
FreeCAD.Console.PrintError(e.message)
raise Exception("FEM: CalculiX ccx \'{}\' output \'{}\' doesn't contain expected phrase \'{}\'. Please use ccx 2.6 or newer".
format(ccx_binary, ccx_stdout, ccx_binary_sig))
## Load results of ccx calculations from .frd file.
# @param self The python object self
def load_results(self):
import ccxFrdReader
import os
self.results_present = False
frd_result_file = os.path.splitext(self.inp_file_name)[0] + '.frd'
if os.path.isfile(frd_result_file):
ccxFrdReader.importFrd(frd_result_file, self.analysis)
for m in self.analysis.Member:
if m.isDerivedFrom("Fem::FemResultObject"):
self.result_object = m
if self.result_object:
self.results_present = True
else:
raise Exception('FEM: No results found at {}!'.format(frd_result_file))
import ccxDatReader
dat_result_file = os.path.splitext(self.inp_file_name)[0] + '.dat'
if os.path.isfile(dat_result_file):
mode_frequencies = ccxDatReader.import_dat(dat_result_file, self.analysis)
else:
raise Exception('FEM: No .dat results found at {}!'.format(dat_result_file))
for m in self.analysis.Member:
if m.isDerivedFrom("Fem::FemResultObject") and m.Eigenmode > 0:
m.EigenmodeFrequency = mode_frequencies[m.Eigenmode - 1]['frequency']
m.setEditorMode("EigenmodeFrequency", 1)
def use_results(self, results_name=None):
for m in self.analysis.Member:
if m.isDerivedFrom("Fem::FemResultObject") and m.Name == results_name:
self.result_object = m
break
if not self.result_object:
raise Exception("{} doesn't exist".format(results_name))
def run(self):
ret_code = 0
message = self.check_prerequisites()
if not message:
self.write_inp_file()
from FreeCAD import Base
progress_bar = Base.ProgressIndicator()
progress_bar.start("Running CalculiX ccx...", 0)
ret_code = self.start_ccx()
self.finished.emit(ret_code)
progress_bar.stop()
else:
print("Running analysis failed! {}".format(message))
if ret_code or self.ccx_stderr:
print("Analysis failed with exit code {}".format(ret_code))
print("--------start of stderr-------")
print(self.ccx_stderr)
print("--------end of stderr---------")
print("--------start of stdout-------")
print(self.ccx_stdout)
print("--------end of stdout---------")
## Returns minimum, average and maximum value for provided result type
# @param self The python object self
# @param result_type Type of FEM result, allowed are:
# - U1, U2, U3 - deformation
# - Uabs - absolute deformation
# - Sabs - Von Mises stress
# - None - always return (0.0, 0.0, 0.0)
def get_stats(self, result_type):
stats = (0.0, 0.0, 0.0)
for m in self.analysis.Member:
if m.isDerivedFrom("Fem::FemResultObject"):
match = {"U1": (m.Stats[0], m.Stats[1], m.Stats[2]),
"U2": (m.Stats[3], m.Stats[4], m.Stats[5]),
"U3": (m.Stats[6], m.Stats[7], m.Stats[8]),
"Uabs": (m.Stats[9], m.Stats[10], m.Stats[11]),
"Sabs": (m.Stats[12], m.Stats[13], m.Stats[14]),
"None": (0.0, 0.0, 0.0)}
stats = match[result_type]
return stats
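    # Illustrative call (hypothetical): get_stats("Uabs") returns a
    # (minimum, average, maximum) tuple taken from the result object's Stats.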
| mickele77/FreeCAD | src/Mod/Fem/FemTools.py | Python | lgpl-2.1 | 23,147 |
#* pyx509 - Python library for parsing X.509
#* Copyright (C) 2009-2010 CZ.NIC, z.s.p.o. (http://www.nic.cz)
#*
#* This library is free software; you can redistribute it and/or
#* modify it under the terms of the GNU Library General Public
#* License as published by the Free Software Foundation; either
#* version 2 of the License, or (at your option) any later version.
#*
#* This library is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#* Library General Public License for more details.
#*
#* You should have received a copy of the GNU Library General Public
#* License along with this library; if not, write to the Free
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#*
# standard library imports
import hashlib
import logging
logger = logging.getLogger("pkcs7.digest")
import base64
RSA_NAME = "RSA"
SHA1_NAME = "SHA-1"
SHA256_NAME = "SHA-256"
SHA384_NAME = "SHA-384"
SHA512_NAME = "SHA-512"
def calculate_digest(data, alg):
'''
Calculates digest according to algorithm
'''
digest_alg = None
if (alg == SHA1_NAME):
digest_alg = hashlib.sha1()
if (alg == SHA256_NAME):
digest_alg = hashlib.sha256()
if (alg == SHA384_NAME):
digest_alg = hashlib.sha384()
if (alg == SHA512_NAME):
digest_alg = hashlib.sha512()
if digest_alg is None:
logger.error("Unknown digest algorithm : %s" % alg)
return None
digest_alg.update(data)
dg = digest_alg.digest()
logger.debug("Calculated hash from input data: %s" % base64.b64encode(dg))
return dg
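# Illustrative usage sketch (not part of the original module):
#   dg = calculate_digest(signed_data, SHA256_NAME)
#   if dg is None:
#       ...  # unknown/unsupported algorithm name was passed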
| plarivee/public_drown_scanner | pyx509/pkcs7/digest.py | Python | gpl-2.0 | 1,801 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class JunitTestsIntegrationTest(PantsRunIntegrationTest):
def _assert_junit_output(self, workdir):
self.assertTrue(os.path.exists(
os.path.join(workdir, 'test', 'junit',
'com.pants.examples.hello.greet.GreetingTest.out.txt')))
self.assertTrue(os.path.exists(
os.path.join(workdir, 'test', 'junit',
'com.pants.examples.hello.greet.GreetingTest.err.txt')))
self.assertTrue(os.path.exists(
os.path.join(workdir, 'test', 'junit',
'com.pants.example.hello.welcome.WelSpec.out.txt')))
self.assertTrue(os.path.exists(
os.path.join(workdir, 'test', 'junit',
'com.pants.example.hello.welcome.WelSpec.err.txt')))
def test_junit_test(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
pants_run = self.run_pants_with_workdir(
['goal', 'test', 'examples/tests/java/com/pants/examples/hello/greet',
'examples/tests/scala/com/pants/example/hello/welcome',
'--interpreter=CPython>=2.6,<3',
'--interpreter=CPython>=3.3',],
workdir)
self.assert_success(pants_run)
self._assert_junit_output(workdir)
def test_junit_test_with_emma(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
pants_run = self.run_pants_with_workdir(
['goal', 'test', 'examples/tests/java//com/pants/examples/hello/greet',
'examples/tests/scala/com/pants/example/hello/welcome',
'--interpreter=CPython>=2.6,<3',
'--interpreter=CPython>=3.3',
'--test-junit-coverage-processor=emma', '--test-junit-coverage',
'--test-junit-coverage-xml', '--test-junit-coverage-html',],
workdir)
self.assert_success(pants_run)
self._assert_junit_output(workdir)
      # TODO(Eric Ayers): Why does emma put coverage.xml in a different directory from cobertura?
self.assertTrue(os.path.exists(
os.path.join(workdir, 'test', 'junit', 'coverage', 'coverage.xml')))
self.assertTrue(os.path.exists(
os.path.join(workdir, 'test', 'junit', 'coverage', 'html', 'index.html')))
# Look for emma report in stdout_data:
# 23:20:21 00:02 [emma-report][EMMA v2.1.5320 (stable) report, generated Mon Oct 13 ...
self.assertIn('[emma-report]', pants_run.stdout_data)
# See if the two test classes ended up generating data in the coverage report.
lines = pants_run.stdout_data.split('\n')
in_package_report = False
package_report = ""
for line in lines:
if 'COVERAGE BREAKDOWN BY PACKAGE:' in line:
in_package_report = True
if in_package_report:
package_report += line
self.assertIn('com.pants.example.hello.welcome', package_report)
self.assertIn('com.pants.examples.hello.greet', package_report)
  def test_junit_test_with_cobertura(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
pants_run = self.run_pants_with_workdir(
['goal', 'test', 'examples/tests/java//com/pants/examples/hello/greet',
'examples/tests/scala/com/pants/example/hello/welcome',
'--interpreter=CPython>=2.6,<3',
'--interpreter=CPython>=3.3',
'--test-junit-coverage-processor=cobertura', '--test-junit-coverage',
'--test-junit-coverage-xml', '--test-junit-coverage-html',],
workdir)
self.assert_success(pants_run)
self._assert_junit_output(workdir)
self.assertTrue(os.path.exists(
os.path.join(workdir, 'test', 'junit', 'coverage', 'html', 'index.html')))
# TODO(Eric Ayers): Look at the xml report. I think something is broken, it is empty
self.assertTrue(os.path.exists(
os.path.join(workdir, 'test', 'junit', 'coverage', 'xml', 'coverage.xml')))
| Ervii/garage-time | garage/tests/python/pants_test/tasks/test_junit_tests_integration.py | Python | apache-2.0 | 4,225 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_categories(apps, schema_editor):
Category = apps.get_model('niqati', 'Category')
# We are going to add categories only if none already exists.
if not Category.objects.exists():
Category.objects.create(label="Idea",
ar_label="فكرة",
points=3)
Category.objects.create(label="Organizer",
ar_label="تنظيم",
points=2)
Category.objects.create(label="Participation",
ar_label="مشاركة",
points=1)
def remove_categories(apps, schema_editor):
Category = apps.get_model('niqati', 'Category')
Category.objects.filter(label__in=["Idea", "Organizer", "Participation"]).delete()
class Migration(migrations.Migration):
dependencies = [
('niqati', '0001_initial'),
]
operations = [
migrations.RunPython(
add_categories,
reverse_code=remove_categories),
]
| enjaz/enjaz | niqati/migrations/0002_add_categories.py | Python | agpl-3.0 | 1,163 |
#!/usr/bin/python
# Copyright 2014 Nervana Systems Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pycuda.driver as drv
from neon.backends.nervanagpu import NervanaGPU
from neon.backends.layer_gpu import Layer, DataLayer, ConvLayer, PoolLayer, FullLayer, Inception, BatchNorm
# Compare results here:
# https://github.com/soumith/convnet-benchmarks
# Available nets:
# "Alexnet","Overfeat","GoogLeNet1","GoogLeNet2","VGG","VGG_E"
# Note GoogLeNet2 only fits in fp16 currently. I need to work out delta sharing in inception layers.
nets = ("Alexnet","AlexnetBN","GoogLeNet1BN",)
#Available dtypes: np.float16, np.float32
dtypes = (np.float16, )
# number of full iterations
loops = 10
# show benchmark details for each layer
layer_bench = 0
# show layer stats after each operation
print_stats = 0
# run network with all zeros to see speed difference
zeros = 0
# print more stuff
verbose = 0
ng = NervanaGPU(bench=layer_bench)
print(drv.Context.get_current().get_device().name())
# common convolutional layer settings
conv11 = { "R":11, "S":11, "pad_h":2, "pad_w":2, "str_h":4, "str_w":4 }
conv11p0 = { "R":11, "S":11, "pad_h":0, "pad_w":0, "str_h":4, "str_w":4 }
conv7 = { "R":7, "S":7, "pad_h":3, "pad_w":3, "str_h":2, "str_w":2 }
conv5 = { "R":5, "S":5, "pad_h":2, "pad_w":2 }
conv5p0 = { "R":5, "S":5, "pad_h":0, "pad_w":0 }
conv3 = { "R":3, "S":3, "pad_h":1, "pad_w":1 }
conv2 = { "R":2, "S":2, "pad_h":0, "pad_w":0, "str_h":2, "str_w":2 }
conv1 = { "R":1, "S":1, "pad_h":0, "pad_w":0 }
# traditional pooling
pool2s2p0 = { "R":2, "S":2 }
pool3s2p0 = { "R":3, "S":3, "str_h":2, "str_w":2 }
pool3s2p1 = { "R":3, "S":3, "str_h":2, "str_w":2, "pad_h":1, "pad_w":1 }
pool3s1p1 = { "R":3, "S":3, "str_h":1, "str_w":1, "pad_h":1, "pad_w":1 }
pool7s1p0 = { "R":7, "S":7, "str_h":1, "str_w":1 }
# maxout pooling
pool1j2 = { "op":"max", "J":2 } # maxout in the fc layers
pool2j2 = { "op":"max", "J":2, "R":2, "S":2 }
pool3j2 = { "op":"max", "J":2, "R":3, "S":3 }
def inception1(conf):
return {
"layer":Inception, "partitions" : (
(
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":conf[0][0], },
),
(
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":conf[1][0], },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":conf[1][1], },
),
(
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":conf[2][0], },
{ "layer":ConvLayer, "common":conv5, "relu":True, "K":conf[2][1], },
),
(
{ "layer":PoolLayer, "common":pool3s1p1, "op":"max" },
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":conf[3][0], },
),
)
}
def inception1BN(conf):
return {
"layer":Inception, "partitions" : (
(
{ "layer":ConvLayer, "common":conv1, "K":conf[0][0], "bsum":True, },
{ "layer":BatchNorm, "relu":True, "bsum":True },
),
(
{ "layer":ConvLayer, "common":conv1, "K":conf[1][0], "bsum":True, },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":conf[1][1], "bsum":True, },
{ "layer":BatchNorm, "relu":True, "bsum":True },
),
(
{ "layer":ConvLayer, "common":conv1, "K":conf[2][0], "bsum":True, },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv5, "K":conf[2][1], "bsum":True, },
{ "layer":BatchNorm, "relu":True, "bsum":True },
),
(
{ "layer":PoolLayer, "common":pool3s1p1, "op":"max" },
{ "layer":ConvLayer, "common":conv1, "K":conf[3][0], "bsum":True, },
{ "layer":BatchNorm, "relu":True, "bsum":True },
),
)
}
def inception2(conf):
layer = { "layer":Inception, "partitions" : [] }
partitions = layer["partitions"]
if conf[0][0]:
partitions.append( (
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":conf[0][0] },
) )
partitions.extend( (
(
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":conf[1][0] },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":conf[1][1] },
),
(
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":conf[2][0] },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":conf[2][1] },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":conf[2][1] },
),
) )
if conf[3][1]:
partitions.append( (
{ "layer":PoolLayer, "common":pool3s1p1, "op":conf[3][0] }, #
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":conf[3][1] },
) )
else:
partitions.append( (
{ "layer":PoolLayer, "common":pool3s1p1, "op":conf[3][0] },
) )
return layer
def inception2BN(conf):
layer = { "layer":Inception, "partitions" : [] }
partitions = layer["partitions"]
if conf[0][0]:
partitions.append( (
{ "layer":ConvLayer, "common":conv1, "K":conf[0][0], "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
) )
partitions.extend( (
(
{ "layer":ConvLayer, "common":conv1, "K":conf[1][0], "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":conf[1][1], "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
),
(
{ "layer":ConvLayer, "common":conv1, "K":conf[2][0], "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":conf[2][1], "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":conf[2][1], "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
),
) )
if conf[3][1]:
partitions.append( (
{ "layer":PoolLayer, "common":pool3s1p1, "op":conf[3][0] },
{ "layer":ConvLayer, "common":conv1, "K":conf[3][1], "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
) )
else:
partitions.append( (
{ "layer":PoolLayer, "common":pool3s1p1, "op":conf[3][0] },
) )
return layer
networks = {
"Alexnet" : (
{ "warmup":4 },
{ "layer":DataLayer, "N":128, "C":3, "H":224, "W":224},
{ "layer":ConvLayer, "common":conv11, "relu":True,"K":64 },
{ "layer":PoolLayer, "common":pool3s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv5, "relu":True, "K":192 },
{ "layer":PoolLayer, "common":pool3s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":384 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":256 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":256 },
{ "layer":PoolLayer, "common":pool3s2p0, "op":"max" },
{ "layer":FullLayer, "nOut":4096, "relu":True },
{ "layer":FullLayer, "nOut":4096, "relu":True },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
"AlexnetBN" : (
{ "warmup":4 },
{ "layer":DataLayer, "N":128, "C":3, "H":224, "W":224},
{ "layer":ConvLayer, "common":conv11,"K":64, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool3s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv5, "K":192, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool3s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "K":384, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":256, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":256, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool3s2p0, "op":"max" },
{ "layer":FullLayer, "nOut":4096 },
{ "layer":BatchNorm, "relu":True },
{ "layer":FullLayer, "nOut":4096 },
{ "layer":BatchNorm, "relu":True },
{ "layer":FullLayer, "relu":True, "nOut":1000 },
),
"Overfeat" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":128, "C":3, "H":231, "W":231},
{ "layer":ConvLayer, "common":conv11p0, "relu":True,"K":96 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv5p0, "relu":True, "K":256 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":1024 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":1024 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":FullLayer, "nOut":3072, "relu":True },
{ "layer":FullLayer, "nOut":4096, "relu":True },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
"OverfeatBN" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":128, "C":3, "H":231, "W":231},
{ "layer":ConvLayer, "common":conv11p0,"K":96, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv5p0, "K":256, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "K":512, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":1024, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":1024, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":FullLayer, "nOut":3072 },
{ "layer":BatchNorm, "relu":True },
{ "layer":FullLayer, "nOut":4096 },
{ "layer":BatchNorm, "relu":True },
{ "layer":FullLayer, "relu":True, "nOut":1000 },
),
# See http://arxiv.org/pdf/1409.1556.pdf for variations
"VGG" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":64, "C":3, "H":224, "W":224},
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":64 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":128 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":256 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":256 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":FullLayer, "nOut":4096, "relu":True },
{ "layer":FullLayer, "nOut":4096, "relu":True },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
# See http://arxiv.org/pdf/1409.1556.pdf for variations
"VGG_BN" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":64, "C":3, "H":224, "W":224},
{ "layer":ConvLayer, "common":conv3, "K":64, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "K":128, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "K":256, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":256, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "K":512, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":512, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "K":512, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":512, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":FullLayer, "nOut":4096 },
{ "layer":BatchNorm, "relu":True },
{ "layer":FullLayer, "nOut":4096 },
{ "layer":BatchNorm, "relu":True },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
# Here is the biggest VGG model (19 layers)
"VGG_E" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":64, "C":3, "H":224, "W":224},
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":64 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":64 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":128 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":128 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":256 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":256 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":256 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":256 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":512 },
{ "layer":PoolLayer, "common":pool2s2p0, "op":"max" },
{ "layer":FullLayer, "nOut":4096, "relu":True },
{ "layer":FullLayer, "nOut":4096, "relu":True },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
# http://arxiv.org/abs/1409.4842
"GoogLeNet1" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":128, "C":3, "H":224, "W":224 },
{ "layer":ConvLayer, "common":conv7, "relu":True, "K":64 },
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":64 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":192 },
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
inception1( [(64, ),(96, 128),(16, 32),(32, )] ),
inception1( [(128,),(128,192),(32, 96),(64, )] ),
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
inception1( [(192,),(96, 208),(16, 48),(64, )] ),
inception1( [(160,),(112,224),(24, 64),(64, )] ),
inception1( [(128,),(128,256),(24, 64),(64, )] ),
inception1( [(112,),(144,288),(32, 64),(64, )] ),
inception1( [(256,),(160,320),(32,128),(128,)] ),
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
inception1( [(256,),(160,320),(32,128),(128,)] ),
inception1( [(384,),(192,384),(48,128),(128,)] ),
{ "layer":PoolLayer, "common":pool7s1p0, "op":"avg" },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
# http://arxiv.org/abs/1409.4842
"GoogLeNet1BN" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":128, "C":3, "H":224, "W":224 },
{ "layer":ConvLayer, "common":conv7, "K":64, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
{ "layer":ConvLayer, "common":conv1, "K":64, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":192, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
inception1BN( [(64, ),(96, 128),(16, 32),(32, )] ),
inception1BN( [(128,),(128,192),(32, 96),(64, )] ),
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
inception1BN( [(192,),(96, 208),(16, 48),(64, )] ),
inception1BN( [(160,),(112,224),(24, 64),(64, )] ),
inception1BN( [(128,),(128,256),(24, 64),(64, )] ),
inception1BN( [(112,),(144,288),(32, 64),(64, )] ),
inception1BN( [(256,),(160,320),(32,128),(128,)] ),
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
inception1BN( [(256,),(160,320),(32,128),(128,)] ),
inception1BN( [(384,),(192,384),(48,128),(128,)] ),
{ "layer":PoolLayer, "common":pool7s1p0, "op":"avg" },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
# adapted from: https://github.com/soumith/kaggle_retinopathy_starter.torch/blob/master/models/googlenet_cudnn.lua
"GoogLeNet2" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":128, "C":3, "H":224, "W":224 },
{ "layer":ConvLayer, "common":conv7, "relu":True, "K":64 },
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
{ "layer":ConvLayer, "common":conv1, "relu":True, "K":64 },
{ "layer":ConvLayer, "common":conv3, "relu":True, "K":192 },
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
inception2( [( 64,),( 64, 64),( 64, 96),('avg', 32)] ),
inception2( [( 64,),( 64, 96),( 64, 96),('avg', 64)] ),
inception2( [( 0,),(128,160),( 64, 96),('max', 0)] ),
{ "layer":ConvLayer, "common":conv2, "relu":True, "K":576 },
inception2( [(224,),( 64, 96),( 96,128),('avg',128)] ),
inception2( [(192,),( 96,128),( 96,128),('avg',128)] ),
inception2( [(160,),(128,160),(128,160),('avg', 96)] ),
inception2( [( 96,),(128,192),(160,192),('avg', 96)] ),
inception2( [( 0,),(128,192),(192,256),('max', 0)] ),
{ "layer":ConvLayer, "common":conv2, "relu":True, "K":1024 },
inception2( [(352,),(192,320),(160,224),('avg',128)] ),
inception2( [(352,),(192,320),(192,224),('max',128)] ),
{ "layer":PoolLayer, "common":pool7s1p0, "op":"avg" },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
# adapted from: https://github.com/soumith/kaggle_retinopathy_starter.torch/blob/master/models/googlenet_cudnn.lua
"GoogLeNet2BN" : (
{ "warmup":1 },
{ "layer":DataLayer, "N":128, "C":3, "H":224, "W":224 },
{ "layer":ConvLayer, "common":conv7, "K":64, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
{ "layer":ConvLayer, "common":conv1, "K":64, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":ConvLayer, "common":conv3, "K":192, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
{ "layer":PoolLayer, "common":pool3s2p1, "op":"max" },
inception2BN( [( 64,),( 64, 64),( 64, 96),('avg', 32)] ),
inception2BN( [( 64,),( 64, 96),( 64, 96),('avg', 64)] ),
inception2BN( [( 0,),(128,160),( 64, 96),('max', 0)] ),
{ "layer":ConvLayer, "common":conv2, "K":576, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
inception2BN( [(224,),( 64, 96),( 96,128),('avg',128)] ),
inception2BN( [(192,),( 96,128),( 96,128),('avg',128)] ),
inception2BN( [(160,),(128,160),(128,160),('avg', 96)] ),
inception2BN( [( 96,),(128,192),(160,192),('avg', 96)] ),
inception2BN( [( 0,),(128,192),(192,256),('max', 0)] ),
{ "layer":ConvLayer, "common":conv2, "K":1024, "bsum":True },
{ "layer":BatchNorm, "relu":True, "bsum":True },
inception2BN( [(352,),(192,320),(160,224),('avg',128)] ),
inception2BN( [(352,),(192,320),(192,224),('max',128)] ),
{ "layer":PoolLayer, "common":pool7s1p0, "op":"avg" },
{ "layer":FullLayer, "nOut":1000, "relu":True },
),
}
for net in nets:
for dtype in dtypes:
warmup = networks[net][0]["warmup"]
network = networks[net][1:]
name = "%s (dtype=%s, N=%d)" % (net, np.dtype(dtype).name, network[0]["N"])
# only first run needs a big warmup
networks[net][0]["warmup"] = 1
print("------------------------------------------------")
print("Benchmarking: " + name)
print("------------------------------------------------")
layers = []
prev_layer = None
max_deltas = 0
max_weights = 0
max_delta_layer = None
max_weight_layer = None
shared_weights = None
shared_deltas = []
inception = False
for conf in network:
layer = Layer.create(ng, conf, prev_layer, dtype)
if type(layer) is Inception:
inception = True
# find the size of the largest buffers so they can be shared
if layer.sizeF > max_weights:
max_weights = layer.sizeF
max_weight_layer = layer
if layer.sizeI > max_deltas and type(prev_layer) is not DataLayer:
max_deltas = layer.sizeI
max_delta_layer = layer
prev_layer = layer
layers.append(layer)
# Init shared buffers (assumes consistent dtype for now)
shared_deltas.append(ng.empty(max_delta_layer.dimI, dtype=max_delta_layer.dtype))
shared_deltas.append(ng.empty(max_delta_layer.dimI, dtype=max_delta_layer.dtype))
if inception:
shared_deltas.append(ng.empty(max_delta_layer.dimI, dtype=max_delta_layer.dtype))
shared_deltas.append(ng.empty(max_delta_layer.dimI, dtype=max_delta_layer.dtype))
shared_updates = ng.empty(max_weight_layer.dimF, dtype=np.float32)
for i, layer in enumerate(layers):
if verbose:
print(layer)
    # Initialize buffers. Alternate shared delta buffers.
# One layer can't have the same buffer for both error in and error out.
layer.init_activations()
layer.init_weights(shared=shared_updates, zeros=zeros)
if i > 1:
layer.init_deltas(shared=shared_deltas)
if verbose:
remain, total = drv.mem_get_info()
print("%.3fGB of %.3fGB Allocated (%.3fGB Remaining)" %
((total-remain)/1024.**3, total/1024.**3, remain/1024.**3))
if zeros:
layers[0].init_data()
else:
# give the first layer some data
layers[0].init_data(np.random.uniform(0.0, 1.0, layers[0].dimO))
# Scale the initial weights so activations are bound around 1.0
# We do this by running it through the forward pass and collecting mean stats
# ng.bench = False
# propagation = None
# for layer in layers:
# propagation = layer.fprop(propagation, scale_weights=.5)
# ng.bench = layer_bench
start = drv.Event()
end = drv.Event()
fprop_time = 0
bprop_time = 0
fprop_flops = 0
bprop_flops = 0
        # We throw away the first (warmup) runs as they include pycuda kernel loading times and clock warmup.
        # So add the warmup count to our loop total.
for loop in range(loops+warmup):
loop = loop - warmup + 1
if loop < 0: loop = 0
start.record()
flops = 0
#fprop
propagation = None
for layer in layers:
propagation = layer.fprop(propagation)
flops += layer.flops
if print_stats:
layer.fprop_stats()
end.record()
end.synchronize()
msecs = end.time_since(start)
print("fprop(%2d): %8.3f msecs %8.3f gflops" %
(loop, msecs, flops / (msecs * 1000000.0)))
if loop > 0:
fprop_time += msecs
fprop_flops += flops
start.record()
flops = 0
#bprop
for layer in layers[:0:-1]:
propagation = layer.bprop(propagation)
flops += layer.flops * 2
if print_stats:
layer.bprop_stats()
end.record()
end.synchronize()
msecs = end.time_since(start)
print("bprop(%2d): %8.3f msecs %8.3f gflops" %
(loop, msecs, flops / (msecs * 1000000.0)))
if loop > 0:
bprop_time += msecs
bprop_flops += flops
if loops > 0:
print("---------------------------------------------")
print(name + " Results:")
print("---------------------------------------------")
print("Avg(%d) fprop: %8.3f msecs %.3f gflops" %
(loops, fprop_time/loops, fprop_flops / (fprop_time * 1000000.0)))
print("Avg(%d) bprop: %8.3f msecs %.3f gflops" %
(loops, bprop_time/loops, bprop_flops / (bprop_time * 1000000.0)))
fprop_time += bprop_time
fprop_flops += bprop_flops
print("Avg(%d) total: %8.3f msecs %.3f gflops\n\n" %
(loops, fprop_time/loops, fprop_flops / (fprop_time * 1000000.0)))
| nhynes/neon | neon/backends/convnet-benchmarks.py | Python | apache-2.0 | 27,193 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
from __future__ import print_function, unicode_literals, division, absolute_import
from builtins import (bytes, dict, int, list, object, range, str, # noqa
ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
from future import standard_library
standard_library.install_aliases() # noqa: Counter, OrderedDict,
import pytest
| totalgood/nlpia | tests/conftest.py | Python | mit | 492 |
# -*- coding: utf-8 -*-
r"""
.. _tut_background_filtering:
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general,
and how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in Parks & Burrus [1]_ and
Ifeachor and Jervis [2]_, and for filtering in an
M/EEG context we recommend reading Widmann *et al.* 2015 [7]_.
To see how to use the default filters in MNE-Python on actual data, see
the :ref:`tut_artifacts_filter` tutorial.
.. contents::
:local:
Problem statement
=================
The practical issues with filtering electrophysiological data are covered
well by Widmann *et al.* in [7]_, in a follow-up to an article where they
conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_].
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase SNR, but if it is not used carefully,
it can distort data. Here we hope to cover some filtering basics so
users can better understand filtering tradeoffs, and why MNE-Python has
chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + ... + b_M z^{-M}}
            {1 + a_1 z^{-1} + a_2 z^{-2} + ... + a_N z^{-N}} \\
         &= \frac{\sum_0^M b_k z^{-k}}{1 + \sum_1^N a_k z^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + ... + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - ... - a_N y(n - N)\\
&= \sum_0^M b_k x(n-k) - \sum_1^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over:
1. The numerator coefficients :math:`b_k`, which get multiplied by
the previous input :math:`x(n-k)` values, and
2. The denominator coefficients :math:`a_k`, which get multiplied by
the previous output :math:`y(n-k)` values.
Note that these summations in :eq:`summations` correspond nicely to
(1) a weighted `moving average`_ and (2) an autoregression_.
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses. (A tiny numerical illustration of this
difference follows right after this introduction.)
As outlined in [1]_, FIR and IIR have different tradeoffs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
2015 [7]_:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required (Ifeachor and Jervis, 2002[2]_, p. 321),
...FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always tradeoffs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency tradeoff, and it will
show up below.
FIR Filters
===========
First we will focus on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try designing a low-pass filter, and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG data.
import numpy as np
from scipy import signal, fftpack
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
import mne
sfreq = 1000.
f_p = 40.
ylim = [-60, 10] # for dB plots
xlim = [2, sfreq / 2.]
blue = '#1f77b4'
###############################################################################
# Take for example an ideal low-pass filter, which would give a value of 1 in
# the pass-band (up to frequency :math:`f_p`) and a value of 0 in the stop-band
# (down to frequency :math:`f_s`) such that :math:`f_p=f_s=40` Hz here
# (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
freq = [0, f_p, f_p, nyq]
gain = [1, 1, 0, 0]
def box_off(ax):
ax.grid(zorder=0)
for key in ('top', 'right'):
ax.spines[key].set_visible(False)
def plot_ideal(freq, gain, ax):
freq = np.maximum(freq, xlim[0])
xs, ys = list(), list()
my_freq, my_gain = list(), list()
for ii in range(len(freq)):
xs.append(freq[ii])
ys.append(ylim[0])
if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
xs += [freq[ii], freq[ii + 1]]
ys += [ylim[1]] * 2
my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
endpoint=False).tolist()
my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
endpoint=False).tolist()
else:
my_freq.append(freq[ii])
my_gain.append(gain[ii])
my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (ylim[0] / 10.)))
ax.fill_between(xs, ylim[0], ys, color='r', alpha=0.1)
ax.semilogx(my_freq, my_gain, 'r--', alpha=0.5, linewidth=4, zorder=3)
xticks = [1, 2, 4, 10, 20, 40, 100, 200, 400]
ax.set(xlim=xlim, ylim=ylim, xticks=xticks, xlabel='Frequency (Hz)',
ylabel='Amplitude (dB)')
ax.set(xticklabels=xticks)
box_off(ax)
half_height = np.array(plt.rcParams['figure.figsize']) * [1, 0.5]
ax = plt.subplots(1, figsize=half_height)[1]
plot_ideal(freq, gain, ax)
ax.set(title='Ideal %s Hz lowpass' % f_p)
mne.viz.tight_layout()
plt.show()
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontinuity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in frequency is actually a sinc_ function
# in time, which requires an infinite number of samples, and thus infinite
# time, to represent. So although this filter has ideal frequency suppression,
# it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 sec, and look
# at the filter itself in the time domain and the frequency domain:
n = int(round(0.1 * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)
def plot_filter(h, title, freq, gain, show=True):
if h.ndim == 2: # second-order sections
sos = h
n = mne.filter.estimate_ringing_samples(sos)
h = np.zeros(n)
h[0] = 1
h = signal.sosfilt(sos, h)
H = np.ones(512, np.complex128)
for section in sos:
f, this_H = signal.freqz(section[:3], section[3:])
H *= this_H
else:
f, H = signal.freqz(h)
fig, axs = plt.subplots(2)
t = np.arange(len(h)) / sfreq
axs[0].plot(t, h, color=blue)
axs[0].set(xlim=t[[0, -1]], xlabel='Time (sec)',
ylabel='Amplitude h(n)', title=title)
box_off(axs[0])
f *= sfreq / (2 * np.pi)
axs[1].semilogx(f, 10 * np.log10((H * H.conj()).real), color=blue,
linewidth=2, zorder=4)
plot_ideal(freq, gain, axs[1])
mne.viz.tight_layout()
if show:
plt.show()
plot_filter(h, 'Sinc (0.1 sec)', freq, gain)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 sec) gets us a
# bit better stop-band suppression, but still has a lot of ringing in
# the time domain. Note the x-axis is an order of magnitude longer here:
n = int(round(1. * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, 'Sinc (1.0 sec)', freq, gain)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 sec),
# with a resulting larger x-axis:
n = int(round(10. * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, 'Sinc (10.0 sec)', freq, gain)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire duration. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`, `MATLAB fir2`_)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <scipy.fftpack.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
# Frequency-domain construction is good when an arbitrary response
# is desired, but generally less clean (due to sampling issues) than
#           a windowed approach for more straightforward filter applications.
#           Since our filters (low-pass, high-pass, band-pass, band-stop)
#           are fairly simple and we require precise control of all frequency
#           regions, here we will use and explore primarily windowed FIR
#           design. (A brief comparison of the windowed, least-squares, and
#           Remez designs for the same specification appears just after the
#           transition-band example below.)
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10 # 10 Hz transition band
f_s = f_p + trans_bandwidth # = 50 Hz
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=half_height)[1]
plot_ideal(freq, gain, ax)
ax.set(title='%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth))
mne.viz.tight_layout()
plt.show()
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a
# smoother slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 sec filter:
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, 'Windowed 10-Hz transition (1.0 sec)', freq, gain)
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 sec) and still get okay
# stop-band attenuation:
n = int(round(sfreq * 0.5)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, 'Windowed 10-Hz transition (0.5 sec)', freq, gain)
###############################################################################
# But then if we shorten the filter too much (2 cycles of 10 Hz = 0.2 sec),
# our effective stop frequency gets pushed out past 60 Hz:
n = int(round(sfreq * 0.2)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, 'Windowed 10-Hz transition (0.2 sec)', freq, gain)
###############################################################################
# If we want to keep the filter reasonably short (0.2 sec here), we should
# probably use something more like a 25 Hz transition band
# (0.2 sec = 5 cycles @ 25 Hz):
trans_bandwidth = 25
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, 'Windowed 25-Hz transition (0.2 sec)', freq, gain)
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now let's look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random + line). Note that the original, clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.
center = 2.
morlet_freq = f_p
tlim = [center - 0.2, center + 0.2]
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]
x = np.zeros(int(sfreq * dur))
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()
rng = np.random.RandomState(0)
x += rng.randn(len(x)) / 1000.
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR and compensate for
# the delay:
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
x_shallow = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, 'MNE-Python 0.14 default', freq, gain)
###############################################################################
# This is actually set to become the default type of filter used in MNE-Python
# in 0.14 (see :ref:`tut_filtering_in_python`).
#
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
transition_band = 0.5 # Hz
f_s = f_p + transition_band
filter_dur = 10. # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
x_steep = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
plot_filter(h, 'MNE-Python 0.13 default', freq, gain)
###############################################################################
# Finally, let's also filter it with the
# MNE-C default, which is a long-duration steep-slope FIR filter designed
# using frequency-domain techniques:
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]
transition_band = 5 # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, 'MNE-C default', freq, gain)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially on signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axs = plt.subplots(1, 2)[1]
def plot_signal(x, offset):
t = np.arange(len(x)) / sfreq
axs[0].plot(t, x + offset)
axs[0].set(xlabel='Time (sec)', xlim=t[[0, -1]])
box_off(axs[0])
X = fftpack.fft(x)
freqs = fftpack.fftfreq(len(x), 1. / sfreq)
mask = freqs >= 0
X = X[mask]
freqs = freqs[mask]
axs[1].plot(freqs, 20 * np.log10(np.abs(X)))
axs[1].set(xlim=xlim)
yticks = np.arange(5) / -30.
yticklabels = ['Original', 'Noisy', 'FIR-shallow (0.14)', 'FIR-steep (0.13)',
'FIR-steep (MNE-C)']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
plot_signal(x_mne_c, offset=yticks[4])
axs[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.150, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axs[0].get_yticklabels():
text.set(rotation=45, size=8)
axs[1].set(xlim=flim, ylim=ylim, xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
box_off(axs[0])
box_off(axs[1])
mne.viz.tight_layout()
plt.show()
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter, and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few orders of filter,
# i.e., a few different number of coefficients used and therefore steepness
# of the filter:
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(sos, 'Butterworth order=2', freq, gain)
# Eventually this will just be scipy.signal.sosfiltfilt, but scipy 0.18 is
# not widely adopted yet (as of June 2016), so we use our wrapper...
sosfiltfilt = mne.fixes.get_sosfiltfilt()
x_shallow = sosfiltfilt(sos, x)
###############################################################################
# The falloff of this filter is not very steep.
#
# .. warning:: For brevity, we do not show the phase of these filters here.
# In the FIR case, we can design linear-phase filters, and
# compensate for the delay (making the filter acausal) if
# necessary. This cannot be done
# with IIR filters, as they have a non-linear phase.
# As the filter order increases, the
# phase distortion near and in the transition band worsens.
# However, if acausal (forward-backward) filtering can be used,
# e.g. with :func:`scipy.signal.filtfilt`, these phase issues
# can be mitigated.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given in tut_filtering_basics_ use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used when possible to do IIR filtering.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response:
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(sos, 'Butterworth order=8', freq, gain)
x_steep = sosfiltfilt(sos, x)
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
rp=1) # dB of acceptable pass-band ripple
plot_filter(sos, 'Chebychev-1 order=8, ripple=1 dB', freq, gain)
###############################################################################
# And if we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
rp=6)
plot_filter(sos, 'Chebychev-1 order=8, ripple=6 dB', freq, gain)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
axs = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axs[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axs[0].get_yticklabels():
text.set(rotation=45, size=8)
axs[1].set(xlim=flim, ylim=ylim, xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
box_off(axs[0])
box_off(axs[1])
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are acausal (zero-phase), can make
# activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen 2011 [3]_, investigations of commonly (at the time)
# used low-pass filters created artifacts when they were applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet 2012 [5]_.
#
# Perhaps more revealing, it was noted in Widmann & Schröger 2012 [6]_ that
# the problematic low-pass filters from VanRullen 2011 [3]_:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
# included "do-not-care" transition regions, which can lead to
# uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* 2012 [4]_ to:
#
# "...generate a systematic bias easily leading to misinterpretations of
# neural activity.”
#
# In a related paper, Widmann *et al.* 2015 [7]_ also came to suggest a 0.1 Hz
# highpass. More evidence of such distortions followed in Tanner *et al.*
# 2015 [8]_. In data from language ERP studies of semantic and syntactic
# processing (i.e., N400 and P600), a high-pass above 0.3 Hz caused
# significant effects to be introduced implausibly early when compared to the
# unfiltered data. From this, the authors suggested the optimal high-pass
# value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from Tanner *et al.* 2015 [8]_:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV, onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass and
# high-pass filters... No visible distortion to the original waveform
# [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = 'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axs = plt.subplots(2, 2)[1].ravel()
for ax, x_f, title in zip(axs, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
ax.plot(t, x, color='0.5')
ax.plot(t, x_f, color='k', linestyle='--')
ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
title=title, xlabel=xlabel, ylabel=ylabel)
box_off(ax)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm reported by Kappenman & Luck 2010 [12]_,
# they found that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving :ref:`ch_sample_data`,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* 2015 [8]_ suggest using baseline
# correction to remove slow drifts in data. However, Maess *et al.* 2016 [9]_
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* [10]_ rebutted that baseline correction can correct for
# problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
all_axs = plt.subplots(3, 2)[1]
for ri, (axs, freq) in enumerate(zip(all_axs, [0.1, 0.3, 0.5])):
for ci, ax in enumerate(axs):
if ci == 0:
iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
output='sos')
x_hp = sosfiltfilt(iir_hp, x, padlen=0)
else:
x_hp -= x_hp[t < 0].mean()
ax.plot(t, x, color='0.5')
ax.plot(t, x_hp, color='k', linestyle='--')
if ri == 0:
                ax.set(title=('No ' if ci == 0 else '') +
                       'Baseline Correction')
box_off(ax)
ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
horizontalalignment='right')
mne.viz.tight_layout()
plt.suptitle(title)
plt.show()
baseline_plot(x)
###############################################################################
# In response, Maess *et al.* 2016 [11]_ note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multielectrode recordings
#           the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin2`. In Widmann *et al.* 2015 [7]_, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "... providing a transition bandwidth of 25% of the lower passband
# edge but, where possible, not lower than 2 Hz and otherwise the
# distance from the passband edge to the critical frequency.”
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 45.0 | 11.25 | 5.0 |
# +------------------+-------------------+-------------------+
# | 48.0 | 12.0 | 2.0 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 6.2, 6.6, or 11.0 for the Hann, Hamming,
# or Blackman windows, respectively as selected by the ``fir_window``
# argument.
#
# .. note:: These multiplicative factors are double what is given in
# Ifeachor and Jervis [2]_ (p. 357). The window functions have a
# smearing effect on the frequency response; I&J thus take the
# approach of setting the stop frequency as
# :math:`f_s = f_p + f_{trans} / 2.`, but our stated definitions of
# :math:`f_s` and :math:`f_{trans}` do not
# allow us to do this in a nice way. Instead, we increase our filter
# length to achieve acceptable (20+ dB) attenuation by
# :math:`f_s = f_p + f_{trans}`, and excellent (50+ dB)
# attenuation by :math:`f_s + f_{trans}` (and usually earlier).
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on :ref:`tut_artifacts_filter`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M-EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in [7]_. Briefly:
#
# * EEGLAB
# MNE-Python in 0.14 defaults to behavior very similar to that of EEGLAB,
# see the `EEGLAB filtering FAQ`_ for more information.
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
#     For more information, see e.g. `FieldTrip band-pass documentation`_
#     and the minimal sketch below.
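#
# As a minimal sketch (``raw`` is assumed to be an already-loaded
# :class:`mne.io.Raw` instance and the cutoff frequencies are placeholders),
# a FieldTrip-like 4th-order Butterworth band-pass could be requested with::
#
#     raw.filter(l_freq=1., h_freq=40., method='iir',
#                iir_params=dict(order=4, ftype='butter'))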
#
# Summary
# =======
#
# When filtering, there are always tradeoffs that should be considered.
# One important tradeoff is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
#
# .. [1] Parks TW, Burrus CS (1987). Digital Filter Design.
# New York: Wiley-Interscience.
# .. [2] Ifeachor, E. C., & Jervis, B. W. (2002). Digital Signal Processing:
# A Practical Approach. Prentice Hall.
# .. [3] Vanrullen, R. (2011). Four common conceptual fallacies in mapping
# the time course of recognition. Perception Science, 2, 365.
# .. [4] Acunzo, D. J., MacKenzie, G., & van Rossum, M. C. W. (2012).
# Systematic biases in early ERP and ERF components as a result
# of high-pass filtering. Journal of Neuroscience Methods,
# 209(1), 212–218. http://doi.org/10.1016/j.jneumeth.2012.06.011
# .. [5] Rousselet, G. A. (2012). Does filtering preclude us from studying
# ERP time-courses? Frontiers in Psychology, 3(131)
# .. [6] Widmann, A., & Schröger, E. (2012). Filter effects and filter
# artifacts in the analysis of electrophysiological data.
# Perception Science, 233.
# .. [7] Widmann, A., Schröger, E., & Maess, B. (2015). Digital filter
# design for electrophysiological data – a practical approach.
# Journal of Neuroscience Methods, 250, 34–46.
# .. [8] Tanner, D., Morgan-Short, K., & Luck, S. J. (2015).
# How inappropriate high-pass filters can produce artifactual effects
# and incorrect conclusions in ERP studies of language and cognition.
# Psychophysiology, 52(8), 997–1009. http://doi.org/10.1111/psyp.12437
# .. [9] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis.
# Commentary on: “How inappropriate high-pass filters can produce
# artefacts and incorrect conclusions in ERP studies of language
# and cognition.” Journal of Neuroscience Methods, 266, 164–165.
# .. [10] Tanner, D., Norton, J. J. S., Morgan-Short, K., & Luck, S. J. (2016).
#         On high-pass filter artifacts (they're real) and baseline correction
#         (it's a good idea) in ERP/ERMF analysis.
#         Journal of Neuroscience Methods, 266, 166–170.
# .. [11] Maess, B., Schröger, E., & Widmann, A. (2016).
#         High-pass filters and baseline correction in M/EEG analysis-continued
#         discussion. Journal of Neuroscience Methods, 266, 171–172.
# .. [12] Kappenman E. & Luck, S. (2010). The effects of impedance on data
# quality and statistical significance in ERP recordings.
# Psychophysiology, 47, 888-904.
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: http://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: http://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: http://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _fieldtrip band-pass documentation: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter # noqa
| alexandrebarachant/mne-python | tutorials/plot_background_filtering.py | Python | bsd-3-clause | 42,317 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('quest_maker_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='quest',
name='start_date',
field=models.DateField(default=datetime.datetime(2015, 6, 6, 4, 6, 3, 717390, tzinfo=utc)),
),
]
| catherinev/quest_maker | quest_maker_app/migrations/0002_auto_20150605_2306.py | Python | mit | 509 |
from __future__ import division
from __future__ import print_function
def input(in_msg):
import inspect
in_msg.input_file = inspect.getfile(inspect.currentframe())
print("*** read input from ", in_msg.input_file)
# 8=MSG1, 9=MSG2, 10=MSG3
#in_msg.sat_nr=0
#in_msg.RSS=True
#in_msg.sat_nr=8
#in_msg.RSS=False
#in_msg.sat_nr=9
#in_msg.RSS=True
in_msg.sat_nr=10
in_msg.RSS=True
#in_msg.sat_nr=11
#in_msg.RSS=False
#in_msg.delay=5 # process image 'delay' minutes before now
if False:
# offline mode (always a fixed time) # ignores command line arguments
year=2015
month=2
day=10
hour=11
minute=45
in_msg.update_datetime(year, month, day, hour, minute)
# !!! if archive is used, adjust meteosat09.cfg accordingly !!!
#----------------------
# choose RGBs
#----------------------
#-------------------
    # choose RGB mode
#-------------------
## satellite channels
##in_msg.RGBs.append('VIS006') # black and white
##in_msg.RGBs.append('VIS008') # black and white
##in_msg.RGBs.append('IR_016') # black and white
##in_msg.RGBs.append('IR_039') # black and white
##in_msg.RGBs.append('WV_062') # black and white
##in_msg.RGBs.append('WV_073') # black and white
##in_msg.RGBs.append('IR_087') # black and white
##in_msg.RGBs.append('IR_097') # black and white
#in_msg.RGBs.append('IR_108') # black and white
##in_msg.RGBs.append('IR_120') # black and white
##in_msg.RGBs.append('IR_134') # black and white
#in_msg.RGBs.append('HRV') # black and white
#in_msg.RGBs.append('VIS006c') # colored version
#in_msg.RGBs.append('VIS008c') # colored version
#in_msg.RGBs.append('IR_016c') # colored version
#in_msg.RGBs.append('IR_039c') # colored version
#in_msg.RGBs.append('WV_062c') # colored version
#in_msg.RGBs.append('WV_073c') # colored version
#in_msg.RGBs.append('IR_087c') # colored version
#in_msg.RGBs.append('IR_097c') # colored version
#in_msg.RGBs.append('IR_108c') # colored version
#in_msg.RGBs.append('IR_120c') # colored version
#in_msg.RGBs.append('IR_134c') # colored version
#in_msg.RGBs.append('HRVc') # colored version
#-------------------
# satellite channel differences
#in_msg.RGBs.append('WV_062-WV_073')
#in_msg.RGBs.append('WV_062-IR_108')
#in_msg.RGBs.append('WV_073-IR_134')
#in_msg.RGBs.append('IR_087-IR_108')
#in_msg.RGBs.append('IR_087-IR_120')
#in_msg.RGBs.append('IR_120-IR_108')
#in_msg.RGBs.append('trichannel')
#in_msg.RGBs.append('IR_039-IR_108')
#in_msg.RGBs.append('VIS006-IR_016')
#-------------------
    # built-in RGBs, see http://mpop.readthedocs.org/en/latest/pp.html
# or http://oiswww.eumetsat.int/~idds/html/doc/best_practices.pdf
#------------------- # RED GREEN BLUE
#in_msg.RGBs.append('airmass') # WV_062-WV_073 IR_097-IR_108 -WV_062
#in_msg.RGBs.append('ash')
#in_msg.RGBs.append('cloudtop')
#in_msg.RGBs.append('convection') # WV_062-WV_073 IR_039-IR_108 IR_016-VIS006
##in_msg.RGBs.append('convection_co2')
in_msg.RGBs.append('day_microphysics') # VIS008 IR_039(solar) IR_108 # requires the pyspectral modul
#in_msg.RGBs.append('dust') # IR_120-IR_108 IR_108-IR_087 IR_108
#in_msg.RGBs.append('fog')
#in_msg.RGBs.append('green_snow')
#in_msg.RGBs.append('ir108')
#in_msg.RGBs.append('natural') # IR_016 VIS008 VIS006
#in_msg.RGBs.append('night_fog')
#in_msg.RGBs.append('night_microphysics') # IR_120-IR_108 IR_108-IR_039 IR_108
#in_msg.RGBs.append('night_overview')
#in_msg.RGBs.append('overview')
#in_msg.RGBs.append('overview_sun')
#in_msg.RGBs.append('red_snow')
#in_msg.RGBs.append('refl39_chan') # requires the pyspectral modul
#in_msg.RGBs.append('snow') # requires the pyspectral modul
#in_msg.RGBs.append('vis06')
#in_msg.RGBs.append('wv_high')
#in_msg.RGBs.append('wv_low')
#-------------------
# user defined RGBs
#in_msg.RGBs.append('HRoverview')
##in_msg.RGBs.append('sandwich')
#in_msg.RGBs.append('sza')
##in_msg.RGBs.append('ndvi')
#in_msg.RGBs.append('HRVFog')
#in_msg.RGBs.append('DayNightFog')
#in_msg.RGBs.append('HRVir108')
#-------------------
# NWC SAF
## NWC SAF PEG 1
#in_msg.RGBs.append('CMa')
#in_msg.RGBs.append('CMa_DUST')
#in_msg.RGBs.append('CMa_VOLCANIC')
#in_msg.RGBs.append('CMa_QUALITY')
## NWC SAF PEG 2
#in_msg.RGBs.append('CT')
#in_msg.RGBs.append('CT_PHASE')
#in_msg.RGBs.append('CT_QUALITY')
## NWC SAF PEG 3
#in_msg.RGBs.append('CTT')
#in_msg.RGBs.append('CTH')
#in_msg.RGBs.append('CTP')
## NWC SAF PEG 13
#in_msg.RGBs.append('sphr_bl')
#in_msg.RGBs.append('sphr_cape')
##in_msg.RGBs.append('sphr_diffbl')
##in_msg.RGBs.append('sphr_diffhl')
##in_msg.RGBs.append('sphr_diffki')
##in_msg.RGBs.append('sphr_diffli')
##in_msg.RGBs.append('sphr_diffml')
##in_msg.RGBs.append('sphr_diffshw')
##in_msg.RGBs.append('sphr_difftpw')
#in_msg.RGBs.append('sphr_hl')
#in_msg.RGBs.append('sphr_ki')
#in_msg.RGBs.append('sphr_li')
#in_msg.RGBs.append('sphr_ml')
#in_msg.RGBs.append('sphr_quality')
##in_msg.RGBs.append('sphr_sflag')
#in_msg.RGBs.append('sphr_shw')
#in_msg.RGBs.append('sphr_tpw')
#-------------------
# experimental
#in_msg.RGBs.append('clouddepth') # test according to Mecikalski, 2010
##in_msg.RGBs.append('RII')
#----------------
    # choose area
#----------------
in_msg.areas.append('ccs4') # CCS4 Swiss projection 710x640
#in_msg.areas.append('alps') # CCS4 Swiss projection 710x640
#in_msg.areas.append('ticino') # CCS4 Swiss projection 710x640
#in_msg.areas.append('EuropeCanary')
#in_msg.areas.append('EuropeCanary95')
#in_msg.areas.append('EuropeCanaryS95')
#in_msg.areas.append('germ') # Germany 1024x1024
#in_msg.areas.append('euro4') # Europe 4km, 1024x1024
#in_msg.areas.append('MSGHRVN') # High resolution northern quarter 11136x2784
#in_msg.areas.append('fullearth') # full earth 600x300 # does not yet work
#in_msg.areas.append('met09globe') # Cropped globe MSG image 3620x3620 # does not yet work
#in_msg.areas.append('met09globeFull') # Full globe MSG image 3712x3712 # does not yet work
in_msg.check_RSS_coverage()
in_msg.check_input = True # for radiances check always PRO and EPI files
#in_msg.check_input = False # for radiances check always PRO and EPI files
#in_msg.save_reprojected_data=['EuropeCanaryS95','ccs4']
#in_msg.save_reprojected_data=['ccs4']
in_msg.reprojected_data_filename='%(msg)s_%(area)s_%Y%m%d%H%M_rad.nc'
#in_msg.reprojected_data_filename='MSG_test_%Y%m%d%H%M.nc'
in_msg.reprojected_data_dir='/data/COALITION2/database/meteosat/ccs4/%Y/%m/%d/'
#in_msg.save_statistics=True
in_msg.HRV_enhancement=None
#in_msg.make_plots=False
in_msg.make_plots=True
in_msg.fill_value=(0,0,0) # black (0,0,0) / white (1,1,1) / transparent None
in_msg.outputFile = 'MSG_%(rgb)s-%(area)s_%y%m%d%H%M.png'
in_msg.outputDir = '/data/cinesat/out/'
#in_msg.outputDir='./pics/'
#in_msg.outputDir = "./%Y-%m-%d/%Y-%m-%d_%(rgb)s-%(area)s/"
#in_msg.outputDir='/data/COALITION2/PicturesSatellite/%Y-%m-%d/%Y-%m-%d_%(rgb)s-%(area)s/'
in_msg.outputFormat = ".png"
in_msg.compress_to_8bit=False
in_msg.scpOutput = False
#default: in_msg.scpOutputDir="las@lomux240:/www/proj/OTL/WOL/cll/satimages"
#default: in_msg.scpID="-i /home/cinesat/.ssh/id_dsa_las"
#default: in_msg.scpProducts = ['all']
in_msg.scpProducts = ['airmass','ash','cloudtop','convection','day_microphysics','dust','fog',\
'DayNightFog','green_snow','natural','night_fog','night_microphysics','night_overview','overview','red_snow',\
'HRoverview','THX-radar-convection','TRT-radar-convection','radar-convection']
#in_msg.scpID2="-i /opt/users/cinesat/monti-pytroll/scripts/id_rsa_las"
#in_msg.scpOutputDir2='las@zueub241:/srn/las/www/satellite/DATA/MSG_%(rgb)s-%(area)s_'
#in_msg.scpProducts2 = ['airmass','convection','HRoverview','natural']
#in_msg.scpProducts2 = ['airmass','ash','cloudtop','convection','day_microphysics','dust','fog',\
# 'DayNightFog','green_snow','natural','night_fog','night_microphysics','night_overview','overview','red_snow',\
# 'HRoverview','THX-radar-convection','TRT-radar-convection','radar-convection']
# please download the shape file
# in_msg.mapDir='/data/OWARNA/hau/maps_pytroll/'
in_msg.add_title = True
in_msg.title = None
in_msg.add_borders = True
in_msg.add_rivers = False
in_msg.add_logos = True
in_msg.add_colorscale = True
in_msg.fixed_minmax = True
in_msg.postprocessing_areas=['ccs4']
in_msg.postprocessing_composite=["THX-radar-convection","TRT-radar-convection","radar-ir108","radar-HRV"]
| meteoswiss-mdr/monti-pytroll | scripts/input_test.py | Python | lgpl-3.0 | 9,570 |
from django.contrib import admin
from webservices.models import *
# Register your models here.
class MakeAdminObj(admin.ModelAdmin):
    fields = ['name']
class ModelAdminObj(admin.ModelAdmin):
    fields = ['make', 'name', 'year']
class UserAdminObj(admin.ModelAdmin):
    fields = ['email', 'name']
class VehicleAdminObj(admin.ModelAdmin):
    fields = ['owner', 'model']
class GasStopAdminObj(admin.ModelAdmin):
fields = ('vehicle', 'date', ('latitude', 'longitude'), 'odometer', ('fuel_purchased', 'price'))
class SessionAdminObj(admin.ModelAdmin):
    fields = ['user', 'token']
admin.site.register(Make, MakeAdminObj)
admin.site.register(Model, ModelAdminObj)
admin.site.register(User, UserAdminObj)
admin.site.register(Vehicle, VehicleAdminObj)
admin.site.register(GasStop, GasStopAdminObj)
admin.site.register(Session, SessionAdminObj)
| Dorthu/FuelEconomizer | webservices/admin.py | Python | gpl-3.0 | 856 |
#! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
import sys
import os
import urllib.parse
from email.parser import FeedParser
from warnings import warn
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding)
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
DeprecationWarning, 2)
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
DeprecationWarning, 2)
return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
import http.client
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = http.client.parse_headers(fp)
clength = headers.get('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError('Maximum content length exceeded')
data = fp.read(bytes)
else:
data = ""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line.startswith("--"):
terminator = line.rstrip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == "\r\n":
line = line[:-2]
elif line[-1:] == "\n":
line = line[:-1]
lines[-1] = line
data = "".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
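    A rough usage example (a sketch, not a doctest)::
        ctype, params = parse_header('text/html; charset=utf-8')
        # ctype == 'text/html', params == {'charset': 'utf-8'}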
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
The items are either a Python list (if there's multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
and returns *bytes*
file: the file(-like) object from which you can read the data *as
        bytes*; None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
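    A typical CGI script might use it roughly like this (a sketch; the
    field names are placeholders)::
        form = FieldStorage()
        username = form.getfirst("username", "")
        attachments = form.getlist("attachment")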
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace'):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
Can be :
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
between the form content-length and the number of bytes already
read
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
for the page sending the form (content-type : meta http-equiv or
header)
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
fp = BytesIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
if fp is None:
self.fp = sys.stdin.buffer
# self.fp.read() must return bytes
elif isinstance(fp, TextIOWrapper):
self.fp = fp.buffer
else:
self.fp = fp
self.encoding = encoding
self.errors = errors
self.headers = headers
if not isinstance(outerboundary, bytes):
raise TypeError('outerboundary must be bytes, not %s'
% type(outerboundary).__name__)
self.outerboundary = outerboundary
self.bytes_read = 0
self.limit = limit
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
self._binary_file = self.filename is not None
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
if 'boundary' in pdict:
self.innerboundary = pdict['boundary'].encode(self.encoding)
else:
self.innerboundary = b""
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError('Maximum content length exceeded')
self.length = clen
if self.limit is None and clen:
self.limit = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError(name)
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError("not indexable")
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError(key)
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if isinstance(value, list):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError("not indexable")
return list(set(item.name for item in self.list))
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError("not indexable")
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __nonzero__(self):
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if not isinstance(qs, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(qs).__name__))
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
self.list = []
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
self.list = []
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
FieldStorageClass = None
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
if not isinstance(first_line, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(first_line).__name__))
self.bytes_read += len(first_line)
# first line holds boundary ; ignore it, or check that
# b"--" + ib == first_line.strip() ?
while True:
parser = FeedParser()
hdr_text = b""
while True:
data = self.fp.readline()
hdr_text += data
if not data.strip():
break
if not hdr_text:
break
# parser takes strings, not bytes
self.bytes_read += len(hdr_text)
parser.feed(hdr_text.decode(self.encoding, self.errors))
headers = parser.close()
part = klass(self.fp, headers, ib, environ, keep_blank_values,
strict_parsing,self.limit-self.bytes_read,
self.encoding, self.errors)
self.bytes_read += part.bytes_read
self.list.append(part)
if self.bytes_read >= self.length:
break
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize)) # bytes
if not isinstance(data, bytes):
raise ValueError("%s should return bytes, got %s"
% (self.fp, type(data).__name__))
self.bytes_read += len(data)
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
if self._binary_file:
self.file = self.__file = BytesIO() # store data as bytes for files
else:
self.file = self.__file = StringIO() # as strings for other fields
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
"""line is always bytes, not string"""
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file()
data = self.__file.getvalue()
self.file.write(data)
self.__file = None
if self._binary_file:
# keep bytes
self.file.write(line)
else:
# decode to string
self.file.write(line.decode(self.encoding, self.errors))
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary.
Data is read as bytes: boundaries and line ends must be converted
to bytes for comparisons.
"""
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
delim = b""
last_line_lfend = True
_read = 0
while 1:
if _read >= self.limit:
break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
_read += len(line)
if not line:
self.done = -1
break
if line.startswith(b"--") and last_line_lfend:
strippedline = line.rstrip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
odelim = delim
if line.endswith(b"\r\n"):
delim = b"\r\n"
line = line[:-2]
last_line_lfend = True
elif line.endswith(b"\n"):
delim = b"\n"
line = line[:-1]
last_line_lfend = True
else:
delim = b""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
last_line_lfend = True
while True:
line = self.fp.readline(1<<16)
self.bytes_read += len(line)
if not line:
self.done = -1
break
if line.endswith(b"--") and last_line_lfend:
strippedline = line.strip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
last_line_lfend = line.endswith(b'\n')
def make_file(self):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The file is opened in binary mode for files, in text mode
for other fields
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
if self._binary_file:
return tempfile.TemporaryFile("wb+")
else:
return tempfile.TemporaryFile("w+",
encoding=self.encoding, newline = '\n')
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print("Content-type: text/html")
print()
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec("testing print_exception() -- <I>italics?</I>")
def g(f=f):
f()
print("<H3>What follows is a test, not an actual exception:</H3>")
g()
except:
print_exception()
print("<H1>Second try with a small maxlen...</H1>")
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print()
print("<H3>Traceback (most recent call last):</H3>")
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print("<PRE>%s<B>%s</B></PRE>" % (
html.escape("".join(list[:-1])),
html.escape(list[-1]),
))
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = sorted(environ.keys())
print()
print("<H3>Shell Environment:</H3>")
print("<DL>")
for key in keys:
print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
print("</DL>")
print()
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = sorted(form.keys())
print()
print("<H3>Form Contents:</H3>")
if not keys:
print("<P>No form fields.")
print("<DL>")
for key in keys:
print("<DT>" + html.escape(key) + ":", end=' ')
value = form[key]
print("<i>" + html.escape(repr(type(value))) + "</i>")
print("<DD>" + html.escape(repr(value)))
print("</DL>")
print()
def print_directory():
"""Dump the current directory as HTML."""
print()
print("<H3>Current Working Directory:</H3>")
try:
pwd = os.getcwd()
except os.error as msg:
print("os.error:", html.escape(str(msg)))
else:
print(html.escape(pwd))
print()
def print_arguments():
print()
print("<H3>Command Line Arguments:</H3>")
print()
print(sys.argv)
print()
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
# Utilities
# =========
def escape(s, quote=None):
"""Deprecated API."""
warn("cgi.escape is deprecated, use html.escape instead",
PendingDeprecationWarning, stacklevel=2)
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
def valid_boundary(s, _vb_pattern=None):
import re
if isinstance(s, bytes):
_vb_pattern = b"^[ -~]{0,200}[!-~]$"
else:
_vb_pattern = "^[ -~]{0,200}[!-~]$"
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
| wdv4758h/ZipPy | lib-python/3/cgi.py | Python | bsd-3-clause | 34,511 |
import os
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
v = open(os.path.join(os.path.dirname(__file__), "alembic", "__init__.py"))
VERSION = (
re.compile(r""".*__version__ = ["'](.*?)["']""", re.S)
.match(v.read())
.group(1)
)
v.close()
class UseTox(TestCommand):
RED = 31
RESET_SEQ = "\033[0m"
BOLD_SEQ = "\033[1m"
COLOR_SEQ = "\033[1;%dm"
def run_tests(self):
sys.stderr.write(
"%s%spython setup.py test is deprecated by pypa. Please invoke "
"'tox' with no arguments for a basic test run.\n%s"
% (self.COLOR_SEQ % self.RED, self.BOLD_SEQ, self.RESET_SEQ)
)
sys.exit(1)
setup(
version=VERSION,
cmdclass={"test": UseTox},
)
| sqlalchemy/alembic | setup.py | Python | mit | 795 |
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Kimmo Parviainen-Jalanko.
#
import os
import re
import logging
import binascii
from .conv import to_bytes
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.CRITICAL)
_PRIME_STRS = {
1: ''' FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A63A3620 FFFFFFFF FFFFFFFF''',
2: '''FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE65381
FFFFFFFF FFFFFFFF''',
5: '''FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF''',
14: '''FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
15728E5A 8AACAA68 FFFFFFFF FFFFFFFF''',
15: '''FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF''',
16: '''FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7
88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA
2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6
287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED
1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9
93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199
FFFFFFFF FFFFFFFF''',
17: '''FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C
180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718
3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D
04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D
B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226
1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC
E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26
99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB
04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2
233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127
D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492
36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406
AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918
DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151
2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03
F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F
BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA
CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B
B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632
387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E
6DCC4024 FFFFFFFF FFFFFFFF''',
18: '''FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7
88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA
2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6
287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED
1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9
93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492
36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD
F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831
179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B
DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF
5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6
D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3
23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA
CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328
06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C
DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE
12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4
38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300
741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568
3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9
22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B
4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A
062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36
4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1
B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92
4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47
9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71
60C980DD 98EDD3DF FFFFFFFF FFFFFFFF'''
}
PRIMES = dict(
(k, int(re.sub('\s+', '', v), 16)) for k, v in _PRIME_STRS.items())
class DiffieHellman(object):
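    """Diffie-Hellman key exchange over fixed, well-known MODP groups.
    Rough usage sketch (both peers must use the same ``group``)::
        alice = DiffieHellman(group=14)
        bob = DiffieHellman(group=14)
        shared = alice.derivate(bob.public_key)
        assert shared == bob.derivate(alice.public_key)
    """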
generator = 2
def __init__(self, group=14, n=64):
self.group = group
self.bits = n * 8
self.generate_private_key(n)
self.generate_public_key()
def generate_private_key(self, n):
self.private_key = int(binascii.hexlify(os.urandom(n)), 16)
def generate_public_key(self):
self.public_key = pow(self.generator, self.private_key,
PRIMES[self.group])
def derivate(self, other_key):
self._s = pow(other_key, self.private_key, PRIMES[self.group])
return self.shared_secret
@property
def shared_secret(self):
return to_bytes(self._s)
| kimvais/ike | ike/util/dh.py | Python | mit | 10,139 |
from django.contrib import admin
from contact.models import Venue
class VenueAdmin(admin.ModelAdmin):
list_display = (
'__str__',
'is_active',
'sort',
)
admin.site.register(Venue, VenueAdmin)
| vandorjw/notes | vandorjw/contact/admin.py | Python | mit | 226 |
# -*- coding: utf-8 -*-
"""Beautiful Soup bonus library: Unicode, Dammit
This library converts a bytestream to Unicode through any means
necessary. It is heavily based on code from Mark Pilgrim's Universal
Feed Parser. It works best on XML and HTML, but it does not rewrite the
XML or HTML to reflect a new encoding; that's the tree builder's job.
"""
from __future__ import unicode_literals, print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
text_type = str
binary_type = bytes
unicode = str
basestring = str
else:
range = xrange
text_type = unicode
binary_type = str
chr = unichr
from pdb import set_trace
import codecs
if PY3:
from html.entities import codepoint2name
else:
from htmlentitydefs import codepoint2name
import re
import logging
import string
# Import a library to autodetect character encodings.
chardet_type = None
try:
# First try the fast C implementation.
# PyPI package: cchardet
import cchardet
def chardet_dammit(s):
return cchardet.detect(s)['encoding']
except ImportError:
try:
# Fall back to the pure Python implementation
# Debian package: python-chardet
# PyPI package: chardet
import chardet
def chardet_dammit(s):
return chardet.detect(s)['encoding']
#import chardet.constants
#chardet.constants._debug = 1
except ImportError:
# No chardet available.
def chardet_dammit(s):
return None
# Available from http://cjkpython.i18n.org/.
try:
import iconv_codec
except ImportError:
pass
xml_encoding_re = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I)
html_meta_re = re.compile(
'<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I)
class EntitySubstitution(object):
"""Substitute XML or HTML entities for the corresponding characters."""
def _populate_class_variables():
lookup = {}
reverse_lookup = {}
characters_for_re = []
for codepoint, name in list(codepoint2name.items()):
character = chr(codepoint)
if codepoint != 34:
# There's no point in turning the quotation mark into
# ", unless it happens within an attribute value, which
# is handled elsewhere.
characters_for_re.append(character)
lookup[character] = name
# But we do want to turn " into the quotation mark.
reverse_lookup[name] = character
re_definition = "[%s]" % "".join(characters_for_re)
return lookup, reverse_lookup, re.compile(re_definition)
(CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
CHARACTER_TO_XML_ENTITY = {
"'": "apos",
'"': "quot",
"&": "amp",
"<": "lt",
">": "gt",
"\u00a0" : "#160",
}
BARE_AMPERSAND_OR_BRACKET = re.compile(r"([<>\u00a0]|"
"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
")")
IS_ENTITY = re.compile("(&#\d+;|&#x[0-9a-fA-F]+;|&\w+;)")
AMPERSAND_OR_BRACKET = re.compile(r"([<>&\u00a0])")
@classmethod
def _substitute_html_entity(cls, matchobj):
entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
return "&%s;" % entity
@classmethod
def _substitute_xml_entity(cls, matchobj):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
return "&%s;" % entity
@classmethod
def quoted_attribute_value(self, value):
"""Make a value into a quoted XML attribute, possibly escaping it.
Most strings will be quoted using double quotes.
Bob's Bar -> "Bob's Bar"
If a string contains double quotes, it will be quoted using
single quotes.
Welcome to "my bar" -> 'Welcome to "my bar"'
If a string contains both single and double quotes, the
double quotes will be escaped, and the string will be quoted
using double quotes.
Welcome to "Bob's Bar" -> "Welcome to "Bob's bar"
"""
"""
Robustness fix for bs4
But many other downstream processors of both html and xml
really don't deal well with single quotes instead of the more
standard double-quotes. So simply replace them with their xml
entity regardless
"""
quote_with = '"'
if '"' in value:
# if "'" in value:
# # The string contains both single and double
# # quotes. Turn the double quotes into
# # entities. We quote the double quotes rather than
# # the single quotes because the entity name is
# # """ whether this is HTML or XML. If we
# # quoted the single quotes, we'd have to decide
# # between ' and &squot;.
# replace_with = """
# value = value.replace('"', replace_with)
# else:
# # There are double quotes but no single quotes.
# # We can use single quotes to quote the attribute.
# quote_with = "'"
replace_with = """
value = value.replace('"', replace_with)
return quote_with + value + quote_with
@classmethod
def substitute_xml(cls, value, make_quoted_attribute=False):
"""Substitute XML entities for special XML characters.
:param value: A string to be substituted. The less-than sign
will become <, the greater-than sign will become >,
and any ampersands will become &. If you want ampersands
that appear to be part of an entity definition to be left
alone, use substitute_xml_containing_entities() instead.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets and ampersands.
value = cls.AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_xml_containing_entities(
cls, value, make_quoted_attribute=False):
"""Substitute XML entities for special XML characters.
:param value: A string to be substituted. The less-than sign will
become <, the greater-than sign will become >, and any
          ampersands that are not part of an entity definition will
become &.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets, and ampersands that aren't part of
# entities.
value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_html(cls, s):
"""Replace certain Unicode characters with named HTML entities.
This differs from data.encode(encoding, 'xmlcharrefreplace')
in that the goal is to make the result more readable (to those
with ASCII displays) rather than to recover from
errors. There's absolutely nothing wrong with a UTF-8 string
        containing a LATIN SMALL LETTER E WITH ACUTE, but replacing that
character with "é" will make it more readable to some
people.
"""
# ignore already existing entities
pieces = cls.IS_ENTITY.split(s)
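        # IS_ENTITY has a capturing group, so re.split() alternates plain text and entity
        # matches; only the even-indexed (plain text) pieces need substitution.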
for i in range(0,len(pieces),2):
piece = pieces[i]
pieces[i] = cls.CHARACTER_TO_HTML_ENTITY_RE.sub(cls._substitute_html_entity, piece)
return "".join(pieces)
# return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
# cls._substitute_html_entity, s)
class EncodingDetector:
"""Suggests a number of possible encodings for a bytestring.
Order of precedence:
1. Encodings you specifically tell EncodingDetector to try first
(the override_encodings argument to the constructor).
2. An encoding declared within the bytestring itself, either in an
XML declaration (if the bytestring is to be interpreted as an XML
document), or in a <meta> tag (if the bytestring is to be
interpreted as an HTML document.)
3. An encoding detected through textual analysis by chardet,
cchardet, or a similar external library.
4. UTF-8.
5. Windows-1252.
"""
def __init__(self, markup, override_encodings=None, is_html=False,
exclude_encodings=None):
self.override_encodings = override_encodings or []
exclude_encodings = exclude_encodings or []
self.exclude_encodings = set([x.lower() for x in exclude_encodings])
self.chardet_encoding = None
self.is_html = is_html
self.declared_encoding = None
# First order of business: strip a byte-order mark.
self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)
def _usable(self, encoding, tried):
if encoding is not None:
encoding = encoding.lower()
if encoding in self.exclude_encodings:
return False
if encoding not in tried:
tried.add(encoding)
return True
return False
@property
def encodings(self):
"""Yield a number of encodings that might work for this markup."""
tried = set()
for e in self.override_encodings:
if self._usable(e, tried):
yield e
# Did the document originally start with a byte-order mark
# that indicated its encoding?
if self._usable(self.sniffed_encoding, tried):
yield self.sniffed_encoding
# Look within the document for an XML or HTML encoding
# declaration.
if self.declared_encoding is None:
self.declared_encoding = self.find_declared_encoding(
self.markup, self.is_html)
if self._usable(self.declared_encoding, tried):
yield self.declared_encoding
# Use third-party character set detection to guess at the
# encoding.
if self.chardet_encoding is None:
self.chardet_encoding = chardet_dammit(self.markup)
if self._usable(self.chardet_encoding, tried):
yield self.chardet_encoding
# As a last-ditch effort, try utf-8 and windows-1252.
for e in ('utf-8', 'windows-1252'):
if self._usable(e, tried):
yield e
@classmethod
def strip_byte_order_mark(cls, data):
"""If a byte-order mark is present, strip it and return the encoding it implies."""
encoding = None
if isinstance(data, text_type):
# Unicode data cannot have a byte-order mark.
return data, encoding
        if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
               and (data[2:4] != b'\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
        elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \
                 and (data[2:4] != b'\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == b'\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == b'\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == b'\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
return data, encoding
@classmethod
def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False):
"""Given a document, tries to find its declared encoding.
An XML encoding is declared at the beginning of the document.
An HTML encoding is declared in a <meta> tag, hopefully near the
beginning of the document.
"""
if search_entire_document:
xml_endpos = html_endpos = len(markup)
else:
xml_endpos = 1024
html_endpos = max(2048, int(len(markup) * 0.05))
declared_encoding = None
declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
if not declared_encoding_match and is_html:
declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
if declared_encoding_match is not None:
declared_encoding = declared_encoding_match.groups()[0].decode(
'ascii', 'replace')
if declared_encoding:
return declared_encoding.lower()
return None
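# Typical use of EncodingDetector (sketch): iterate over detector.encodings and try each
# candidate until one decodes cleanly; UnicodeDammit below implements exactly that loop.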
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = {"macintosh": "mac-roman",
"x-sjis": "shift-jis"}
ENCODINGS_WITH_SMART_QUOTES = [
"windows-1252",
"iso-8859-1",
"iso-8859-2",
]
def __init__(self, markup, override_encodings=[],
smart_quotes_to=None, is_html=False, exclude_encodings=[]):
self.smart_quotes_to = smart_quotes_to
self.tried_encodings = []
self.contains_replacement_characters = False
self.is_html = is_html
self.detector = EncodingDetector(
markup, override_encodings, is_html, exclude_encodings)
# Short-circuit if the data is in Unicode to begin with.
if isinstance(markup, text_type) or markup == b'':
self.markup = markup
self.unicode_markup = unicode(markup)
self.original_encoding = None
return
# The encoding detector may have stripped a byte-order mark.
# Use the stripped markup from this point on.
self.markup = self.detector.markup
u = None
for encoding in self.detector.encodings:
markup = self.detector.markup
u = self._convert_from(encoding)
if u is not None:
break
if not u:
# None of the encodings worked. As an absolute last resort,
# try them again with character replacement.
for encoding in self.detector.encodings:
if encoding != "ascii":
u = self._convert_from(encoding, "replace")
if u is not None:
logging.warning(
"Some characters could not be decoded, and were "
"replaced with REPLACEMENT CHARACTER.")
self.contains_replacement_characters = True
break
# If none of that worked, we could at this point force it to
# ASCII, but that would destroy so much data that I think
# giving up is better.
self.unicode_markup = u
if not u:
self.original_encoding = None
def _sub_ms_char(self, match):
"""Changes a MS smart quote character to an XML or HTML
entity, or an ASCII character."""
orig = match.group(1)
if self.smart_quotes_to == 'ascii':
sub = self.MS_CHARS_TO_ASCII.get(orig).encode()
else:
sub = self.MS_CHARS.get(orig)
if type(sub) == tuple:
if self.smart_quotes_to == 'xml':
sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
else:
sub = '&'.encode() + sub[0].encode() + ';'.encode()
else:
sub = sub.encode()
return sub
def _convert_from(self, proposed, errors="strict"):
proposed = self.find_codec(proposed)
if not proposed or (proposed, errors) in self.tried_encodings:
return None
self.tried_encodings.append((proposed, errors))
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if (self.smart_quotes_to is not None
and proposed in self.ENCODINGS_WITH_SMART_QUOTES):
smart_quotes_re = b"([\x80-\x9f])"
smart_quotes_compiled = re.compile(smart_quotes_re)
markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)
try:
# print("Trying to convert document to %s (errors=%s)" % (proposed, errors))
u = self._to_unicode(markup, proposed, errors)
self.markup = u
self.original_encoding = proposed
except Exception as e:
# print("That didn't work!")
# print(e)
return None
# print("Correct encoding: %s" % proposed)
return self.markup
def _to_unicode(self, data, encoding, errors="strict"):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
return unicode(data, encoding, errors)
@property
def declared_html_encoding(self):
if not self.is_html:
return None
return self.detector.declared_encoding
def find_codec(self, charset):
value = (self._codec(self.CHARSET_ALIASES.get(charset, charset))
or (charset and self._codec(charset.replace("-", "")))
or (charset and self._codec(charset.replace("-", "_")))
or (charset and charset.lower())
or charset
)
if value:
return value.lower()
return None
def _codec(self, charset):
if not charset:
return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
# A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
MS_CHARS = {b'\x80': ('euro', '20AC'),
b'\x81': ' ',
b'\x82': ('sbquo', '201A'),
b'\x83': ('fnof', '192'),
b'\x84': ('bdquo', '201E'),
b'\x85': ('hellip', '2026'),
b'\x86': ('dagger', '2020'),
b'\x87': ('Dagger', '2021'),
b'\x88': ('circ', '2C6'),
b'\x89': ('permil', '2030'),
b'\x8A': ('Scaron', '160'),
b'\x8B': ('lsaquo', '2039'),
b'\x8C': ('OElig', '152'),
b'\x8D': '?',
b'\x8E': ('#x17D', '17D'),
b'\x8F': '?',
b'\x90': '?',
b'\x91': ('lsquo', '2018'),
b'\x92': ('rsquo', '2019'),
b'\x93': ('ldquo', '201C'),
b'\x94': ('rdquo', '201D'),
b'\x95': ('bull', '2022'),
b'\x96': ('ndash', '2013'),
b'\x97': ('mdash', '2014'),
b'\x98': ('tilde', '2DC'),
b'\x99': ('trade', '2122'),
b'\x9a': ('scaron', '161'),
b'\x9b': ('rsaquo', '203A'),
b'\x9c': ('oelig', '153'),
b'\x9d': '?',
b'\x9e': ('#x17E', '17E'),
b'\x9f': ('Yuml', ''),}
# A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
# horrors like stripping diacritical marks to turn á into a, but also
# contains non-horrors like turning “ into ".
MS_CHARS_TO_ASCII = {
b'\x80' : 'EUR',
b'\x81' : ' ',
b'\x82' : ',',
b'\x83' : 'f',
b'\x84' : ',,',
b'\x85' : '...',
b'\x86' : '+',
b'\x87' : '++',
b'\x88' : '^',
b'\x89' : '%',
b'\x8a' : 'S',
b'\x8b' : '<',
b'\x8c' : 'OE',
b'\x8d' : '?',
b'\x8e' : 'Z',
b'\x8f' : '?',
b'\x90' : '?',
b'\x91' : "'",
b'\x92' : "'",
b'\x93' : '"',
b'\x94' : '"',
b'\x95' : '*',
b'\x96' : '-',
b'\x97' : '--',
b'\x98' : '~',
b'\x99' : '(TM)',
b'\x9a' : 's',
b'\x9b' : '>',
b'\x9c' : 'oe',
b'\x9d' : '?',
b'\x9e' : 'z',
b'\x9f' : 'Y',
b'\xa0' : ' ',
b'\xa1' : '!',
b'\xa2' : 'c',
b'\xa3' : 'GBP',
b'\xa4' : '$', #This approximation is especially parochial--this is the
#generic currency symbol.
b'\xa5' : 'YEN',
b'\xa6' : '|',
b'\xa7' : 'S',
b'\xa8' : '..',
b'\xa9' : '',
b'\xaa' : '(th)',
b'\xab' : '<<',
b'\xac' : '!',
b'\xad' : ' ',
b'\xae' : '(R)',
b'\xaf' : '-',
b'\xb0' : 'o',
b'\xb1' : '+-',
b'\xb2' : '2',
b'\xb3' : '3',
b'\xb4' : ("'", 'acute'),
b'\xb5' : 'u',
b'\xb6' : 'P',
b'\xb7' : '*',
b'\xb8' : ',',
b'\xb9' : '1',
b'\xba' : '(th)',
b'\xbb' : '>>',
b'\xbc' : '1/4',
b'\xbd' : '1/2',
b'\xbe' : '3/4',
b'\xbf' : '?',
b'\xc0' : 'A',
b'\xc1' : 'A',
b'\xc2' : 'A',
b'\xc3' : 'A',
b'\xc4' : 'A',
b'\xc5' : 'A',
b'\xc6' : 'AE',
b'\xc7' : 'C',
b'\xc8' : 'E',
b'\xc9' : 'E',
b'\xca' : 'E',
b'\xcb' : 'E',
b'\xcc' : 'I',
b'\xcd' : 'I',
b'\xce' : 'I',
b'\xcf' : 'I',
b'\xd0' : 'D',
b'\xd1' : 'N',
b'\xd2' : 'O',
b'\xd3' : 'O',
b'\xd4' : 'O',
b'\xd5' : 'O',
b'\xd6' : 'O',
b'\xd7' : '*',
b'\xd8' : 'O',
b'\xd9' : 'U',
b'\xda' : 'U',
b'\xdb' : 'U',
b'\xdc' : 'U',
b'\xdd' : 'Y',
b'\xde' : 'b',
b'\xdf' : 'B',
b'\xe0' : 'a',
b'\xe1' : 'a',
b'\xe2' : 'a',
b'\xe3' : 'a',
b'\xe4' : 'a',
b'\xe5' : 'a',
b'\xe6' : 'ae',
b'\xe7' : 'c',
b'\xe8' : 'e',
b'\xe9' : 'e',
b'\xea' : 'e',
b'\xeb' : 'e',
b'\xec' : 'i',
b'\xed' : 'i',
b'\xee' : 'i',
b'\xef' : 'i',
b'\xf0' : 'o',
b'\xf1' : 'n',
b'\xf2' : 'o',
b'\xf3' : 'o',
b'\xf4' : 'o',
b'\xf5' : 'o',
b'\xf6' : 'o',
b'\xf7' : '/',
b'\xf8' : 'o',
b'\xf9' : 'u',
b'\xfa' : 'u',
b'\xfb' : 'u',
b'\xfc' : 'u',
b'\xfd' : 'y',
b'\xfe' : 'b',
b'\xff' : 'y',
}
# A map used when removing rogue Windows-1252/ISO-8859-1
# characters in otherwise UTF-8 documents.
#
# Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in
# Windows-1252.
WINDOWS_1252_TO_UTF8 = {
0x80 : b'\xe2\x82\xac', # €
0x82 : b'\xe2\x80\x9a', # ‚
0x83 : b'\xc6\x92', # ƒ
0x84 : b'\xe2\x80\x9e', # „
0x85 : b'\xe2\x80\xa6', # …
0x86 : b'\xe2\x80\xa0', # †
0x87 : b'\xe2\x80\xa1', # ‡
0x88 : b'\xcb\x86', # ˆ
0x89 : b'\xe2\x80\xb0', # ‰
0x8a : b'\xc5\xa0', # Š
0x8b : b'\xe2\x80\xb9', # ‹
0x8c : b'\xc5\x92', # Œ
0x8e : b'\xc5\xbd', # Ž
0x91 : b'\xe2\x80\x98', # ‘
0x92 : b'\xe2\x80\x99', # ’
0x93 : b'\xe2\x80\x9c', # “
0x94 : b'\xe2\x80\x9d', # ”
0x95 : b'\xe2\x80\xa2', # •
0x96 : b'\xe2\x80\x93', # –
0x97 : b'\xe2\x80\x94', # —
0x98 : b'\xcb\x9c', # ˜
0x99 : b'\xe2\x84\xa2', # ™
0x9a : b'\xc5\xa1', # š
0x9b : b'\xe2\x80\xba', # ›
0x9c : b'\xc5\x93', # œ
0x9e : b'\xc5\xbe', # ž
0x9f : b'\xc5\xb8', # Ÿ
0xa0 : b'\xc2\xa0', #
0xa1 : b'\xc2\xa1', # ¡
0xa2 : b'\xc2\xa2', # ¢
0xa3 : b'\xc2\xa3', # £
0xa4 : b'\xc2\xa4', # ¤
0xa5 : b'\xc2\xa5', # ¥
0xa6 : b'\xc2\xa6', # ¦
0xa7 : b'\xc2\xa7', # §
0xa8 : b'\xc2\xa8', # ¨
0xa9 : b'\xc2\xa9', # ©
0xaa : b'\xc2\xaa', # ª
0xab : b'\xc2\xab', # «
0xac : b'\xc2\xac', # ¬
0xad : b'\xc2\xad', #
0xae : b'\xc2\xae', # ®
0xaf : b'\xc2\xaf', # ¯
0xb0 : b'\xc2\xb0', # °
0xb1 : b'\xc2\xb1', # ±
0xb2 : b'\xc2\xb2', # ²
0xb3 : b'\xc2\xb3', # ³
0xb4 : b'\xc2\xb4', # ´
0xb5 : b'\xc2\xb5', # µ
0xb6 : b'\xc2\xb6', # ¶
0xb7 : b'\xc2\xb7', # ·
0xb8 : b'\xc2\xb8', # ¸
0xb9 : b'\xc2\xb9', # ¹
0xba : b'\xc2\xba', # º
0xbb : b'\xc2\xbb', # »
0xbc : b'\xc2\xbc', # ¼
0xbd : b'\xc2\xbd', # ½
0xbe : b'\xc2\xbe', # ¾
0xbf : b'\xc2\xbf', # ¿
0xc0 : b'\xc3\x80', # À
0xc1 : b'\xc3\x81', # Á
0xc2 : b'\xc3\x82', # Â
0xc3 : b'\xc3\x83', # Ã
0xc4 : b'\xc3\x84', # Ä
0xc5 : b'\xc3\x85', # Å
0xc6 : b'\xc3\x86', # Æ
0xc7 : b'\xc3\x87', # Ç
0xc8 : b'\xc3\x88', # È
0xc9 : b'\xc3\x89', # É
0xca : b'\xc3\x8a', # Ê
0xcb : b'\xc3\x8b', # Ë
0xcc : b'\xc3\x8c', # Ì
0xcd : b'\xc3\x8d', # Í
0xce : b'\xc3\x8e', # Î
0xcf : b'\xc3\x8f', # Ï
0xd0 : b'\xc3\x90', # Ð
0xd1 : b'\xc3\x91', # Ñ
0xd2 : b'\xc3\x92', # Ò
0xd3 : b'\xc3\x93', # Ó
0xd4 : b'\xc3\x94', # Ô
0xd5 : b'\xc3\x95', # Õ
0xd6 : b'\xc3\x96', # Ö
0xd7 : b'\xc3\x97', # ×
0xd8 : b'\xc3\x98', # Ø
0xd9 : b'\xc3\x99', # Ù
0xda : b'\xc3\x9a', # Ú
0xdb : b'\xc3\x9b', # Û
0xdc : b'\xc3\x9c', # Ü
0xdd : b'\xc3\x9d', # Ý
0xde : b'\xc3\x9e', # Þ
0xdf : b'\xc3\x9f', # ß
0xe0 : b'\xc3\xa0', # à
        0xe1 : b'\xc3\xa1', # á
0xe2 : b'\xc3\xa2', # â
0xe3 : b'\xc3\xa3', # ã
0xe4 : b'\xc3\xa4', # ä
0xe5 : b'\xc3\xa5', # å
0xe6 : b'\xc3\xa6', # æ
0xe7 : b'\xc3\xa7', # ç
0xe8 : b'\xc3\xa8', # è
0xe9 : b'\xc3\xa9', # é
0xea : b'\xc3\xaa', # ê
0xeb : b'\xc3\xab', # ë
0xec : b'\xc3\xac', # ì
0xed : b'\xc3\xad', # í
0xee : b'\xc3\xae', # î
0xef : b'\xc3\xaf', # ï
0xf0 : b'\xc3\xb0', # ð
0xf1 : b'\xc3\xb1', # ñ
0xf2 : b'\xc3\xb2', # ò
0xf3 : b'\xc3\xb3', # ó
0xf4 : b'\xc3\xb4', # ô
0xf5 : b'\xc3\xb5', # õ
0xf6 : b'\xc3\xb6', # ö
0xf7 : b'\xc3\xb7', # ÷
0xf8 : b'\xc3\xb8', # ø
0xf9 : b'\xc3\xb9', # ù
0xfa : b'\xc3\xba', # ú
0xfb : b'\xc3\xbb', # û
0xfc : b'\xc3\xbc', # ü
0xfd : b'\xc3\xbd', # ý
0xfe : b'\xc3\xbe', # þ
}
MULTIBYTE_MARKERS_AND_SIZES = [
(0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF
(0xe0, 0xef, 3), # 3-byte characters start with E0-EF
(0xf0, 0xf4, 4), # 4-byte characters start with F0-F4
]
FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]
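    # Any byte >= 0x80 that does not start (or continue) a UTF-8 multibyte sequence and
    # appears in WINDOWS_1252_TO_UTF8 is treated by detwingle() below as a stray
    # Windows-1252 character.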
@classmethod
def detwingle(cls, in_bytes, main_encoding="utf8",
embedded_encoding="windows-1252"):
"""Fix characters from one encoding embedded in some other encoding.
Currently the only situation supported is Windows-1252 (or its
subset ISO-8859-1), embedded in UTF-8.
The input must be a bytestring. If you've already converted
the document to Unicode, you're too late.
The output is a bytestring in which `embedded_encoding`
characters have been converted to their `main_encoding`
equivalents.
"""
if embedded_encoding.replace('_', '-').lower() not in (
'windows-1252', 'windows_1252'):
raise NotImplementedError(
"Windows-1252 and ISO-8859-1 are the only currently supported "
"embedded encodings.")
if main_encoding.lower() not in ('utf8', 'utf-8'):
raise NotImplementedError(
"UTF-8 is the only currently supported main encoding.")
byte_chunks = []
chunk_start = 0
pos = 0
while pos < len(in_bytes):
byte = in_bytes[pos]
if not isinstance(byte, int):
# Python 2.x
byte = ord(byte)
if (byte >= cls.FIRST_MULTIBYTE_MARKER
and byte <= cls.LAST_MULTIBYTE_MARKER):
# This is the start of a UTF-8 multibyte character. Skip
# to the end.
for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
if byte >= start and byte <= end:
pos += size
break
elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
# We found a Windows-1252 character!
# Save the string up to this point as a chunk.
byte_chunks.append(in_bytes[chunk_start:pos])
# Now translate the Windows-1252 character into UTF-8
# and add it as another, one-byte chunk.
byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
pos += 1
chunk_start = pos
else:
# Go on to the next character.
pos += 1
if chunk_start == 0:
# The string is unchanged.
return in_bytes
else:
# Store the final chunk.
byte_chunks.append(in_bytes[chunk_start:])
return b''.join(byte_chunks)
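    # Usage sketch:
    #     UnicodeDammit.detwingle(b'\xe2\x80\x9cabc\x94')
    # returns b'\xe2\x80\x9cabc\xe2\x80\x9d' -- the leading bytes are already valid UTF-8
    # and are kept, while the stray Windows-1252 0x94 (right double quote) is rewritten
    # as its UTF-8 equivalent.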
| varlog00/Sigil | src/Resource_Files/plugin_launchers/python/sigil_bs4/dammit.py | Python | gpl-3.0 | 30,930 |
#!/usr/bin/env python
#coding:utf-8
from toughlogger.console.handlers.base import BaseHandler
from toughlogger.common.permit import permit
class LogoutHandler(BaseHandler):
def get(self):
if not self.current_user:
self.clear_all_cookies()
self.redirect("/login")
return
self.clear_all_cookies()
self.redirect("/login",permanent=False)
permit.add_handler(LogoutHandler, r"/logout")
| talkincode/toughlogger | toughlogger/console/handlers/logout.py | Python | agpl-3.0 | 452 |
import shelve
with shelve.open('persondb') as db:
for key in db.keys():
print(key, ' ', db[key])
sue = db['Sue Jones']
sue.giveRaise(0.1)
db['Sue Jones'] = sue
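# Note: the with-statement above already closes the shelf on exit, so the explicit
# close() below is redundant (though harmless).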
db.close()
| skellykiernan/pylearn | VI/ch28/updatedb.py | Python | bsd-3-clause | 200 |
"""
Django settings for simcon project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SERVER_EMAIL = 'django@pdx.edu'
SITE_ID = "http://127.0.0.1:8000"
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xb5@_&)+qo7edldwq95!^wdd)a&%5g3(d!2ud4-!_ta@6b-t(3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'), )
ALLOWED_HOSTS = []
FIXTURE_DIRS = (os.path.join(BASE_DIR, 'fixtures'), )
# Application definition
INSTALLED_APPS = (
'simcon',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tinymce', # for rich text embeds
#'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
#'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
MEDIA_URL = 'media/'
MEDIA_ROOT = os.path.join(os.getcwd(), MEDIA_URL)
#absolute file system path to directory containing audio files
#part of response app model. Must be different than STATIC_ROOT -Griff
ROOT_URLCONF = 'simcon.urls'
WSGI_APPLICATION = 'simcon.wsgi.application'
# Password Recovery Options
# FIXME: Some set of these need to be populated in production
# EMAIL_HOST = 'localhost'
# EMAIL_PORT = 1025
# EMAIL_HOST_USER = ''
# EMAIL_HOST_PASSWORD = ''
# EMAIL_USE_TLS = False
# DEFAULT_FROM_EMAIL = 'noreply@pdx.edu'
# In a debug environment, we can use a dummy smptd
# Run the following in a separate terminal in your VM:
# $ python -m smtpd -n -c DebuggingServer localhost:1025
if DEBUG:
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
DEFAULT_FROM_EMAIL = 'noreply@pdx.edu'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
AUTH_PROFILE_MODULE = 'simcon.researcher'
#DEBUG_TOOLBAR_PATCH_SETTINGS = False
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
from django.core.urlresolvers import reverse_lazy
LOGIN_URL=reverse_lazy('login')
#Logging functionality
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'logfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': BASE_DIR + "/logfile",
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'simcon': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
},
}
}
| djorda9/Simulated-Conversations | vagrant/simcon/settings.py | Python | mit | 4,696 |
# Find the Lowest Common Ancestor (LCA) in a Binary Search Tree
# A Binary Search Tree node
class Node:
# Constructor to initialise node
def __init__(self, data):
self.data = data
self.left = None
self.right = None
class BST:
def __init__(self):
self.root = None
def insert_node(self, data):
if self.root is None:
self.root = Node(data)
else:
self._insert(data, self.root)
def _insert(self, data, current_node):
if data <= current_node.data:
if current_node.left is not None:
self._insert(data, current_node.left)
else:
current_node.left = Node(data)
else:
if current_node.right is not None:
self._insert(data, current_node.right)
else:
current_node.right = Node(data)
def inorder(self):
current_node = self.root
self._inorder(current_node)
print('End')
def _inorder(self, current_node):
if current_node is None:
return
self._inorder(current_node.left)
print(current_node.data, " -> ", end='')
self._inorder(current_node.right)
# assuming both nodes are present in the tree
def lca_bst(root, value1, value2):
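    # Walk down from the root: if both values are greater than the current node the LCA
    # lies in the right subtree, if both are smaller it lies in the left subtree,
    # otherwise the current node separates the two values and is their lowest common
    # ancestor.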
while root is not None:
if value2 > root.data < value1:
root = root.right
elif value2 < root.data > value1:
root = root.left
else:
return root.data
if __name__ == '__main__':
tree = BST()
tree.insert_node(6)
tree.insert_node(8)
tree.insert_node(9)
tree.insert_node(6)
tree.insert_node(5)
tree.insert_node(7)
tree.insert_node(3)
tree.insert_node(2)
tree.insert_node(4)
print(lca_bst(tree.root, 4, 2))
"""
given tree:
6
6 8
5 7 9
3
2 4
"""
| anubhavshrimal/Data_Structures_Algorithms_In_Python | Tree/BinarySearchTree/Lowest_Common_Ancestor.py | Python | mit | 1,935 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from .htmlwriter import LibdocHtmlWriter
from .xmlwriter import LibdocXmlWriter
def LibdocWriter(format=None):
format = (format or 'HTML').upper()
if format == 'HTML':
return LibdocHtmlWriter()
if format == 'XML':
return LibdocXmlWriter()
raise DataError("Format must be either 'HTML' or 'XML', got '%s'." % format)
| caio2k/RIDE | src/robotide/lib/robot/libdocpkg/writer.py | Python | apache-2.0 | 994 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkInterfaceAssociation(Model):
"""Network interface and its custom security rules.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Network interface ID.
:vartype id: str
:param security_rules: Collection of custom security rules.
:type security_rules:
list[~azure.mgmt.network.v2017_10_01.models.SecurityRule]
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
}
def __init__(self, *, security_rules=None, **kwargs) -> None:
super(NetworkInterfaceAssociation, self).__init__(**kwargs)
self.id = None
self.security_rules = security_rules
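    # Construction sketch (the resource ID is read-only and populated by the server);
    # `some_rule` below is a placeholder SecurityRule instance:
    #     association = NetworkInterfaceAssociation(security_rules=[some_rule])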
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/network_interface_association_py3.py | Python | mit | 1,345 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import imp
import hmac
import hashlib
import six
from flask import Flask, abort, request
DEBUG = os.environ.get("DEBUG", False) == 'True'
HOST = os.environ.get("HOST", '0.0.0.0')
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
REPO_DIR = os.path.join(ROOT_DIR, "repos")
GITHUB_EVENTS = [
"commit_comment",
"create",
"delete",
"deployment",
"deployment_status",
"fork",
"gollum",
"issue_comment",
"issues",
"member",
"membership",
"page_build",
"public",
"pull_request_review_comment",
"pull_request",
"push",
"repository",
"release",
"status",
"team_add",
"watch",
"ping", # sent by github to check if the endpoint is available
]
app = Flask(__name__)
def hook(repo):
"""Processes an incoming webhook, see GITHUB_EVENTS for possible events.
"""
event, signature = (
request.headers.get('X-Github-Event', False),
request.headers.get('X-Hub-Signature', False)
)
# If we are not running on DEBUG, the X-Hub-Signature header has to be set.
# Raising a 404 is not the right http return code, but we don't
# want to give someone that is attacking this endpoint a clue
# that we are serving this repo alltogether if he doesn't
# know our secret key
if not DEBUG:
if not signature:
abort(404)
# Check that the payload is signed by the secret key. Again,
# if this is not the case, abort with a 404
if not is_signed(payload=request.get_data(as_text=True), signature=signature, secret=repo.SECRET):
abort(404)
# make sure the event is set
if event not in GITHUB_EVENTS:
abort(400)
data = request.get_json()
# call the always function and the event function (when implemented)
for function in ["always", event]:
if hasattr(repo, function):
getattr(repo, function)(data)
return "ok"
def is_signed(payload, signature, secret):
"""
https://developer.github.com/webhooks/securing/#validating-payloads-from-github
"""
if six.PY3: # pragma: no cover
payload = payload.encode("utf-8")
secret = secret.encode("utf-8")
digest = "sha1=" + hmac.new(
secret,
msg=payload,
digestmod=hashlib.sha1
).hexdigest()
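    # Note: hmac.compare_digest(digest, signature) would make this a constant-time
    # comparison and avoid leaking timing information.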
return digest == signature
def import_repo_by_name(name):
module_name = ".".join(["repos", name])
full_path = os.path.join(REPO_DIR, name + ".py")
module = imp.load_source(module_name, full_path)
env_var = "{name}_SECRET".format(name=name.upper())
if env_var not in os.environ:
if DEBUG:
print("WARNING: You need to set the environment variable {env_var}"
" when not in DEBUG mode.".format(
env_var=env_var
))
else:
raise AssertionError(
"You need to set {env_var}".format(
env_var=env_var)
)
else:
setattr(module, "SECRET", os.environ.get(env_var))
return module
def build_routes():
for _, _, filenames in os.walk(REPO_DIR):
for filename in filenames:
if filename.endswith(".py"):
name, _, _ = filename.partition(".py")
app.add_url_rule(
rule="/{}/".format(name),
endpoint=name,
view_func=hook,
methods=["POST"],
defaults={"repo": import_repo_by_name(name)}
)
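# Each repos/<name>.py module is exposed as POST /<name>/ and signed with the
# <NAME>_SECRET environment variable (see import_repo_by_name above).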
if __name__ == "__main__": # pragma: no cover
if DEBUG:
print("WARNING: running in DEBUG mode. Incoming webhooks will not be checked for a "
"valid signature.")
build_routes()
app.run(host=HOST, debug=DEBUG)
| pyupio/octohook | hook/hook.py | Python | mit | 3,954 |
""" Utilities class providing useful functions and methods. """
import requests, os, subprocess, shutil, pip, sys, stat
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from os.path import expanduser
from filecmp import dircmp
from drupdates.settings import Settings
from drupdates.settings import DrupdatesError
from drupdates.drush import Drush
class DrupdatesAPIError(DrupdatesError):
""" Error thrown bu api_call. """
class Utils(object):
""" Class of utilities used throughout the module. """
def __init__(self):
self.settings = Settings()
@staticmethod
def detect_home_dir(directory):
""" If dir is relative to home dir rewrite as OS agnostic path. """
parts = directory.split('/')
if parts[0] == '~' or parts[0].upper() == '$HOME':
del parts[0]
directory = os.path.join(expanduser('~'), '/'.join(parts))
return directory
@staticmethod
def check_dir(directory):
""" Ensure the directory is writable. """
directory = Utils.detect_home_dir(directory)
if not os.path.isdir(directory):
try:
os.makedirs(directory)
except OSError as error:
                msg = 'Unable to create non-existent directory {0} \n'.format(directory)
msg += 'Error: {0}\n'.format(error.strerror)
msg += 'Moving to next working directory, if applicable'
raise DrupdatesError(20, msg)
filepath = os.path.join(directory, "text.txt")
try:
open(filepath, "w")
except IOError:
msg = 'Unable to write to directory {0} \n'.format(directory)
raise DrupdatesError(20, msg)
os.remove(filepath)
return directory
@staticmethod
def remove_dir(directory):
""" Try and remove the directory. """
if os.path.isdir(directory):
try:
shutil.rmtree(directory)
except OSError as error:
msg = "Can't remove site dir {0}\n Error: {1}".format(directory, error.strerror)
raise DrupdatesError(20, msg)
return True
def find_make_file(self, site_name, directory):
""" Find the make file and test to ensure it exists. """
make_format = self.settings.get('makeFormat')
make_folder = self.settings.get('makeFolder')
file_name = self.settings.get('makeFileName')
make_file = site_name + '.make'
if file_name:
make_file_short = file_name
else:
make_file_short = site_name
if make_format == 'yaml':
make_file += '.yaml'
make_file_short += '.yaml'
if make_folder:
directory = os.path.join(directory, make_folder)
file_name = os.path.join(directory, make_file)
file_name_short = os.path.join(directory, make_file_short)
if os.path.isfile(file_name):
return file_name
if os.path.isfile(file_name_short):
return file_name_short
return False
def make_site(self, site_name, site_dir):
""" Build a webroot based on a make file. """
web_root = self.settings.get('webrootDir')
folder = os.path.join(site_dir, web_root)
make_file = self.find_make_file(site_name, site_dir)
Utils.remove_dir(folder)
if make_file and web_root:
# Run drush make
# Get the repo webroot
make_opts = self.settings.get('makeOpts')
make_cmds = ['make', make_file, folder]
make_cmds += make_opts
make = Drush.call(make_cmds)
return make
@staticmethod
def api_call(uri, name, method='get', **kwargs):
""" Perform and API call, expecting a JSON response.
Largely a wrapper around the request module
Keyword arguments:
uri -- the uri of the Restful Web Service (required)
name -- the human readable label for the service being called (required)
method -- HTTP method to use (default = 'get')
kwargs -- dictionary of arguments passed directly to requests module method
"""
# Ensure uri is valid
if not bool(urlparse(uri).netloc):
msg = ("Error: {0} is not a valid url").format(uri)
raise DrupdatesAPIError(20, msg)
func = getattr(requests, method)
args = {}
args['timeout'] = (10, 10)
for key, value in kwargs.items():
args[key] = value
try:
response = func(uri, **args)
except requests.exceptions.Timeout:
msg = "The api call to {0} timed out".format(uri)
raise DrupdatesAPIError(20, msg)
except requests.exceptions.TooManyRedirects:
msg = "The api call to {0} appears incorrect, returned: too many re-directs".format(uri)
raise DrupdatesAPIError(20, msg)
except requests.exceptions.RequestException as error:
msg = "The api call to {0} failed\n Error {1}".format(uri, error)
raise DrupdatesAPIError(20, msg)
try:
response_dictionary = response.json()
except ValueError:
return response
#If API call errors out print the error and quit the script
if response.status_code not in [200, 201]:
if 'errors' in response_dictionary:
errors = response_dictionary.pop('errors')
first_error = errors.pop()
elif 'error' in response_dictionary:
first_error = response_dictionary.pop('error')
else:
                first_error = {'message': "No error message provided by response"}
msg = "{0} returned an error, exiting the script.\n".format(name)
msg += "Status Code: {0} \n".format(response.status_code)
msg += "Error: {0}".format(first_error['message'])
raise DrupdatesAPIError(20, msg)
else:
return response_dictionary
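    # Usage sketch:
    #     Utils.api_call('https://example.com/endpoint', 'Example service', params={'page': 1})
    # returns the decoded JSON body on HTTP 200/201 and raises DrupdatesAPIError otherwise
    # (responses that are not JSON are returned as the raw requests.Response object).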
def sys_commands(self, obj, phase=''):
""" Run a system command based on the subprocess.popen method.
For example: maybe you want a symbolic link, on a unix box,
from /opt/drupal to /var/www/drupal you would add the command(s)
to the appropriate phase setting in you yaml settings files.
Note: the format of the setting is a multi-dimensional list
Example (from Sitebuild.build():
postBuildCmds:
value:
-
- ln
- -s
- /var/www/drupal
- /opt/drupal
Note: You can refer to an attribute in the calling class, assuming they are
set, by prefixing them with "att_" in the settings yaml above,
ex. att_site_dir would pass the Sitebuild.site_dir attribute
Keyword arguments:
phase -- the phase the script is at when sysCommands is called (default "")
object -- the object the call to sysCommand is housed within
"""
commands = self.settings.get(phase)
if commands and isinstance(commands, list):
for command in commands:
if isinstance(command, list):
# Find list items that match the string after "att_",
# these are names names of attribute in the calling class
for key, item in enumerate(command):
if item[:4] == 'att_':
attribute = item[4:]
try:
command[key] = getattr(obj, attribute)
except AttributeError:
continue
try:
popen = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as error:
msg = "Cannot run {0} the command doesn't exist,\n".format(command.pop(0))
msg += "Error: {1}".format(error.strerror)
print(msg)
results = popen.communicate()
if results[1]:
print("Running {0}, \n Error: {1}".format(command, results[1]))
else:
continue
def rm_common(self, dir_delete, dir_compare):
""" Delete files in dir_delete that are in dir_compare.
Iterate over the sites directory and delete any files/folders not in the
commonIgnore setting.
keyword arguments:
dir_delete -- The directory to have it's file/folders deleted.
dir_compare -- The directory to compare dirDelete with.
"""
ignore = self.settings.get('commonIgnore')
if isinstance(ignore, str):
ignore = [ignore]
dcmp = dircmp(dir_delete, dir_compare, ignore)
for file_name in dcmp.common_files:
os.remove(dir_delete + '/' + file_name)
for directory in dcmp.common_dirs:
shutil.rmtree(dir_delete + '/' + directory)
def write_debug_file(self):
""" Write debug file for this run.
Write file containing your system settings to be used to record python
and Drupdates state at the time Drupdates was run.
"""
base_dir = self.settings.get('baseDir')
directory = Utils.check_dir(base_dir)
debug_file_name = os.path.join(directory, 'drupdates.debug')
debug_file = open(debug_file_name, 'w')
debug_file.write("Python Version:\n")
python_version = "{0}\n\n".format(sys.version)
debug_file.write(python_version)
# Get version data for system dependancies
dependancies = ['sqlite3', 'drush', 'git', 'php']
for dependancy in dependancies:
commands = [dependancy, '--version']
popen = subprocess.Popen(commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
results = popen.communicate()
if popen.returncode != 0:
stdout = "Check returned error."
else:
stdout = results[0]
debug_file.write("{0} Version:\n".format(dependancy.title()))
debug_file.write("{0}\n".format(stdout.decode()))
installed_packages = pip.get_installed_distributions()
if len(installed_packages):
debug_file.write("Installed Packages:\n\n")
for i in installed_packages:
package = "{0}\n".format(str(i))
debug_file.write(package)
settings = self.settings.list()
debug_file.write("\nDrupdates Settings:\n\n")
for name, setting in settings.items():
line = "{0} : {1}\n".format(name, str(setting['value']))
debug_file.write(line)
def load_dir_settings(self, dir):
""" Add custom settings for the a given directory. """
settings_file = os.path.join(dir, '.drupdates/settings.yaml')
if os.path.isfile(settings_file):
self.settings.add(settings_file, True)
@staticmethod
def copytree(src, dst, symlinks = False, ignore = None):
""" Recursively copy a directory tree from src to dst.
Taken from http://stackoverflow.com/a/22331852/1120125.
Needed because distutils.dir_util.copy_tree will only copy a given
directory one time. Which is annoying!
"""
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass # lchmod not available
elif os.path.isdir(s):
Utils.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
| jalama/drupdates | drupdates/utils.py | Python | mit | 12,467 |