code | repo_name | path | language | license | size
stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M
---|---|---|---|---|---|
#!/usr/bin/env python
# Python Network Programming Cookbook -- Chapter - 7
# This program is optimized for Python 2.7.
# It may run on any other version with/without modifications.
from getpass import getpass
from fabric.api import run, env, prompt, cd
def remote_server():
env.hosts = ['127.0.0.1']
env.user = prompt('Enter your system username: ')
env.password = getpass('Enter your system user password: ')
env.mysqlhost = 'localhost'
env.mysqluser = prompt('Enter your db username: ')
env.mysqlpassword = getpass('Enter your db user password: ')
env.db_name = ''
def show_dbs():
""" Wraps mysql show databases cmd"""
q = "show databases"
run("echo '%s' | mysql -u%s -p%s" %(q, env.mysqluser, env.mysqlpassword))
def run_sql(db_name, query):
""" Generic function to run sql"""
with cd('/tmp'):
run("echo '%s' | mysql -u%s -p%s -D %s" %(query, env.mysqluser, env.mysqlpassword, db_name))
def create_db():
"""Create a MySQL DB for App version"""
if not env.db_name:
db_name = prompt("Enter the DB name:")
else:
db_name = env.db_name
run('echo "CREATE DATABASE %s default character set utf8 collate utf8_unicode_ci;"|mysql --batch --user=%s --password=%s --host=%s'\
% (db_name, env.mysqluser, env.mysqlpassword, env.mysqlhost), pty=True)
def ls_db():
""" List a dbs with size in MB """
if not env.db_name:
db_name = prompt("Which DB to ls?")
else:
db_name = env.db_name
query = """SELECT table_schema "DB Name",
Round(Sum(data_length + index_length) / 1024 / 1024, 1) "DB Size in MB"
FROM information_schema.tables
WHERE table_schema = \"%s\"
GROUP BY table_schema """ %db_name
run_sql(db_name, query)
def empty_db():
""" Empty all tables of a given DB """
db_name = prompt("Enter DB name to empty:")
cmd = """
(echo 'SET foreign_key_checks = 0;';
(mysqldump -u%s -p%s --add-drop-table --no-data %s |
grep ^DROP);
echo 'SET foreign_key_checks = 1;') | \
mysql -u%s -p%s -b %s
""" %(env.mysqluser, env.mysqlpassword, db_name, env.mysqluser, env.mysqlpassword, db_name)
run(cmd)
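# Hedged usage sketch (not part of the original recipe; assumes Fabric 1.x):
# these tasks are normally driven from the shell, with remote_server() run
# first so env.hosts and the credentials get populated, e.g.
#   fab -f 7_5_run_mysql_command_remotely.py remote_server show_dbs
#   fab -f 7_5_run_mysql_command_remotely.py remote_server create_db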
| simontakite/sysadmin | pythonscripts/pythonnetworkingcoookbook/chapter7/7_5_run_mysql_command_remotely.py | Python | gpl-2.0 | 2,253 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from flask.ext.script import Manager, Command, Option
from eventviz import settings
from eventviz.app import app
from eventviz.db import insert_item, connection, get_database_names
from eventviz.lib.parsers import get_parser_by_name, get_parser_names
class LoadData(Command):
"""
Parses a file and loads its data into the database.
"""
option_list = (
Option('-f', '--filename', dest='filename', required=True),
Option('-p', '--parser', dest='parser_name', required=True),
Option('-P', '--project', dest='project_name', required=True)
)
def run(self, filename=None, parser_name=None, project_name=None):
parser_cls = get_parser_by_name(parser_name)
if parser_cls is None:
print "Unknown parser: %s" % parser_name
return
if not os.path.exists(filename):
print "File not found: %s" % filename
return
parser = parser_cls(filename)
count = 0
for item in parser.run():
if settings.DEBUG_PARSERS:
count += 1
else:
if insert_item(project_name, parser, item):
count += 1
if count % 100 == 0:
msg = "Inserted %d events..." % count
sys.stdout.write(msg)
sys.stdout.flush()
sys.stdout.write('\b' * len(msg))
sys.stdout.write("Inserted %d events...\n" % count)
class ListParsers(Command):
"""
Lists currently available parser types.
"""
def run(self):
print "Available parsers:"
for parser_name in get_parser_names():
print '*', parser_name
class DropProject(Command):
"""
Drops database for given project.
"""
option_list = (
Option('-P', '--project', dest='project', required=True),
)
def run(self, project=None):
if project not in get_database_names():
print "No such project: %s" % project
return
connection.drop_database(project)
print "Dropped '%s' project" % project
class DropCollection(Command):
"""
Drops a project's data for given parser.
"""
option_list = (
Option('-P', '--project', dest='project', required=True),
Option('-p', '--parser', dest='parser', required=True)
)
def run(self, project=None, parser=None):
if project not in get_database_names():
print "No such project: %s" % project
return
db = connection[project]
if parser not in db.collection_names():
print "No data for parser: %s" % parser
return
db.drop_collection(parser)
print "Dropped data for '%s' in project '%s'" % (parser, project)
class ListProjects(Command):
"""
Lists available projects.
"""
def run(self):
print "Available projects:"
for db_name in get_database_names():
print '*', db_name
class TestParser(Command):
"""
Tests a parser (for debugging purposes only).
"""
option_list = (
Option('-p', '--parser', dest='parser', required=True),
Option('-i', '--inputfile', dest='inputfile', required=True)
)
def run(self, parser=None, inputfile=None):
settings.DEBUG = True
parser_cls = get_parser_by_name(parser)
if parser_cls is None:
print "Unknown parser: %s" % parser
return
if not os.path.exists(inputfile):
print "File not found: %s" % inputfile
return
parser = parser_cls(inputfile)
for _ in parser.run():
pass
manager = Manager(app)
manager.add_command('load_data', LoadData())
manager.add_command('list_parsers', ListParsers())
manager.add_command('drop_project', DropProject())
manager.add_command('drop_collection', DropCollection())
manager.add_command('list_projects', ListProjects())
manager.add_command('test_parser', TestParser())
manager.run()
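# Hedged usage sketch (assuming the EventViz package and Flask-Script are
# installed); sub-command names and options mirror the Command classes above,
# and "events.log", "myparser", "myproject" are hypothetical placeholders:
#   python manage.py list_parsers
#   python manage.py load_data -f events.log -p myparser -P myproject
#   python manage.py drop_project -P myproject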
| mattoufoutu/EventViz | manage.py | Python | mit | 4,068 |
# $Id$
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for surface calculations
FIX: add tests for LabuteASA
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest,os
from rdkit.six.moves import cPickle
from rdkit import Chem
from rdkit.Chem import MolSurf
import os.path
def feq(n1,n2,tol=1e-4):
return abs(n1-n2)<=tol
class TestCase(unittest.TestCase):
def setUp(self):
if doLong:
print('\n%s: '%self.shortDescription(),end='')
def testTPSAShort(self):
" Short TPSA test "
inName = RDConfig.RDDataDir+'/NCI/first_200.tpsa.csv'
with open(inName,'r') as inF:
lines = inF.readlines()
for line in lines:
if line[0] != '#':
line = line.strip()
smi,ans = line.split(',')
ans = float(ans)
mol = Chem.MolFromSmiles(smi)
calc = MolSurf.TPSA(mol)
assert feq(calc,ans),'bad TPSA for SMILES %s (%.2f != %.2f)'%(smi,calc,ans)
def _testTPSALong(self):
" Longer TPSA test "
#inName = RDConfig.RDDataDir+'/NCI/first_5k.tpsa.csv'
inName = os.path.join(RDConfig.RDCodeDir,'Chem','test_data','NCI_5K_TPSA.csv')
with open(inName,'r') as inF:
lines = inF.readlines()
lineNo = 0
for line in lines:
lineNo += 1
if line[0] != '#':
line = line.strip()
smi,ans = line.split(',')
ans = float(ans)
try:
mol = Chem.MolFromSmiles(smi)
except:
mol = None
if not mol:
print('molecule construction failed on line %d'%lineNo)
else:
ok = 1
try:
calc = MolSurf.TPSA(mol)
except:
ok=0
assert ok,'Line %d: TPSA Calculation failed for SMILES %s'%(lineNo,smi)
assert feq(calc,ans),'Line %d: bad TPSA for SMILES %s (%.2f != %.2f)'%(lineNo,smi,calc,ans)
def testHsAndTPSA(self):
"""
testing the impact of Hs in the graph on PSA calculations
This was sf.net issue 1969745
"""
mol = Chem.MolFromSmiles('c1c[nH]cc1')
molH = Chem.AddHs(mol)
psa = MolSurf.TPSA(mol)
psaH = MolSurf.TPSA(molH)
if(psa!=psaH):
psac = MolSurf.rdMolDescriptors._CalcTPSAContribs(mol)
psaHc = MolSurf.rdMolDescriptors._CalcTPSAContribs(molH)
for i,v in enumerate(psac):
print('\t',i,'\t',v,'\t',psaHc[i])
while i<len(psaHc):
print('\t\t\t',psaHc[i])
i+=1
self.assertEqual(psa,psaH)
inName = RDConfig.RDDataDir+'/NCI/first_200.tpsa.csv'
with open(inName,'r') as inF:
lines = inF.readlines()
for line in lines:
if line[0] != '#':
line = line.strip()
smi,ans = line.split(',')
ans = float(ans)
mol = Chem.MolFromSmiles(smi)
mol = Chem.AddHs(mol)
calc = MolSurf.TPSA(mol)
self.assertTrue(feq(calc,ans),'bad TPSA for SMILES %s (%.2f != %.2f)'%(smi,calc,ans))
if doLong:
inName = os.path.join(RDConfig.RDCodeDir,'Chem','test_data','NCI_5K_TPSA.csv')
with open(inName,'r') as inF:
lines = inF.readlines()
for line in lines:
if line[0] != '#':
line = line.strip()
smi,ans = line.split(',')
ans = float(ans)
mol = Chem.MolFromSmiles(smi)
mol = Chem.AddHs(mol)
calc = MolSurf.TPSA(mol)
self.assertTrue(feq(calc,ans),'bad TPSA for SMILES %s (%.2f != %.2f)'%(smi,calc,ans))
if __name__ == '__main__':
import sys,getopt,re
doLong=0
if len(sys.argv) >1:
args,extras=getopt.getopt(sys.argv[1:],'l')
for arg,val in args:
if arg=='-l':
doLong=1
sys.argv.remove('-l')
if doLong:
for methName in dir(TestCase):
if re.match('_test',methName):
newName = re.sub('_test','test',methName)
exec('TestCase.%s = TestCase.%s'%(newName,methName))
unittest.main()
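# Hedged usage note (derived from the getopt handling above, not in the
# original file):
#   python UnitTestSurf.py       -> runs only the short tests
#   python UnitTestSurf.py -l    -> sets doLong and also promotes the
#                                   _test* methods (e.g. _testTPSALong)
#                                   to regular test* methods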
| soerendip42/rdkit | rdkit/Chem/UnitTestSurf.py | Python | bsd-3-clause | 4,122 |
import graphlab as gl
import re
import random
from copy import copy
import os
import graphlab.aggregate as agg
import array
import sys
model_name = "pooling-3"
which_model = 0
print "Running model %d, %s" % (which_model, model_name)
alt_path = os.path.expanduser("~/data/tmp/")
if os.path.exists(alt_path):
gl.set_runtime_config("GRAPHLAB_CACHE_FILE_LOCATIONS", alt_path)
model_path = "nn_360x360/models/model-%d-%s/" % (which_model, model_name)
model_filename = model_path + "nn_model"
X_train = gl.SFrame("image-sframes/train-%d-0/" % which_model)
X_valid = gl.SFrame("image-sframes/valid-%d-0/" % which_model)
X_train_raw = gl.SFrame("image-sframes/train-raw/")
X_test = gl.SFrame("image-sframes/test/")
################################################################################
# init_random vs random_type in ConvolutionLayer.
dll = gl.deeplearning.layers
nn = gl.deeplearning.NeuralNet()
nn.layers.append(dll.ConvolutionLayer(
kernel_size = 12,
stride=3,
num_channels=96,
init_random="xavier"))
# nn.layers.append(dll.MaxPoolingLayer(
# kernel_size = 5,
# stride=1))
# nn.layers.append(dll.SigmoidLayer())
# for i in xrange(3):
# nn.layers.append(dll.ConvolutionLayer(
# kernel_size = 3,
# padding = 1,
# stride=1,
# num_channels=64 - 8 * i,
# init_random="xavier"))
# nn.layers.append(dll.MaxPoolingLayer(
# kernel_size = 3,
# padding = 1,
# stride=2))
# nn.layers.append(dll.RectifiedLinearLayer())
# nn.layers.append(dll.ConvolutionLayer(
# kernel_size = 8,
# stride=4,
# num_channels=32,
# init_random="gaussian"))
# nn.layers.append(dll.RectifiedLinearLayer())
# nn.layers.append(dll.SigmoidLayer())
nn.layers.append(dll.SigmoidLayer())
nn.layers.append(dll.FlattenLayer())
nn.layers.append(dll.FullConnectionLayer(
num_hidden_units = 128,
init_sigma = 0.001,
init_bias = 0,
init_random = "gaussian"))
nn.layers.append(dll.RectifiedLinearLayer())
nn.layers.append(dll.FullConnectionLayer(
num_hidden_units = 64,
init_sigma = 0.005,
init_bias = 0,
init_random = "gaussian"))
nn.layers.append(dll.RectifiedLinearLayer())
nn.layers.append(dll.FullConnectionLayer(
num_hidden_units = 32,
init_sigma = 0.005,
init_bias = 1,
init_random = "gaussian"))
nn.layers.append(dll.RectifiedLinearLayer())
nn.layers.append(dll.FullConnectionLayer(
num_hidden_units = 5 if which_model == 0 else 2,
init_sigma = 0.005,
init_random = "gaussian"))
nn.layers.append(dll.SoftmaxLayer())
nn.params["batch_size"] = 64
# nn.params["momentum"] = 0.9
nn.params["init_random"] = "gaussian"
nn.params["init_sigma"] = 0.001
# nn.params["learning_rate"] = 0.01
# nn.params["l2_regularization"] = 0.0005
# nn.params["bias_learning_rate"] = 0.02
# nn.params["learning_rate_schedule"] = "exponential_decay"
# nn.params["learning_rate_gamma"] = 0.1
################################################################################
if os.path.exists("image-sframes/mean_image"):
mean_image_sf = gl.SFrame("image-sframes/mean_image")
mean_image = mean_image_sf["image"][0]
else:
mean_image = X_train["image"].mean()
mean_image_sf = gl.SFrame({"image" : [mean_image]})
mean_image_sf.save("image-sframes/mean_image")
if which_model == 0:
m = gl.classifier.neuralnet_classifier.create(
X_train, features = ["image"], target = "level",
network = nn, mean_image = mean_image,
device = "gpu", random_mirror=True, max_iterations = 100,
validation_set=X_valid,
model_checkpoint_interval = 1,
model_checkpoint_path = model_filename + "-checkpoint")
else:
assert which_model in [1,2,3,4]
X_train["class"] = (X_train["level"] >= which_model)
X_valid["class"] = (X_valid["level"] >= which_model)
# Class counts (the "downsample the less common class" step is not actually applied below)
n_class_0 = (X_train["class"] == 0).sum()
n_class_1 = (X_train["class"] == 1).sum()
m = gl.classifier.neuralnet_classifier.create(
X_train,
features = ["image"], target = "class",
# network = nn,
mean_image = mean_image,
model_checkpoint_path = model_filename + "-checkpoint",
model_checkpoint_interval = 1,
device = "gpu", random_mirror=True, max_iterations = 10, validation_set=X_valid)
m.save(model_filename)
X_train["class_scores"] = \
(m.predict_topk(X_train[["image"]], k= (5 if which_model == 0 else 2))\
.unstack(["class", "score"], "scores").sort("row_id")["scores"])
X_train_raw["class_scores"] = \
(m.predict_topk(X_train_raw[["image"]], k= (5 if which_model == 0 else 2))\
.unstack(["class", "score"], "scores").sort("row_id")["scores"])
X_test["class_scores"] = \
(m.predict_topk(X_test[["image"]], k=(5 if which_model == 0 else 2))
.unstack(["class", "score"], "scores").sort("row_id")["scores"])
X_train["features"] = m.extract_features(X_train[["image"]])
X_train_raw["features"] = m.extract_features(X_train_raw[["image"]])
X_test["features"] = m.extract_features(X_test[["image"]])
def flatten_dict(d):
out_d = {}
def _add_to_dict(base, out_d, d):
if type(d) in [array.array, list]:
for j, v in enumerate(d):
new_key = str(j) if base is None else (base + ".%d" % j)
_add_to_dict(new_key, out_d, v)
elif type(d) is dict:
for k, v in d.iteritems():
new_key = k if base is None else (base + '.' + str(k))
_add_to_dict(new_key, out_d, v)
else:
if d != 0:
out_d[base] = d
_add_to_dict(None, out_d, d)
return out_d
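# Hedged illustration of flatten_dict (derived from the code above, not part
# of the original script): nested lists/dicts become dotted keys and
# zero-valued entries are dropped, e.g.
#   flatten_dict({"a": [1.0, 0.0, 2.0]})  ->  {"a.0": 1.0, "a.2": 2.0}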
score_column = "scores_%d" % which_model
features_column = "features_%d" % which_model
Xt = X_train[["name", "class_scores", "level", "features"]]
Xty = Xt.groupby(["name", "level"], {"cs" : agg.CONCAT("class_scores")})
Xty[score_column] = Xty["cs"].apply(flatten_dict)
Xty2 = Xt.groupby("name", {"ft" : agg.CONCAT("features")})
Xty2[features_column] = Xty2["ft"].apply(flatten_dict)
Xty = Xty.join(Xty2[["name", features_column]], on = "name")
Xty[["name", score_column, "level", features_column]].save(model_path + "scores_train")
Xtst = X_test[["name", "class_scores", "features"]]
Xtsty = Xtst.groupby("name", {"cs" : agg.CONCAT("class_scores")})
Xtsty[score_column] = Xtsty["cs"].apply(flatten_dict)
Xtsty2 = Xtst.groupby("name", {"ft" : agg.CONCAT("features")})
Xtsty2[features_column] = Xtsty2["ft"].apply(flatten_dict)
Xtsty = Xtsty.join(Xtsty2[["name", features_column]], on = "name")
Xtsty[["name", score_column, features_column]].save(model_path + "scores_test")
Xtraw = X_train_raw[["name", "class_scores", "features"]]
Xtrawy = Xtraw.groupby("name", {"cs" : agg.CONCAT("class_scores")})
Xtrawy[score_column] = Xtrawy["cs"].apply(flatten_dict)
Xtrawy2 = Xtraw.groupby("name", {"ft" : agg.CONCAT("features")})
Xtrawy2[features_column] = Xtrawy2["ft"].apply(flatten_dict)
Xtrawy = Xtrawy.join(Xtrawy2[["name", features_column]], on = "name")
Xtrawy[["name", score_column, features_column]].save(model_path + "scores_train_raw")
| hoytak/diabetic-retinopathy-code-private | create_nn_model.py | Python | mit | 7,235 |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
#__version__ = "pre-3.3-" + "$Revision: 1.2 $"[11:15] + "-cvs"
__version__ = "3.3"
__license__ = "Python"
__copyright__ = "Copyright 2002-4, Mark Pilgrim"
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
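# Hedged example of overriding this from an embedding application (the name
# and URL below are hypothetical):
#   feedparser.USER_AGENT = "MyFeedReader/1.0 +http://example.org/myfeedreader/"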
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. This is off by default because of reports of crashing on some
# platforms. If it crashes for you, please submit a bug report with your OS
# platform, Python version, and the URL of the feed you were attempting to parse.
# Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
TIDY_MARKUP = 0
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# timeoutsocket allows feedparser to time out rather than hang forever on ultra-slow servers.
# Python 2.3 now has this functionality available in the standard socket library, so under
# 2.3 you don't need to install anything. But you probably should anyway, because the socket
# module is buggy and timeoutsocket is better.
try:
import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py
timeoutsocket.setDefaultSocketTimeout(60)
except ImportError:
import socket
if hasattr(socket, 'setdefaulttimeout'):
socket.setdefaulttimeout(60)
import urllib, urllib2
_mxtidy = None
if TIDY_MARKUP:
try:
from mx.Tidy import Tidy as _mxtidy
except:
pass
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace("&", "&amp;")
data = data.replace(">", "&gt;")
data = data.replace("<", "&lt;")
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# ---------- don't touch these ----------
class CharacterEncodingOverride(Exception): pass
class CharacterEncodingUnknown(Exception): pass
class NonXMLContentType(Exception): pass
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
UserDict = dict
except NameError:
# Python 2.1 does not have dict
from UserDict import UserDict
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
class FeedParserDict(UserDict):
def __getitem__(self, key):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'modified',
'date_parsed': 'modified_parsed',
'description': ['tagline', 'summary']}
realkey = keymap.get(key, key)
if type(realkey) == types.ListType:
for k in realkey:
if UserDict.has_key(self, k):
return UserDict.__getitem__(self, k)
return UserDict.__getitem__(self, key)
return UserDict.__getitem__(self, realkey)
def has_key(self, key):
return hasattr(self, key) or UserDict.has_key(self, key)
def __getattr__(self, key):
try:
return self.__dict__[key]
except KeyError:
pass
try:
return self.__getitem__(key)
except:
raise AttributeError, "object has no attribute '%s'" % key
def __contains__(self, key):
return self.has_key(key)
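# Hedged illustration of the keymap aliasing above (not in the original
# source): with d = FeedParserDict() and d['feed'] = 'x', looking up
# d['channel'] returns 'x', and entry['guid'] falls through to entry['id'],
# because __getitem__ consults keymap before the real keys.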
def zopeCompatibilityHack():
global FeedParserDict
del FeedParserDict
def FeedParserDict(aDict=None):
rc = {}
if aDict:
rc.update(aDict)
return rc
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
import string
_ebcdic_to_ascii_map = string.maketrans( \
"".join(map(chr, range(256))), "".join(map(chr, emap)))
return s.translate(_ebcdic_to_ascii_map)
class _FeedParserMixin:
namespaces = {"": "",
"http://backend.userland.com/rss": "",
"http://blogs.law.harvard.edu/tech/rss": "",
"http://purl.org/rss/1.0/": "",
"http://my.netscape.com/rdf/simple/0.9/": "",
"http://example.com/newformat#": "",
"http://example.com/necho": "",
"http://purl.org/echo/": "",
"uri/of/echo/namespace#": "",
"http://purl.org/pie/": "",
"http://purl.org/atom/ns#": "",
"http://purl.org/rss/1.0/modules/rss091#": "",
"http://webns.net/mvcb/": "admin",
"http://purl.org/rss/1.0/modules/aggregation/": "ag",
"http://purl.org/rss/1.0/modules/annotate/": "annotate",
"http://media.tangent.org/rss/1.0/": "audio",
"http://backend.userland.com/blogChannelModule": "blogChannel",
"http://web.resource.org/cc/": "cc",
"http://backend.userland.com/creativeCommonsRssModule": "creativeCommons",
"http://purl.org/rss/1.0/modules/company": "co",
"http://purl.org/rss/1.0/modules/content/": "content",
"http://my.theinfo.org/changed/1.0/rss/": "cp",
"http://purl.org/dc/elements/1.1/": "dc",
"http://purl.org/dc/terms/": "dcterms",
"http://purl.org/rss/1.0/modules/email/": "email",
"http://purl.org/rss/1.0/modules/event/": "ev",
"http://postneo.com/icbm/": "icbm",
"http://purl.org/rss/1.0/modules/image/": "image",
"http://xmlns.com/foaf/0.1/": "foaf",
"http://freshmeat.net/rss/fm/": "fm",
"http://purl.org/rss/1.0/modules/link/": "l",
"http://madskills.com/public/xml/rss/module/pingback/": "pingback",
"http://prismstandard.org/namespaces/1.2/basic/": "prism",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://www.w3.org/2000/01/rdf-schema#": "rdfs",
"http://purl.org/rss/1.0/modules/reference/": "ref",
"http://purl.org/rss/1.0/modules/richequiv/": "reqv",
"http://purl.org/rss/1.0/modules/search/": "search",
"http://purl.org/rss/1.0/modules/slash/": "slash",
"http://purl.org/rss/1.0/modules/servicestatus/": "ss",
"http://hacks.benhammersley.com/rss/streaming/": "str",
"http://purl.org/rss/1.0/modules/subscription/": "sub",
"http://purl.org/rss/1.0/modules/syndication/": "sy",
"http://purl.org/rss/1.0/modules/taxonomy/": "taxo",
"http://purl.org/rss/1.0/modules/threading/": "thr",
"http://purl.org/rss/1.0/modules/textinput/": "ti",
"http://madskills.com/public/xml/rss/module/trackback/":"trackback",
"http://wellformedweb.org/CommentAPI/": "wfw",
"http://purl.org/rss/1.0/modules/wiki/": "wiki",
"http://schemas.xmlsoap.org/soap/envelope/": "soap",
"http://www.w3.org/1999/xhtml": "xhtml",
"http://www.w3.org/XML/1998/namespace": "xml"
}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'comments', 'license']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'copyright', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'copyright', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write("initializing FeedParser\n")
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
# the following are used internally to track state;
# some of this is kind of out of control and should
# probably be refactored into a finite state machine
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.contentparams = FeedParserDict()
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
if baselang:
self.feeddata['language'] = baselang
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
self.baseuri = baseuri
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang
self.lang = lang
self.basestack.append(baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.get('mode') == 'escaped':
# element declared itself as escaped markup, but it isn't really
self.contentparams['mode'] = 'xml'
if self.incontent and self.contentparams.get('mode') == 'xml':
# Note: probably shouldn't simply recreate localname here, but
# our namespace handling isn't actually 100% correct in cases where
# the feed redefines the default namespace (which is actually
# the usual case for inline content, thanks Sam), so here we
# cheat and just reconstruct the element based on localname
# because that compensates for the bugs in our namespace handling.
# This will horribly munge inline content with non-empty qnames,
# but nobody actually does that, so I'm not fixing it.
tag = tag.split(':')[-1]
return self.handle_data("<%s%s>" % (tag, "".join([' %s="%s"' % t for t in attrs])), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.get('mode') == 'escaped':
# element declared itself as escaped markup, but it isn't really
self.contentparams['mode'] = 'xml'
if self.incontent and self.contentparams.get('mode') == 'xml':
tag = tag.split(':')[-1]
self.handle_data("</%s>" % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for "&#160;", ref will be "160"
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = "&#%s;" % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for "&copy;", ref will be "copy"
if not self.elementstack: return
if _debug: sys.stderr.write("entering handle_entityref with %s\n" % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
else:
# entity resolution graciously donated by Aaron Swartz
def name2cp(k):
import htmlentitydefs
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"):
return int(k[2:-1]) # not in latin-1
return ord(k)
try: name2cp(ref)
except KeyError: text = "&%s;" % ref
else: text = unichr(name2cp(ref)).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('mode') == 'xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write("entering parse_declaration\n")
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def trackNamespace(self, prefix, uri):
if (prefix, uri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if uri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if not prefix: return
if uri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
if self.namespaces.has_key(uri):
self.namespacemap[prefix] = self.namespaces[uri]
def resolveURI(self, uri):
return urlparse.urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
output = "".join(pieces)
output = output.strip()
if not expectingText: return output
# decode base64 content
if self.contentparams.get('mode') == 'base64' and base64:
try:
output = base64.decodestring(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
output = self.decodeEntities(element, output)
# resolve relative URIs within embedded markup
if self.contentparams.get('type', 'text/html') in self.html_types:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
# sanitize embedded markup
if self.contentparams.get('type', 'text/html') in self.html_types:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding)
if self.encoding and (type(output) == types.StringType):
try:
output = unicode(output, self.encoding)
except:
pass
# store output in appropriate place(s)
if self.inentry:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'category':
self.entries[-1][element] = output
domain = self.entries[-1]['categories'][-1][0]
self.entries[-1]['categories'][-1] = (domain, output)
elif element == 'source':
self.entries[-1]['source']['value'] = output
elif element == 'link':
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif self.infeed and (not self.intextinput) and (not self.inimage):
if element == 'description':
element = 'tagline'
self.feeddata[element] = output
if element == 'category':
domain = self.feeddata['categories'][-1][0]
self.feeddata['categories'][-1] = (domain, output)
elif element == 'link':
self.feeddata['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.feeddata[element + '_detail'] = contentparams
return output
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _save(self, key, value):
if self.inentry:
self.entries[-1].setdefault(key, value)
elif self.feeddata:
self.feeddata.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
self.inimage = 1
self.push('image', 0)
context = self._getContext()
context.setdefault('image', FeedParserDict())
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
self.intextinput = 1
self.push('textinput', 0)
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
def _end_name(self):
value = self.pop('name')
if self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['textinput']['name'] = value
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
def _start_url(self, attrsD):
self.push('url', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('url')
if self.inauthor:
self._save_author('url', value)
elif self.incontributor:
self._save_contributor('url', value)
elif self.inimage:
context = self._getContext()
context['image']['url'] = value
elif self.intextinput:
context = self._getContext()
context['textinput']['link'] = value
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
def _end_email(self):
value = self.pop('email')
if self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
pass
def _getContext(self):
if self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value):
context = self._getContext()
context.setdefault('author_detail', FeedParserDict())
context['author_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = "%s (%s)" % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author = context.get(key)
if not author: return
emailmatch = re.search(r"""(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))""", author)
if not emailmatch: return
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
context.setdefault('%s_detail' % key, FeedParserDict())
context['%s_detail' % key]['name'] = author
context['%s_detail' % key]['email'] = email
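# Hedged illustration of the fallback branch above (not in the original
# source): an author string like "John Doe (jdoe@example.com)" is split so
# that context['author_detail'] ends up as
#   {'name': 'John Doe', 'email': 'jdoe@example.com'}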
def _start_tagline(self, attrsD):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'),
'type': attrsD.get('type', 'text/plain'),
'language': self.lang,
'base': self.baseuri})
self.push('tagline', 1)
_start_subtitle = _start_tagline
def _end_tagline(self):
value = self.pop('tagline')
self.incontent -= 1
self.contentparams.clear()
if self.infeed:
self.feeddata['description'] = value
_end_subtitle = _end_tagline
def _start_copyright(self, attrsD):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'),
'type': attrsD.get('type', 'text/plain'),
'language': self.lang,
'base': self.baseuri})
self.push('copyright', 1)
_start_dc_rights = _start_copyright
def _end_copyright(self):
self.pop('copyright')
self.incontent -= 1
self.contentparams.clear()
_end_dc_rights = _end_copyright
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_dcterms_issued(self, attrsD):
self.push('issued', 1)
_start_issued = _start_dcterms_issued
def _end_dcterms_issued(self):
value = self.pop('issued')
self._save('issued_parsed', _parse_date(value))
_end_issued = _end_dcterms_issued
def _start_dcterms_created(self, attrsD):
self.push('created', 1)
_start_created = _start_dcterms_created
def _end_dcterms_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_created = _end_dcterms_created
def _start_dcterms_modified(self, attrsD):
self.push('modified', 1)
_start_modified = _start_dcterms_modified
_start_dc_date = _start_dcterms_modified
_start_pubdate = _start_dcterms_modified
def _end_dcterms_modified(self):
value = self.pop('modified')
parsed_value = _parse_date(value)
self._save('modified_parsed', parsed_value)
_end_modified = _end_dcterms_modified
_end_dc_date = _end_dcterms_modified
_end_pubdate = _end_dcterms_modified
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
def _start_cc_license(self, attrsD):
self.push('license', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('license')
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
def _end_creativecommons_license(self):
self.pop('license')
def _start_category(self, attrsD):
self.push('category', 1)
domain = self._getAttribute(attrsD, 'domain')
cats = []
if self.inentry:
cats = self.entries[-1].setdefault('categories', [])
elif self.infeed:
cats = self.feeddata.setdefault('categories', [])
cats.append((domain, None))
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_category(self):
self.pop('category')
_end_dc_subject = _end_category
_end_keywords = _end_category
def _start_cloud(self, attrsD):
self.feeddata['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
attrsD.setdefault('type', 'text/html')
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry
if self.inentry:
self.entries[-1].setdefault('links', [])
self.entries[-1]['links'].append(FeedParserDict(attrsD))
elif self.infeed:
self.feeddata.setdefault('links', [])
self.feeddata['links'].append(FeedParserDict(attrsD))
if attrsD.has_key('href'):
expectingText = 0
if attrsD.get('type', '') in self.html_types:
if self.inentry:
self.entries[-1]['link'] = attrsD['href']
elif self.infeed:
self.feeddata['link'] = attrsD['href']
else:
self.push('link', expectingText)
_start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
if self.intextinput:
context = self._getContext()
context['textinput']['link'] = value
if self.inimage:
context = self._getContext()
context['image']['link'] = value
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if "ispermalink" is not present or is "true",
# and only if the item doesn't already have a link element
self._save('link', value)
def _start_id(self, attrsD):
self.push('id', 1)
def _end_id(self):
value = self.pop('id')
def _start_title(self, attrsD):
self.incontent += 1
if _debug: sys.stderr.write('attrsD.xml:lang = %s\n' % attrsD.get('xml:lang'))
if _debug: sys.stderr.write('self.lang = %s\n' % self.lang)
self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'),
'type': attrsD.get('type', 'text/plain'),
'language': self.lang,
'base': self.baseuri})
self.push('title', self.infeed or self.inentry)
_start_dc_title = _start_title
def _end_title(self):
value = self.pop('title')
self.incontent -= 1
self.contentparams.clear()
if self.intextinput:
context = self._getContext()
context['textinput']['title'] = value
elif self.inimage:
context = self._getContext()
context['image']['title'] = value
_end_dc_title = _end_title
def _start_description(self, attrsD, default_content_type='text/html'):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'),
'type': attrsD.get('type', default_content_type),
'language': self.lang,
'base': self.baseuri})
self.push('description', self.infeed or self.inentry)
def _start_abstract(self, attrsD):
return self._start_description(attrsD, 'text/plain')
def _end_description(self):
value = self.pop('description')
self.incontent -= 1
self.contentparams.clear()
context = self._getContext()
if self.intextinput:
context['textinput']['description'] = value
elif self.inimage:
context['image']['description'] = value
# elif self.inentry:
# context['summary'] = value
# elif self.infeed:
# context['tagline'] = value
_end_abstract = _end_description
def _start_info(self, attrsD):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'),
'type': attrsD.get('type', 'text/plain'),
'language': self.lang,
'base': self.baseuri})
self.push('info', 1)
def _end_info(self):
self.pop('info')
self.incontent -= 1
self.contentparams.clear()
def _start_generator(self, attrsD):
if attrsD:
if attrsD.has_key('url'):
attrsD['url'] = self.resolveURI(attrsD['url'])
self.feeddata['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
if self.feeddata.has_key('generator_detail'):
self.feeddata['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self.feeddata['generator_detail'] = FeedParserDict({"url": value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'),
'type': attrsD.get('type', 'text/plain'),
'language': self.lang,
'base': self.baseuri})
self.push('summary', 1)
def _end_summary(self):
value = self.pop('summary')
if self.entries:
self.entries[-1]['description'] = value
self.incontent -= 1
self.contentparams.clear()
def _start_enclosure(self, attrsD):
if self.inentry:
self.entries[-1].setdefault('enclosures', [])
self.entries[-1]['enclosures'].append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if self.inentry:
self.entries[-1]['source'] = FeedParserDict(attrsD)
self.push('source', 1)
def _end_source(self):
self.pop('source')
def _start_content(self, attrsD):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'xml'),
'type': attrsD.get('type', 'text/plain'),
'language': self.lang,
'base': self.baseuri})
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'xml'),
'type': attrsD.get('type', 'text/html'),
'language': self.lang,
'base': self.baseuri})
self.push('content', 1)
def _start_body(self, attrsD):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': 'xml',
'type': 'application/xhtml+xml',
'language': self.lang,
'base': self.baseuri})
self.push('content', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.incontent += 1
self.contentparams = FeedParserDict({'mode': 'escaped',
'type': 'text/html',
'language': self.lang,
'base': self.baseuri})
self.push('content', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
value = self.pop('content')
if self.contentparams.get('type') in (['text/plain'] + self.html_types):
self._save('description', value)
self.incontent -= 1
self.contentparams.clear()
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
namespace = str(namespace or '')
if namespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
prefix = self.namespaces.get(namespace, 'unknown')
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD = {}
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
prefix = self.namespaces.get(namespace, '')
if prefix:
attrlocalname = prefix + ":" + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
# def resolveEntity(self, publicId, systemId):
# return _StringIO()
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
namespace = str(namespace)
prefix = self.namespaces.get(namespace, '')
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding):
self.encoding = encoding
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def feed(self, data):
data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<(\S+)/>', r'<\1></\1>', data)
data = data.replace('&#39;', "'")
data = data.replace('&#34;', '"')
if self.encoding and (type(data) == types.UnicodeType):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
def normalize_attrs(self, attrs):
# utility method to be called by descendants
attrs = [(k.lower(), v) for k, v in attrs]
# if self.encoding:
# if _debug: sys.stderr.write('normalize_attrs, encoding=%s\n' % self.encoding)
# attrs = [(k, v.encode(self.encoding)) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class="screen">, tag="pre", attrs=[("class", "screen")]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs])
if tag in self.elements_no_end_tag:
self.pieces.append("<%(tag)s%(strattrs)s />" % locals())
else:
self.pieces.append("<%(tag)s%(strattrs)s>" % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be "pre"
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
# called for each character reference, e.g. for "&#160;", ref will be "160"
# Reconstruct the original character reference.
self.pieces.append("&#%(ref)s;" % locals())
def handle_entityref(self, ref):
# called for each entity reference, e.g. for "&copy;", ref will be "copy"
# Reconstruct the original entity reference.
self.pieces.append("&%(ref)s;" % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append("<!--%(text)s-->" % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append("<?%(text)s>" % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append("<!%(text)s>" % locals())
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def output(self):
"""Return processed HTML as a single string"""
return "".join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.get('mode') == 'escaped':
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = [('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')]
def __init__(self, baseuri, encoding):
_BaseHTMLProcessor.__init__(self, encoding)
self.baseuri = baseuri
def resolveURI(self, uri):
return urlparse.urljoin(self.baseuri, uri)
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
if _debug: sys.stderr.write("entering _resolveRelativeURIs\n")
p = _RelativeURIResolver(baseURI, encoding)
p.feed(htmlSource)
return p.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
'thead', 'tr', 'tt', 'u', 'ul', 'var']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
'usemap', 'valign', 'value', 'vspace', 'width']
unacceptable_elements_with_end_tag = ['script', 'applet']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
def unknown_starttag(self, tag, attrs):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
return
attrs = self.normalize_attrs(attrs)
attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
p = _HTMLSanitizer(encoding)
p.feed(htmlSource)
data = p.output()
if _mxtidy and TIDY_MARKUP:
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, output_xhtml=1, numeric_entities=1, wrap=0)
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
class _FeedURLHandler(urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
if ((code / 100) == 3) and (code != 304):
return self.http_error_302(req, fp, code, msg, headers)
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
return infourl
def http_error_302(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
def http_error_301(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
http_error_300 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it must be a tuple of 9 integers
as returned by gmtime() in the standard Python time module. This MUST
be in GMT (Greenwich Mean Time). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
"""
if hasattr(url_file_stream_or_string, "read"):
return url_file_stream_or_string
if url_file_stream_or_string == "-":
return sys.stdin
if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = "%s://%s%s" % (urltype, realhost, rest)
auth = base64.encodestring(user_passwd).strip()
# try to open with urllib2 (to use optional headers)
request = urllib2.Request(url_file_stream_or_string)
request.add_header("User-Agent", agent)
if etag:
request.add_header("If-None-Match", etag)
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
request.add_header("If-Modified-Since", "%s, %02d %s %04d %02d:%02d:%02d GMT" % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header("Referer", referrer)
if gzip and zlib:
request.add_header("Accept-encoding", "gzip, deflate")
elif gzip:
request.add_header("Accept-encoding", "gzip")
elif zlib:
request.add_header("Accept-encoding", "deflate")
else:
request.add_header("Accept-encoding", "")
if auth:
request.add_header("Authorization", "Basic %s" % auth)
if ACCEPT_HEADER:
request.add_header("Accept", ACCEPT_HEADER)
opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string)
except:
pass
# treat url_file_stream_or_string as string
return _StringIO(str(url_file_stream_or_string))
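# Illustrative sketch (hypothetical usage, not part of the original module; never
# called): the three kinds of input _open_resource() accepts, per its docstring.
# The URL and filename below are placeholders, not real resources.
def _example_open_resource_usage():
    # a URL: etag/modified/agent/referrer become HTTP request headers
    f = _open_resource('http://example.org/feed.xml', None, None, USER_AGENT, None, [])
    data = f.read()
    f.close()
    # a local filename
    f = _open_resource('/tmp/feed.xml', None, None, None, None, [])
    # raw feed data passed directly as a string
    f = _open_resource('<rss version="2.0"></rss>', None, None, None, None, [])
    return data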
_date_handlers = []
def registerDateHandler(func):
"""Register a date handler function (takes string, returns 9-tuple date in GMT)"""
_date_handlers.insert(0, func)
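# Illustrative sketch (hypothetical format, never called): plugging an extra
# "YYYYMMDDHHMMSS" date format into the handler chain. A real handler must
# return a 9-tuple in GMT (as from time.gmtime()) or None.
def _example_register_date_handler():
    def _parse_date_compact(dateString):
        try:
            return time.strptime(dateString, '%Y%m%d%H%M%S')
        except ValueError:
            return None
    registerDateHandler(_parse_date_compact)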
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
"""Parse a variety of ISO-8601-compatible formats like 20040105"""
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m: break
if not m: return
if m.span() == (0, 0): return
params = m.groupdict()
ordinal = params.get("ordinal", 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get("year", "--")
if not year or year == "--":
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get("month", "-")
if not month or month == "-":
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get("day", 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get("century", 0) or \
params.get("year", 0) or params.get("month", 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if "century" in params.keys():
year = (int(params["century"]) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ["hour", "minute", "second", "tzhour", "tzmin"]:
if not params.get(field, None):
params[field] = 0
hour = int(params.get("hour", 0))
minute = int(params.get("minute", 0))
second = int(params.get("second", 0))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
# daylight savings is complex, but not needed for feedparser's purposes
# as time zones, if specified, include mention of whether it is active
# (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
# and most implementations have DST bugs
daylight_savings_flag = 0
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get("tz")
if tz and tz != "Z":
if tz[0] == "-":
tm[3] += int(params.get("tzhour", 0))
tm[4] += int(params.get("tzmin", 0))
elif tz[0] == "+":
tm[3] -= int(params.get("tzhour", 0))
tm[4] -= int(params.get("tzmin", 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
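# Illustrative sketch (hypothetical samples, never called): a few of the shapes
# accepted by the template-based matcher above; each call returns a 9-tuple time
# or None if no template matches.
def _example_iso8601_inputs():
    samples = ['2004-01-05', '20040105', '2004-01-05T12:30:00Z', '2004-005']
    return [(s, _parse_date_iso8601(s)) for s in samples]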
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
"""Parse a string according to the OnBlog 8-bit date format"""
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write("OnBlog date parsed as: %s\n" % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
"""Parse a string according to the Nate 8-bit date format"""
m = _korean_nate_date_re.match(dateString)
if not m: return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write("Nate date parsed as: %s\n" % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})\.\d+')
def _parse_date_mssql(dateString):
"""Parse a string according to the MS SQL date format"""
m = _mssql_date_re.match(dateString)
if not m: return
w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write("MS SQL date parsed as: %s\n" % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
"""Parse a string according to a Greek 8-bit date format."""
m = _greek_date_format_re.match(dateString)
if not m: return
try:
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
except:
return
rfc822date = "%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s" % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
if _debug: sys.stderr.write("Greek date parsed as: %s\n" % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
"""Parse a string according to a Hungarian 8-bit date format."""
m = _hungarian_date_format_re.match(dateString)
if not m: return
try:
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
except:
return
w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s" % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
if _debug: sys.stderr.write("Hungarian date parsed as: %s\n" % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group("year"))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group("julian")
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group("month")
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group("day")
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group("hours")
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group("minutes"))
seconds = m.group("seconds")
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
"""Return the Time Zone Designator as an offset in seconds from UTC."""
if not m:
return 0
tzd = m.group("tzd")
if not tzd:
return 0
if tzd == "Z":
return 0
hours = int(m.group("tzdhours"))
minutes = m.group("tzdminutes")
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == "+":
return -offset
return offset
__date_re = ("(?P<year>\d\d\d\d)"
"(?:(?P<dsep>-|)"
"(?:(?P<julian>\d\d\d)"
"|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?")
__tzd_re = "(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)"
__tzd_rx = re.compile(__tzd_re)
__time_re = ("(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)"
"(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?"
+ __tzd_re)
__datetime_re = "%s(?:T%s)?" % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
"""Parse an RFC822, RFC1123, RFC2822, or asctime-style date"""
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# "ET" is equivalent to "EST", etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
"""Parses a variety of date formats into a 9-tuple in GMT"""
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write("date handler function must return 9-tuple\n")
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write("%s raised %s\n" % (handler.__name__, repr(e)))
pass
return None
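# Illustrative sketch (hypothetical samples, never called): _parse_date tries each
# registered handler in turn, so RFC822-style and W3C-style dates go through the
# same entry point.
def _example_parse_date_dispatch():
    samples = ['Thu, 01 Jan 2004 19:48:21 GMT', '2004-01-01T19:48:21+00:00']
    return [(s, _parse_date(s)) for s in samples]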
def _getCharacterEncoding(http_headers, xml_data):
"""Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ("XML Media Types"), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to "utf-8" if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to "us-ascii" if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
"iso-8859-1" as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). CJKCodecs and iconv_codec help a lot;
you should definitely install them if you can.
http://cjkpython.i18n.org/
"""
def _parseHTTPContentType(content_type):
"""takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
"""
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
return content_type, params.get('charset', '').replace("'", "")
sniffed_xml_encoding = ''
xml_encoding = ''
true_encoding = ''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get("content-type"))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = _ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
true_encoding = http_encoding or 'us-ascii'
elif http_headers and (not http_headers.has_key('content-type')):
true_encoding = xml_encoding or 'iso-8859-1'
else:
true_encoding = xml_encoding or 'utf-8'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
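# Illustrative sketch (hypothetical headers and data, never called): how the
# precedence rules described in the docstring above play out.
def _example_character_encoding():
    xml_data = '<?xml version="1.0" encoding="utf-8"?><feed></feed>'
    # a charset on an application/xml Content-Type wins over the XML declaration
    app_xml = _getCharacterEncoding({'content-type': 'application/xml; charset=iso-8859-2'}, xml_data)
    # text/xml with no charset ignores the XML declaration and yields us-ascii
    text_xml = _getCharacterEncoding({'content-type': 'text/xml'}, xml_data)
    return app_xml, text_xml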
def _toUTF8(data, encoding):
"""Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
"""
if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16be':
sys.stderr.write('trying utf-16be instead\n')
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16le':
sys.stderr.write('trying utf-16le instead\n')
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-8':
sys.stderr.write('trying utf-8 instead\n')
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32be':
sys.stderr.write('trying utf-32be instead\n')
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32le':
sys.stderr.write('trying utf-32le instead\n')
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = """<?xml version='1.0' encoding='utf-8'?>"""
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode("utf-8")
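# Illustrative sketch (made-up bytes, never called): _toUTF8 re-encodes raw bytes
# to UTF-8 and rewrites the XML declaration accordingly.
def _example_toUTF8():
    latin1_bytes = "<?xml version='1.0' encoding='iso-8859-1'?><feed><title>caf\xe9</title></feed>"
    # returns UTF-8 bytes whose declaration now reads encoding='utf-8'
    return _toUTF8(latin1_bytes, 'iso-8859-1')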
def _stripDoctype(data):
"""Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be "rss091n" or None
stripped_data is the same XML document, minus the DOCTYPE
"""
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
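# Illustrative sketch (made-up document, never called): stripping a Netscape
# RSS 0.91 DOCTYPE flags the feed version as 'rss091n'.
def _example_stripDoctype():
    doc = '<!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN" "http://my.netscape.com/publish/formats/rss-0.91.dtd"><rss version="0.91"></rss>'
    version, stripped = _stripDoctype(doc)
    return version, stripped  # ('rss091n', '<rss version="0.91"></rss>')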
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
"""Parse a feed from a URL, file, stream, or string"""
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, "headers"):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the "Accept-encoding: gzip" header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, "info"):
info = f.info()
result["etag"] = info.getheader("ETag")
last_modified = info.getheader("Last-Modified")
if last_modified:
result["modified"] = _parse_date(last_modified)
if hasattr(f, "url"):
result["url"] = f.url
result["status"] = 200
if hasattr(f, "status"):
result["status"] = f.status
if hasattr(f, "headers"):
result["headers"] = f.headers.dict
if hasattr(f, "close"):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get("headers", {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
result['version'], data = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('url'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get("status", 0) == 304:
result['version'] = ''
result['debug_message'] = "The feed has not changed since you last checked, " + \
"so the server sent no data. This is a feature, not a bug!"
return result
# if there was a problem downloading, we're done
if not data:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding, 'utf-8', 'windows-1252'):
if proposed_encoding in tried_encodings: continue
if not proposed_encoding: continue
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = 1
use_strict_parser = 1
break
except:
pass
tried_encodings.append(proposed_encoding)
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
"document encoding unknown, I tried " + \
"%s, %s, utf-8, and windows-1252 but nothing worked" % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
"documented declared as %s, but parsed as %s" % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
return result
if __name__ == '__main__':
if not sys.argv[1:]:
print __doc__
sys.exit(0)
else:
urls = sys.argv[1:]
zopeCompatibilityHack()
from pprint import pprint
for url in urls:
print url
print
result = parse(url)
pprint(result)
print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink="false" attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise it's
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: "description" gets
# description (which can come from description, summary, or full content if no
# description), "content" gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result["version"]
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of "name", "url", "email"); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ("text/xml; charset:iso-8859-1")
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for "Content-encoding: deflate";
# send blank "Accept-encoding: " header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as "text/plain"; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang="" to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
| ktf/newspipe | feedparser.py | Python | gpl-2.0 | 113,078 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Taggers
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (minor additions)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
NLTK Taggers
This package contains classes and interfaces for part-of-speech
tagging, or simply "tagging".
A "tag" is a case-sensitive string that specifies some property of a token,
such as its part of speech. Tagged tokens are encoded as tuples
``(token, tag)``. For example, the following tagged token combines
the word ``'fly'`` with a noun part of speech tag (``'NN'``):
>>> tagged_tok = ('fly', 'NN')
An off-the-shelf tagger is available. It uses the Penn Treebank tagset:
>>> from nltk import pos_tag, word_tokenize
>>> pos_tag(word_tokenize("John's big idea isn't all that bad."))
[('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
This package defines several taggers, which take a list of tokens,
assign a tag to each one, and return the resulting list of tagged tokens.
Most of the taggers are built automatically based on a training corpus.
For example, the unigram tagger tags each word *w* by checking what
the most frequent tag for *w* was in a training corpus:
>>> from nltk.corpus import brown
>>> from nltk.tag import UnigramTagger
>>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
>>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment']
>>> for word, tag in tagger.tag(sent):
... print(word, '->', tag)
Mitchell -> NP
decried -> None
the -> AT
high -> JJ
rate -> NN
of -> IN
unemployment -> None
Note that words that the tagger has not seen during training receive a tag
of ``None``.
We evaluate a tagger on data that was not seen during training:
>>> tagger.evaluate(brown.tagged_sents(categories='news')[500:600])
0.73...
For more information, please consult chapter 5 of the NLTK Book.
"""
from __future__ import print_function
from nltk.tag.api import TaggerI
from nltk.tag.util import str2tuple, tuple2str, untag
from nltk.tag.sequential import (SequentialBackoffTagger, ContextTagger,
DefaultTagger, NgramTagger, UnigramTagger,
BigramTagger, TrigramTagger, AffixTagger,
RegexpTagger, ClassifierBasedTagger,
ClassifierBasedPOSTagger)
from nltk.tag.brill import BrillTagger
from nltk.tag.brill_trainer import BrillTaggerTrainer
from nltk.tag.tnt import TnT
from nltk.tag.hunpos import HunposTagger
from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger
from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer
from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger
from nltk.tag.mapping import tagset_mapping, map_tag
from nltk.tag.crf import CRFTagger
from nltk.tag.perceptron import PerceptronTagger
from nltk.data import load, find
RUS_PICKLE = 'taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle'
def _get_tagger(lang=None):
if lang == 'rus':
tagger = PerceptronTagger(False)
ap_russian_model_loc = 'file:' + str(find(RUS_PICKLE))
tagger.load(ap_russian_model_loc)
else:
tagger = PerceptronTagger()
return tagger
def _pos_tag(tokens, tagset, tagger):
tagged_tokens = tagger.tag(tokens)
if tagset:
tagged_tokens = [(token, map_tag('en-ptb', tagset, tag)) for (token, tag) in tagged_tokens]
return tagged_tokens
def pos_tag(tokens, tagset=None, lang='eng'):
"""
Use NLTK's currently recommended part of speech tagger to
tag the given list of tokens.
>>> from nltk.tag import pos_tag
>>> from nltk.tokenize import word_tokenize
>>> pos_tag(word_tokenize("John's big idea isn't all that bad."))
[('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
>>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal')
[('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'),
("n't", 'ADV'), ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')]
NB. Use `pos_tag_sents()` for efficient tagging of more than one sentence.
:param tokens: Sequence of tokens to be tagged
:type tokens: list(str)
:param tagset: the tagset to be used, e.g. universal, wsj, brown
:type tagset: str
:param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
:type lang: str
:return: The tagged tokens
:rtype: list(tuple(str, str))
"""
tagger = _get_tagger(lang)
return _pos_tag(tokens, tagset, tagger)
def pos_tag_sents(sentences, tagset=None, lang='eng'):
"""
Use NLTK's currently recommended part of speech tagger to tag the
given list of sentences, each consisting of a list of tokens.
:param tokens: List of sentences to be tagged
:type tokens: list(list(str))
:param tagset: the tagset to be used, e.g. universal, wsj, brown
:type tagset: str
:param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
:type lang: str
:return: The list of tagged sentences
:rtype: list(list(tuple(str, str)))
"""
tagger = _get_tagger(lang)
return [_pos_tag(sent, tagset, tagger) for sent in sentences]
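# Illustrative sketch (made-up sentences, not part of the NLTK API and never
# invoked by the package): tagging several tokenized sentences in one call.
def _example_pos_tag_sents():
    from nltk.tokenize import word_tokenize
    sents = [word_tokenize("The cat sat on the mat."),
             word_tokenize("Dogs bark loudly.")]
    # one list of (token, tag) pairs per input sentence
    return pos_tag_sents(sents)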
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/nltk/tag/__init__.py | Python | apache-2.0 | 5,809 |
from core.himesis import Himesis
class HeclassOUTeOperationsSolveRefEClassEOperationEClassEOperation(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HeclassOUTeOperationsSolveRefEClassEOperationEClassEOperation.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HeclassOUTeOperationsSolveRefEClassEOperationEClassEOperation, self).__init__(name='HeclassOUTeOperationsSolveRefEClassEOperationEClassEOperation', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """eclassOUTeOperationsSolveRefEClassEOperationEClassEOperation"""
self["GUID__"] = 1978634013385514902
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 2517532533648662885
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 1376281682252862235
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 6597778609308859427
self.vs[3]["associationType"] = """eOperations"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 951748661951976089
self.vs[4]["associationType"] = """eOperations"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 4991072133922880865
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EClass"""
self.vs[5]["mm__"] = """EClass"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 6120985007253492729
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 7251367458087575278
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EOperation"""
self.vs[7]["mm__"] = """EOperation"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 7154449097454651421
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 2041339595589821480
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EClass"""
self.vs[9]["mm__"] = """EClass"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 5987017436268474592
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 8797509015852127828
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EOperation"""
self.vs[11]["mm__"] = """EOperation"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 1204046274037994408
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 2310104955353223654
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 6376438141223773572
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 2522384603297663574
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 8539777213793394590
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 510717554956897733
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 3417960432796638183
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 6889083972618139998
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 755211839417180077
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 5388846640980110718
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 8146402503070924045
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 2585667424221604689
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 8423814483472754989
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 1480124650021406173
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 53308935682951260
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 684516318178810309
| levilucio/SyVOLT | ECore_Copier_MM/transformation-Large/HeclassOUTeOperationsSolveRefEClassEOperationEClassEOperation.py | Python | mit | 4,976 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetContext
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Contexts_GetContext_sync]
from google.cloud import dialogflow_v2
def sample_get_context():
# Create a client
client = dialogflow_v2.ContextsClient()
# Initialize request argument(s)
request = dialogflow_v2.GetContextRequest(
name="name_value",
)
# Make the request
response = client.get_context(request=request)
# Handle the response
print(response)
# [END dialogflow_v2_generated_Contexts_GetContext_sync]
| googleapis/python-dialogflow | samples/generated_samples/dialogflow_v2_generated_contexts_get_context_sync.py | Python | apache-2.0 | 1,425 |
from django.conf.urls import patterns, include, url, static
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
#admin
url(r'^admin/', include(admin.site.urls)),
)
# Static URL. Needs to be removed on the production server.
urlpatterns += static.static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| dev-tools/chat-server-backend | chatsrvbackend/urls.py | Python | mit | 404 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .channel_partner_links import (
ChannelPartnerLink,
ChannelPartnerLinkState,
ChannelPartnerLinkView,
)
from .common import (
AdminUser,
CloudIdentityInfo,
EduData,
Value,
)
from .customers import (
ContactInfo,
Customer,
)
from .entitlements import (
AssociationInfo,
CommitmentSettings,
Entitlement,
Parameter,
ProvisionedService,
RenewalSettings,
TransferableSku,
TransferEligibility,
TrialSettings,
)
from .offers import (
Constraints,
CustomerConstraints,
Offer,
ParameterDefinition,
Period,
Plan,
Price,
PriceByResource,
PricePhase,
PriceTier,
PaymentPlan,
PaymentType,
PeriodType,
PromotionalOrderType,
ResourceType,
)
from .operations import OperationMetadata
from .products import (
MarketingInfo,
Media,
Product,
Sku,
MediaType,
)
from .service import (
ActivateEntitlementRequest,
CancelEntitlementRequest,
ChangeOfferRequest,
ChangeParametersRequest,
ChangeRenewalSettingsRequest,
CheckCloudIdentityAccountsExistRequest,
CheckCloudIdentityAccountsExistResponse,
CloudIdentityCustomerAccount,
CreateChannelPartnerLinkRequest,
CreateCustomerRequest,
CreateEntitlementRequest,
DeleteCustomerRequest,
GetChannelPartnerLinkRequest,
GetCustomerRequest,
GetEntitlementRequest,
ImportCustomerRequest,
ListChannelPartnerLinksRequest,
ListChannelPartnerLinksResponse,
ListCustomersRequest,
ListCustomersResponse,
ListEntitlementsRequest,
ListEntitlementsResponse,
ListOffersRequest,
ListOffersResponse,
ListProductsRequest,
ListProductsResponse,
ListPurchasableOffersRequest,
ListPurchasableOffersResponse,
ListPurchasableSkusRequest,
ListPurchasableSkusResponse,
ListSkusRequest,
ListSkusResponse,
ListSubscribersRequest,
ListSubscribersResponse,
ListTransferableOffersRequest,
ListTransferableOffersResponse,
ListTransferableSkusRequest,
ListTransferableSkusResponse,
LookupOfferRequest,
ProvisionCloudIdentityRequest,
PurchasableOffer,
PurchasableSku,
RegisterSubscriberRequest,
RegisterSubscriberResponse,
StartPaidServiceRequest,
SuspendEntitlementRequest,
TransferableOffer,
TransferEntitlementsRequest,
TransferEntitlementsResponse,
TransferEntitlementsToGoogleRequest,
UnregisterSubscriberRequest,
UnregisterSubscriberResponse,
UpdateChannelPartnerLinkRequest,
UpdateCustomerRequest,
)
from .subscriber_event import (
CustomerEvent,
EntitlementEvent,
SubscriberEvent,
)
__all__ = (
"ChannelPartnerLink",
"ChannelPartnerLinkState",
"ChannelPartnerLinkView",
"AdminUser",
"CloudIdentityInfo",
"EduData",
"Value",
"ContactInfo",
"Customer",
"AssociationInfo",
"CommitmentSettings",
"Entitlement",
"Parameter",
"ProvisionedService",
"RenewalSettings",
"TransferableSku",
"TransferEligibility",
"TrialSettings",
"Constraints",
"CustomerConstraints",
"Offer",
"ParameterDefinition",
"Period",
"Plan",
"Price",
"PriceByResource",
"PricePhase",
"PriceTier",
"PaymentPlan",
"PaymentType",
"PeriodType",
"PromotionalOrderType",
"ResourceType",
"OperationMetadata",
"MarketingInfo",
"Media",
"Product",
"Sku",
"MediaType",
"ActivateEntitlementRequest",
"CancelEntitlementRequest",
"ChangeOfferRequest",
"ChangeParametersRequest",
"ChangeRenewalSettingsRequest",
"CheckCloudIdentityAccountsExistRequest",
"CheckCloudIdentityAccountsExistResponse",
"CloudIdentityCustomerAccount",
"CreateChannelPartnerLinkRequest",
"CreateCustomerRequest",
"CreateEntitlementRequest",
"DeleteCustomerRequest",
"GetChannelPartnerLinkRequest",
"GetCustomerRequest",
"GetEntitlementRequest",
"ImportCustomerRequest",
"ListChannelPartnerLinksRequest",
"ListChannelPartnerLinksResponse",
"ListCustomersRequest",
"ListCustomersResponse",
"ListEntitlementsRequest",
"ListEntitlementsResponse",
"ListOffersRequest",
"ListOffersResponse",
"ListProductsRequest",
"ListProductsResponse",
"ListPurchasableOffersRequest",
"ListPurchasableOffersResponse",
"ListPurchasableSkusRequest",
"ListPurchasableSkusResponse",
"ListSkusRequest",
"ListSkusResponse",
"ListSubscribersRequest",
"ListSubscribersResponse",
"ListTransferableOffersRequest",
"ListTransferableOffersResponse",
"ListTransferableSkusRequest",
"ListTransferableSkusResponse",
"LookupOfferRequest",
"ProvisionCloudIdentityRequest",
"PurchasableOffer",
"PurchasableSku",
"RegisterSubscriberRequest",
"RegisterSubscriberResponse",
"StartPaidServiceRequest",
"SuspendEntitlementRequest",
"TransferableOffer",
"TransferEntitlementsRequest",
"TransferEntitlementsResponse",
"TransferEntitlementsToGoogleRequest",
"UnregisterSubscriberRequest",
"UnregisterSubscriberResponse",
"UpdateChannelPartnerLinkRequest",
"UpdateCustomerRequest",
"CustomerEvent",
"EntitlementEvent",
"SubscriberEvent",
)
| googleapis/python-channel | google/cloud/channel_v1/types/__init__.py | Python | apache-2.0 | 5,887 |
"""
Copyright (C) 2014, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import importlib
class IterationTerminator:
def __init__(self, iteration):
self.iteration = iteration
def __call__(self, optimizer, optimization):
return optimization.iteration < self.iteration
class EvaluationTerminator:
def __init__(self, evaluation):
self.evaluation = evaluation
def __call__(self, optimizer, optimization):
return optimizer.GetProblem().GetNumberOfEvaluations() < self.evaluation
def get_terminator(config, optimizer):
termination = config.get('optimization', 'termination')
if termination == 'iteration':
module, function = config.get('optimization', termination).rsplit('.', 1)
module = importlib.import_module(module)
iteration = getattr(module, function)(config, optimizer)
terminator = IterationTerminator(iteration)
elif termination == 'evaluation':
module, function = config.get('optimization', termination).rsplit('.', 1)
module = importlib.import_module(module)
evaluation = getattr(module, function)(config, optimizer)
terminator = EvaluationTerminator(evaluation)
return terminator
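# Illustrative sketch (not part of the original module): IterationTerminator
# gating a plain loop. The "optimization" object below is a stand-in exposing
# only the single attribute the terminator reads; the real PyOptimization
# objects are richer.
if __name__ == '__main__':
    class _FakeOptimization(object):
        iteration = 0
    _state = _FakeOptimization()
    _stop_check = IterationTerminator(5)
    while _stop_check(None, _state):  # the optimizer argument is unused here
        _state.iteration += 1
    print(_state.iteration)  # prints 5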
| O-T-L/PyOptimization | pyoptimization/optimizer/terminator.py | Python | lgpl-3.0 | 1,823 |
from unittest import TestCase
import mock
import arithmetic_computing.helper.file_access as file_access
class TestFileAccess(TestCase):
@mock.patch('arithmetic_computing.helper.file_access.os.path')
def test_read_lines_file_not_exists(self, mock_path):
mock_path.isfile.return_value = False
with self.assertRaises(IOError):
file_access.read_lines("test")
@mock.patch('arithmetic_computing.helper.file_access.open', create=True)
@mock.patch('arithmetic_computing.helper.file_access.os.path')
def test_read_lines_empty_file_open_called(self, mock_path, mock_open):
mock_path.isfile.return_value = True
fake_path = "/fake/path"
mock_path.abspath.return_value = fake_path
mock_path.expanduser.return_value = fake_path
# Mock the open in the with statement
file_mock = mock.MagicMock(spec=file)
mock_open.return_value.__enter__.return_value = file_mock
# Empty file
file_mock.read.return_value = ""
result = file_access.read_lines(fake_path)
self.assertEqual(result, [])
@mock.patch('arithmetic_computing.helper.file_access.open', create=True)
@mock.patch('arithmetic_computing.helper.file_access.os.path')
def test_read_lines_file_open_called(self, mock_path, mock_open):
mock_path.isfile.return_value = True
fake_path = "/fake/path"
mock_path.abspath.return_value = fake_path
mock_path.expanduser.return_value = fake_path
# Mock the open in the with statement
file_mock = mock.MagicMock(spec=file)
mock_open.return_value.__enter__.return_value = file_mock
file_mock.read.return_value = "line_1\nline_2"
result = file_access.read_lines(fake_path)
self.assertEqual(result, ['line_1', 'line_2'])
@mock.patch('arithmetic_computing.helper.file_access.open', create=True)
def test_writes_lines_empty(self, mock_open):
# Mock the open in the with statement
file_mock = mock.MagicMock(spec=file)
mock_open.return_value.__enter__.return_value = file_mock
path = "fake/path"
file_access.write_lines(path, [])
file_mock.write.assert_called_with('\n')
@mock.patch('arithmetic_computing.helper.file_access.open', create=True)
def test_writes_lines(self, mock_open):
# Mock the open in the with statement
file_mock = mock.MagicMock(spec=file)
mock_open.return_value.__enter__.return_value = file_mock
path = "fake/path"
file_access.write_lines(path, ['line_1', 'line_2'])
file_mock.write.assert_called_with('line_1\nline_2\n')
| Darkheir/arithmetic-computing | tests/helper/test_file_access.py | Python | mit | 2,668 |
string = "75Number9"
numArr = []
newstr = list(string)
i = (len(newstr)-2)
while i > 0:
if newstr[i].isdigit() and newstr[i+1].isdigit():
print newstr[i]
print newstr[i+1]
numArr.append(int(newstr[i]+newstr[i+1]))
i += 1
elif newstr[i].isdigit():
numArr.append(int(newstr[i]))
i -= 1
print(numArr)
total = sum(numArr)
print (total) | ismk/Python-Examples | coderbyte exercises.py | Python | mit | 345 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 22:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crm', '0011_auto_20170107_2208'),
]
operations = [
migrations.CreateModel(
name='MasterListSelections',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('geo', models.CharField(blank=True, choices=[('East', 'East'), ('West', 'West'), ('Maritimes/East', 'Maritimes'), ('USA', 'USA'), ('Other', 'Other Foreign'), ('Unknown', 'Unknown'), ('', '---')], default='', max_length=10)),
('main_category', models.CharField(blank=True, choices=[('HR', 'HR'), ('FIN', 'FIN'), ('Industry', 'Industry'), ('Aboriginal', 'Aboriginal'), ('Gov', 'Gov'), ('NA', 'None'), ('', '---')], default='', max_length=25)),
('main_category2', models.CharField(blank=True, choices=[('HR', 'HR'), ('FIN', 'FIN'), ('Industry', 'Industry'), ('Aboriginal', 'Aboriginal'), ('Gov', 'Gov'), ('NA', 'None'), ('', '---')], default='', max_length=15)),
('company', models.CharField(blank=True, max_length=100)),
('industry', models.CharField(blank=True, max_length=100)),
('dept', models.CharField(blank=True, max_length=255)),
('include_exclude', models.CharField(choices=[('include', 'include'), ('exclude', 'exclude')], default='include', max_length=7)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crm.Event')),
],
),
]
| asterix135/infonex_crm | crm/migrations/0012_masterlistselections.py | Python | mit | 1,747 |
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.rest import assert_response
from cfme.utils.rest import delete_resources_from_collection
from cfme.utils.rest import delete_resources_from_detail
from cfme.utils.wait import wait_for
pytestmark = [test_requirements.rest, pytest.mark.tier(1)]
@pytest.mark.ignore_stream("5.10")
@pytest.mark.meta(automates=[1695566])
def test_update_advanced_settings_new_key(appliance, request):
"""
This test case checks updating advanced settings with a new key-value pair
and tests that this change does not break the Configuration page
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
Bugzilla:
1695566
"""
data = {"new_key": "new value"}
appliance.update_advanced_settings(data)
@request.addfinalizer
def _reset_settings():
data["new_key"] = "<<reset>>"
appliance.update_advanced_settings(data)
assert "new_key" in appliance.advanced_settings
view = navigate_to(appliance.server, "Advanced")
assert view.is_displayed
@pytest.mark.meta(automates=[1805844])
@pytest.mark.parametrize("from_detail", [True, False], ids=["from_detail", "from_collection"])
def test_edit_region(temp_appliance_preconfig_funcscope, from_detail):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
"""
appliance = temp_appliance_preconfig_funcscope
ui_region = appliance.server.zone.region
view = navigate_to(ui_region, "Details")
payload = {
"description": fauxfactory.gen_alpha(start=f"Edited {ui_region.region_string} ", length=20)
}
expected_title = f'CFME Region "{payload["description"]} [{ui_region.number}]"'
currently_selected = f'CFME Region: {payload["description"]} [{ui_region.number}]'
region = ui_region.rest_api_entity
if from_detail:
region.action.edit(**payload)
else:
payload.update(region._ref_repr())
appliance.rest_api.collections.regions.action.edit(payload)
assert_response(appliance)
wait_for(
lambda: region.description == payload["description"], fail_func=region.reload, timeout=30
)
wait_for(lambda: view.title.text == expected_title, fail_func=view.browser.refresh, timeout=800)
assert currently_selected in view.accordions.settings.tree.currently_selected
@pytest.mark.meta(automates=1805844)
class TestZoneRESTAPI:
@pytest.fixture
def zone(self, temp_appliance_preconfig_funcscope):
appliance = temp_appliance_preconfig_funcscope
payload = {
"name": fauxfactory.gen_alpha(start="Zone "),
"description": fauxfactory.gen_alpha(start="Zone desc ", length=12),
}
zone = appliance.rest_api.collections.zones.action.create(payload)[0]
yield zone
if zone.exists:
zone.action.delete()
def test_create_zone(self, zone, temp_appliance_preconfig_funcscope):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
"""
get_zone = temp_appliance_preconfig_funcscope.rest_api.collections.zones.get(id=zone.id)
assert get_zone.name == zone.name
assert get_zone.description == zone.description
@pytest.mark.parametrize("from_detail", [True, False], ids=["from_detail", "from_collection"])
def test_edit_zone(self, zone, temp_appliance_preconfig_funcscope, from_detail):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
"""
appliance = temp_appliance_preconfig_funcscope
ui_zone = appliance.collections.zones.instantiate(
name=zone.name, description=zone.description, id=zone.id
)
view = navigate_to(ui_zone, "Zone")
payload = {"name": fauxfactory.gen_alpha(start=f"edited-{zone.name}-", length=21)}
if from_detail:
zone.action.edit(**payload)
else:
payload.update(zone._ref_repr())
appliance.rest_api.collections.zones.action.edit(payload)
assert_response(appliance)
wait_for(lambda: zone.name == payload["name"], fail_func=zone.reload, timeout=30)
wait_for(
lambda: view.zone.basic_information.get_text_of("Name") == zone.name,
fail_func=view.browser.refresh,
timeout=30,
)
@pytest.mark.parametrize("method", ["POST", "DELETE"])
def test_delete_zone_from_detail(self, zone, method):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
"""
delete_resources_from_detail([zone], method=method)
def test_delete_zone_from_collections(self, zone):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
"""
delete_resources_from_collection([zone])
def test_delete_assigned_zone(self, zone, temp_appliance_preconfig_funcscope):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
setup:
1. Create a new zone.
testSteps:
1. Assign the zone to server.
2. Perform delete action on the zone.
3. Check if the zone exists
4. Unassign the zone from server.
5. Perform delete action on the zone.
6. Check if the zone exists.
expectedResults:
1. Zone is successfully assigned to the server.
2. Zone is not deleted because it is used by the server.
3. Zone exists.
4. Zone unassigned successfully.
5. Zone is deleted.
6. Zone does not exist.
"""
appliance = temp_appliance_preconfig_funcscope
default_zone = appliance.server.zone
server_settings = appliance.server.settings
server_settings.update_basic_information({"appliance_zone": zone.name})
assert zone.description == appliance.server.zone.description
response = zone.action.delete()
assert not response["success"]
assert response["message"] == f"zone name '{zone.name}' is used by a server"
assert zone.exists
server_settings.update_basic_information({"appliance_zone": default_zone.name})
assert default_zone.description == appliance.server.zone.description
zone.action.delete()
assert_response(appliance)
assert zone.wait_not_exists()
def test_delete_default_zone(self, temp_appliance_preconfig_funcscope):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
testSteps:
1. Get the default zone assigned to the server.
2. Assert the zone name is "default"
3. Perform delete action on the zone.
4. Check if the zone exists.
expectedResults:
1.
2.
3. Delete action fails because zone is named default.
4. Zone still exists.
"""
appliance = temp_appliance_preconfig_funcscope
zone = appliance.server.zone
assert zone.name == "default"
response = zone.rest_api_entity.action.delete()
assert not response["success"]
assert response["message"] == "cannot delete default zone"
assert zone.exists
@pytest.mark.meta(automates=[1805844])
class TestServerRESTAPI:
@pytest.mark.parametrize("from_detail", [True, False], ids=["from_detail", "from_collection"])
def test_edit_server(self, temp_appliance_preconfig_funcscope, from_detail):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
"""
appliance = temp_appliance_preconfig_funcscope
server = appliance.server.rest_api_entity
payload = {"name": fauxfactory.gen_alpha(start=f"Edited-{server.name}", length=15)}
view = navigate_to(appliance.server, "Server")
if from_detail:
server.action.edit(**payload)
else:
payload.update(server._ref_repr())
appliance.rest_api.collections.servers.action.edit(payload)
assert_response(appliance)
wait_for(lambda: server.name == payload["name"], fail_func=server.reload, timeout=30)
wait_for(
lambda: view.basic_information.appliance_name.read() == server.name,
fail_func=view.browser.refresh,
timeout=30,
)
@pytest.mark.parametrize("from_detail", [True, False], ids=["from_detail", "from_collection"])
def test_delete_server(self, temp_appliance_preconfig_funcscope, from_detail):
"""
Bugzilla:
1805844
Polarion:
assignee: pvala
casecomponent: Configuration
caseimportance: medium
initialEstimate: 1/10h
"""
appliance = temp_appliance_preconfig_funcscope
server = appliance.server.rest_api_entity
if from_detail:
response = server.action.delete()
else:
response = appliance.rest_api.collections.servers.action.delete(server._ref_repr())[0]
assert not response["success"]
assert response["message"] == "Failed to destroy the record"
| nachandr/cfme_tests | cfme/tests/configure/test_rest_config.py | Python | gpl-2.0 | 10,376 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from sentry.models import Rule
from sentry.web.frontend.base import ProjectView
class ProjectRuleRemoveView(ProjectView):
required_scope = 'project:write'
def post(self, request, organization, team, project, rule_id):
path = reverse('sentry-project-rules', args=[organization.slug, project.slug])
try:
rule = Rule.objects.get(project=project, id=rule_id)
except Rule.DoesNotExist:
return self.redirect(path)
rule.delete()
messages.add_message(request, messages.SUCCESS,
_('The rule was removed.'))
return self.redirect(path)
| nicholasserra/sentry | src/sentry/web/frontend/project_rule_remove.py | Python | bsd-3-clause | 802 |
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.utils.safestring import mark_safe
from django.utils.functional import cached_property
from django.db import models
from django.db.models import Avg, Min, Max, Count, Sum, F
from django.db.models.fields import FieldDoesNotExist
from six import text_type
from report_builder.unique_slugify import unique_slugify
from report_utils.model_introspection import get_model_from_path_string
from .utils import sort_data, increment_total, formatter
from dateutil import parser
from decimal import Decimal
from functools import reduce
import time
import datetime
import re
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def get_allowed_models():
models = ContentType.objects.all()
if getattr(settings, 'REPORT_BUILDER_INCLUDE', False):
all_model_names = []
additional_models = []
for element in settings.REPORT_BUILDER_INCLUDE:
split_element = element.split('.')
if len(split_element) == 2:
additional_models.append(models.filter(app_label=split_element[0], model=split_element[1]))
else:
all_model_names.append(element)
models = models.filter(model__in=all_model_names)
for additional_model in additional_models:
models = models | additional_model
if getattr(settings, 'REPORT_BUILDER_EXCLUDE', False):
all_model_names = []
for element in settings.REPORT_BUILDER_EXCLUDE:
split_element = element.split('.')
if len(split_element) == 2:
models = models.exclude(app_label=split_element[0], model=split_element[1])
else:
all_model_names.append(element)
models = models.exclude(model__in=all_model_names)
return models
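# Illustrative sketch (not part of the original module): the settings consumed
# by get_allowed_models(). Entries may be "app_label.model" pairs or bare model
# names; the values below are hypothetical.
#
#     REPORT_BUILDER_INCLUDE = ['auth.user', 'comment']
#     REPORT_BUILDER_EXCLUDE = ['auth.group']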
class Report(models.Model):
""" A saved report with queryset and descriptive fields
"""
def _get_model_manager(self):
""" Get manager from settings else use objects
"""
model_manager = 'objects'
if getattr(settings, 'REPORT_BUILDER_MODEL_MANAGER', False):
model_manager = settings.REPORT_BUILDER_MODEL_MANAGER
return model_manager
@staticmethod
def allowed_models():
return get_allowed_models()
name = models.CharField(max_length=255)
slug = models.SlugField(verbose_name="Short Name")
description = models.TextField(blank=True)
root_model = models.ForeignKey(
ContentType, limit_choices_to={'pk__in': get_allowed_models})
created = models.DateField(auto_now_add=True)
modified = models.DateField(auto_now=True)
user_created = models.ForeignKey(
AUTH_USER_MODEL, editable=False, blank=True, null=True)
user_modified = models.ForeignKey(
AUTH_USER_MODEL, editable=False, blank=True, null=True,
related_name="report_modified_set")
distinct = models.BooleanField(default=False)
report_file = models.FileField(upload_to="report_files", blank=True)
report_file_creation = models.DateTimeField(blank=True, null=True)
starred = models.ManyToManyField(
AUTH_USER_MODEL, blank=True,
help_text="These users have starred this report for easy reference.",
related_name="report_starred_set")
def save(self, *args, **kwargs):
if not self.id:
unique_slugify(self, self.name)
super(Report, self).save(*args, **kwargs)
def add_aggregates(self, queryset, display_fields=None):
agg_funcs = {
'Avg': Avg, 'Min': Min, 'Max': Max, 'Count': Count, 'Sum': Sum
}
if display_fields is None:
display_fields = self.displayfield_set.filter(
aggregate__isnull=False)
for display_field in display_fields:
if display_field.aggregate:
func = agg_funcs[display_field.aggregate]
full_name = display_field.path + display_field.field
queryset = queryset.annotate(func(full_name))
return queryset
@property
def root_model_class(self):
return self.root_model.model_class()
def get_field_type(self, field_name, path=""):
""" Get field type for given field name.
field_name is the full path of the field
path is optional
"""
model = get_model_from_path_string(
self.root_model_class, path + field_name)
# Is it a ORM field?
try:
return model._meta.get_field_by_name(
field_name)[0].get_internal_type()
except FieldDoesNotExist:
pass
# Is it a property?
field_attr = getattr(model, field_name, None)
if isinstance(field_attr, (property, cached_property)):
return "Property"
# Is it a custom field?
try:
model().get_custom_field(field_name)
return "Custom Field"
except (ObjectDoesNotExist, AttributeError):
pass
return "Invalid"
def get_good_display_fields(self):
""" Returns only valid display fields """
display_fields = self.displayfield_set.all()
bad_display_fields = []
for display_field in display_fields:
if display_field.field_type == "Invalid":
bad_display_fields.append(display_field)
return display_fields.exclude(id__in=[o.id for o in bad_display_fields])
def report_to_list(self, queryset=None, user=None, preview=False):
""" Convert report into list. """
property_filters = []
if queryset is None:
queryset = self.get_query()
for field in self.filterfield_set.all():
if field.field_type in ["Property"]:
property_filters += [field]
display_fields = self.get_good_display_fields()
# Need the pk for inserting properties later
display_field_paths = ['pk']
display_field_properties = []
display_totals = []
insert_property_indexes = []
choice_lists = {}
display_formats = {}
i = 0
for display_field in display_fields:
if display_field.total:
display_field.total_count = Decimal(0.0)
display_totals.append(display_field)
display_field_type = display_field.field_type
if display_field_type == "Property":
display_field_properties.append(display_field.field_key)
insert_property_indexes.append(i)
else:
i += 1
if display_field.aggregate:
display_field_paths += [
display_field.field_key +
'__' + display_field.aggregate.lower()]
else:
display_field_paths += [display_field.field_key]
# Build display choices list
if display_field.choices and hasattr(display_field, 'choices_dict'):
choice_list = display_field.choices_dict
# Insert blank and None as valid choices.
choice_list[''] = ''
choice_list[None] = ''
choice_lists[display_field.position] = choice_list
# Build display format list
if (
hasattr(display_field, 'display_format')
and display_field.display_format
):
display_formats[display_field.position] = \
display_field.display_format
property_filters = []
for filter_field in self.filterfield_set.all():
filter_field_type = filter_field.field_type
if filter_field_type == "Property":
property_filters += [field]
group = [df.path + df.field for df in display_fields if df.group]
# To support group-by with multiple fields, we turn all the other
# fields into aggregations. The default aggregation is `Max`.
if group:
for field in display_fields:
if (not field.group) and (not field.aggregate):
field.aggregate = 'Max'
values = queryset.values(*group)
values = self.add_aggregates(values, display_fields)
data_list = []
for row in values:
row_data = []
for field in display_field_paths:
if field == 'pk':
continue
try:
row_data.append(row[field])
except KeyError:
row_data.append(row[field + '__max'])
for total in display_totals:
increment_total(total, row_data)
data_list.append(row_data)
else:
values_list = list(queryset.values_list(*display_field_paths))
data_list = []
values_index = 0
for obj in queryset:
display_property_values = []
for display_property in display_field_properties:
relations = display_property.split('__')
val = reduce(getattr, relations, obj)
display_property_values.append(val)
value_row = values_list[values_index]
while value_row[0] == obj.pk:
add_row = True
data_row = list(value_row[1:]) # Remove added pk
# Insert in the location dictated by the order of display fields
for i, prop_value in enumerate(display_property_values):
data_row.insert(insert_property_indexes[i], prop_value)
for property_filter in property_filters:
relations = property_filter.field_key.split('__')
val = reduce(getattr, relations, obj)
if property_filter.filter_property(val):
add_row = False
if add_row is True:
for total in display_totals:
increment_total(total, data_row)
# Replace choice data with display choice string
for position, choice_list in choice_lists.items():
try:
data_row[position] = text_type(choice_list[data_row[position]])
except Exception:
data_row[position] = text_type(data_row[position])
for position, style in display_formats.items():
data_row[position] = formatter(data_row[position], style)
data_list.append(data_row)
values_index += 1
try:
value_row = values_list[values_index]
except IndexError:
break
for display_field in display_fields.filter(
sort__gt=0
).order_by('-sort'):
data_list = sort_data(data_list, display_field)
if display_totals:
display_totals_row = []
i = 0
for display_field in display_totals:
while i < display_field.position:
i += 1
display_totals_row.append('')
i += 1
display_totals_row.append(display_field.total_count)
# Add formats to display totals
for pos, style in display_formats.items():
display_totals_row[pos] = formatter(display_totals_row[pos], style)
data_list += [
['TOTALS'] + (len(display_fields) - 1) * ['']
] + [display_totals_row]
return data_list
def get_query(self):
report = self
model_class = self.root_model_class
# Check for report_builder_model_manger property on the model
if getattr(model_class, 'report_builder_model_manager', False):
objects = getattr(model_class, 'report_builder_model_manager').all()
else:
# Get global model manager
manager = report._get_model_manager()
objects = getattr(model_class, manager).all()
# Filters
# NOTE: group all the filters together into one in order to avoid
# unnecessary joins
filters = {}
excludes = {}
for filter_field in report.filterfield_set.order_by('position'):
# exclude properties from standard ORM filtering
if filter_field.field_type == "Property":
continue
if filter_field.field_type == "Custom Field":
continue
if filter_field.filter_type in ('max', 'min'):
# Annotation-filters are handled below.
continue
filter_string = str(filter_field.path + filter_field.field)
if filter_field.filter_type:
filter_string += '__' + filter_field.filter_type
# Check for special types such as isnull
if (filter_field.filter_type == "isnull"
and filter_field.filter_value in ["0", "False"]):
filter_ = {filter_string: False}
elif filter_field.filter_type == "in":
filter_ = {filter_string: filter_field.filter_value.split(',')}
else:
filter_value = filter_field.filter_value
if filter_field.filter_type == 'range':
filter_value = [filter_value, filter_field.filter_value2]
filter_ = {filter_string: filter_value}
if not filter_field.exclude:
filters.update(filter_)
else:
excludes.update(filter_)
if filters:
objects = objects.filter(**filters)
if excludes:
objects = objects.exclude(**excludes)
# Apply annotation-filters after regular filters.
for filter_field in report.filterfield_set.order_by('position'):
if filter_field.filter_type in ('max', 'min'):
func = {'max': Max, 'min': Min}[filter_field.filter_type]
column_name = '{0}{1}__{2}'.format(
filter_field.path,
filter_field.field,
filter_field.field_type
)
filter_string = filter_field.path + filter_field.field
annotate_args = {column_name: func(filter_string)}
filter_args = {column_name: F(filter_field.field)}
objects = objects.annotate(**annotate_args).filter(**filter_args)
# Aggregates
objects = self.add_aggregates(objects)
# Distinct
if report.distinct:
objects = objects.distinct()
return objects
@models.permalink
def get_absolute_url(self):
return ("report_update_view", [str(self.id)])
def edit(self):
return mark_safe(
'<a href="{0}"><img style="width: 26px; margin: -6px" src="{1}report_builder/img/edit.svg"/></a>'.format(
self.get_absolute_url(),
getattr(settings, 'STATIC_URL', '/static/')
)
)
edit.allow_tags = True
def download_xlsx(self):
if getattr(settings, 'REPORT_BUILDER_ASYNC_REPORT', False):
return mark_safe(
'<a href="javascript:void(0)" onclick="get_async_report({0})"><img style="width: 26px; margin: -6px" src="{1}report_builder/img/download.svg"/></a>'.format(
self.id,
getattr(settings, 'STATIC_URL', '/static/'),
)
)
else:
return mark_safe(
'<a href="{0}"><img style="width: 26px; margin: -6px" src="{1}report_builder/img/download.svg"/></a>'.format(
reverse('report_download_file', args=[self.id]),
getattr(settings, 'STATIC_URL', '/static/'),
)
)
download_xlsx.short_description = "Download"
download_xlsx.allow_tags = True
def copy_report(self):
return '<a href="{0}"><img style="width: 26px; margin: -6px" src="{1}report_builder/img/copy.svg"/></a>'.format(
reverse('report_builder.views.create_copy', args=[self.id]),
getattr(settings, 'STATIC_URL', '/static/'),
)
copy_report.short_description = "Copy"
copy_report.allow_tags = True
def check_report_display_field_positions(self):
""" After report is saved, make sure positions are sane
"""
for i, display_field in enumerate(self.displayfield_set.all()):
if display_field.position != i + 1:
display_field.position = i + 1
display_field.save()
class Format(models.Model):
""" A specifies a Python string format for e.g. `DisplayField`s.
"""
name = models.CharField(max_length=50, blank=True, default='')
string = models.CharField(
max_length=300, blank=True, default='',
help_text='Python string format. Ex ${} would place a $ in front of the result.'
)
def __unicode__(self):
return self.name
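# Illustrative sketch (not part of the original module): the ``string`` column
# of ``Format`` holds a plain ``str.format`` template applied to a single
# value, as the help_text above describes. The names here are made up.
def _format_string_example():
    money_template = '${}'              # e.g. a Format.string value
    return money_template.format(99.5)  # -> '$99.5'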
class AbstractField(models.Model):
report = models.ForeignKey(Report)
path = models.CharField(max_length=2000, blank=True)
path_verbose = models.CharField(max_length=2000, blank=True)
field = models.CharField(max_length=2000)
field_verbose = models.CharField(max_length=2000)
position = models.PositiveSmallIntegerField(blank=True, null=True)
class Meta:
abstract = True
ordering = ['position']
@property
def field_type(self):
return self.report.get_field_type(self.field, self.path)
@property
def field_key(self):
""" This key can be passed to a Django ORM values_list """
return self.path + self.field
@property
def choices(self):
if self.pk:
model = get_model_from_path_string(
self.report.root_model.model_class(), self.path)
return self.get_choices(model, self.field)
class DisplayField(AbstractField):
""" A display field to show in a report. Always belongs to a Report
"""
name = models.CharField(max_length=2000)
sort = models.IntegerField(blank=True, null=True)
sort_reverse = models.BooleanField(verbose_name="Reverse", default=False)
width = models.IntegerField(default=15)
aggregate = models.CharField(
max_length=5,
choices=(
('Sum', 'Sum'),
('Count', 'Count'),
('Avg', 'Avg'),
('Max', 'Max'),
('Min', 'Min'),
),
        blank=True
)
total = models.BooleanField(default=False)
group = models.BooleanField(default=False)
display_format = models.ForeignKey(Format, blank=True, null=True)
def get_choices(self, model, field_name):
try:
model_field = model._meta.get_field_by_name(field_name)[0]
except:
model_field = None
if model_field and model_field.choices:
return ((model_field.get_prep_value(key), val) for key, val in model_field.choices)
@property
def choices_dict(self):
choices = self.choices
choices_dict = {}
if choices:
for choice in choices:
choices_dict.update({choice[0]: choice[1]})
return choices_dict
def __unicode__(self):
return self.name
class FilterField(AbstractField):
""" A display field to show in a report. Always belongs to a Report
"""
filter_type = models.CharField(
max_length=20,
choices=(
('exact', 'Equals'),
('iexact', 'Equals (case-insensitive)'),
('contains', 'Contains'),
('icontains', 'Contains (case-insensitive)'),
('in', 'in (comma seperated 1,2,3)'),
('gt', 'Greater than'),
('gte', 'Greater than equals'),
('lt', 'Less than'),
('lte', 'Less than equals'),
('startswith', 'Starts with'),
('istartswith', 'Starts with (case-insensitive)'),
('endswith', 'Ends with'),
('iendswith', 'Ends with (case-insensitive)'),
('range', 'range'),
('week_day', 'Week day'),
('isnull', 'Is null'),
('regex', 'Regular Expression'),
('iregex', 'Reg. Exp. (case-insensitive)'),
('max', 'Max (annotation-filter)'),
('min', 'Min (annotation-filter)'),
),
blank=True,
        default='icontains',
)
filter_value = models.CharField(max_length=2000)
filter_value2 = models.CharField(max_length=2000, blank=True)
exclude = models.BooleanField(default=False)
def clean(self):
if self.filter_type == 'range' and self.filter_value2 in [None, '']:
raise ValidationError('Range filters must have two values')
if self.filter_type in ('max', 'min'):
# These filter types ignore their value.
pass
elif self.field_type == 'DateField' and self.filter_type != 'isnull':
date_form = forms.DateField()
date_value = parser.parse(self.filter_value).date()
date_form.clean(date_value)
self.filter_value = str(date_value)
return super(FilterField, self).clean()
def get_choices(self, model, field_name):
try:
model_field = model._meta.get_field_by_name(field_name)[0]
except:
model_field = None
if model_field and model_field.choices:
return model_field.choices
def filter_property(self, value):
""" Determine if passed value should be filtered or not """
filter_field = self
filter_type = filter_field.filter_type
filter_value = filter_field.filter_value
filtered = True
WEEKDAY_INTS = {
'monday': 0,
'tuesday': 1,
'wednesday': 2,
'thursday': 3,
'friday': 4,
'saturday': 5,
'sunday': 6,
}
if filter_type == 'exact' and str(value) == filter_value:
filtered = False
if filter_type == 'iexact' and str(value).lower() == str(filter_value).lower():
filtered = False
if filter_type == 'contains' and filter_value in value:
filtered = False
if filter_type == 'icontains' and str(filter_value).lower() in str(value).lower():
filtered = False
if filter_type == 'in' and value in filter_value:
filtered = False
# convert dates and datetimes to timestamps in order to compare digits and date/times the same
if isinstance(value, datetime.datetime) or isinstance(value, datetime.date):
value = str(time.mktime(value.timetuple()))
try:
filter_value_dt = parser.parse(filter_value)
filter_value = str(time.mktime(filter_value_dt.timetuple()))
except ValueError:
pass
if filter_type == 'gt' and Decimal(value) > Decimal(filter_value):
filtered = False
if filter_type == 'gte' and Decimal(value) >= Decimal(filter_value):
filtered = False
if filter_type == 'lt' and Decimal(value) < Decimal(filter_value):
filtered = False
if filter_type == 'lte' and Decimal(value) <= Decimal(filter_value):
filtered = False
if filter_type == 'startswith' and str(value).startswith(str(filter_value)):
filtered = False
if filter_type == 'istartswith' and str(value).lower().startswith(str(filter_value)):
filtered = False
if filter_type == 'endswith' and str(value).endswith(str(filter_value)):
filtered = False
if filter_type == 'iendswith' and str(value).lower().endswith(str(filter_value)):
filtered = False
if filter_type == 'range' and value in [int(x) for x in filter_value]:
filtered = False
if filter_type == 'week_day' and WEEKDAY_INTS.get(str(filter_value).lower()) == value.weekday:
filtered = False
if filter_type == 'isnull' and value is None:
filtered = False
if filter_type == 'regex' and re.search(filter_value, value):
filtered = False
if filter_type == 'iregex' and re.search(filter_value, value, re.I):
filtered = False
if filter_field.exclude:
return not filtered
return filtered
@property
def field_type(self):
return self.report.get_field_type(self.field, self.path)
@property
def choices(self):
if self.pk:
model = get_model_from_path_string(
self.report.root_model.model_class(), self.path)
return self.get_choices(model, self.field)
def __unicode__(self):
return self.field
| BrendanBerkley/django-report-builder | report_builder/models.py | Python | bsd-3-clause | 25,293 |
def add(a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d" % (a, b)
return a / b
print "Let's do some math with just functions!"
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print "Age: %d, Height: %d, Weight: %d, IQ: %d" % (age, height, weight, iq)
# A puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print "That becomes: ", what, "Can you do it by hand?"
| cloudedge/LearningPython | ex21.py | Python | apache-2.0 | 748 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('municipal_finance', '0022_auto_20201125_0906'),
]
operations = [
]
| Code4SA/municipal-data | municipal_finance/migrations/0023_capital_initial_data.py | Python | mit | 251 |
"""
@author Silver Bullet
@since 四月-16
"""
"""
A generic factory implementation.
Examples:
>>> f=Factory()
>>> class A:pass
>>> f.register("createA",A)
>>> f.createA()
<__main__.A instance at 01491E7C>
>>> class B:
... def __init__(self, a,b=1):
... self.a=a
... self.b=b
...
>>> f.register("createB",B,1,b=2)
>>> b=f.createB()
>>> b.a
1
>>> b.b
2
>>> class C:
... def __init__(self,a,b,c=1,d=2):
... self.values = (a,b,c,d)
...
>>> f.register("createC",C,1,c=3)
>>> c=f.createC(2,d=4)
>>> c.values
(1, 2, 3, 4)
>>> f.register("importSerialization",__import__,"cPickle")
>>> pickle=f.importSerialization()
>>> pickle
<module 'cPickle' (built-in)>
>>> f.register("importSerialization",__import__,"marshal")
>>> pickle=f.importSerialization()
>>> pickle
<module 'marshal' (built-in)>
>>> f.unregister("importSerialization")
>>> f.importSerialization()
Traceback (most recent call last):
File "<interactive input>", line 1, in ?
AttributeError: Factory instance has no attribute 'importSerialization'
"""
class Factory:
def register(self, methodName, constructor, *args, **kargs):
"""register a constructor"""
_args = [constructor]
_args.extend(args)
setattr(self, methodName, apply(Functor, _args, kargs))
def unregister(self, methodName):
"""unregister a constructor"""
delattr(self, methodName)
class Functor:
def __init__(self, function, *args, **kargs):
assert callable(function), "function should be a callable obj"
self._function = function
self._args = args
self._kargs = kargs
def __call__(self, *args, **kargs):
"""call function"""
_args = list(self._args)
_args.extend(args)
_kargs = self._kargs.copy()
_kargs.update(kargs)
return apply(self._function, _args, _kargs)
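# Illustrative sketch (not part of the original module): the registration
# pattern from the docstring, exercised end to end. Note that this module is
# Python 2 code (it relies on the built-in apply()), so the demo is too.
if __name__ == '__main__':
    class _Point:
        def __init__(self, x, y=0):
            self.x = x
            self.y = y
    _factory = Factory()
    _factory.register("createPoint", _Point, 1, y=2)
    _point = _factory.createPoint()
    print (_point.x, _point.y)  # -> (1, 2)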
| yanjinbin/learnPython | chapter_special/factory.py | Python | gpl-3.0 | 1,863 |
#!/usr/bin/env python
"""This is a very simple client for the backdoor daemon. This is intended more
for testing rather than normal use. See bd_serv.py """
import socket
import sys
import time
import select
def recv_wrapper(s):
r, w, e = select.select([s.fileno()], [], [], 2)
if not r:
return ''
#cols = int(s.recv(4))
#rows = int(s.recv(4))
cols = 80
rows = 24
packet_size = cols * rows * 2 # double it for good measure
return s.recv(packet_size)
# HOST = '' #'localhost' # The remote host
# PORT = 1664 # The same port as used by the server
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(sys.argv[1]) # (HOST, PORT))
time.sleep(1)
# s.setblocking(0)
#s.send('COMMAND' + '\x01' + sys.argv[1])
s.send(':sendline ' + sys.argv[2])
print recv_wrapper(s)
s.close()
sys.exit()
# while True:
# data = recv_wrapper(s)
# if data == '':
# break
# sys.stdout.write (data)
# sys.stdout.flush()
# s.close()
| youtube/cobalt | third_party/llvm-project/lldb/third_party/Python/module/pexpect-2.4/examples/bd_client.py | Python | bsd-3-clause | 982 |
import pytest
from cotoolz._coiter import coiter
def test_coiter_iter():
assert tuple(coiter((1, 2, 3))) == (1, 2, 3)
it = coiter((1, 2, 3))
for n in (1, 2, 3):
assert it.send(None) == n
def gen():
yield 1
yield 2 # pragma: no cover
yield 3 # pragma: no cover
def co():
yield (yield (yield 1))
def co_throwable():
try:
yield 1
except Exception as e:
yield e
def test_coiter_send():
c = coiter(co())
assert next(c) == 1
for n in (2, 3):
assert c.send(n) == n
def test_coiter_throw():
c = coiter(co_throwable())
assert next(c) == 1
e = ValueError()
assert c.throw(e) is e
d = coiter((1, 2, 3))
with pytest.raises(ValueError) as exc:
d.throw(e)
assert exc.value is e
f = coiter((1, 2, 3))
with pytest.raises(ValueError) as exc:
f.throw(ValueError, 'v')
assert exc.value.args == ('v',)
def test_coiter_close():
c = coiter(gen())
assert next(c) == 1
c.close()
assert tuple(c) == ()
| llllllllll/cotoolz | cotoolz/tests/test_coiter.py | Python | gpl-2.0 | 1,048 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing random variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.ops.distributions import special_math
def test_moment_matching(
samples,
number_moments,
dist,
stride=0):
"""Return z-test scores for sample moments to match analytic moments.
Given `samples`, check that the first sample `number_moments` match
the given `dist` moments by doing a z-test.
Args:
samples: Samples from target distribution.
number_moments: Python `int` describing how many sample moments to check.
dist: SciPy distribution object that provides analytic moments.
stride: Distance between samples to check for statistical properties.
A stride of 0 means to use all samples, while other strides test for
spatial correlation.
Returns:
Array of z_test scores.
"""
sample_moments = []
expected_moments = []
variance_sample_moments = []
x = samples.flat
for i in range(1, number_moments + 1):
strided_range = x[::(i - 1) * stride + 1]
sample_moments.append(np.mean(strided_range ** i))
expected_moments.append(dist.moment(i))
variance_sample_moments.append(
(dist.moment(2 * i) - dist.moment(i) ** 2) / len(strided_range))
z_test_scores = []
for i in range(1, number_moments + 1):
# Assume every operation has a small numerical error.
# It takes i multiplications to calculate one i-th moment.
total_variance = (
variance_sample_moments[i - 1] +
i * np.finfo(samples.dtype).eps)
tiny = np.finfo(samples.dtype).tiny
assert np.all(total_variance > 0)
if total_variance < tiny:
total_variance = tiny
# z_test is approximately a unit normal distribution.
z_test_scores.append(abs(
(sample_moments[i - 1] - expected_moments[i - 1]) / np.sqrt(
total_variance)))
return z_test_scores
def chi_squared(x, bins):
"""Pearson's Chi-squared test."""
x = np.ravel(x)
n = len(x)
histogram, _ = np.histogram(x, bins=bins, range=(0, 1))
expected = n / float(bins)
return np.sum(np.square(histogram - expected) / expected)
def normal_cdf(x):
"""Cumulative distribution function for a standard normal distribution."""
return 0.5 + 0.5 * np.vectorize(math.erf)(x / math.sqrt(2))
def anderson_darling(x):
"""Anderson-Darling test for a standard normal distribution."""
x = np.sort(np.ravel(x))
n = len(x)
i = np.linspace(1, n, n)
z = np.sum((2 * i - 1) * np.log(normal_cdf(x)) +
(2 * (n - i) + 1) * np.log(1 - normal_cdf(x)))
return -n - z / n
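# Illustrative sketch (not part of the original module): the helpers above
# applied to synthetic NumPy data. chi_squared expects samples in [0, 1);
# anderson_darling expects (approximately) standard normal samples.
def _example_normality_checks():
  rng = np.random.RandomState(0)
  uniform_stat = chi_squared(rng.uniform(size=10000), bins=20)
  normal_stat = anderson_darling(rng.standard_normal(1000))
  return uniform_stat, normal_stat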
def test_truncated_normal(assert_equal, assert_all_close, n, y,
mean_atol=5e-4, median_atol=8e-4, variance_rtol=1e-3):
"""Tests truncated normal distribution's statistics."""
def _normal_cdf(x):
return .5 * math.erfc(-x / math.sqrt(2))
def normal_pdf(x):
return math.exp(-(x**2) / 2.) / math.sqrt(2 * math.pi)
def probit(x):
return special_math.ndtri(x)
a = -2.
b = 2.
mu = 0.
sigma = 1.
alpha = (a - mu) / sigma
beta = (b - mu) / sigma
z = _normal_cdf(beta) - _normal_cdf(alpha)
assert_equal((y >= a).sum(), n)
assert_equal((y <= b).sum(), n)
# For more information on these calculations, see:
# Burkardt, John. "The Truncated Normal Distribution".
# Department of Scientific Computing website. Florida State University.
expected_mean = mu + (normal_pdf(alpha) - normal_pdf(beta)) / z * sigma
y = y.astype(float)
actual_mean = np.mean(y)
assert_all_close(actual_mean, expected_mean, atol=mean_atol)
expected_median = mu + probit(
(_normal_cdf(alpha) + _normal_cdf(beta)) / 2.) * sigma
actual_median = np.median(y)
assert_all_close(actual_median, expected_median, atol=median_atol)
expected_variance = sigma**2 * (1 + (
(alpha * normal_pdf(alpha) - beta * normal_pdf(beta)) / z) - (
(normal_pdf(alpha) - normal_pdf(beta)) / z)**2)
actual_variance = np.var(y)
assert_all_close(
actual_variance,
expected_variance,
rtol=variance_rtol)
| chemelnucfin/tensorflow | tensorflow/python/kernel_tests/random/util.py | Python | apache-2.0 | 4,832 |
'''notes.py
Used to highlight user-defined "annotations" such as TODO, README, etc.,
depending user choice.
'''
import sublime
from base_linter import BaseLinter
CONFIG = {
'language': 'Annotations'
}
class Linter(BaseLinter):
DEFAULT_NOTES = ["TODO", "README", "FIXME"]
def built_in_check(self, view, code, filename):
annotations = self.select_annotations(view)
regions = []
for annotation in annotations:
regions.extend(self.find_all(code, annotation, view))
return regions
def select_annotations(self, view):
'''selects the list of annotations to use'''
return view.settings().get("annotations", self.DEFAULT_NOTES)
def extract_annotations(self, code, view, filename):
'''extract all lines with annotations'''
annotations = self.select_annotations(view)
annotation_starts = []
for annotation in annotations:
start = 0
length = len(annotation)
while True:
start = code.find(annotation, start)
if start != -1:
end = start + length
annotation_starts.append(start)
start = end
else:
break
regions_with_notes = set([])
for point in annotation_starts:
regions_with_notes.add(view.extract_scope(point))
regions_with_notes = sorted(list(regions_with_notes))
text = []
for region in regions_with_notes:
row, col = view.rowcol(region.begin())
text.append("[{0}:{1}]".format(filename, row + 1))
text.append(view.substr(region))
return '\n'.join(text)
def find_all(self, text, string, view):
''' finds all occurences of "string" in "text" and notes their positions
as a sublime Region
'''
found = []
length = len(string)
start = 0
while True:
start = text.find(string, start)
if start != -1:
end = start + length
found.append(sublime.Region(start, end))
start = end
else:
break
return found
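# Illustrative sketch (not part of the original plugin): the same scanning loop
# as Linter.find_all, but returning plain (start, end) tuples so it can be
# tried outside of Sublime Text.
def _find_all_offsets(text, string):
    found = []
    start = 0
    while True:
        start = text.find(string, start)
        if start == -1:
            break
        found.append((start, start + len(string)))
        start += len(string)
    return found
# _find_all_offsets("x TODO y TODO", "TODO") -> [(2, 6), (9, 13)]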
| tangledhelix/SublimeLinter-for-ST2 | sublimelinter/modules/notes.py | Python | mit | 2,249 |
import numpy as np
import corner
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import cm
from matplotlib import rcParams
from matplotlib import colors
from matplotlib.mlab import detrend_mean
from mpl_toolkits.mplot3d import Axes3D
from numpy import array, reshape, empty, ceil, percentile, median, nan, flatnonzero, core
from lib.Plotting_Libraries.dazer_plotter import Plot_Conf
from lib.Plotting_Libraries.sigfig import round_sig
from lib.CodeTools.File_Managing_Tools import Pdf_printer
from scipy import stats
from scipy.integrate import simps
def label_formatting(line_label):
label = line_label.replace('_', '\,\,')
if label[-1] == 'A':
label = label[0:-1] + '\AA'
label = '$' + label + '$'
return label
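# Illustrative sketch (not part of the original module): expected behaviour of
# label_formatting on a couple of typical emission-line labels.
def _label_formatting_examples():
    return [label_formatting('O3_5007A'),   # -> '$O3\,\,5007\AA$'
            label_formatting('H1_6563')]    # -> '$H1\,\,6563$'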
def _histplot_bins(column, bins=100):
"""Helper to get bins for histplot."""
col_min = np.min(column)
col_max = np.max(column)
return range(col_min, col_max + 2, max((col_max - col_min) // bins, 1))
def numberStringFormat(value):
if value > 0.001:
newFormat = str(round_sig(value, 4))
else:
newFormat = r'${:.3e}$'.format(value)
return newFormat
class Basic_plots(Plot_Conf):
def __init__(self):
# Class with plotting tools
Plot_Conf.__init__(self)
def prefit_input(self):
size_dict = {'figure.figsize': (20, 14), 'axes.labelsize': 16, 'legend.fontsize': 18}
self.FigConf(plotSize=size_dict)
# Input continuum
self.data_plot(self.inputWave, self.inputContinuum, 'Input object continuum')
# Observed continuum
self.data_plot(self.obj_data['wave_resam'], self.obj_data['flux_norm'], 'Observed spectrum', linestyle=':')
# Nebular contribution removed if available
self.data_plot(self.nebDefault['wave_neb'], self.nebDefault['synth_neb_flux'], 'Nebular contribution removed',
linestyle='--')
# #In case of a synthetic observation:
# if 'neb_SED' in self.obj_data:
# self.data_plot(self.input_wave, self.obj_data['neb_SED']['neb_int_norm'], 'Nebular continuum')
# title_label = 'Observed spectrum'
# if 'stellar_flux' in self.obj_data:
# self.data_plot(self.obj_data['obs_wave'], self.obj_data['stellar_flux']/self.obj_data['normFlux_coeff'], 'Stellar continuum')
# self.data_plot(self.obj_data['obs_wave'], self.obj_data['stellar_flux_err']/ self.obj_data['normFlux_coeff'], 'Stellar continuum with uncertainty', linestyle=':')
# title_label = 'Synthetic spectrum'
self.FigWording(xlabel='Wavelength $(\AA)$', ylabel='Observed flux',
title='Observed spectrum and prefit input continuum')
return
def prefit_comparison(self, obj_ssp_fit_flux):
size_dict = {'figure.figsize': (20, 14), 'axes.labelsize': 22, 'legend.fontsize': 22}
self.FigConf(plotSize=size_dict)
self.data_plot(self.inputWave, self.obj_data['flux_norm'], 'Object normed flux')
self.data_plot(self.inputWave, self.nebDefault['synth_neb_flux'], 'Nebular continuum')
self.data_plot(self.inputWave, obj_ssp_fit_flux + self.nebDefault['synth_neb_flux'], 'Prefit continuum output',
linestyle='-')
self.Axis.set_yscale('log')
self.FigWording(xlabel='Wavelength $(\AA)$', ylabel='Normalised flux', title='')
return
def prefit_ssps(self, sspPrefitCoeffs):
size_dict = {'figure.figsize': (20, 14), 'axes.labelsize': 22, 'legend.fontsize': 22}
self.FigConf(plotSize=size_dict)
# TODO This function should go to my collection
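        # Maps an integer to its ordinal string ('1st', '2nd', '3rd', '4th', ...)
        # using the compact "tsnrhtdd" suffix-lookup trick.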
ordinal_generator = lambda n: "%d%s" % (n, "tsnrhtdd"[(n / 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
ordinal_bases = [ordinal_generator(n) for n in range(len(sspPrefitCoeffs))]
counter = 0
for i in range(len(self.onBasesFluxNorm)):
if sspPrefitCoeffs[i] > self.lowlimit_sspContribution:
counter += 1
label_i = '{} base: flux coeff {}, norm coeff {:.2E}'.format(ordinal_bases[i], sspPrefitCoeffs[i],
self.onBasesFluxNormCoeffs[i])
label_i = '{} base'.format(ordinal_bases[i])
self.data_plot(self.onBasesWave, self.onBasesFluxNorm[i], label_i)
self.area_fill(self.ssp_lib['norm_interval'][0], self.ssp_lib['norm_interval'][1],
'Norm interval: {} - {}'.format(self.ssp_lib['norm_interval'][0],
self.ssp_lib['norm_interval'][1]), alpha=0.5)
title = 'SSP prefit contributing stellar populations {}/{}'.format(
(sspPrefitCoeffs > self.lowlimit_sspContribution).sum(), len(self.onBasesFluxNorm))
self.FigWording(xlabel='Wavelength $(\AA)$', ylabel='Normalised flux', title='')
return
def continuumFit(self, db_dict):
outputSSPsCoefs, Av_star = np.median(db_dict['w_i']['trace'], axis=0), db_dict['Av_star']['median']
outputSSPsCoefs_std, Av_star_std = np.std(db_dict['w_i']['trace'], axis=0), db_dict['Av_star'][
'standard deviation']
stellarFit = outputSSPsCoefs.dot(self.onBasesFluxNorm) * np.power(10, -0.4 * Av_star * self.Xx_stellar)
stellarPrefit = self.sspPrefitCoeffs.dot(self.onBasesFluxNorm) * np.power(10, -0.4 * self.stellarAv_prior[
0] * self.Xx_stellar)
nebularFit = self.nebDefault['synth_neb_flux']
stellarFit_upper = (outputSSPsCoefs + outputSSPsCoefs_std).dot(self.onBasesFluxNorm) * np.power(10, -0.4 * (
Av_star - Av_star_std) * self.Xx_stellar) + nebularFit
stellarFit_lower = (outputSSPsCoefs - outputSSPsCoefs_std).dot(self.onBasesFluxNorm) * np.power(10, -0.4 * (
Av_star + Av_star_std) * self.Xx_stellar) + nebularFit
size_dict = {'figure.figsize': (20, 14), 'axes.labelsize': 16, 'legend.fontsize': 18}
self.FigConf(plotSize=size_dict)
self.data_plot(self.inputWave, self.inputContinuum, 'Object normed flux')
self.data_plot(self.inputWave, nebularFit, 'Nebular continuum')
self.data_plot(self.inputWave, stellarFit + nebularFit, 'Continuum fit', color='tab:green')
self.Axis.fill_between(self.inputWave, stellarFit_lower, stellarFit_upper, color='tab:green', alpha=0.5)
self.data_plot(self.inputWave, stellarPrefit + nebularFit, 'prefit fit', color='tab:red', linestyle=':')
self.Axis.set_yscale('log')
self.FigWording(xlabel='Wavelength $(\AA)$', ylabel='Observed flux', title='SSPs continuum prefit')
return
def masked_observation(self):
size_dict = {'figure.figsize': (20, 14), 'axes.labelsize': 16, 'legend.fontsize': 16}
self.FigConf(plotSize=size_dict)
nLineMasks = self.boolean_matrix.shape[0]
inputContinuum = self.obj_data['flux_norm'] - self.nebDefault['synth_neb_flux']
self.data_plot(self.inputWave, inputContinuum, 'Unmasked input continuum')
self.data_plot(self.inputWave, inputContinuum * np.invert(self.object_mask), 'Object mask', linestyle=':')
for i in range(nLineMasks):
self.data_plot(self.inputWave, inputContinuum * self.boolean_matrix[i, :],
'Line mask ' + self.obj_data['lineLabels'][i], linestyle='--')
self.Axis.set_xlabel('Wavelength $(\AA)$')
self.Axis.set_ylabel('Observed flux')
self.Axis.set_title('Spectrum masks')
self.Axis.set_xscale('log')
self.Axis.legend(bbox_to_anchor=(0.95, 1), loc=2, borderaxespad=0.)
return
def resampled_observation(self):
size_dict = {'figure.figsize': (20, 14), 'axes.labelsize': 16, 'legend.fontsize': 18}
self.FigConf(plotSize=size_dict)
self.data_plot(self.obj_data['obs_wavelength'], self.obj_data['obs_flux'], 'Observed spectrum')
self.data_plot(self.obj_data['wave_resam'], self.obj_data['flux_resam'], 'Resampled spectrum', linestyle='--')
self.data_plot(self.obj_data['wave_resam'], self.obj_data['flux_norm'] * self.obj_data['normFlux_coeff'],
r'Normalized spectrum $\cdot$ {:.2E}'.format(self.obj_data['normFlux_coeff']), linestyle=':')
self.area_fill(self.obj_data['norm_interval'][0], self.obj_data['norm_interval'][1],
'Norm interval: {} - {}'.format(self.obj_data['norm_interval'][0],
self.obj_data['norm_interval'][1]), alpha=0.5)
self.FigWording(xlabel='Wavelength $(\AA)$', ylabel='Observed flux', title='Resampled observation')
return
def linesGrid(self, linesDf, wave, flux, plotAddress):
# TODO dangerous function which should not be here
# Get number of lines to generate the figure
lineLabels = linesDf.index.values
nLabels = lineLabels.size
if nLabels <= 56:
nRows, nColumns = 7, 8
else:
nRows, nColumns = 8, 8
# Generate figure
size_dict = {'figure.figsize': (30, 20), 'axes.titlesize': 12, 'axes.labelsize': 10, 'legend.fontsize': 10}
self.FigConf(plotSize=size_dict, Figtype='Grid', n_columns=nColumns, n_rows=nRows)
# Get line regions
wavesMatrix = linesDf.loc[:, 'w1':'w6'].values
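        # Columns w1-w6 bracket three wavelength windows per line: the two side
        # continuum bands (w1-w2 and w5-w6) and the line region itself (w3-w4).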
wavesIdcs = np.searchsorted(wave, wavesMatrix)
idcsLines = (wave[wavesIdcs[:, 2]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 3]])
idcsContinua = ((wave[wavesIdcs[:, 0]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 1]])) | (
(wave[wavesIdcs[:, 4]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 5]]))
idcsRedContinuum = ((wave[wavesIdcs[:, 0]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 1]]))
idcsBlueContinuum = ((wave[wavesIdcs[:, 4]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 5]]))
# Loop through the line and plot them
for i in range(nLabels):
lineLabel = lineLabels[i]
lineType = linesDf.iloc[i].region_label
# Get line_i wave and continua
lineWave, lineFlux = wave[idcsLines[:, i]], flux[idcsLines[:, i]]
continuaWave, continuaFlux = wave[idcsContinua[:, i]], flux[idcsContinua[:, i]]
continuaRedWave, continuaRedFlux = wave[idcsRedContinuum[:, i]], flux[idcsRedContinuum[:, i]]
continuaBlueWave, continuaBlueFlux = wave[idcsBlueContinuum[:, i]], flux[idcsBlueContinuum[:, i]]
lineRes = lineWave[1] - lineWave[0] # TODO need to understand this better
# Compute linear line continuum and get the standard deviation on the continuum
slope, intercept, r_value, p_value, std_err = stats.linregress(continuaWave, continuaFlux)
linearLineContinua = lineWave * slope + intercept
# lineFlux_i = linesDf.loc[lineLabel, 'line_Flux'].nominal_value
# lineFlux_iSimps = simps(lineFlux, lineWave) - simps(linearLineContinua, lineWave)
# lineFlux_iSum = (lineFlux.sum() - linearLineContinua.sum()) * lineRes
# Excluding Hbeta
recombCheck = True if (('H1' in lineLabel) or ('He1' in lineLabel) or ('He2' in lineLabel)) and (
lineLabel != 'H1_4861A') and ('_w' not in lineLabel) else False
            blendedCheck = True if (lineType != 'continuum_mask') and (lineType != 'None') else False
# print i, lineLabels[i], recombCheck, blendedCheck #linesDf.iloc[i].obs_flux, simps(lineFlux, lineWave), lineFlux.sum()
# # Add flux for isolated recombination lines
# if recombCheck and blendedCheck is False:
# # Assign new values
# fluxContinuum = linearLineContinua.sum() * lineRes
# linesDf.loc[lineLabel, 'obs_flux'] = linesDf.iloc[i].obs_flux + fluxContinuum
#
# self.Axis[i].plot(lineWave, linearLineContinua, color='tab:green')
#
# # Add flux for blended recombination lines
# if recombCheck and blendedCheck:
# muLine, sigmaLine = linesDf.iloc[i].mu, linesDf.iloc[i].sigma
# lineHalfWidth = 3 * sigmaLine
# w3, w4 = muLine - lineHalfWidth, muLine + lineHalfWidth
# idx3, idx4 = np.searchsorted(wave, [w3, w4])
# idcsLines_trim = (wave[idx3] <= wave) & (wave <= wave[idx4])
# lineWaveTrim, linearLineContinuaTrim = wave[idcsLines_trim], wave[idcsLines_trim] * slope + intercept
# fluxContinuum = linearLineContinuaTrim.sum() * lineRes
#
# # Assign new values
# linesDf.loc[lineLabel, 'obs_flux'] = linesDf.iloc[i].obs_flux + fluxContinuum
# linesDf.loc[lineLabel, 'w3'], linesDf.loc[lineLabel, 'w4'] = w3, w4
#
# # Generate the plot
# self.Axis[i].plot(lineWaveTrim, linearLineContinuaTrim, color='tab:purple')
# Adjust the flux in N2_6548
if lineLabel == 'N2_6548A':
if linesDf.loc[lineLabel, 'region_label'] != 'None':
linesDf.loc[lineLabel, 'obs_fluxErr'] = linesDf.loc['N2_6584A', 'obs_fluxErr']
if (linesDf.loc['N2_6548A', 'obs_fluxErr'] / linesDf.loc['N2_6548A', 'obs_flux']) > 0.1:
linesDf.loc['N2_6548A', 'obs_fluxErr'] = linesDf.loc['N2_6548A', 'obs_flux'] * 0.1
if (linesDf.loc['N2_6584A', 'obs_fluxErr'] / linesDf.loc['N2_6584A', 'obs_flux']) > 0.1:
linesDf.loc['N2_6584A', 'obs_fluxErr'] = linesDf.loc['N2_6584A', 'obs_flux'] * 0.1
# Plot the data
self.Axis[i].plot(continuaRedWave, continuaRedFlux, color='tab:orange')
self.Axis[i].plot(continuaBlueWave, continuaBlueFlux, color='tab:orange')
self.Axis[i].plot(lineWave, lineFlux, color='tab:blue')
# Format the plot
self.Axis[i].get_yaxis().set_visible(False)
self.Axis[i].set_yticks([])
self.Axis[i].get_xaxis().set_visible(False)
self.Axis[i].set_xticks([])
self.Axis[i].set_yscale('log')
# Wording plot
self.Axis[i].set_title(lineLabels[i])
# Plot the data
plt.savefig(plotAddress, dpi=200, bbox_inches='tight')
return
def linesGrid_noContinuum(self, linesDf, wave, flux, plotAddress):
# Get number of lines to generate the figure
lineLabels = linesDf.index.values
nLabels = lineLabels.size
if nLabels <= 56:
nRows, nColumns = 7, 8
else:
nRows, nColumns = 8, 8
# Generate figure
size_dict = {'figure.figsize': (30, 20), 'axes.titlesize': 12, 'axes.labelsize': 10, 'legend.fontsize': 10}
self.FigConf(plotSize=size_dict, Figtype='Grid', n_columns=nColumns, n_rows=nRows)
# Get line regions
wavesMatrix = linesDf.loc[:, 'w1':'w6'].values
wavesIdcs = np.searchsorted(wave, wavesMatrix)
idcsLines = (wave[wavesIdcs[:, 2]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 3]])
idcsContinua = ((wave[wavesIdcs[:, 0]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 1]])) | (
(wave[wavesIdcs[:, 4]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 5]]))
idcsRedContinuum = ((wave[wavesIdcs[:, 0]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 1]]))
idcsBlueContinuum = ((wave[wavesIdcs[:, 4]] <= wave[:, None]) & (wave[:, None] <= wave[wavesIdcs[:, 5]]))
# Loop through the line and plot them
for i in range(nLabels):
lineLabel = lineLabels[i]
lineType = linesDf.iloc[i].region_label
# Get line_i wave and continua
lineWave, lineFlux = wave[idcsLines[:, i]], flux[idcsLines[:, i]]
continuaWave, continuaFlux = wave[idcsContinua[:, i]], flux[idcsContinua[:, i]]
continuaRedWave, continuaRedFlux = wave[idcsRedContinuum[:, i]], flux[idcsRedContinuum[:, i]]
continuaBlueWave, continuaBlueFlux = wave[idcsBlueContinuum[:, i]], flux[idcsBlueContinuum[:, i]]
lineRes = lineWave[1] - lineWave[0] # TODO need to understand this better
# Compute linear line continuum and get the standard deviation on the continuum
slope, intercept, r_value, p_value, std_err = stats.linregress(continuaWave, continuaFlux)
linearLineContinua = lineWave * slope + intercept
# lineFlux_i = linesDf.loc[lineLabel, 'line_Flux'].nominal_value
# lineFlux_iSimps = simps(lineFlux, lineWave) - simps(linearLineContinua, lineWave)
# lineFlux_iSum = (lineFlux.sum() - linearLineContinua.sum()) * lineRes
# Excluding Hbeta
recombCheck = True if (('H1' in lineLabel) or ('He1' in lineLabel) or ('He2' in lineLabel)) and (
lineLabel != 'H1_4861A') and ('_w' not in lineLabel) else False
            blendedCheck = True if (lineType != 'continuum_mask') and (lineType != 'None') else False
# print i, lineLabels[i], recombCheck, blendedCheck #linesDf.iloc[i].obs_flux, simps(lineFlux, lineWave), lineFlux.sum()
# Plot the data
self.Axis[i].plot(continuaRedWave, continuaRedFlux, color='tab:orange')
self.Axis[i].plot(continuaBlueWave, continuaBlueFlux, color='tab:orange')
self.Axis[i].plot(lineWave, lineFlux, color='tab:blue')
# Add flux for isolated recombination lines
if recombCheck and blendedCheck is False:
# Assign new values
fluxContinuum = 0.0
linesDf.loc[lineLabel, 'obs_flux'] = linesDf.iloc[i].obs_flux + fluxContinuum
self.Axis[i].plot(lineWave, linearLineContinua, color='tab:green')
# Add flux for blended recombination lines
if recombCheck and blendedCheck:
muLine, sigmaLine = linesDf.iloc[i].mu, linesDf.iloc[i].sigma
lineHalfWidth = 3 * sigmaLine
w3, w4 = muLine - lineHalfWidth, muLine + lineHalfWidth
idx3, idx4 = np.searchsorted(wave, [w3, w4])
idcsLines_trim = (wave[idx3] <= wave) & (wave <= wave[idx4])
lineWaveTrim, linearLineContinuaTrim = wave[idcsLines_trim], wave[idcsLines_trim] * slope + intercept
fluxContinuum = 0.0
# Assign new values
linesDf.loc[lineLabel, 'obs_flux'] = linesDf.iloc[i].obs_flux + fluxContinuum
linesDf.loc[lineLabel, 'w3'], linesDf.loc[lineLabel, 'w4'] = w3, w4
# Generate the plot
self.Axis[i].plot(lineWaveTrim, linearLineContinuaTrim, color='tab:purple')
# Adjust the flux in N2_6548
if lineLabel == 'N2_6548A':
if linesDf.loc[lineLabel, 'region_label'] != 'None':
linesDf.loc[lineLabel, 'obs_fluxErr'] = linesDf.loc['N2_6584A', 'obs_fluxErr']
if (linesDf.loc['N2_6548A', 'obs_fluxErr'] / linesDf.loc['N2_6548A', 'obs_flux']) > 0.1:
linesDf.loc['N2_6548A', 'obs_fluxErr'] = linesDf.loc['N2_6548A', 'obs_flux'] * 0.1
if (linesDf.loc['N2_6584A', 'obs_fluxErr'] / linesDf.loc['N2_6584A', 'obs_flux']) > 0.1:
linesDf.loc['N2_6584A', 'obs_fluxErr'] = linesDf.loc['N2_6584A', 'obs_flux'] * 0.1
# Adjust the flux in N2_6548
if lineLabel == 'O2_7319A':
if linesDf.loc[lineLabel, 'region_label'] != 'None':
if (linesDf.loc['O2_7319A', 'obs_fluxErr'] / linesDf.loc['O2_7319A', 'obs_flux']) > 0.1:
linesDf.loc['O2_7319A', 'obs_fluxErr'] = linesDf.loc['O2_7319A', 'obs_flux'] * 0.1
if (linesDf.loc['O2_7330A', 'obs_fluxErr'] / linesDf.loc['O2_7330A', 'obs_flux']) > 0.1:
linesDf.loc['O2_7330A', 'obs_fluxErr'] = linesDf.loc['O2_7330A', 'obs_flux'] * 0.1
# Format the plot
self.Axis[i].get_yaxis().set_visible(False)
self.Axis[i].set_yticks([])
self.Axis[i].get_xaxis().set_visible(False)
self.Axis[i].set_xticks([])
self.Axis[i].set_yscale('log')
# Wording plot
self.Axis[i].set_title(lineLabels[i])
# Plot the data
plt.savefig(plotAddress, dpi=200, bbox_inches='tight')
return
def emissivitySurfaceFit_2D(self, line_label, emisCoeffs, emisGrid, funcEmis, te_ne_grid, denRange, tempRange):
# Plot format
size_dict = {'figure.figsize': (20, 14), 'axes.titlesize': 16, 'axes.labelsize': 16, 'legend.fontsize': 18}
rcParams.update(size_dict)
# Generate figure
fig, ax = plt.subplots(1, 1)
# Generate fitted surface points
surface_points = funcEmis(te_ne_grid, *emisCoeffs)
# Plot plane
plt.imshow(surface_points.reshape((denRange.size, tempRange.size)), aspect=0.03,
extent=(te_ne_grid[1].min(), te_ne_grid[1].max(), te_ne_grid[0].min(), te_ne_grid[0].max()))
# Compare pyneb values with values from fitting
percentage_difference = (1 - surface_points / emisGrid.flatten()) * 100
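        # Relative deviation (in percent) of the fitted surface from the PyNeb emissivity grid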
# Points with error below 1.0 are transparent:
idx_interest = percentage_difference < 1.0
x_values, y_values = te_ne_grid[1][idx_interest], te_ne_grid[0][idx_interest]
ax.scatter(x_values, y_values, c="None", edgecolors='black', linewidths=0.35, label='Error below 1%')
if idx_interest.sum() < emisGrid.size:
# Plot grid points
plt.scatter(te_ne_grid[1][~idx_interest], te_ne_grid[0][~idx_interest],
c=percentage_difference[~idx_interest],
edgecolors='black', linewidths=0.1, cmap=cm.OrRd, label='Error above 1%')
# Color bar
cbar = plt.colorbar()
cbar.ax.set_ylabel('% difference', rotation=270, fontsize=15)
# Trim the axis
ax.set_xlim(te_ne_grid[1].min(), te_ne_grid[1].max())
ax.set_ylim(te_ne_grid[0].min(), te_ne_grid[0].max())
# Add labels
ax.update({'xlabel': 'Density ($cm^{-3}$)', 'ylabel': 'Temperature $(K)$', 'title': line_label})
return
def emissivitySurfaceFit_3D(self, line_label, emisCoeffs, emisGrid, funcEmis, te_ne_grid):
# Plot format
size_dict = {'figure.figsize': (20, 14), 'axes.titlesize': 16, 'axes.labelsize': 16, 'legend.fontsize': 18}
rcParams.update(size_dict)
# Plot the grid points
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# # Generate fitted surface points
# matrix_edge = int(np.sqrt(te_ne_grid[0].shape[0]))
#
# # Plotting pyneb emissivities
# x_values, y_values = te_ne_grid[0].reshape((matrix_edge, matrix_edge)), te_ne_grid[1].reshape((matrix_edge, matrix_edge))
# ax.plot_surface(x_values, y_values, emisGrid.reshape((matrix_edge, matrix_edge)), color='g', alpha=0.5)
# Generate fitted surface points
x_values = te_ne_grid[0].reshape((self.denRange.size, self.tempRange.size))
y_values = te_ne_grid[1].reshape((self.denRange.size, self.tempRange.size))
ax.plot_surface(x_values, y_values, emisGrid.reshape((self.denRange.size, self.tempRange.size)), color='g',
alpha=0.5)
# Plotting emissivity parametrization
fit_points = funcEmis(te_ne_grid, *emisCoeffs)
ax.scatter(te_ne_grid[0], te_ne_grid[1], fit_points, color='r', alpha=0.5)
# Add labels
ax.update({'ylabel': 'Density ($cm^{-3}$)', 'xlabel': 'Temperature $(K)$', 'title': line_label})
return
def traces_plot(self, traces_list, stats_dic):
# Remove operations from the parameters list
traces = traces_list[
[i for i, v in enumerate(traces_list) if ('_Op' not in v) and ('_log__' not in v) and ('w_i' not in v)]]
# Number of traces to plot
n_traces = len(traces)
# Declare figure format
size_dict = {'figure.figsize': (14, 20), 'axes.titlesize': 26, 'axes.labelsize': 24, 'legend.fontsize': 18}
self.FigConf(plotSize=size_dict, Figtype='Grid', n_columns=1, n_rows=n_traces)
# Generate the color map
self.gen_colorList(0, n_traces)
# Plot individual traces
for i in range(n_traces):
# Current trace
trace_code = traces[i]
trace_array = stats_dic[trace_code]['trace']
# Label for the plot
mean_value = stats_dic[trace_code]['mean']
std_dev = stats_dic[trace_code]['standard deviation']
if mean_value > 0.001:
label = r'{} = ${}$ $\pm${}'.format(self.labels_latex_dic[trace_code], round_sig(mean_value, 4),
round_sig(std_dev, 4))
else:
label = r'{} = ${:.3e}$ $\pm$ {:.3e}'.format(self.labels_latex_dic[trace_code], mean_value, std_dev)
# Plot the data
self.Axis[i].plot(trace_array, label=label, color=self.get_color(i))
self.Axis[i].axhline(y=mean_value, color=self.get_color(i), linestyle='--')
self.Axis[i].set_ylabel(self.labels_latex_dic[trace_code])
if i < n_traces - 1:
self.Axis[i].set_xticklabels([])
# Add legend
self.legend_conf(self.Axis[i], loc=2)
return
def tracesPosteriorPlot(self, params_list, stats_dic):
        # Remove operations from the parameters list # TODO adapt this line to discriminate better
traces_list = stats_dic.keys()
#traces = traces_list[[i for i, v in enumerate(traces_list) if ('_Op' not in v) and ('_log__' not in v) and ('w_i' not in v)]]
        traces = [item for item in params_list if item in traces_list]
# Number of traces to plot
n_traces = len(traces)
# Declare figure format
size_dict = {'axes.titlesize': 20, 'axes.labelsize': 20, 'legend.fontsize': 10, 'xtick.labelsize':8, 'ytick.labelsize':8}
rcParams.update(size_dict)
fig = plt.figure(figsize=(8, n_traces))
# # Generate the color map
self.gen_colorList(0, n_traces)
gs = gridspec.GridSpec(n_traces * 2, 4)
gs.update(wspace=0.2, hspace=1.8)
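        # Layout: each parameter takes two grid rows; the trace spans the first
        # three columns and its posterior histogram fills the fourth.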
for i in range(n_traces):
            # Create figure axes
axTrace = fig.add_subplot(gs[2 * i:2 * (1 + i), :3])
axPoterior = fig.add_subplot(gs[2 * i:2 * (1 + i), 3])
# Current trace
trace_code = traces[i]
trace_array = stats_dic[trace_code]
# Label for the plot
mean_value = np.mean(stats_dic[trace_code])
std_dev = np.std(stats_dic[trace_code])
if mean_value > 0.001:
label = r'{} = ${}$ $\pm${}'.format(self.labels_latex_dic[trace_code], round_sig(mean_value, 4),
round_sig(std_dev, 4))
else:
label = r'{} = ${:.3e}$ $\pm$ {:.3e}'.format(self.labels_latex_dic[trace_code], mean_value, std_dev)
# Plot the traces
axTrace.plot(trace_array, label=label, color=self.get_color(i))
axTrace.axhline(y=mean_value, color=self.get_color(i), linestyle='--')
axTrace.set_ylabel(self.labels_latex_dic[trace_code])
# Plot the histograms
axPoterior.hist(trace_array, bins=50, histtype='step', color=self.get_color(i), align='left')
# Plot the axis as percentile
median, percentile16th, percentile84th = np.median(trace_array), np.percentile(trace_array, 16), np.percentile(trace_array, 84)
# Add true value if available
if trace_code + '_true' in self.obj_data:
value_param = self.obj_data[trace_code + '_true']
if isinstance(value_param, (list, tuple, np.ndarray)):
nominal_value, std_value = value_param[0], 0.0 if value_param.size == 1 else value_param[1]
axPoterior.axvline(x=nominal_value, color=self.get_color(i), linestyle='solid')
axPoterior.axvspan(nominal_value - std_value, nominal_value + std_value, alpha=0.5, color=self.get_color(i))
else:
nominal_value = value_param
axPoterior.axvline(x=nominal_value, color=self.get_color(i), linestyle='solid')
# Add legend
axTrace.legend(loc=7)
# Remove ticks and labels
if i < n_traces - 1:
axTrace.get_xaxis().set_visible(False)
axTrace.set_xticks([])
axPoterior.yaxis.set_major_formatter(plt.NullFormatter())
axPoterior.set_yticks([])
axPoterior.set_xticks([percentile16th, median, percentile84th])
axPoterior.set_xticklabels(['',numberStringFormat(median),''])
axTrace.set_yticks((percentile16th, median, percentile84th))
axTrace.set_yticklabels((numberStringFormat(percentile16th), '', numberStringFormat(percentile84th)))
return
def posteriors_plot(self, traces_list, stats_dic):
# Remove operations from the parameters list
traces = traces_list[[i for i, v in enumerate(traces_list) if ('_Op' not in v) and ('_log__' not in v) and ('w_i' not in v)]]
# Number of traces to plot
n_traces = len(traces)
# Declare figure format
size_dict = {'figure.figsize': (14, 20), 'axes.titlesize': 22, 'axes.labelsize': 22, 'legend.fontsize': 14}
self.FigConf(plotSize=size_dict, Figtype='Grid', n_columns=1, n_rows=n_traces)
# Generate the color map
self.gen_colorList(0, n_traces)
# Plot individual traces
for i in range(len(traces)):
# Current trace
trace_code = traces[i]
mean_value = stats_dic[trace_code]['mean']
trace_array = stats_dic[trace_code]['trace']
# Plot HDP limits
HDP_coords = stats_dic[trace_code]['95% HPD interval']
for HDP in HDP_coords:
if mean_value > 0.001:
label_limits = 'HPD interval: {} - {}'.format(round_sig(HDP_coords[0], 4),
round_sig(HDP_coords[1], 4))
label_mean = 'Mean value: {}'.format(round_sig(mean_value, 4))
else:
label_limits = 'HPD interval: {:.3e} - {:.3e}'.format(HDP_coords[0], HDP_coords[1])
label_mean = 'Mean value: {:.3e}'.format(mean_value)
self.Axis[i].axvline(x=HDP, label=label_limits, color='grey', linestyle='dashed')
self.Axis[i].axvline(x=mean_value, label=label_mean, color='grey', linestyle='solid')
self.Axis[i].hist(trace_array, histtype='stepfilled', bins=35, alpha=.7, color=self.get_color(i),
normed=False)
# Add true value if available
if 'true_value' in stats_dic[trace_code]:
value_true = stats_dic[trace_code]['true_value']
if value_true is not None:
label_true = 'True value {:.3e}'.format(value_true)
self.Axis[i].axvline(x=value_true, label=label_true, color='black', linestyle='solid')
# Figure wording
self.Axis[i].set_ylabel(self.labels_latex_dic[trace_code])
self.legend_conf(self.Axis[i], loc=2)
def fluxes_distribution(self, lines_list, ions_list, function_key, db_dict, obsFluxes=None, obsErr=None):
# Declare plot grid size
n_columns = 3
n_lines = len(lines_list)
n_rows = int(np.ceil(float(n_lines)/float(n_columns)))
# Declare figure format
size_dict = {'figure.figsize': (9, 22), 'axes.titlesize': 14, 'axes.labelsize': 10, 'legend.fontsize': 10,
'xtick.labelsize': 8, 'ytick.labelsize': 3}
self.FigConf(plotSize=size_dict, Figtype='Grid', n_columns=n_columns, n_rows=n_rows)
# Generate the color dict
self.gen_colorList(0, 10)
colorDict = dict(H1r=0, O2=1, O3=2, N2=3, S2=4, S3=5, Ar3=6, Ar4=7, He1r=8, He2r=9)
# Flux statistics
traces_array = db_dict[function_key]
median_values = median(db_dict[function_key], axis=0)
p16th_fluxes = percentile(db_dict[function_key], 16, axis=0)
p84th_fluxes = percentile(db_dict[function_key], 84, axis=0)
# Plot individual traces
for i in range(n_lines):
# Current line
label = lines_list[i]
trace = traces_array[:, i]
median_flux = median_values[i]
label_mean = 'Mean value: {}'.format(round_sig(median_flux, 4))
self.Axis[i].hist(trace, histtype='stepfilled', bins=35, alpha=.7, color=self.get_color(colorDict[ions_list[i]]), normed=False)
if obsFluxes is not None:
true_value, fitErr = obsFluxes[i], obsErr[i]
label_true = 'True value: {}'.format(round_sig(true_value, 3))
self.Axis[i].axvline(x=true_value, label=label_true, color='black', linestyle='solid')
self.Axis[i].axvspan(true_value - fitErr, true_value + fitErr, alpha=0.5, color='grey')
self.Axis[i].get_yaxis().set_visible(False)
self.Axis[i].set_yticks([])
# Plot wording
self.Axis[i].set_title(r'{}'.format(self.linesDb.loc[label, 'latex_code']))
return
def acorr_plot(self, traces_list, stats_dic, n_columns=4, n_rows=2):
# Remove operations from the parameters list
traces = traces_list[
[i for i, v in enumerate(traces_list) if ('_Op' not in v) and ('_log__' not in v) and ('w_i' not in v)]]
# Number of traces to plot
n_traces = len(traces)
# Declare figure format
size_dict = {'figure.figsize': (14, 14), 'axes.titlesize': 20, 'legend.fontsize': 10}
self.FigConf(plotSize=size_dict, Figtype='Grid', n_columns=n_columns, n_rows=n_rows)
# Generate the color map
self.gen_colorList(0, n_traces)
# Plot individual traces
for i in range(n_traces):
# Current trace
trace_code = traces[i]
label = self.labels_latex_dic[trace_code]
trace_array = stats_dic[trace_code]['trace']
if trace_code != 'ChiSq':
maxlags = min(len(trace_array) - 1, 100)
self.Axis[i].acorr(x=trace_array, color=self.get_color(i), detrend=detrend_mean, maxlags=maxlags)
else:
                # Temporary workaround
chisq_adapted = reshape(trace_array, len(trace_array))
maxlags = min(len(chisq_adapted) - 1, 100)
self.Axis[i].acorr(x=chisq_adapted, color=self.get_color(i), detrend=detrend_mean, maxlags=maxlags)
self.Axis[i].set_xlim(0, maxlags)
self.Axis[i].set_title(label)
return
def corner_plot(self, params_list, stats_dic, true_values=None):
# Remove operations from the parameters list
traces_list = stats_dic.keys()
traces = [item for item in params_list if item in traces_list]
# Number of traces to plot
n_traces = len(traces)
# Set figure conf
sizing_dict = {}
sizing_dict['figure.figsize'] = (14, 14)
sizing_dict['legend.fontsize'] = 30
sizing_dict['axes.labelsize'] = 30
sizing_dict['axes.titlesize'] = 15
sizing_dict['xtick.labelsize'] = 12
sizing_dict['ytick.labelsize'] = 12
rcParams.update(sizing_dict)
# Reshape plot data
list_arrays, labels_list = [], []
for trace_code in traces:
trace_array = stats_dic[trace_code]
list_arrays.append(trace_array)
if trace_code == 'Te':
labels_list.append(r'$T_{low}$')
else:
labels_list.append(self.labels_latex_dic[trace_code])
traces_array = np.array(list_arrays).T
# # Reshape true values
# true_values_list = [None] * len(traces)
# for i in range(len(traces)):
# reference = traces[i] + '_true'
# if reference in true_values:
# value_param = true_values[reference]
# if isinstance(value_param, (list, tuple, np.ndarray)):
# true_values_list[i] = value_param[0]
# else:
# true_values_list[i] = value_param
#
# # Generate the plot
# self.Fig = corner.corner(traces_array[:, :], fontsize=30, labels=labels_list, quantiles=[0.16, 0.5, 0.84],
# show_titles=True, title_args={"fontsize": 200}, truths=true_values_list,
# truth_color='#ae3135', title_fmt='0.3f')
# Generate the plot
self.Fig = corner.corner(traces_array[:, :], fontsize=30, labels=labels_list, quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_args={"fontsize": 200},
truth_color='#ae3135', title_fmt='0.3f')
return
class Basic_tables(Pdf_printer):
def __init__(self):
# Class with pdf tools
Pdf_printer.__init__(self)
def table_mean_outputs(self, table_address, db_dict, true_values=None):
# Table headers
headers = ['Parameter', 'F2018 value', 'Mean', 'Standard deviation', 'Number of points', 'Median',
                   r'$16^{th}$ percentile', r'$84^{th}$ percentile', r'Difference $\%$']
# Generate pdf
#self.create_pdfDoc(table_address, pdf_type='table')
self.pdf_insert_table(headers)
# Loop around the parameters
parameters_list = db_dict.keys()
for param in parameters_list:
if ('_Op' not in param) and param not in ['w_i']:
# Label for the plot
label = self.labels_latex_dic[param]
mean_value = np.mean(db_dict[param])
std = np.std(db_dict[param])
n_traces = db_dict[param].size
median = np.median(db_dict[param])
p_16th = np.percentile(db_dict[param], 16)
p_84th = np.percentile(db_dict[param], 84)
true_value, perDif = 'None', 'None'
if param + '_true' in true_values:
value_param = true_values[param + '_true']
if isinstance(value_param, (list, tuple, np.ndarray)):
true_value = value_param[0]
else:
true_value = value_param
perDif = (1 - (true_value / median)) * 100
self.addTableRow([label, true_value, mean_value, std, n_traces, median, p_16th, p_84th, perDif],
last_row=False if parameters_list[-1] != param else True)
#self.generate_pdf(clean_tex=True)
self.generate_pdf(output_address=table_address)
return
def table_line_fluxes(self, table_address, lines_list, function_key, db_dict, true_data=None):
# Generate pdf
self.create_pdfDoc(table_address, pdf_type='table')
# Table headers
        headers = ['Line Label', 'Observed flux', 'Mean', 'Standard deviation', 'Median', r'$16^{th}$ $percentile$',
                   r'$84^{th}$ $percentile$', r'$Difference\,\%$']
self.pdf_insert_table(headers)
# Data for table
true_values = ['None'] * len(lines_list) if true_data is None else true_data
mean_line_values = db_dict[function_key].mean(axis=0)
std_line_values = db_dict[function_key].std(axis=0)
median_line_values = median(db_dict[function_key], axis=0)
p16th_line_values = percentile(db_dict[function_key], 16, axis=0)
p84th_line_values = percentile(db_dict[function_key], 84, axis=0)
diff_Percentage = ['None'] * len(lines_list) if true_data is None else (1 - (median_line_values / true_values)) * 100
for i in range(len(lines_list)):
label = label_formatting(lines_list[i])
row_i = [label, true_values[i], mean_line_values[i], std_line_values[i], median_line_values[i], p16th_line_values[i],
p84th_line_values[i], diff_Percentage[i]]
self.addTableRow(row_i, last_row=False if lines_list[-1] != lines_list[i] else True)
self.generate_pdf(clean_tex=True)
class MCMC_printer(Basic_plots, Basic_tables):
def __init__(self):
# Supporting classes
Basic_plots.__init__(self)
Basic_tables.__init__(self)
def plot_emisFits(self, linelabels, emisCoeffs_dict, emisGrid_dict, output_folder):
# Temperature and density meshgrids
# X, Y = np.meshgrid(self.tem_grid_range, self.den_grid_range)
# XX, YY = X.flatten(), Y.flatten()
# te_ne_grid = (XX, YY)
te_ne_grid = (self.tempGridFlatten, self.denGridFlatten)
for i in range(linelabels.size):
lineLabel = linelabels[i]
print '--Fitting surface', lineLabel
# 2D Comparison between PyNeb values and the fitted equation
self.emissivitySurfaceFit_2D(lineLabel, emisCoeffs_dict[lineLabel], emisGrid_dict[lineLabel],
self.ionEmisEq[lineLabel], te_ne_grid, self.denRange, self.tempRange)
output_address = '{}{}_{}_temp{}-{}_den{}-{}'.format(output_folder, 'emissivityTeDe2D', lineLabel,
self.tempGridFlatten[0], self.tempGridFlatten[-1],
self.denGridFlatten[0], self.denGridFlatten[-1])
self.savefig(output_address, resolution=200)
plt.clf()
# # 3D Comparison between PyNeb values and the fitted equation
# self.emissivitySurfaceFit_3D(lineLabel, emisCoeffs_dict[lineLabel], emisGrid_dict[lineLabel],
# self.ionEmisEq[lineLabel], te_ne_grid)
#
# output_address = '{}{}_{}_temp{}-{}_den{}-{}'.format(output_folder, 'emissivityTeDe3D', lineLabel,
# self.tempGridFlatten[0], self.tempGridFlatten[-1],
# self.denGridFlatten[0], self.denGridFlatten[-1])
# self.savefig(output_address, resolution=200)
# plt.clf()
return
def plot_emisRatioFits(self, diagnoslabels, emisCoeffs_dict, emisGrid_array, output_folder):
# Temperature and density meshgrids
X, Y = np.meshgrid(self.tem_grid_range, self.den_grid_range)
XX, YY = X.flatten(), Y.flatten()
te_ne_grid = (XX, YY)
for i in range(diagnoslabels.size):
lineLabel = diagnoslabels[i]
print '--Fitting surface', lineLabel
# 2D Comparison between PyNeb values and the fitted equation
self.emissivitySurfaceFit_2D(lineLabel, emisCoeffs_dict[lineLabel], emisGrid_array[:, i],
self.EmisRatioEq_fit[lineLabel], te_ne_grid)
output_address = '{}{}_{}'.format(output_folder, 'emissivityTeDe2D', lineLabel)
self.savefig(output_address, resolution=200)
plt.clf()
# 3D Comparison between PyNeb values and the fitted equation
self.emissivitySurfaceFit_3D(lineLabel, emisCoeffs_dict[lineLabel], emisGrid_array[:, i],
self.EmisRatioEq_fit[lineLabel], te_ne_grid)
output_address = '{}{}_{}'.format(output_folder, 'emissivityTeDe3D', lineLabel)
self.savefig(output_address, resolution=200)
plt.clf()
return
def plotInputSSPsynthesis(self):
# Plot input data
self.prefit_input()
self.savefig(self.input_folder + self.objName + '_prefit_input', resolution=200)
# Plot resampling
self.resampled_observation()
self.savefig(self.input_folder + self.objName + '_resampled_spectra', resolution=200)
# Spectrum masking
self.masked_observation()
self.savefig(self.input_folder + self.objName + '_spectrum_mask', resolution=200)
return
def plotOutputSSPsynthesis(self, pymc2_dbPrefit, pymc2_db_dictPrefit, obj_ssp_fit_flux=None, sspPrefitCoeffs=None):
# Continua components from ssp prefit
if obj_ssp_fit_flux is not None:
self.prefit_comparison(obj_ssp_fit_flux)
self.savefig(self.input_folder + self.objName + '_prefit_Output', resolution=200)
# Prefit SSPs norm plot
if sspPrefitCoeffs is not None:
self.prefit_ssps(sspPrefitCoeffs)
self.savefig(self.input_folder + self.objName + '_prefit_NormSsps', resolution=200)
# SSP prefit traces # TODO increase flexibility for a big number of parameter
traces_names = np.array(['Av_star', 'sigma_star'])
# Stellar prefit Traces
self.traces_plot(traces_names, pymc2_db_dictPrefit)
self.savefig(self.input_folder + self.objName + '_PrefitTracesPlot', resolution=200)
# Stellar prefit Posteriors
# self.posteriors_plot(traces_names, pymc2_db_dictPrefit)
# self.savefig(self.input_folder + self.objName + '_PrefitPosteriorPlot', resolution=200)
# Stellar prefit Posteriors
self.tracesPosteriorPlot(traces_names, pymc2_db_dictPrefit)
self.savefig(self.input_folder + self.objName + '_ParamsTracesPosterios_StellarPrefit', resolution=200)
return
def plotOuputData(self, database_address, db_dict, model_params, include_no_model_check = False):
if self.stellarCheck:
self.continuumFit(db_dict)
self.savefig(database_address + '_ContinuumFit', resolution=200)
if self.emissionCheck:
# Table mean values
print '-- Model parameters table'
self.table_mean_outputs(database_address + '_meanOutput', db_dict, self.obj_data)
# Line fluxes values
print '-- Line fluxes table' # TODO divide in two
self.table_line_fluxes(database_address + '_LineFluxes', self.lineLabels, 'calcFluxes_Op', db_dict, true_data=self.obsLineFluxes)
self.fluxes_distribution(self.lineLabels, self.lineIons, 'calcFluxes_Op', db_dict, obsFluxes=self.obsLineFluxes, obsErr=self.fitLineFluxErr)
self.savefig(database_address + '_LineFluxesPosteriors', resolution=200)
# Traces and Posteriors
print '-- Model parameters posterior diagram'
self.tracesPosteriorPlot(model_params, db_dict)
self.savefig(database_address + '_ParamsTracesPosterios', resolution=200)
# Corner plot
print '-- Scatter plot matrix'
self.corner_plot(model_params, db_dict, self.obj_data)
self.savefig(database_address + '_CornerPlot', resolution=50)
return
| Delosari/dazer | bin/lib/Astro_Libraries/spectrum_fitting/plot_tools.py | Python | mit | 47,742 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import gpg
import sys
from groups import group_lists
# Copyright (C) 2018 Ben McGinnes <ben@gnupg.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License and the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License and the GNU
# Lesser General Public along with this program; if not, see
# <https://www.gnu.org/licenses/>.
"""
Uses the groups module to encrypt to multiple recipients.
"""
c = gpg.Context(armor=True)
if len(sys.argv) > 3:
group_id = sys.argv[1]
filepath = sys.argv[2:]
elif len(sys.argv) == 3:
group_id = sys.argv[1]
filepath = sys.argv[2]
elif len(sys.argv) == 2:
group_id = sys.argv[1]
filepath = input("Enter the filename to encrypt: ")
else:
group_id = input("Enter the group name to encrypt to: ")
filepath = input("Enter the filename to encrypt: ")
with open(filepath, "rb") as f:
text = f.read()
klist = None
for i in range(len(group_lists)):
    if group_lists[i][0] == group_id:
        klist = group_lists[i][1]
logrus = []
if klist is not None:
for i in range(len(klist)):
apattern = list(c.keylist(pattern=klist[i], secret=False))
if apattern[0].can_encrypt == 1:
logrus.append(apattern[0])
else:
pass
try:
ciphertext, result, sign_result = c.encrypt(text, recipients=logrus,
add_encrypt_to=True)
except gpg.errors.InvalidRecipients as e:
for i in range(len(e.recipients)):
for n in range(len(logrus)):
if logrus[n].fpr == e.recipients[i].fpr:
logrus.remove(logrus[n])
else:
pass
try:
ciphertext, result, sign_result = c.encrypt(text,
recipients=logrus,
add_encrypt_to=True)
        except Exception:
pass
with open("{0}.asc".format(filepath), "wb") as f:
f.write(ciphertext)
else:
pass
# EOF
| gpg/gpgme | lang/python/examples/howto/encrypt-to-group-trustno1.py | Python | lgpl-2.1 | 2,917 |
import click
@click.command('config', short_help='Display remote client config')
@click.pass_obj
def cli(obj):
"""Display client config downloaded from API server."""
for k, v in obj.items():
if isinstance(v, list):
v = ', '.join(v)
click.echo(f'{k:20}: {v}')
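# Example output (keys and values shown here are hypothetical):
#   endpoint            : http://localhost:8080/api
#   sort_by             : lastReceiveTime, severity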
| alerta/python-alerta-client | alertaclient/commands/cmd_config.py | Python | apache-2.0 | 298 |
# Copyright (c) 2017, Lenovo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION_TABLE = 'lenovo_alembic_version'
| lenovo-network/networking-lenovo | networking_lenovo/db/migration/alembic_migrations/__init__.py | Python | apache-2.0 | 639 |
# Copyright 2009-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT, arg_user
class CreateSigningCertificate(IAMRequest):
DESCRIPTION = '[Eucalyptus only] Create a new signing certificate'
ARGS = [arg_user(nargs='?', help='''user to create the signing
certificate for (default: current user)'''),
Arg('--out', metavar='FILE', route_to=None,
help='file to write the certificate to (default: stdout)'),
Arg('--keyout', metavar='FILE', route_to=None,
help='file to write the private key to (default: stdout)'),
AS_ACCOUNT]
def postprocess(self, result):
if self.args['out']:
with open(self.args['out'], 'w') as certfile:
certfile.write(result['Certificate']['CertificateBody'])
if self.args['keyout']:
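            # Clear group/other permission bits so the private key file is
            # created readable only by its owner; the previous umask is
            # restored right after the file is written.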
old_umask = os.umask(0o077)
with open(self.args['keyout'], 'w') as keyfile:
keyfile.write(result['Certificate']['PrivateKey'])
os.umask(old_umask)
def print_result(self, result):
print result['Certificate']['CertificateId']
if not self.args['out']:
print result['Certificate']['CertificateBody']
if not self.args['keyout']:
print result['Certificate']['PrivateKey']
| jhajek/euca2ools | euca2ools/commands/iam/createsigningcertificate.py | Python | bsd-2-clause | 2,698 |
# Load the needed modules from bemio
from bemio.io.wamit import read
from bemio.io.output import write_hdf5
from bemio.utilities.hdf_utilities import combine_h5
from bemio.utilities.hdf_utilities import create_hydro_data
# Load the data using the wamit module.
wamit_1 = read(out_file='wamit_data/coer_comp_f.out')
write_hdf5(wamit_1,out_file='wamit_1.h5')
# Load the data using the wamit module.
wamit_2 = read(out_file='wamit_data/coer_comp_f.out')
write_hdf5(wamit_2,out_file='wamit_2.h5')
hdf5_data = combine_h5(['wamit_1.h5','wamit_2.h5'])
hydro_data = create_hydro_data(hdf5_data)
| NREL/OpenWARP | source/automated_test/bemio/test_h5_single_read/run.py | Python | apache-2.0 | 590 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the MRUList Windows Registry plugin."""
import unittest
from plaso.dfwinreg import definitions as dfwinreg_definitions
from plaso.dfwinreg import fake as dfwinreg_fake
from plaso.formatters import winreg as _ # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import mrulist
from tests.parsers.winreg_plugins import test_lib
class TestMRUListStringPlugin(test_lib.RegistryPluginTestCase):
"""Tests for the string MRUList plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = mrulist.MRUListStringPlugin()
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path: the Windows Registry key path.
time_string: string containing the key last written date and time.
Returns:
A Windows Registry key (instance of dfwinreg.WinRegistryKey).
"""
filetime = dfwinreg_fake.Filetime()
filetime.CopyFromString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
u'MRU', key_path=key_path, last_written_time=filetime.timestamp,
offset=1456)
value_data = u'acb'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'MRUList', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=123)
registry_key.AddValue(registry_value)
value_data = u'Some random text here'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'a', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=1892)
registry_key.AddValue(registry_value)
value_data = u'c:/evil.exe'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'b', data=value_data, data_type=dfwinreg_definitions.REG_BINARY,
offset=612)
registry_key.AddValue(registry_value)
value_data = u'C:/looks_legit.exe'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'c', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=1001)
registry_key.AddValue(registry_value)
return registry_key
def testProcess(self):
"""Tests the Process function."""
key_path = u'\\Microsoft\\Some Windows\\InterestingApp\\MRU'
time_string = u'2012-08-28 09:23:49.002031'
registry_key = self._CreateTestKey(key_path, time_string)
event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, registry_key)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 1)
event_object = event_objects[0]
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(time_string)
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_message = (
u'[{0:s}] '
u'Index: 1 [MRU Value a]: Some random text here '
u'Index: 2 [MRU Value c]: C:/looks_legit.exe '
u'Index: 3 [MRU Value b]: c:/evil.exe').format(key_path)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
class TestMRUListShellItemListPlugin(test_lib.RegistryPluginTestCase):
"""Tests for the shell item list MRUList plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = mrulist.MRUListShellItemListPlugin()
def _CreateTestKey(self, key_path, time_string):
"""Creates MRUList Registry keys and values for testing.
Args:
key_path: the Windows Registry key path.
time_string: string containing the key last written date and time.
Returns:
A Windows Registry key (instance of dfwinreg.WinRegistryKey).
"""
filetime = dfwinreg_fake.Filetime()
filetime.CopyFromString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
u'DesktopStreamMRU', key_path=key_path,
last_written_time=filetime.timestamp, offset=1456)
value_data = u'a'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'MRUList', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=123)
registry_key.AddValue(registry_value)
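    # Raw shell item list data for value 'a'; it decodes to the path
    # <My Computer> C:\Winnt\Profiles\Administrator\Desktop referenced by the
    # expected event messages below.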
value_data = b''.join(map(chr, [
0x14, 0x00, 0x1f, 0x00, 0xe0, 0x4f, 0xd0, 0x20, 0xea, 0x3a, 0x69, 0x10,
0xa2, 0xd8, 0x08, 0x00, 0x2b, 0x30, 0x30, 0x9d, 0x19, 0x00, 0x23, 0x43,
0x3a, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0xee, 0x15, 0x00, 0x31,
0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x3e, 0x7a, 0x60, 0x10, 0x80, 0x57,
0x69, 0x6e, 0x6e, 0x74, 0x00, 0x00, 0x18, 0x00, 0x31, 0x00, 0x00, 0x00,
0x00, 0x00, 0x2e, 0x3e, 0xe4, 0x62, 0x10, 0x00, 0x50, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x73, 0x00, 0x00, 0x25, 0x00, 0x31, 0x00, 0x00, 0x00,
0x00, 0x00, 0x2e, 0x3e, 0xe4, 0x62, 0x10, 0x00, 0x41, 0x64, 0x6d, 0x69,
0x6e, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x00, 0x41, 0x44,
0x4d, 0x49, 0x4e, 0x49, 0x7e, 0x31, 0x00, 0x17, 0x00, 0x31, 0x00, 0x00,
0x00, 0x00, 0x00, 0x2e, 0x3e, 0xe4, 0x62, 0x10, 0x00, 0x44, 0x65, 0x73,
0x6b, 0x74, 0x6f, 0x70, 0x00, 0x00, 0x00, 0x00]))
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'a', data=value_data, data_type=dfwinreg_definitions.REG_BINARY,
offset=612)
registry_key.AddValue(registry_value)
return registry_key
def testProcess(self):
"""Tests the Process function."""
key_path = (
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\'
u'DesktopStreamMRU')
time_string = u'2012-08-28 09:23:49.002031'
registry_key = self._CreateTestKey(key_path, time_string)
event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, registry_key)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 5)
# A MRUList event object.
event_object = event_objects[4]
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(time_string)
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_message = (
u'[{0:s}] '
u'Index: 1 [MRU Value a]: Shell item path: '
u'<My Computer> C:\\Winnt\\Profiles\\Administrator\\Desktop').format(
key_path)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
# A shell item event object.
event_object = event_objects[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-01-14 12:03:52')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_message = (
u'Name: Winnt '
u'Shell item path: <My Computer> C:\\Winnt '
u'Origin: {0:s}').format(key_path)
expected_short_message = (
u'Name: Winnt '
u'Origin: \\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer'
u'\\Deskt...')
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| ostree/plaso | tests/parsers/winreg_plugins/mrulist.py | Python | apache-2.0 | 7,693 |
# -*- coding: utf-8 -*-
import re
import pycurl
from module.plugins.internal.SimpleHoster import SimpleHoster
from module.plugins.internal.misc import encode, json, timestamp
class UploadingCom(SimpleHoster):
__name__ = "UploadingCom"
__type__ = "hoster"
__version__ = "0.48"
__status__ = "broken"
__pattern__ = r'http://(?:www\.)?uploading\.com/files/(?:get/)?(?P<ID>\w+)'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium" , "bool", "Use premium account if available" , True),
("fallback" , "bool", "Fallback to free download if premium fails" , True),
("chk_filesize", "bool", "Check file size" , True),
("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]
__description__ = """Uploading.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("jeix", "jeix@hasnomail.de"),
("mkaay", "mkaay@mkaay.de"),
("zoidberg", "zoidberg@mujmail.cz")]
NAME_PATTERN = r'id="file_title">(?P<N>.+?)</'
SIZE_PATTERN = r'size tip_container">(?P<S>[\d.,]+) (?P<U>[\w^_]+)<'
OFFLINE_PATTERN = r'(Page|file) not found'
COOKIES = [("uploading.com", "lang", "1"),
(".uploading.com", "language", "1"),
(".uploading.com", "setlang", "en"),
(".uploading.com", "_lang", "en")]
def process(self, pyfile):
if not "/get/" in pyfile.url:
pyfile.url = pyfile.url.replace("/files", "/files/get")
self.data = self.load(pyfile.url)
self.get_fileInfo()
if self.premium:
self.handle_premium(pyfile)
else:
self.handle_free(pyfile)
def handle_premium(self, pyfile):
postData = {'action': 'get_link',
'code' : self.info['pattern']['ID'],
'pass' : 'undefined'}
self.data = self.load('http://uploading.com/files/get/?JsHttpRequest=%d-xml' % timestamp(), post=postData)
url = re.search(r'"link"\s*:\s*"(.*?)"', self.data)
if url:
self.link = url.group(1).replace("\\/", "/")
raise Exception("Plugin defect")
def handle_free(self, pyfile):
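        # Free download flow: bail out on a (daily) download limit page, call
        # the 'second_page' AJAX action and honour its wait time, then call
        # 'get_link' and finally scrape the form action URL from that page.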
m = re.search('<h2>((Daily )?Download Limit)</h2>', self.data)
if m is not None:
pyfile.error = encode(m.group(1))
self.log_warning(pyfile.error)
self.retry(6, (6 * 60 if m.group(2) else 15) * 60, pyfile.error)
ajax_url = "http://uploading.com/files/get/?ajax"
self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
self.req.http.lastURL = pyfile.url
html = self.load(ajax_url,
post={'action': 'second_page',
'code' : self.info['pattern']['ID']})
res = json.loads(html)
if 'answer' in res and 'wait_time' in res['answer']:
wait_time = int(res['answer']['wait_time'])
self.log_info(_("Waiting %d seconds") % wait_time)
self.wait(wait_time)
else:
self.error(_("No AJAX/WAIT"))
html = self.load(ajax_url,
post={'action': 'get_link',
'code' : self.info['pattern']['ID'],
'pass' : 'false'})
res = json.loads(html)
if 'answer' in res and 'link' in res['answer']:
url = res['answer']['link']
else:
self.error(_("No AJAX/URL"))
self.data = self.load(url)
m = re.search(r'<form id="file_form" action="(.*?)"', self.data)
if m is not None:
url = m.group(1)
else:
self.error(_("No URL"))
self.link = url
| Guidobelix/pyload | module/plugins/hoster/UploadingCom.py | Python | gpl-3.0 | 3,940 |
# Copyright 2015 Hewlett-Packard Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.serialization import jsonutils
import webob.exc
from glance.common import exception
from glance.common import utils
import glance.gateway
import glance.search
from glance.search.api.v0_1 import search as search
from glance.tests.unit import base
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
def _action_fixture(op_type, data, index=None, doc_type=None, _id=None,
**kwargs):
action = {
'action': op_type,
'id': _id,
'index': index,
'type': doc_type,
'data': data,
}
if kwargs:
action.update(kwargs)
return action
def _image_fixture(op_type, _id=None, index='glance', doc_type='image',
data=None, **kwargs):
image_data = {
'name': 'image-1',
'disk_format': 'raw',
}
if data is not None:
image_data.update(data)
return _action_fixture(op_type, image_data, index, doc_type, _id, **kwargs)
class TestSearchController(base.IsolatedUnitTest):
def setUp(self):
super(TestSearchController, self).setUp()
self.search_controller = search.SearchController()
def test_search_all(self):
request = unit_test_utils.get_fake_request()
self.search_controller.search = mock.Mock(return_value="{}")
query = {"match_all": {}}
index = "glance"
doc_type = "metadef"
fields = None
offset = 0
limit = 10
self.search_controller.search(
request, query, index, doc_type, fields, offset, limit)
self.search_controller.search.assert_called_once_with(
request, query, index, doc_type, fields, offset, limit)
def test_search_all_repo(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.search = mock.Mock(return_value="{}")
query = {"match_all": {}}
index = "glance"
doc_type = "metadef"
fields = []
offset = 0
limit = 10
self.search_controller.search(
request, query, index, doc_type, fields, offset, limit)
repo.search.assert_called_once_with(
index, doc_type, query, fields, offset, limit, True)
def test_search_forbidden(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.search = mock.Mock(side_effect=exception.Forbidden)
query = {"match_all": {}}
index = "glance"
doc_type = "metadef"
fields = []
offset = 0
limit = 10
self.assertRaises(
webob.exc.HTTPForbidden, self.search_controller.search,
request, query, index, doc_type, fields, offset, limit)
def test_search_not_found(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.search = mock.Mock(side_effect=exception.NotFound)
query = {"match_all": {}}
index = "glance"
doc_type = "metadef"
fields = []
offset = 0
limit = 10
self.assertRaises(
webob.exc.HTTPNotFound, self.search_controller.search, request,
query, index, doc_type, fields, offset, limit)
def test_search_duplicate(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.search = mock.Mock(side_effect=exception.Duplicate)
query = {"match_all": {}}
index = "glance"
doc_type = "metadef"
fields = []
offset = 0
limit = 10
self.assertRaises(
webob.exc.HTTPConflict, self.search_controller.search, request,
query, index, doc_type, fields, offset, limit)
def test_search_internal_server_error(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.search = mock.Mock(side_effect=Exception)
query = {"match_all": {}}
index = "glance"
doc_type = "metadef"
fields = []
offset = 0
limit = 10
self.assertRaises(
webob.exc.HTTPInternalServerError, self.search_controller.search,
request, query, index, doc_type, fields, offset, limit)
def test_index_complete(self):
request = unit_test_utils.get_fake_request()
self.search_controller.index = mock.Mock(return_value="{}")
actions = [{'action': 'create', 'index': 'myindex', 'id': 10,
'type': 'MyTest', 'data': '{"name": "MyName"}'}]
default_index = 'glance'
default_type = 'image'
self.search_controller.index(
request, actions, default_index, default_type)
self.search_controller.index.assert_called_once_with(
request, actions, default_index, default_type)
def test_index_repo_complete(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.index = mock.Mock(return_value="{}")
actions = [{'action': 'create', 'index': 'myindex', 'id': 10,
'type': 'MyTest', 'data': '{"name": "MyName"}'}]
default_index = 'glance'
default_type = 'image'
self.search_controller.index(
request, actions, default_index, default_type)
repo.index.assert_called_once_with(
default_index, default_type, actions)
def test_index_repo_minimal(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.index = mock.Mock(return_value="{}")
actions = [{'action': 'create', 'index': 'myindex', 'id': 10,
'type': 'MyTest', 'data': '{"name": "MyName"}'}]
self.search_controller.index(request, actions)
repo.index.assert_called_once_with(None, None, actions)
def test_index_forbidden(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.index = mock.Mock(side_effect=exception.Forbidden)
actions = [{'action': 'create', 'index': 'myindex', 'id': 10,
'type': 'MyTest', 'data': '{"name": "MyName"}'}]
self.assertRaises(
webob.exc.HTTPForbidden, self.search_controller.index,
request, actions)
def test_index_not_found(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.index = mock.Mock(side_effect=exception.NotFound)
actions = [{'action': 'create', 'index': 'myindex', 'id': 10,
'type': 'MyTest', 'data': '{"name": "MyName"}'}]
self.assertRaises(
webob.exc.HTTPNotFound, self.search_controller.index,
request, actions)
def test_index_duplicate(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.index = mock.Mock(side_effect=exception.Duplicate)
actions = [{'action': 'create', 'index': 'myindex', 'id': 10,
'type': 'MyTest', 'data': '{"name": "MyName"}'}]
self.assertRaises(
webob.exc.HTTPConflict, self.search_controller.index,
request, actions)
def test_index_exception(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.index = mock.Mock(side_effect=Exception)
actions = [{'action': 'create', 'index': 'myindex', 'id': 10,
'type': 'MyTest', 'data': '{"name": "MyName"}'}]
self.assertRaises(
webob.exc.HTTPInternalServerError, self.search_controller.index,
request, actions)
def test_plugins_info(self):
request = unit_test_utils.get_fake_request()
self.search_controller.plugins_info = mock.Mock(return_value="{}")
self.search_controller.plugins_info(request)
self.search_controller.plugins_info.assert_called_once_with(request)
def test_plugins_info_repo(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.plugins_info = mock.Mock(return_value="{}")
self.search_controller.plugins_info(request)
repo.plugins_info.assert_called_once_with()
def test_plugins_info_forbidden(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.plugins_info = mock.Mock(side_effect=exception.Forbidden)
self.assertRaises(
webob.exc.HTTPForbidden, self.search_controller.plugins_info,
request)
def test_plugins_info_not_found(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.plugins_info = mock.Mock(side_effect=exception.NotFound)
self.assertRaises(webob.exc.HTTPNotFound,
self.search_controller.plugins_info, request)
def test_plugins_info_internal_server_error(self):
request = unit_test_utils.get_fake_request()
repo = glance.search.CatalogSearchRepo
repo.plugins_info = mock.Mock(side_effect=Exception)
self.assertRaises(webob.exc.HTTPInternalServerError,
self.search_controller.plugins_info, request)
class TestSearchDeserializer(test_utils.BaseTestCase):
def setUp(self):
super(TestSearchDeserializer, self).setUp()
self.deserializer = search.RequestDeserializer(
utils.get_search_plugins()
)
def test_single_index(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': 'glance',
})
output = self.deserializer.search(request)
self.assertEqual(['glance'], output['index'])
def test_single_doc_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'type': 'image',
})
output = self.deserializer.search(request)
self.assertEqual(['image'], output['doc_type'])
def test_empty_request(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({})
output = self.deserializer.search(request)
self.assertEqual(['glance'], output['index'])
self.assertEqual(sorted(['image', 'metadef']),
sorted(output['doc_type']))
def test_empty_request_admin(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({})
request.context.is_admin = True
output = self.deserializer.search(request)
self.assertEqual(['glance'], output['index'])
self.assertEqual(sorted(['image', 'metadef']),
sorted(output['doc_type']))
def test_invalid_index(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': 'invalid',
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_invalid_doc_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'type': 'invalid',
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_forbidden_schema(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'schema': {},
})
self.assertRaises(webob.exc.HTTPForbidden, self.deserializer.search,
request)
def test_forbidden_self(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'self': {},
})
self.assertRaises(webob.exc.HTTPForbidden, self.deserializer.search,
request)
def test_fields_restriction(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': ['glance'],
'type': ['metadef'],
'query': {'match_all': {}},
'fields': ['description'],
})
output = self.deserializer.search(request)
self.assertEqual(['glance'], output['index'])
self.assertEqual(['metadef'], output['doc_type'])
self.assertEqual(['description'], output['fields'])
def test_highlight_fields(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': ['glance'],
'type': ['metadef'],
'query': {'match_all': {}},
'highlight': {'fields': {'name': {}}}
})
output = self.deserializer.search(request)
self.assertEqual(['glance'], output['index'])
self.assertEqual(['metadef'], output['doc_type'])
self.assertEqual({'name': {}}, output['query']['highlight']['fields'])
def test_invalid_limit(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': ['glance'],
'type': ['metadef'],
'query': {'match_all': {}},
'limit': 'invalid',
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.search,
request)
def test_negative_limit(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': ['glance'],
'type': ['metadef'],
'query': {'match_all': {}},
'limit': -1,
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.search,
request)
def test_invalid_offset(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': ['glance'],
'type': ['metadef'],
'query': {'match_all': {}},
'offset': 'invalid',
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.search,
request)
def test_negative_offset(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': ['glance'],
'type': ['metadef'],
'query': {'match_all': {}},
'offset': -1,
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.search,
request)
def test_limit_and_offset(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'index': ['glance'],
'type': ['metadef'],
'query': {'match_all': {}},
'limit': 1,
'offset': 2,
})
output = self.deserializer.search(request)
self.assertEqual(['glance'], output['index'])
self.assertEqual(['metadef'], output['doc_type'])
self.assertEqual(1, output['limit'])
self.assertEqual(2, output['offset'])
class TestIndexDeserializer(test_utils.BaseTestCase):
def setUp(self):
super(TestIndexDeserializer, self).setUp()
self.deserializer = search.RequestDeserializer(
utils.get_search_plugins()
)
def test_empty_request(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_empty_actions(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'default_index': 'glance',
'default_type': 'image',
'actions': [],
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_missing_actions(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'default_index': 'glance',
'default_type': 'image',
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_invalid_operation_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('invalid', '1')]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_invalid_default_index(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'default_index': 'invalid',
'actions': [_image_fixture('create', '1')]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_invalid_default_doc_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'default_type': 'invalid',
'actions': [_image_fixture('create', '1')]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_empty_operation_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('', '1')]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_missing_operation_type(self):
action = _image_fixture('', '1')
action.pop('action')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': 'glance',
'_op_type': 'index',
'_source': {'disk_format': 'raw', 'name': 'image-1'},
'_type': 'image'
}],
'default_index': None,
'default_type': None
}
self.assertEqual(expected, output)
def test_create_single(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('create', '1')]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': 'glance',
'_op_type': 'create',
'_source': {'disk_format': 'raw', 'name': 'image-1'},
'_type': 'image'
}],
'default_index': None,
'default_type': None
}
self.assertEqual(expected, output)
def test_create_multiple(self):
actions = [
_image_fixture('create', '1'),
_image_fixture('create', '2', data={'name': 'image-2'}),
]
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': actions,
})
output = self.deserializer.index(request)
expected = {
'actions': [
{
'_id': '1',
'_index': 'glance',
'_op_type': 'create',
'_source': {'disk_format': 'raw', 'name': 'image-1'},
'_type': 'image'
},
{
'_id': '2',
'_index': 'glance',
'_op_type': 'create',
'_source': {'disk_format': 'raw', 'name': 'image-2'},
'_type': 'image'
},
],
'default_index': None,
'default_type': None
}
self.assertEqual(expected, output)
def test_create_missing_data(self):
action = _image_fixture('create', '1')
action.pop('data')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_create_with_default_index(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'default_index': 'glance',
'actions': [_image_fixture('create', '1', index=None)]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': None,
'_op_type': 'create',
'_source': {'disk_format': 'raw', 'name': 'image-1'},
'_type': 'image'
}],
'default_index': 'glance',
'default_type': None
}
self.assertEqual(expected, output)
def test_create_with_default_doc_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'default_type': 'image',
'actions': [_image_fixture('create', '1', doc_type=None)]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': 'glance',
'_op_type': 'create',
'_source': {'disk_format': 'raw', 'name': 'image-1'},
'_type': None
}],
'default_index': None,
'default_type': 'image'
}
self.assertEqual(expected, output)
def test_create_with_default_index_and_doc_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'default_index': 'glance',
'default_type': 'image',
'actions': [_image_fixture('create', '1', index=None,
doc_type=None)]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': None,
'_op_type': 'create',
'_source': {'disk_format': 'raw', 'name': 'image-1'},
'_type': None
}],
'default_index': 'glance',
'default_type': 'image'
}
self.assertEqual(expected, output)
def test_create_missing_id(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('create')]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': None,
'_index': 'glance',
'_op_type': 'create',
'_source': {'disk_format': 'raw', 'name': 'image-1'},
'_type': 'image'
}],
'default_index': None,
'default_type': None,
}
self.assertEqual(expected, output)
def test_create_empty_id(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('create', '')]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '',
'_index': 'glance',
'_op_type': 'create',
'_source': {'disk_format': 'raw', 'name': 'image-1'},
'_type': 'image'
}],
'default_index': None,
'default_type': None
}
self.assertEqual(expected, output)
def test_create_invalid_index(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('create', index='invalid')]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_create_invalid_doc_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('create', doc_type='invalid')]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_create_missing_index(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('create', '1', index=None)]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_create_missing_doc_type(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('create', '1', doc_type=None)]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_update_missing_id(self):
action = _image_fixture('update')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_update_missing_data(self):
action = _image_fixture('update', '1')
action.pop('data')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_update_using_data(self):
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [_image_fixture('update', '1')]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': 'glance',
'_op_type': 'update',
'_type': 'image',
'doc': {'disk_format': 'raw', 'name': 'image-1'}
}],
'default_index': None,
'default_type': None
}
self.assertEqual(expected, output)
def test_update_using_script(self):
action = _image_fixture('update', '1', script='<sample script>')
action.pop('data')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': 'glance',
'_op_type': 'update',
'_type': 'image',
'params': {},
'script': '<sample script>'
}],
'default_index': None,
'default_type': None,
}
self.assertEqual(expected, output)
def test_update_using_script_and_data(self):
action = _image_fixture('update', '1', script='<sample script>')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': 'glance',
'_op_type': 'update',
'_type': 'image',
'params': {'disk_format': 'raw', 'name': 'image-1'},
'script': '<sample script>'
}],
'default_index': None,
'default_type': None,
}
self.assertEqual(expected, output)
def test_delete_missing_id(self):
action = _image_fixture('delete')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action]
})
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
request)
def test_delete_single(self):
action = _image_fixture('delete', '1')
action.pop('data')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action]
})
output = self.deserializer.index(request)
expected = {
'actions': [{
'_id': '1',
'_index': 'glance',
'_op_type': 'delete',
'_source': {},
'_type': 'image'
}],
'default_index': None,
'default_type': None
}
self.assertEqual(expected, output)
def test_delete_multiple(self):
action_1 = _image_fixture('delete', '1')
action_1.pop('data')
action_2 = _image_fixture('delete', '2')
action_2.pop('data')
request = unit_test_utils.get_fake_request()
request.body = jsonutils.dumps({
'actions': [action_1, action_2],
})
output = self.deserializer.index(request)
expected = {
'actions': [
{
'_id': '1',
'_index': 'glance',
'_op_type': 'delete',
'_source': {},
'_type': 'image'
},
{
'_id': '2',
'_index': 'glance',
'_op_type': 'delete',
'_source': {},
'_type': 'image'
},
],
'default_index': None,
'default_type': None
}
self.assertEqual(expected, output)
class TestResponseSerializer(test_utils.BaseTestCase):
def setUp(self):
super(TestResponseSerializer, self).setUp()
self.serializer = search.ResponseSerializer()
def test_plugins_info(self):
expected = {
"plugins": [
{
"index": "glance",
"type": "image"
},
{
"index": "glance",
"type": "metadef"
}
]
}
request = webob.Request.blank('/v0.1/search')
response = webob.Response(request=request)
result = {
"plugins": [
{
"index": "glance",
"type": "image"
},
{
"index": "glance",
"type": "metadef"
}
]
}
self.serializer.search(response, result)
actual = jsonutils.loads(response.body)
self.assertEqual(expected, actual)
self.assertEqual('application/json', response.content_type)
def test_search(self):
expected = [{
'id': '1',
'name': 'image-1',
'disk_format': 'raw',
}]
request = webob.Request.blank('/v0.1/search')
response = webob.Response(request=request)
result = [{
'id': '1',
'name': 'image-1',
'disk_format': 'raw',
}]
self.serializer.search(response, result)
actual = jsonutils.loads(response.body)
self.assertEqual(expected, actual)
self.assertEqual('application/json', response.content_type)
def test_index(self):
expected = {
'success': '1',
'failed': '0',
'errors': [],
}
request = webob.Request.blank('/v0.1/index')
response = webob.Response(request=request)
result = {
'success': '1',
'failed': '0',
'errors': [],
}
self.serializer.index(response, result)
actual = jsonutils.loads(response.body)
self.assertEqual(expected, actual)
self.assertEqual('application/json', response.content_type)
| wkoathp/glance | glance/tests/unit/v0_1/test_search.py | Python | apache-2.0 | 33,040 |
# -*- coding: utf-8 -*-
"""
Django settings for outdoors project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join, dirname
from configurations import Configuration, values
BASE_DIR = dirname(dirname(__file__))
class Common(Configuration):
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'avatar', # for user avatars
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# MIGRATIONS CONFIGURATION
MIGRATION_MODULES = {
'sites': 'contrib.sites.migrations'
}
# END MIGRATIONS CONFIGURATION
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = 'CHANGEME!!!'
# END SECRET CONFIGURATION
# FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
# END FIXTURE CONFIGURATION
# EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
# END EMAIL CONFIGURATION
# MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Jeremy""", 'jstratm@yahoo.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# END MANAGER CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/outdoors')
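    # NOTE: values.DatabaseURLValue normally reads the DATABASE_URL
    # environment variable and only falls back to the URL given above, so the
    # local postgres database is just a development default (assumption based
    # on django-configurations' documented behaviour).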
# END DATABASE CONFIGURATION
# CACHING
# Do this here because thanks to django-pylibmc-sasl and pylibmc
# memcacheify (used on heroku) is painful to install on windows.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# END CACHING
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# END GENERAL CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processers go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# END TEMPLATE CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# URL Configuration
ROOT_URLCONF = 'urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# End URL Configuration
# AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# END AUTHENTICATION CONFIGURATION
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# END Custom user app defaults
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# END SLUGLIFIER
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# END LOGGING CONFIGURATION
@classmethod
def post_setup(cls):
cls.DATABASES['default']['ATOMIC_REQUESTS'] = True
# Your common stuff: Below this line define 3rd party library settings
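    # With django-configurations, this Common class is typically selected at
    # runtime via the DJANGO_CONFIGURATION environment variable (alongside
    # DJANGO_SETTINGS_MODULE), and environment-specific classes such as Local
    # or Production subclass it in sibling modules (assumption based on the
    # usual django-configurations/cookiecutter layout).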
| wildamerica/outdoors | outdoors/config/common.py | Python | bsd-3-clause | 8,962 |
'''
Created on 13 Aug 2015
@author: NoNotCar
'''
import pygame
# import pygame._view
import sys
screen = pygame.display.set_mode((400, 256))
import Img
from Enemies import *
def die(screen):
pygame.mixer.music.stop()
pygame.display.flip()
pygame.time.wait(1000)
screen.fill((0, 0, 0))
Img.bcentre(Img.bfont, "FOOL", screen, col=(255, 255, 255))
pygame.display.flip()
pygame.time.wait(1000)
def Instruct(instructions, time):
words = instructions.split()
text = ""
for i in range(len(words)):
pygame.event.pump()
if i:
text += " "
text += words[i]
screen.fill((255, 255, 255))
Img.bcentre(Img.dfont, text, screen, col=(0, 0, 0))
pygame.display.flip()
pygame.time.wait(time)
class Player(object):
def __init__(self):
self.radius = 100
self.angle = 0.0
self.direction = 1
self.speedmult = 1
self.lasedown = 0
def get_x(self):
return int(round(self.radius * math.sin(math.radians(self.angle)))) + 128
def get_y(self):
return int(round(self.radius * math.cos(math.radians(self.angle)))) + 128
def get_speed(self):
return self.radius ** -1 * 100 * self.speedmult
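    # The player orbits the point (128, 128): get_x/get_y convert the polar
    # coordinates (radius, angle) to screen space, and get_speed makes the
    # angular speed inversely proportional to the radius, so tighter orbits
    # sweep around the circle faster.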
levels = (([Asteroid], 15, 1), ([Asteroid, BigAsteroid], 20, 1.5), ([Hostage, Asteroid], 30, 1),
([BigAsteroid, SmallAsteroid], 30, 2), ([MustShoot], 30, 1),
([Asteroid, Obstacle], 30, 1), ([Obstacle2], 30, 1), ([EnemyShip], 30, 1), ([Ranged], 30, 1),
([Obstacle, MustShoot], 30, 1.5), ([EnemyShip2], 60, 2))
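# Each level entry is (enemy classes to spawn, score needed to clear the
# level, player speed multiplier); the main loop below reads them as
# levels[level][0], levels[level][1] and levels[level][2].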
level = 0
Instruct("UP/DOWN TO MOVE", 500)
pygame.time.wait(500)
Instruct("SHOOT WITH SPACE", 500)
while True:
p = Player()
c = pygame.time.Clock()
obstacles = []
plasers = []
score = 0
tick = 0
dead = False
if level == 4:
Instruct("MUST SHOOT YELLOW", 500)
elif level == 5:
Instruct("RED IS IMMORTAL", 500)
elif level == 2:
Instruct("DON'T SHOOT PINK", 500)
elif level == len(levels) - 1:
Instruct("ULTIMATE DEFENCE", 1000)
p.speedmult = levels[level][2]
if level != 9:
Instruct("LEVEL " + str(level + 1), 500)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
screen.fill((255, 255, 255) if level != len(levels) - 1 else (255, 150, 0))
for obsc in levels[level][0]:
obsc.generate(obstacles)
keys = pygame.key.get_pressed()
if keys[pygame.K_DOWN]:
if p.radius > 30:
p.radius -= 1
elif keys[pygame.K_UP]:
if p.radius < 100:
p.radius += 1
if keys[pygame.K_SPACE] and not p.lasedown:
plasers.append([p.get_x() - 8, p.get_y() - 2])
p.lasedown = 20
if p.lasedown > 0:
p.lasedown -= 1
pygame.draw.circle(screen, (127, 127, 127), (128, 128), p.radius, 1)
orects = []
plrects = []
for obstacle in obstacles:
orects.append((pygame.draw.rect(screen, obstacle.col,
pygame.Rect(obstacle.x, obstacle.y, obstacle.w, obstacle.h)), obstacle))
obstacle.update(obstacles)
for pos in plasers:
plrects.append(pygame.draw.rect(screen, (0, 0, 255), pygame.Rect(pos[0], pos[1], 16, 4)))
pos[0] += 4
prect = pygame.draw.rect(screen, (0, 0, 0), pygame.Rect(p.get_x() - 8, p.get_y() - 8, 16, 16))
for ore in [o for o in orects if o[1].plaser]:
for pr in plrects:
if ore[0].colliderect(pr):
obstacles.remove(ore[1])
if ore[1].hostage:
die(screen)
dead = True
for ore in orects:
if ore[1].isdeadly and ore[0].colliderect(prect):
die(screen)
dead = True
for obstacle in obstacles:
if obstacle.x <= -obstacle.w:
if not obstacle.deadgooff:
obstacles.remove(obstacle)
else:
die(screen)
dead = True
if dead:
break
for ore in orects:
if not ore[1].isdeadly and ore[0].colliderect(prect):
obstacles.remove(ore[1])
for pos in plasers:
if pos[0] > 400:
plasers.remove(pos)
p.angle = (p.angle - p.get_speed()) % 360
pygame.display.flip()
c.tick(60)
if tick == 60:
score += 1
tick = 0
if score == levels[level][1]:
pygame.mixer.music.stop()
Instruct("WELL DONE", 500)
level += 1
if level == 10:
Instruct("YOU WIN!", 2000)
sys.exit()
break
else:
tick += 1
| NoNotCar/orbital | Orbital/orbital.py | Python | cc0-1.0 | 4,970 |
from twisted.trial import unittest
from game.direction import FORWARD, BACKWARD, LEFT, RIGHT
from game.test.util import PlayerCreationMixin
from game.vector import Vector
# Expedient hack until I switch to decimals
_epsilon = 0.0001
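# Float positions in the tests below are compared with
# abs(actual - expected) < _epsilon rather than exact equality.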
class DirectionObserver(object):
"""
Recorder implementation of the direction observer interface used to verify
that direction observation works.
@ivar changes: C{list} of three-tuples of player objects, positions, and
directions. One element per call to C{directionChanged}.
"""
def __init__(self):
self.changes = []
def directionChanged(self, player):
"""
Record a direction change event for the given player.
@param player: The player which changed direction.
"""
self.changes.append((
player, player.getPosition(), player.direction))
class PlayerTests(unittest.TestCase, PlayerCreationMixin):
"""
There should be an object which has a position and can be moved in
eight directions.
"""
def test_setPosition(self):
"""
Players have a position which can be set with C{setPosition}.
"""
player = self.makePlayer(Vector(1, 0, 2))
player.setPosition(Vector(-2, 0, 1))
self.assertEqual(player.getPosition(), Vector(-2, 0, 1))
def test_setPositionAfterSomeMotion(self):
"""
Players should be placed at the correct position if C{setPosition} is
called after they have been moving around a little bit.
"""
player = self.makePlayer(Vector(1, 0, 2))
player.setDirection(FORWARD)
self.advanceTime(1)
player.setPosition(Vector(-2, 0, 1))
self.assertEqual(player.getPosition(), Vector(-2, 0, 1))
def test_getPosition(self):
"""
Players have a C{getPosition} method the initial return value of which
is based on initializer parameters.
"""
player = self.makePlayer(Vector(0, 0, 0))
v = player.getPosition()
self.assertEqual(v, Vector(0, 0, 0))
def test_setDirection(self):
"""
L{Player.setDirection} should accept a vector which sets the direction
of the player's movement.
"""
player = self.makePlayer(Vector(3, 0, 2))
player.setDirection(FORWARD + LEFT)
self.assertEqual(player.direction, FORWARD + LEFT)
player.setDirection(BACKWARD)
self.assertEqual(player.direction, BACKWARD)
def test_getPositionWithoutMovementAfterTimePasses(self):
"""
Directionless L{Player}s should remain stationary.
"""
position = Vector(2, 5, 3)
player = self.makePlayer(position)
self.advanceTime(10)
self.assertEqual(player.getPosition(), position)
def test_getPositionWithMovementAfterTimePasses(self):
"""
Directed L{Player}s should change position.
"""
v = Vector(3, 0, -2)
player = self.makePlayer(v)
player.setDirection(LEFT)
self.advanceTime(1)
self.assertEqual(player.getPosition(), Vector(v.x - 1, v.y, v.z))
def test_greaterSpeedResultsInGreaterDisplacement(self):
"""
A L{Player} which is moving more quickly should travel further.
"""
v = Vector(2, 3, 0)
speed = 5
player = self.makePlayer(v, speed=speed)
player.setDirection(RIGHT)
self.advanceTime(1)
p = player.getPosition()
self.assertTrue(abs(p.x - v.x - speed) < _epsilon)
self.assertTrue(abs(p.y - v.y) < _epsilon)
self.assertTrue(abs(p.z - v.z) < _epsilon)
def test_getPositionWithMovementAfterTimePassesTwice(self):
"""
Twice-directed players should have an accurate position after each
change in direction after some time passes.
"""
v = Vector(3, 0, -2)
player = self.makePlayer(v)
player.setDirection(RIGHT)
self.advanceTime(1)
self.assertEqual(player.getPosition(), Vector(v.x + 1, v.y, v.z))
player.setDirection(FORWARD)
self.advanceTime(1)
self.assertEquals(player.getPosition(), Vector(v.x + 1, v.y, v.z - 1))
def test_getPositionFloats(self):
"""
        L{Player.getPosition} returns C{float} values if the player's
coordinates don't fall exactly onto integer values.
"""
player = self.makePlayer(Vector(0, 0, 0))
player.setDirection(FORWARD)
self.advanceTime(0.5)
self.assertEquals(player.getPosition(), Vector(0, 0, -0.5))
def test_stop(self):
"""
Setting the player's direction to C{None} makes the player cease moving.
"""
x, y = 49, 27
player = self.makePlayer(Vector(x, 0, y))
player.setDirection(None)
self.advanceTime(1)
self.assertEqual(player.getPosition(), Vector(x, 0, y))
player.setDirection(RIGHT)
self.advanceTime(1)
self.assertEqual(player.getPosition(), Vector(x + 1, 0, y))
player.setDirection(None)
self.advanceTime(1)
self.assertEqual(player.getPosition(), Vector(x + 1, 0, y))
def test_observeDirection(self):
"""
Setting the player's direction should notify any observers registered
with that player of the new direction.
"""
position = Vector(6, 3, 2)
player = self.makePlayer(position)
observer = DirectionObserver()
player.addObserver(observer)
player.setDirection(FORWARD)
self.assertEqual(observer.changes, [(player, position, FORWARD)])
def test_getPositionInsideObserver(self):
"""
L{Player.getPosition} should return an accurate value when called
within an observer's C{directionChanged} callback.
"""
position = Vector(1, 0, 1)
player = self.makePlayer(position)
player.setDirection(RIGHT)
self.advanceTime(1)
observer = DirectionObserver()
player.addObserver(observer)
player.setDirection(None)
[(p, v, d)] = observer.changes
self.assertIdentical(p, player)
self.assertIsInstance(v, Vector)
self.assertIdentical(d, None)
# XXX Switch to decimal (seriously)
self.assertTrue(abs(v.x - 2) < _epsilon)
self.assertTrue(abs(v.y - 0) < _epsilon)
self.assertTrue(abs(v.z - 1) < _epsilon)
def test_turn(self):
"""
L{Player.turn} rotates the player's perspective along the horizontal
(that is, about the Y axis) and vertical (that is, about the X axis)
planes.
"""
player = self.makePlayer(Vector(0, 0, 0))
player.turn(0, 1)
self.assertEquals(player.orientation, Vector(0, 1, 0))
player.turn(0, 2)
self.assertEquals(player.orientation, Vector(0, 3, 0))
player.turn(0, -4)
self.assertEquals(player.orientation, Vector(0, -1, 0))
player.turn(1, 0)
self.assertEquals(player.orientation, Vector(1, -1, 0))
player.turn(-2, 0)
self.assertEquals(player.orientation, Vector(-1, -1, 0))
player.turn(4, 0)
self.assertEquals(player.orientation, Vector(3, -1, 0))
def test_forwardMotionFollowsOrientation(self):
"""
Motion in the forward direction translates the player's position in the
direction they are facing.
"""
player = self.makePlayer(Vector(0, 0, 0))
player.turn(0, 90)
player.setDirection(FORWARD)
self.advanceTime(1)
p = player.getPosition()
self.assertTrue(abs(p.x - 1) < _epsilon)
self.assertTrue(abs(p.y) < _epsilon)
self.assertTrue(abs(p.z) < _epsilon)
| eriknelson/gam3 | game/test/test_player.py | Python | mit | 7,777 |
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2018 Bitcraze AB
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Simple example that connects to one crazyflie (check the address at the top
and update it to your crazyflie address) and uses the high level commander
to send setpoints and trajectory to fly a figure 8.
This example is intended to work with any positioning system (including LPS).
It aims at documenting how to set the Crazyflie in position control mode
and how to send setpoints using the high level commander.
"""
import sys
import time
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.high_level_commander import HighLevelCommander
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.mem import CompressedSegment
from cflib.crazyflie.mem import CompressedStart
from cflib.crazyflie.mem import MemoryElement
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.crazyflie.syncLogger import SyncLogger
from cflib.utils import uri_helper
# URI to the Crazyflie to connect to
uri = uri_helper.uri_from_env(default='radio://0/80/2M/E7E7E7E7E7')
# The trajectory to fly
a = 0.9
b = 0.5
c = 0.5
trajectory = [
CompressedStart(0.0, 0.0, 0.0, 0.0),
CompressedSegment(2.0, [0.0, 1.0, 1.0], [0.0, a, 0.0], [], []),
CompressedSegment(2.0, [1.0, b, 0.0], [-a, -c, 0.0], [], []),
CompressedSegment(2.0, [-b, -1.0, -1.0], [c, a, 0.0], [], []),
CompressedSegment(2.0, [-1.0, 0.0, 0.0], [-a, 0.0, 0.0], [], []),
]
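# Each CompressedSegment takes a duration followed by the x, y, z and yaw
# control-point lists for that piece of the trajectory; an empty list keeps
# that axis constant. Together the four 2-second segments trace one lap of the
# figure 8 (this reading of the compressed format is an assumption; see
# cflib.crazyflie.mem for the exact encoding).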
class Uploader:
def __init__(self):
self._is_done = False
        self._success = True
def upload(self, trajectory_mem):
print('Uploading data')
trajectory_mem.write_data(self._upload_done,
write_failed_cb=self._upload_failed)
while not self._is_done:
time.sleep(0.2)
        return self._success
def _upload_done(self, mem, addr):
print('Data uploaded')
self._is_done = True
        self._success = True
def _upload_failed(self, mem, addr):
print('Data upload failed')
self._is_done = True
        self._success = False
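# wait_for_position_estimator below keeps a sliding window of the last ten
# Kalman variance samples per axis and returns once the spread (max - min) of
# every window drops below the threshold, i.e. once the position estimate has
# converged.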
def wait_for_position_estimator(scf):
print('Waiting for estimator to find position...')
log_config = LogConfig(name='Kalman Variance', period_in_ms=500)
log_config.add_variable('kalman.varPX', 'float')
log_config.add_variable('kalman.varPY', 'float')
log_config.add_variable('kalman.varPZ', 'float')
var_y_history = [1000] * 10
var_x_history = [1000] * 10
var_z_history = [1000] * 10
threshold = 0.001
with SyncLogger(scf, log_config) as logger:
for log_entry in logger:
data = log_entry[1]
var_x_history.append(data['kalman.varPX'])
var_x_history.pop(0)
var_y_history.append(data['kalman.varPY'])
var_y_history.pop(0)
var_z_history.append(data['kalman.varPZ'])
var_z_history.pop(0)
min_x = min(var_x_history)
max_x = max(var_x_history)
min_y = min(var_y_history)
max_y = max(var_y_history)
min_z = min(var_z_history)
max_z = max(var_z_history)
# print("{} {} {}".
# format(max_x - min_x, max_y - min_y, max_z - min_z))
if (max_x - min_x) < threshold and (
max_y - min_y) < threshold and (
max_z - min_z) < threshold:
break
def reset_estimator(cf):
cf.param.set_value('kalman.resetEstimation', '1')
time.sleep(0.1)
cf.param.set_value('kalman.resetEstimation', '0')
wait_for_position_estimator(cf)
def activate_high_level_commander(cf):
cf.param.set_value('commander.enHighLevel', '1')
def activate_mellinger_controller(cf):
cf.param.set_value('stabilizer.controller', '2')
def upload_trajectory(cf, trajectory_id, trajectory):
trajectory_mem = cf.mem.get_mems(MemoryElement.TYPE_TRAJ)[0]
trajectory_mem.trajectory = trajectory
upload_result = Uploader().upload(trajectory_mem)
if not upload_result:
print('Upload failed, aborting!')
sys.exit(1)
cf.high_level_commander.define_trajectory(
trajectory_id,
0,
len(trajectory),
type=HighLevelCommander.TRAJECTORY_TYPE_POLY4D_COMPRESSED)
total_duration = 0
# Skip the start element
for segment in trajectory[1:]:
total_duration += segment.duration
return total_duration
def run_sequence(cf, trajectory_id, duration):
commander = cf.high_level_commander
commander.takeoff(1.0, 2.0)
time.sleep(3.0)
relative = True
commander.start_trajectory(trajectory_id, 1.0, relative)
time.sleep(duration)
commander.land(0.0, 2.0)
time.sleep(2)
commander.stop()
if __name__ == '__main__':
cflib.crtp.init_drivers()
with SyncCrazyflie(uri, cf=Crazyflie(rw_cache='./cache')) as scf:
cf = scf.cf
trajectory_id = 1
activate_high_level_commander(cf)
# activate_mellinger_controller(cf)
duration = upload_trajectory(cf, trajectory_id, trajectory)
print('The sequence is {:.1f} seconds long'.format(duration))
reset_estimator(cf)
run_sequence(cf, trajectory_id, duration)
| bitcraze/crazyflie-lib-python | examples/autonomy/autonomous_sequence_high_level_compressed.py | Python | gpl-2.0 | 6,136 |
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
lldbinline.MakeInlineTest(__file__, globals(),
lldbinline.expectedFailureAll(oslist=["windows"]))
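# MakeInlineTest builds the test case from the annotated source files that sit
# next to this script, and the expectedFailureAll decorator marks it as an
# expected failure on Windows hosts.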
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/lang/cpp/function_refs/TestFunctionRefs.py | Python | bsd-3-clause | 201 |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}-{{last_name}}',
'{{first_name_female}} {{last_name}} {{last_name}}',
'{{prefix}} {{first_name}} {{last_name}}',
)
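    # Faker picks one of the formats above at random for every generated name,
    # so repeating the plain '{{first_name}} {{last_name}}' entry weights
    # simple first/last combinations more heavily than the prefixed or
    # double-surname variants.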
# extracted from https://www.bfs.admin.ch/bfs/it/home/statistiche/popolazione/nascite-decessi/nomi-svizzera.assetdetail.3243313.html # noqa E501
first_names_male = (
'Peter', 'Hans', 'Daniel', 'Thomas', 'Andreas', 'Martin', 'Markus',
'Michael', 'Christian', 'Stefan', 'Walter', 'Urs', 'Marco', 'Bruno',
'Patrick', 'Werner', 'René', 'Marcel', 'Beat', 'Roland', 'Kurt',
'Josef', 'David', 'Simon', 'Rolf', 'Heinz', 'Rudolf', 'Paul', 'Roger',
'Christoph', 'Ernst', 'Pascal', 'Adrian', 'Lukas', 'Marc', 'Robert',
'Reto', 'Manuel', 'Fabian', 'Alfred', 'Philipp', 'Jürg', 'Matthias',
'Stephan', 'Franz', 'Anton', 'André', 'Alexander', 'Samuel', 'Jan',
'Johann', 'Luca', 'Max', 'Roman', 'Mario', 'Fritz', 'Ulrich', 'Dominik',
'Karl', 'Tobias', 'Oliver', 'Florian', 'Antonio', 'Benjamin', 'Sandro',
'Bernhard', 'Jonas', 'Felix', 'Raphael', 'Kevin', 'Erich', 'Fabio',
'Jakob', 'Sven', 'Dario', 'Giuseppe', 'Remo', 'Nicolas', 'Albert',
'Erwin', 'Richard', 'Nico', 'Michel', 'José', 'Claudio', 'Tim', 'Noah',
'Joel', 'Heinrich', 'Jörg', 'Robin', 'Sebastian', 'Armin', 'Guido',
'Silvan', 'Lars', 'Ivan', 'Julian', 'Alois', 'Francesco', 'Sascha',
'Dominic', 'Johannes', 'Georg', 'Gabriel', 'Manfred', 'Herbert', 'Otto',
'Alessandro', 'Gerhard', 'Patrik', 'Gian', 'Mathias', 'Leon', 'Willi',
'Eduard', 'Nicola', 'Hugo', 'Ali', 'Yves', 'Elias', 'Hermann',
'Philippe', 'Leo', 'Emil', 'Frank', 'Dieter', 'Friedrich', 'Luis',
'Giovanni', 'Niklaus', 'Alex', 'Roberto', 'Rafael', 'Hanspeter',
'Diego', 'Nils', 'Leandro', 'Ramon', 'Severin', 'Salvatore', 'Mike',
'Alain', 'Timo', 'Carlos', 'Arthur', 'Yannick', 'Eric', 'Angelo', 'Ivo',
'Wolfgang', 'Matteo', 'Joël', 'Andrin', 'Pius', 'Moritz', 'Valentin',
'Louis', 'Wilhelm', 'Renato', 'Levin', 'Silvio', 'Willy', 'Andrea',
'Jonathan', 'Jean', 'Livio', 'Loris', 'Damian', 'Theodor', 'Michele',
'Vincenzo', 'Elia', 'Ralph', 'Klaus', 'Eugen', 'Mark', 'Konrad',
'Denis', 'Norbert', 'Lorenz', 'Viktor', 'Mehmet', 'Marko', 'Kilian',
'Hans-Peter', 'Cédric', 'Ralf', 'Aaron', 'Maximilian', 'Carlo',
'Alessio', 'Olivier', 'Jürgen', 'Luigi', 'Philip', 'Lucas', 'Mauro',
'Janis', 'Cyrill', 'Linus', 'Davide', 'Othmar', 'Flavio', 'Nino',
'Arnold', 'Nick', 'Rainer', 'Domenico', 'Adolf', 'Emanuel', 'Oskar',
'Ben', 'Joshua', 'Leonardo', 'Franco', 'Pierre', 'John', 'Gregor',
'Fernando', 'Marius', 'Claude', 'Edwin', 'Colin', 'Mustafa', 'Pedro',
'Stefano', 'Sergio', 'Dominique', 'Juan', 'Nikola', 'Enrico', 'Jens',
'Daniele', 'Thierry', 'Jose', 'Liam', 'Francisco', 'Ricardo', 'Rico',
'Christof', 'Aleksandar', 'Dennis', 'Mohamed', 'Joseph', 'Charles',
'Noel', 'Miguel', 'Laurin', 'Milan', 'Reinhard', 'Lionel', 'Dragan',
'Hasan', 'Paulo', 'Edgar', 'Silas', 'Hubert', 'Helmut', 'Ibrahim',
'Ruben', 'Timon', 'Vincent', 'Christopher', 'Finn', 'Ronny', 'Kaspar',
'Mattia', 'Lorenzo', 'Pietro', 'Björn', 'Hansruedi', 'Gottfried',
'Joachim', 'Benno', 'Harald', 'Jorge', 'Cedric', 'Nevio', 'Paolo',
'Gianluca', 'Boris', 'Kai', 'Maurizio', 'Steven', 'Mischa', 'Patric',
'Zoran', 'Mirco', 'Marvin', 'Dirk', 'Benedikt', 'Uwe', 'Hans-Rudolf',
'Maurice', 'Massimo', 'Hansjörg', 'Jeremy', 'Niklas', 'Ahmet',
'Fridolin', 'Dejan', 'Goran', 'Micha', 'Mohammad', 'Ronald', 'Bernd',
'Mirko', 'Erik', 'Jason', 'Tiago', 'Riccardo', 'Jérôme', 'Igor',
'Siegfried', 'Pasquale', 'Andri', 'Tom', 'Ueli', 'Amir', 'Cyril',
'Adriano', 'Alberto', 'Ferdinand', 'Justin', 'Raffael', 'Julien',
'Lenny', 'Luka', 'Marcus', 'Pirmin', 'Janik', 'Julius', 'Meinrad',
'Adam', 'James', 'Hüseyin', 'Alexandre', 'Rocco', 'Luc', 'Victor',
'João', 'Andres', 'Luan', 'Flurin', 'Filip', 'Ismail', 'Danilo',
'Laurent', 'Raffaele', 'Ahmed', 'Günter', 'Joao', 'Rui', 'Xaver',
'Fabrizio', 'William', 'Vito', 'Miroslav', 'Lino', 'Albin', 'Jean-Pierre',
'Basil', 'Till', 'Horst', 'Romeo', 'Aldo', 'Murat', 'Harry',
'Alfons', 'Pablo', 'Bernard', 'Noé', 'Luciano', 'August', 'Levi',
'Nando', 'Fabrice', 'Raymond', 'Jamie', 'Georges', 'Steffen', 'Serge',
'Cristian', 'Samir', 'António', 'Marlon', 'Omar', 'Lian', 'Oscar',
'Yanick', 'Armando', 'Nikolaus', 'Dylan', 'Hannes', 'Sacha', 'Nuno',
'Toni', 'Dino', 'Elmar', 'Arno', 'Joaquim', 'Sasa', 'Henry', 'Vladimir',
'Arben', 'Ryan', 'Bekim', 'Milos', 'Giorgio', 'Ludwig', 'Leonard',
'Adnan', 'Gilbert', 'Yannik', 'Aron', 'Iwan', 'Maik', 'Dimitri',
'Erhard', 'François', 'Gabriele', 'Sami', 'Elio', 'Antonino', 'Fynn',
'Simone', 'Andrew', 'Alan', 'Nenad', 'Frédéric', 'Etienne', 'Janick',
'Steve', 'Christophe', 'Gianni', 'Urban', 'Anthony', 'Deniz', 'Jon',
'Alejandro', 'Axel', 'Ian', 'Theo', 'Andrej', 'Brian', 'Lucien', 'Gino',
'Clemens', 'Yanik', 'Adem', 'Emir', 'Tino', 'Miro', 'Enis', 'Gregory',
'Danijel', 'Osman', 'Michal', 'Carmine', 'Orlando', 'Enes', 'Giuliano',
'Timothy', 'Fredy', 'Besnik', 'Vitor', 'Holger', 'Kim', 'Eduardo',
'Petar', 'Jacques', 'Karim', 'Darko', 'Gustav', 'Emilio', 'Mateo',
'Alban', 'Marek', 'Oswald', 'Noël', 'Donato', 'Mohammed', 'Roy', 'Kay',
'Nathan', 'Enea', 'Silvano', 'Josip', 'Valerio', 'Artur', 'Besim',
'Mika', 'Torsten', 'Romano', 'Heiko', 'Yusuf', 'Chris', 'Naim', 'Burim',
'Gaetano', 'Hans-Ulrich', 'Olaf', 'Maurus', 'Volker', 'Jean-Claude',
'Henri', 'Nik', 'Rodrigo', 'Florin', 'Mael', 'Amar', 'Agron', 'Muhamed',
'Tristan', 'Valon', 'Ahmad', 'Ilir', 'Javier', 'Lorin', 'Yanis',
'Fatmir', 'Bajram', 'Carmelo', 'Agim', 'Enzo', 'Moreno', 'Cornel',
'Andy', 'Jeton', 'Blerim', 'Bojan', 'Federico', 'Attila', 'Juri',
'Tomas', 'Valentino', 'Ismet', 'Jannik', 'Ruedi', 'Afrim', 'Yannic',
'Ramadan', 'Alfredo', 'Josua', 'Cosimo', 'Gerardo', 'Bastian',
'Filippo', 'Raoul', 'Halil', 'Yann', 'Georgios', 'Jannis', 'Nicholas',
'Sean', 'Wilfried', 'Günther', 'Dusan', 'Beda', 'Gerold', 'Gottlieb',
'Filipe', 'Ilija', 'Carl', 'Ardian', 'Marcello', 'Enver', 'Dean',
'Dion', 'Tenzin', 'Zeljko', 'Carsten', 'Diogo', 'Alen', 'Egon', 'Aurel',
'Yannis', 'Edin', 'Hans-Jörg', 'Tomislav', 'Mohamad', 'Bujar', 'Raul',
'Slobodan', 'Driton', 'Maxim', 'Francis', 'Hansueli', 'Ivica', 'Nelson',
'Emanuele', 'Konstantin', 'Fred', 'Naser', 'Gerd', 'Kristian', 'Selim',
'Corsin', 'Dietmar', 'George', 'Piotr', 'Giacomo', 'Ingo', 'Andre',
'Malik', 'Lothar', 'Jochen', 'Sinan', 'Thorsten', 'Tiziano', 'Gilles',
'Avni', 'Jann', 'Lio', 'Niels', 'Emmanuel', 'Leonhard', 'Lorik',
'Aurelio', 'Gion', 'Liridon', 'Marino', 'Can', 'Kenan', 'Ewald',
'Stéphane', 'Dalibor', 'Jozef', 'Noe', 'Bryan', 'Dan', 'Santiago',
'Damiano', 'Arian', 'Rosario', 'Giancarlo', 'Nathanael', 'Emre',
'Stephen', 'Hassan', 'Jovan', 'Egzon', 'Reinhold', 'Tomasz', 'Vittorio',
'Patrice', 'Tibor', 'Jost', 'Elvis', 'Lean', 'Henrik', 'Musa', 'Noa',
'Udo', 'Almir', 'Van', 'Dietrich', 'Mladen', 'Armend', 'Arlind', 'Milo',
'Arsim', 'Bashkim', 'Dimitrios', 'Matthew', 'Ömer', 'Abdullah', 'Hakan',
'Gerald', 'Tommaso', 'Joris', 'Damir', 'Vinzenz', 'Marcos', 'Raphaël',
'Ennio', 'Melvin', 'Leander', 'Kuno', 'Massimiliano', 'Maël', 'Anto',
'Branko', 'Fadil', 'Kemal', 'Muhammed', 'Hendrik', 'Pawel', 'Jeremias',
'Léon', 'Leano', 'Rémy', 'Giulio', 'Muhamet', 'Lulzim', 'Konstantinos',
'Pavel', 'Rinaldo', 'Omer', 'Simeon', 'Gian-Luca', 'Maurin', 'Antoine',
'Frederik', 'Janic', 'Faton', 'Marcin', 'Sébastien', 'Cem', 'Curdin',
'Endrit', 'Nemanja', 'Karsten', 'Renzo', 'Jerome', 'Krzysztof',
'Jeffrey', 'Sebastiano', 'Ernesto', 'Lazar', 'Ramazan', 'Gérard',
'Ajan', 'Emin', 'Ioannis', 'Jesus', 'Alfonso', 'Yasin', 'Jaron',
'Alexis', 'Orhan', 'Artan', 'Morris', 'Angel', 'Janosch', 'Rene',
'Shaban', 'Jakub', 'Loïc', 'Kristijan', 'Enrique', 'Skender',
'Gianfranco', 'Mathieu', 'Xavier', 'Mathis', 'Didier', 'Arif', 'Hamza',
'Jacob', 'Leart', 'Laszlo', 'Predrag', 'Mentor', 'Wendelin', 'Luís',
'Constantin', 'Erion', 'Berat', 'Dardan', 'Melchior', 'Serkan',
'Dorian', 'Eren', 'Fatih', 'Luzius', 'Nebojsa', 'Metin', 'Diar', 'Rino',
'Ekrem', 'Isa', 'Jetmir', 'Edward', 'Nikolaos', 'Gazmend', 'Haris',
'Kian', 'Ensar', 'Mirsad', 'Danny', 'Senad', 'Donat', 'Bilal', 'Ron',
'Nael', 'Guy', 'Julio', 'Kujtim', 'Kushtrim', 'Lutz', 'Balthasar',
'Rouven', 'Lias', 'Neil', 'Abraham', 'Magnus', 'Sérgio', 'Hansjürg',
'Said', 'Ismael', 'Detlef', 'Umberto', 'Admir', 'Jayden', 'Jaime',
'Karl-Heinz', 'Tomás', 'Florim', 'Achim', 'Devin', 'Maxime', 'Fitim',
'Jean-Marc', 'Rayan', 'Sadik', 'Tarik', 'Abdul', 'Jack', 'Mergim',
'Nelio', 'Sam', 'Flamur', 'Ignaz', 'Samuele', 'Tony', 'Petr',
'Waldemar', 'Arda', 'Ardit', 'Lukasz', 'Milorad', 'Nicolai', 'Ramiz',
'Aziz', 'Kamil', 'Rinor', 'Safet', 'Piero', 'Erkan', 'Niko', 'Zsolt',
'Ernest', 'Miodrag', 'Alvaro', 'Astrit', 'Edmund', 'Jules', 'Cristiano',
'Ivano', 'Kenneth', 'Saverio', 'Semir', 'Burak', 'Theophil', 'Altin',
'Andrzej', 'Jonah', 'Jiri', 'Salih', 'Zoltán', 'Ferenc', 'Grzegorz',
'Irfan', 'Johan', 'Kaan', 'Süleyman', 'Hussein', 'Rexhep', 'Besart',
'Janos', 'Labinot', 'Onur', 'Stjepan', 'Domenic', 'Siro', 'Abel',
'Florent', 'Christos', 'Swen', 'Branislav', 'Mato', 'Amin', 'Matej',
'Slavko', 'Jusuf', 'Luke', 'Slavisa', 'Erol', 'Gabor', 'Jasmin',
'Visar', 'Sinisa', 'Isidor', 'Merlin', 'Claus', 'Marin', 'Zoltan',
'Muhammad', 'Neo', 'Zeno', 'Istvan', 'Adis', 'Edon', 'Gil', 'Leopold',
'Hartmut', 'Raimund', 'Ken', 'Csaba', 'Kerim', 'Norman', 'Lucio',
'László', 'Marjan', 'Damjan', 'Eugenio', 'Domingos', 'Reiner',
'Augusto', 'Gzim', 'Nazmi', 'Laurenz', 'Zlatko', 'Jaroslav', 'Nevin',
'Biagio', 'Felice', 'Balz', 'Boban', 'Marcelo', 'Caspar', 'Ledion',
'Rodolfo', 'Aldin', 'Matti', 'Remzi', 'Ljubisa', 'Til', 'Péter', 'Umut',
'Baris', 'Lirim', 'Mehdi', 'Edmond', 'Gonçalo', 'Jasin', 'Niclas',
'Jordan', 'Mahmoud', 'Stanislav', 'Cornelius', 'Jona', 'Khaled',
'Quentin', 'Gökhan', 'Imer', 'Volkan', 'Harun', 'Miran', 'Damien',
'Gennaro', 'Jari', 'Marian', 'Rüdiger', 'Albrecht', 'Mile', 'Thiago',
'Yvan', 'Alwin', 'Gani', 'Mahmut', 'Pero', 'Evan', 'Fisnik', 'Idriz',
'Sergej', 'Sabri', 'Felipe', 'István', 'Dave', 'Hans-Jürgen', 'Jean-Luc',
'Kastriot', 'Mariusz', 'Arne', 'Faruk', 'Gebhard', 'German',
'Tamás', 'Anes', 'Arbnor', 'Mats', 'Drilon', 'Fábio', 'Mihajlo',
'Sedat', 'Tahir',
)
# extracted from https://www.bfs.admin.ch/bfs/it/home/statistiche/popolazione/nascite-decessi/nomi-svizzera.assetdetail.3243318.html # noqa E501
first_names_female = (
'Maria', 'Anna', 'Ursula', 'Ruth', 'Elisabeth', 'Sandra', 'Monika',
'Claudia', 'Verena', 'Nicole', 'Barbara', 'Silvia', 'Andrea', 'Marie',
'Daniela', 'Christine', 'Karin', 'Marianne', 'Erika', 'Margrit',
'Brigitte', 'Susanne', 'Rita', 'Laura', 'Sarah', 'Katharina',
'Rosmarie', 'Esther', 'Heidi', 'Anita', 'Manuela', 'Rosa', 'Doris',
'Sonja', 'Beatrice', 'Yvonne', 'Gertrud', 'Jacqueline', 'Sara', 'Irene',
'Ana', 'Franziska', 'Cornelia', 'Fabienne', 'Gabriela', 'Patricia',
'Martina', 'Julia', 'Edith', 'Eva', 'Isabelle', 'Sabrina', 'Nathalie',
'Alexandra', 'Corinne', 'Angela', 'Melanie', 'Alice', 'Nadine',
'Jessica', 'Denise', 'Elena', 'Vanessa', 'Simone', 'Anne', 'Regula',
'Susanna', 'Carmen', 'Sophie', 'Caroline', 'Emma', 'Nina', 'Tanja',
'Catherine', 'Sabine', 'Lara', 'Petra', 'Lea', 'Céline', 'Jasmin',
'Therese', 'Stefanie', 'Johanna', 'Nadia', 'Tamara', 'Chantal',
'Martha', 'Michelle', 'Christina', 'Marina', 'Adelheid', 'Dora',
'Monique', 'Rahel', 'Hedwig', 'Lisa', 'Janine', 'Pia', 'Anja',
'Elsbeth', 'Madeleine', 'Eveline', 'Judith', 'Diana', 'Françoise',
'Charlotte', 'Maja', 'Eliane', 'Renate', 'Christiane', 'Michèle',
'Jennifer', 'Bettina', 'Chiara', 'Bernadette', 'Aline', 'Carla',
'Helena', 'Brigitta', 'Mirjam', 'Theresia', 'Astrid', 'Nadja', 'Jana',
'Selina', 'Priska', 'Lena', 'Stephanie', 'Lucia', 'Linda', 'Regina',
'Agnes', 'Olivia', 'Sonia', 'Valérie', 'Klara', 'Ramona', 'Lina',
'Elsa', 'Helene', 'Monica', 'Iris', 'Hanna', 'Valentina', 'Annemarie',
'Elisa', 'Margrith', 'Dominique', 'Beatrix', 'Cristina', 'Paula',
'Magdalena', 'Livia', 'Sofia', 'Patrizia', 'Liliane', 'Nelly', 'Marion',
'Ida', 'Alina', 'Isabel', 'Vera', 'Stéphanie', 'Giulia', 'Leonie',
'Jeannette', 'Christa', 'Alessia', 'Véronique', 'Myriam', 'Emilie',
'Olga', 'Nora', 'Julie', 'Sylvia', 'Margaretha', 'Claudine', 'Marlise',
'Miriam', 'Sibylle', 'Sylvie', 'Lydia', 'Katja', 'Lorena', 'Jolanda',
'Rebecca', 'Mia', 'Irma', 'Larissa', 'Luana', 'Martine', 'Deborah',
'Francesca', 'Veronika', 'Isabella', 'Noemi', 'Ingrid', 'Frieda',
'Suzanne', 'Liselotte', 'Michaela', 'Florence', 'Evelyne', 'Hildegard',
'Corina', 'Danielle', 'Laurence', 'Carole', 'Milena', 'Cécile', 'Mara',
'Luzia', 'Sandrine', 'Gisela', 'Simona', 'Mélanie', 'Béatrice', 'Marta',
'Antonia', 'Erna', 'Gabriele', 'Katrin', 'Kathrin', 'Melissa',
'Camille', 'Adriana', 'Fiona', 'Lucie', 'Natalie', 'Teresa', 'Renata',
'Josiane', 'Sophia', 'Clara', 'Luisa', 'Silvana', 'Jeannine', 'Pascale',
'Hélène', 'Emilia', 'Joëlle', 'Gabriella', 'Maya', 'Marianna', 'Ines',
'Léa', 'Claire', 'Marisa', 'Sina', 'Lia', 'Paola', 'Mathilde', 'Sabina',
'Alessandra', 'Ivana', 'Anne-Marie', 'Elvira', 'Bianca', 'Samira',
'Cindy', 'Amélie', 'Chloé', 'Kim', 'Victoria', 'Annette', 'Angelina',
'Dorothea', 'Antoinette', 'Tina', 'Tania', 'Angelika', 'Valeria',
'Flavia', 'Margaritha', 'Rachel', 'Marguerite', 'Jeanne', 'Yvette',
'Natalia', 'Alicia', 'Giovanna', 'Mireille', 'Liliana', 'Pauline',
'Seraina', 'Elodie', 'Ariane', 'Helga', 'Zoé', 'Natascha', 'Muriel',
'Francine', 'Joana', 'Melina', 'Aurélie', 'Thi', 'Giuseppina',
'Tatiana', 'Margareta', 'Louise', 'Marija', 'Debora', 'Salome',
'Viviane', 'Fanny', 'Katia', 'Carolina', 'Irina', 'Bertha', 'Marlene',
'Noémie', 'Amanda', 'Sarina', 'Marlies', 'Lilian', 'Irène', 'Laetitia',
'Kristina', 'Jasmine', 'Ella', 'Jenny', 'Gabrielle', 'Carmela', 'Manon',
'Helen', 'Fatima', 'Stefania', 'Virginie', 'Ladina', 'Jelena', 'Berta',
'Antonella', 'Rebekka', 'Audrey', 'Anaïs', 'Tatjana', 'Annina',
'Margot', 'Carina', 'Samantha', 'Evelyn', 'Annamarie', 'Tiziana',
'Arlette', 'Emily', 'Kerstin', 'Svenja', 'Caterina', 'Christelle',
'Saskia', 'Elin', 'Lilly', 'Anouk', 'Rose', 'Fatma', 'Lynn', 'Elina',
'Colette', 'Josette', 'Leila', 'Gerda', 'Susana', 'Geneviève',
'Désirée', 'Naomi', 'Stella', 'Romina', 'Delphine', 'Aurora', 'Estelle',
'Juliette', 'Tabea', 'Anina', 'Thérèse', 'Mariana', 'Beatriz', 'Hilda',
'Lotti', 'Séverine', 'Delia', 'Ronja', 'Gina', 'Mila', 'Antonietta',
'Veronica', 'Aleksandra', 'Gisèle', 'Lidia', 'Natacha', 'Laure',
'Pamela', 'Rosemarie', 'Marie-Louise', 'Jael', 'Eleonora', 'Zoe',
'Franca', 'Hannah', 'Yolanda', 'Birgit', 'Amina', 'Leandra', 'Elise',
'Alma', 'Anastasia', 'Marlis', 'Fernanda', 'Irmgard', 'Micheline',
'Elfriede', 'Selma', 'Ilona', 'Danièle', 'Justine', 'Magali',
'Georgette', 'Graziella', 'Cynthia', 'Cäcilia', 'Loredana', 'Géraldine',
'Sylviane', 'Heidy', 'Alexia', 'Mary', 'Ingeborg', 'Emine', 'Yara',
'Ursina', 'Marlène', 'Morgane', 'Michela', 'Katarina', 'Marine',
'Ulrike', 'Daria', 'Bruna', 'Jasmina', 'Mira', 'Soraya', 'Juliana',
'Marlyse', 'Agnès', 'Carine', 'Gloria', 'Alena', 'Svetlana', 'Josefina',
'Annelise', 'Myrta', 'Roberta', 'Pierrette', 'Celine', 'Annika',
'Mirjana', 'Andrée', 'Célia', 'Serena', 'Christel', 'Susan', 'Jocelyne',
'Renée', 'Vesna', 'Andreia', 'Elizabeth', 'Cinzia', 'Karen', 'Cecilia',
'Karine', 'Marlen', 'Ilaria', 'Virginia', 'Suzana', 'Rose-Marie',
'Jeanine', 'Margarita', 'Joanna', 'Coralie', 'Elif', 'Dina', 'Janina',
'Josefine', 'Mina', 'Hannelore', 'Gordana', 'Luciana', 'Heike',
'Aurelia', 'Luna', 'Dagmar', 'Filomena', 'Dolores', 'Raymonde',
'Prisca', 'Annick', 'Huguette', 'Elisabetha', 'Dragana', 'Leona',
'Elke', 'Inès', 'Valerie', 'Ayse', 'Amelia', 'Flurina', 'Marie-Thérèse',
'Roswitha', 'Rosanna', 'Ginette', 'Matilde', 'Mélissa', 'Yolande',
'Océane', 'Giada', 'Murielle', 'Danijela', 'Sanja', 'Slavica',
'Adelina', 'Valentine', 'Catarina', 'Raquel', 'Emmanuelle', 'Dana',
'Erica', 'Marcelle', 'Nancy', 'Germaine', 'Concetta', 'Gianna', 'Jade',
'Lucienne', 'Letizia', 'Fatime', 'Odette', 'Solange', 'Lily', 'Nada',
'Lucy', 'Margherita', 'Hana', 'Elisabetta', 'Leana', 'Vivienne',
'Viola', 'Ljiljana', 'Yasmin', 'Agatha', 'Jutta', 'Anabela', 'Laila',
'Romana', 'Gaëlle', 'Belinda', 'Aida', 'Federica', 'Giuliana', 'Marie-Claire',
'Mirella', 'Eliana', 'Paulina', 'Diane', 'Paulette', 'Mona',
'Milica', 'Corinna', 'Yasmine', 'Annalise', 'Hatice', 'Alyssa', 'Ellen',
'Kelly', 'Biljana', 'Noelia', 'Alisha', 'Léonie', 'Amandine', 'Amelie',
'Amy', 'Lilli', 'Nelli', 'Margaux', 'Melisa', 'Anneliese', 'Marie-Claude',
'Sheila', 'Dragica', 'Xenia', 'Violeta', 'Annie', 'Lou',
'Meret', 'Ute', 'Irena', 'Catia', 'Giuseppa', 'Sybille', 'Lana',
'Celina', 'Aylin', 'Zita', 'Karolina', 'Louisa', 'Luise', 'Rosina',
'Jeanette', 'Sharon', 'Henriette', 'Joy', 'Inge', 'Carola', 'Tiffany',
'Margarete', 'Marietta', 'Josefa', 'Leyla', 'Nuria', 'Anne-Lise',
'Gilberte', 'Giorgia', 'Emanuela', 'Daisy', 'Angelica', 'Josephine',
'Ilse', 'Natasa', 'Andrina', 'Fabiana', 'Flora', 'Maude', 'Melinda',
'Silke', 'Enya', 'Amira', 'Beate', 'Viktoria', 'Francisca', 'Merita',
'Odile', 'Snezana', 'Ariana', 'Carol', 'Medina', 'Romy', 'Noëlle',
'Alissa', 'Elisabete', 'Camilla', 'Miranda', 'Leonora', 'Lejla',
'Zeynep', 'Maeva', 'Domenica', 'Raffaella', 'Salomé', 'Ornella',
'Rosaria', 'Alisa', 'Alba', 'Zorica', 'Roxane', 'Raphaela', 'Inês',
'Hermine', 'Waltraud', 'Aude', 'Selin', 'Claude', 'Arianna',
'Angélique', 'Leticia', 'Malin', 'Viviana', 'Annelies', 'Damaris',
'Liv', 'Maëlle', 'Sigrid', 'Jill', 'Karina', 'Liana', 'Eline', 'Lotte',
'Lise', 'Rina', 'Morena', 'Marilena', 'Leonor', 'Annamaria', 'Albina',
'Dijana', 'Grazia', 'Ester', 'Vivien', 'Käthi', 'Tara', 'Aurore',
'Katarzyna', 'Amalia', 'Celia', 'Seline', 'Anisa', 'Azra', 'Adeline',
'Fabiola', 'Agnieszka', 'Greta', 'Jane', 'Vincenza', 'Rosalia', 'Marie-Christine',
'Marijana', 'Jara', 'Gudrun', 'Edona', 'Gioia', 'Marcia',
'Myrtha', 'Ekaterina', 'Lucette', 'Gertrude', 'Ljubica', 'Adrienne',
'Malika', 'Ava', 'Yael', 'Lola', 'Marinette', 'Teuta', 'Joelle',
'Beata', 'Line', 'Priscilla', 'Rosalie', 'Mariette', 'Ada', 'Marielle',
'Juliane', 'Emina', 'Arta', 'Margarida', 'Claire-Lise', 'Gaia', 'Antje',
'Raffaela', 'Mercedes', 'Vlora', 'Arlinda', 'Nicoletta', 'Alison',
'Ottilia', 'Clémence', 'Lisbeth', 'Shqipe', 'Adele', 'Maryline',
'Sónia', 'Ewa', 'Drita', 'Gladys', 'Dilara', 'Malgorzata', 'Eleni',
'Sandy', 'Marika', 'Marthe', 'Norma', 'Carolin', 'Ina', 'Agathe',
'Alea', 'Anke', 'Zora', 'Cristiana', 'Marie-José', 'Liridona', 'Romane',
'Noa', 'Shpresa', 'Esma', 'Assunta', 'Vittoria', 'Blerta', 'Ema',
'Elma', 'Anika', 'Marie-France', 'Samanta', 'Mariella', 'Meryem',
'Tânia', 'Ghislaine', 'Marica', 'Desirée', 'Britta', 'Joséphine',
'Moira', 'Maud', 'Gemma', 'Silja', 'Sladjana', 'Sanela', 'Iva', 'Ann',
'Nadège', 'Corine', 'Frida', 'Cheyenne', 'Theres', 'Lilia', 'Matilda',
'Geraldine', 'Lisette', 'Margaret', 'Eloïse', 'Felicia', 'Hulda',
'Kathleen', 'Erina', 'Jovana', 'Timea', 'Sofie', 'Wanda', 'Anne-Sophie',
'Zahra', 'Florentina', 'Alexa', 'Ruzica', 'Ganimete', 'Herta', 'Agata',
'Yasemin', 'Frédérique', 'Nicola', 'Norah', 'Lorenza', 'Ilenia',
'Khadija', 'Elda', 'Felicitas', 'Charline', 'Ela', 'Eliza', 'Katalin',
'Rafaela', 'Tanya', 'Theresa', 'Floriane', 'Katherine', 'Asia',
'Mathilda', 'Fabia', 'Fatmire', 'Imelda', 'Susi', 'Zuzana', 'Cassandra',
'Donatella', 'Antonina', 'Luz', 'Yasmina', 'Eleonore', 'Bluette',
'Malea', 'Danica', 'Dunja', 'Kirsten', 'Eileen', 'Mirela', 'Vanesa',
'Filipa', 'Léna', 'Jaqueline', 'Evelin', 'Violette', 'Vjollca',
'Mariam', 'Maryam', 'Amela', 'Luigia', 'Noëmi', 'Joyce', 'Pierina',
'Aferdita', 'Cátia', 'Mandy', 'Regine', 'Branka', 'Radmila', 'Vreneli',
'Marcella', 'Grace', 'Ludivine', 'Natasha', 'Olena', 'Elea', 'Jil',
'Anne-Laure', 'Eléonore', 'Ayla', 'Mégane', 'Maddalena', 'Sereina',
'Tenzin', 'Dafina', 'Eve', 'Leslie', 'Alix', 'Kiara', 'Ardita', 'Aisha',
'Margit', 'Janet', 'Kira', 'Margreth', 'Amra', 'Marcela', 'Solène',
'Kristin', 'Fitore', 'Rosalba', 'Edina', 'Mariangela', 'Agnese',
'Albulena', 'Joanne', 'Ylenia', 'Clarissa', 'Magda', 'Marie-Laure',
'Anna-Maria', 'Luljeta', 'Marjorie', 'Annalisa', 'Lidija', 'Ajla',
'Sanije', 'Wendy', 'Wilma', 'Layla', 'Thea', 'Esra', 'Jaël', 'Fernande',
'Vania', 'Lindita', 'Tessa', 'Mimoza', 'Kata', 'Maryse', 'Dalia',
'Käthe', 'Blanka', 'Katerina', 'Ophélie', 'Leni', 'Egzona', 'Eugenia',
'Lavinia', 'Léane', 'Bukurije', 'Cordula', 'Teodora', 'Nikolina',
'Özlem', 'Lauriane', 'Milka', 'Patrícia', 'Aloisia', 'Lya', 'Derya',
'Margret', 'Juana', 'Vilma', 'Annabelle', 'Besarta', 'Norina',
'Cláudia', 'Nives', 'Hanife', 'Blerina', 'Lydie', 'Gerlinde', 'Déborah',
'Mirlinda', 'Vivian', 'María', 'Shania', 'Romaine', 'Tuana', 'Berthe',
'Friederike', 'Susann', 'Rosetta', 'Hava', 'Kaltrina', 'Marie-Jeanne',
'Iryna', 'Mihaela',
)
first_names = first_names_male + first_names_female
last_names = (
'Ackermann', 'Aebi', 'Albrecht', 'Ammann', 'Amrein', 'Arnold',
'Bachmann', 'Bader', 'Bär', 'Bättig', 'Bauer', 'Baumann',
'Baumgartner', 'Baur', 'Beck', 'Benz', 'Berger', 'Bernasconi',
'Betschart', 'Bianchi', 'Bieri', 'Blaser', 'Blum', 'Bolliger',
'Bosshard', 'Braun', 'Brun', 'Brunner', 'Bucher', 'Bühler', 'Bühlmann',
'Burri', 'Christen', 'Egger', 'Egli', 'Eichenberger', 'Erni', 'Ernst',
'Eugster', 'Fankhauser', 'Favre', 'Fehr', 'Felber', 'Felder',
'Ferrari', 'Fischer', 'Flückiger', 'Forster', 'Frei', 'Frey', 'Frick',
'Friedli', 'Fuchs', 'Furrer', 'Gasser', 'Geiger', 'Gerber', 'Gfeller',
'Giger', 'Gloor', 'Graf', 'Grob', 'Gross', 'Gut', 'Haas', 'Häfliger',
'Hafner', 'Hartmann', 'Hasler', 'Hauser', 'Hermann', 'Herzog', 'Hess',
'Hirt', 'Hodel', 'Hofer', 'Hoffmann', 'Hofmann', 'Hofstetter', 'Hotz',
'Huber', 'Hug', 'Hunziker', 'Hürlimann', 'Imhof', 'Isler', 'Iten',
'Jäggi', 'Jenni', 'Jost', 'Kägi', 'Kaiser', 'Kälin', 'Käser',
'Kaufmann', 'Keller', 'Kern', 'Kessler', 'Knecht', 'Koch', 'Kohler',
'Kuhn', 'Küng', 'Kunz', 'Lang', 'Lanz', 'Lehmann', 'Leu', 'Leunberger',
'Lüscher', 'Lustenberger', 'Lüthi', 'Lutz', 'Mäder', 'Maier', 'Marti',
'Martin', 'Maurer', 'Mayer', 'Meier', 'Meili', 'Meister', 'Merz',
'Mettler', 'Meyer', 'Michel', 'Moser', 'Müller', 'Näf', 'Ott', 'Peter',
'Pfister', 'Portmann', 'Probst', 'Rey', 'Ritter', 'Roos', 'Roth',
'Rüegg', 'Schäfer', 'Schaller', 'Schär', 'Schärer', 'Schaub',
'Scheidegger', 'Schenk', 'Scherrer', 'Schlatter', 'Schmid', 'Schmidt',
'Schneider', 'Schnyder', 'Schoch', 'Schuler', 'Schumacher', 'Schürch',
'Schwab', 'Schwarz', 'Schweizer', 'Seiler', 'Senn', 'Sidler',
'Siegrist', 'Sigrist', 'Spörri', 'Stadelmann', 'Stalder', 'Staub',
'Stauffer', 'Steffen', 'Steiger', 'Steiner', 'Steinmann', 'Stettler',
'Stocker', 'Stöckli', 'Stucki', 'Studer', 'Stutz', 'Suter', 'Sutter',
'Tanner', 'Thommen', 'Tobler', 'Vogel', 'Vogt', 'Wagner', 'Walder',
'Walter', 'Weber', 'Wegmann', 'Wehrli', 'Weibel', 'Wenger',
'Wettstein', 'Widmer', 'Winkler', 'Wirth', 'Wirz', 'Wolf', 'Wüthrich',
'Wyss', 'Zbinden', 'Zehnder', 'Ziegler', 'Zimmermann', 'Zingg',
'Zollinger', 'Zürcher',
)
prefixes = ('Dr.', 'Prof.')
| deanishe/alfred-fakeum | src/libs/faker/providers/person/de_CH/__init__.py | Python | mit | 26,038 |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
########################################################################
#
# Odoo Tools by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import os, datetime, pwd, grp, getpass, re, tempfile
import logging
from odootools.lib import config, git_lib, tools, apache, phppgadmin
_logger = logging.getLogger('odootools.odoo.server')
class odooServer(object):
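    """Install and configure an Odoo server on the local machine.

    Reads the installation parameters (branch, installation type, user,
    passwords, optional CLEARCORP addons) from the odootools config and
    drives the setup: Python dependencies, PostgreSQL, Apache, the git
    checkouts under /srv/odoo and the skeleton files under /etc/odoo.
    """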
def __init__(self, instance=None):
self._os_info = tools.get_os()
self._postgresql_version = ''
if self._os_info['os'] == 'Linux' and self._os_info['version'][0] == 'Ubuntu':
#Support for previous LTS
if self._os_info['version'][1] == '12.04':
self._postgresql_version = '9.1'
#Support for versions between both LTS version
elif self._os_info['version'][1] < '14.04':
self._postgresql_version = '9.1'
#Support for current LTS
elif self._os_info['version'][1] == '14.04':
self._postgresql_version = '9.3'
else:
                self._postgresql_version = '9.3'  # This should fail: unsupported version
elif self._os_info['os'] == 'Linux' and self._os_info['version'][0] == 'LinuxMint':
#Support for previous LTS
if self._os_info['version'][1] == '13':
self._postgresql_version = '9.1'
#Support for versions between both LTS version
elif self._os_info['version'][1] < '17':
self._postgresql_version = '9.1'
#Support for current LTS
elif self._os_info['version'][1] == '17':
self._postgresql_version = '9.3'
else:
                self._postgresql_version = '9.3'  # This should fail: unsupported version
# TODO check versions for Arch Linux
elif self._os_info['os'] == 'Linux' and self._os_info['version'][0] == 'arch':
self._postgresql_version = '9.1'
if instance:
_logger.debug('Server initialized with instance: %s' % instance)
_logger.debug('Branch: %s' % instance._branch)
self._branch = instance._branch
else:
_logger.debug('Server initialized without instance: %s' % instance)
self._branch = config.params['branch'] or '8.0'
self._installation_type = config.params[self._branch+'_installation_type'] or config.params['installation_type'] or 'dev'
self._user = config.params[self._branch+'_user'] or config.params['user'] or None
if 'install_odoo_clearcorp' in config.params:
self._install_odoo_clearcorp = config.params['install_odoo_clearcorp']
elif self._branch+'_install_odoo_clearcorp' in config.params:
self._install_odoo_clearcorp = config.params[self._branch+'_install_odoo_clearcorp']
else:
self._install_odoo_clearcorp = True
if 'install_odoo_costa_rica' in config.params:
self._install_odoo_costa_rica = config.params['install_odoo_costa_rica']
elif self._branch+'_install_odoo_costa_rica' in config.params:
self._install_odoo_costa_rica = config.params[self._branch+'_install_odoo_costa_rica']
else:
self._install_odoo_costa_rica = True
if self._branch+'_update_postgres_hba' in config.params:
self._update_postgres_hba = config.params[self._branch+'_update_postgres_hba']
elif 'update_postgres_hba' in config.params:
self._update_postgres_hba = config.params['update_postgres_hba']
else:
self._update_postgres_hba = True
if self._branch+'_create_postgres_user' in config.params:
self._create_postgres_user = config.params[self._branch+'_create_postgres_user']
elif 'create_postgres_user' in config.params:
self._create_postgres_user = config.params['create_postgres_user']
else:
self._create_postgres_user = True
if self._branch+'_install_apache' in config.params:
self._install_apache = config.params[self._branch+'_install_apache']
elif 'install_apache' in config.params:
self._install_apache = config.params['install_apache']
else:
self._install_apache = True
self._admin_password = config.params[self._branch+'_admin_password'] or config.params['admin_password'] or None
self._postgresql_password = config.params[self._branch+'_postgresql_password'] or config.params['postgresql_password'] or None
return super(odooServer, self).__init__()
def _add_odoo_user(self):
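        """Ensure the odoo system group exists and that self._user is a
        member of it, creating the user as a system account if needed."""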
try:
group = grp.getgrnam('odoo')
except:
_logger.info('odoo group doesn\'t exist, creating group.')
# TODO: no addgroup in arch
tools.exec_command('addgroup odoo', as_root=True)
group = False
else:
_logger.debug('odoo group already exists.')
try:
pw = pwd.getpwnam(self._user)
except:
_logger.info('Creating user: (%s)' % self._user)
tools.exec_command('adduser --system --home /var/run/odoo --no-create-home --ingroup odoo %s' % self._user, as_root=True)
else:
_logger.info('User %s already exists, adding to odoo group.' % self._user)
tools.exec_command('adduser %s odoo' % self._user, as_root=True)
return True
#Todo check packages
def get_packages_distro_match(self, distro):
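        """Return the mapping from generic package names to the names used
        by the given distro ('ubuntu' or 'arch'), or False for an unknown
        distro. The arch mapping is split into 'repo' and 'aur' sections."""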
match = {
'ubuntu': {
'ghostscript': 'ghostscript',
'graphviz': 'graphviz',
'libjs-mochikit': 'libjs-mochikit',
'libjs-mootools': 'libjs-mootools',
'python-beaker': 'python-beaker',
'python-cherrypy3': 'python-cherrypy3',
'python-dateutil': 'python-dateutil',
'python-docutils': 'python-docutils',
'python-egenix': 'python-egenix-mxdatetime',
'python-feedparser': 'python-feedparser',
'python-formencode': 'python-formencode',
'python-gdata': 'python-gdata',
'python-imaging': 'python-imaging',
'python-jinja2': 'python-jinja2',
'python-ldap': 'python-ldap',
'python-libxslt1': 'python-libxslt1',
'python-lxml': 'python-lxml',
'python-mako': 'python-mako',
'python-matplotlib': 'python-matplotlib',
'python-mock': 'python-mock',
'python-openid': 'python-openid',
'python-openssl': 'python-openssl',
'python-psutil': 'python-psutil',
'python-psycopg2': 'python-psycopg2',
'python-pybabel': 'python-pybabel',
'python-pychart': 'python-pychart',
'python-pydot': 'python-pydot',
'python-pyparsing': 'python-pyparsing',
'python-reportlab': 'python-reportlab',
'python-setuptools': 'python-setuptools',
'python-simplejson': 'python-simplejson',
'python-tz': 'python-tz',
'python-unittest2': 'python-unittest2',
'python-vatnumber': 'python-vatnumber',
'python-vobject': 'python-vobject',
'python-webdav': 'python-webdav',
'python-werkzeug': 'python-werkzeug',
'python-xlwt': 'python-xlwt',
'python-yaml': 'python-yaml',
'python-zsi': 'python-zsi',
'tinymce': 'tinymce',
'wget': 'wget',
'poppler-utils': 'poppler-utils'
},
'arch': {
'repo': {
'ghostscript': 'ghostscript',
'graphviz': 'graphviz',
'libjs-mochikit': None,
'libjs-mootools': 'pycow',
'python-beaker': 'python2-beaker',
'python-cherrypy3': 'python2-cherrypy',
'python-dateutil': 'python2-dateutil',
'python-docutils': 'python2-docutils',
'python-egenix': 'python2-egenix-mx-base',
'python-feedparser': 'python2-feedparser',
'python-formencode': 'python2-formencode',
'python-gdata': 'python2-gdata',
'python-imaging': 'python2-imaging',
'python-jinja2': 'python2-jinja',
'python-ldap': 'python2-ldap',
'python-libxslt1': 'python2-lxml',
'python-lxml': 'python2-lxml',
'python-mako': 'python2-mako',
'python-matplotlib': 'python2-matplotlib',
'python-openssl': 'python2-pyopenssl',
'python-psutil': 'python2-psutil',
'python-psycopg2': 'python2-psycopg2',
'python-pybabel': 'python2-babel',
'python-pychart': 'python2-pychart',
'python-pyparsing': 'python2-pyparsing',
'python-reportlab': 'python2-reportlab',
'python-setuptools': 'python2-distribute',
'python-simplejson': 'python2-simplejson',
'python-tz': 'python2-pytz',
'python-vobject': 'python2-vobject',
'python-werkzeug': 'python2-werkzeug',
'python-yaml': 'python2-yaml',
'tinymce': None,
'wget': 'wget',
'poppler-utils': 'poppler-utils' # Needs review on arch
},
'aur': {
'python-mock': 'python2-mock',
'python-openid': 'python2-openid',
'python-pydot': 'pydot',
'python-unittest2': 'python2-unittest2',
'python-vatnumber': 'python2-vatnumber',
'python-webdav': 'python2-pywebdav',
'python-xlwt': 'python2-xlwt',
'python-zsi': 'zsi',
}
}
}
if distro in match:
return match[distro]
else:
return False
def _install_python_libs(self):
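        """Install the distro packages providing the Python dependencies of
        the selected Odoo branch."""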
_logger.info('Installing the required packages and python libraries for Odoo.')
packages = []
        # Packages needed by the installer
packages.append('python-setuptools')
packages.append('wget')
# Packages for 7.0
if self._branch == '7.0':
packages += [
# Dependencies
'python-dateutil',
'python-docutils',
'python-feedparser',
'python-gdata', #Google data parser
'python-jinja2',
'python-ldap',
'python-libxslt1', #Excel spreadsheets
'python-lxml',
'python-mako',
'python-mock', #For testing
'python-openid',
'python-psutil',
'python-psycopg2',
'python-pybabel',
'python-pychart',
'python-pydot',
'python-pyparsing',
'python-reportlab',
'python-simplejson',
'python-tz',
'python-unittest2', #For testing
'python-vatnumber',
'python-vobject',
'python-webdav', #For document-webdav
'python-werkzeug',
'python-xlwt', #Excel spreadsheets
'python-yaml',
'python-zsi',
                # Recommended
'graphviz',
'ghostscript',
'python-imaging',
'python-matplotlib',
'poppler-utils',
]
# Packages for 8.0
if self._branch == '8.0' or self._branch == 'trunk':
packages += [
# Dependencies
'python-dateutil',
'python-docutils',
'python-feedparser',
'python-gdata', #Google data parser
'python-jinja2',
'python-ldap',
'python-libxslt1', #Excel spreadsheets
'python-lxml',
'python-mako',
'python-mock', #For testing
'python-openid',
'python-psutil',
'python-psycopg2',
'python-pybabel',
'python-pychart',
'python-pydot',
'python-pyparsing',
'python-reportlab',
'python-simplejson',
'python-tz',
'python-unittest2', #For testing
'python-vatnumber',
'python-vobject',
'python-webdav', #For document-webdav
'python-werkzeug',
'python-xlwt', #Excel spreadsheets
'python-yaml',
'python-zsi',
                # Recommended
'graphviz',
'ghostscript',
'python-imaging',
'python-matplotlib',
'poppler-utils',
]
# Test distro and call appropriate function
if self._os_info and self._os_info['os'] == 'Linux':
if (self._os_info['version'][0] == 'Ubuntu') or (self._os_info['version'][0] == 'LinuxMint'):
return self._ubuntu_install_python_libs(packages)
elif self._os_info['version'][0] == 'arch':
return self._arch_install_python_libs(packages)
_logger.error('Can\'t install python libraries in this OS: %s. Exiting.' % self._os_info['version'][0])
return False
def _ubuntu_install_python_libs(self, packages):
#Initialize packages match
distro_match_packages = self.get_packages_distro_match('ubuntu')
#Build definitive package list
distro_packages = []
for package in packages:
if package in distro_match_packages:
if distro_match_packages[package]:
distro_packages.append(distro_match_packages[package])
#No package for this distro available
else:
_logger.warning('No installable package for %s in Ubuntu. Please install it manually.' % package)
else:
_logger.warning('Package unknown: %s. Exiting.' % package)
# Install packages
return tools.ubuntu_install_package(distro_packages)
def _arch_install_python_libs(self, packages):
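        """Install the required packages on Arch Linux, separating official
        repository packages from AUR packages."""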
#Initialize packages match
distro_match_packages = self.get_packages_distro_match('arch')
#Build definitive package list
distro_packages = []
aur_packages = []
for package in packages:
if package in distro_match_packages['repo']:
if distro_match_packages['repo'][package]:
distro_packages.append(distro_match_packages['repo'][package])
#No package for this distro available
else:
_logger.warning('No installable package for %s in Arch. Please install it manually.' % package)
elif package in distro_match_packages['aur']:
if distro_match_packages['aur'][package]:
aur_packages.append(distro_match_packages['aur'][package])
#No package for this distro available
else:
_logger.warning('No installable package for %s in Arch. Please install it manually.' % package)
else:
_logger.warning('Package unknown: %s. Exiting.' % package)
# Install packages
_logger.info('Installing packages with pacman.')
_logger.debug('Packages: %s' % str(distro_packages))
error = False
if not tools.arch_install_repo_package(distro_packages):
            _logger.warning('Failed to install some repo packages. This can render the Odoo installation unusable.')
error = True
_logger.info('Installing packages from AUR.')
_logger.debug('Packages: %s' % str(aur_packages))
if not tools.arch_install_repo_package(aur_packages):
            _logger.warning('Failed to install some AUR packages. This can render the Odoo installation unusable.')
error = True
return not error
def _install_postgresql(self):
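        """Install PostgreSQL using the distro-specific helper."""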
_logger.info('Installing PostgreSQL.')
# Test distro and call appropriate function
if self._os_info and self._os_info['os'] == 'Linux':
if (self._os_info['version'][0] == 'Ubuntu') or (self._os_info['version'][0] == 'LinuxMint'):
return self._ubuntu_install_postgresql()
elif self._os_info['version'][0] == 'arch':
return self._arch_install_postgresql()
        _logger.error('Can\'t install PostgreSQL in this OS: %s. Exiting.' % self._os_info['version'][0])
return False
def _ubuntu_install_postgresql(self):
return tools.ubuntu_install_package(['postgresql'])
def _arch_install_postgresql(self):
#TODO: change some of this calls with a python way to do it (because of perms)
if os.path.isdir('/var/lib/postgres/data'):
_logger.info('PostgreSQL appears to be already configured. Skipping.')
return False
if tools.arch_install_package(['postgresql']):
_logger.error('Failed to install PostgreSQL. Exiting.')
return False
if tools.exec_command('systemd-tmpfiles --create postgresql.conf', as_root=True):
_logger.error('Failed to configure PostgreSQL systemd script. Exiting.')
return False
if tools.exec_command('mkdir /var/lib/postgres/data', as_root=True):
_logger.error('Failed to create PostgreSQL data directory. Exiting.')
return False
if tools.exec_command('chown -c postgres:postgres /var/lib/postgres/data', as_root=True):
_logger.error('Failed to set PostgreSQL data directory permisions. Exiting.')
return False
if tools.exec_command('sudo -i -u postgres initdb -D \'/var/lib/postgres/data\''):
_logger.error('Failed to init PostgreSQL database. Exiting.')
return False
if tools.exec_command('systemctl enable postgresql', as_root=True):
_logger.error('Failed to enable PostgreSQL init script. Exiting.')
return False
if tools.exec_command('systemctl start postgresql', as_root=True):
_logger.error('Failed to start PostgreSQL. Exiting.')
return False
return True
def _do_update_postgres_hba(self):
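        """Update pg_hba.conf to use md5 authentication for local
        connections, dispatching to the distro-specific helper."""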
_logger.info('Updating PostgreSQL pg_hba.conf file.')
# Test distro and call appropriate function
if self._os_info and self._os_info['os'] == 'Linux':
if (self._os_info['version'][0] == 'Ubuntu') or (self._os_info['version'][0] == 'LinuxMint'):
return self._ubuntu_do_update_postgres_hba()
elif self._os_info['version'][0] == 'arch':
return self._arch_do_update_postgres_hba()
_logger.error('Can\'t update PostgreSQL pg_hba.conf in this OS: %s. Exiting.' % self._os_info['version'][0])
return False
def _ubuntu_do_update_postgres_hba(self):
#TODO: change sed command with python lib to do the change (be carefull with the user perms)
if tools.exec_command("sed -i 's/\(local[[:space:]]*all[[:space:]]*all[[:space:]]*\)\(ident[[:space:]]*sameuser\)/\1md5/g' /etc/postgresql/%s/main/pg_hba.conf" % self._postgresql_version, as_root=True):
_logger.error('Failed to set PostgreSQL pg_hba.conf file. Exiting.')
return False
if tools.exec_command('service postgresql restart', as_root=True):
_logger.error('Failed to start PostgreSQL. Exiting.')
return False
return True
def _arch_do_update_postgres_hba(self):
#TODO: lp:1133383 change sed command with python lib to do the change (be carefull with the user perms)
#if tools.exec_command("sed -i 's/\(local[[:space:]]*all[[:space:]]*all[[:space:]]*\)\(ident[[:space:]]*sameuser\)/\1md5/g' /etc/postgresql/%s/main/pg_hba.conf" % self._postgresql_version, as_root=True):
# _logger.error('Failed to set PostgreSQL pg_hba.conf file. Exiting.')
# return False
#if tools.exec_command('/etc/init.d/postgresql%s restart' % self._postgresql_init_suffix, as_root=True)
# _logger.error('Failed to start PostgreSQL. Exiting.')
# return False
return True
def _add_postgresql_user(self):
_logger.info('Adding PostgreSQL user: %s.' % self._user)
if tools.exec_command('sudo -u postgres createuser %s --superuser --createdb --no-createrole' % self._user):
_logger.error('Failed to add PostgreSQL user. Exiting.')
return False
if tools.exec_command('sudo -u postgres psql template1 -U postgres -c "alter user \\"%s\\" with password \'%s\'"' % (self._user, self._admin_password)):
_logger.error('Failed to set PostgreSQL user password. Exiting.')
return False
return True
def _change_postgresql_admin_password(self):
_logger.info('Changing PostgreSQL admin password.')
if tools.exec_command('sudo -u postgres psql template1 -U postgres -c "alter user postgres with password \'%s\'"' % self._postgresql_password):
_logger.error('Failed to set PostgreSQL admin password. Exiting.')
return False
return True
def change_perms(self):
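        """Give self._user and the odoo group ownership of the Odoo
        directories under /srv, /var/log, /var/run and /etc."""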
_logger.debug('User: %s' % self._user)
if os.path.isdir('/srv/odoo'):
if tools.exec_command('chown -R %s:odoo /srv/odoo' % self._user, as_root=True):
_logger.warning('Failed to set /srv/odoo owner. Skipping.')
if tools.exec_command('chmod -R g+w /srv/odoo', as_root=True):
_logger.warning('Failed to set /srv/odoo perms. Skipping.')
if os.path.isdir('/var/log/odoo'):
if tools.exec_command('chown -R %s:odoo /var/log/odoo' % self._user, as_root=True):
_logger.warning('Failed to set /var/log/odoo owner. Skipping.')
if tools.exec_command('chmod -R g+w /var/log/odoo', as_root=True):
_logger.warning('Failed to set /var/log/odoo perms. Skipping.')
if os.path.isdir('/var/run/odoo'):
if tools.exec_command('chown -R %s:odoo /var/run/odoo' % self._user, as_root=True):
_logger.warning('Failed to set /var/run/odoo owner. Skipping.')
if tools.exec_command('chmod -R g+w /var/run/odoo', as_root=True):
_logger.warning('Failed to set /var/run/odoo perms. Skipping.')
if os.path.isdir('/etc/odoo'):
if tools.exec_command('chown -R %s:odoo /etc/odoo' % self._user, as_root=True):
_logger.warning('Failed to set /etc/odoo owner. Skipping.')
return True
def _download_odoo_repo(self):
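        """Create the /srv/odoo/<branch> directory layout and clone the
        CLEARCORP Odoo git repository into its src directory."""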
if os.path.isdir('/srv/odoo'):
_logger.warning('/srv/odoo already exists. Not downloading repo.')
return False
_logger.info('Creating the directory structure.')
if tools.exec_command('mkdir -p /srv/odoo/%s/instances' % self._branch, as_root=True):
_logger.error('Failed to create the odoo directory structure. Exiting.')
return False
if tools.exec_command('mkdir -p /srv/odoo/%s/src' % self._branch, as_root=True):
_logger.error('Failed to create the odoo src directory structure. Exiting.')
return False
cwd = os.getcwd()
os.chdir('/srv/odoo/%s/src/' % self._branch)
        _logger.info('Cloning the Odoo git repository from https://github.com/CLEARCORP/odoo.git, latest %s branch.' % self._branch)
repo = git_lib.git_clone('https://github.com/CLEARCORP/odoo.git', 'odoo', branch=self._branch)
os.chdir(cwd)
_logger.info('Cloning finished.')
return True
def _download_git_repo(self, source, name):
if not os.path.isdir('/srv/odoo/%s/src/' % self._branch):
_logger.warning('/srv/odoo/%s/src/ do not exists. Not cloning repo.' % self._branch)
return False
cwd = os.getcwd()
os.chdir('/srv/odoo/%s/src/' % self._branch)
_logger.info('Cloning from %s latest %s branch.' % (source, self._branch))
repo = git_lib.git_clone(source, name, branch=self._branch)
os.chdir(cwd)
_logger.info('Cloning finished.')
return True
def _download_odoo(self, modules_to_install):
_logger.info('Downloading latest Odoo %s release.' % self._branch)
if os.path.isdir('/srv/odoo'):
_logger.info('Odoo repo already exists.')
repo_downloaded = True
else:
repo_downloaded = self._download_odoo_repo()
self.change_perms()
if 'odoo-clearcorp' in modules_to_install and modules_to_install['odoo-clearcorp']:
self._download_git_repo('https://github.com/CLEARCORP/odoo-clearcorp.git', 'odoo-clearcorp')
if 'odoo-costa-rica' in modules_to_install and modules_to_install['odoo-costa-rica']:
self._download_git_repo('https://github.com/CLEARCORP/odoo-costa-rica.git', 'odoo-costa-rica')
self.change_perms()
return True
def _config_odoo_version(self):
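        """Copy the bin, init and conf skeletons to /etc/odoo/<branch>/server
        and substitute the branch, path, user, dbfilter and interface
        placeholders."""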
_logger.info('Configuring Odoo %s' % self._branch)
cwd = os.getcwd()
# Odoo Server bin
_logger.debug('Copy bin script skeleton to /etc')
if tools.exec_command('mkdir -p /etc/odoo/%s/server/' % self._branch, as_root=True):
_logger.error('Failed to make /etc/odoo/%s/server/ directory. Exiting.' % self._branch)
return False
if tools.exec_command('cp %s/odootools/odoo/static/bin/server-bin-%s-skeleton /etc/odoo/%s/server/bin-skeleton' % (config.params['odootools_path'], self._branch, self._branch), as_root=True):
_logger.error('Failed to copy bin skeleton. Exiting.')
return False
if tools.exec_command('sed -i "s#@BRANCH@#%s#g" /etc/odoo/%s/server/bin-skeleton' % (self._branch, self._branch), as_root=True): # TODO: check sed command
_logger.error('Failed to config bin skeleton. Exiting.')
return False
# Odoo Server init
if tools.exec_command('cp %s/odootools/odoo/static/init/server-init-%s-skeleton /etc/odoo/%s/server/init-skeleton' % (config.params['odootools_path'], self._branch, self._branch), as_root=True):
_logger.error('Failed to copy init skeleton. Exiting.')
return False
if tools.exec_command('sed -i "s#@PATH@#/usr/local#g" /etc/odoo/%s/server/init-skeleton' % self._branch, as_root=True):
_logger.error('Failed to config init skeleton. Exiting.')
return False
if tools.exec_command('sed -i "s#@BRANCH@#%s#g" /etc/odoo/%s/server/init-skeleton' % (self._branch, self._branch), as_root=True):
_logger.error('Failed to config init skeleton. Exiting.')
return False
if self._installation_type == 'dev':
if tools.exec_command('sed -i "s#@USER@#%s#g" /etc/odoo/%s/server/init-skeleton' % (self._user, self._branch), as_root=True):
_logger.error('Failed to config init skeleton. Exiting.')
return False
if tools.exec_command('sed -i "s#@DBFILTER@#\${SERVERNAME}_.*#g" /etc/odoo/%s/server/init-skeleton' % self._branch, as_root=True):
_logger.error('Failed to config init skeleton. Exiting.')
return False
else:
if tools.exec_command('sed -i "s#@DBFILTER@#.*#g" /etc/odoo/%s/server/init-skeleton' % self._branch, as_root=True):
_logger.error('Failed to config init skeleton. Exiting.')
return False
# Odoo Server config
if tools.exec_command('cp %s/odootools/odoo/static/conf/server.conf-%s-skeleton /etc/odoo/%s/server/conf-skeleton' % (config.params['odootools_path'], self._branch, self._branch), as_root=True):
_logger.error('Failed to copy conf skeleton. Exiting.')
return False
if tools.exec_command('sed -i "s#@BRANCH@#%s#g" /etc/odoo/%s/server/conf-skeleton' % (self._branch, self._branch), as_root=True):
_logger.error('Failed to config init skeleton. Exiting.')
return False
if self._installation_type == 'dev':
if tools.exec_command('sed -i "s#@INTERFACE@##g" /etc/odoo/%s/server/conf-skeleton' % self._branch, as_root=True):
_logger.error('Failed to set interface in config skeleton. Exiting.')
return False
else:
if tools.exec_command('sed -i "s#@INTERFACE@#localhost#g" /etc/odoo/%s/server/conf-skeleton' % self._branch, as_root=True):
_logger.error('Failed to set interface in config skeleton. Exiting.')
return False
if tools.exec_command('mkdir -p /var/run/odoo', as_root=True):
_logger.error('Failed to create /var/run/odoo. Exiting.')
return False
return True
def _do_install_apache(self):
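        """Install and configure Apache using the distro-specific helper."""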
os_version = tools.get_os()
if os_version['os'] == 'Linux':
if (os_version['version'][0] == 'Ubuntu') or (os_version['version'][0] == 'LinuxMint'):
return self._ubuntu_do_install_apache()
elif os_version['version'][0] == 'arch':
return self._arch_do_install_apache()
return False
def _ubuntu_do_install_apache(self):
if not apache.apache_install():
_logger.error('Failed to install Apache. Exiting.')
return False
_logger.info('Configuring site config files.')
if tools.exec_command('cp %s/odootools/odoo/static/apache/apache-odoo /etc/apache2/sites-available/odoo' % config.params['odootools_path'], as_root=True):
            _logger.warning('Failed to copy Apache odoo site conf file.')
if tools.exec_command('mkdir -p /etc/odoo/apache2/rewrites', as_root=True):
            _logger.warning('Failed to make /etc/odoo/apache2/rewrites.')
if tools.exec_command('cp %s/odootools/odoo/static/apache/apache-ssl-%s-skeleton /etc/odoo/apache2/ssl-%s-skeleton' % (config.params['odootools_path'], self._branch, self._branch), as_root=True):
            _logger.warning('Failed to copy Apache rewrite skeleton.')
if tools.exec_command('sed -i "s#ServerAdmin .*\\$#ServerAdmin support@clearcorp.co.cr\\n\\n\\tInclude /etc/apache2/sites-available/odoo#g" /etc/apache2/sites-available/000-default.conf', as_root=True):
            _logger.warning('Failed to configure Apache site.')
if tools.exec_command('sed -i "s#ServerAdmin .*\\$#ServerAdmin support@clearcorp.co.cr\\n\\n\\tInclude /etc/odoo/apache2/rewrites#g" /etc/apache2/sites-available/default-ssl.conf', as_root=True):
            _logger.warning('Failed to configure Apache site.')
if not apache.apache_restart():
            _logger.warning('Failed to restart Apache.')
return True
def _arch_do_install_apache(self):
if not apache.apache_install():
_logger.error('Failed to install Apache. Exiting.')
return False
#TODO: lp:1133385 arch configuration of sites
_logger.info('Configuring site config files.')
#if tools.exec_command('cp %s/oerptools/oerp/static/apache/apache-erp /etc/apache2/sites-available/erp' % config.params['oerptools_path'], as_root=True):
# _logger.warning('Failed copy Apache erp site conf file.')
if tools.exec_command('mkdir -p /etc/odoo/apache2/rewrites', as_root=True):
            _logger.warning('Failed to make /etc/odoo/apache2/rewrites.')
        if tools.exec_command('cp %s/odootools/odoo/static/apache/apache-ssl-%s-skeleton /etc/odoo/apache2/ssl-%s-skeleton' % (config.params['odootools_path'], self._branch, self._branch), as_root=True):
            _logger.warning('Failed to copy Apache rewrite skeleton.')
#if tools.exec_command('sed -i "s/ServerAdmin .*$/ServerAdmin support@clearcorp.co.cr\n\n\tInclude \/etc\/apache2\/sites-available\/erp/g" /etc/apache2/sites-available/default', as_root=True):
# _logger.warning('Failed config Apache site.')
#if tools.exec_command('sed -i "s/ServerAdmin .*$/ServerAdmin support@clearcorp.co.cr\n\n\tInclude \/etc\/openerp\/apache2\/rewrites/g" /etc/apache2/sites-available/default-ssl', as_root=True):
# _logger.warning('Failed config Apache site.')
        if not apache.apache_restart():
            _logger.warning('Failed to restart Apache.')
return True
def _set_logrotation(self):
if tools.exec_command('cp %s/odootools/odoo/static/log/odoo.logrotate /etc/logrotate.d/' % config.params['odootools_path'], as_root=True):
_logger.error('Failed to copy logrotate. Exiting.')
return False
return True
def install(self):
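        """Interactive entry point: show the configuration, ask for
        confirmation and passwords, then install the dependencies,
        PostgreSQL, the Odoo sources, Apache and log rotation."""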
_logger.info('Odoo server installation started.')
_logger.info('')
_logger.info('Please check the following information before continuing.')
_logger.info('=========================================================')
_logger.info('')
_logger.info('System info')
_logger.info('-----------')
#Check system variables
hostname = tools.get_hostname()
_logger.info('Hostname: %s' % hostname[0])
if not hostname[1]:
            _logger.warning('FQDN unknown, the hostname may not be properly set.')
else:
_logger.info('FQDN: %s' % hostname[1])
_logger.info('Time and date: %s' % datetime.datetime.today().strftime('%Y/%m/%d %H:%M:%S'))
#TODO: lp:1133388 list installed and default locale
_logger.info('')
_logger.info('Installation info')
_logger.info('-----------------')
_logger.info('Odoo version (branch) to install: %s' % self._branch)
if self._installation_type == 'dev':
_logger.info('Installation type: development station')
elif self._installation_type == 'server':
_logger.info('Installation type: production server')
else:
_logger.error('Installation type unknown: %s' % self._installation_type)
return False
if self._installation_type == 'dev':
if not self._user:
self._user = pwd.getpwuid(os.getuid()).pw_name
if self._user == 'root':
                    _logger.error('No user specified for dev installation, current user is root, can\'t install with root. Exiting.')
return False
else:
                    _logger.warning('No user specified for dev installation, using current user: %s' % self._user)
else:
try:
pw = pwd.getpwnam(self._user)
except:
_logger.error('User unknown (%s). Exiting.' % self._user)
return False
else:
_logger.info('User: %s' % self._user)
elif not self._user and self._installation_type == 'server':
self._user = 'odoo'
if not self._user:
_logger.error('User unknown. Exiting.')
return False
elif pwd.getpwuid(os.getuid()).pw_name not in (self._user, 'root'):
try:
group = grp.getgrnam('odoo')
                if pwd.getpwuid(os.getuid()).pw_name not in group.gr_mem:
                    _logger.error('Your user must be the user of installation (%s), root, or be part of odoo group. Exiting.' % self._user)
return False
except:
                _logger.error('Your user must be the user of installation (%s), root, or be part of odoo group. Exiting.' % self._user)
return False
_logger.info('')
_logger.info('Addons installation:')
_logger.info('--------------------')
if self._install_odoo_clearcorp:
_logger.info('Install odoo-clearcorp: YES')
else:
_logger.info('Install odoo-clearcorp: NO')
if self._install_odoo_costa_rica:
_logger.info('Install odoo-costa-rica: YES')
else:
_logger.info('Install odoo-costa-rica: NO')
_logger.info('')
_logger.info('Please review the values above and confirm accordingly.')
answer = False
while not answer:
answer = raw_input('Are the configuration values correct (y/n)? ')
if re.match(r'^y$|^yes$', answer, flags=re.IGNORECASE):
answer = 'y'
elif re.match(r'^n$|^no$', answer, flags=re.IGNORECASE):
answer = 'n'
_logger.error('The configuration values are incorrect. Please correct any configuration error and run the script again.')
return False
else:
answer = False
_logger.info('Setting the Odoo admin password.')
# Get admin password
while not self._admin_password:
self._admin_password = getpass.getpass('Enter Odoo admin password: ')
if not self._admin_password == getpass.getpass('Confirm Odoo admin password: '):
_logger.error('Passwords don\'t match. Try again.')
self._admin_password = False
_logger.info('Setting the PostgreSQL admin password.')
# Set postgres admin password
if not self._postgresql_password and self._installation_type == 'dev':
answer = False
while not answer:
answer = raw_input('Do you want to change PostgreSQL admin password (Y/n)? ')
if answer == '':
answer = 'y'
elif re.match(r'^y$|^yes$', answer, flags=re.IGNORECASE):
answer = 'y'
elif re.match(r'^n$|^no$', answer, flags=re.IGNORECASE):
answer = 'n'
else:
answer = False
if answer == 'y':
while not self._postgresql_password:
self._postgresql_password = getpass.getpass('Enter PostgreSQL\'s admin password: ')
if not self._postgresql_password == getpass.getpass('Confirm PostgreSQL\'s admin password: '):
_logger.error('Passwords don\'t match. Try again.')
self._postgresql_password = False
#Update config file with new values
values = {
'odoo-install': {
self._branch+'_installation_type': self._installation_type,
self._branch+'_user': self._user,
self._branch+'_install_odoo_clearcorp': self._install_odoo_clearcorp,
self._branch+'_install_odoo_costa_rica': self._install_odoo_costa_rica,
self._branch+'_admin_password': self._admin_password,
self._branch+'_postgresql_password': self._postgresql_password,
},
}
config_file_path = config.params.update_config_file_values(values)
if config_file_path:
_logger.info('Updated config file with installation values: %s' % config_file_path)
else:
_logger.warning('Failed to update config file with installation values.')
# Preparing installation
_logger.info('')
_logger.info('Preparing Odoo installation')
_logger.info('==============================')
_logger.info('')
self._add_odoo_user()
self._install_python_libs()
if not self._install_postgresql():
_logger.error('Failed to install PostgreSQL. Exiting.')
return False
if self._update_postgres_hba:
self._do_update_postgres_hba()
if self._create_postgres_user:
self._add_postgresql_user()
if self._postgresql_password:
self._change_postgresql_admin_password()
modules_to_install = {
'odoo-clearcorp': self._install_odoo_clearcorp,
'odoo-costa-rica': self._install_odoo_costa_rica,
}
self._download_odoo(modules_to_install)
self._config_odoo_version()
self.change_perms()
if self._install_apache:
self._do_install_apache()
phppgadmin.phppgadmin_install()
self._set_logrotation()
return True
| ClearCorp/odootools | odootools/odootools/odoo/server.py | Python | agpl-3.0 | 42,624 |
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import textwrap
import time
import yaml
from jinja2 import BaseLoader, Environment, FileSystemLoader
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import build_collection, install_collections, publish_collection, \
validate_collection_name
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
urlparse = six.moves.urllib.parse.urlparse
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
# Inject role into sys.argv[1] as a backwards compatibility step
if len(args) > 1 and args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
# Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
idx = 2 if args[1].startswith('-v') else 1
args.insert(idx, 'role')
self.api_servers = []
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# Common arguments that apply to more than 1 action
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
common.add_argument('--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at '
'https://galaxy.ansible.com/me/preferences. You can also use ansible-galaxy login to '
'retrieve this key or set the token for the GALAXY_SERVER_LIST entry.')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
opt_help.add_verbosity_options(common)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
github = opt_help.argparse.ArgumentParser(add_help=False)
github.add_argument('github_user', help='GitHub username')
github.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
help='The path to the directory containing your roles. The default is the first '
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Add sub parser for the Galaxy collection actions
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
self.add_install_options(collection_parser, parents=[common, force])
# Add sub parser for the Galaxy role actions
role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
role_parser.required = True
self.add_init_options(role_parser, parents=[common, force, offline])
self.add_remove_options(role_parser, parents=[common, roles_path])
self.add_delete_options(role_parser, parents=[common, github])
self.add_list_options(role_parser, parents=[common, roles_path])
self.add_search_options(role_parser, parents=[common])
self.add_import_options(role_parser, parents=[common, github])
self.add_setup_options(role_parser, parents=[common, roles_path])
self.add_login_options(role_parser, parents=[common])
self.add_info_options(role_parser, parents=[common, roles_path, offline])
self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_init_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
init_parser = parser.add_parser('init', parents=parents,
help='Initialize new {0} with the base structure of a '
'{0}.'.format(galaxy_type))
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path', dest='init_path', default='./',
help='The path in which the skeleton {0} will be created. The default is the '
'current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based '
'upon.'.format(galaxy_type))
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = validate_collection_name
init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
if galaxy_type == 'role':
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
delete_parser = parser.add_parser('delete', parents=parents,
help='Removes the role from Galaxy. It does not remove or alter the actual '
'GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
list_parser = parser.add_parser('list', parents=parents,
help='Show the name and version of each role installed in the roles_path.')
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument('role', help='Role', nargs='?', metavar='role')
def add_search_options(self, parser, parents=None):
search_parser = parser.add_parser('search', parents=parents,
help='Search the Galaxy database by tags, platforms, author and multiple '
'keywords.')
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
import_parser = parser.add_parser('import', parents=parents, help='Import a role')
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch '
'(usually master)')
import_parser.add_argument('--role-name', dest='role_name',
help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_'
'user/github_repo.')
def add_setup_options(self, parser, parents=None):
setup_parser = parser.add_parser('setup', parents=parents,
help='Manage the integration between Galaxy and the given source.')
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see '
'ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_login_options(self, parser, parents=None):
login_parser = parser.add_parser('login', parents=parents,
help="Login to api.github.com server in order to use ansible-galaxy role sub "
"command such as 'import', 'delete', 'publish', and 'setup'")
login_parser.set_defaults(func=self.execute_login)
login_parser.add_argument('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
def add_info_options(self, parser, parents=None):
info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_install_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
args_kwargs = {}
if galaxy_type == 'collection':
args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
'mutually exclusive with --requirements-file.'
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
args_kwargs['help'] = 'Role name, URL or tar file'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
'Galaxy'.format(galaxy_type))
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help=ignore_errors_help)
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download {0}s listed as dependencies.".format(galaxy_type))
install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing {0} and its "
"dependencies.".format(galaxy_type))
if galaxy_type == 'collection':
install_parser.add_argument('-p', '--collections-path', dest='collections_path',
default=C.COLLECTIONS_PATHS[0],
help='The path to the directory containing your collections.')
install_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
else:
install_parser.add_argument('-r', '--role-file', dest='role_file',
                                        help='A file containing a list of roles to be installed.')
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False,
help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
build_parser = parser.add_parser('build', parents=parents,
                                         help='Build an Ansible collection artifact that can be published to Ansible '
'Galaxy.')
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
help='Path to the collection(s) directory to build. This should be the directory '
'that contains the galaxy.yml file. The default is the current working '
'directory.')
build_parser.add_argument('--output-path', dest='output_path', default='./',
                                  help='The path the built collection artifact will be written to. The default is '
                                       'the current working directory.')
def add_publish_options(self, parser, parents=None):
publish_parser = parser.add_parser('publish', parents=parents,
help='Publish a collection artifact to Ansible Galaxy.')
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument('args', metavar='collection_path',
help='The path to the collection tarball to publish.')
publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
def server_config_def(section, key, required):
return {
'description': 'The %s of the %s Galaxy server' % (key, section),
'ini': [
{
'section': 'galaxy_server.%s' % section,
'key': key,
}
],
'env': [
{'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
],
'required': required,
}
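        # For a hypothetical server list entry named 'release_galaxy', these definitions resolve each value from the
        # [galaxy_server.release_galaxy] ini section or an env var such as ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_URL.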
server_def = [('url', True), ('username', False), ('password', False), ('token', False),
('auth_url', False)]
config_servers = []
# Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
for server_key in server_list:
# Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
# section [galaxy_server.<server>] for the values url, username, password, and token.
config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def)
defs = AnsibleLoader(yaml.safe_dump(config_dict)).get_single_data()
C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
server_options = C.config.get_plugin_options('galaxy_server', server_key)
# auth_url is used to create the token, but not directly by GalaxyAPI, so
# it doesn't need to be passed as kwarg to GalaxyApi
auth_url = server_options.pop('auth_url', None)
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
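            # Token selection below: username/password -> BasicAuthToken, token plus auth_url -> KeycloakToken,
            # token alone -> GalaxyToken, and no credentials at all -> anonymous access (token stays None).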
# default case if no auth info is provided.
server_options['token'] = None
if username:
server_options['token'] = BasicAuthToken(username,
server_options['password'])
else:
if token_val:
if auth_url:
server_options['token'] = KeycloakToken(access_token=token_val,
auth_url=auth_url,
validate_certs=not context.CLIARGS['ignore_certs'])
else:
# The galaxy v1 / github / django / 'Token'
server_options['token'] = GalaxyToken(token=token_val)
config_servers.append(GalaxyAPI(self.galaxy, server_key, **server_options))
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
if cmd_server:
            # Cmd args take precedence over the config entry, but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
config_server = next((s for s in config_servers if s.name == cmd_server), None)
if config_server:
self.api_servers.append(config_server)
else:
self.api_servers.append(GalaxyAPI(self.galaxy, 'cmd_arg', cmd_server, token=cmd_token))
else:
self.api_servers = config_servers
# Default to C.GALAXY_SERVER if no servers were defined
if len(self.api_servers) == 0:
self.api_servers.append(GalaxyAPI(self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token))
context.CLIARGS['func']()
@property
def api(self):
return self.api_servers[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True):
"""
        Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are 2
        requirements file formats:
# v1 (roles only)
- src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
              name: Downloads the role to the specified name, defaults to the name from Galaxy or the name of the repo if src is a URL.
              scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
include: Path to additional requirements.yml files.
# v2 (roles and collections)
---
roles:
# Same as v1 format just under the roles key
collections:
- namespace.collection
- name: namespace.collection
version: version identifier, multiple identifiers are separated by ','
source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
:param requirements_file: The path to the requirements file.
:param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
        :return: a dict containing the roles and collections found in the requirements file.
"""
requirements = {
'roles': [],
'collections': [],
}
b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
if not os.path.exists(b_requirements_file):
raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
display.vvv("Reading requirement file at '%s'" % requirements_file)
with open(b_requirements_file, 'rb') as req_obj:
try:
file_requirements = yaml.safe_load(req_obj)
except YAMLError as err:
raise AnsibleError(
"Failed to parse the requirements yml at '%s' with the following error:\n%s"
% (to_native(requirements_file), to_native(err)))
if file_requirements is None:
raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
def parse_role_req(requirement):
if "include" not in requirement:
role = RoleRequirement.role_yaml_parse(requirement)
display.vvv("found role %s in yaml file" % to_text(role))
if "name" not in role and "src" not in role:
raise AnsibleError("Must specify name or src for role")
return [GalaxyRole(self.galaxy, self.api, **role)]
else:
b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
if not os.path.isfile(b_include_path):
raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
% (to_native(b_include_path), to_native(requirements_file)))
with open(b_include_path, 'rb') as f_include:
try:
return [GalaxyRole(self.galaxy, self.api, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
except Exception as e:
raise AnsibleError("Unable to load data from include requirements file: %s %s"
% (to_native(requirements_file), to_native(e)))
if isinstance(file_requirements, list):
# Older format that contains only roles
if not allow_old_format:
raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
"a list of collections to install")
for role_req in file_requirements:
requirements['roles'] += parse_role_req(role_req)
else:
# Newer format with a collections and/or roles key
extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
if extra_keys:
raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
"file. Found: %s" % (to_native(", ".join(extra_keys))))
for role_req in file_requirements.get('roles', []):
requirements['roles'] += parse_role_req(role_req)
for collection_req in file_requirements.get('collections', []):
if isinstance(collection_req, dict):
req_name = collection_req.get('name', None)
if req_name is None:
raise AnsibleError("Collections requirement entry should contain the key name.")
req_version = collection_req.get('version', '*')
req_source = collection_req.get('source', None)
if req_source:
# Try and match up the requirement source with our list of Galaxy API servers defined in the
# config, otherwise create a server with that URL without any auth.
req_source = next(iter([a for a in self.api_servers if req_source in [a.name, a.api_server]]),
GalaxyAPI(self.galaxy, "explicit_requirement_%s" % req_name, req_source))
requirements['collections'].append((req_name, req_version, req_source))
else:
requirements['collections'].append((collection_req, '*', None))
return requirements
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
galaxy_meta = get_collections_galaxy_meta_info()
required_config = []
optional_config = []
for meta_entry in galaxy_meta:
config_list = required_config if meta_entry.get('required', False) else optional_config
value = inject_data.get(meta_entry['key'], None)
if not value:
meta_type = meta_entry.get('type', 'str')
if meta_type == 'str':
value = ''
elif meta_type == 'list':
value = []
elif meta_type == 'dict':
value = {}
meta_entry['value'] = value
config_list.append(meta_entry)
link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
const_pattern = re.compile(r"C\(([^)]+)\)")
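        # e.g. comment_ify below rewrites "L(Galaxy docs, https://galaxy.ansible.com)" as "Galaxy docs <https://galaxy.ansible.com>"
        # and "C(role_name)" as "'role_name'" before wrapping the result into '# '-prefixed comment lines.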
def comment_ify(v):
if isinstance(v, list):
v = ". ".join([l.rstrip('.') for l in v])
v = link_pattern.sub(r"\1 <\2>", v)
v = const_pattern.sub(r"'\1'", v)
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
def to_yaml(v):
return yaml.safe_dump(v, default_flow_style=False).rstrip()
env = Environment(loader=BaseLoader)
env.filters['comment_ify'] = comment_ify
env.filters['to_yaml'] = to_yaml
template = env.from_string(meta_template)
meta_value = template.render({'required_config': required_config, 'optional_config': optional_config})
return meta_value
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(collection_path, output_path, force)
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
))
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <example@domain.com>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
))
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
if obj_skeleton is not None:
own_skeleton = False
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
template_env = Environment(loader=FileSystemLoader(obj_skeleton))
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file, encoding='utf-8')
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, self.api, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not context.CLIARGS['offline']:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
        Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
You can pass in a list (roles or collections) or use the file
option listed below (these are mutually exclusive). If you pass in a list, it
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
"""
if context.CLIARGS['type'] == 'collection':
collections = context.CLIARGS['args']
force = context.CLIARGS['force']
output_path = context.CLIARGS['collections_path']
ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
requirements_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(requirements_file, allow_old_format=False)['collections']
else:
requirements = []
for collection_input in collections:
requirement = None
if os.path.isfile(to_bytes(collection_input, errors='surrogate_or_strict')) or \
urlparse(collection_input).scheme.lower() in ['http', 'https']:
# Arg is a file path or URL to a collection
name = collection_input
else:
name, dummy, requirement = collection_input.partition(':')
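                        # e.g. 'my_namespace.my_collection:1.0.0' (hypothetical name) yields that name plus the
                        # '1.0.0' requirement; with no ':' the requirement falls back to '*' below.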
requirements.append((name, requirement or '*', None))
output_path = GalaxyCLI._resolve_path(output_path)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(output_path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection won't be picked up in an Ansible "
"run." % (to_text(output_path), to_text(":".join(collections_path))))
if os.path.split(output_path)[1] != 'ansible_collections':
output_path = os.path.join(output_path, 'ansible_collections')
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(requirements, output_path, self.api_servers, (not ignore_certs), ignore_errors,
no_deps, force, force_deps)
return 0
role_file = context.CLIARGS['role_file']
if not context.CLIARGS['args'] and role_file is None:
            # the user needs to either specify a roles file via --role-file or pass at least one user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
roles_left = []
if role_file:
if not (role_file.endswith('.yaml') or role_file.endswith('.yml')):
raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
roles_left = self._parse_requirements_file(role_file)['roles']
else:
            # roles were specified directly, so we'll just go out and grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, self.api, **role))
for role in roles_left:
            # when a roles file and role names are both given, only process the roles whose names match the given args
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % to_text(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
                                    display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
roles_left.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
roles_left.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
if context.CLIARGS['role']:
# show the requested role, if it exists
name = context.CLIARGS['role']
gr = GalaxyRole(self.galaxy, self.api, name)
if gr.metadata:
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = context.CLIARGS['roles_path']
path_found = False
warnings = []
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
if gr.metadata:
_display_role(gr)
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        verify user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if context.CLIARGS['token'] is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = context.CLIARGS['token']
galaxy_response = self.api.authenticate(github_token)
if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
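            # Poll the import task until it reaches a terminal state (SUCCESS or FAILED), echoing any new
            # task messages roughly every 10 seconds.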
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
| kvar/ansible | lib/ansible/cli/galaxy.py | Python | gpl-3.0 | 59,771 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class TrnascanSe(AutotoolsPackage):
"""Seaching for tRNA genes in genomic sequence"""
homepage = "http://lowelab.ucsc.edu/tRNAscan-SE/"
url = "http://trna.ucsc.edu/software/trnascan-se-2.0.0.tar.gz"
version('2.0.0', sha256='0dde1c07142e4bf77b21d53ddf3eeb1ef8c52248005a42323d13f8d7c798100c')
depends_on('infernal@1.1.2', type='run', when='@2.0.0')
def patch(self):
filter_file('infernal_dir: {bin_dir}',
'infernal_dir: %s' % self.spec['infernal'].prefix.bin,
'tRNAscan-SE.conf.src', string=True)
| iulian787/spack | var/spack/repos/builtin/packages/trnascan-se/package.py | Python | lgpl-2.1 | 799 |
from django.contrib.sitemaps import Sitemap
from .models import Letter
class LetterSitemap(Sitemap):
def items(self):
return Letter.objects.all()
def lastmod(self, obj):
return obj.modified
| watchdogpolska/feder | feder/letters/sitemaps.py | Python | mit | 218 |
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from __future__ import division
"""
main.py -- main program of motif sequence coverage pipeline tool
example for running code: python main.py -jid test -confFile ./data/default_scan_only.conf
This will create a results folder with the name test. You will need to delete the folder if you want to run the code again with the same name
"""
#python imports
import sys
import os
import argparse
import shutil
#add the util folder path to use util files in it
inPath = os.path.realpath(__file__)
split = inPath.split('/')
inPath = '/'.join(split[:len(split)-1])
sys.path.append(inPath + '/utils')
import conf
import general_utils
import Fimo
import Tomtom
#add the alg folder path to call the different algorithms
sys.path.append(inPath + '/algo')
import greedy
import motif_pwm_scan_cov
import motif_pwm_scan_only
#MAIN
def main(args):
#example for running code: python main.py -jid test -confFile ./data/default_scan_only.conf
print "main.py::main()"
parser = argparse.ArgumentParser()
parser.add_argument("-jid", "--jid", help="enter job ID") #job id to make a folder to store all the data for a specific job
parser.add_argument("-confFile", "--confFile", help="enter the configuration file")#path to configuration file
args = parser.parse_args()
print 'jobId:', args.jid,'configfile:', args.confFile
#make a results directory to store job results
resultsDirName = args.jid
os.makedirs(resultsDirName)
#make a file list to store all the files to be moved to the results folder
fileList = []
#copy the config file
cpConfFileName = args.jid + '_in_conf_file'
cpConfFile = open(cpConfFileName, 'wb')
with open(args.confFile, 'rb') as handler:
for line in handler:
cpConfFile.write(line)
cpConfFile.close()
fileList.append(cpConfFileName)
#make a config object
confObj = conf.Conf()
confDict = confObj.read(args.confFile)
############
#Have a PWM file and want to scan it across a fasta file and then apply sequence coverage
############
if confDict['job.type']['type'] == 'motifPwmScanCov':
print 'motif_pwm scanning and coverage operation'
motif_pwm_scan_cov.callMotifPwmScanCov(args, confDict, fileList)
#move files to results folder
for outFile in fileList:
shutil.move(outFile, resultsDirName)
exit()
############
#Have a PWM file and want to scan it across a file only
############
if confDict['job.type']['type'] == 'pwmScan':
print 'motif pwm scanning only'
motif_pwm_scan_only.callMotifPwmScanOnly(args, confDict, fileList)
#move files to results folder
for outFile in fileList:
shutil.move(outFile, resultsDirName)
exit()
###############
#EXIT
###############
exit()
#calling main
if( __name__ == "__main__" ):
main(sys.argv)
| RamiOran/SeqCov | main.py | Python | mit | 3,137 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from odoo.exceptions import UserError
import odoo.addons.decimal_precision as dp
class StockQuantPackage(models.Model):
_inherit = "stock.quant.package"
@api.one
@api.depends('quant_ids', 'children_ids')
def _compute_weight(self):
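        # A package's weight is the quantity-weighted sum of its quants' product weights plus the
        # recursively computed weight of any nested child packages.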
weight = 0
for quant in self.quant_ids:
weight += quant.qty * quant.product_id.weight
for pack in self.children_ids:
pack._compute_weight()
weight += pack.weight
self.weight = weight
weight = fields.Float(compute='_compute_weight')
shipping_weight = fields.Float(string='Shipping Weight', help="Can be changed during the 'put in pack' to adjust the weight of the shipping.")
class StockPackOperation(models.Model):
_inherit = 'stock.pack.operation'
@api.multi
def manage_package_type(self):
self.ensure_one()
return {
'name': _('Package Details'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'stock.quant.package',
'view_id': self.env.ref('delivery.view_quant_package_form_save').id,
'target': 'new',
'res_id': self.result_package_id.id,
'context': {
'current_package_carrier_type': self.picking_id.carrier_id.delivery_type if self.picking_id.carrier_id.delivery_type not in ['base_on_rule', 'fixed'] else 'none',
},
}
class StockPicking(models.Model):
_inherit = 'stock.picking'
def _default_uom(self):
weight_uom_id = self.env.ref('product.product_uom_kgm', raise_if_not_found=False)
if not weight_uom_id:
uom_categ_id = self.env.ref('product.product_uom_categ_kgm').id
weight_uom_id = self.env['product.uom'].search([('category_id', '=', uom_categ_id), ('factor', '=', 1)], limit=1)
return weight_uom_id
@api.one
@api.depends('pack_operation_ids')
def _compute_packages(self):
self.ensure_one()
packs = set()
for packop in self.pack_operation_ids:
if packop.result_package_id:
packs.add(packop.result_package_id.id)
elif packop.package_id and not packop.product_id:
packs.add(packop.package_id.id)
self.package_ids = list(packs)
@api.one
@api.depends('pack_operation_ids')
def _compute_bulk_weight(self):
weight = 0.0
for packop in self.pack_operation_ids:
if packop.product_id and not packop.result_package_id:
weight += packop.product_uom_id._compute_quantity(packop.product_qty, packop.product_id.uom_id) * packop.product_id.weight
self.weight_bulk = weight
@api.one
@api.depends('package_ids', 'weight_bulk')
def _compute_shipping_weight(self):
self.shipping_weight = self.weight_bulk + sum([pack.shipping_weight for pack in self.package_ids])
carrier_price = fields.Float(string="Shipping Cost")
delivery_type = fields.Selection(related='carrier_id.delivery_type', readonly=True)
carrier_id = fields.Many2one("delivery.carrier", string="Carrier")
volume = fields.Float(copy=False)
weight = fields.Float(compute='_cal_weight', digits=dp.get_precision('Stock Weight'), store=True)
carrier_tracking_ref = fields.Char(string='Tracking Reference', copy=False)
number_of_packages = fields.Integer(string='Number of Packages', copy=False)
weight_uom_id = fields.Many2one('product.uom', string='Unit of Measure', required=True, readonly="1", help="Unit of measurement for Weight", default=_default_uom)
package_ids = fields.Many2many('stock.quant.package', compute='_compute_packages', string='Packages')
weight_bulk = fields.Float('Bulk Weight', compute='_compute_bulk_weight')
shipping_weight = fields.Float("Weight for Shipping", compute='_compute_shipping_weight')
@api.onchange('carrier_id')
def onchange_carrier(self):
if self.carrier_id.delivery_type in ['fixed', 'base_on_rule']:
order = self.sale_id
if order:
self.carrier_price = self.carrier_id.get_price_available(order)
else:
self.carrier_price = self.carrier_id.price
@api.depends('product_id', 'move_lines')
def _cal_weight(self):
for picking in self:
picking.weight = sum(move.weight for move in picking.move_lines if move.state != 'cancel')
@api.multi
def do_transfer(self):
# TDE FIXME: should work in batch
self.ensure_one()
res = super(StockPicking, self).do_transfer()
if self.carrier_id and self.carrier_id.delivery_type not in ['fixed', 'base_on_rule'] and self.carrier_id.integration_level == 'rate_and_ship':
self.send_to_shipper()
if self.carrier_id:
self._add_delivery_cost_to_so()
return res
@api.multi
def put_in_pack(self):
# TDE FIXME: work in batch, please
self.ensure_one()
package = super(StockPicking, self).put_in_pack()
current_package_carrier_type = self.carrier_id.delivery_type if self.carrier_id.delivery_type not in ['base_on_rule', 'fixed'] else 'none'
count_packaging = self.env['product.packaging'].search_count([('package_carrier_type', '=', current_package_carrier_type)])
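        # No packaging type is defined for this carrier, so skip the package details wizard.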
if not count_packaging:
return False
# By default, sum the weights of all package operations contained in this package
pack_operation_ids = self.env['stock.pack.operation'].search([('result_package_id', '=', package.id)])
package_weight = sum([x.qty_done * x.product_id.weight for x in pack_operation_ids])
package.shipping_weight = package_weight
return {
'name': _('Package Details'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'stock.quant.package',
'view_id': self.env.ref('delivery.view_quant_package_form_save').id,
'target': 'new',
'res_id': package.id,
'context': {
'current_package_carrier_type': current_package_carrier_type,
},
}
@api.multi
def send_to_shipper(self):
self.ensure_one()
res = self.carrier_id.send_shipping(self)[0]
self.carrier_price = res['exact_price']
self.carrier_tracking_ref = res['tracking_number']
order_currency = self.sale_id.currency_id or self.company_id.currency_id
msg = _("Shipment sent to carrier %s for expedition with tracking number %s<br/>Cost: %.2f %s") % (self.carrier_id.name, self.carrier_tracking_ref, self.carrier_price, order_currency.name)
self.message_post(body=msg)
@api.multi
def _add_delivery_cost_to_so(self):
self.ensure_one()
sale_order = self.sale_id
if sale_order.invoice_shipping_on_delivery:
sale_order._create_delivery_line(self.carrier_id, self.carrier_price)
@api.multi
def open_website_url(self):
self.ensure_one()
if self.carrier_id.get_tracking_link(self):
url = self.carrier_id.get_tracking_link(self)[0]
else:
raise UserError(_("Your delivery method has no redirect on courier provider's website to track this order."))
client_action = {'type': 'ir.actions.act_url',
'name': "Shipment Tracking Page",
'target': 'new',
'url': url,
}
return client_action
@api.one
def cancel_shipment(self):
self.carrier_id.cancel_shipment(self)
msg = "Shipment %s cancelled" % self.carrier_tracking_ref
self.message_post(body=msg)
self.carrier_tracking_ref = False
@api.multi
def check_packages_are_identical(self):
'''Some shippers require identical packages in the same shipment. This utility checks it.'''
self.ensure_one()
if self.package_ids:
packages = [p.packaging_id for p in self.package_ids]
if len(set(packages)) != 1:
package_names = ', '.join([str(p.name) for p in packages])
raise UserError(_('You are shipping different packaging types in the same shipment.\nPackaging Types: %s' % package_names))
return True
class StockReturnPicking(models.TransientModel):
_inherit = 'stock.return.picking'
@api.multi
def _create_returns(self):
# Prevent copy of the carrier and carrier price when generating return picking
# (we have no integration of returns for now)
new_picking, pick_type_id = super(StockReturnPicking, self)._create_returns()
picking = self.env['stock.picking'].browse(new_picking)
picking.write({'carrier_id': False,
'carrier_price': 0.0})
return new_picking, pick_type_id
| hip-odoo/odoo | addons/delivery/models/stock_picking.py | Python | agpl-3.0 | 9,041 |
import os
from additional.signals import signal_err
from additional.start_local import start_local
from additional.start_server import start_server
def start(args):
# correct and access check
if len(args) < 3:
signal_err('Local or server part?')
return False
start_type = args[2]
if start_type != 'local' and start_type != 'server':
signal_err('Incorrect part')
return False
current_dir = os.getcwd()
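    # The launcher expects a 'Sanelotto_local' or 'Sanelotto_server' directory inside the current working directory.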
if not os.access(current_dir + '/' + 'Sanelotto_' + start_type, os.F_OK):
signal_err('Directory "' + start_type + '" not found')
return False
if not os.access(current_dir + '/' + 'Sanelotto_' + start_type, os.R_OK):
signal_err('Permission denied')
return False
# launch
if start_type == 'local':
start_local(current_dir)
elif start_type == 'server':
start_server(current_dir)
else:
signal_err('Incorrect command') | Vladimir37/Sanelotto | routes/start.py | Python | mit | 957 |
"""
tests.component.media_player.test_demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests demo media_player component.
"""
import unittest
from unittest.mock import patch
from pprint import pprint
import homeassistant.core as ha
from homeassistant.const import (
STATE_OFF, STATE_ON, STATE_UNKNOWN, STATE_PLAYING, STATE_PAUSED)
import homeassistant.components.media_player as mp
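# One of the entities created by the demo media_player platform set up in these tests.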
entity_id = 'media_player.walkman'
class TestDemoMediaPlayer(unittest.TestCase):
""" Test the media_player module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_volume_services(self):
assert mp.setup(self.hass, {'media_player': {'platform': 'demo'}})
state = self.hass.states.get(entity_id)
assert 1.0 == state.attributes.get('volume_level')
mp.set_volume_level(self.hass, 0.5, entity_id)
self.hass.pool.block_till_done()
state = self.hass.states.get(entity_id)
assert 0.5 == state.attributes.get('volume_level')
mp.volume_down(self.hass, entity_id)
self.hass.pool.block_till_done()
state = self.hass.states.get(entity_id)
assert 0.4 == state.attributes.get('volume_level')
mp.volume_up(self.hass, entity_id)
self.hass.pool.block_till_done()
state = self.hass.states.get(entity_id)
assert 0.5 == state.attributes.get('volume_level')
assert False is state.attributes.get('is_volume_muted')
mp.mute_volume(self.hass, True, entity_id)
self.hass.pool.block_till_done()
state = self.hass.states.get(entity_id)
assert True is state.attributes.get('is_volume_muted')
def test_turning_off_and_on(self):
assert mp.setup(self.hass, {'media_player': {'platform': 'demo'}})
assert self.hass.states.is_state(entity_id, 'playing')
mp.turn_off(self.hass, entity_id)
self.hass.pool.block_till_done()
assert self.hass.states.is_state(entity_id, 'off')
assert not mp.is_on(self.hass, entity_id)
mp.turn_on(self.hass, entity_id)
self.hass.pool.block_till_done()
assert self.hass.states.is_state(entity_id, 'playing')
mp.toggle(self.hass, entity_id)
self.hass.pool.block_till_done()
assert self.hass.states.is_state(entity_id, 'off')
assert not mp.is_on(self.hass, entity_id)
def test_playing_pausing(self):
assert mp.setup(self.hass, {'media_player': {'platform': 'demo'}})
assert self.hass.states.is_state(entity_id, 'playing')
mp.media_pause(self.hass, entity_id)
self.hass.pool.block_till_done()
assert self.hass.states.is_state(entity_id, 'paused')
mp.media_play_pause(self.hass, entity_id)
self.hass.pool.block_till_done()
assert self.hass.states.is_state(entity_id, 'playing')
mp.media_play_pause(self.hass, entity_id)
self.hass.pool.block_till_done()
assert self.hass.states.is_state(entity_id, 'paused')
mp.media_play(self.hass, entity_id)
self.hass.pool.block_till_done()
assert self.hass.states.is_state(entity_id, 'playing')
def test_prev_next_track(self):
assert mp.setup(self.hass, {'media_player': {'platform': 'demo'}})
state = self.hass.states.get(entity_id)
assert 1 == state.attributes.get('media_track')
assert 0 == (mp.SUPPORT_PREVIOUS_TRACK &
state.attributes.get('supported_media_commands'))
mp.media_next_track(self.hass, entity_id)
self.hass.pool.block_till_done()
state = self.hass.states.get(entity_id)
assert 2 == state.attributes.get('media_track')
assert 0 < (mp.SUPPORT_PREVIOUS_TRACK &
state.attributes.get('supported_media_commands'))
mp.media_next_track(self.hass, entity_id)
self.hass.pool.block_till_done()
state = self.hass.states.get(entity_id)
assert 3 == state.attributes.get('media_track')
assert 0 < (mp.SUPPORT_PREVIOUS_TRACK &
state.attributes.get('supported_media_commands'))
mp.media_previous_track(self.hass, entity_id)
self.hass.pool.block_till_done()
state = self.hass.states.get(entity_id)
assert 2 == state.attributes.get('media_track')
assert 0 < (mp.SUPPORT_PREVIOUS_TRACK &
state.attributes.get('supported_media_commands'))
@patch('homeassistant.components.media_player.demo.DemoYoutubePlayer.media_seek')
def test_play_media(self, mock_seek):
assert mp.setup(self.hass, {'media_player': {'platform': 'demo'}})
ent_id = 'media_player.living_room'
state = self.hass.states.get(ent_id)
assert 0 < (mp.SUPPORT_PLAY_MEDIA &
state.attributes.get('supported_media_commands'))
assert state.attributes.get('media_content_id') is not None
mp.play_media(self.hass, 'youtube', 'some_id', ent_id)
self.hass.pool.block_till_done()
state = self.hass.states.get(ent_id)
assert 0 < (mp.SUPPORT_PLAY_MEDIA &
state.attributes.get('supported_media_commands'))
assert 'some_id' == state.attributes.get('media_content_id')
assert not mock_seek.called
mp.media_seek(self.hass, 100, ent_id)
self.hass.pool.block_till_done()
assert mock_seek.called
| toddeye/home-assistant | tests/components/media_player/test_demo.py | Python | mit | 5,542 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import warnings
import numpy
import chainer
from chainer import training
from chainer.training import extensions
import chainerx
from facade_dataset import FacadeDataset
from facade_visualizer import out_image
from net import Decoder
from net import Discriminator
from net import Encoder
from updater import FacadeUpdater
def main():
parser = argparse.ArgumentParser(
description='chainer implementation of pix2pix')
parser.add_argument('--batchsize', '-b', type=int, default=1,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=200,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--dataset', '-i', default='./facade/base',
help='Directory of image files.')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot')
parser.add_argument('--seed', type=int, default=0,
help='Random seed')
parser.add_argument('--snapshot_interval', type=int, default=1000,
help='Interval of snapshot')
parser.add_argument('--display_interval', type=int, default=100,
help='Interval of displaying log to console')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
if device.xp is chainerx:
sys.stderr.write('This example does not support ChainerX devices.\n')
sys.exit(1)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
device.use()
# Set up a neural network to train
enc = Encoder(in_ch=12)
dec = Decoder(out_ch=3)
dis = Discriminator(in_ch=12, out_ch=3)
enc.to_device(device)
dec.to_device(device)
dis.to_device(device)
# Setup an optimizer
def make_optimizer(model, alpha=0.0002, beta1=0.5):
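        # Adam with the hyper-parameters commonly used for pix2pix (alpha=2e-4, beta1=0.5) plus a small
        # weight-decay hook.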
optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
return optimizer
opt_enc = make_optimizer(enc)
opt_dec = make_optimizer(dec)
opt_dis = make_optimizer(dis)
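    # Split the facade images into a training range and a held-out range consumed by the test iterator.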
train_d = FacadeDataset(args.dataset, data_range=(1, 300))
test_d = FacadeDataset(args.dataset, data_range=(300, 379))
train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)
# Set up a trainer
updater = FacadeUpdater(
models=(enc, dec, dis),
iterator={
'main': train_iter,
'test': test_iter},
optimizer={
'enc': opt_enc, 'dec': opt_dec,
'dis': opt_dis},
device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
snapshot_interval = (args.snapshot_interval, 'iteration')
display_interval = (args.display_interval, 'iteration')
trainer.extend(extensions.snapshot(
filename='snapshot_iter_{.updater.iteration}.npz'),
trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
enc, 'enc_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
dec, 'dec_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'enc/loss', 'dec/loss', 'dis/loss',
]), trigger=display_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(
out_image(
updater, enc, dec,
5, 5, args.seed, args.out),
trigger=snapshot_interval)
if args.resume is not None:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__ == '__main__':
main()
| pfnet/chainer | examples/pix2pix/train_facade.py | Python | mit | 5,144 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'blog_post', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=60)),
('body', self.gf('django.db.models.fields.TextField')()),
('tags', self.gf('django.db.models.fields.TextField')()),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'blog', ['Post'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'blog_post')
models = {
u'blog.post': {
'Meta': {'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tags': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'})
}
}
complete_apps = ['blog'] | daviferreira/leticiastallone.com | leticiastallone/blog/migrations/0001_initial.py | Python | mit | 1,393 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Loss operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import util
from tensorflow.python.platform import tf_logging as logging
class Reduction(object):
"""Types of loss reduction."""
# Un-reduced weighted losses with the same shape as input.
NONE = "none"
# Scalar sum of `NONE`.
SUM = "weighted_sum"
# Scalar `SUM` divided by sum of weights.
MEAN = "weighted_mean"
# Scalar `SUM` divided by number of non-zero weights.
SUM_BY_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights"
@classmethod
def all(cls):
return (
cls.NONE,
cls.SUM,
cls.MEAN,
cls.SUM_BY_NONZERO_WEIGHTS)
@classmethod
def validate(cls, key):
if key not in cls.all():
raise ValueError("Invalid ReductionKey %s." % key)
def _safe_div(numerator, denominator, name="value"):
"""Computes a safe divide which returns 0 if the denominator is zero.
Note that the function contains an additional conditional check that is
necessary for avoiding situations where the loss is zero causing NaNs to
creep into the gradient computation.
Args:
numerator: An arbitrary `Tensor`.
denominator: `Tensor` whose shape matches `numerator` and whose values are
assumed to be non-negative.
name: An optional name for the returned op.
Returns:
The element-wise value of the numerator divided by the denominator.
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.div(numerator, array_ops.where(
math_ops.equal(denominator, 0),
array_ops.ones_like(denominator), denominator)),
array_ops.zeros_like(numerator),
name=name)
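# --- Editorial illustration (not part of the original TensorFlow module). ---
# A plain-NumPy restatement of the `_safe_div` semantics above: divide where
# the denominator is positive and return 0 elsewhere (including 0/0).  The
# helper name is invented for this sketch.
def _example_safe_div_semantics():
  import numpy as np
  num = np.array([1.0, 2.0, 0.0])
  den = np.array([2.0, 0.0, 0.0])
  return np.where(den > 0, num / np.where(den == 0, 1.0, den), 0.0)  # [0.5, 0., 0.]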
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
num_present: The number of measurable elements in `losses`.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total_loss = math_ops.reduce_sum(losses)
return _safe_div(total_loss, num_present)
def _num_present(losses, weights, per_batch=False):
"""Computes the number of elements in the loss function induced by `weights`.
A given weights tensor induces different numbers of usable elements in the
`losses` tensor. The `weights` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
`[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
in effect, tiled to match the shape of `losses`. Following this effective
tile, the total number of present elements is the number of non-zero weights.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: `Tensor` of shape `[]`, `[batch_size]` or
`[batch_size, d1, ... dK]`, where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is `True`, the value is returned as a tensor of size
`[batch_size]`. Otherwise, a single scalar tensor is returned.
"""
with ops.name_scope(None, "num_present", (losses, weights)) as scope:
weights = math_ops.to_float(weights)
present = array_ops.where(
math_ops.equal(weights, 0.0),
array_ops.zeros_like(weights),
array_ops.ones_like(weights))
present = weights_broadcast_ops.broadcast_weights(present, losses)
if per_batch:
return math_ops.reduce_sum(
present, axis=math_ops.range(1, array_ops.rank(present)),
keep_dims=True, name=scope)
return math_ops.reduce_sum(present, name=scope)
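# --- Editorial illustration (not part of the original TensorFlow module). ---
# A smaller NumPy variant of the docstring example above: weights of shape [2]
# broadcast against losses of shape [2, 3], so each nonzero weight accounts
# for a whole row.  The helper name is invented for this sketch.
def _example_num_present_semantics():
  import numpy as np
  losses = np.zeros((2, 3))
  weights = np.array([1.0, 0.0])
  present = (weights != 0.0).astype(float)[:, None] * np.ones_like(losses)
  return present.sum()  # 3.0: only the first row is counted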
def compute_weighted_loss(
losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Computes the weighted loss.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`losses`, and must be broadcastable to `losses` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: the scope for the operations performed in computing the loss.
loss_collection: the loss will be added to these collections.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
`NONE`, this has the same shape as `losses`; otherwise, it is scalar.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
`losses`, or if the number of dimensions (rank) of either `losses` or
`weights` is missing.
"""
Reduction.validate(reduction)
with ops.name_scope(scope, "weighted_loss", (losses, weights)):
with ops.control_dependencies((
weights_broadcast_ops.assert_broadcastable(weights, losses),)):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.to_float(losses)
weights = math_ops.to_float(weights)
weighted_losses = math_ops.multiply(losses, weights)
if reduction == Reduction.NONE:
loss = weighted_losses
else:
loss = math_ops.reduce_sum(weighted_losses)
if reduction == Reduction.MEAN:
loss = _safe_mean(
loss,
math_ops.reduce_sum(array_ops.ones_like(losses) * weights))
elif reduction == Reduction.SUM_BY_NONZERO_WEIGHTS:
loss = _safe_mean(loss, _num_present(losses, weights))
# Convert the result back to the input type.
loss = math_ops.cast(loss, input_dtype)
util.add_loss(loss, loss_collection)
return loss
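# --- Editorial illustration (not part of the original TensorFlow module). ---
# The reductions above differ only in their final normalization.  This NumPy
# sketch mirrors the arithmetic so the three scalar variants can be checked by
# hand; the helper name is invented for this sketch.
def _example_reduction_semantics():
  import numpy as np
  losses = np.array([1.0, 2.0, 3.0, 4.0])
  weights = np.array([1.0, 1.0, 0.0, 2.0])
  weighted = losses * weights                      # NONE: [1., 2., 0., 8.]
  total = weighted.sum()                           # SUM: 11.0
  mean = total / weights.sum()                     # MEAN: 11.0 / 4.0
  by_nonzero = total / np.count_nonzero(weights)   # SUM_BY_NONZERO_WEIGHTS: 11.0 / 3
  return weighted, total, mean, by_nonzero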
def absolute_difference(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds an Absolute Difference loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a `Tensor` of
shape `[batch_size]`, then the total loss for each sample of the batch is
rescaled by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "absolute_difference",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
def cosine_distance(
labels, predictions, dim=None, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
labels: `Tensor` whose shape matches 'predictions'
predictions: An arbitrary matrix.
dim: The dimension along which the cosine distance is computed.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`weights` is `None`.
"""
if dim is None:
raise ValueError("`dim` cannot be None.")
with ops.name_scope(scope, "cosine_distance_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(dim,), keep_dims=True)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
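# --- Editorial illustration (not part of the original TensorFlow module). ---
# For unit-normalized rows the loss above reduces to 1 - <predictions, labels>
# per sample, as this NumPy sketch shows; the helper name is invented.
def _example_cosine_distance_by_hand():
  import numpy as np
  predictions = np.array([[1.0, 0.0], [0.6, 0.8]])
  labels = np.array([[0.0, 1.0], [0.6, 0.8]])
  return 1.0 - np.sum(predictions * labels, axis=1)  # [1., 0.]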
def hinge_loss(labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a hinge loss to the training procedure.
Args:
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0.
logits: The logits, a float tensor.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match.
"""
with ops.name_scope(scope, "hinge_loss", (logits, labels)) as scope:
logits = math_ops.to_float(logits)
labels = math_ops.to_float(labels)
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
losses = nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a Huber Loss term to the training procedure.
For each value x in `error=labels-predictions`, the following is calculated:
```
0.5 * x^2 if |x| <= d
0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`.
See: https://en.wikipedia.org/wiki/Huber_loss
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
delta: `float`, the point where the huber loss function
changes from a quadratic to linear.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "huber_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
error = math_ops.subtract(predictions, labels)
abs_error = math_ops.abs(error)
quadratic = math_ops.minimum(abs_error, delta)
# The following expression is the same in value as
# tf.maximum(abs_error - delta, 0), but importantly the gradient for the
# expression when abs_error == delta is 0 (for tf.maximum it would be 1).
# This is necessary to avoid doubling the gradient, since there is already a
# nonzero contribution to the gradient from the quadratic term.
linear = (abs_error - quadratic)
losses = 0.5 * quadratic**2 + delta * linear
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
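# --- Editorial illustration (not part of the original TensorFlow module). ---
# A NumPy restatement of the piecewise definition in the huber_loss docstring,
# built the same way the op builds it (quadratic/linear split) so the two
# forms can be compared numerically.  The helper name is invented.
def _example_huber_by_hand(error, delta=1.0):
  import numpy as np
  abs_error = np.abs(np.asarray(error, dtype=float))
  quadratic = np.minimum(abs_error, delta)
  linear = abs_error - quadratic
  return 0.5 * quadratic ** 2 + delta * linear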
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a Log Loss term to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "log_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = -math_ops.multiply(
labels,
math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
# TODO(b/37208492): Add reduction arg.
def mean_pairwise_squared_error(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Adds a pairwise-errors-squared loss to the training procedure.
Unlike `mean_squared_error`, which is a measure of the differences between
corresponding elements of `predictions` and `labels`,
`mean_pairwise_squared_error` is a measure of the differences between pairs of
corresponding elements of `predictions` and `labels`.
  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences, which are summed to compute the loss:
loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the
corresponding pairs are computed within each batch sample but not across
samples within a batch. For example, if `predictions` represents a batch of
16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
is drawn from each image, but not across images.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector.
Args:
labels: The ground truth output tensor, whose shape must match the shape of
`predictions`.
predictions: The predicted outputs, a tensor of size
`[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in
`predictions`.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      `[batch_size]`, or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` that returns the weighted loss.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_pairwise_squared_error",
(predictions, labels, weights)) as scope:
weights = math_ops.to_float(weights)
labels = math_ops.to_float(labels)
with ops.control_dependencies((
weights_broadcast_ops.assert_broadcastable(weights, labels),)):
predictions = math_ops.to_float(predictions)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
diffs = math_ops.subtract(predictions, labels)
reduction_indices = math_ops.range(1, array_ops.rank(diffs))
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs), reduction_indices=reduction_indices,
keep_dims=True)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
num_present_per_batch)
sum_diff = math_ops.reduce_sum(
diffs, reduction_indices=reduction_indices, keep_dims=True)
term2 = 2.0 * _safe_div(math_ops.square(sum_diff),
math_ops.square(num_present_per_batch))
weighted_losses = math_ops.multiply(term1 - term2, weights)
loss = math_ops.reduce_sum(weighted_losses)
mean_loss = array_ops.where(
math_ops.reduce_sum(num_present_per_batch) > 0,
loss,
array_ops.zeros_like(loss),
name="value")
util.add_loss(mean_loss, loss_collection)
return mean_loss
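# --- Editorial illustration (not part of the original TensorFlow module). ---
# A literal NumPy transcription of the three-term example in the docstring
# above, written in terms of the element-wise differences; the op itself works
# from per-batch sums and normalizes differently, so treat this purely as a
# reading aid.  The helper name is invented.
def _example_pairwise_differences():
  import numpy as np
  labels = np.array([1.0, 2.0, 4.0])         # [a, b, c]
  predictions = np.array([1.5, 2.0, 3.0])    # [x, y, z]
  d = predictions - labels
  pairs = [(d[0] - d[1]) ** 2, (d[0] - d[2]) ** 2, (d[1] - d[2]) ** 2]
  return sum(pairs) / len(pairs)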
def mean_squared_error(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_squared_error",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.square(math_ops.subtract(predictions, labels))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
def sigmoid_cross_entropy(
multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
    multi_class_labels: `[batch_size, num_classes]` target integer labels in
      `{0, 1}`.
logits: Float `[batch_size, num_classes]` logits outputs of the network.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
label_smoothing: If greater than `0` then smooth the labels.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has the same shape as `logits`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None.
"""
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
(logits, multi_class_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
logging.info("logits.dtype=%s.", logits.dtype)
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
logging.info("multi_class_labels.dtype=%s.", multi_class_labels.dtype)
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
if label_smoothing > 0:
multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
logits=logits,
name="xentropy")
logging.info("losses.dtype=%s.", losses.dtype)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
def softmax_cross_entropy(
onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Args:
onehot_labels: `[batch_size, num_classes]` target one-hot-encoded labels.
    logits: `[batch_size, num_classes]` logits outputs of the network.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`onehot_labels`, and must be broadcastable to `onehot_labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `losses`
dimension).
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has shape `[batch_size]`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weights` is invalid or if `weights` is None.
"""
with ops.name_scope(scope, "softmax_cross_entropy_loss",
(logits, onehot_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
losses = nn.softmax_cross_entropy_with_logits(labels=onehot_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
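# --- Editorial illustration (not part of the original TensorFlow module). ---
# Label smoothing as applied above, restated with NumPy: a one-hot row such as
# [0, 0, 1] becomes [0.033.., 0.033.., 0.933..] for label_smoothing=0.1.
# The helper name is invented for this sketch.
def _example_label_smoothing(onehot_labels, label_smoothing=0.1):
  import numpy as np
  onehot_labels = np.asarray(onehot_labels, dtype=float)
  num_classes = onehot_labels.shape[1]
  return onehot_labels * (1.0 - label_smoothing) + label_smoothing / num_classes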
# TODO(ptucker): Merge this with similar method in metrics_impl.
def _remove_squeezable_dimensions(
labels, predictions, weights=None, expected_rank_diff=0):
"""Internal version of _remove_squeezable_dimensions which handles weights.
Squeezes `predictions` and `labels` if their ranks differ from expected by
exactly 1.
Squeezes `weights` if its rank is 1 more than the new rank of `predictions`
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
labels: Label values, a `Tensor` whose dimensions match `predictions`.
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
weights: Optional weight `Tensor`. It will be squeezed if it's not scalar,
and its rank is 1 more than the new rank of `labels`.
expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
Returns:
Tuple of `predictions`, `labels` and `weights`, possibly with the last
dimension squeezed.
"""
labels, predictions = confusion_matrix.remove_squeezable_dimensions(
labels, predictions, expected_rank_diff=expected_rank_diff)
if weights is not None:
weights = ops.convert_to_tensor(weights)
labels_rank = labels.get_shape().ndims
weights_shape = weights.get_shape()
weights_rank = weights_shape.ndims
if (labels_rank is not None) and (weights_rank is not None):
# Use static rank.
rank_diff = weights_rank - labels_rank
if rank_diff == 1:
weights = array_ops.squeeze(weights, [-1])
return labels, predictions, weights
# Use dynamic rank.
rank_diff = array_ops.rank(weights) - array_ops.rank(labels)
if (weights_rank is None) or (
weights_shape.dims[-1].is_compatible_with(1)):
weights = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(weights, [-1]),
lambda: weights)
return labels, predictions, weights
def sparse_softmax_cross_entropy(
labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape [`batch_size`], then the loss weights apply to each
corresponding sample.
Args:
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape
`[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float32` or `float64`.
weights: Coefficients for the loss. This must be scalar or of same rank as
`labels`
scope: the scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has the same shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shapes of logits, labels, and weight are incompatible, or
if `weights` is None.
"""
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
(logits, labels, weights)) as scope:
# As documented above in Args, labels contain class IDs and logits contains
# 1 probability per class ID, so we expect rank(logits) - rank(labels) == 1;
# therefore, expected_rank_diff=1.
labels, logits, weights = _remove_squeezable_dimensions(
labels, logits, weights, expected_rank_diff=1)
losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
| wangyum/tensorflow | tensorflow/python/ops/losses/losses_impl.py | Python | apache-2.0 | 32,549 |
from ROOT import gROOT,gSystem
gSystem.Load( 'libNucDB' )
from ROOT import NucDBManager,NucDBExperiment,NucDBMeasurement,NucDBDiscreteVariable,NucDBInvariantMassDV,NucDBPhotonEnergyDV
from NucDBExtractors import *
import os
class SLACE142Extractor(NucDBRawDataExtractor):
def __init__(self):
NucDBRawDataExtractor.__init__(self)
self.iQsq=3
self.iValueRow=4
self.istatErr=5
self.isysErr=6
self.NumberOfLines=0
self.linesRead=0
def ParseLine(self):
""" See input file for column structures
"""
ixbjorken=2
ixMin=0
ixMax=1
values = self.currentline.split()
if self.linesRead > self.NumberOfLines :
self.rowcut.currentValue=1
return
deltax=float(values[ixMax])-float(values[ixMin])
x = self.fCurrentDataPoint.GetBinVariable('x')
x.SetBinValueSize(float(values[ixbjorken]),deltax)
#x.Print()
Qsq = self.fCurrentDataPoint.GetBinVariable("Qsquared")
Qsq.SetBinValueSize(float(values[self.iQsq]),0.1)
#Qsq.Print()
self.fCurrentDataPoint.SetValue(float(values[self.iValueRow]))
self.fCurrentDataPoint.GetStatError().SetError(float(values[self.istatErr].lstrip('+')))
self.fCurrentDataPoint.GetSystError().SetError(float(values[self.isysErr].lstrip('+')))
self.fCurrentDataPoint.CalculateTotalError()
#
W = self.fCurrentDataPoint.GetDependentVariable("W")
if not W :
W = NucDBInvariantMassDV()
self.fCurrentDataPoint.AddDependentVariable(W)
if W :
W.SetVariable(0,x)
W.SetVariable(1,Qsq)
nu = self.fCurrentDataPoint.GetDependentVariable("nu")
if not nu :
nu = NucDBPhotonEnergyDV()
self.fCurrentDataPoint.AddDependentVariable(nu)
if nu :
nu.SetVariable(0,x)
nu.SetVariable(1,Qsq)
self.fCurrentDataPoint.CalculateDependentVariables()
#
#self.fCurrentDataPoint.Print()
self.linesRead+=1
if __name__ == "__main__":
manager = NucDBManager.GetManager(1)
experiment = manager.GetExperiment("SLAC_E142")
if not experiment :
experiment = NucDBExperiment("SLAC_E142","SLAC_E142")
#HELIUM-3
g1He3 = experiment.GetMeasurement("g1He3")
if not g1He3 :
g1He3 = NucDBMeasurement("g1He3","g_{1}^{He3}")
experiment.AddMeasurement(g1He3)
g1He3.ClearDataPoints()
g1He3.SetColor(4011)
extractor1 = SLACE142Extractor()
extractor1.iValueRow=7
extractor1.istatErr=8
extractor1.isysErr=9
extractor1.SetMeasurement(g1He3)
extractor1.SetInputFile("experiments/SLAC-E142/a1g1He3.dat",16,24-16)
#extractor1.linestoskip=17
#extractor1.NumberOfLines=24-17
Xbjorken = NucDBBinnedVariable("x","x")
Qsq = NucDBBinnedVariable("Qsquared","Q^{2}")
extractor1.fCurrentDataPoint.AddBinVariable(Xbjorken)
extractor1.fCurrentDataPoint.AddBinVariable(Qsq)
extractor1.Initialize()
extractor1.ExtractAllValues()
g1He3.BuildGraph()
A1He3 = experiment.GetMeasurement("A1He3")
if not A1He3 :
A1He3 = NucDBMeasurement("A1He3","A_{1}^{He3}")
experiment.AddMeasurement(A1He3)
A1He3.ClearDataPoints()
A1He3.SetColor(4011)
extractor2 = SLACE142Extractor()
extractor2.iValueRow=4
extractor2.isysErr=5
extractor2.istatErr=6
extractor2.SetMeasurement(A1He3)
extractor2.SetInputFile("experiments/SLAC-E142/a1g1He3.dat",16,24-16)
#extractor2.linestoskip=17
#extractor2.NumberOfLines=24-17
Xbjorken = NucDBBinnedVariable("x","x")
Qsq = NucDBBinnedVariable("Qsquared","Q^{2}")
extractor2.fCurrentDataPoint.AddBinVariable(Xbjorken)
extractor2.fCurrentDataPoint.AddBinVariable(Qsq)
extractor2.Initialize()
extractor2.ExtractAllValues()
A1He3.BuildGraph()
g2He3 = experiment.GetMeasurement("g2He3")
if not g2He3 :
g2He3 = NucDBMeasurement("g2He3","g_{2}^{He3}")
experiment.AddMeasurement(g2He3)
g2He3.ClearDataPoints()
g2He3.SetColor(4011)
extractor3 = SLACE142Extractor()
extractor3.iValueRow=7
extractor3.istatErr=8
extractor3.isysErr=9
extractor3.SetMeasurement(g2He3)
extractor3.SetInputFile("experiments/SLAC-E142/a2g2He3.dat",16,24-16)
#extractor3.linestoskip=17
#extractor3.NumberOfLines=24-17
Xbjorken = NucDBBinnedVariable("x","x")
Qsq = NucDBBinnedVariable("Qsquared","Q^{2}")
extractor3.fCurrentDataPoint.AddBinVariable(Xbjorken)
extractor3.fCurrentDataPoint.AddBinVariable(Qsq)
extractor3.Initialize()
extractor3.ExtractAllValues()
g2He3.BuildGraph()
A2He3 = experiment.GetMeasurement("A2He3")
if not A2He3 :
A2He3 = NucDBMeasurement("A2He3","A_{2}^{He3}")
experiment.AddMeasurement(A2He3)
A2He3.ClearDataPoints()
A2He3.SetColor(4011)
extractor4 = SLACE142Extractor()
extractor4.iValueRow=4
extractor4.isysErr=5
extractor4.istatErr=6
extractor4.SetMeasurement(A2He3)
    extractor4.SetInputFile("experiments/SLAC-E142/a2g2He3.dat",16,24-16)
#extractor4.linestoskip=17
#extractor4.NumberOfLines=24-17
Xbjorken = NucDBBinnedVariable("x","x")
Qsq = NucDBBinnedVariable("Qsquared","Q^{2}")
extractor4.fCurrentDataPoint.AddBinVariable(Xbjorken)
extractor4.fCurrentDataPoint.AddBinVariable(Qsq)
extractor4.Initialize()
extractor4.ExtractAllValues()
A2He3.BuildGraph()
#Neutron
g1n = experiment.GetMeasurement("g1n")
if not g1n :
g1n = NucDBMeasurement("g1n","g_{1}^{n}")
experiment.AddMeasurement(g1n)
g1n.ClearDataPoints()
g1n.SetColor(4011)
extractor11 = SLACE142Extractor()
extractor11.iValueRow=7
extractor11.istatErr=8
extractor11.isysErr=9
extractor11.SetMeasurement(g1n)
extractor11.SetInputFile("experiments/SLAC-E142/a1g1n.dat",18,26-18)
#extractor11.linestoskip=19
#extractor11.NumberOfLines=26-19
Xbjorken = NucDBBinnedVariable("x","x")
Qsq = NucDBBinnedVariable("Qsquared","Q^{2}")
extractor11.fCurrentDataPoint.AddBinVariable(Xbjorken)
extractor11.fCurrentDataPoint.AddBinVariable(Qsq)
extractor11.Initialize()
extractor11.ExtractAllValues()
g1n.BuildGraph()
A1n = experiment.GetMeasurement("A1n")
if not A1n :
A1n = NucDBMeasurement("A1n","A_{1}^{n}")
experiment.AddMeasurement(A1n)
A1n.ClearDataPoints()
A1n.SetColor(4011)
extractor12 = SLACE142Extractor()
extractor12.iValueRow=4
extractor12.isysErr=5
extractor12.istatErr=6
extractor12.SetMeasurement(A1n)
extractor12.SetInputFile("experiments/SLAC-E142/a1g1n.dat",18,26-18)
#extractor12.linestoskip=19
#extractor12.NumberOfLines=26-19
Xbjorken = NucDBBinnedVariable("x","x")
Qsq = NucDBBinnedVariable("Qsquared","Q^{2}")
extractor12.fCurrentDataPoint.AddBinVariable(Xbjorken)
extractor12.fCurrentDataPoint.AddBinVariable(Qsq)
extractor12.Initialize()
extractor12.ExtractAllValues()
A1n.BuildGraph()
g2n = experiment.GetMeasurement("g2n")
if not g2n :
g2n = NucDBMeasurement("g2n","g_{2}^{n}")
experiment.AddMeasurement(g2n)
g2n.ClearDataPoints()
g2n.SetColor(4011)
extractor13 = SLACE142Extractor()
extractor13.iValueRow=7
extractor13.istatErr=8
extractor13.isysErr=9
extractor13.SetMeasurement(g2n)
extractor13.SetInputFile("experiments/SLAC-E142/a2g2n.dat",17,25-17)
#extractor13.linestoskip=18
#extractor13.NumberOfLines=25-18
Xbjorken = NucDBBinnedVariable("x","x")
Qsq = NucDBBinnedVariable("Qsquared","Q^{2}")
extractor13.fCurrentDataPoint.AddBinVariable(Xbjorken)
extractor13.fCurrentDataPoint.AddBinVariable(Qsq)
extractor13.Initialize()
extractor13.ExtractAllValues()
g2n.BuildGraph()
A2n = experiment.GetMeasurement("A2n")
if not A2n :
A2n = NucDBMeasurement("A2n","A_{2}^{n}")
experiment.AddMeasurement(A2n)
A2n.ClearDataPoints()
A2n.SetColor(4011)
extractor14 = SLACE142Extractor()
extractor14.iValueRow=4
extractor14.isysErr=5
extractor14.istatErr=6
extractor14.SetMeasurement(A2n)
extractor14.SetInputFile("experiments/SLAC-E142/a2g2n.dat",17,25-17)
#extractor14.linestoskip=18
#extractor14.NumberOfLines=25-18
Xbjorken = NucDBBinnedVariable("x","x")
Qsq = NucDBBinnedVariable("Qsquared","Q^{2}")
extractor14.fCurrentDataPoint.AddBinVariable(Xbjorken)
extractor14.fCurrentDataPoint.AddBinVariable(Qsq)
extractor14.Initialize()
extractor14.ExtractAllValues()
A2n.BuildGraph()
experiment.Print()
manager.SaveExperiment(experiment)
| whit2333/NucDB | experiments/SLAC-E142/SLAC-E142_NucDB.py | Python | gpl-3.0 | 8,790 |
###
# Copyright (c) 2005, Ali Afshar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import twisted.internet.reactor as reactor
import twisted.internet.protocol as protocol
import os
class ExternalNotice(callbacks.Plugin):
"""Allow the supybot user to send a notice to a channel."""
def __init__(self, irc):
callbacks.Plugin.__init__(self, irc)
self.server = SupyUDP(self)
self.path = os.path.expanduser('~/.supybot-external-notice.%s' %
self.nick)
if os.path.exists(self.path):
os.remove(self.path)
self.listener = reactor.listenUNIXDatagram(self.path, self.server)
        os.chmod(self.path, 0200)  # octal: owner write-only
def receivedCommand(self, data):
"""Received a command over the UDP wire."""
error = None
commanditems = data.split()
if len(commanditems) > 2:
network = commanditems.pop(0)
channel = commanditems.pop(0)
text = ' '.join(commanditems)
netfound = False
for irc in world.ircs:
if irc.network == network:
netfound = True
chanfound = False
for onchannel in irc.state.channels:
if channel == onchannel[1:]:
chanfound = True
m = ircmsgs.notice(onchannel, text)
irc.sendMsg(m)
if not chanfound:
error = 'Bad Channel "%s"' % channel
if not netfound:
error = 'Bad Network "%s"' % network
if error:
self.log.info('Attempted external notice: %s', error)
else:
self.log.info('Successful external notice')
def die(self):
""" Called on unload. Stop listening and remove the socket. """
self.listener.stopListening()
os.remove(self.path)
class SupyUDP(protocol.DatagramProtocol):
""" A very simple datagram protocol """
def __init__(self, cb):
self.cb = cb
def datagramReceived(self, data, address):
""" Just pass the data on to the plugin. """
self.cb.receivedCommand(data)
Class = ExternalNotice
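# --- Editorial illustration (not part of the original plugin). ---
# The plugin listens on a UNIX datagram socket and expects datagrams of the
# form "<network> <channel-without-#> <notice text>".  A minimal client could
# look like this; the socket path depends on the bot's nick, so the value
# below is only a placeholder.
def _example_send_notice(path='/home/bot/.supybot-external-notice.mybot'):
    import socket
    s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    s.sendto('freenode mychannel build finished', path)
    s.close()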
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| kg-bot/SupyBot | plugins/ExternalNotice/plugin.py | Python | gpl-3.0 | 4,068 |
"""Factor Analysis.
A latent linear variable model.
FactorAnalysis is similar to probabilistic PCA implemented by PCA.score
While PCA assumes Gaussian noise with the same variance for each
feature, the FactorAnalysis model assumes different variances for
each of them.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
# Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Licence: BSD3
import warnings
from math import sqrt, log
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array, check_random_state
from ..utils.extmath import fast_logdet, fast_dot, randomized_svd, squared_norm
from ..utils.validation import check_is_fitted
from ..exceptions import ConvergenceWarning
class FactorAnalysis(BaseEstimator, TransformerMixin):
"""Factor Analysis (FA)
A simple linear generative model with Gaussian latent variables.
The observations are assumed to be caused by a linear transformation of
lower dimensional latent factors and added Gaussian noise.
Without loss of generality the factors are distributed according to a
Gaussian with zero mean and unit covariance. The noise is also zero mean
and has an arbitrary diagonal covariance matrix.
If we would restrict the model further, by assuming that the Gaussian
noise is even isotropic (all diagonal entries are the same) we would obtain
:class:`PPCA`.
FactorAnalysis performs a maximum likelihood estimate of the so-called
`loading` matrix, the transformation of the latent variables to the
observed ones, using expectation-maximization (EM).
Read more in the :ref:`User Guide <FA>`.
Parameters
----------
n_components : int | None
Dimensionality of latent space, the number of components
of ``X`` that are obtained after ``transform``.
If None, n_components is set to the number of features.
tol : float
Stopping tolerance for EM algorithm.
copy : bool
Whether to make a copy of X. If ``False``, the input X gets overwritten
during fitting.
max_iter : int
Maximum number of iterations.
noise_variance_init : None | array, shape=(n_features,)
The initial guess of the noise variance for each feature.
If None, it defaults to np.ones(n_features)
svd_method : {'lapack', 'randomized'}
Which SVD method to use. If 'lapack' use standard SVD from
scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
Defaults to 'randomized'. For most applications 'randomized' will
be sufficiently precise while providing significant speed gains.
Accuracy can also be improved by setting higher values for
`iterated_power`. If this is not sufficient, for maximum precision
you should choose 'lapack'.
iterated_power : int, optional
Number of iterations for the power method. 3 by default. Only used
if ``svd_method`` equals 'randomized'
random_state : int or RandomState
Pseudo number generator state used for random sampling. Only used
if ``svd_method`` equals 'randomized'
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
loglike_ : list, [n_iterations]
The log likelihood at each iteration.
noise_variance_ : array, shape=(n_features,)
The estimated noise variance for each feature.
n_iter_ : int
Number of iterations run.
References
----------
.. David Barber, Bayesian Reasoning and Machine Learning,
Algorithm 21.1
.. Christopher M. Bishop: Pattern Recognition and Machine Learning,
Chapter 12.2.4
See also
--------
PCA: Principal component analysis is also a latent linear variable model
which however assumes equal noise variance for each feature.
This extra assumption makes probabilistic PCA faster as it can be
computed in closed form.
FastICA: Independent component analysis, a latent variable model with
non-Gaussian latent variables.
"""
def __init__(self, n_components=None, tol=1e-2, copy=True, max_iter=1000,
noise_variance_init=None, svd_method='randomized',
iterated_power=3, random_state=0):
self.n_components = n_components
self.copy = copy
self.tol = tol
self.max_iter = max_iter
if svd_method not in ['lapack', 'randomized']:
            raise ValueError('SVD method %s is not supported. Please consult'
                             ' the documentation' % svd_method)
self.svd_method = svd_method
self.noise_variance_init = noise_variance_init
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using EM
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self
"""
X = check_array(X, copy=self.copy, dtype=np.float64)
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
# some constant terms
nsqrt = sqrt(n_samples)
llconst = n_features * log(2. * np.pi) + n_components
var = np.var(X, axis=0)
if self.noise_variance_init is None:
psi = np.ones(n_features, dtype=X.dtype)
else:
if len(self.noise_variance_init) != n_features:
raise ValueError("noise_variance_init dimension does not "
"with number of features : %d != %d" %
(len(self.noise_variance_init), n_features))
psi = np.array(self.noise_variance_init)
loglike = []
old_ll = -np.inf
SMALL = 1e-12
# we'll modify svd outputs to return unexplained variance
# to allow for unified computation of loglikelihood
if self.svd_method == 'lapack':
def my_svd(X):
_, s, V = linalg.svd(X, full_matrices=False)
return (s[:n_components], V[:n_components],
squared_norm(s[n_components:]))
elif self.svd_method == 'randomized':
random_state = check_random_state(self.random_state)
def my_svd(X):
_, s, V = randomized_svd(X, n_components,
random_state=random_state,
n_iter=self.iterated_power)
return s, V, squared_norm(X) - squared_norm(s)
else:
            raise ValueError('SVD method %s is not supported. Please consult'
                             ' the documentation' % self.svd_method)
for i in xrange(self.max_iter):
# SMALL helps numerics
sqrt_psi = np.sqrt(psi) + SMALL
s, V, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
s **= 2
# Use 'maximum' here to avoid sqrt problems.
W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * V
del V
W *= sqrt_psi
# loglikelihood
ll = llconst + np.sum(np.log(s))
ll += unexp_var + np.sum(np.log(psi))
ll *= -n_samples / 2.
loglike.append(ll)
if (ll - old_ll) < self.tol:
break
old_ll = ll
psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
else:
warnings.warn('FactorAnalysis did not converge.' +
' You might want' +
' to increase the number of iterations.',
ConvergenceWarning)
self.components_ = W
self.noise_variance_ = psi
self.loglike_ = loglike
self.n_iter_ = i + 1
return self
def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
The latent variables of X.
"""
check_is_fitted(self, 'components_')
X = check_array(X)
Ih = np.eye(len(self.components_))
X_transformed = X - self.mean_
Wpsi = self.components_ / self.noise_variance_
cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
tmp = fast_dot(X_transformed, Wpsi.T)
X_transformed = fast_dot(tmp, cov_z)
return X_transformed
def get_covariance(self):
"""Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
-------
cov : array, shape (n_features, n_features)
Estimated covariance of data.
"""
check_is_fitted(self, 'components_')
cov = np.dot(self.components_.T, self.components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the FactorAnalysis model.
Returns
-------
precision : array, shape (n_features, n_features)
Estimated precision of data.
"""
check_is_fitted(self, 'components_')
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components == 0:
return np.diag(1. / self.noise_variance_)
if self.n_components == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
precision = np.dot(components_ / self.noise_variance_, components_.T)
precision.flat[::len(precision) + 1] += 1.
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= self.noise_variance_[:, np.newaxis]
precision /= -self.noise_variance_[np.newaxis, :]
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def score_samples(self, X):
"""Compute the log-likelihood of each sample
Parameters
----------
X: array, shape (n_samples, n_features)
The data
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'components_')
Xr = X - self.mean_
precision = self.get_precision()
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Compute the average log-likelihood of the samples
Parameters
----------
X: array, shape (n_samples, n_features)
The data
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
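# --- Editorial usage sketch (not part of the original scikit-learn module). ---
# Minimal end-to-end use of the estimator defined above on synthetic data;
# the data generation and hyper-parameters are arbitrary choices for
# illustration only.
def _example_factor_analysis_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    latent = rng.randn(200, 2)
    X = latent.dot(rng.randn(2, 10)) + 0.1 * rng.randn(200, 10)
    fa = FactorAnalysis(n_components=2, random_state=0).fit(X)
    return fa.transform(X), fa.score(X)  # (200, 2) latent means, average log-likelihood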
| kashif/scikit-learn | sklearn/decomposition/factor_analysis.py | Python | bsd-3-clause | 11,953 |
"""Parse a Python file and retrieve classes and methods.
Parse enough of a Python file to recognize class and method
definitions and to find out the superclasses of a class.
The interface consists of a single function:
readmodule(module, path)
module is the name of a Python module, path is an optional list of
directories where the module is to be searched. If present, path is
prepended to the system search path sys.path.
The return value is a dictionary. The keys of the dictionary are
the names of the classes defined in the module (including classes
that are defined via the from XXX import YYY construct). The values
are class instances of the class Class defined here.
A class is described by the class Class in this module. Instances
of this class have the following instance variables:
name -- the name of the class
super -- a list of super classes (Class instances)
methods -- a dictionary of methods
file -- the file in which the class was defined
lineno -- the line in the file on which the class statement occurred
The dictionary of methods uses the method names as keys and the line
numbers on which the method was defined as values.
If the name of a super class is not recognized, the corresponding
entry in the list of super classes is not a class instance but a
string giving the name of the super class. Since import statements
are recognized and imported modules are scanned as well, this
shouldn't happen often.
BUGS
- Continuation lines are not dealt with at all, except inside strings.
- Nested classes and functions can confuse it.
- Code that doesn't pass tabnanny or python -t will confuse it, unless
  you set the module TABWIDTH variable (default 8) to the correct tab width
for the file.
PACKAGE RELATED BUGS
- If you have a package and a module inside that or another package
with the same name, module caching doesn't work properly since the
key is the base name of the module/package.
- The only entry that is returned when you readmodule a package is a
__path__ whose value is a list which confuses certain class browsers.
- When code does:
from package import subpackage
class MyClass(subpackage.SuperClass):
...
It can't locate the parent. It probably needs to have the same
hairy logic that the import locator already does. (This logic
exists coded in Python in the freeze package.)
"""
import sys
import imp
import re
import string
__all__ = ["readmodule"]
TABWIDTH = 8
_getnext = re.compile(r"""
(?P<String>
\""" [^"\\]* (?:
(?: \\. | "(?!"") )
[^"\\]*
)*
\"""
| ''' [^'\\]* (?:
(?: \\. | '(?!'') )
[^'\\]*
)*
'''
| " [^"\\\n]* (?: \\. [^"\\\n]*)* "
| ' [^'\\\n]* (?: \\. [^'\\\n]*)* '
)
| (?P<Method>
^
(?P<MethodIndent> [ \t]* )
def [ \t]+
(?P<MethodName> [a-zA-Z_] \w* )
[ \t]* \(
)
| (?P<Class>
^
(?P<ClassIndent> [ \t]* )
class [ \t]+
(?P<ClassName> [a-zA-Z_] \w* )
[ \t]*
(?P<ClassSupers> \( [^)\n]* \) )?
[ \t]* :
)
| (?P<Import>
^ import [ \t]+
(?P<ImportList> [^#;\n]+ )
)
| (?P<ImportFrom>
^ from [ \t]+
(?P<ImportFromPath>
[a-zA-Z_] \w*
(?:
[ \t]* \. [ \t]* [a-zA-Z_] \w*
)*
)
[ \t]+
import [ \t]+
(?P<ImportFromList> [^#;\n]+ )
)
""", re.VERBOSE | re.DOTALL | re.MULTILINE).search
_modules = {} # cache of modules we've seen
# each Python class is represented by an instance of this class
class Class:
'''Class to represent a Python class.'''
def __init__(self, module, name, super, file, lineno):
self.module = module
self.name = name
if super is None:
super = []
self.super = super
self.methods = {}
self.file = file
self.lineno = lineno
def _addmethod(self, name, lineno):
self.methods[name] = lineno
class Function(Class):
'''Class to represent a top-level Python function'''
def __init__(self, module, name, file, lineno):
Class.__init__(self, module, name, None, file, lineno)
def _addmethod(self, name, lineno):
assert 0, "Function._addmethod() shouldn't be called"
def readmodule(module, path=[], inpackage=0):
'''Backwards compatible interface.
Like readmodule_ex() but strips Function objects from the
resulting dictionary.'''
dict = readmodule_ex(module, path, inpackage)
res = {}
for key, value in dict.items():
if not isinstance(value, Function):
res[key] = value
return res
def readmodule_ex(module, path=[], inpackage=0):
'''Read a module file and return a dictionary of classes.
Search for MODULE in PATH and sys.path, read and parse the
module and return a dictionary with one entry for each class
found in the module.'''
dict = {}
i = module.rfind('.')
if i >= 0:
# Dotted module name
package = module[:i].strip()
submodule = module[i+1:].strip()
parent = readmodule_ex(package, path, inpackage)
child = readmodule_ex(submodule, parent['__path__'], 1)
return child
if _modules.has_key(module):
# we've seen this module before...
return _modules[module]
if module in sys.builtin_module_names:
# this is a built-in module
_modules[module] = dict
return dict
# search the path for the module
f = None
if inpackage:
try:
f, file, (suff, mode, type) = \
imp.find_module(module, path)
except ImportError:
f = None
if f is None:
fullpath = list(path) + sys.path
f, file, (suff, mode, type) = imp.find_module(module, fullpath)
if type == imp.PKG_DIRECTORY:
dict['__path__'] = [file]
_modules[module] = dict
path = [file] + path
f, file, (suff, mode, type) = \
imp.find_module('__init__', [file])
if type != imp.PY_SOURCE:
# not Python source, can't do anything with this module
f.close()
_modules[module] = dict
return dict
_modules[module] = dict
classstack = [] # stack of (class, indent) pairs
src = f.read()
f.close()
# To avoid having to stop the regexp at each newline, instead
# when we need a line number we simply string.count the number of
# newlines in the string since the last time we did this; i.e.,
# lineno = lineno + \
# string.count(src, '\n', last_lineno_pos, here)
# last_lineno_pos = here
countnl = string.count
lineno, last_lineno_pos = 1, 0
i = 0
while 1:
m = _getnext(src, i)
if not m:
break
start, i = m.span()
if m.start("Method") >= 0:
# found a method definition or function
thisindent = _indent(m.group("MethodIndent"))
meth_name = m.group("MethodName")
lineno = lineno + \
countnl(src, '\n',
last_lineno_pos, start)
last_lineno_pos = start
# close all classes indented at least as much
while classstack and \
classstack[-1][1] >= thisindent:
del classstack[-1]
if classstack:
# it's a class method
cur_class = classstack[-1][0]
cur_class._addmethod(meth_name, lineno)
else:
# it's a function
f = Function(module, meth_name,
file, lineno)
dict[meth_name] = f
elif m.start("String") >= 0:
pass
elif m.start("Class") >= 0:
# we found a class definition
thisindent = _indent(m.group("ClassIndent"))
# close all classes indented at least as much
while classstack and \
classstack[-1][1] >= thisindent:
del classstack[-1]
lineno = lineno + \
countnl(src, '\n', last_lineno_pos, start)
last_lineno_pos = start
class_name = m.group("ClassName")
inherit = m.group("ClassSupers")
if inherit:
# the class inherits from other classes
inherit = inherit[1:-1].strip()
names = []
for n in inherit.split(','):
n = n.strip()
if dict.has_key(n):
# we know this super class
n = dict[n]
else:
c = n.split('.')
if len(c) > 1:
# super class
# is of the
# form module.class:
# look in
# module for class
m = c[-2]
c = c[-1]
if _modules.has_key(m):
d = _modules[m]
if d.has_key(c):
n = d[c]
names.append(n)
inherit = names
# remember this class
cur_class = Class(module, class_name, inherit,
file, lineno)
dict[class_name] = cur_class
classstack.append((cur_class, thisindent))
elif m.start("Import") >= 0:
# import module
for n in m.group("ImportList").split(','):
n = n.strip()
try:
# recursively read the imported module
d = readmodule_ex(n, path, inpackage)
except:
##print 'module', n, 'not found'
pass
elif m.start("ImportFrom") >= 0:
# from module import stuff
mod = m.group("ImportFromPath")
names = m.group("ImportFromList").split(',')
try:
# recursively read the imported module
d = readmodule_ex(mod, path, inpackage)
except:
##print 'module', mod, 'not found'
continue
# add any classes that were defined in the
# imported module to our name space if they
# were mentioned in the list
for n in names:
n = n.strip()
if d.has_key(n):
dict[n] = d[n]
elif n == '*':
# only add a name if not
# already there (to mimic what
# Python does internally)
# also don't add names that
# start with _
for n in d.keys():
if n[0] != '_' and \
not dict.has_key(n):
dict[n] = d[n]
else:
assert 0, "regexp _getnext found something unexpected"
return dict
def _indent(ws, _expandtabs=string.expandtabs):
return len(_expandtabs(ws, TABWIDTH))
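# Usage sketch (illustrative only; the module name below is just an example of
# an importable pure-Python module):
#
#   import pyclbr
#   info = pyclbr.readmodule_ex('string')
#   for name, obj in info.items():
#       print name, getattr(obj, 'lineno', None)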
| remybaranx/qtaste | tools/jython/lib/Lib/pyclbr.py | Python | gpl-3.0 | 11,452 |
# -*- coding: utf-8 -*-
from .. import models
from .generic import Manager, AllMixin, GetByIdMixin, SyncMixin
class ItemsManager(Manager, AllMixin, GetByIdMixin, SyncMixin):
state_name = 'items'
object_type = 'item'
def add(self, content, project_id, **kwargs):
"""
Creates a local item object, by appending the equivalent request to the
queue.
"""
obj = models.Item({'content': content, 'project_id': project_id},
self.api)
obj.temp_id = obj['id'] = self.api.generate_uuid()
obj.data.update(kwargs)
self.state[self.state_name].append(obj)
cmd = {
'type': 'item_add',
'temp_id': obj.temp_id,
'uuid': self.api.generate_uuid(),
'args': obj.data,
}
self.queue.append(cmd)
return obj
def update(self, item_id, **kwargs):
"""
Updates an item remotely, by appending the equivalent request to the
queue.
"""
args = {'id': item_id}
args.update(kwargs)
cmd = {
'type': 'item_update',
'uuid': self.api.generate_uuid(),
'args': args,
}
self.queue.append(cmd)
def delete(self, item_ids):
"""
Deletes items remotely, by appending the equivalent request to the
queue.
"""
cmd = {
'type': 'item_delete',
'uuid': self.api.generate_uuid(),
'args': {
'ids': item_ids
}
}
self.queue.append(cmd)
def move(self, project_items, to_project):
"""
Moves items to another project remotely, by appending the equivalent
request to the queue.
"""
cmd = {
'type': 'item_move',
'uuid': self.api.generate_uuid(),
'args': {
'project_items': project_items,
'to_project': to_project,
},
}
self.queue.append(cmd)
def close(self, item_id):
"""
Marks item as done
"""
cmd = {
'type': 'item_close',
'uuid': self.api.generate_uuid(),
'args': {
'id': item_id,
},
}
self.queue.append(cmd)
def complete(self, item_ids, force_history=0):
"""
Marks items as completed remotely, by appending the equivalent request to the
queue.
"""
cmd = {
'type': 'item_complete',
'uuid': self.api.generate_uuid(),
'args': {
'ids': item_ids,
'force_history': force_history,
},
}
self.queue.append(cmd)
def uncomplete(self, item_ids, update_item_orders=1, restore_state=None):
"""
Marks items as not completed remotely, by appending the equivalent request to the
queue.
"""
args = {
'ids': item_ids,
'update_item_orders': update_item_orders,
}
if restore_state:
args['restore_state'] = restore_state
cmd = {
'type': 'item_uncomplete',
'uuid': self.api.generate_uuid(),
'args': args,
}
self.queue.append(cmd)
def update_date_complete(self, item_id, new_date_utc=None, date_string=None,
is_forward=None):
"""
Completes a recurring task remotely, by appending the equivalent
request to the queue.
"""
args = {
'id': item_id,
}
if new_date_utc:
args['new_date_utc'] = new_date_utc
if date_string:
args['date_string'] = date_string
if is_forward:
args['is_forward'] = is_forward
cmd = {
'type': 'item_update_date_complete',
'uuid': self.api.generate_uuid(),
'args': args,
}
self.queue.append(cmd)
def update_orders_indents(self, ids_to_orders_indents):
"""
Updates the order and indents of multiple items remotely, by appending
the equivalent request to the queue.
"""
cmd = {
'type': 'item_update_orders_indents',
'uuid': self.api.generate_uuid(),
'args': {
'ids_to_orders_indents': ids_to_orders_indents,
},
}
self.queue.append(cmd)
def update_day_orders(self, ids_to_orders):
"""
        Updates the day orders of multiple items remotely, by appending the
        equivalent request to the queue.
"""
cmd = {
'type': 'item_update_day_orders',
'uuid': self.api.generate_uuid(),
'args': {
'ids_to_orders': ids_to_orders,
},
}
self.queue.append(cmd)
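# Usage sketch (illustrative only; assumes the usual TodoistAPI wrapper that
# exposes this manager as api.items -- the token and project id are placeholders):
#
#   api = todoist.TodoistAPI('0123456789abcdef')
#   api.items.add('Buy milk', project_id=128501470)
#   api.commit()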
| ajar98/todoist_bot | todoist/managers/items.py | Python | mit | 4,930 |
#!/usr/bin/python
import re
import time
from os import stat
import httpagentparser
import pygeoip
import sys
import logging
import logging.handlers
nginx_log="/tmp/access.log"
syslog_host="10.1.0.107"
syslog_port=5514
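# The parsing below assumes a pipe-delimited nginx log_format in which the
# client IP is the second-to-last field and the user agent is the last field,
# e.g. (hypothetical):
# log_format elk '$time_local | $request | $status | $remote_addr | $http_user_agent';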
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = (syslog_host,syslog_port))
#formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
#handler.setFormatter(formatter)
log.addHandler(handler)
gi = pygeoip.GeoIP('/usr/share/GeoIP/GeoIP.dat')
gic = pygeoip.GeoIP('/usr/share/GeoIP/GeoIPCity.dat')
while True:
try:
logfile = open(nginx_log,"r")
except IOError:
print "IO Error"
time.sleep(5)
continue
logfile.seek(0,2)
while True:
current_position = int(logfile.tell())
file_size = stat(nginx_log).st_size
#print "file_size: %s" % file_size
#print "pos: %s " % current_position
if current_position > file_size:
#print "truncated"
break
line = logfile.readline()
if not line:
time.sleep(0.1)
continue
# only send the url with html or '/'
if re.search("GET \/ HTTP|html ", line):
myline=line.split('|')
try:
clientip=myline[-2].strip()
except:
continue
#print clientip
user_agent=myline.pop()
myline=map(str.strip, myline)
agent_info=httpagentparser.detect(user_agent)
try:
country = '"' + gi.country_name_by_addr(clientip) + '"'
except:
country='"N/A"'
try:
if gic.record_by_addr(clientip)['city']:
city = '"' + gic.record_by_addr(clientip)['city'] + '"'
else:
city = '"N/A"'
except:
city='"N/A"'
try:
os= '"' + agent_info['platform']['name'] + '"'
except:
os='"N/A"'
try:
device = '"' + agent_info['dist']['name'] + '"'
except:
device = '""'
try:
browser = '"' + agent_info['browser']['name'] + '"'
except:
browser = '"N/A"'
myline.append(os)
myline.append(device)
myline.append(browser)
myline.append(country)
myline.append(city)
try:
final_line = " ".join(myline)
#print final_line
# don't send search bot access
if not agent_info['bot']:
log.debug(final_line)
except:
pass
| bgc8080/ak-tools | ak-elk-log-sender.py | Python | gpl-3.0 | 2,802 |
# Copyright 2015 UniMOOC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from models.models import Student
from models.models import StudentProfileDAO
from modules.um_gobalo.service import GobaloAPI
from .exceptions import UserNotAuthorized
class Students(object):
@classmethod
def get_by_email(cls, student_email):
return Student.get_by_email(cls._plain_email(student_email))
@classmethod
def register_user(cls, user):
from modules.um_editions.service import Editions
student = Student.get_enrolled_student_by_email(user.email())
if student:
return student
name, last_name, edition_code = GobaloAPI.get_user_info(user.email())
if not name:
raise UserNotAuthorized('gobalo_user_not_authorized')
if not Editions.get(edition_code):
raise UserNotAuthorized('edition_for_student_not_exists')
additional_fields = json.dumps({
'name': name, 'last_name': last_name, 'edition_code': edition_code,
'original_email': user.email(), 'acreditations_email': user.email()
})
student = StudentProfileDAO._add_new_student_for_current_user(
user.user_id(),
cls._plain_email(user.email()),
name + ' ' + last_name,
additional_fields
)
Editions.register_user(edition_code, student)
return student
@classmethod
def _plain_email(cls, email):
user_mail, domain_mail = email.lower().split("@")
plain_email = user_mail.replace(".", "") + "@" + domain_mail
return plain_email
@classmethod
def clean_email(cls, email):
return cls._plain_email(email)
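# Behaviour sketch (illustrative only; the address is a placeholder):
# _plain_email lower-cases the address and strips dots from the local part, so
#   Students.clean_email('John.Doe@Example.COM') == 'johndoe@example.com'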
| UniMOOC/AAClassroom | modules/um_students/service.py | Python | apache-2.0 | 2,246 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import UnitedStates
class Arizona(UnitedStates):
"""Arizona"""
martin_luther_king_label = "Dr. Martin Luther King Jr./Civil Rights Day"
presidents_day_label = "Lincoln/Washington Presidents' Day"
| sayoun/workalendar | workalendar/usa/arizona.py | Python | mit | 355 |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
'-DYOGA_USE_POSIX',
'-DYOGA_USE_DEBUGGING_UTILITIES',
'-DYOGA_DEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if compilation_database_folder:
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
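# Behaviour sketch (illustrative only; paths are placeholders):
#
#   MakeRelativePathsInFlagsAbsolute( [ '-I', '.', '-isystem', 'third_party' ],
#                                     '/home/user/project' )
#   # -> [ '-I', '/home/user/project/.',
#   #      '-isystem', '/home/user/project/third_party' ]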
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| Florianjw/libyoga | .ycm_extra_conf.py | Python | gpl-3.0 | 6,213 |
from bank_CI import BankCI
from bank_controller import BankController
from settings import DB_NAME, CREATE_TABLES, DROP_DATABASE
from sql_manager import BankDatabaseManager
def main():
manager = BankDatabaseManager.create_from_db_and_sql(DB_NAME, CREATE_TABLES, DROP_DATABASE, create_if_exists=False)
controller = BankController(manager)
command_interface = BankCI(controller)
command_interface.main_menu()
if __name__ == '__main__':
main()
| pepincho/Python101-and-Algo1-Courses | Programming-101-v3/week9/1-Money-In-The-Bank/start.py | Python | mit | 465 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mps_v2', '0028_backfill_committeeresolutions'),
]
operations = [
migrations.AlterField(
model_name='suggestion',
name='source_id',
field=models.ForeignKey(db_column=b'source_id', to_field=b'source_id', to='mps_v2.CommitteeResolution'),
preserve_default=False,
),
migrations.RenameField(
model_name='suggestion',
old_name='source_id',
new_name='source_resolution',
),
migrations.AlterUniqueTogether(
name='suggestion',
unique_together=set([('source_resolution', 'source_index')]),
),
# Workaround for https://code.djangoproject.com/ticket/25621
# D58bb2cf0b1407c8c24fa06b0cc34f38 is what you get from Django's
# BaseDatabaseSchemaEditor._create_index_name() when you pass it
# (model, ["source_id"], suffix="_fk_mps_v2_committeeresolution_source_id")
migrations.RunSQL([], [
"ALTER TABLE mps_v2_suggestion DROP FOREIGN KEY D58bb2cf0b1407c8c24fa06b0cc34f38",
]),
]
| ManoSeimas/manoseimas.lt | manoseimas/mps_v2/migrations/0029_link_suggestion_to_committeeresolution.py | Python | agpl-3.0 | 1,271 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class NaCl(recipe_util.Recipe):
"""Basic Recipe class for NaCl."""
@staticmethod
def fetch_spec(props):
url = ('https://chromium.googlesource.com/native_client/'
'src/native_client.git')
solution = { 'name' :'native_client',
'url' : url,
'deps_file': '.DEPS.git',
'managed' : False,
'custom_deps': {},
'safesync_url': '',
}
spec = {
'solutions': [solution],
'svn_url': 'svn://svn.chromium.org/native_client',
'svn_branch': 'trunk/src/native_client',
'svn_ref': 'master',
}
if props.get('submodule_git_svn_spec'):
spec['submodule_git_svn_spec'] = props['submodule_git_svn_spec']
if props.get('target_os'):
spec['target_os'] = props['target_os'].split(',')
if props.get('target_os_only'):
spec['target_os_only'] = props['target_os_only']
checkout_type = 'gclient_git_svn'
if props.get('nosvn'):
checkout_type = 'gclient_git'
spec_type = '%s_spec' % checkout_type
return {
'type': checkout_type,
spec_type: spec,
}
@staticmethod
def expected_root(_props):
return 'native_client'
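# Behaviour sketch (illustrative only): with the 'nosvn' property set,
# fetch_spec() returns a plain git spec, roughly:
#
#   NaCl.fetch_spec({'nosvn': True})
#   # -> {'type': 'gclient_git',
#   #     'gclient_git_spec': {'solutions': [...], 'svn_url': ..., ...}}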
def main(argv=None):
return NaCl().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| Phonebooth/depot_tools | recipes/nacl.py | Python | bsd-3-clause | 1,637 |
import json
import uuid
from datetime import datetime
from urllib.parse import urlencode
from flask import Blueprint, abort, current_app, jsonify, request
from notifications_utils.recipients import (
is_uk_phone_number,
use_numeric_sender,
)
from sqlalchemy.exc import IntegrityError
from app.config import QueueNames
from app.dao.permissions_dao import permission_dao
from app.dao.service_user_dao import (
dao_get_service_user,
dao_update_service_user,
)
from app.dao.services_dao import dao_fetch_service_by_id
from app.dao.template_folder_dao import (
dao_get_template_folder_by_id_and_service_id,
)
from app.dao.templates_dao import dao_get_template_by_id
from app.dao.users_dao import (
count_user_verify_codes,
create_secret_code,
create_user_code,
dao_archive_user,
get_user_and_accounts,
get_user_by_email,
get_user_by_id,
get_user_code,
get_users_by_partial_email,
increment_failed_login_count,
reset_failed_login_count,
save_model_user,
save_user_attribute,
update_user_password,
use_user_code,
)
from app.errors import InvalidRequest, register_errors
from app.models import (
EMAIL_TYPE,
KEY_TYPE_NORMAL,
SMS_TYPE,
Permission,
Service,
)
from app.notifications.process_notifications import (
persist_notification,
send_notification_to_queue,
)
from app.schema_validation import validate
from app.schemas import (
create_user_schema,
email_data_request_schema,
partial_email_data_request_schema,
user_update_password_schema_load_json,
user_update_schema_load_json,
)
from app.user.users_schema import (
post_send_user_email_code_schema,
post_send_user_sms_code_schema,
post_set_permissions_schema,
post_verify_code_schema,
post_verify_webauthn_schema,
)
from app.utils import url_with_token
user_blueprint = Blueprint('user', __name__)
register_errors(user_blueprint)
@user_blueprint.errorhandler(IntegrityError)
def handle_integrity_error(exc):
"""
Handle integrity errors caused by the auth type/mobile number check constraint
"""
if 'ck_user_has_mobile_or_other_auth' in str(exc):
# we don't expect this to trip, so still log error
current_app.logger.exception('Check constraint ck_user_has_mobile_or_other_auth triggered')
return jsonify(result='error', message='Mobile number must be set if auth_type is set to sms_auth'), 400
raise exc
@user_blueprint.route('', methods=['POST'])
def create_user():
user_to_create, errors = create_user_schema.load(request.get_json())
req_json = request.get_json()
if not req_json.get('password', None):
errors.update({'password': ['Missing data for required field.']})
raise InvalidRequest(errors, status_code=400)
save_model_user(user_to_create, password=req_json.get('password'), validated_email_access=True)
result = user_to_create.serialize()
return jsonify(data=result), 201
@user_blueprint.route('/<uuid:user_id>', methods=['POST'])
def update_user_attribute(user_id):
user_to_update = get_user_by_id(user_id=user_id)
req_json = request.get_json()
if 'updated_by' in req_json:
updated_by = get_user_by_id(user_id=req_json.pop('updated_by'))
else:
updated_by = None
update_dct, errors = user_update_schema_load_json.load(req_json)
if errors:
raise InvalidRequest(errors, status_code=400)
save_user_attribute(user_to_update, update_dict=update_dct)
if updated_by:
if 'email_address' in update_dct:
template = dao_get_template_by_id(current_app.config['TEAM_MEMBER_EDIT_EMAIL_TEMPLATE_ID'])
recipient = user_to_update.email_address
reply_to = template.service.get_default_reply_to_email_address()
elif 'mobile_number' in update_dct:
template = dao_get_template_by_id(current_app.config['TEAM_MEMBER_EDIT_MOBILE_TEMPLATE_ID'])
recipient = user_to_update.mobile_number
reply_to = get_sms_reply_to_for_notify_service(recipient, template)
else:
return jsonify(data=user_to_update.serialize()), 200
service = Service.query.get(current_app.config['NOTIFY_SERVICE_ID'])
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=recipient,
service=service,
personalisation={
'name': user_to_update.name,
'servicemanagername': updated_by.name,
'email address': user_to_update.email_address
},
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=reply_to
)
send_notification_to_queue(saved_notification, False, queue=QueueNames.NOTIFY)
return jsonify(data=user_to_update.serialize()), 200
def get_sms_reply_to_for_notify_service(recipient, template):
if not is_uk_phone_number(recipient) and use_numeric_sender(recipient):
reply_to = current_app.config['NOTIFY_INTERNATIONAL_SMS_SENDER']
else:
reply_to = template.service.get_default_sms_sender()
return reply_to
@user_blueprint.route('/<uuid:user_id>/archive', methods=['POST'])
def archive_user(user_id):
user = get_user_by_id(user_id)
dao_archive_user(user)
return '', 204
@user_blueprint.route('/<uuid:user_id>/activate', methods=['POST'])
def activate_user(user_id):
user = get_user_by_id(user_id=user_id)
if user.state == 'active':
raise InvalidRequest('User already active', status_code=400)
user.state = 'active'
save_model_user(user)
return jsonify(data=user.serialize()), 200
@user_blueprint.route('/<uuid:user_id>/reset-failed-login-count', methods=['POST'])
def user_reset_failed_login_count(user_id):
user_to_update = get_user_by_id(user_id=user_id)
reset_failed_login_count(user_to_update)
return jsonify(data=user_to_update.serialize()), 200
@user_blueprint.route('/<uuid:user_id>/verify/password', methods=['POST'])
def verify_user_password(user_id):
user_to_verify = get_user_by_id(user_id=user_id)
try:
txt_pwd = request.get_json()['password']
except KeyError:
message = 'Required field missing data'
errors = {'password': [message]}
raise InvalidRequest(errors, status_code=400)
if user_to_verify.check_password(txt_pwd):
reset_failed_login_count(user_to_verify)
return jsonify({}), 204
else:
increment_failed_login_count(user_to_verify)
message = 'Incorrect password'
errors = {'password': [message]}
raise InvalidRequest(errors, status_code=400)
@user_blueprint.route('/<uuid:user_id>/verify/code', methods=['POST'])
def verify_user_code(user_id):
data = request.get_json()
validate(data, post_verify_code_schema)
user_to_verify = get_user_by_id(user_id=user_id)
code = get_user_code(user_to_verify, data['code'], data['code_type'])
if user_to_verify.failed_login_count >= current_app.config.get('MAX_FAILED_LOGIN_COUNT'):
raise InvalidRequest("Code not found", status_code=404)
if not code:
# only relevant from sms
increment_failed_login_count(user_to_verify)
raise InvalidRequest("Code not found", status_code=404)
if datetime.utcnow() > code.expiry_datetime or code.code_used:
# sms and email
increment_failed_login_count(user_to_verify)
raise InvalidRequest("Code has expired", status_code=400)
user_to_verify.current_session_id = str(uuid.uuid4())
user_to_verify.logged_in_at = datetime.utcnow()
if data['code_type'] == 'email':
user_to_verify.email_access_validated_at = datetime.utcnow()
user_to_verify.failed_login_count = 0
save_model_user(user_to_verify)
use_user_code(code.id)
return jsonify({}), 204
# TODO: Remove the "verify" endpoint once admin no longer points at it
@user_blueprint.route('/<uuid:user_id>/complete/webauthn-login', methods=['POST'])
@user_blueprint.route('/<uuid:user_id>/verify/webauthn-login', methods=['POST'])
def complete_login_after_webauthn_authentication_attempt(user_id):
"""
    Complete login after a webauthn authentication. There's nothing
    webauthn-specific in this code, but the sms/email flows do this as part of
    `verify_user_code` above and this is the equivalent spot in the webauthn flow.
If the authentication was successful, we've already confirmed the user holds the right security key,
but we still need to check the max login count and set up a current_session_id and last_logged_in_at here.
If the authentication was unsuccessful then we just bump the failed_login_count in the db.
"""
data = request.get_json()
validate(data, post_verify_webauthn_schema)
user = get_user_by_id(user_id=user_id)
successful = data['successful']
if user.failed_login_count >= current_app.config.get('MAX_VERIFY_CODE_COUNT'):
raise InvalidRequest("Maximum login count exceeded", status_code=403)
if successful:
user.current_session_id = str(uuid.uuid4())
user.logged_in_at = datetime.utcnow()
user.failed_login_count = 0
save_model_user(user)
else:
increment_failed_login_count(user)
return jsonify({}), 204
@user_blueprint.route('/<uuid:user_id>/<code_type>-code', methods=['POST'])
def send_user_2fa_code(user_id, code_type):
user_to_send_to = get_user_by_id(user_id=user_id)
if count_user_verify_codes(user_to_send_to) >= current_app.config.get('MAX_VERIFY_CODE_COUNT'):
# Prevent more than `MAX_VERIFY_CODE_COUNT` active verify codes at a time
current_app.logger.warning('Too many verify codes created for user {}'.format(user_to_send_to.id))
else:
data = request.get_json()
if code_type == SMS_TYPE:
validate(data, post_send_user_sms_code_schema)
send_user_sms_code(user_to_send_to, data)
elif code_type == EMAIL_TYPE:
validate(data, post_send_user_email_code_schema)
send_user_email_code(user_to_send_to, data)
else:
abort(404)
return '{}', 204
def send_user_sms_code(user_to_send_to, data):
recipient = data.get('to') or user_to_send_to.mobile_number
secret_code = create_secret_code()
personalisation = {'verify_code': secret_code}
create_2fa_code(
current_app.config['SMS_CODE_TEMPLATE_ID'],
user_to_send_to,
secret_code,
recipient,
personalisation
)
def send_user_email_code(user_to_send_to, data):
recipient = user_to_send_to.email_address
secret_code = str(uuid.uuid4())
personalisation = {
'name': user_to_send_to.name,
'url': _create_2fa_url(user_to_send_to, secret_code, data.get('next'), data.get('email_auth_link_host'))
}
create_2fa_code(
current_app.config['EMAIL_2FA_TEMPLATE_ID'],
user_to_send_to,
secret_code,
recipient,
personalisation
)
def create_2fa_code(template_id, user_to_send_to, secret_code, recipient, personalisation):
template = dao_get_template_by_id(template_id)
# save the code in the VerifyCode table
create_user_code(user_to_send_to, secret_code, template.template_type)
reply_to = None
if template.template_type == SMS_TYPE:
reply_to = get_sms_reply_to_for_notify_service(recipient, template)
elif template.template_type == EMAIL_TYPE:
reply_to = template.service.get_default_reply_to_email_address()
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=recipient,
service=template.service,
personalisation=personalisation,
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=reply_to
)
# Assume that we never want to observe the Notify service's research mode
# setting for this notification - we still need to be able to log into the
# admin even if we're doing user research using this service:
send_notification_to_queue(saved_notification, False, queue=QueueNames.NOTIFY)
@user_blueprint.route('/<uuid:user_id>/change-email-verification', methods=['POST'])
def send_user_confirm_new_email(user_id):
user_to_send_to = get_user_by_id(user_id=user_id)
email, errors = email_data_request_schema.load(request.get_json())
if errors:
raise InvalidRequest(message=errors, status_code=400)
template = dao_get_template_by_id(current_app.config['CHANGE_EMAIL_CONFIRMATION_TEMPLATE_ID'])
service = Service.query.get(current_app.config['NOTIFY_SERVICE_ID'])
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=email['email'],
service=service,
personalisation={
'name': user_to_send_to.name,
'url': _create_confirmation_url(user=user_to_send_to, email_address=email['email']),
'feedback_url': current_app.config['ADMIN_BASE_URL'] + '/support'
},
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=service.get_default_reply_to_email_address()
)
send_notification_to_queue(saved_notification, False, queue=QueueNames.NOTIFY)
return jsonify({}), 204
@user_blueprint.route('/<uuid:user_id>/email-verification', methods=['POST'])
def send_new_user_email_verification(user_id):
request_json = request.get_json()
# when registering, we verify all users' email addresses using this function
user_to_send_to = get_user_by_id(user_id=user_id)
template = dao_get_template_by_id(current_app.config['NEW_USER_EMAIL_VERIFICATION_TEMPLATE_ID'])
service = Service.query.get(current_app.config['NOTIFY_SERVICE_ID'])
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=user_to_send_to.email_address,
service=service,
personalisation={
'name': user_to_send_to.name,
'url': _create_verification_url(
user_to_send_to,
base_url=request_json.get('admin_base_url'),
),
},
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=service.get_default_reply_to_email_address()
)
send_notification_to_queue(saved_notification, False, queue=QueueNames.NOTIFY)
return jsonify({}), 204
@user_blueprint.route('/<uuid:user_id>/email-already-registered', methods=['POST'])
def send_already_registered_email(user_id):
to, errors = email_data_request_schema.load(request.get_json())
template = dao_get_template_by_id(current_app.config['ALREADY_REGISTERED_EMAIL_TEMPLATE_ID'])
service = Service.query.get(current_app.config['NOTIFY_SERVICE_ID'])
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=to['email'],
service=service,
personalisation={
'signin_url': current_app.config['ADMIN_BASE_URL'] + '/sign-in',
'forgot_password_url': current_app.config['ADMIN_BASE_URL'] + '/forgot-password',
'feedback_url': current_app.config['ADMIN_BASE_URL'] + '/support'
},
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=service.get_default_reply_to_email_address()
)
send_notification_to_queue(saved_notification, False, queue=QueueNames.NOTIFY)
return jsonify({}), 204
@user_blueprint.route('/<uuid:user_id>', methods=['GET'])
@user_blueprint.route('', methods=['GET'])
def get_user(user_id=None):
users = get_user_by_id(user_id=user_id)
result = [x.serialize() for x in users] if isinstance(users, list) else users.serialize()
return jsonify(data=result)
@user_blueprint.route('/<uuid:user_id>/service/<uuid:service_id>/permission', methods=['POST'])
def set_permissions(user_id, service_id):
# TODO fix security hole, how do we verify that the user
# who is making this request has permission to make the request.
service_user = dao_get_service_user(user_id, service_id)
user = get_user_by_id(user_id)
service = dao_fetch_service_by_id(service_id=service_id)
data = request.get_json()
validate(data, post_set_permissions_schema)
permission_list = [
Permission(service_id=service_id, user_id=user_id, permission=p['permission'])
for p in data['permissions']
]
permission_dao.set_user_service_permission(user, service, permission_list, _commit=True, replace=True)
if 'folder_permissions' in data:
folders = [
dao_get_template_folder_by_id_and_service_id(folder_id, service_id)
for folder_id in data['folder_permissions']
]
service_user.folders = folders
dao_update_service_user(service_user)
return jsonify({}), 204
@user_blueprint.route('/email', methods=['POST'])
def fetch_user_by_email():
email, errors = email_data_request_schema.load(request.get_json())
if errors:
raise InvalidRequest(message=errors, status_code=400)
fetched_user = get_user_by_email(email['email'])
result = fetched_user.serialize()
return jsonify(data=result)
# TODO: Deprecate this GET endpoint
@user_blueprint.route('/email', methods=['GET'])
def get_by_email():
email = request.args.get('email')
if not email:
error = 'Invalid request. Email query string param required'
raise InvalidRequest(error, status_code=400)
fetched_user = get_user_by_email(email)
result = fetched_user.serialize()
return jsonify(data=result)
@user_blueprint.route('/find-users-by-email', methods=['POST'])
def find_users_by_email():
email, errors = partial_email_data_request_schema.load(request.get_json())
fetched_users = get_users_by_partial_email(email['email'])
result = [user.serialize_for_users_list() for user in fetched_users]
return jsonify(data=result), 200
@user_blueprint.route('/reset-password', methods=['POST'])
def send_user_reset_password():
request_json = request.get_json()
email, errors = email_data_request_schema.load(request_json)
user_to_send_to = get_user_by_email(email['email'])
template = dao_get_template_by_id(current_app.config['PASSWORD_RESET_TEMPLATE_ID'])
service = Service.query.get(current_app.config['NOTIFY_SERVICE_ID'])
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=email['email'],
service=service,
personalisation={
'user_name': user_to_send_to.name,
'url': _create_reset_password_url(
user_to_send_to.email_address,
base_url=request_json.get('admin_base_url'),
next_redirect=request_json.get('next')
)
},
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=service.get_default_reply_to_email_address()
)
send_notification_to_queue(saved_notification, False, queue=QueueNames.NOTIFY)
return jsonify({}), 204
@user_blueprint.route('/<uuid:user_id>/update-password', methods=['POST'])
def update_password(user_id):
user = get_user_by_id(user_id=user_id)
req_json = request.get_json()
password = req_json.get('_password')
update_dct, errors = user_update_password_schema_load_json.load(req_json)
if errors:
raise InvalidRequest(errors, status_code=400)
update_user_password(user, password)
return jsonify(data=user.serialize()), 200
@user_blueprint.route('/<uuid:user_id>/organisations-and-services', methods=['GET'])
def get_organisations_and_services_for_user(user_id):
user = get_user_and_accounts(user_id)
data = get_orgs_and_services(user)
return jsonify(data)
def _create_reset_password_url(email, next_redirect, base_url=None):
data = json.dumps({'email': email, 'created_at': str(datetime.utcnow())})
static_url_part = '/new-password/'
full_url = url_with_token(data, static_url_part, current_app.config, base_url=base_url)
if next_redirect:
full_url += '?{}'.format(urlencode({'next': next_redirect}))
return full_url
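# Sketch (illustrative only; host and token are placeholders): the
# _create_*_url helpers embed a signed token into an admin-facing link, so a
# reset-password URL looks roughly like
#   https://admin.example/new-password/<signed-token>?next=%2Fservices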
def _create_verification_url(user, base_url):
data = json.dumps({'user_id': str(user.id), 'email': user.email_address})
url = '/verify-email/'
return url_with_token(data, url, current_app.config, base_url=base_url)
def _create_confirmation_url(user, email_address):
data = json.dumps({'user_id': str(user.id), 'email': email_address})
url = '/user-profile/email/confirm/'
return url_with_token(data, url, current_app.config)
def _create_2fa_url(user, secret_code, next_redirect, email_auth_link_host):
data = json.dumps({'user_id': str(user.id), 'secret_code': secret_code})
url = '/email-auth/'
full_url = url_with_token(data, url, current_app.config, base_url=email_auth_link_host)
if next_redirect:
full_url += '?{}'.format(urlencode({'next': next_redirect}))
return full_url
def get_orgs_and_services(user):
return {
'organisations': [
{
'name': org.name,
'id': org.id,
'count_of_live_services': len(org.live_services),
}
for org in user.organisations if org.active
],
'services': [
{
'id': service.id,
'name': service.name,
'restricted': service.restricted,
'organisation': service.organisation.id if service.organisation else None,
}
for service in user.services if service.active
]
}
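# Shape sketch (illustrative only; values are placeholders) of the payload
# returned by get_organisations_and_services_for_user:
#
#   {
#       'organisations': [
#           {'name': 'Org', 'id': '<uuid>', 'count_of_live_services': 2},
#       ],
#       'services': [
#           {'id': '<uuid>', 'name': 'Service', 'restricted': False,
#            'organisation': '<uuid or None>'},
#       ],
#   }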
| alphagov/notifications-api | app/user/rest.py | Python | mit | 22,166 |
from django.db import models
from eventtools.models import EventModel, OccurrenceModel, GeneratorModel, ExclusionModel
from django.conf import settings
class ExampleEvent(EventModel):
difference_from_parent = models.CharField(max_length=250, blank=True, null=True)
def __unicode__(self):
if self.difference_from_parent and self.parent:
return u"%s (%s)" % (self.title, self.difference_from_parent)
return self.title
class EventMeta:
fields_to_inherit = ['title',]
class ExampleGenerator(GeneratorModel):
event = models.ForeignKey(ExampleEvent, related_name="generators")
class ExampleOccurrence(OccurrenceModel):
generated_by = models.ForeignKey(ExampleGenerator, related_name="occurrences", blank=True, null=True)
event = models.ForeignKey(ExampleEvent, related_name="occurrences")
class ExampleExclusion(ExclusionModel):
event = models.ForeignKey(ExampleEvent, related_name="exclusions")
class ExampleTicket(models.Model):
# used to test that an occurrence is unhooked rather than deleted.
occurrence = models.ForeignKey(ExampleOccurrence, on_delete=models.PROTECT) | ixc/glamkit-eventtools | eventtools/tests/eventtools_testapp/models.py | Python | bsd-3-clause | 1,174 |
"""Test the TcEx Utils Module."""
# pylint: disable=no-self-use
class TestIsCidr:
"""Test the TcEx Utils Module."""
def test_ipv4_cidr(self, tcex):
"""Test an IPv4 CIDR range
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
s = '8.8.8.0/24'
assert tcex.utils.is_cidr(s)
def test_ipv4_cidr_host_bits_set(self, tcex):
"""Test an IPv4 CIDR range with the host bits set.
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
s = '8.8.8.1/24'
assert tcex.utils.is_cidr(s)
def test_ipv6_cidr(self, tcex):
"""Test an IPv6 CIDR range
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
s = '2001:db8::/128'
assert tcex.utils.is_cidr(s)
def test_invalid_ip_not_cidr(self, tcex):
"""Test a string that is not a CIDR range
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
s = '8.8.8.8'
assert not tcex.utils.is_cidr(s)
def test_invalid(self, tcex):
"""Test a string that is not a CIDR range
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
s = 'foo bar'
assert not tcex.utils.is_cidr(s)
| kstilwell/tcex | tests/utils/test_is_cidr.py | Python | apache-2.0 | 1,384 |
#!/usr/bin/python
#
# Copyright 2008, Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code sample checks a keyword to see whether it will get any traffic."""
import SOAPpy
# Provide AdWords login information.
email = 'INSERT_LOGIN_EMAIL_HERE'
password = 'INSERT_PASSWORD_HERE'
client_email = 'INSERT_CLIENT_LOGIN_EMAIL_HERE'
useragent = 'INSERT_COMPANY_NAME: AdWords API Python Sample Code'
developer_token = 'INSERT_DEVELOPER_TOKEN_HERE'
application_token = 'INSERT_APPLICATION_TOKEN_HERE'
# Define SOAP headers.
headers = SOAPpy.Types.headerType()
headers.email = email
headers.password = password
headers.clientEmail = client_email
headers.useragent = useragent
headers.developerToken = developer_token
headers.applicationToken = application_token
# Set up service connection. To view XML request/response, change value of
# estimator_service.config.debug to 1. To send requests to production
# environment, replace "sandbox.google.com" with "adwords.google.com".
namespace = 'https://sandbox.google.com/api/adwords/v12'
estimator_service = SOAPpy.SOAPProxy(namespace + '/TrafficEstimatorService',
header=headers)
estimator_service.config.debug = 0
# Create keyword structure.
keyword = {
'keywordText': 'mars cruise',
'keywordType': SOAPpy.Types.untypedType('Exact'),
'language': 'en'
}
# Check keyword traffic.
estimates = estimator_service.checkKeywordTraffic([keyword])
# Convert to a list if we get back a single object.
if len(estimates) > 0 and not isinstance(estimates, list):
estimates = [estimates]
# Display estimate.
for index in range(len(estimates)):
print 'Estimate for keyword "%s" is "%s".' % \
(keyword['keywordText'], estimates[index])
| madaboutcode/dotless | lib/PEG_GrammarExplorer/PEG_GrammarExplorer/PegSamples/python_2_5_2/input/adwords/awapi_python_samples_1.0.0/src/check_keyword_traffic.py | Python | apache-2.0 | 2,260 |
"""
hashlib_mapping_crc16.py
Mapping hashlib-style functions to CRC16 hash types.
"""
from __future__ import absolute_import
import crcmod
from hashit.core.hash_type import HashType
HASHLIB_MAPPING_CRC16 = {
HashType.CRC16: crcmod.predefined.Crc('crc-16').new,
HashType.CRC16_AUG_CCITT: crcmod.predefined.Crc('crc-aug-ccitt').new,
HashType.CRC16_BUYPASS: crcmod.predefined.Crc('crc-16-buypass').new,
HashType.CRC16_CCITT_FALSE: crcmod.predefined.Crc('crc-ccitt-false').new,
HashType.CRC16_DDS_110: crcmod.predefined.Crc('crc-16-dds-110').new,
HashType.CRC16_DECT_R: crcmod.predefined.Crc('crc-16-dect').new,
HashType.CRC16_DNP: crcmod.predefined.Crc('crc-16-dnp').new,
HashType.CRC16_EN_13757: crcmod.predefined.Crc('crc-16-en-13757').new,
HashType.CRC16_GENIUS: crcmod.predefined.Crc('crc-16-genibus').new,
HashType.CRC16_KERMIT: crcmod.predefined.Crc('kermit').new,
HashType.CRC16_MAXIM: crcmod.predefined.Crc('crc-16-maxim').new,
HashType.CRC16_MCRF4XX: crcmod.predefined.Crc('crc-16-mcrf4xx').new,
HashType.CRC16_MODBUS: crcmod.predefined.Crc('modbus').new,
HashType.CRC16_RIELLO: crcmod.predefined.Crc('crc-16-riello').new,
HashType.CRC16_T10_DIF: crcmod.predefined.Crc('crc-16-t10-dif').new,
HashType.CRC16_TELEDISK: crcmod.predefined.Crc('crc-16-teledisk').new,
HashType.CRC16_USB: crcmod.predefined.Crc('crc-16-usb').new,
HashType.CRC16_X_25: crcmod.predefined.Crc('x-25').new,
HashType.CRC16_XMODEM: crcmod.predefined.Crc('xmodem').new,
}
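# Usage sketch (illustrative only): each mapped value is the bound new() method
# of a predefined crcmod.Crc instance, so it behaves like a hashlib constructor:
#
#   h = HASHLIB_MAPPING_CRC16[HashType.CRC16](b'123456789')
#   h.hexdigest()   # hex check value (CRC-16/ARC of b'123456789' is 0xBB3D)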
| art-of-dom/hash-it | hashit/core/mappings/hashlib_mapping_crc16.py | Python | mit | 1,527 |
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
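# Usage sketch (illustrative only) of the helpers above:
#
#   check_cv_coverage(cval.KFold(10, n_folds=5), expected_n_iter=5, n_samples=10)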
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
    # When n is not an integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not an integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def check_label_kfold(shuffle):
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels,
n_folds=n_folds,
shuffle=shuffle,
random_state=rng).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels,
n_folds=n_folds,
shuffle=shuffle,
random_state=rng):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels,
n_folds=n_folds,
shuffle=shuffle,
random_state=rng).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels,
n_folds=n_folds,
shuffle=shuffle,
random_state=rng):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_label_kfold():
for shuffle in [False, True]:
yield check_label_kfold, shuffle
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| JsNoNo/scikit-learn | sklearn/tests/test_cross_validation.py | Python | bsd-3-clause | 47,366 |
#!/usr/bin/env python
# coding=utf-8
# Jurius Client for Pla-Rail
#
# kumapapa2012, Kenichi Mori
#
#
import socket
import time
import commands
host = '127.0.0.1'
port = 10500
#
# コマンド処理
#
def process_command (rcvmsg):
# 最低限の処理
# fail なら終了
if ( rcvmsg.find("RECOGFAIL") >=0 ):
print 'Failed to recognize.'
return
# "GO_" あれば前進
if ( rcvmsg.find("GO_") >=0 ):
print commands.getoutput("python start.py")
# "STOP_" あれば停止
if ( rcvmsg.find("STOP_") >=0 ):
print commands.getoutput("python stop.py")
#
# 接続
#
sock_cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
retry=0
while retry < 5:
try:
sock_cli.connect((host,port))
except:
time.sleep(3)
retry=retry+1
else:
break
if(retry >= 5):
exit
#
# メインループ
#
while True:
print '==== ==== ==== Waiting for command ==== ==== ===='
rcvmsg = sock_cli.recv(4096) # 4kB 固定。。。
print 'Received %d bytes'%len(rcvmsg)
print '==== ==== ==== DATA ==== ==== ===='
# print rcvmsg
process_command(rcvmsg)
sock_cli.close()
| Kumapapa2012/Raspberry-Pi-Zero-on-PLA-RAIL | _main/home/pi/kmori-Pla-Rail-Scripts/julius_cli.py | Python | mit | 1,214 |
#
# namcap rules - capsnames
# Copyright (C) 2003-2007 Jason Chu <jason@archlinux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import pacman,re
class package:
def short_name(self):
return "capsnames"
def long_name(self):
return "Verifies package name does not include upper case letters"
def prereq(self):
return ""
def analyze(self, pkginfo, tar):
ret = [[],[],[]]
if re.search('[A-Z]', pkginfo.name) != None:
ret[0].append(("package-name-in-uppercase", ()))
return ret
def type(self):
return "pkgbuild"
# vim: set ts=4 sw=4 noet:
| abhidg/namcap | Namcap/capsnames.py | Python | gpl-2.0 | 1,245 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2017 GEM Foundation and G. Weatherill
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Trellising Utilities
"""
import numpy as np
from math import sqrt, ceil
def best_subplot_dimensions(nplots):
"""
Returns the optimum arrangement of number of rows and number of
columns given a total number of plots. Converted from the Matlab function
"BestArrayDims"
:param int nplots:
Number of subplots
:returns:
Number of rows (int)
Number of columns (int)
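
    Example (illustrative): best_subplot_dimensions(7) returns (3, 3),
    i.e. a 3 x 3 grid (with two empty panels) minimises the combined
    waste / perimeter cost.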
"""
if nplots == 1:
return 1, 1
nplots = float(nplots)
d_l = ceil(sqrt(nplots))
wdth = np.arange(1., d_l + 1., 1.)
hgt = np.ceil(nplots / wdth)
waste = (wdth * hgt - nplots) / nplots
savr = (2. * wdth + 2. * hgt) / (wdth * hgt)
cost = 0.5 * savr + 0.5 * waste
num_col = np.argmin(cost)
num_row = int(hgt[num_col])
return num_row, num_col + 1
| g-weatherill/gmpe-smtk | smtk/trellis/trellis_utils.py | Python | agpl-3.0 | 1,612 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Script which generates a collage of provider logos from multiple provider
# logo files.
#
# It works in two steps:
#
# 1. Resize all the provider logo files (reduce the dimensions)
# 2. Assemble a final image from the resized images
import os
import sys
import argparse
import subprocess
import random
from os.path import join as pjoin
DIMENSIONS = '150x150' # Dimensions of the resized image (<width>x<height>)
GEOMETRY = '+4+4'  # Spacing added around each image tile (+<x border>+<y border>)
TO_CREATE_DIRS = ['resized/', 'final/']
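# With the defaults above, `convert` shrinks each logo to fit inside a
# 150x150 box (preserving aspect ratio) and `montage` packs the resized
# logos with a 4-pixel gap around every tile.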
def setup(output_path):
"""
Create missing directories.
"""
for directory in TO_CREATE_DIRS:
final_path = pjoin(output_path, directory)
if not os.path.exists(final_path):
os.makedirs(final_path)
def get_logo_files(input_path):
logo_files = os.listdir(input_path)
logo_files = [name for name in logo_files if
'resized' not in name and name.endswith('png')]
logo_files = [pjoin(input_path, name) for name in logo_files]
return logo_files
def resize_images(logo_files, output_path):
resized_images = []
for logo_file in logo_files:
name, ext = os.path.splitext(os.path.basename(logo_file))
new_name = '%s%s' % (name, ext)
out_name = pjoin(output_path, 'resized/', new_name)
print('Resizing image: %(name)s' % {'name': logo_file})
values = {'name': logo_file, 'out_name': out_name,
'dimensions': DIMENSIONS}
cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s'
cmd = cmd % values
subprocess.call(cmd, shell=True)
resized_images.append(out_name)
return resized_images
def assemble_final_image(resized_images, output_path):
final_name = pjoin(output_path, 'final/logos.png')
random.shuffle(resized_images)
values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY,
'out_name': final_name}
cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s'
cmd = cmd % values
print('Generating final image: %(name)s' % {'name': final_name})
subprocess.call(cmd, shell=True)
def main(input_path, output_path):
if not os.path.exists(input_path):
print('Path doesn\'t exist: %s' % (input_path))
sys.exit(2)
if not os.path.exists(output_path):
print('Path doesn\'t exist: %s' % (output_path))
sys.exit(2)
logo_files = get_logo_files(input_path=input_path)
setup(output_path=output_path)
resized_images = resize_images(logo_files=logo_files,
output_path=output_path)
assemble_final_image(resized_images=resized_images,
output_path=output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Assemble provider logos '
' in a single image')
parser.add_argument('--input-path', action='store',
help='Path to directory which contains provider '
'logo files')
parser.add_argument('--output-path', action='store',
help='Path where the new files will be written')
args = parser.parse_args()
input_path = os.path.abspath(args.input_path)
output_path = os.path.abspath(args.output_path)
main(input_path=input_path, output_path=output_path)
| Kami/libcloud | contrib/generate_provider_logos_collage_image.py | Python | apache-2.0 | 4,224 |
def get_service(context):
return context.services["mpd"]
def get_browser_store(context):
return get_service(context).data.browser
def get_player_status(context):
return get_service(context).data.status
def get_playlists(context):
return get_service(context).data.playlists
def get_queue_store(context):
return get_service(context).data.queue
def get_search_store(context):
return get_service(context).data.search
def get_cmd_sender(context):
return get_service(context).handler
def length_str(time):
if time > 0:
m = int(time / 60)
s = time % 60
if m <= 0 and s <= 0:
return "--:--"
elif m > 99:
return str(m) + "m"
return str(m).zfill(2) + ":" + str(s).zfill(2)
else:
return ""
def playtime_str(time):
return ", ".join(filter(lambda s: not s.startswith("0"),
["%i days" % (time / (24 * 60 * 60)),
"%i hours" % (time / (60 * 60) % 24),
"%i minutes" % (time / 60 % 60),
"%i seconds" % (time % (60))]))
def mpd_status_format(format, status):
return format.format(cartist=status.current.artist,
calbumartist=status.current.albumartist,
calbum=status.current.album,
ctitle=status.current.title,
cdate=status.current.date)
def mpd_song_format(format, song):
return format.format(artist=song.artist,
album=song.album,
title=song.title,
file=song.file,
time=song.time)
| dstenb/pylaunchr-mpd | mpd/utils.py | Python | mit | 1,653 |
import simplejson, sys, shutil, os, ast , re
from mpipe import OrderedStage , Pipeline
import glob, json, uuid, logging , time ,datetime
import subprocess, threading,traceback
from collections import OrderedDict
from pprint import pprint , pformat
import parallel_tools as parallel
from mpipe import OrderedStage , Pipeline
import contig_id_mapping as c_mapping
import script_util
import handler_utils as handler_util
from biokbase.workspace.client import Workspace
from biokbase.auth import Token
import multiprocessing as mp
import doekbase.data_api
from biokbase.RNASeq import rnaseq_util
from doekbase.data_api.annotation.genome_annotation.api import GenomeAnnotationAPI , GenomeAnnotationClientAPI
from doekbase.data_api.sequence.assembly.api import AssemblyAPI , AssemblyClientAPI
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
try:
from biokbase.HandleService.Client import HandleService
except:
from biokbase.AbstractHandle.Client import AbstractHandle as HandleService
from AssemblyUtil.AssemblyUtilClient import AssemblyUtil
from GenomeFileUtil.GenomeFileUtilClient import GenomeFileUtil
from biokbase.RNASeq.StringTie import StringTie
class StringTieSampleException(Exception):
pass
class StringTieSampleSet(StringTie):
def __init__(self, logger, directory, urls, max_cores):
pprint(self.__class__)
super(self.__class__, self).__init__(logger, directory, urls, max_cores)
#super(StringtTieSampleSet, self).__init__(logger, directory, urls)
# user defined shared variables across methods
#self.sample_info = None
self.alignmentset_info = None
self.num_threads = 1
def prepare(self):
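        # Overview: pull the alignment set from the workspace, make sure a GTF
        # annotation exists for its genome (creating one if it does not), then
        # build one StringTie task per mapped alignment in the set.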
# for quick testing, we recover parameters here
ws_client = self.common_params['ws_client']
hs = self.common_params['hs_client']
params = self.method_params
logger = self.logger
token = self.common_params['user_token']
stringtie_dir = self.directory
try:
a_sampleset = ws_client.get_objects(
[{'name' : params['alignmentset_id'],'workspace' : params['ws_id']}])[0]
a_sampleset_info = ws_client.get_object_info_new({"objects" :
[{'name' : params['alignmentset_id'],'workspace' : params['ws_id']}]})[0]
self.alignmentset_info = a_sampleset_info
a_sampleset_id = str(a_sampleset_info[6]) + '/' + str(a_sampleset_info[0]) + '/' + str(a_sampleset_info[4])
alignmentset_id = a_sampleset_id
except Exception,e:
logger.exception("".join(traceback.format_exc()))
raise Exception("Error Downloading objects from the workspace ")
#read_sample_id']
### Check if the gtf file exists in the workspace. if exists download the file from that
genome_id = a_sampleset['data']['genome_id']
genome_name = ws_client.get_object_info([{"ref" :genome_id}],includeMetadata=None)[0][1]
ws_gtf = genome_name+"_GTF_Annotation"
gtf_file = script_util.check_and_download_existing_handle_obj( logger,
ws_client,
self.urls,
params['ws_id'],
ws_gtf,
"KBaseRNASeq.GFFAnnotation",
stringtie_dir,
token )
if gtf_file is None:
rnaseq_util.create_gtf_annotation_from_genome( logger,
ws_client,
hs,
self.urls,
params['ws_id'],
genome_id,
genome_name,
stringtie_dir,
token )
gtf_info = ws_client.get_object_info_new({"objects": [{'name': ws_gtf , 'workspace': params['ws_id']}]})[0]
gtf_id = str(gtf_info[6]) + '/' + str(gtf_info[0]) + '/' + str(gtf_info[4])
self.tool_opts = { k:str(v) for k,v in params.iteritems() if not k in ('ws_id','alignmentset_id', 'num_threads') and v is not None }
alignment_ids = a_sampleset['data']['sample_alignments']
m_alignment_names = a_sampleset['data']['mapped_rnaseq_alignments']
self.sampleset_id = a_sampleset['data']['sampleset_id']
### Get List of Alignments Names
self.align_names = []
for d_align in m_alignment_names:
for i , j in d_align.items():
self.align_names.append(j)
m_alignment_ids = a_sampleset['data']['mapped_alignments_ids']
self.num_jobs = len(alignment_ids)
if self.num_jobs < 2:
raise ValueError("Please ensure you have atleast 2 alignments to run Stringtie in Set mode")
logger.info(" Number of threads used by each process {0}".format(self.num_threads))
count = 0
for i in m_alignment_ids:
for sample_name, alignment_id in i.items():
task_param = {'job_id' : alignment_id,
'gtf_file' : gtf_file,
'ws_id' : params['ws_id'],
'genome_id' : genome_id,
'stringtie_dir' : self.directory,
'annotation_id': gtf_id,
'sample_id' : sample_name,
'alignmentset_id' : alignmentset_id
}
self.task_list.append(task_param)
count = count + 1
    def collect(self):
expressionSet_name = self.method_params['alignmentset_id']+"_stringtie_ExpressionSet"
self.logger.info(" Creating ExpressionSet for the Assemblies {0}".format(expressionSet_name))
# TODO: Split alignment set and report method
reportObj = rnaseq_util.create_RNASeq_ExpressionSet_and_build_report( self.logger,
self.common_params['ws_client'],
self.tool_used,
self.tool_version,
self.tool_opts,
self.method_params['ws_id'],
self.align_names,
self.task_list[0]['alignmentset_id'],
self.task_list[0]['genome_id'],
self.sampleset_id,
self.results,
expressionSet_name
)
self.returnVal = { 'output' : expressionSet_name ,'workspace' : self.method_params['ws_id'] }
# reportName = 'Align_Reads_using_Hisat2_'+str(hex(uuid.getnode()))
# report_info = self.common_params['ws_client'].save_objects({
# 'id':self.alignmentset_info[6],
# 'objects':[
# {
# 'type':'KBaseReport.Report',
# 'data':reportObj,
# 'name':reportName,
# 'meta':{},
# 'hidden':1, # important! make sure the report is hidden
# #'provenance':provenance
# }
# ]
# })[0]
# self.returnVal = { "report_name" : reportName,"report_ref" : str(report_info[6]) + '/' + str(report_info[0]) + '/' + str(report_info[4]) }
| kbase/KBaseRNASeq | lib/biokbase/RNASeq/StringTieSampleSet.py | Python | mit | 9,003 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-memcache
# [START memcache_v1beta2_generated_CloudMemcache_CreateInstance_async]
from google.cloud import memcache_v1beta2
async def sample_create_instance():
# Create a client
client = memcache_v1beta2.CloudMemcacheAsyncClient()
# Initialize request argument(s)
resource = memcache_v1beta2.Instance()
resource.name = "name_value"
resource.node_count = 1070
resource.node_config.cpu_count = 976
resource.node_config.memory_size_mb = 1505
request = memcache_v1beta2.CreateInstanceRequest(
parent="parent_value",
instance_id="instance_id_value",
resource=resource,
)
# Make the request
operation = client.create_instance(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END memcache_v1beta2_generated_CloudMemcache_CreateInstance_async]
| googleapis/python-memcache | samples/generated_samples/memcache_v1beta2_generated_cloud_memcache_create_instance_async.py | Python | apache-2.0 | 1,851 |
import numpy as np
from bokeh.plotting import *
N = 100
x = np.linspace(0.1, 5, N)
output_file("logplot.html", title="log axis example")
hold()
figure(tools="pan,wheel_zoom,box_zoom,reset,previewsave",
y_axis_type="log", y_range=[0.001, 10**22], title="log axis example")
line(x, np.sqrt(x), line_width=2, line_color="yellow", legend="y=sqrt(x)")
line(x, x, legend="y=x")
circle(x, x, legend="y=x")
line(x, x**2, legend="y=x**2")
circle(x, x**2, fill_color=None, line_color="green", legend="y=x**2")
line(x, 10**x, line_color="red", line_width=2, legend="y=10^x")
line(x, x**x, line_color="purple", line_width=2, legend="y=x^x")
line(x, 10**(x**2), line_color="orange", line_width=2, legend="y=10^(x^2)")
show() # open a browser | the13fools/Bokeh_Examples | plotting/file/logplot.py | Python | bsd-3-clause | 746 |
from flask import Flask, render_template
import config
# Initializing the web app
app = Flask(__name__)
# Views
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(host=config.flask_host, port=config.flask_port, debug=config.flask_debug) | halflings/flask-base | web.py | Python | apache-2.0 | 298 |
from abc import ABCMeta, abstractmethod
class Activation(object):
__metaclass__ = ABCMeta
@abstractmethod
def apply(self, vector):
pass
@abstractmethod
def apply_derivative(self, vector):
pass | alnaav/shredNN | nn/activations/activation.py | Python | apache-2.0 | 232 |
from .base_client import GoogleBaseClient
class GoogleIAmClient(GoogleBaseClient):
def __init__(self, project, **kwargs):
super().__init__(f'https://iam.googleapis.com/v1/projects/{project}', **kwargs)
# https://cloud.google.com/iam/docs/reference/rest
| hail-is/hail | hail/python/hailtop/aiocloud/aiogoogle/client/iam_client.py | Python | mit | 272 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import re
from typing import Optional
def get_cgroup_cpuset():
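    # Example (illustrative): a cpuset.cpus line of "0-2,7" parses to
    # [0, 1, 2, 7].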
with open("/sys/fs/cgroup/cpuset/cpuset.cpus", "r") as f:
content = f.readlines()
cpu_set = []
values = content[0].strip().split(",")
for value in values:
if "-" in value:
# Parse the value like "2-4"
start, end = value.split("-")
cpu_set.extend([i for i in range(int(start), int(end) + 1)])
else:
cpu_set.append(int(value))
return cpu_set
def get_cpu_info():
cpuinfo = []
args = ["lscpu", "--parse=CPU,Core,Socket"]
lscpu_info = subprocess.check_output(args, universal_newlines=True).split("\n")
# Get information about cpu, core, socket and node
for line in lscpu_info:
pattern = r"^([\d]+,[\d]+,[\d]+)"
regex_out = re.search(pattern, line)
if regex_out:
cpuinfo.append(regex_out.group(1).strip().split(","))
get_physical_core = {}
get_socket = {}
for line in cpuinfo:
int_line = [int(x) for x in line]
l_id, p_id, s_id = int_line
get_physical_core[l_id] = p_id
get_socket[l_id] = s_id
return get_physical_core, get_socket
def schedule_workers(num_workers: int, cores_per_worker: Optional[int] = None):
# If we are in a docker container whose --cpuset-cpus are set,
# we can get available cpus in /sys/fs/cgroup/cpuset/cpuset.cpus.
# If we are not in a container, this just return all cpus.
cpuset = get_cgroup_cpuset()
cpuset = sorted(cpuset)
l_core_to_p_core, l_core_to_socket = get_cpu_info()
p2l = {}
p_cores = set()
for logical_core in cpuset:
physical_core = l_core_to_p_core[logical_core]
p_cores.add(physical_core)
if physical_core not in p2l:
p2l[physical_core] = logical_core
p_cores = sorted(p_cores)
if cores_per_worker is None:
cores_per_worker = len(p_cores) // num_workers
msg = "total number of cores requested must be smaller or" \
" equal than the physical cores available"
assert cores_per_worker * num_workers <= len(p_cores), msg
schedule = []
for i in range(num_workers):
schedule.append([p2l[core] for core in p_cores[i*cores_per_worker:(i+1)*cores_per_worker]])
return schedule
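# Illustrative usage (the exact logical-core ids depend on the machine's
# lscpu topology and on any container cpuset):
#   schedule_workers(2, cores_per_worker=2)  ->  e.g. [[0, 1], [2, 3]]
# Each inner list holds one chosen logical core per physical core assigned
# to that worker.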
| intel-analytics/BigDL | python/orca/src/bigdl/orca/cpu_info.py | Python | apache-2.0 | 2,911 |
# -*- coding: utf-8 -*-
#https://www.facebook.com/groups/vietkodi/
import sys
import urllib, urllib2, re
import os
import xbmc,xbmcplugin,xbmcgui,xbmcaddon
import getlink
import simplejson as json
from config import VIETMEDIA_HOST
from addon import alert, notify, ADDON, ADDON_ID, ADDON_PROFILE
from platform import PLATFORM
import uuid
import SimpleDownloader as downloader
import remove_accents
downloader = downloader.SimpleDownloader()
reload(sys);
sys.setdefaultencoding("utf8")
HANDLE = int(sys.argv[1])
CURRENT_PATH = ADDON.getAddonInfo("path")
PROFILE_PATH = xbmc.translatePath(ADDON_PROFILE).decode("utf-8")
VERSION = ADDON.getAddonInfo("version")
USER = ADDON.getSetting('user_id')
USER_PIN_CODE = ADDON.getSetting('user_pin_code')
USER_VIP_CODE = ADDON.getSetting('user_vip_code')
LOCK_PIN = ADDON.getSetting('lock_pin')
def fetch_data(url, headers=None):
visitor = get_visitor()
if headers is None:
headers = {
'User-agent' : 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 VietMedia/1.0',
'Referers' : 'http://www..google.com',
'X-Visitor' : visitor,
'X-Version' : VERSION,
'X-User' : USER,
'X-User-Pin' : USER_PIN_CODE,
'X-User-VIP' : USER_VIP_CODE,
'X-Platform' : PLATFORM,
}
try:
req = urllib2.Request(url,headers=headers)
f = urllib2.urlopen(req)
body=f.read()
return json.loads(body)
except:
pass
def get_visitor():
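    # The visitor id is generated once, cached in visitor.dat under the addon
    # profile directory, and reused as the X-Visitor header on later requests.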
filename = os.path.join(PROFILE_PATH, 'visitor.dat' )
visitor = ''
if os.path.exists(filename):
with open(filename, "r") as f:
visitor = f.readline()
else:
try:
visitor = str(uuid.uuid1())
except:
visitor = str(uuid.uuid4())
if not os.path.exists(PROFILE_PATH):
os.makedirs(PROFILE_PATH)
with open(filename, "w") as f:
f.write(visitor)
return visitor
def add_lock_dir(item_path):
item_path = re.sub('&d=__.*__','',item_path)
filename = os.path.join(PROFILE_PATH, 'lock_dir.dat' )
with open(filename,"a+") as f:
f.write(item_path + "\n")
notify('Đã khoá thành công')
def remove_lock_dir(item_path):
filename = os.path.join(PROFILE_PATH, 'lock_dir.dat' )
if not os.path.exists(filename):
return
dialog = xbmcgui.Dialog()
result = dialog.input('Nhập mã khoá', type=xbmcgui.INPUT_ALPHANUM, option=xbmcgui.ALPHANUM_HIDE_INPUT)
if len(result) == 0 or result != LOCK_PIN:
notify('Sai mật mã, vui lòng nhập lại')
return
item_path = re.sub('&d=__.*__','',item_path)
with open(filename,"r") as f:
lines = f.readlines()
with open(filename,"w") as f:
for line in lines:
if line!=item_path + "\n":
f.write(line)
notify('Đã mở khoá thành công')
def check_lock(item_path):
filename = os.path.join(PROFILE_PATH, 'lock_dir.dat' )
if not os.path.exists(filename):
return False
with open(filename,"r") as f:
lines = f.readlines()
return (item_path + "\n") in lines
def play(data):
link = data["url"]
link = getlink.get(link)
if link is None or len(link) == 0:
notify('Lỗi không lấy được link phim.')
return
subtitle = ''
links = link.split('[]')
if len(links) == 2:
subtitle = links[1]
elif data.get('subtitle'):
subtitle = data.get('subtitle')
link = links[0]
item = xbmcgui.ListItem(path=link, thumbnailImage=xbmc.getInfoLabel("ListItem.Art(thumb)"))
xbmcplugin.setResolvedUrl(HANDLE, True, item)
if len(subtitle) > 0:
subtitlePath = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('path')).decode("utf-8")
subfile = xbmc.translatePath(os.path.join(subtitlePath, "temp.sub"))
try:
if os.path.exists(subfile):
os.remove(subfile)
f = urllib2.urlopen(subtitle)
with open(subfile, "wb") as code:
code.write(f.read())
xbmc.sleep(3000)
xbmc.Player().setSubtitles(subfile)
#notify('Tải phụ đề thành công')
except:
notify('Không tải được phụ đề phim.')
def go():
url = sys.argv[0].replace("plugin://%s" % ADDON_ID, VIETMEDIA_HOST ) + sys.argv[2]
if url == VIETMEDIA_HOST + '/':
url += '?action=menu'
#Settings
if '__settings__' in url:
ADDON.openSettings()
return
#Search
if '__search__' in url:
keyboardHandle = xbmc.Keyboard('','VietmediaF')
keyboardHandle.doModal()
if (keyboardHandle.isConfirmed()):
queryText = keyboardHandle.getText()
if len(queryText) == 0:
return
queryText = urllib.quote_plus(queryText)
url = url.replace('__search__', queryText)
else:
return
if '__download__' in url:
download(url)
return
if '__lock__' in url:
add_lock_dir(url)
return
if '__unlock__' in url:
remove_lock_dir(url)
return
if check_lock(url):
dialog = xbmcgui.Dialog()
result = dialog.input('Nhập mã khoá', type=xbmcgui.INPUT_ALPHANUM, option=xbmcgui.ALPHANUM_HIDE_INPUT)
if len(result) == 0 or result != LOCK_PIN:
notify('Sai mật mã, vui lòng nhập lại')
return
data = fetch_data(url)
if not data:
return
if data.get('error'):
alert(data['error'])
return
if data.get("url"):
play(data)
return
if not data.get("items"):
return
if data.get("content_type") and len(data["content_type"]) > 0:
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_GENRE)
xbmcplugin.setContent(HANDLE, data["content_type"])
listitems = range(len(data["items"]))
for i, item in enumerate(data["items"]):
lock_url = item["path"].replace("plugin://%s" % ADDON_ID, VIETMEDIA_HOST )
lock_url = re.sub('\?','/?',lock_url)
label = item["label"]
if check_lock(lock_url):
label = "*" + label
listItem = xbmcgui.ListItem(label=label, label2=item["label2"], iconImage=item["icon"], thumbnailImage=item["thumbnail"])
if item.get("info"):
listItem.setInfo("video", item["info"])
if item.get("stream_info"):
for type_, values in item["stream_info"].items():
listItem.addStreamInfo(type_, values)
if item.get("art"):
listItem.setArt(item["art"])
menu_context = []
if item.get("context_menu"):
listItem.addContextMenuItems(item["context_menu"])
elif item["is_playable"] == True:
title = item["label"]
title = re.sub('\[.*?]','',title)
title = re.sub('\s','-',title)
title = re.sub('--','-',title)
title = re.sub('--','-',title)
title = re.sub('[\\\\/*?:"<>|#]',"",title)
title = remove_accents.remove_accents(title)
command = 'XBMC.RunPlugin(%s&d=__download__&file_name=%s)' % (item["path"], title)
menu_context.append(( 'Download...', command, ))
command = 'XBMC.RunPlugin(%s&d=__lock__)' % item["path"]
menu_context.append(( 'Khoá mục này', command, ))
command = 'XBMC.RunPlugin(%s&d=__unlock__)' % item["path"]
menu_context.append(( 'Mở khoá mục này', command, ))
listItem.addContextMenuItems( menu_context )
listItem.setProperty("isPlayable", item["is_playable"] and "true" or "false")
if item.get("properties"):
for k, v in item["properties"].items():
listItem.setProperty(k, v)
listitems[i] = (item["path"], listItem, not item["is_playable"])
xbmcplugin.addDirectoryItems(HANDLE, listitems, totalItems=len(listitems))
xbmcplugin.endOfDirectory(HANDLE, succeeded=True, updateListing=False, cacheToDisc=True)
def download(url):
dialog = xbmcgui.Dialog()
download_path = dialog.browse(3, 'XBMC', 'files')
if len(download_path) > 0:
xbmc.log(url)
data = fetch_data(url)
if not data:
return
if data.get('error'):
alert(data['error'])
return
if data.get("url"):
link = data["url"]
link = getlink.get(link)
if link is None or len(link) == 0:
notify('Lỗi không lấy được link phim.')
return
match = re.search(r'file_name=(.*?)$', url)
file_name = match.group(1) + ".mp4"
params = { "url": link, "download_path": download_path }
downloader.download(file_name, params)
go()
| onepas/kodi-addons | plugin.video.vietmediaF/default.py | Python | gpl-2.0 | 8,863 |
ncase = int(input())
for case in range(1, ncase + 1):
    s = input()
    # Greedy: at each step keep the lexicographically larger of prepending or
    # appending the new character to the word built so far,
    # e.g. "CAB" evolves C -> CA -> CAB.
    ans = s[0]
    for c in s[1:]:
        ans = max(c + ans, ans + c)
    print("Case #{}: {}".format(case, ans)) | mofhu/codejam | R1A2016/a.py | Python | mit | 399 |
from apiwrappers import APIRequestWrapper
from requests.auth import AuthBase
class CrucbileAuth(AuthBase):
"""
Does the authentication for Console requests.
"""
def __init__(self, token):
# setup any auth-related data here
self.token = token
def __call__(self, r):
# modify and return the request
r.headers['Authorization'] = self.token
return r
class Crucible(APIRequestWrapper):
"""
Wraps the Crucible API
"""
def authenticate(self):
"""
Write a custom auth property where we grab the auth token and put it in
the headers
"""
#send a post with no auth. prevents an infinite loop
        self.get('/rest-service/auth-v1/login?userName=%s&password=%s' %
(self.user, self.password), auth = None)
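# A minimal usage sketch (hypothetical host and token values, not part of the
# original module): the AuthBase subclass can be attached to a plain requests
# call, which is all the wrapper ultimately relies on.
#
#   import requests
#   requests.get('http://crucible.example.com/rest-service/reviews-v1',
#                auth=CrucbileAuth('some-session-token'))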
| uhjish/link | link/wrappers/atlassianwrappers.py | Python | apache-2.0 | 837 |
#!/usr/bin/env python2
__author__ = "Ryon Sherman"
__email__ = "ryon.sherman@gmail.com"
__copyright__ = "Copyright 2015, Ryon Sherman"
__license__ = "MIT"
from PyQt4 import QtGui
def hex2QColor(c):
""" Convert hex value to QColor object """
# get hex color string
c = str(c).lstrip('#')
# parse color values
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
# return QColor object
return QtGui.QColor(r, g, b)
def scrollBuffer(buf, pos):
""" Scroll text buffer to position """
buf.moveCursor(pos)
buf.ensureCursorVisible()
def scrollBufferToStart(buf):
""" Scroll to start of buffer """
return scrollBuffer(buf, QtGui.QTextCursor.Start)
def scrollBufferToEnd(buf):
""" Scroll to end of buffer """
return scrollBuffer(buf, QtGui.QTextCursor.End)
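# Example usage (assumed values, not part of the original module):
#
#   hex2QColor("#1e90ff")         # -> QtGui.QColor(30, 144, 255)
#   scrollBufferToEnd(text_edit)  # keeps newly appended text visible in a
#                                 # QTextEdit-like buffer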
| ryonsherman/qt | util.py | Python | mit | 835 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import AsyncIterator, Dict, Iterable, List, Optional, Tuple
from idb.common.types import (
HIDButton,
HIDButtonType,
HIDDelay,
HIDDirection,
HIDEvent,
HIDKey,
HIDPress,
HIDPressAction,
HIDSwipe,
HIDTouch,
Point,
)
def tap_to_events(
x: float, y: float, duration: Optional[float] = None
) -> List[HIDEvent]:
return _press_with_duration(HIDTouch(point=Point(x=x, y=y)), duration=duration)
def button_press_to_events(
button: HIDButtonType, duration: Optional[float] = None
) -> List[HIDEvent]:
return _press_with_duration(HIDButton(button=button), duration=duration)
def key_press_to_events(
keycode: int, duration: Optional[float] = None
) -> List[HIDEvent]:
return _press_with_duration(HIDKey(keycode=keycode), duration=duration)
def _press_with_duration(
action: HIDPressAction, duration: Optional[float] = None
) -> List[HIDEvent]:
events = []
events.append(HIDPress(action=action, direction=HIDDirection.DOWN))
if duration:
events.append(HIDDelay(duration=duration))
events.append(HIDPress(action=action, direction=HIDDirection.UP))
return events
def swipe_to_events(
p_start: Tuple[float, float],
p_end: Tuple[float, float],
duration: Optional[float] = None,
delta: Optional[float] = None,
) -> List[HIDEvent]:
start = Point(x=p_start[0], y=p_start[1])
end = Point(x=p_end[0], y=p_end[1])
return [HIDSwipe(start=start, end=end, delta=delta, duration=duration)]
def _key_down_event(keycode: int) -> HIDEvent:
return HIDPress(action=HIDKey(keycode=keycode), direction=HIDDirection.DOWN)
def _key_up_event(keycode: int) -> HIDEvent:
return HIDPress(action=HIDKey(keycode=keycode), direction=HIDDirection.UP)
def key_press_shifted_to_events(keycode: int) -> List[HIDEvent]:
return [
_key_down_event(225),
_key_down_event(keycode),
_key_up_event(keycode),
_key_up_event(225),
]
KEY_MAP: Dict[str, List[HIDEvent]] = {
"a": key_press_to_events(4),
"b": key_press_to_events(5),
"c": key_press_to_events(6),
"d": key_press_to_events(7),
"e": key_press_to_events(8),
"f": key_press_to_events(9),
"g": key_press_to_events(10),
"h": key_press_to_events(11),
"i": key_press_to_events(12),
"j": key_press_to_events(13),
"k": key_press_to_events(14),
"l": key_press_to_events(15),
"m": key_press_to_events(16),
"n": key_press_to_events(17),
"o": key_press_to_events(18),
"p": key_press_to_events(19),
"q": key_press_to_events(20),
"r": key_press_to_events(21),
"s": key_press_to_events(22),
"t": key_press_to_events(23),
"u": key_press_to_events(24),
"v": key_press_to_events(25),
"w": key_press_to_events(26),
"x": key_press_to_events(27),
"y": key_press_to_events(28),
"z": key_press_to_events(29),
"A": key_press_shifted_to_events(4),
"B": key_press_shifted_to_events(5),
"C": key_press_shifted_to_events(6),
"D": key_press_shifted_to_events(7),
"E": key_press_shifted_to_events(8),
"F": key_press_shifted_to_events(9),
"G": key_press_shifted_to_events(10),
"H": key_press_shifted_to_events(11),
"I": key_press_shifted_to_events(12),
"J": key_press_shifted_to_events(13),
"K": key_press_shifted_to_events(14),
"L": key_press_shifted_to_events(15),
"M": key_press_shifted_to_events(16),
"N": key_press_shifted_to_events(17),
"O": key_press_shifted_to_events(18),
"P": key_press_shifted_to_events(19),
"Q": key_press_shifted_to_events(20),
"R": key_press_shifted_to_events(21),
"S": key_press_shifted_to_events(22),
"T": key_press_shifted_to_events(23),
"U": key_press_shifted_to_events(24),
"V": key_press_shifted_to_events(25),
"W": key_press_shifted_to_events(26),
"X": key_press_shifted_to_events(27),
"Y": key_press_shifted_to_events(28),
"Z": key_press_shifted_to_events(29),
"1": key_press_to_events(30),
"2": key_press_to_events(31),
"3": key_press_to_events(32),
"4": key_press_to_events(33),
"5": key_press_to_events(34),
"6": key_press_to_events(35),
"7": key_press_to_events(36),
"8": key_press_to_events(37),
"9": key_press_to_events(38),
"0": key_press_to_events(39),
"\n": key_press_to_events(40),
";": key_press_to_events(51),
"=": key_press_to_events(46),
",": key_press_to_events(54),
"-": key_press_to_events(45),
".": key_press_to_events(55),
"/": key_press_to_events(56),
"`": key_press_to_events(53),
"[": key_press_to_events(47),
"\\": key_press_to_events(49),
"]": key_press_to_events(48),
"'": key_press_to_events(52),
" ": key_press_to_events(44),
"!": key_press_shifted_to_events(30),
"@": key_press_shifted_to_events(31),
"#": key_press_shifted_to_events(32),
"$": key_press_shifted_to_events(33),
"%": key_press_shifted_to_events(34),
"^": key_press_shifted_to_events(35),
"&": key_press_shifted_to_events(36),
"*": key_press_shifted_to_events(37),
"(": key_press_shifted_to_events(38),
")": key_press_shifted_to_events(39),
"_": key_press_shifted_to_events(45),
"+": key_press_shifted_to_events(46),
"{": key_press_shifted_to_events(47),
"}": key_press_shifted_to_events(48),
":": key_press_shifted_to_events(51),
'"': key_press_shifted_to_events(52),
"|": key_press_shifted_to_events(49),
"<": key_press_shifted_to_events(54),
">": key_press_shifted_to_events(55),
"?": key_press_shifted_to_events(56),
"~": key_press_shifted_to_events(53),
}
def text_to_events(text: str) -> List[HIDEvent]:
events = []
for character in text:
if character in KEY_MAP:
events.extend(KEY_MAP[character])
else:
raise Exception(f"No keycode found for {character}")
return events
async def iterator_to_async_iterator(
events: Iterable[HIDEvent],
) -> AsyncIterator[HIDEvent]:
for event in events:
yield event
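# A minimal usage sketch (assumed, not part of the upstream API surface): compose
# keyboard and touch events, then expose them as the AsyncIterator consumed by
# callers of this module.
def _example_event_stream() -> AsyncIterator[HIDEvent]:
    events = text_to_events("hi") + tap_to_events(100.0, 200.0, duration=0.05)
    return iterator_to_async_iterator(events)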
| facebook/FBSimulatorControl | idb/common/hid.py | Python | mit | 6,273 |
#!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributers.
from fenrirscreenreader.core import debug
from fenrirscreenreader.utils import mark_utils
class command():
def __init__(self):
pass
def initialize(self, environment):
self.env = environment
def shutdown(self):
pass
def getDescription(self):
return _('copies marked text to the currently selected clipboard')
def run(self):
if not self.env['commandBuffer']['Marks']['1']:
self.env['runtime']['outputManager'].presentText(_("One or two marks are needed"), interrupt=True)
return
if not self.env['commandBuffer']['Marks']['2']:
self.env['runtime']['cursorManager'].setMark()
        # use the first and the last set mark as range
startMark = self.env['commandBuffer']['Marks']['1'].copy()
endMark = self.env['commandBuffer']['Marks']['2'].copy()
marked = mark_utils.getTextBetweenMarks(startMark, endMark, self.env['screen']['newContentText'])
self.env['runtime']['memoryManager'].addValueToFirstIndex('clipboardHistory', marked)
# reset marks
self.env['runtime']['cursorManager'].clearMarks()
self.env['runtime']['outputManager'].presentText(marked, soundIcon='CopyToClipboard', interrupt=True)
def setCallback(self, callback):
pass
| chrys87/fenrir | src/fenrirscreenreader/commands/commands/copy_marked_to_clipboard.py | Python | lgpl-3.0 | 1,460 |
# -*- coding: UTF-8 -*-
# Copyright 2012-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.core import constants as ext_requests
from lino.core.renderer import HtmlRenderer
from lino.core.renderer import add_user_language
from .views import index_response
class Renderer(HtmlRenderer):
"""A HTML render that uses Bootstrap3.
"""
tableattrs = {
'class': "table table-hover table-striped table-condensed"}
cellattrs = dict(align="left", valign="top")
can_auth = False
def obj2url(self, ar, obj, **kw):
ba = obj.get_detail_action(ar)
if ba is not None:
# no need to check again:
# if ar is None or a.get_bound_action_permission(ar, obj, None):
add_user_language(kw, ar)
return self.get_detail_url(ar, ba.actor, obj.pk, **kw)
# def get_detail_url(self, actor, pk, *args, **kw):
# return self.front_end.build_plain_url(
# actor.app_label,
# actor.__name__,
# str(pk), *args, **kw)
# def pk2url(self, model, pk, **kw):
# """Overrides :meth:`lino.core.renderer.Renderer.pk2url`.
# """
# if pk is not None:
# return self.front_end.build_plain_url(
# model._meta.app_label, model.__name__, str(pk), **kw)
def get_home_url(self, *args, **kw):
return self.front_end.build_plain_url(*args, **kw)
def get_request_url(self, ar, *args, **kw):
if ar.actor.__name__ == "Main":
return self.front_end.build_plain_url(*args, **kw)
st = ar.get_status()
kw.update(st['base_params'])
add_user_language(kw, ar)
if ar.offset is not None:
kw.setdefault(ext_requests.URL_PARAM_START, ar.offset)
if ar.limit is not None:
kw.setdefault(ext_requests.URL_PARAM_LIMIT, ar.limit)
if ar.order_by is not None:
sc = ar.order_by[0]
if sc.startswith('-'):
sc = sc[1:]
kw.setdefault(ext_requests.URL_PARAM_SORTDIR, 'DESC')
kw.setdefault(ext_requests.URL_PARAM_SORT, sc)
#~ print '20120901 TODO get_request_url'
return self.front_end.build_plain_url(
ar.actor.app_label, ar.actor.__name__, *args, **kw)
def request_handler(self, ar, *args, **kw):
return ''
def action_button(self, obj, ar, ba, label=None, **kw):
label = label or ba.get_button_label()
return "%s" % label
def action_call(self, ar, bound_action, status):
a = bound_action.action
if a.opens_a_window or (a.parameters and not a.no_params_window):
return "#"
sar = bound_action.request_from(ar)
return self.get_request_url(sar)
def render_action_response(self, ar):
return index_response(ar)
| lino-framework/lino | lino/modlib/bootstrap3/renderer.py | Python | bsd-2-clause | 2,889 |
"""
____ _ _ __________
| __ )| | / \ |__ | ____|
| _ \| | / _ \ / /| _|
| |_) | |___ / ___ \ / /_| |___
|____/|_____/_/ \_/____|_____|
by fabian0010
"""
import os, argparse, time
def update(version):
"""
Update/download the server files
"""
# TODO
print(version)
def downloadModule(name):
"""
Update/download a module
"""
# TODO
print(name)
def run():
"""
	Check if the main server file exists,
import and run the server
"""
# Check if the main server file exists
if os.path.isfile("Core/Server.py"):
		# Import the ´Server´ module into a variable
smod = __import__("Core.Server").Server
# Start the server
s = smod.Server()
s.setDaemon(True)
s.start()
# Wait for the user to press CTRL+C
# and tell the server to exit
		try:
			while True:
				time.sleep(1)
		except KeyboardInterrupt:
			s.exit()
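# Example invocations (hypothetical usage sketch, not part of the original file):
#   python Blaze.py --update 2      # install/update server files for protocol version 2
#   python Blaze.py --module chat   # install/update a module named "chat"
#   python Blaze.py --run           # run the server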
if __name__ == "__main__":
"""
Parse the commandline arguments
and execute the users commands
"""
# Create a parser instance
parser = argparse.ArgumentParser()
# Add argument groups
g1 = parser.add_argument_group()
g2 = parser.add_argument_group()
# Add the ´update´ and ´run´ arguments
# to the first group
g1.add_argument("-u", "--update", type=int, help="Install/update the server", metavar="PROTOCOL VERSION")
g1.add_argument("-r", "--run", help="Run the server", action="store_true")
# Add the ´module´ argument to the
# second group
g2.add_argument("-m", "--module", type=str, help="Install/update a module", metavar="MODULE NAME")
# The ´module´ argument is not
# supposed to be added to the
# other arguments
# Parse the arguments
args = parser.parse_args()
# Check for arguments and run
# the functions they belong to
if args.update: update(args.update)
if args.module: downloadModule(args.module)
if args.run: run()
# Check if there are no arguments
# and print the help if so
if not args.update and not args.module and not args.run:
		parser.print_help()
| fabian0010/Blaze | Blaze.py | Python | mit | 1981 |
# -*- coding: latin-1 -*-
import copy
import conditions
import effects
import f_expression
import pddl_types
class Action(object):
def __init__(self, name, parameters, precondition, effects):
self.name = name
self.parameters = parameters
self.condition = precondition
self.effects = effects
self.uniquify_variables()
def parse(alist):
iterator = iter(alist)
assert iterator.next() == ":action"
name = iterator.next()
parameters_tag_opt = iterator.next()
if parameters_tag_opt == ":parameters":
parameters = pddl_types.parse_typed_list(iterator.next(),
only_variables=True)
precondition_tag_opt = iterator.next()
else:
parameters = []
precondition_tag_opt = parameters_tag_opt
if precondition_tag_opt == ":precondition":
precondition = conditions.parse_condition(iterator.next())
effect_tag = iterator.next()
else:
precondition = conditions.Conjunction([])
effect_tag = precondition_tag_opt
assert effect_tag == ":effect"
effect_list = iterator.next()
eff = []
effects.parse_effects(effect_list,eff)
for rest in iterator:
assert False, rest
return Action(name, parameters, precondition, eff)
parse = staticmethod(parse)
def dump(self):
print "%s(%s)" % (self.name, ", ".join(map(str, self.parameters)))
print "Precondition:"
self.condition.dump()
print "Effects:"
for eff in self.effects:
eff.dump()
def uniquify_variables(self):
self.type_map = dict([(par.name, par.type) for par in self.parameters])
self.condition = self.condition.uniquify_variables(self.type_map)
for effect in self.effects:
effect.uniquify_variables(self.type_map)
def relaxed(self):
new_effects = []
for eff in self.effects:
relaxed_eff = eff.relaxed()
if relaxed_eff:
new_effects.append(relaxed_eff)
return Action(self.name, self.parameters,
self.condition.relaxed().simplified(),
new_effects)
def untyped(self):
# We do not actually remove the types from the parameter lists,
# just additionally incorporate them into the conditions.
# Maybe not very nice.
result = copy.copy(self)
parameter_atoms = [par.to_untyped_strips() for par in self.parameters]
new_precondition = self.condition.untyped()
result.condition = conditions.Conjunction(parameter_atoms + [new_precondition])
result.effects = [eff.untyped() for eff in self.effects]
return result
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, objects_by_type):
"""Return a PropositionalAction which corresponds to the instantiation of
this action with the arguments in var_mapping. Only fluent parts of the
conditions (those in fluent_facts) are included. init_facts are evaluated
        while instantiating.
Precondition and effect conditions must be normalized for this to work.
Returns None if var_mapping does not correspond to a valid instantiation
(because it has impossible preconditions or an empty effect list.)"""
arg_list = [var_mapping[conditions.Variable(par.name)].name for par in self.parameters]
name = "(%s %s)" % (self.name, " ".join(arg_list))
precondition = []
try:
self.condition.instantiate(var_mapping, init_facts, fluent_facts,
init_function_vals, fluent_functions, task,
new_axiom, precondition)
except conditions.Impossible:
return None
effects = []
for eff in self.effects:
eff.instantiate(var_mapping, init_facts, fluent_facts,
init_function_vals, fluent_functions, task,
new_axiom, objects_by_type, effects)
if effects:
return PropositionalAction(name, precondition, effects)
else:
return None
class DurativeAction(object):
def __init__(self, name, parameters, duration, conditions, effects):
self.name = name
self.parameters = parameters
self.orig_parameter_length = len(parameters)
self.duration = duration
self.condition = conditions
assert len(effects)==2
self.effects = effects
self.uniquify_variables()
def parse(alist):
iterator = iter(alist)
assert iterator.next() == ":durative-action"
name = iterator.next()
parameters_tag_opt = iterator.next()
if parameters_tag_opt == ":parameters":
parameters = pddl_types.parse_typed_list(iterator.next(),
only_variables=True)
duration_tag = iterator.next()
else:
parameters = []
duration_tag = parameters_tag_opt
assert duration_tag == ":duration"
duration_list = iterator.next()
if duration_list[0] == "and":
duration_list = duration_list[1:]
else:
duration_list = [duration_list]
duration_start = []
duration_end = []
for item in duration_list: # each item is a simple-duration-constraint
duration = duration_start
if item[0] == "at":
if item[1] == "end":
duration = duration_end
item = item[2]
assert item[0] in ("<=",">=","=")
assert len(item) == 3
assert item[1] == "?duration"
op = item[0]
value = f_expression.parse_expression(item[2])
duration += [(op,value)]
condition_tag = iterator.next()
if condition_tag == ":condition":
condition = conditions.parse_durative_condition(iterator.next())
effect_tag = iterator.next()
else:
condition = conditions.parse_durative_condition([])
effect_tag = condition_tag
assert effect_tag == ":effect"
effect_list = iterator.next()
effect = [[],[]]
effects.parse_durative_effects(effect_list, effect)
for rest in iterator:
assert False, rest
return DurativeAction(name, parameters, (duration_start,duration_end), condition, effect)
parse = staticmethod(parse)
def dump(self):
if self.orig_parameter_length != len(self.parameters):
print "%s(%s, (%s))" % (self.name,
", ".join(map(str, self.parameters[0:self.orig_parameter_length])),
", ".join(map(str, self.parameters[self.orig_parameter_length:])))
else:
print "%s(%s)" % (self.name, ", ".join(map(str, self.parameters)))
if len(self.duration[0]) > 0:
print "duration (values from start):"
for (op, val) in self.duration[0]:
print " " + op
val.dump(" ")
if len(self.duration[1]) > 0:
print "duration (values from end):"
for (op, val) in self.duration[1]:
print " " + op
val.dump(" ")
print "start condition:"
self.condition[0].dump()
print "over all condition:"
self.condition[1].dump()
print "end condition:"
self.condition[2].dump()
print "start effects:"
for eff in self.effects[0]:
eff.dump()
print "end effects:"
for eff in self.effects[1]:
eff.dump()
def __str__(self):
return "<Action %s>" % self.name
def uniquify_variables(self):
self.type_map = dict([(par.name, par.type) for par in self.parameters])
for index, condition in enumerate(self.condition):
self.condition[index] = condition.uniquify_variables(self.type_map)
for effects in self.effects:
for effect in effects:
effect.uniquify_variables(self.type_map)
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, objects_by_type):
"""Return a PropositionalDurativeAction which corresponds to the instantiation of
this action with the arguments in var_mapping. Only fluent parts of the
conditions (those in fluent_facts) are included. init_facts are evaluated
whilte instantiating.
Precondition and effect conditions must be normalized for this to work.
Returns None if var_mapping does not correspond to a valid instantiation
(because it has impossible preconditions or an empty effect list.)"""
arg_list = [var_mapping[conditions.Variable(par.name)].name for par in self.parameters]
name = "(%s %s)" % (self.name, " ".join(arg_list[:self.orig_parameter_length]))
try:
inst_duration = [[(op,pne.instantiate(var_mapping, fluent_functions,
init_function_vals, task, new_axiom))
for op,pne in self.duration[0]],
[(op,pne.instantiate(var_mapping, fluent_functions,
init_function_vals, task, new_axiom))
for op,pne in self.duration[1]]]
except ValueError, e:
print "dropped action %s" % name
print "Error: %s" % e
return None
inst_conditions = [[],[],[]]
for time,condition in enumerate(self.condition):
try:
condition.instantiate(var_mapping, init_facts, fluent_facts,
init_function_vals, fluent_functions, task,
new_axiom, inst_conditions[time])
except conditions.Impossible:
return None
effects = [[],[]]
for time,timed_effects in enumerate(self.effects):
for eff in timed_effects:
eff.instantiate(var_mapping, init_facts, fluent_facts,
init_function_vals, fluent_functions, task,
new_axiom, objects_by_type, effects[time])
if effects:
return PropositionalDurativeAction(name, inst_duration, inst_conditions, effects)
else:
return None
# def relaxed(self):
# new_effects = []
# for eff in self.effects:
# relaxed_eff = eff.relaxed()
# if relaxed_eff:
# new_effects.append(relaxed_eff)
# return Action(self.name, self.parameters,
# self.condition.relaxed().simplified(),
# new_effects)
# def untyped(self):
# # We do not actually remove the types from the parameter lists,
# # just additionally incorporate them into the conditions.
# # Maybe not very nice.
# result = copy.copy(self)
# parameter_atoms = [par.to_untyped_strips() for par in self.parameters]
# new_precondition = self.condition.untyped()
# result.condition = conditions.Conjunction(parameter_atoms + [new_precondition])
# result.effects = [eff.untyped() for eff in self.effects]
# return result
class PropositionalAction:
def __init__(self, name, precondition, effects):
self.name = name
self.condition = precondition
self.add_effects = []
self.del_effects = []
self.assign_effects = []
for (condition, effect) in effects:
if isinstance(effect,f_expression.FunctionAssignment):
self.assign_effects.append((condition, effect))
elif effect.negated:
self.del_effects.append((condition, effect.negate()))
else:
self.add_effects.append((condition, effect))
def dump(self):
print self.name
for fact in self.condition:
print "PRE: %s" % fact
for cond, fact in self.add_effects:
print "ADD: %s -> %s" % (", ".join(map(str, cond)), fact)
for cond, fact in self.del_effects:
print "DEL: %s -> %s" % (", ".join(map(str, cond)), fact)
for cond, fact in self.assign_effects:
print "ASS: %s -> %s" % (", ".join(map(str, cond)), fact)
class PropositionalDurativeAction:
def __init__(self, name, duration, conditions, effects):
self.name = name
self.duration = duration
self.conditions = conditions
self.add_effects = [[],[]]
self.del_effects = [[],[]]
self.assign_effects = [[],[]]
for time in range(2):
for (condition, effect) in effects[time]:
if isinstance(effect,f_expression.FunctionAssignment):
self.assign_effects[time].append((condition, effect))
elif effect.negated:
self.del_effects[time].append((condition, effect.negate()))
else:
self.add_effects[time].append((condition, effect))
def dump(self):
print self.name
for duration in self.duration[0]:
print "START DUR: %s %s" % (duration[0],duration[1])
for duration in self.duration[1]:
print "END DUR: %s %s" % (duration[0],duration[1])
for fact in self.conditions[0]:
print "START COND: %s" % fact
for fact in self.conditions[1]:
print "OVER ALL COND: %s" % fact
for fact in self.conditions[2]:
print "END COND: %s" % fact
for cond, fact in self.add_effects[0]:
print "ADD START: %s -> %s" % (", ".join(map(str, cond)), fact)
for cond, fact in self.add_effects[1]:
print "ADD END: %s -> %s" % (", ".join(map(str, cond)), fact)
for cond, fact in self.del_effects[0]:
print "DEL START: %s -> %s" % (", ".join(map(str, cond)), fact)
for cond, fact in self.del_effects[1]:
print "DEL END: %s -> %s" % (", ".join(map(str, cond)), fact)
for cond, fact in self.assign_effects[0]:
print "ASS START: %s -> %s" % (", ".join(map(str, cond)), fact)
for cond, fact in self.assign_effects[1]:
print "ASS END: %s -> %s" % (", ".join(map(str, cond)), fact)
| thierry1985/project-1022 | MISC/TFD-0.2.2/translate/pddl/actions.py | Python | mit | 14,709 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
try:
from . import GAuth
from . import GCache
except:
import GAuth
import GCache
import logging
import os
import apiclient
from apiclient.http import MediaFileUpload
from apiclient.http import MediaIoBaseDownload
import time
DIR_MIME = 'application/vnd.google-apps.folder'
OAUTH2_SCOPE = 'https://www.googleapis.com/auth/drive'
# Number of bytes to send/receive in each request.
CHUNKSIZE = 2 * 1024 * 1024
# Mimetype to use if one can't be guessed from the file extension.
DEFAULT_MIMETYPE = 'application/octet-stream'
class GDrive(GAuth.GAuth):
def __init__(self, oauth2json = None, oauth2storage = None, scope = OAUTH2_SCOPE):
GAuth.GAuth.__init__(self, oauth2json, oauth2storage, scope)
self.cache = GCache.GCache()
logging.debug('GDrive object created')
def get_id_from_cache(self, name, parent_folder = 'root'):
# Check whether an object does not already exists in cache
return self.cache[(name, parent_folder)]
def get_id_from_gdrive(self, name, parent_folder = 'root'):
logging.info('Fetching metadata for %s in %s' % (name, parent_folder))
q = "name='%s' and '%s' in parents" % (name, parent_folder)
logging.debug("Google Drive query: %s" % q)
param = {
'q' : q,
'fields' : 'files(id)',
}
response = self.service.files().list(**param).execute()
id = response['files'][0]['id']
logging.debug('Data fetched: %s', response['files'])
# Save to the cache
self.cache[id] = (name, parent_folder)
return id
def get_id(self, name, parent_folder = 'root'):
# Try if it exists in cache
try:
return self.get_id_from_cache(name, parent_folder)
except:
pass
# Try if it exists in GDrive
try:
return self.get_id_from_gdrive(name, parent_folder)
except IndexError:
raise KeyError('"%s" in "%s"' % (name, parent_folder))
def is_dir(self, object_id):
logging.debug('Fetching metadata for %s' % (object_id,))
response = self.service.files().get(fileId = object_id, fields = 'mimeType').execute()
logging.debug('mimeType of the object: %s' % response.get('mimeType'))
return response.get('mimeType') == DIR_MIME
def mkdir(self, folder_title, parent_folder = 'root'):
try:
return self.get_id(folder_title, parent_folder)
except:
pass
# Looks like there is no such directory yet, so let's create it
body = {
"name": folder_title,
"parents": [{"id": parent_folder}],
"mimeType": DIR_MIME,
}
directory = self.service.files().create(body = body).execute()
did = directory['id']
logging.debug('Directory %s in %s has been created as ID %s' % (folder_title, parent_folder, did))
self.cache[did] = (folder_title, parent_folder)
return did
def rm(self, object_id):
logging.info("Removing an object with id %s" % object_id)
try:
self.service.files().delete(fileId=object_id).execute()
logging.debug("Object with id %s has been sucesfully removed" % object_id)
        except Exception as e:
logging.error("Removal of object id %s has failed: %s" % (object_id, str(e)))
raise
try:
del self.cache[object_id]
except:
pass
def ls(self, folder_id = 'root'):
logging.info('Fetching metadata for %s' % (folder_id,))
q = "'%s' in parents" % (folder_id,)
logging.debug("Google Drive query: %s" % q)
param = {
'q' : q,
'fields' : 'files(id,name)',
}
response = self.service.files().list(**param).execute()
ids = list()
for o in response['files']:
# Save to the cache
self.cache[o['id']] = (o['name'], folder_id)
# Append the id to the final list
ids.append(o['id'])
logging.debug('Fetched: %s objects', len(ids))
        return ids
def upload(self, filename, gdrivename = None, parent_folder = 'root'):
logging.debug('Going to upload file to GDrive. filename=%s , gdrivename=%s , parent_folder=%s' % (filename, gdrivename, parent_folder))
# Convert the name of the file on GDrive in case it is not provided
if gdrivename is None or gdrivename == '':
gdrivename = filename.split('/')[-1]
# Check whether the file does not already exists
try:
self.get_id(gdrivename, parent_folder)
except:
pass
else:
logging.error("The file to upload %s already exists" % gdrivename)
raise FileExistsError(gdrivename)
# Prepare for the file upload
logging.debug("Creating the media object for uploading from %s" % filename)
media = MediaFileUpload(filename, chunksize = CHUNKSIZE, resumable = True)
if not media.mimetype():
logging.debug("MIME type of the file has not been recognized, using the default %s" % DEFAULT_MIMETYPE)
            media = MediaFileUpload(filename, mimetype = DEFAULT_MIMETYPE, chunksize = CHUNKSIZE, resumable = True)
body = {
'name': gdrivename,
#'parents': [{"id": parent_folder}],
'parents': [parent_folder],
}
logging.debug('Starting upload of the %s file as %s' % (filename, gdrivename))
request = self.service.files().create(body = body, media_body = media, fields='id')
retry = 5
while retry > 0:
try:
response = None
while response is None:
status, response = request.next_chunk()
if status:
logging.info("Uploaded %d%%." % int(status.progress() * 100))
logging.info("Upload has been completed")
# No need for a retry
retry = -1
except apiclient.errors.HttpError as e:
if e.resp.status in [404]:
# Start the upload all over again.
request = self.service.files().create(body = body, media_body = media, fields='id')
elif e.resp.status in [500, 502, 503, 504]:
# Call next_chunk() again, but use an exponential backoff for repeated errors.
logging.warning('Upload of a chunk has failed, retrying ...')
retry -= 1
time.sleep(3)
else:
# Do not retry. Log the error and fail.
logging.error('The upload has failed: %s' % str(e))
raise
if retry == 0:
logging.error('The upload has failed.')
raise ConnectionError
fid = response.get('id')
self.cache[fid] = (gdrivename, parent_folder)
return fid
def move(self, object_id, folder_id = 'root'):
logging.debug("Copying an object with id %s to a folder with id %s" % (object_id, folder_id))
# Retrieve the existing parents to remove
f = self.service.files().get(fileId = object_id, fields = 'parents').execute()
previous_parents = ",".join(f.get('parents'))
# Move the file to the new folder
f = self.service.files().update(fileId = object_id,
addParents = folder_id,
removeParents = previous_parents,
fields = 'id').execute()
logging.info("Object with id %s has been sucesfully copied to a folder with id %s" % (object_id, folder_id))
return f.get('id')
def download(self, filename, object_id):
logging.debug('Starting download of object %s to %s' % (object_id, filename))
with open(filename, 'wb') as fd:
request = self.service.files().get_media(fileId = object_id)
downloader = MediaIoBaseDownload(fd, request, chunksize = CHUNKSIZE)
done = False
while done is False:
status, done = downloader.next_chunk()
logging.info("Download %d%%." % int(status.progress() * 100))
logging.info('Object %s has been downloaded as %s' % (object_id, filename))
def walk(self, path):
logging.debug('Walking through %s' % path)
dirs = path.split('/')
dirs = [d for d in dirs if d != '']
if len(dirs) == 0:
return 'root'
dirs = ['root'] + dirs
index = 1
oid = None
while index < len(dirs):
logging.debug('Diving into %s/%s' % (dirs[index - 1], dirs[index]))
try:
oid = self.get_id(dirs[index], dirs[index - 1])
except KeyError as e:
logging.info('The artefact has not been found')
return None
dirs[index] = oid
index += 1
logging.info('The artefact has been found as OID=%s' % oid)
return oid
if __name__ == "__main__":
logging.basicConfig(level = logging.DEBUG, format = '%(asctime)s %(levelname)s[%(module)s] %(message)s')
gd = GDrive(oauth2json = '/home/jkurik/.gp.json', oauth2storage = '/home/jkurik/.gp')
gd.auth()
dname = "Nazdarek ke smazani"
did = gd.mkdir(dname)
#print("ID of a new object:", gd.get_id_from_gdrive(dname))
#print("ID of a new object in a cache:", gd.get_id_from_cache(dname))
gd.rm(did)
gd.ls()
#fid = gd.upload('/home/jkurik/the.hacker.playbook.practical.guide.to.penetration.testing.pdf')
#fid = gd.upload('/home/jkurik/kalendar2016v3.xls')
#did = gd.mkdir('tmp')
#gd.is_dir(did)
#gd.move(fid, did)
#gd.download('/tmp/xxx', fid)
#gd.rm(fid)
#gd.rm(did)
| kurik/scripts | googlelib/GDrive.py | Python | bsd-3-clause | 9,870 |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'console_auth_tokens')
class ConsoleAuthTokensController(wsgi.Controller):
def __init__(self, *args, **kwargs):
self._consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
super(ConsoleAuthTokensController, self).__init__(*args, **kwargs)
def show(self, req, id):
"""Checks a console auth token and returns the related connect info."""
context = req.environ['nova.context']
authorize(context)
token = id
connect_info = self._consoleauth_rpcapi.check_token(context, token)
if not connect_info:
raise webob.exc.HTTPNotFound(explanation=_("Token not found"))
console_type = connect_info.get('console_type')
# This is currently required only for RDP consoles
if console_type != "rdp-html5":
raise webob.exc.HTTPUnauthorized(
explanation=_("The requested console type details are not "
"accessible"))
return {'console':
{i: connect_info[i]
for i in ['instance_uuid', 'host', 'port',
'internal_access_path']
if i in connect_info}}
class Console_auth_tokens(extensions.ExtensionDescriptor):
"""Console token authentication support."""
name = "ConsoleAuthTokens"
alias = "os-console-auth-tokens"
namespace = ("http://docs.openstack.org/compute/ext/"
"consoles-auth-tokens/api/v2")
updated = "2013-08-13T00:00:00Z"
def get_resources(self):
controller = ConsoleAuthTokensController()
ext = extensions.ResourceExtension('os-console-auth-tokens',
controller)
return [ext]
| nikesh-mahalka/nova | nova/api/openstack/compute/legacy_v2/contrib/console_auth_tokens.py | Python | apache-2.0 | 2,601 |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import web
from nailgun.fake_keystone.handlers import EndpointsHandler
from nailgun.fake_keystone.handlers import ServicesHandler
from nailgun.fake_keystone.handlers import TokensHandler
from nailgun.fake_keystone.handlers import VersionHandler
urls = (
r"/v2.0/?$", VersionHandler.__name__,
r"/v2.0/tokens/?$", TokensHandler.__name__,
r"/v2.0/OS-KSADM/services/?$", ServicesHandler.__name__,
r"/v2.0/endpoints/?$", EndpointsHandler.__name__,
)
_locals = locals()
def app():
return web.application(urls, _locals)
| eayunstack/fuel-web | nailgun/nailgun/fake_keystone/urls.py | Python | apache-2.0 | 1,173 |
'''load sample data into cache.'''
sample_dict = {
'blog_title' : 'Blastoff! Demo',
'blog_description' : 'A simple blog written with bootstrap',
'sidebar_widgets' : '''
<div class="sidebar-module sidebar-module-inset">
<h4>About</h4>
<p>Etiam porta <em>sem malesuada magna</em> mollis euismod. Cras mattis consectetur purus sit amet fermentum. Aenean lacinia bibendum nulla sed consectetur.</p>
</div>
<div class="sidebar-module">
<h4>Archives</h4>
<ol class="list-unstyled">
<li><a href="#">March 2014</a></li>
<li><a href="#">February 2014</a></li>
<li><a href="#">January 2014</a></li>
<li><a href="#">December 2013</a></li>
<li><a href="#">November 2013</a></li>
<li><a href="#">October 2013</a></li>
<li><a href="#">September 2013</a></li>
<li><a href="#">August 2013</a></li>
<li><a href="#">July 2013</a></li>
<li><a href="#">June 2013</a></li>
<li><a href="#">May 2013</a></li>
<li><a href="#">April 2013</a></li>
</ol>
</div>
<div class="sidebar-module">
<h4>Elsewhere</h4>
<ol class="list-unstyled">
<li><a href="#">GitHub</a></li>
<li><a href="#">Twitter</a></li>
<li><a href="#">Facebook</a></li>
</ol>
</div>''',
'top_nav' : '''
<a class="blog-nav-item active" href="#">Home</a>
<a class="blog-nav-item" href="#">New features</a>
<a class="blog-nav-item" href="#">Press</a>
<a class="blog-nav-item" href="#">New hires</a>
<a class="blog-nav-item" href="#">About</a>
''',
}
| michaelgugino/blastoff | sample_data.py | Python | apache-2.0 | 1,812 |
from django.contrib.auth.models import User, Permission
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
class TestAdminNormalUser(TestCase):
def setUp(self):
staff = User(username='staff')
staff.set_password('test')
staff.is_staff = True
staff.save()
staff.user_permissions.add(
Permission.objects.get_by_natural_key(
'change_page', 'test_app', 'page'))
self.url = reverse('admin:test_app_page_changelist')
self.c = Client()
self.c.login(username='staff', password='test')
def test_has_no_load_button(self):
response = self.c.get(self.url)
self.assertNotContains(response, '<a href="/admin/load/">')
def test_has_no_dump_button(self):
response = self.c.get(self.url)
self.assertNotContains(response, '<a href="dump/">')
class TestAdminSuperUser(TestCase):
def setUp(self):
superuser = User(username='superuser')
superuser.set_password('test')
superuser.is_staff = True
superuser.is_superuser = True
superuser.save()
self.url = reverse('admin:test_app_page_changelist')
self.c = Client()
self.c.login(username='superuser', password='test')
def test_has_load_button(self):
response = self.c.get(self.url)
self.assertContains(response, '<a href="/admin/load/">')
def test_has_dump_button(self):
response = self.c.get(self.url)
self.assertContains(response, '<a href="dump/">')
| fedosov/django-smuggler | tests/test_app/tests/test_admin.py | Python | lgpl-3.0 | 1,564 |
"""
MUSE -- A Multi-algorithm-collaborative Universal Structure-prediction Environment
Copyright (C) 2010-2017 by Zhong-Li Liu
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
E-mail: zl.liu@163.com
"""
import os
from muse.Readwrite import Read_Write
from muse.Symmetry import Findspg
from muse.Calculators import Submit
from muse.Readwrite.ReadInput import indict
def DirectOpt(BigDict,Old_cry,nu,ng):
with open('../log.muse','a') as logfile: print >>logfile
all_enthfile = open('../all-enthalpy-'+str(nu),'a')
if int(indict['Num_Keep'][0]) > 0:
i = 0
nn = 1
nkept = 1
spglist = []
while nkept <= int(indict['Num_Keep'][0]):
if int(indict['IfReOptKept'][0]):
with open('../log.muse','a') as logfile: print >>logfile, "Direct reopt. ..."
spgnum = Findspg.Findspg(Old_cry[i][1])
if spgnum[0] not in spglist:
spglist.append(spgnum[0])
Read_Write.write_vasp('POSCAR',Old_cry[i][1],label=indict['NameSys'][0]+": "+str(ng)+'-'+str(nn),direct=True,sort=True,vasp5=True)
nk,enth,BigDict = Submit.Submit(BigDict,nu,ng,nn,Old_cry)
nn += 1
nkept +=1
else:
spgnum = Findspg.Findspg(Old_cry[i][1])
if spgnum[0] not in spglist:
with open('../log.muse','a') as logfile: print >>logfile, "-"*23,"%d-%d"%(ng,nn),"-"*23
spglist.append(spgnum[0])
with open('../log.muse','a') as logfile:
print >>logfile, "%02d: %s, %s %10.4f kept, not reopt."%(i+1,spgnum[0],spgnum[1],Old_cry[i][0])
print >>logfile
BigDict[nu][ng][Old_cry[i][0]] = Old_cry[i][1].copy()
ifexist = os.system("grep %02d-%02d: %s"%(ng,nn,"../all-enthalpy-"+str(nu)))
if ifexist != 0:
all_enthfile.write(" %02d-%02d:%11s%9s%14.6f%14.6f%14s"%(ng,nn,spgnum[0],spgnum[1],Old_cry[i][0],Old_cry[i][1].get_volume(),'----')+'\n')
Read_Write.write_vasp('POSCAR',Old_cry[i][1],label=indict['NameSys'][0]+": "+"%02d-%02d"%(ng,nn)+' '+spgnum[0]+' '+str(spgnum[1])+' '+str(Old_cry[i][0]),direct=True,sort=True,vasp5=True)
os.system("cat POSCAR >> ../poscars-%d"%nu)
nn += 1
nkept +=1
i +=1
all_enthfile.close()
return BigDict
| zhongliliu/muse | muse/Calculators/DirectOpt.py | Python | gpl-2.0 | 2,930 |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import unittest
from datacheck.compat import (native_type,
int as _int, list as _list)
class TestNativeType(unittest.TestCase):
def test_native_type(self):
if sys.version_info[0] == 2:
self.assertEqual(_int.__name__, 'newint')
self.assertEqual(_list.__name__, 'newlist')
self.assertEqual(native_type(_int).__name__, 'int')
self.assertEqual(native_type(_list).__name__, 'list')
| csdev/datacheck | tests/unit_tests/test_compat.py | Python | mit | 584 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module for the ops not belonging to the official numpy package."""
from . import _op
from . import control_flow
from . import image
from . import random
from . import _register
from ._op import * # pylint: disable=wildcard-import
from .control_flow import * # pylint: disable=wildcard-import
__all__ = _op.__all__ + control_flow.__all__
| szha/mxnet | python/mxnet/ndarray/numpy_extension/__init__.py | Python | apache-2.0 | 1,130 |
import collections
class TimeBuffer(object):
def __init__(self):
self._times = self._create_container()
def _create_container(self):
return []
def clear(self):
self._times[:] = []
def update(self, curr_sim_time):
pass
def get_times(self):
return self._times
def __len__(self):
return len(self._times)
def __repr__(self):
return str(self._times)
def __getitem__(self, sliced):
return self._times[sliced]
def append(self, time):
self._times.append(time)
class WindowedTimeBuffer(TimeBuffer):
def __init__(self, time_window):
TimeBuffer.__init__(self)
self._time_window = time_window
def _create_container(self):
return collections.deque()
def clear(self):
self._times.clear()
def update(self, current_time):
threshold = current_time - self._time_window
if self._times:
time = self._times.popleft()
while self._times and time < threshold:
time = self._times.popleft()
if time >= threshold:
self._times.appendleft(time)
class SpikeBuffer(TimeBuffer):
def append_spike(self, time):
self.append(time)
class WindowedSpikeBuffer(WindowedTimeBuffer, SpikeBuffer):
def __init__(self, time_window):
SpikeBuffer.__init__(self)
WindowedTimeBuffer.__init__(self, time_window)
def rate(self):
return float(len(self)) / self._time_window
class ValueBuffer(TimeBuffer):
def __init__(self):
TimeBuffer.__init__(self)
self._values = []
def append_value(self, time, value):
self.append(time)
self._values.append(value)
def clear(self):
TimeBuffer.clear(self)
self._values[:] = []
def update(self, current_time):
TimeBuffer.update(self, current_time)
        # Trim values to match the surviving timestamps; slicing with -0 would
        # keep everything, so handle the empty case explicitly.
        if self._times:
            self._values[:] = self._values[-len(self._times):]
        else:
            self._values[:] = []
def get_values(self):
return self._values
def get_timed_values(self):
return list(zip(self._times, self._values))
def __repr__(self):
return str(self.get_timed_values())
def __getitem__(self, sliced):
return self.get_timed_values()[sliced]
class WindowedValueBuffer(WindowedTimeBuffer, ValueBuffer):
def __init__(self, time_window):
ValueBuffer.__init__(self)
WindowedTimeBuffer.__init__(self, time_window)
def update(self, current_time):
WindowedTimeBuffer.update(self, current_time)
ValueBuffer.update(self, current_time)
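if __name__ == '__main__':
    # A minimal usage sketch (not part of the original module): keep only the
    # spikes inside a 10 s window and read out the firing rate.
    buf = WindowedSpikeBuffer(time_window=10.0)
    for t in (1.0, 2.5, 7.0):
        buf.append_spike(t)
    buf.update(12.0)   # drops the spike at t=1.0, older than 12.0 - 10.0
    print(buf.rate())  # 2 spikes / 10.0 s window -> 0.2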
| IGITUGraz/spore-nest-module | examples/pattern_matching_showcase/python/snn_utils/buffer.py | Python | gpl-2.0 | 2,589 |
from voluptuous import Schema, MultipleInvalid, Datetime, Required
from almanach.common.exceptions.validation_exception import InvalidAttributeException
class InstanceValidator(object):
def __init__(self):
self.schema = Schema({
'name': unicode,
'flavor': unicode,
'os': {
Required('distro'): unicode,
Required('version'): unicode,
Required('os_type'): unicode,
},
'metadata': dict,
'start_date': Datetime(),
'end_date': Datetime(),
})
def validate_update(self, payload):
try:
return self.schema(payload)
except MultipleInvalid as e:
raise InvalidAttributeException(e.errors)
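# A minimal usage sketch (assumed payload values, not part of the original module):
#
#   InstanceValidator().validate_update({
#       'name': u'vm-01',
#       'os': {'distro': u'ubuntu', 'version': u'16.04', 'os_type': u'linux'},
#   })
#
# Unknown keys, or start_date/end_date values rejected by Datetime(), surface
# as InvalidAttributeException carrying the underlying voluptuous errors.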
| internap/almanach | almanach/validators/instance_validator.py | Python | apache-2.0 | 776 |
class Foo:
pass
Foo.var = 1
def __main__():
print("in __main__")
# del Foo.var
Foo.var = 2
try:
Foo.var2 = 2
except RuntimeError:
print("RuntimeError1")
| pfalcon/micropython | tests/strict/rt_class_attr_add.py | Python | mit | 198 |