text | repo_name | path | language | license | size | score |
---|---|---|---|---|---|---|
stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
from kubeflow.kubeflow.crud_backend import api, status
def pvc_status(pvc):
"""
Set the status of the pvc
"""
if pvc.metadata.deletion_timestamp is not None:
return status.create_status(status.STATUS_PHASE.TERMINATING,
"Deleting Volume...")
if pvc.status.phase == "Bound":
return status.create_status(status.STATUS_PHASE.READY, "Bound")
    # The PVC is in a Pending state; check the Events to find out why
evs = api.v1_core.list_namespaced_event(
namespace=pvc.metadata.namespace,
field_selector=api.events_field_selector(
"PersistentVolumeClaim", pvc.metadata.name
),
).items
# If there are no events, then the PVC was just created
if len(evs) == 0:
return status.create_status(status.STATUS_PHASE.WAITING,
"Provisioning Volume...")
msg = f"Pending: {evs[0].message}"
state = evs[0].reason
if evs[0].reason == "WaitForFirstConsumer":
phase = status.STATUS_PHASE.UNAVAILABLE
msg = (
"Pending: This volume will be bound when its first consumer"
" is created. E.g., when you first browse its contents, or"
" attach it to a notebook server"
)
elif evs[0].reason == "Provisioning":
phase = status.STATUS_PHASE.WAITING
elif evs[0].reason == "FailedBinding":
phase = status.STATUS_PHASE.WARNING
elif evs[0].type == "Warning":
phase = status.STATUS_PHASE.WARNING
elif evs[0].type == "Normal":
phase = status.STATUS_PHASE.READY
return status.create_status(phase, msg, state)
def viewer_status(viewer):
"""
Return a string representing the status of that viewer. If a deletion
timestamp is set we want to return a `Terminating` state.
"""
try:
ready = viewer["status"]["ready"]
except KeyError:
return status.STATUS_PHASE.UNINITIALIZED
if "deletionTimestamp" in viewer["metadata"]:
return status.STATUS_PHASE.TERMINATING
if not ready:
return status.STATUS_PHASE.WAITING
return status.STATUS_PHASE.READY
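# A minimal usage sketch (hypothetical, not part of the original module):
# viewer_status only inspects the dict it is given, so it can be exercised
# with plain dictionaries.
#
#   viewer_status({"metadata": {}, "status": {}})
#       -> STATUS_PHASE.UNINITIALIZED      (no "ready" key yet)
#   viewer_status({"metadata": {"deletionTimestamp": "..."},
#                  "status": {"ready": True}})
#       -> STATUS_PHASE.TERMINATING
#   viewer_status({"metadata": {}, "status": {"ready": False}})
#       -> STATUS_PHASE.WAITING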
| kubeflow/kubeflow | components/crud-web-apps/volumes/backend/apps/common/status.py | Python | apache-2.0 | 2,168 | 0 |
"""
"""
################################################################################
##### Command Line Interface ###################################################
################################################################################
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from tempfile import gettempdir
import os
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description=__doc__ )
parser.add_argument("-p", "--path",
type=str,
help="the path to the desired location of the generated site")
parser.add_argument("-d", "--deploy",
action="store_true",
help="package site for movement to deployment server. Default path is the"
"current working directory, but the path flag will override that value" )
parser.add_argument("-r", "--reuse",
action="store_true",
help="if an already built website exists at the targeted path, attempt to"
"reuse already present resources (i.e. images, favicon elements and other"
"static resources)" )
args = parser.parse_args()
if args.path is None:
args.path = os.getcwd()
# if args.deploy:
# args.path = os.getcwd()
# else:
# args.path = gettempdir()
################################################################################
##### Overrides ################################################################
################################################################################
from string import Template
from re import compile
class TemplateWrapper():
def __init__(self, cls):
PYTHON_LL = 80
HTML_LL = 112
self.cls = cls
self.headers = [
( # Primary python file header template
compile(r'\$ph{(.*?)}'),
lambda x: "\n\n{1}\n##### {0} {2}\n{1}\n".format(
x.upper(), '#'*PYTHON_LL, '#'*(PYTHON_LL-len(x)-7) )
),
( # Secondary python file header template
compile(r'\$sh{(.*?)}'),
lambda x: "\n### {0} {1}".format(
x, '#'*(PYTHON_LL-len(x)-5) )
),
( # HTML file header template
compile(r'\$wh{(.*?)}'),
lambda x: "<!-- ***** {0} {1} -->".format(
x, '*'*(HTML_LL-len(x)-16) )
)
]
def __call__(self, template):
for header in self.headers:
ptn, tpl = header
for match in ptn.finditer(template):
replacements = ( match.group(0), tpl(match.group(1)) )
template = template.replace(*replacements)
template_obj = self.cls(template)
template_obj.populate = self.populate
return template_obj
@staticmethod
def populate(template, filepath, **kwargs):
for key, value in kwargs.items():
if isinstance(value, list):
kwargs[key] = "\n".join(
[ t[0].safe_substitute(**t[1]) for t in value ]
)
        with open(filepath, 'w') as f:
            f.write(template.safe_substitute(**kwargs))
Template = TemplateWrapper(Template)
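# Illustration (hypothetical values, never executed during the build): the
# wrapped Template first expands the custom header markers, then behaves
# like a normal string.Template.
#
#   tpl = Template("$ph{Overrides}\nx = ${x}")
#   tpl.safe_substitute(x="1")
#   # -> an 80-column '#####'-style banner titled OVERRIDES, then "x = 1"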
from subprocess import Popen, call, DEVNULL, STDOUT, PIPE
from sys import executable
def sPopen(*args):
    command, shell = list(args), True
    if command[0] == 'python':
        command[0] = executable
        shell = False
    if os.name == 'nt':
        from subprocess import CREATE_NEW_CONSOLE
        return Popen( command, shell=shell, creationflags=CREATE_NEW_CONSOLE )
    else:
        # a list of arguments must not be run through the shell on POSIX,
        # otherwise only command[0] would be executed
        return Popen( command, shell=False )
def sCall(*args):
command, shell = list(args), True
if command[0] == 'python':
command[0] = executable
shell = False
if os.name != 'nt':
shell = False
call( command, shell=shell, stdout=DEVNULL, stderr=STDOUT )
################################################################################
##### Templates ################################################################
################################################################################
APP_PY_TEMPLATE = Template("""\
\"""
${doc_string}
\"""
from bottle import run, route, get, post, error
from bottle import static_file, template, request
from bottle import HTTPError
$ph{Command Line Interface}
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from inspect import getframeinfo, currentframe
from os.path import dirname, abspath
import os
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description=__doc__ )
parser.add_argument('-d', '--deploy',
action='store_true',
help='Run server for deployment' )
parser.add_argument('-i', '--ip',
type=str,
default="127.0.0.1",
help='ip to run the server against, default localhost' )
parser.add_argument('-p', '--port',
type=str,
default="8080",
help='port to run server on' )
args = parser.parse_args()
# change working directory to script directory
os.chdir(dirname(abspath(getframeinfo(currentframe()).filename)))
$ph{Main Site Routes}
${main_routes}
$ph{API and Additional Site Routes}
${api_routes}
$ph{Static Routes}
${static_routes}
$sh{Favicon Routes}
${favicon_routes}
$sh{Image Routes}
${image_routes}
$sh{Font Routes}
${font_routes}
$sh{Stylesheet Routes}
${css_routes}
$sh{Javascript Routes}
${js_routes}
$ph{Error Routes}
@error(404)
def error404(error):
return 'nothing to see here'
$ph{Run Server}
if args.deploy:
run(host=args.ip, port=args.port, server='cherrypy') #deployment
else:
run(host=args.ip, port=args.port, debug=True, reloader=True) #development
""" )
MAIN_ROUTE_TEMPLATE = Template("""\
@route('/${path}')
def ${method_name}():
return template('${template}', request=request, template='${template}')
""" )
STATIC_ROUTE_TEMPLATE = Template("""\
@get('/${path}')
def load_resource():
return static_file('${file}', root='${root}')
""" )
WATCH_SASS_SCRIPT = Template("""\
from sys import argv, exit
from signal import signal, SIGTERM, SIGINT
from shutil import rmtree
from subprocess import Popen
from inspect import getframeinfo, currentframe
from os.path import dirname, abspath, isdir, isfile
from os import chdir, remove
def signal_term_handler(signal, frame):
if not p is None: p.kill()
if isfile("_all.scss"): remove("_all.scss")
if isdir(".sass-cache"): rmtree(".sass-cache")
print(argv[0])
remove("watch.py") # argv[0] contains full path
exit(0)
p = None
signal(SIGTERM, signal_term_handler)
signal(SIGINT, signal_term_handler)
chdir(dirname(abspath(getframeinfo(currentframe()).filename)))
command = "sass --watch"
for x in range(1, len(argv)):
command += " {0}.scss:../../www/static/css/{0}.css".format(argv[x])
p = Popen(command, shell=True)
p.wait()
""" )
################################################################################
##### Script Body ##############################################################
################################################################################
from os.path import relpath, abspath, normpath, join, isfile, isdir, splitext
from shutil import copy, copyfileobj, rmtree
from urllib.request import urlopen
from time import sleep
from re import match, search
from sys import exit
SCRIPT_DIR = os.getcwd()
PROJECT_NAME = relpath(SCRIPT_DIR, "..")
STATIC_ROUTE = lambda p, f, r: \
( STATIC_ROUTE_TEMPLATE, { "path": p, "file": f, "root": r } )
MAIN_ROUTE = lambda p, m, t: \
( MAIN_ROUTE_TEMPLATE, { "path": p, "method_name": m, "template": t } )
def migrate_files(directory, destination):
src_path = join(SCRIPT_DIR, directory)
if not isdir(destination): os.makedirs(destination)
    for root, dirs, files in os.walk(src_path):
        for dirname in dirs[:]:  # iterate over a copy so removal is safe
            if dirname.startswith('!') or dirname in ['.DS_Store']:
                dirs.remove(dirname)
        for filename in files:
            if not filename.startswith('!') and filename not in ['.DS_Store']:
                if not isfile(join(destination, filename)): # added for the reuse flag; check the destination, not the cwd
                    copy(join(root, filename), join(destination, filename))
                if not filename.startswith('~'):
                    yield normpath(join(relpath(root, src_path),
                                        filename) ).replace('\\', '/')
def migrate_views():
routes = [ MAIN_ROUTE("", "load_root", "index") ]
for route in migrate_files("dev/views", "views"):
tpl_name = splitext(route.split("/")[-1])[0]
if tpl_name == "index":
continue
routes.append(MAIN_ROUTE(
splitext(route)[0],
"load_" + tpl_name.replace("-","_"),
tpl_name
))
return routes
def get_api_routes():
with open( join(SCRIPT_DIR, "dev/py", "routes.py"), 'r') as f:
return f.read()
def migrate_static_files(source, destination):
return [ STATIC_ROUTE(r, r.split("/")[-1], destination)
for r in migrate_files(source, destination) ]
def generate_favicon_resources():
fav_tpl = lambda r: "favicon-{0}x{0}.png".format(r)
and_tpl = lambda r: "touch-icon-{0}x{0}.png".format(r)
app_tpl = lambda r: "apple-touch-icon-{0}x{0}.png".format(r)
pra_tpl = lambda r: "apple-touch-icon-{0}x{0}-precomposed.png".format(r)
fav_path = lambda p: normpath(join("static/favicon", p))
favicon_tpl = normpath(join(SCRIPT_DIR, "res/favicon.svg"))
ico_res = [ "16", "24", "32", "48", "64", "128", "256" ]
fav_res = [ "16", "32", "96", "160", "196", "300" ]
android_res = [ "192" ]
apple_res = [ "57", "76", "120", "152", "180" ] # add to head backwards
if not isdir("static/favicon"): os.makedirs("static/favicon")
# generate favicon resources
for res in (list(set(ico_res) | set(fav_res)) + android_res + apple_res):
if res in android_res: path = abspath( fav_path(and_tpl(res)) )
elif res in apple_res: path = abspath( fav_path(app_tpl(res)) )
else: path = abspath( fav_path(fav_tpl(res)) )
sCall("inkscape", "-z", "-e", path, "-w", res, "-h", res, favicon_tpl)
sCall( *(["convert"] + [fav_path(fav_tpl(r)) for r in ico_res] +
[fav_path("favicon.ico")]) )
for res in [ r for r in ico_res if r not in fav_res ]:
os.remove(fav_path(fav_tpl(res)))
# return routes for generated favicon resources
fav_route = lambda f: STATIC_ROUTE(f, f, "static/favicon")
app_route = lambda p,t: STATIC_ROUTE(p, t("57"), "static/favicon")
return ([ fav_route("favicon.ico") ] +
[ fav_route(fav_tpl(r)) for r in fav_res ] +
[ fav_route(and_tpl(r)) for r in android_res ] +
[ fav_route(app_tpl(r)) for r in apple_res if r!="57" ] +
[ fav_route(pra_tpl(r)) for r in apple_res if r!="57" ] +
[ app_route("apple-touch-icon.png", app_tpl),
app_route("apple-touch-icon-precomposed.png", pra_tpl) ])
def generate_stylesheets():
dev_path = join( SCRIPT_DIR, "dev/sass" )
is_sass = lambda f: splitext(f)[-1].lower() in ['.scss', '.sass']
is_mixin = lambda f: match(r'.*mixins?$', splitext(f)[0].lower())
get_import = lambda p: [ join( relpath(r, dev_path), f )
for r, d, fs in os.walk( join(dev_path, p) )
for f in fs if is_sass(f) ]
if not isdir("static/css"): os.makedirs("static/css")
# generate _all.scss file from existing sass resources
with open( join( dev_path, '_all.scss' ), 'w') as f:
f.write('\n'.join( # probably not the most efficient way
[ '@import "{}";'.format(path.replace('\\', '/')) for path in
( # mixins and global variables must be imported first
# modules
[ f for f in get_import('modules') ]
# vendor mixins
+ [ f for f in get_import('vendor') if is_mixin(f) ]
# all other vendor files
+ [ f for f in get_import('vendor') if not is_mixin(f) ]
                # partials (comment out this line for manual selection)
+ [ f for f in get_import('partials') ]
)
] )
)
# use sass command line tool to generate stylesheets
stylesheets = [ splitext(f)[0] for f in os.listdir(dev_path)
if is_sass(f) and not f.startswith('_') ]
sass_path = relpath(dev_path, os.getcwd()).replace('\\', '/')
if args.deploy:
for s in stylesheets:
sCall("sass", sass_path+"/"+s+".scss", "static/css/"+s+".min.css",
"-t", "compressed", "--sourcemap=none", "-C")
os.remove( join(dev_path, "_all.scss") )
else:
Template.populate(WATCH_SASS_SCRIPT, '../dev/sass/watch.py')
command = "sass --watch"
for s in stylesheets:
command += " ../dev/sass/{0}.scss:./static/css/{0}.css".format(s)
p = Popen(command, shell=True)
#p = sPopen( 'python', '../dev/sass/watch.py', *stylesheets )
sleep(3) # delay so the stylesheets have time to be created
p.kill() # note: kill sends SIGKILL
# return css routes from generated stylesheets
return [ STATIC_ROUTE(f, f, "static/css") for f in os.listdir("static/css")]
def generate_javascript():
return migrate_static_files("dev/js", "static/js")
def get_favicon_head():
link_tpl = lambda c: ' <link {0}>\n'.format(c)
all_favs = os.listdir('static/favicon')
favicons = [ x for x in all_favs if x.startswith('favicon') ]
apple_favs = [ x for x in all_favs if x.startswith('apple') ]
android_favs = [ x for x in all_favs if not x in favicons + apple_favs ]
fav_head = link_tpl('rel="shortcut icon" href="favicon.ico"')
favicons.remove('favicon.ico')
def gen_head(fav_tpl, fav_set):
dic = {}
for fav in fav_set:
res = int(search(r'([0-9]+)x', fav).group(1))
dic[res] = fav
keys = list(dic.keys())
keys.sort()
keys.reverse()
for key in keys:
yield link_tpl( fav_tpl.format(key, dic[key]) )
for fav_set in [
('rel="icon" sizes="{0}x{0}" href="/{1}"', android_favs),
('rel="apple-touch-icon" sizes="{0}x{0}" href="/{1}"', apple_favs),
('rel="icon" type="image/png" sizes="{0}x{0}" href="/{1}"', favicons) ]:
fav_head += "".join( gen_head(*fav_set) )
return fav_head
def get_opengraph_head():
og_head_string = """\
% url = request.environ['HTTP_HOST']
<meta property="og:url" content="http://{{url}}/">
<meta property="og:type" content="website">
<meta property="og:title" content="{{title}}">
<meta property="open_graph_image">
<meta property="og:description" content="{{description}}">"""
og_image_string = """<meta property="og:image:type" content="image/png">
<meta property="og:image:width" content="300">
<meta property="og:image:height" content="300">
<meta property="og:image:url" content="http://{{url}}/favicon-300x300.png">
<meta property="og:image" content="http://{{url}}/favicon-300x300.png">"""
if isfile("static/favicon/favicon-300x300.png"):
og_head_string = og_head_string.replace(
'<meta property="open_graph_image">',
og_image_string
)
return og_head_string
def get_stylesheet_head():
styles_tpl = ' <link rel="stylesheet" type="text/css" href="/{0}">\n'
stylesheets = os.listdir('static/css')
styles_head = ''
for style in stylesheets:
if style.split('.')[0] == 'styles':
styles_head += styles_tpl.format(style)
stylesheets.remove(style)
break
stylesheets = [ s.split('.')[0] for s in stylesheets ]
styles_head += " % if template in {}:\n".format(stylesheets)
tpl_style = '{{template}}.min.css' if args.deploy else '{{template}}.css'
styles_head += styles_tpl.format(tpl_style)
styles_head += " % end"
return styles_head
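# With e.g. static/css containing styles.min.css and home.min.css under
# --deploy (illustrative filenames), the generated head fragment is:
#
#   <link rel="stylesheet" type="text/css" href="/styles.min.css">
#   % if template in ['home']:
#   <link rel="stylesheet" type="text/css" href="/{{template}}.min.css">
#   % end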
os.chdir(args.path)
if isdir("www"): rmtree("www")
os.makedirs("www")
os.chdir("www")
### Import Bottle Framework ####################################################
from urllib.error import URLError
bottle_url = "https://raw.githubusercontent.com/bottlepy/bottle/master/bottle.py"
try:
with urlopen(bottle_url) as response, open('bottle.py', 'wb') as f:
copyfileobj(response, f)
except URLError as e:
print(e)
### Generate App.py ############################################################
Template.populate(APP_PY_TEMPLATE, 'app.py',
doc_string="",
main_routes=migrate_views(),
api_routes=get_api_routes(),
static_routes=migrate_static_files("res/static", "static"),
favicon_routes=generate_favicon_resources(),
image_routes=migrate_static_files("res/img", "static/img"),
font_routes=migrate_static_files("res/font", "static/font"),
css_routes=generate_stylesheets(),
js_routes=generate_javascript() )
### Generate Head Template #####################################################
if isfile('views/~head.tpl'): os.remove('views/~head.tpl')
head_tpl = ""
with open(join(SCRIPT_DIR, "dev/views/~head.tpl"), 'r') as head:
head_tpl = head.read()
metas = [ "Favicon_Resources", "Open_Graph", "Style_Sheets" ]
for meta in metas:
head_tpl = head_tpl.replace(
'<meta name="'+meta.lower()+'">',
'\n$wh{'+meta.replace('_', ' ')+'}\n${'+meta.lower()+'}'
)
Template.populate(Template(head_tpl), 'views/~head.tpl',
favicon_resources=get_favicon_head(),
open_graph=get_opengraph_head(),
style_sheets=get_stylesheet_head() )
### Packaging For Deployment ###################################################
if not args.deploy:
#sCall('python', 'app.py', '-p', '8081')
exit(0)
from zipfile import ZipFile
os.chdir('..') # work on this
if isfile('www.zip'): os.remove('www.zip')
with ZipFile('www.zip', 'w') as zip_file:
for root, dirs, files in os.walk( join(os.getcwd(), 'www') ):
rel_path = relpath(root, os.getcwd())
for f in files:
zip_file.write( join(rel_path, f) )
# set up watch for template and js files using watchdog
#
#
# from zipfile import ZipFile
#
# def package_site():
| SwankSwashbucklers/bottle-builder | bottle-builder.py | Python | mit | 18,381 | 0.013111 |
"""Model. We are modeling Person objects with a collection
of Address objects. Each Address has a PostalCode, which
in turn references a City and then a Country:
Person --(1..n)--> Address
Address --(has a)--> PostalCode
PostalCode --(has a)--> City
City --(has a)--> Country
"""
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from meta import Base, FromCache, Session, RelationshipCache
class Country(Base):
__tablename__ = 'country'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
def __init__(self, name):
self.name = name
class City(Base):
__tablename__ = 'city'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
country_id = Column(Integer, ForeignKey('country.id'), nullable=False)
country = relationship(Country)
def __init__(self, name, country):
self.name = name
self.country = country
class PostalCode(Base):
__tablename__ = 'postal_code'
id = Column(Integer, primary_key=True)
code = Column(String(10), nullable=False)
city_id = Column(Integer, ForeignKey('city.id'), nullable=False)
city = relationship(City)
@property
def country(self):
return self.city.country
def __init__(self, code, city):
self.code = code
self.city = city
class Address(Base):
__tablename__ = 'address'
id = Column(Integer, primary_key=True)
person_id = Column(Integer, ForeignKey('person.id'), nullable=False)
street = Column(String(200), nullable=False)
postal_code_id = Column(Integer, ForeignKey('postal_code.id'))
postal_code = relationship(PostalCode)
@property
def city(self):
return self.postal_code.city
@property
def country(self):
return self.postal_code.country
def __str__(self):
return "%s\t"\
"%s, %s\t"\
"%s" % (self.street, self.city.name,
self.postal_code.code, self.country.name)
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
addresses = relationship(Address, collection_class=set)
def __init__(self, name, *addresses):
self.name = name
self.addresses = set(addresses)
def __str__(self):
return self.name
def __repr__(self):
return "Person(name=%r)" % self.name
def format_full(self):
return "\t".join([str(x) for x in [self] + list(self.addresses)])
# Caching options. A set of three RelationshipCache options
# which can be applied to Query(), causing the "lazy load"
# of these attributes to be loaded from cache.
cache_address_bits = RelationshipCache("default", "byid", PostalCode.city).\
and_(
RelationshipCache("default", "byid", City.country)
).and_(
RelationshipCache("default", "byid", Address.postal_code)
)
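# A minimal construction sketch (not part of the original example; the
# literal values are illustrative). Building the object graph from the
# module docstring needs no Session; Address uses the declarative
# default (keyword) constructor.
if __name__ == '__main__':
    us = Country("United States")
    chicago = City("Chicago", us)
    code = PostalCode("60611", chicago)
    person = Person("Mary", Address(street="123 Anywhere St.",
                                    postal_code=code))
    # The properties walk PostalCode -> City -> Country:
    print(person.format_full())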
| simplegeo/sqlalchemy | examples/beaker_caching/model.py | Python | mit | 3,110 | 0.006752 |
from __future__ import print_function, division
import decimal
import math
import re as regex
import sys
from collections import defaultdict
from .core import C
from .sympify import converter, sympify, _sympify, SympifyError
from .singleton import S, Singleton
from .expr import Expr, AtomicExpr
from .decorators import _sympifyit, deprecated
from .cache import cacheit, clear_cache
from sympy.core.compatibility import (
as_int, integer_types, long, string_types, with_metaclass, HAS_GMPY,
SYMPY_INTS)
import sympy.mpmath as mpmath
import sympy.mpmath.libmp as mlib
from sympy.mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed
from sympy.mpmath.ctx_mp import mpnumeric
from sympy.mpmath.libmp.libmpf import (
finf as _mpf_inf, fninf as _mpf_ninf,
fnan as _mpf_nan, fzero as _mpf_zero, _normalize as mpf_normalize,
prec_to_dps)
rnd = mlib.round_nearest
_LOG2 = math.log(2)
def mpf_norm(mpf, prec):
"""Return the mpf tuple normalized appropriately for the indicated
precision after doing a check to see if zero should be returned or
    not when the mantissa is 0. ``mpf_normalize`` always assumes that this
    is zero, but it may not be, since mpf's special values "+inf",
    "-inf" and "nan" also have a mantissa of zero.
Note: this is not intended to validate a given mpf tuple, so sending
mpf tuples that were not created by mpmath may produce bad results. This
is only a wrapper to ``mpf_normalize`` which provides the check for non-
zero mpfs that have a 0 for the mantissa.
"""
sign, man, expt, bc = mpf
if not man:
# hack for mpf_normalize which does not do this;
# it assumes that if man is zero the result is 0
# (see issue 3540)
if not bc:
return _mpf_zero
else:
# don't change anything; this should already
# be a well formed mpf tuple
return mpf
rv = mpf_normalize(sign, man, expt, bc, prec, rnd)
return rv
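# For example (based on the checks above): a zero mantissa with zero
# bitcount normalizes to zero, while special values pass through unchanged:
#   mpf_norm(_mpf_zero, 53) -> _mpf_zero
#   mpf_norm(_mpf_nan, 53)  -> _mpf_nan    (mantissa 0 but bc != 0)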
# TODO: we should use the warnings module
_errdict = {"divide": False}
def seterr(divide=False):
"""
Should sympy raise an exception on 0/0 or return a nan?
divide == True .... raise an exception
divide == False ... return nan
"""
if _errdict["divide"] != divide:
clear_cache()
_errdict["divide"] = divide
def _as_integer_ratio(p):
"""compatibility implementation for python < 2.6"""
neg_pow, man, expt, bc = getattr(p, '_mpf_', mpmath.mpf(p)._mpf_)
p = [1, -1][neg_pow % 2]*man
if expt < 0:
q = 2**-expt
else:
q = 1
p *= 2**expt
return int(p), int(q)
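# For example, for binary-exact inputs:
#   _as_integer_ratio(0.5)   -> (1, 2)
#   _as_integer_ratio(-0.75) -> (-3, 4)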
def _decimal_to_Rational_prec(dec):
"""Convert an ordinary decimal instance to a Rational."""
if not dec.is_finite(): # NOTE: this is_finite is not SymPy's
raise TypeError("dec must be finite, got %s." % dec)
s, d, e = dec.as_tuple()
prec = len(d)
if e >= 0: # it's an integer
rv = Integer(int(dec))
else:
s = (-1)**s
d = sum([di*10**i for i, di in enumerate(reversed(d))])
rv = Rational(s*d, 10**-e)
return rv, prec
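# For example:
#   _decimal_to_Rational_prec(decimal.Decimal("0.25")) -> (1/4, 2)
#   _decimal_to_Rational_prec(decimal.Decimal("3"))    -> (3, 1)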
def _literal_float(f):
"""Return True if n can be interpreted as a floating point number."""
pat = r"[-+]?((\d*\.\d+)|(\d+\.?))(eE[-+]?\d+)?"
return bool(regex.match(pat, f))
# (a,b) -> gcd(a,b)
_gcdcache = {}
# TODO caching with decorator, but not to degrade performance
def igcd(a, b):
"""Computes positive, integer greatest common divisor of two numbers.
The algorithm is based on the well known Euclid's algorithm. To
improve speed, igcd() has its own caching mechanism implemented.
"""
try:
return _gcdcache[(a, b)]
except KeyError:
a, b = as_int(a), as_int(b)
if a and b:
if b < 0:
b = -b
while b:
a, b = b, a % b
else:
a = abs(a or b)
_gcdcache[(a, b)] = a
return a
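# For example: igcd(12, 18) -> 6, igcd(0, 5) -> 5, igcd(-4, 6) -> 2
# (the result is always the positive gcd).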
def ilcm(a, b):
"""Computes integer least common multiple of two numbers. """
if a == 0 and b == 0:
return 0
else:
return a*b // igcd(a, b)
def igcdex(a, b):
"""Returns x, y, g such that g = x*a + y*b = gcd(a, b).
>>> from sympy.core.numbers import igcdex
>>> igcdex(2, 3)
(-1, 1, 1)
>>> igcdex(10, 12)
(-1, 1, 2)
>>> x, y, g = igcdex(100, 2004)
>>> x, y, g
(-20, 1, 4)
>>> x*100 + y*2004
4
"""
if (not a) and (not b):
return (0, 1, 0)
if not a:
return (0, b//abs(b), abs(b))
if not b:
return (a//abs(a), 0, abs(a))
if a < 0:
a, x_sign = -a, -1
else:
x_sign = 1
if b < 0:
b, y_sign = -b, -1
else:
y_sign = 1
x, y, r, s = 1, 0, 0, 1
while b:
(c, q) = (a % b, a // b)
(a, b, r, s, x, y) = (b, c, x - q*r, y - q*s, r, s)
return (x*x_sign, y*y_sign, a)
class Number(AtomicExpr):
"""
Represents any kind of number in sympy.
Floating point numbers are represented by the Float class.
Integer numbers (of any size), together with rational numbers (again,
there is no limit on their size) are represented by the Rational class.
If you want to represent, for example, ``1+sqrt(2)``, then you need to do::
Rational(1) + sqrt(Rational(2))
"""
is_commutative = True
is_number = True
__slots__ = []
# Used to make max(x._prec, y._prec) return x._prec when only x is a float
_prec = -1
is_Number = True
def __new__(cls, *obj):
if len(obj) == 1:
obj = obj[0]
if isinstance(obj, Number):
return obj
if isinstance(obj, SYMPY_INTS):
return Integer(obj)
if isinstance(obj, tuple) and len(obj) == 2:
return Rational(*obj)
if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)):
return Float(obj)
if isinstance(obj, string_types):
val = sympify(obj)
if isinstance(val, Number):
return val
else:
raise ValueError('String "%s" does not denote a Number' % obj)
if isinstance(obj, Number):
return obj
msg = "expected str|int|long|float|Decimal|Number object but got %r"
raise TypeError(msg % type(obj).__name__)
def __divmod__(self, other):
from .containers import Tuple
from sympy.functions.elementary.complexes import sign
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(self).__name__, type(other).__name__))
if not other:
raise ZeroDivisionError('modulo by zero')
if self.is_Integer and other.is_Integer:
return Tuple(*divmod(self.p, other.p))
else:
rat = self/other
w = sign(rat)*int(abs(rat)) # = rat.floor()
r = self - other*w
#w*other + r == self
return Tuple(w, r)
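    # For example: divmod(Rational(7, 2), Integer(2)) returns Tuple(1, 3/2),
    # since 1*2 + 3/2 == 7/2.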
def __rdivmod__(self, other):
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(other).__name__, type(self).__name__))
return divmod(other, self)
def __round__(self, *args):
return round(float(self), *args)
def _as_mpf_val(self, prec):
"""Evaluation of mpf tuple accurate to at least prec bits."""
raise NotImplementedError('%s needs ._as_mpf_val() method' %
(self.__class__.__name__))
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def _as_mpf_op(self, prec):
prec = max(prec, self._prec)
return self._as_mpf_val(prec), prec
def __float__(self):
return mlib.to_float(self._as_mpf_val(53))
def _eval_conjugate(self):
return self
def _eval_order(self, *symbols):
# Order(5, x, y) -> Order(1,x,y)
return C.Order(S.One, *symbols)
def _eval_subs(self, old, new):
if old == -self:
return -new
return self # there is no other possibility
def _eval_is_bounded(self):
return True
def _eval_is_finite(self):
return True
@classmethod
def class_key(cls):
return 1, 0, 'Number'
@cacheit
def sort_key(self, order=None):
return self.class_key(), (0, ()), (), self
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.Infinity
elif other is S.NegativeInfinity:
return S.NegativeInfinity
return AtomicExpr.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
return S.Infinity
return AtomicExpr.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.Infinity
else:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
return AtomicExpr.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.NaN:
return S.NaN
elif other is S.Infinity or other is S.NegativeInfinity:
return S.Zero
return AtomicExpr.__div__(self, other)
__truediv__ = __div__
def __eq__(self, other):
raise NotImplementedError('%s needs .__eq__() method' %
(self.__class__.__name__))
def __ne__(self, other):
raise NotImplementedError('%s needs .__ne__() method' %
(self.__class__.__name__))
def __lt__(self, other):
raise NotImplementedError('%s needs .__lt__() method' %
(self.__class__.__name__))
def __le__(self, other):
raise NotImplementedError('%s needs .__le__() method' %
(self.__class__.__name__))
def __gt__(self, other):
return _sympify(other).__lt__(self)
def __ge__(self, other):
return _sympify(other).__le__(self)
def __hash__(self):
return super(Number, self).__hash__()
def is_constant(self, *wrt, **flags):
return True
def as_coeff_mul(self, *deps):
# a -> c*t
if self.is_Rational:
return self, tuple()
elif self.is_negative:
return S.NegativeOne, (-self,)
return S.One, (self,)
def as_coeff_add(self, *deps):
# a -> c + t
if self.is_Rational:
return self, tuple()
return S.Zero, (self,)
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
if rational and not self.is_Rational:
return S.One, self
return self, S.One
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
return self, S.Zero
def gcd(self, other):
"""Compute GCD of `self` and `other`. """
from sympy.polys import gcd
return gcd(self, other)
def lcm(self, other):
"""Compute LCM of `self` and `other`. """
from sympy.polys import lcm
return lcm(self, other)
def cofactors(self, other):
"""Compute GCD and cofactors of `self` and `other`. """
from sympy.polys import cofactors
return cofactors(self, other)
class Float(Number):
"""
Represents a floating point number. It is capable of representing
arbitrary-precision floating-point numbers.
Examples
========
>>> from sympy import Float
>>> Float(3.5)
3.50000000000000
>>> Float(3)
3.00000000000000
    Floats can be created from string representations of Python floats
to force ints to Float or to enter high-precision (> 15 significant
digits) values:
>>> Float('.0010')
0.00100000000000000
>>> Float('1e-3')
0.00100000000000000
>>> Float('1e-3', 3)
0.00100
Float can automatically count significant figures if a null string
    is sent for the precision; spaces are also allowed in the string. (Auto-
counting is only allowed for strings, ints and longs).
>>> Float('123 456 789 . 123 456', '')
123456789.123456
>>> Float('12e-3', '')
0.012
>>> Float(3, '')
3.
If a number is written in scientific notation, only the digits before the
exponent are considered significant if a decimal appears, otherwise the
"e" signifies only how to move the decimal:
>>> Float('60.e2', '') # 2 digits significant
6.0e+3
>>> Float('60e2', '') # 4 digits significant
6000.
>>> Float('600e-2', '') # 3 digits significant
6.00
Notes
=====
Floats are inexact by their nature unless their value is a binary-exact
value.
>>> approx, exact = Float(.1, 1), Float(.125, 1)
For calculation purposes, evalf needs to be able to change the precision
but this will not increase the accuracy of the inexact value. The
following is the most accurate 5-digit approximation of a value of 0.1
that had only 1 digit of precision:
>>> approx.evalf(5)
0.099609
By contrast, 0.125 is exact in binary (as it is in base 10) and so it
can be passed to Float or evalf to obtain an arbitrary precision with
matching accuracy:
>>> Float(exact, 5)
0.12500
>>> exact.evalf(20)
0.12500000000000000000
Trying to make a high-precision Float from a float is not disallowed,
but one must keep in mind that the *underlying float* (not the apparent
decimal value) is being obtained with high precision. For example, 0.3
does not have a finite binary representation. The closest rational is
the fraction 5404319552844595/2**54. So if you try to obtain a Float of
0.3 to 20 digits of precision you will not see the same thing as 0.3
followed by 19 zeros:
>>> Float(0.3, 20)
0.29999999999999998890
If you want a 20-digit value of the decimal 0.3 (not the floating point
approximation of 0.3) you should send the 0.3 as a string. The underlying
representation is still binary but a higher precision than Python's float
is used:
>>> Float('0.3', 20)
0.30000000000000000000
Although you can increase the precision of an existing Float using Float
it will not increase the accuracy -- the underlying value is not changed:
>>> def show(f): # binary rep of Float
... from sympy import Mul, Pow
... s, m, e, b = f._mpf_
... v = Mul(int(m), Pow(2, int(e), evaluate=False), evaluate=False)
... print('%s at prec=%s' % (v, f._prec))
...
>>> t = Float('0.3', 3)
>>> show(t)
4915/2**14 at prec=13
>>> show(Float(t, 20)) # higher prec, not higher accuracy
4915/2**14 at prec=70
>>> show(Float(t, 2)) # lower prec
307/2**10 at prec=10
The same thing happens when evalf is used on a Float:
>>> show(t.evalf(20))
4915/2**14 at prec=70
>>> show(t.evalf(2))
307/2**10 at prec=10
Finally, Floats can be instantiated with an mpf tuple (n, c, p) to
produce the number (-1)**n*c*2**p:
>>> n, c, p = 1, 5, 0
>>> (-1)**n*c*2**p
-5
>>> Float((1, 5, 0))
-5.00000000000000
An actual mpf tuple also contains the number of bits in c as the last
element of the tuple:
>>> _._mpf_
(1, 5, 0, 3)
This is not needed for instantiation and is not the same thing as the
precision. The mpf tuple and the precision are two separate quantities
that Float tracks.
"""
__slots__ = ['_mpf_', '_prec']
is_rational = True
is_real = True
is_Float = True
def __new__(cls, num, prec=15):
if isinstance(num, string_types):
num = num.replace(' ', '')
if num.startswith('.') and len(num) > 1:
num = '0' + num
elif num.startswith('-.') and len(num) > 2:
num = '-0.' + num[2:]
elif isinstance(num, float) and num == 0:
num = '0'
elif isinstance(num, (SYMPY_INTS, Integer)):
num = str(num) # faster than mlib.from_int
elif isinstance(num, mpmath.mpf):
num = num._mpf_
if prec == '':
if not isinstance(num, string_types):
raise ValueError('The null string can only be used when '
'the number to Float is passed as a string or an integer.')
ok = None
if _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
ok = True
if ok is None:
raise ValueError('string-float not recognized: %s' % num)
else:
dps = prec
prec = mlib.libmpf.dps_to_prec(dps)
if isinstance(num, float):
_mpf_ = mlib.from_float(num, prec, rnd)
elif isinstance(num, str):
_mpf_ = mlib.from_str(num, prec, rnd)
elif isinstance(num, decimal.Decimal):
_mpf_ = mlib.from_str(str(num), prec, rnd)
elif isinstance(num, Rational):
_mpf_ = mlib.from_rational(num.p, num.q, prec, rnd)
elif isinstance(num, tuple) and len(num) in (3, 4):
if type(num[1]) is str:
# it's a hexadecimal (coming from a pickled object)
# assume that it is in standard form
num = list(num)
num[1] = long(num[1], 16)
_mpf_ = tuple(num)
else:
if not num[1] and len(num) == 4:
# handle normalization hack
return Float._new(num, prec)
else:
_mpf_ = mpmath.mpf(
S.NegativeOne**num[0]*num[1]*2**num[2])._mpf_
elif isinstance(num, Float):
_mpf_ = num._mpf_
if prec < num._prec:
_mpf_ = mpf_norm(_mpf_, prec)
else:
_mpf_ = mpmath.mpf(num)._mpf_
# special cases
if _mpf_ == _mpf_zero:
pass # we want a Float
elif _mpf_ == _mpf_nan:
return S.NaN
obj = Expr.__new__(cls)
obj._mpf_ = _mpf_
obj._prec = prec
return obj
@classmethod
def _new(cls, _mpf_, _prec):
# special cases
if _mpf_ == _mpf_zero:
return S.Zero # XXX this is different from Float which gives 0.0
elif _mpf_ == _mpf_nan:
return S.NaN
obj = Expr.__new__(cls)
obj._mpf_ = mpf_norm(_mpf_, _prec)
obj._prec = _prec
return obj
# mpz can't be pickled
def __getnewargs__(self):
return (mlib.to_pickable(self._mpf_),)
def __getstate__(self):
return {'_prec': self._prec}
def _hashable_content(self):
return (self._mpf_, self._prec)
def floor(self):
return C.Integer(int(mlib.to_int(
mlib.mpf_floor(self._mpf_, self._prec))))
def ceiling(self):
return C.Integer(int(mlib.to_int(
mlib.mpf_ceil(self._mpf_, self._prec))))
@property
def num(self):
return mpmath.mpf(self._mpf_)
def _as_mpf_val(self, prec):
rv = mpf_norm(self._mpf_, prec)
# uncomment to see failures
#if rv != self._mpf_ and self._prec == prec:
# print self._mpf_, rv
return rv
def _as_mpf_op(self, prec):
return self._mpf_, max(prec, self._prec)
def _eval_is_bounded(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return False
return True
def _eval_is_finite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf, _mpf_zero):
return False
return True
def _eval_is_integer(self):
return self._mpf_ == _mpf_zero
def _eval_is_negative(self):
if self._mpf_ == _mpf_ninf:
return True
if self._mpf_ == _mpf_inf:
return False
return self.num < 0
def _eval_is_positive(self):
if self._mpf_ == _mpf_inf:
return True
if self._mpf_ == _mpf_ninf:
return False
return self.num > 0
def _eval_is_zero(self):
return self._mpf_ == _mpf_zero
def __nonzero__(self):
return self._mpf_ != _mpf_zero
__bool__ = __nonzero__
def __neg__(self):
return Float._new(mlib.mpf_neg(self._mpf_), self._prec)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec)
return Number.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_sub(self._mpf_, rhs, prec, rnd), prec)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec)
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number) and other != 0:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_div(self._mpf_, rhs, prec, rnd), prec)
return Number.__div__(self, other)
__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Rational) and other.q != 1:
# calculate mod with Rationals, *then* round the result
return Float(Rational.__mod__(Rational(self), other),
prec_to_dps(self._prec))
if isinstance(other, Float):
r = self/other
if r == int(r):
prec = max([prec_to_dps(i)
for i in (self._prec, other._prec)])
return Float(0, prec)
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Float):
return other.__mod__(self)
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
"""
expt is symbolic object but not equal to 0, 1
(-p)**r -> exp(r*log(-p)) -> exp(r*(log(p) + I*Pi)) ->
-> p**r*(sin(Pi*r) + cos(Pi*r)*I)
"""
if self == 0:
if expt.is_positive:
return S.Zero
if expt.is_negative:
return Float('inf')
if isinstance(expt, Number):
if isinstance(expt, Integer):
prec = self._prec
return Float._new(
mlib.mpf_pow_int(self._mpf_, expt.p, prec, rnd), prec)
expt, prec = expt._as_mpf_op(self._prec)
self = self._mpf_
try:
y = mpf_pow(self, expt, prec, rnd)
return Float._new(y, prec)
except mlib.ComplexResult:
re, im = mlib.mpc_pow(
(self, _mpf_zero), (expt, _mpf_zero), prec, rnd)
return Float._new(re, prec) + \
Float._new(im, prec)*S.ImaginaryUnit
def __abs__(self):
return Float._new(mlib.mpf_abs(self._mpf_), self._prec)
def __int__(self):
if self._mpf_ == _mpf_zero:
return 0
return int(mlib.to_int(self._mpf_)) # uses round_fast = round_down
__long__ = __int__
def __eq__(self, other):
if isinstance(other, float):
# coerce to Float at same precision
o = Float(other)
try:
ompf = o._as_mpf_val(self._prec)
except ValueError:
return False
return bool(mlib.mpf_eq(self._mpf_, ompf))
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if isinstance(other, NumberSymbol):
if other.is_irrational:
return False
return other.__eq__(self)
if isinstance(other, Float):
return bool(mlib.mpf_eq(self._mpf_, other._mpf_))
if isinstance(other, Number):
# numbers should compare at the same precision;
# all _as_mpf_val routines should be sure to abide
# by the request to change the prec if necessary; if
# they don't, the equality test will fail since it compares
# the mpf tuples
ompf = other._as_mpf_val(self._prec)
return bool(mlib.mpf_eq(self._mpf_, ompf))
return False # Float != non-Number
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other
if isinstance(other, NumberSymbol):
return other.__le__(self)
if other.is_comparable:
other = other.evalf()
if isinstance(other, Number):
return bool(mlib.mpf_gt(self._mpf_, other._as_mpf_val(self._prec)))
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> ! <=
if isinstance(other, NumberSymbol):
return other.__lt__(self)
if other.is_comparable:
other = other.evalf()
if isinstance(other, Number):
return bool(mlib.mpf_ge(self._mpf_, other._as_mpf_val(self._prec)))
return Expr.__ge__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other
if isinstance(other, NumberSymbol):
return other.__ge__(self)
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number):
return bool(mlib.mpf_lt(self._mpf_, other._as_mpf_val(self._prec)))
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> ! <=
if isinstance(other, NumberSymbol):
return other.__gt__(self)
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number):
return bool(mlib.mpf_le(self._mpf_, other._as_mpf_val(self._prec)))
return Expr.__le__(self, other)
def __hash__(self):
return super(Float, self).__hash__()
def epsilon_eq(self, other, epsilon="1e-15"):
return abs(self - other) < Float(epsilon)
def _sage_(self):
import sage.all as sage
return sage.RealNumber(str(self))
# Add sympify converters
converter[float] = converter[decimal.Decimal] = Float
# this is here to work nicely in Sage
RealNumber = Float
class Rational(Number):
"""Represents integers and rational numbers (p/q) of any size.
Examples
========
>>> from sympy import Rational, nsimplify, S, pi
>>> Rational(3)
3
>>> Rational(1, 2)
1/2
Rational is unprejudiced in accepting input. If a float is passed, the
underlying value of the binary representation will be returned:
>>> Rational(.5)
1/2
>>> Rational(.2)
3602879701896397/18014398509481984
If the simpler representation of the float is desired then consider
limiting the denominator to the desired value or convert the float to
a string (which is roughly equivalent to limiting the denominator to
10**12):
>>> Rational(str(.2))
1/5
>>> Rational(.2).limit_denominator(10**12)
1/5
An arbitrarily precise Rational is obtained when a string literal is
passed:
>>> Rational("1.23")
123/100
>>> Rational('1e-2')
1/100
>>> Rational(".1")
1/10
>>> Rational('1e-2/3.2')
1/320
The conversion of other types of strings can be handled by
the sympify() function, and conversion of floats to expressions
or simple fractions can be handled with nsimplify:
>>> S('.[3]') # repeating digits in brackets
1/3
>>> S('3**2/10') # general expressions
9/10
>>> nsimplify(.3) # numbers that have a simple form
3/10
But if the input does not reduce to a literal Rational, an error will
be raised:
>>> Rational(pi)
Traceback (most recent call last):
...
TypeError: invalid input: pi
Low-level
---------
Access numerator and denominator as .p and .q:
>>> r = Rational(3, 4)
>>> r
3/4
>>> r.p
3
>>> r.q
4
Note that p and q return integers (not SymPy Integers) so some care
is needed when using them in expressions:
>>> r.p/r.q
0.75
See Also
========
sympify, sympy.simplify.simplify.nsimplify
"""
is_real = True
is_integer = False
is_rational = True
__slots__ = ['p', 'q']
is_Rational = True
@cacheit
def __new__(cls, p, q=None):
if q is None:
if isinstance(p, Rational):
return p
if isinstance(p, string_types):
p = p.replace(' ', '')
try:
# we might have a Float
neg_pow, digits, expt = decimal.Decimal(p).as_tuple()
p = [1, -1][neg_pow]*int("".join(str(x) for x in digits))
if expt > 0:
# TODO: this branch needs a test
return Rational(p*Pow(10, expt), 1)
return Rational(p, Pow(10, -expt))
except decimal.InvalidOperation:
f = regex.match('^([-+]?[0-9]+)/([0-9]+)$', p)
if f:
n, d = f.groups()
return Rational(int(n), int(d))
elif p.count('/') == 1:
p, q = p.split('/')
return Rational(Rational(p), Rational(q))
else:
pass # error will raise below
else:
try:
if isinstance(p, fractions.Fraction):
return Rational(p.numerator, p.denominator)
except NameError:
pass # error will raise below
if isinstance(p, (float, Float)):
return Rational(*_as_integer_ratio(p))
if not isinstance(p, SYMPY_INTS + (Rational,)):
raise TypeError('invalid input: %s' % p)
q = S.One
else:
p = Rational(p)
q = Rational(q)
if isinstance(q, Rational):
p *= q.q
q = q.p
if isinstance(p, Rational):
q *= p.q
p = p.p
# p and q are now integers
if q == 0:
if p == 0:
if _errdict["divide"]:
raise ValueError("Indeterminate 0/0")
else:
return S.NaN
if p < 0:
return S.NegativeInfinity
return S.Infinity
if q < 0:
q = -q
p = -p
n = igcd(abs(p), q)
if n > 1:
p //= n
q //= n
if q == 1:
return Integer(p)
if p == 1 and q == 2:
return S.Half
obj = Expr.__new__(cls)
obj.p = p
obj.q = q
#obj._args = (p, q)
return obj
def limit_denominator(self, max_denominator=1000000):
"""Closest Rational to self with denominator at most max_denominator.
>>> from sympy import Rational
>>> Rational('3.141592653589793').limit_denominator(10)
22/7
>>> Rational('3.141592653589793').limit_denominator(100)
311/99
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self.q <= max_denominator:
return self
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self.p, self.q
while True:
a = n//d
q2 = q0 + a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0 + a*p1, q2
n, d = d, n - a*d
k = (max_denominator - q0)//q1
bound1 = Rational(p0 + k*p1, q0 + k*q1)
bound2 = Rational(p1, q1)
if abs(bound2 - self) <= abs(bound1 - self):
return bound2
else:
return bound1
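    # Worked example for limit_denominator(10) on the doctest value above:
    # the loop yields the convergents 3 and 22/7; the next convergent
    # 333/106 exceeds the bound, so k = (10 - 1)//7 = 1 gives bound1 = 25/8
    # and bound2 = 22/7, of which 22/7 is closer (hence the doctest result).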
def __getnewargs__(self):
return (self.p, self.q)
def _hashable_content(self):
return (self.p, self.q)
def _eval_is_positive(self):
return self.p > 0
def _eval_is_zero(self):
return self.p == 0
def __neg__(self):
return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Rational):
return Rational(self.p*other.q + self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return other + self
else:
return Number.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Rational):
return Rational(self.p*other.q - self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return -other + self
else:
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Rational):
return Rational(self.p*other.p, self.q*other.q)
elif isinstance(other, Float):
return other*self
else:
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Rational):
return Rational(self.p*other.q, self.q*other.p)
elif isinstance(other, Float):
return self*(1/other)
else:
return Number.__div__(self, other)
__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Rational):
n = (self.p*other.q) // (other.p*self.q)
return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
if isinstance(other, Float):
# calculate mod with Rationals, *then* round the answer
return Float(self.__mod__(Rational(other)),
prec_to_dps(other._prec))
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Rational):
return Rational.__mod__(other, self)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
if isinstance(expt, Number):
if isinstance(expt, Float):
return self._eval_evalf(expt._prec)**expt
if expt.is_negative:
# (3/4)**-2 -> (4/3)**2
ne = -expt
if (ne is S.One):
return Rational(self.q, self.p)
if self.is_negative:
if expt.q != 1:
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(self.q, -self.p)**ne
else:
return S.NegativeOne**ne*Rational(self.q, -self.p)**ne
else:
return Rational(self.q, self.p)**ne
if expt is S.Infinity: # -oo already caught by test for negative
if self.p > self.q:
# (3/2)**oo -> oo
return S.Infinity
if self.p < -self.q:
# (-3/2)**oo -> oo + I*oo
return S.Infinity + S.Infinity*S.ImaginaryUnit
return S.Zero
if isinstance(expt, Integer):
# (4/3)**2 -> 4**2 / 3**2
return Rational(self.p**expt.p, self.q**expt.p)
if isinstance(expt, Rational):
if self.p != 1:
# (4/3)**(5/6) -> 4**(5/6)*3**(-5/6)
return Integer(self.p)**expt*Integer(self.q)**(-expt)
# as the above caught negative self.p, now self is positive
return Integer(self.q)**Rational(
expt.p*(expt.q - 1), expt.q) / \
Integer(self.q)**Integer(expt.p)
if self.is_negative and expt.is_even:
return (-self)**expt
return
def _as_mpf_val(self, prec):
return mlib.from_rational(self.p, self.q, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd))
def __abs__(self):
return Rational(abs(self.p), self.q)
def __int__(self):
p, q = self.p, self.q
if p < 0:
return -(-p//q)
return p//q
__long__ = __int__
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if isinstance(other, NumberSymbol):
if other.is_irrational:
return False
return other.__eq__(self)
if isinstance(other, Number):
if isinstance(other, Float):
return mlib.mpf_eq(self._as_mpf_val(other._prec), other._mpf_)
elif isinstance(other, Rational):
return self.p == other.p and self.q == other.q
return False
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <
if isinstance(other, NumberSymbol):
return other.__le__(self)
if other.is_real and other.is_number and not isinstance(other, Rational):
other = other.evalf()
if isinstance(other, Number):
if isinstance(other, Float):
return bool(mlib.mpf_gt(
self._as_mpf_val(other._prec), other._mpf_))
return bool(self.p*other.q > self.q*other.p)
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <=
if isinstance(other, NumberSymbol):
return other.__lt__(self)
if other.is_real and other.is_number and not isinstance(other, Rational):
other = other.evalf()
if isinstance(other, Number):
if isinstance(other, Float):
return bool(mlib.mpf_ge(
self._as_mpf_val(other._prec), other._mpf_))
return bool(self.p*other.q >= self.q*other.p)
return Expr.__ge__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <
if isinstance(other, NumberSymbol):
return other.__ge__(self)
if other.is_real and other.is_number and not isinstance(other, Rational):
other = other.evalf()
if isinstance(other, Number):
if isinstance(other, Float):
return bool(mlib.mpf_lt(
self._as_mpf_val(other._prec), other._mpf_))
return bool(self.p*other.q < self.q*other.p)
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <=
if isinstance(other, NumberSymbol):
return other.__gt__(self)
if other.is_real and other.is_number and not isinstance(other, Rational):
other = other.evalf()
if isinstance(other, Number):
if other is S.NaN:
return None
if isinstance(other, Float):
return bool(mlib.mpf_le(
self._as_mpf_val(other._prec), other._mpf_))
return bool(self.p*other.q <= self.q*other.p)
return Expr.__le__(self, other)
def __hash__(self):
return super(Rational, self).__hash__()
def factors(self, limit=None, use_trial=True, use_rho=False,
use_pm1=False, verbose=False, visual=False):
"""A wrapper to factorint which return factors of self that are
smaller than limit (or cheap to compute). Special methods of
factoring are disabled by default so that only trial division is used.
"""
from sympy.ntheory import factorint
f = factorint(self.p, limit=limit, use_trial=use_trial,
use_rho=use_rho, use_pm1=use_pm1,
verbose=verbose).copy()
f = defaultdict(int, f)
for p, e in factorint(self.q, limit=limit,
use_trial=use_trial,
use_rho=use_rho,
use_pm1=use_pm1,
verbose=verbose).items():
f[p] += -e
if len(f) > 1 and 1 in f:
del f[1]
if not f:
f = {1: 1}
if not visual:
return dict(f)
else:
if -1 in f:
f.pop(-1)
args = [S.NegativeOne]
else:
args = []
args.extend([Pow(*i, evaluate=False)
for i in sorted(f.items())])
return Mul(*args, evaluate=False)
@_sympifyit('other', NotImplemented)
def gcd(self, other):
if isinstance(other, Rational):
if other is S.Zero:
return other
return Rational(
Integer(igcd(self.p, other.p)),
Integer(ilcm(self.q, other.q)))
return Number.gcd(self, other)
@_sympifyit('other', NotImplemented)
def lcm(self, other):
if isinstance(other, Rational):
return Rational(
self.p*other.p//igcd(self.p, other.p),
igcd(self.q, other.q))
return Number.lcm(self, other)
def as_numer_denom(self):
return Integer(self.p), Integer(self.q)
def _sage_(self):
import sage.all as sage
return sage.Integer(self.p)/sage.Integer(self.q)
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import S
>>> (S(-3)/2).as_content_primitive()
(3/2, -1)
See docstring of Expr.as_content_primitive for more examples.
"""
if self:
if self.is_positive:
return self, S.One
return -self, S.NegativeOne
return S.One, self
# int -> Integer
_intcache = {}
# TODO move this tracing facility to sympy/core/trace.py ?
def _intcache_printinfo():
ints = sorted(_intcache.keys())
nhit = _intcache_hits
nmiss = _intcache_misses
if nhit == 0 and nmiss == 0:
print()
print('Integer cache statistic was not collected')
return
miss_ratio = float(nmiss) / (nhit + nmiss)
print()
print('Integer cache statistic')
print('-----------------------')
print()
print('#items: %i' % len(ints))
print()
print(' #hit #miss #total')
print()
print('%5i %5i (%7.5f %%) %5i' % (
nhit, nmiss, miss_ratio*100, nhit + nmiss)
)
print()
print(ints)
_intcache_hits = 0
_intcache_misses = 0
def int_trace(f):
import os
if os.getenv('SYMPY_TRACE_INT', 'no').lower() != 'yes':
return f
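    # Tracing is enabled by running with the environment variable
    # SYMPY_TRACE_INT=yes; cache hit/miss statistics are printed at
    # interpreter exit via the atexit hook registered below.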
def Integer_tracer(cls, i):
global _intcache_hits, _intcache_misses
try:
_intcache_hits += 1
return _intcache[i]
except KeyError:
_intcache_hits -= 1
_intcache_misses += 1
return f(cls, i)
# also we want to hook our _intcache_printinfo into sys.atexit
import atexit
atexit.register(_intcache_printinfo)
return Integer_tracer
class Integer(Rational):
q = 1
is_integer = True
is_Integer = True
__slots__ = ['p']
def _as_mpf_val(self, prec):
return mlib.from_int(self.p, prec)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(self._as_mpf_val(prec))
# TODO caching with decorator, but not to degrade performance
@int_trace
def __new__(cls, i):
if isinstance(i, string_types):
i = i.replace(' ', '')
# whereas we cannot, in general, make a Rational from an
# arbitrary expression, we can make an Integer unambiguously
# (except when a non-integer expression happens to round to
# an integer). So we proceed by taking int() of the input and
# let the int routines determine whether the expression can
# be made into an int or whether an error should be raised.
try:
ival = int(i)
except TypeError:
raise TypeError(
'Integer can only work with integer expressions.')
try:
return _intcache[ival]
except KeyError:
# We only work with well-behaved integer types. This converts, for
# example, numpy.int32 instances.
obj = Expr.__new__(cls)
obj.p = ival
_intcache[ival] = obj
return obj
def __getnewargs__(self):
return (self.p,)
# Arithmetic operations are here for efficiency
def __int__(self):
return self.p
__long__ = __int__
def __neg__(self):
return Integer(-self.p)
def __abs__(self):
if self.p >= 0:
return self
else:
return Integer(-self.p)
def __divmod__(self, other):
from .containers import Tuple
if isinstance(other, Integer):
return Tuple(*(divmod(self.p, other.p)))
else:
return Number.__divmod__(self, other)
def __rdivmod__(self, other):
from .containers import Tuple
if isinstance(other, integer_types):
return Tuple(*(divmod(other, self.p)))
else:
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
oname = type(other).__name__
sname = type(self).__name__
raise TypeError(msg % (oname, sname))
return Number.__divmod__(other, self)
# TODO make it decorator + bytecodehacks?
def __add__(self, other):
if isinstance(other, integer_types):
return Integer(self.p + other)
elif isinstance(other, Integer):
return Integer(self.p + other.p)
return Rational.__add__(self, other)
def __radd__(self, other):
if isinstance(other, integer_types):
return Integer(other + self.p)
return Rational.__add__(self, other)
def __sub__(self, other):
if isinstance(other, integer_types):
return Integer(self.p - other)
elif isinstance(other, Integer):
return Integer(self.p - other.p)
return Rational.__sub__(self, other)
def __rsub__(self, other):
if isinstance(other, integer_types):
return Integer(other - self.p)
return Rational.__rsub__(self, other)
def __mul__(self, other):
if isinstance(other, integer_types):
return Integer(self.p*other)
elif isinstance(other, Integer):
return Integer(self.p*other.p)
return Rational.__mul__(self, other)
def __rmul__(self, other):
if isinstance(other, integer_types):
return Integer(other*self.p)
return Rational.__mul__(self, other)
def __mod__(self, other):
if isinstance(other, integer_types):
return Integer(self.p % other)
elif isinstance(other, Integer):
return Integer(self.p % other.p)
return Rational.__mod__(self, other)
def __rmod__(self, other):
if isinstance(other, integer_types):
return Integer(other % self.p)
elif isinstance(other, Integer):
return Integer(other.p % self.p)
return Rational.__rmod__(self, other)
def __eq__(self, other):
if isinstance(other, integer_types):
return (self.p == other)
elif isinstance(other, Integer):
return (self.p == other.p)
return Rational.__eq__(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
if isinstance(other, integer_types):
return (self.p > other)
elif isinstance(other, Integer):
return (self.p > other.p)
return Rational.__gt__(self, other)
def __lt__(self, other):
if isinstance(other, integer_types):
return (self.p < other)
elif isinstance(other, Integer):
return (self.p < other.p)
return Rational.__lt__(self, other)
def __ge__(self, other):
if isinstance(other, integer_types):
return (self.p >= other)
elif isinstance(other, Integer):
return (self.p >= other.p)
return Rational.__ge__(self, other)
def __le__(self, other):
if isinstance(other, integer_types):
return (self.p <= other)
elif isinstance(other, Integer):
return (self.p <= other.p)
return Rational.__le__(self, other)
def __hash__(self):
return super(Integer, self).__hash__()
def __index__(self):
return self.p
########################################
def _eval_is_odd(self):
return bool(self.p % 2)
def _eval_power(self, expt):
"""
Tries to do some simplifications on self**expt
Returns None if no further simplifications can be done
When exponent is a fraction (so we have for example a square root),
we try to find a simpler representation by factoring the argument
up to factors of 2**15, e.g.
- sqrt(4) becomes 2
- sqrt(-4) becomes 2*I
- (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7)
Further simplification would require a special call to factorint on
the argument which is not done here for sake of speed.
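        Examples
        ========
        >>> from sympy import Integer, Rational, sqrt
        >>> sqrt(Integer(4))
        2
        >>> Integer(12)**Rational(1, 2)
        2*sqrt(3)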
"""
from sympy import perfect_power
if expt is S.Infinity:
if self.p > S.One:
return S.Infinity
# cases -1, 0, 1 are done in their respective classes
return S.Infinity + S.ImaginaryUnit*S.Infinity
if expt is S.NegativeInfinity:
return Rational(1, self)**S.Infinity
if not isinstance(expt, Number):
# simplify when expt is even
# (-2)**k --> 2**k
if self.is_negative and expt.is_even:
return (-self)**expt
if not isinstance(expt, Rational):
return
if expt is S.Half and self.is_negative:
# we extract I for this special case since everyone is doing so
return S.ImaginaryUnit*Pow(-self, expt)
if expt.is_negative:
# invert base and change sign on exponent
ne = -expt
if self.is_negative:
if expt.q != 1:
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(1, -self)**ne
else:
return (S.NegativeOne)**ne*Rational(1, -self)**ne
else:
return Rational(1, self.p)**ne
# see if base is a perfect root, sqrt(4) --> 2
x, xexact = integer_nthroot(abs(self.p), expt.q)
if xexact:
# if it's a perfect root we've finished
result = Integer(x**abs(expt.p))
if self.is_negative:
result *= S.NegativeOne**expt
return result
# The following is an algorithm where we collect perfect roots
# from the factors of base.
# if it's not an nth root, it still might be a perfect power
b_pos = int(abs(self.p))
p = perfect_power(b_pos)
        if p is not False:
            f_dict = {p[0]: p[1]}
        else:
            f_dict = Integer(self).factors(limit=2**15)
        # now process the dict of factors
        if self.is_negative:
            f_dict[-1] = 1
        out_int = 1  # integer part
        out_rad = 1  # extracted radicals
        sqr_int = 1
        sqr_gcd = 0
        sqr_dict = {}
        for prime, exponent in f_dict.items():
exponent *= expt.p
# remove multiples of expt.q: (2**12)**(1/10) -> 2*(2**2)**(1/10)
div_e, div_m = divmod(exponent, expt.q)
if div_e > 0:
out_int *= prime**div_e
if div_m > 0:
# see if the reduced exponent shares a gcd with e.q
# (2**2)**(1/10) -> 2**(1/5)
g = igcd(div_m, expt.q)
if g != 1:
out_rad *= Pow(prime, Rational(div_m//g, expt.q//g))
else:
sqr_dict[prime] = div_m
# identify gcd of remaining powers
for p, ex in sqr_dict.items():
if sqr_gcd == 0:
sqr_gcd = ex
else:
sqr_gcd = igcd(sqr_gcd, ex)
if sqr_gcd == 1:
break
for k, v in sqr_dict.items():
sqr_int *= k**(v//sqr_gcd)
if sqr_int == self and out_int == 1 and out_rad == 1:
result = None
else:
result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q))
return result
def _eval_is_prime(self):
from sympy.ntheory import isprime
return isprime(self)
def as_numer_denom(self):
return self, S.One
def __floordiv__(self, other):
return Integer(self.p // Integer(other).p)
def __rfloordiv__(self, other):
return Integer(Integer(other).p // self.p)
# Add sympify converters
for i_type in integer_types:
converter[i_type] = Integer
class RationalConstant(Rational):
"""
Abstract base class for rationals with specific behaviors
Derived classes must define class attributes p and q and should probably all
be singletons.
"""
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class IntegerConstant(Integer):
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class Zero(with_metaclass(Singleton, IntegerConstant)):
p = 0
q = 1
is_positive = False
is_negative = False
is_finite = False
is_zero = True
is_composite = False
__slots__ = []
@staticmethod
def __abs__():
return S.Zero
@staticmethod
def __neg__():
return S.Zero
def _eval_power(self, expt):
if expt.is_positive:
return self
if expt.is_negative:
return S.Infinity
if expt.is_real is False:
return S.NaN
# infinities are already handled with pos and neg
# tests above; now throw away leading numbers on Mul
# exponent
coeff, terms = expt.as_coeff_Mul()
if coeff.is_negative:
return S.Infinity**terms
if coeff is not S.One: # there is a Number to discard
return self**terms
def _eval_order(self, *symbols):
# Order(0,x) -> 0
return self
if sys.version_info[0] >= 3:
def __bool__(self):
return False
else:
def __nonzero__(self):
return False
class One(with_metaclass(Singleton, IntegerConstant)):
p = 1
q = 1
__slots__ = []
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.NegativeOne
def _eval_power(self, expt):
return self
def _eval_order(self, *symbols):
return
@staticmethod
def factors(limit=None, use_trial=True, use_rho=False, use_pm1=False,
verbose=False, visual=False):
if visual:
return S.One
return {1: 1}
class NegativeOne(with_metaclass(Singleton, IntegerConstant)):
p = -1
q = 1
__slots__ = []
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.One
def _eval_power(self, expt):
if expt.is_odd:
return S.NegativeOne
if expt.is_even:
return S.One
if isinstance(expt, Number):
if isinstance(expt, Float):
return Float(-1.0)**expt
if expt is S.NaN:
return S.NaN
if expt is S.Infinity or expt is S.NegativeInfinity:
return S.NaN
if expt is S.Half:
return S.ImaginaryUnit
if isinstance(expt, Rational):
if expt.q == 2:
return S.ImaginaryUnit**Integer(expt.p)
i, r = divmod(expt.p, expt.q)
if i:
return self**i*self**Rational(r, expt.q)
return
class Half(with_metaclass(Singleton, RationalConstant)):
p = 1
q = 2
__slots__ = []
@staticmethod
def __abs__():
return S.Half
class Infinity(with_metaclass(Singleton, Number)):
is_commutative = True
is_positive = True
is_bounded = False
is_finite = False
is_infinitesimal = False
is_integer = None
is_rational = None
is_odd = None
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\infty"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.NegativeInfinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf'):
return S.NaN
else:
return Float('inf')
else:
return S.Infinity
return NotImplemented
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.Infinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('inf'):
return S.NaN
else:
return Float('inf')
else:
return S.Infinity
return NotImplemented
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.Zero or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == 0:
return S.NaN
if other > 0:
return Float('inf')
else:
return Float('-inf')
else:
if other > 0:
return S.Infinity
else:
return S.NegativeInfinity
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf') or \
other == Float('inf'):
return S.NaN
elif other.is_nonnegative:
return Float('inf')
else:
return Float('-inf')
else:
if other >= 0:
return S.Infinity
else:
return S.NegativeInfinity
return NotImplemented
__truediv__ = __div__
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.NegativeInfinity
def _eval_power(self, expt):
"""
        ``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``oo ** nan`` ``nan``
        ``oo ** -p``     ``0``   ``p`` is a positive number
================ ======= ==============================
"""
if expt.is_positive:
return S.Infinity
if expt.is_negative:
return S.Zero
if expt is S.NaN:
return S.NaN
if expt.is_number:
return self**expt.evalf()
def _as_mpf_val(self, prec):
return mlib.finf
def _sage_(self):
import sage.all as sage
return sage.oo
def __hash__(self):
return super(Infinity, self).__hash__()
def __eq__(self, other):
return other is S.Infinity
def __ne__(self, other):
return other is not S.Infinity
def __lt__(self, other):
return False
def __le__(self, other):
return other is S.Infinity
def __gt__(self, other):
return other is not S.Infinity
def __ge__(self, other):
return True
def __mod__(self, other):
return S.NaN
__rmod__ = __mod__
oo = S.Infinity
class NegativeInfinity(with_metaclass(Singleton, Number)):
is_commutative = True
is_real = True
is_positive = False
is_bounded = False
is_finite = False
is_infinitesimal = False
is_integer = None
is_rational = None
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"-\infty"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.Infinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('inf'):
return Float('nan')
else:
return Float('-inf')
else:
return S.NegativeInfinity
return NotImplemented
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.NegativeInfinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf'):
return Float('nan')
else:
return Float('-inf')
else:
return S.NegativeInfinity
return NotImplemented
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.Zero or other is S.NaN:
return S.NaN
elif other.is_Float:
if other is S.NaN or other.is_zero:
return S.NaN
elif other.is_positive:
return Float('-inf')
else:
return Float('inf')
else:
if other.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf') or \
other == Float('inf') or \
other is S.NaN:
return S.NaN
elif other.is_nonnegative:
return Float('-inf')
else:
return Float('inf')
else:
if other >= 0:
return S.NegativeInfinity
else:
return S.Infinity
return NotImplemented
__truediv__ = __div__
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.Infinity
def _eval_power(self, expt):
"""
        ``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``(-oo) ** nan`` ``nan``
``(-oo) ** oo`` ``nan``
``(-oo) ** -oo`` ``nan``
``(-oo) ** e`` ``oo`` ``e`` is positive even integer
``(-oo) ** o`` ``-oo`` ``o`` is positive odd integer
================ ======= ==============================
"""
if isinstance(expt, Number):
if expt is S.NaN or \
expt is S.Infinity or \
expt is S.NegativeInfinity:
return S.NaN
if isinstance(expt, Integer) and expt.is_positive:
if expt.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
return S.NegativeOne**expt*S.Infinity**expt
def _as_mpf_val(self, prec):
return mlib.fninf
def _sage_(self):
import sage.all as sage
return -(sage.oo)
def __hash__(self):
return super(NegativeInfinity, self).__hash__()
def __eq__(self, other):
return other is S.NegativeInfinity
def __ne__(self, other):
return other is not S.NegativeInfinity
def __lt__(self, other):
return other is not S.NegativeInfinity
def __le__(self, other):
return True
def __gt__(self, other):
return False
def __ge__(self, other):
return other is S.NegativeInfinity
class NaN(with_metaclass(Singleton, Number)):
"""
Not a Number.
This represents the corresponding data type to floating point nan, which
is defined in the IEEE 754 floating point standard, and corresponds to the
Python ``float('nan')``.
NaN serves as a place holder for numeric values that are indeterminate,
    but not infinite. Most operations on nan produce another nan. Most
    indeterminate forms, such as ``0/0`` or ``oo - oo``, produce nan. Three
exceptions are ``0**0``, ``1**oo``, and ``oo**0``, which all produce ``1``
(this is consistent with Python's float).
NaN is a singleton, and can be accessed by ``S.NaN``, or can be imported
as ``nan``.
Examples
========
>>> from sympy import nan, S, oo
>>> nan is S.NaN
True
>>> oo - oo
nan
>>> nan + 1
nan
References
==========
- http://en.wikipedia.org/wiki/NaN
"""
is_commutative = True
is_real = None
is_rational = None
is_integer = None
is_comparable = False
is_finite = None
is_bounded = None
is_zero = None
is_prime = None
is_positive = None
is_negative = None
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\mathrm{NaN}"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __div__(self, other):
return self
__truediv__ = __div__
def _as_mpf_val(self, prec):
return _mpf_nan
def _sage_(self):
import sage.all as sage
return sage.NaN
def __hash__(self):
return super(NaN, self).__hash__()
def __eq__(self, other):
return other is S.NaN
def __ne__(self, other):
return other is not S.NaN
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __lt__(self, other):
return False
def __le__(self, other):
return False
nan = S.NaN
class ComplexInfinity(with_metaclass(Singleton, AtomicExpr)):
is_commutative = True
is_bounded = False
is_real = None
is_number = False
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\tilde{\infty}"
@staticmethod
def __abs__():
return S.Infinity
@staticmethod
def __neg__():
return S.ComplexInfinity
def _eval_power(self, expt):
if expt is S.ComplexInfinity:
return S.NaN
if isinstance(expt, Number):
if expt is S.Zero:
return S.NaN
else:
if expt.is_positive:
return S.ComplexInfinity
else:
return S.Zero
zoo = S.ComplexInfinity
class NumberSymbol(AtomicExpr):
is_commutative = True
is_bounded = True
is_finite = True
is_number = True
__slots__ = []
is_NumberSymbol = True
def __new__(cls):
return AtomicExpr.__new__(cls)
def approximation(self, number_cls):
""" Return an interval with number_cls endpoints
that contains the value of NumberSymbol.
If not implemented, then return None.
"""
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if self is other:
return True
if isinstance(other, Number) and self.is_irrational:
return False
return False # NumberSymbol != non-(Number|self)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <
if self is other:
return False
if isinstance(other, Number):
approx = self.approximation_interval(other.__class__)
if approx is not None:
l, u = approx
if other < l:
return False
if other > u:
return True
return self.evalf() < other
if other.is_real and other.is_number:
other = other.evalf()
return self.evalf() < other
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <=
if self is other:
return True
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number):
return self.evalf() <= other
return Expr.__le__(self, other)
def __gt__(self, other):
return (-self) < (-other)
def __ge__(self, other):
return (-self) <= (-other)
def __int__(self):
# subclass with appropriate return value
raise NotImplementedError
def __long__(self):
return self.__int__()
def __hash__(self):
return super(NumberSymbol, self).__hash__()
class Exp1(with_metaclass(Singleton, NumberSymbol)):
is_real = True
is_positive = True
is_negative = False # XXX Forces is_negative/is_nonnegative
is_irrational = True
__slots__ = []
def _latex(self, printer):
return r"e"
@staticmethod
def __abs__():
return S.Exp1
def __int__(self):
return 2
def _as_mpf_val(self, prec):
return mpf_e(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(2), Integer(3))
elif issubclass(number_cls, Rational):
pass
def _eval_power(self, expt):
return C.exp(expt)
def _eval_rewrite_as_sin(self):
I = S.ImaginaryUnit
return C.sin(I + S.Pi/2) - I*C.sin(I)
def _eval_rewrite_as_cos(self):
I = S.ImaginaryUnit
return C.cos(I) + I*C.cos(I + S.Pi/2)
def _sage_(self):
import sage.all as sage
return sage.e
E = S.Exp1
class Pi(with_metaclass(Singleton, NumberSymbol)):
is_real = True
is_positive = True
is_negative = False
is_irrational = True
__slots__ = []
def _latex(self, printer):
return r"\pi"
@staticmethod
def __abs__():
return S.Pi
def __int__(self):
return 3
def _as_mpf_val(self, prec):
return mpf_pi(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(3), Integer(4))
elif issubclass(number_cls, Rational):
return (Rational(223, 71), Rational(22, 7))
def _sage_(self):
import sage.all as sage
return sage.pi
pi = S.Pi
class GoldenRatio(with_metaclass(Singleton, NumberSymbol)):
is_real = True
is_positive = True
is_negative = False
is_irrational = True
__slots__ = []
def _latex(self, printer):
return r"\phi"
def __int__(self):
return 1
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
rv = mlib.from_man_exp(phi_fixed(prec + 10), -prec - 10)
return mpf_norm(rv, prec)
def _eval_expand_func(self, **hints):
from sympy import sqrt
return S.Half + S.Half*sqrt(5)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.One, Rational(2))
elif issubclass(number_cls, Rational):
pass
def _sage_(self):
import sage.all as sage
return sage.golden_ratio
class EulerGamma(with_metaclass(Singleton, NumberSymbol)):
is_real = True
is_positive = True
is_negative = False
is_irrational = None
__slots__ = []
def _latex(self, printer):
return r"\gamma"
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.libhyper.euler_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (S.Half, Rational(3, 5))
def _sage_(self):
import sage.all as sage
return sage.euler_gamma
class Catalan(with_metaclass(Singleton, NumberSymbol)):
is_real = True
is_positive = True
is_negative = False
is_irrational = None
__slots__ = []
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.catalan_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (Rational(9, 10), S.One)
def _sage_(self):
import sage.all as sage
return sage.catalan
class ImaginaryUnit(with_metaclass(Singleton, AtomicExpr)):
is_commutative = True
is_imaginary = True
is_bounded = True
is_finite = True
is_number = True
__slots__ = []
def _latex(self, printer):
return r"i"
@staticmethod
def __abs__():
return S.One
def _eval_evalf(self, prec):
return self
def _eval_conjugate(self):
return -S.ImaginaryUnit
def _eval_power(self, expt):
"""
        b is I = sqrt(-1)
        e is a symbolic object but not equal to 0 or 1
        I**r -> (-1)**(r/2) -> exp(r/2*Pi*I) -> cos(Pi*r/2) + sin(Pi*r/2)*I, r is decimal
I**0 mod 4 -> 1
I**1 mod 4 -> I
I**2 mod 4 -> -1
I**3 mod 4 -> -I
"""
if isinstance(expt, Number):
if isinstance(expt, Integer):
expt = expt.p % 4
if expt == 0:
return S.One
if expt == 1:
return S.ImaginaryUnit
if expt == 2:
return -S.One
return -S.ImaginaryUnit
return (S.NegativeOne)**(expt*S.Half)
return
def as_base_exp(self):
return S.NegativeOne, S.Half
def _sage_(self):
import sage.all as sage
return sage.I
I = S.ImaginaryUnit
try:
# fractions is only available for python 2.6+
import fractions
def sympify_fractions(f):
return Rational(f.numerator, f.denominator)
converter[fractions.Fraction] = sympify_fractions
except ImportError:
pass
try:
if HAS_GMPY == 2:
import gmpy2 as gmpy
elif HAS_GMPY == 1:
import gmpy
else:
raise ImportError
def sympify_mpz(x):
return Integer(long(x))
def sympify_mpq(x):
return Rational(long(x.numerator), long(x.denominator))
converter[type(gmpy.mpz(1))] = sympify_mpz
converter[type(gmpy.mpq(1, 2))] = sympify_mpq
except ImportError:
pass
def sympify_mpmath(x):
return Expr._from_mpmath(x, x.context.prec)
converter[mpnumeric] = sympify_mpmath
def sympify_complex(a):
real, imag = list(map(sympify, (a.real, a.imag)))
return real + S.ImaginaryUnit*imag
converter[complex] = sympify_complex
_intcache[0] = S.Zero
_intcache[1] = S.One
_intcache[-1] = S.NegativeOne
from .power import Pow, integer_nthroot
from .mul import Mul
Mul.identity = One()
from .add import Add
Add.identity = Zero()
| kmacinnis/sympy | sympy/core/numbers.py | Python | bsd-3-clause | 81,489 | 0.000577 |
from django.core import serializers
from rest_framework.response import Response
from django.http import JsonResponse
try:
    from urllib import quote_plus  # python 2
except ImportError:
    pass
try:
    from urllib.parse import quote_plus  # python 3
except ImportError:
    pass
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from comments.forms import CommentForm
from comments.models import Comment
from .forms import PostForm
from .models import Post
def post_create(request):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
form = PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
# message success
messages.success(request, "Successfully Created")
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"form": form,
}
return render(request, "post_form.html", context)
def post_detail(request, slug=None):
instance = get_object_or_404(Post, slug=slug)
if instance.publish > timezone.now().date() or instance.draft:
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
share_string = quote_plus(instance.content)
initial_data = {
"content_type": instance.get_content_type,
"object_id": instance.id
}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid() and request.user.is_authenticated():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get("content")
parent_obj = None
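        # An optional "parent_id" in the POST data marks this as a threaded
        # reply to an existing comment rather than a top-level comment.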
try:
parent_id = int(request.POST.get("parent_id"))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() == 1:
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(
user=request.user,
content_type=content_type,
object_id=obj_id,
content=content_data,
parent=parent_obj,
)
return HttpResponseRedirect(new_comment.content_object.get_absolute_url())
comments = instance.comments
context = {
"title": instance.title,
"instance": instance,
"share_string": share_string,
"comments": comments,
"comment_form": form,
}
return render(request, "post_detail.html", context)
def post_list(request):
today = timezone.now().date()
queryset_list = Post.objects.active() # .order_by("-timestamp")
if request.user.is_staff or request.user.is_superuser:
queryset_list = Post.objects.all()
query = request.GET.get("q")
if query:
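        # Match the search term against title, content, or author first/last
        # name; distinct() collapses duplicate rows when multiple Q clauses
        # match the same post.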
queryset_list = queryset_list.filter(
Q(title__icontains=query) |
Q(content__icontains=query) |
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
    paginator = Paginator(queryset_list, 8)  # Show 8 posts per page
page_request_var = "page"
page = request.GET.get(page_request_var)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
queryset = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
queryset = paginator.page(paginator.num_pages)
context = {
"object_list": queryset,
"title": "List",
"page_request_var": page_request_var,
"today": today,
}
return render(request, "post_list.html", context)
def post_update(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
form = PostForm(request.POST or None,
request.FILES or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item</a> Saved",
extra_tags='html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": instance.title,
"instance": instance,
"form": form,
}
return render(request, "post_form.html", context)
def post_delete(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
instance.delete()
messages.success(request, "Successfully deleted")
return redirect("posts:list")
| our-iot-project-org/pingow-web-service | src/posts/views.py | Python | mit | 5,217 | 0.000767 |
import eventlet
import gettext
import sys
from staccato.common import config
import staccato.openstack.common.wsgi as os_wsgi
import staccato.openstack.common.pastedeploy as os_pastedeploy
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
gettext.install('staccato', unicode=1)
def fail(returncode, e):
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(returncode)
def main():
try:
conf = config.get_config_object()
paste_file = conf.find_file(conf.paste_deploy.config_file)
wsgi_app = os_pastedeploy.paste_deploy_app(paste_file,
'staccato-api',
conf)
server = os_wsgi.Service(wsgi_app, conf.bind_port)
server.start()
server.wait()
except RuntimeError as e:
fail(1, e)
if __name__ == '__main__':
    main()
| buzztroll/staccato | staccato/cmd/api.py | Python | apache-2.0 | 899 | 0.001112 |
"""Utility methods for handling Entities.
These methods can be shared between entity generation (invoked through
the Entities class) at the start of prod data generation, and between
post processing methods (such as adding edges between family members
and neighbours).
"""
import codecs
import collections
import re
def get_surnames():
"""Retrieves a set of surnames from a provided data file."""
path_surnames = 'prod_generation/surnames.txt'
with codecs.open(path_surnames, 'r') as f:
return set(line.strip().lower() for line in f.readlines())
def get_academic_titles_parser():
"""Returns a regular expression for parsing academic titles."""
# Read list of academic titles from the data file.
path_titles = 'prod_generation/academic_titles.txt'
with codecs.open(path_titles, 'r') as f:
titles = set(line.strip() for line in f.readlines())
# Compile the regular expression.
re_titles = "|".join(titles)
re_name = ("^(?P<titles_pre>((%s)\.?( |,))*)"
"(?P<name_clean>.*?)"
"(?P<titles_suffix>(( |,)*(%s)\.?)*)$" % (
re_titles, re_titles))
return re.compile(re_name)
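# Illustrative example (assuming "Ing" and "PhD" are listed in the titles
# file): the compiled pattern splits "Ing. Jan Novak, PhD." into
# titles_pre="Ing. ", name_clean="Jan Novak" and titles_suffix=", PhD.".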
# NamedTuple for parsed entity names:
# - `titles_prefix` is a string of academic titles detected before the name
# - `firstnames` is a non-empty list of given names
# - `surname` is a string
# - `titles_suffix` is a string of academic titles detected after the name
ParsedName = collections.namedtuple(
"ParsedName",
["titles_prefix", "firstnames", "surname", "titles_suffix"]
)
def parse_entity_name(entity_name, titles_parser, surnames,
verbose=False):
"""Parses an entity name into a ParsedName, or returns None."""
if verbose:
print('entity_name = |%s|' % (entity_name))
# Remove newlines from `entity_name`:
entity_name = entity_name.replace("\n", " ")
# Trim name of Zivnost, followed by first occurrence of (' - ').
p = entity_name.find(' - ')
if (p > 0):
name = entity_name[:p]
else:
name = entity_name
if verbose:
print('name = |%s|' % (name))
# Trim academic titles from the start and end of the name.
match = titles_parser.match(name).groupdict()
    titles_pre = match.get('titles_pre') or ''
    titles_suf = match.get('titles_suffix') or ''
name_clean = match['name_clean']
if verbose:
print('name_clean = |%s|' % (name_clean))
# Split cleaned name on spaces (it should now be a list of
# firstnames, followed by a surname).
names = name_clean.split()
# Lowercase the names, so that we get case-insensitive matching on
# both surnames and firstnames downstream.
names = [name.lower() for name in names]
# Strict matching: Check that last name is a surname
# if len(names) >= 2 and names[-1] in surnames:
# return {
# 'titles_pre': titles_pre,
# 'firstnames': names[:-1],
# 'surname': names[-1],
# 'titles_suf': titles_suf,
# }
# Less conservative matching: Find the last token that is a surname,
# and take the rest before it as given names
i = len(names) - 1
while (i >= 1) and (names[i] not in surnames):
i -= 1
if i >= 1:
return ParsedName(
titles_prefix=titles_pre,
firstnames=names[:i],
surname=names[i],
titles_suffix=titles_suf
)
else:
if verbose:
print('Parse failed')
return None
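# Usage sketch (the example name is illustrative and assumes 'Ing' is in
# academic_titles.txt and 'novak' in surnames.txt):
#
#   surnames = get_surnames()
#   titles_parser = get_academic_titles_parser()
#   parsed = parse_entity_name('Ing. Jan Novak - Zivnost', titles_parser,
#                              surnames)
#   # -> ParsedName with firstnames=['jan'], surname='novak'; None is
#   # returned when no surname can be identified.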
| verejnedigital/verejne.digital | data/prod_generation/entity_tools.py | Python | apache-2.0 | 3,557 | 0.000562 |
"""PyDbLite.py adapted for MySQL backend
Differences with PyDbLite:
- pass the connection to the MySQL db as argument to Base()
- in create(), field definitions must specify a type
- no index
- the Base() instance has a cursor attribute, so that SQL requests
can be executed:
db.cursor.execute(an_sql_request)
result = db.cursor.fetchall()
Fields must be declared
Syntax :
from PyDbLite.MySQL import Base
import MySQLdb
# connect to a MySQL server and use database "test"
connection = MySQLdb.connect("localhost","root","admin")
connection.cursor().execute("USE test")
# pass the connection as argument to Base creation
db = Base('dummy',connection)
# create new base with field names
db.create(('name','TEXT'),('age','INTEGER'),('size','REAL'))
# existing base
db.open()
# insert new record
db.insert(name='homer',age=23,size=1.84)
# records are dictionaries with a unique integer key __id__
# selection by list comprehension
res = [ r for r in db if 30 > r['age'] >= 18 and r['size'] < 2 ]
# or generator expression
for r in (r for r in db if r['name'] in ('homer','marge') ):
# simple selection (equality test)
res = db(age=30)
# delete a record or a list of records
db.delete(one_record)
db.delete(list_of_records)
# delete a record by its id
del db[rec_id]
# direct access by id
record = db[rec_id] # the record such that record['__id__'] == rec_id
# update
db.update(record,age=24)
# add and drop fields
db.add_field('new_field')
db.drop_field('name')
# save changes on disk
db.commit()
"""
import os
import datetime
import cPickle
import bisect
import MySQLdb
# compatibility with Python 2.3
try:
set([])
except NameError:
from sets import Set as set
class Base:
def __init__(self,basename,connection):
"""basename = name of the PyDbLite database = a MySQL table
connection = a connection to a MySQL database"""
self.name = basename
self.conn = connection
self.cursor = connection.cursor()
self._iterating = False
def create(self,*fields,**kw):
"""Create a new base with specified field names
A keyword argument mode can be specified ; it is used if a file
with the base name already exists
- if mode = 'open' : open the existing base, ignore the fields
- if mode = 'override' : erase the existing base and create a
new one with the specified fields"""
self.mode = mode = kw.get("mode",None)
if self._table_exists():
if mode == "override":
self.cursor.execute("DROP TABLE %s" %self.name)
elif mode == "open":
return self.open()
else:
raise IOError,"Base %s already exists" %self.name
self.fields = [ f[0] for f in fields ]
self.all_fields = ["__id__","__version__"]+self.fields
_types = ["INTEGER PRIMARY KEY AUTO_INCREMENT","INTEGER"] + \
[f[1] for f in fields]
f_string = [ "%s %s" %(f,t) for (f,t) in zip(self.all_fields,_types)]
sql = "CREATE TABLE %s (%s)" %(self.name,
",".join(f_string))
self.cursor.execute(sql)
return self
def open(self):
"""Open an existing database"""
if self._table_exists():
self.mode = "open"
self._get_table_info()
return self
# table not found
raise IOError,"Table %s doesn't exist" %self.name
def _table_exists(self):
"""Database-specific method to see if the table exists"""
self.cursor.execute("SHOW TABLES")
for table in self.cursor.fetchall():
if table[0].lower() == self.name.lower():
return True
return False
def _get_table_info(self):
"""Database-specific method to get field names"""
self.cursor.execute('DESCRIBE %s' %self.name)
self.all_fields = [ f[0] for f in self.cursor.fetchall() ]
self.fields = self.all_fields[2:]
    def commit(self):
        """Commit pending changes on the underlying MySQL connection."""
        self.conn.commit()
def insert(self,*args,**kw):
"""Insert a record in the database
Parameters can be positional or keyword arguments. If positional
they must be in the same order as in the create() method
If some of the fields are missing the value is set to None
Returns the record identifier
"""
if args:
kw = dict([(f,arg) for f,arg in zip(self.all_fields[2:],args)])
kw["__version__"] = 0
vals = self._make_sql_params(kw)
sql = "INSERT INTO %s SET %s" %(self.name,",".join(vals))
res = self.cursor.execute(sql)
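        # LAST_INSERT_ID() is connection-scoped in MySQL, so the id fetched
        # below belongs to this INSERT even with concurrent writers.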
self.cursor.execute("SELECT LAST_INSERT_ID()")
__id__ = self.cursor.fetchone()[0]
return __id__
def delete(self,removed):
"""Remove a single record, or the records in an iterable
Before starting deletion, test if all records are in the base
and don't have twice the same __id__
Return the number of deleted items
"""
if isinstance(removed,dict):
# remove a single record
removed = [removed]
else:
# convert iterable into a list (to be able to sort it)
removed = [ r for r in removed ]
if not removed:
return 0
_ids = [ r['__id__'] for r in removed ]
_ids.sort()
sql = "DELETE FROM %s WHERE __id__ IN (%s)" %(self.name,
",".join([str(_id) for _id in _ids]))
self.cursor.execute(sql)
return len(removed)
def update(self,record,**kw):
"""Update the record with new keys and values"""
# increment version number
kw["__version__"] = record["__version__"] + 1
vals = self._make_sql_params(kw)
sql = "UPDATE %s SET %s WHERE __id__=%s" %(self.name,
",".join(vals),record["__id__"])
self.cursor.execute(sql)
def _make_sql_params(self,kw):
"""Make a list of strings to pass to an SQL statement
from the dictionary kw with Python types"""
vals = []
for k,v in kw.iteritems():
vals.append('%s=%s' %(k,self._conv(v)))
return vals
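    # NOTE: values are escaped and interpolated directly into the SQL text
    # (see _conv below); MySQLdb's parameterized cursor.execute(sql, args)
    # would be a safer alternative for untrusted input.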
def _conv(self,v):
if isinstance(v,str):
v = v.replace('"','""')
return '"%s"' %v
elif isinstance(v,datetime.date):
return v.strftime("%Y%m%d")
else:
return v
def _make_record(self,row):
"""Make a record dictionary from the result of a fetch_"""
return dict(zip(self.all_fields,row))
def add_field(self,field,default=None):
fname,ftype = field
if fname in self.all_fields:
raise ValueError,'Field "%s" already defined' %fname
sql = "ALTER TABLE %s ADD %s %s" %(self.name,fname,ftype)
if default is not None:
sql += " DEFAULT %s" %self._conv(default)
self.cursor.execute(sql)
self.commit()
self._get_table_info()
def drop_field(self,field):
if field in ["__id__","__version__"]:
raise ValueError,"Can't delete field %s" %field
if not field in self.fields:
raise ValueError,"Field %s not found in base" %field
sql = "ALTER TABLE %s DROP %s" %(self.name,field)
self.cursor.execute(sql)
self._get_table_info()
def __call__(self,**kw):
"""Selection by field values
db(key=value) returns the list of records where r[key] = value"""
for key in kw:
if not key in self.all_fields:
raise ValueError,"Field %s not in the database" %key
vals = self._make_sql_params(kw)
sql = "SELECT * FROM %s WHERE %s" %(self.name,",".join(vals))
self.cursor.execute(sql)
return [self._make_record(row) for row in self.cursor.fetchall() ]
def __getitem__(self,record_id):
"""Direct access by record id"""
sql = "SELECT * FROM %s WHERE __id__=%s" %(self.name,record_id)
self.cursor.execute(sql)
res = self.cursor.fetchone()
if res is None:
raise IndexError,"No record at index %s" %record_id
else:
return self._make_record(res)
    def __len__(self):
        self.cursor.execute("SELECT COUNT(*) FROM %s" %self.name)
        return self.cursor.fetchone()[0]
def __delitem__(self,record_id):
"""Delete by record id"""
self.delete(self[record_id])
def __iter__(self):
"""Iteration on the records"""
self.cursor.execute("SELECT * FROM %s" %self.name)
results = [ self._make_record(r) for r in self.cursor.fetchall() ]
return iter(results)
if __name__ == '__main__':
connection = MySQLdb.connect("localhost","root","admin")
cursor = connection.cursor()
cursor.execute("USE test")
db = Base("pydbtest",connection).create(("name","TEXT"),("age","INTEGER"),
("size","REAL"),("birth","DATE"),
mode="override")
try:
db.add_field(("name","TEXT"))
except:
pass
import random
import datetime
names = ['pierre','claire','simon','camille','jean',
'florence','marie-anne']
#db = Base('PyDbLite_test')
#db.create('name','age','size','birth',mode="override")
for i in range(1000):
db.insert(name=random.choice(names),
age=random.randint(7,47),size=random.uniform(1.10,1.95),
birth=datetime.date(1990,10,10))
db.commit()
print 'Record #20 :',db[20]
print '\nRecords with age=30 :'
for rec in [ r for r in db if r["age"]==30 ]:
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print "\nSame with __call__"
# same with select
for rec in db(age=30):
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print [ r for r in db if r["age"]==30 ] == db(age=30)
raw_input()
db.insert(name=random.choice(names)) # missing fields
print '\nNumber of records with 30 <= age < 33 :',
print sum([1 for r in db if 33 > r['age'] >= 30])
print db.delete([])
d = db.delete([r for r in db if 32> r['age'] >= 30 and r['name']==u'pierre'])
print "\nDeleting %s records with name == 'pierre' and 30 <= age < 32" %d
print '\nAfter deleting records '
for rec in db(age=30):
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print '\n',sum([1 for r in db]),'records in the database'
print '\nMake pierre uppercase for age > 27'
for record in ([r for r in db if r['name']=='pierre' and r['age'] >27]) :
db.update(record,name="Pierre")
print len([r for r in db if r['name']=='Pierre']),'Pierre'
print len([r for r in db if r['name']=='pierre']),'pierre'
print len([r for r in db if r['name'] in ['pierre','Pierre']]),'p/Pierre'
print 'is unicode :',isinstance(db[20]['name'],unicode)
db.commit()
db.open()
print '\nSame operation after commit + open'
print len([r for r in db if r['name']=='Pierre']),'Pierre'
print len([r for r in db if r['name']=='pierre']),'pierre'
print len([r for r in db if r['name'] in ['pierre','Pierre']]),'p/Pierre'
print 'is unicode :',isinstance(db[20]['name'],unicode)
print "\nDeleting record #21"
del db[21]
if not 21 in db:
print "record 21 removed"
print db[22]
db.drop_field('name')
print db[22]
db.add_field(('adate',"DATE"),datetime.date.today())
print db[22]
| leleobhz/scripts | python/chat_back_machine/engine/PyDbLite/MySQL.py | Python | gpl-2.0 | 11,904 | 0.015793 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import mock
from oslo_concurrency import processutils as putils
from oslo_utils import timeutils
from cinder import context
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.targets import tgt
from cinder.volume import utils as vutils
class TestTgtAdmDriver(test.TestCase):
def setUp(self):
super(TestTgtAdmDriver, self).setUp()
self.configuration = conf.Configuration(None)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.iscsi_ip_address = '10.9.8.7'
self.fake_volumes_dir = tempfile.mkdtemp()
self.fake_id_1 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
self.fake_id_2 = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'
self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(),
configuration=self.configuration)
self.testvol_1 =\
{'project_id': self.fake_id_1,
'name': 'testvol',
'size': 1,
'id': self.fake_id_2,
'volume_type_id': None,
'provider_location': '10.9.8.7:3260 '
'iqn.2010-10.org.openstack:'
'volume-%s 0' % self.fake_id_2,
'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
'c76370d66b 2FE0CQ8J196R',
'provider_geometry': '512 512',
'created_at': timeutils.utcnow(),
'host': 'fake_host@lvm#lvm'}
self.expected_iscsi_properties = \
{'auth_method': 'CHAP',
'auth_password': '2FE0CQ8J196R',
'auth_username': 'stack-1-a60e2611875f40199931f2c76370d66b',
'encrypted': False,
'logical_block_size': '512',
'physical_block_size': '512',
'target_discovered': False,
'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
self.fake_id_2,
'target_lun': 0,
'target_portal': '10.9.8.7:3260',
'volume_id': self.fake_id_2}
self.fake_iscsi_scan =\
('Target 1: iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa
' System information:\n'
' Driver: iscsi\n'
' State: ready\n'
' I_T nexus information:\n'
' LUN information:\n'
' LUN: 0\n'
' Type: controller\n'
' SCSI ID: IET 00010000\n'
' SCSI SN: beaf10\n'
' Size: 0 MB, Block size: 1\n'
' Online: Yes\n'
' Removable media: No\n'
' Prevent removal: No\n'
' Readonly: No\n'
' SWP: No\n'
' Thin-provisioning: No\n'
' Backing store type: null\n'
' Backing store path: None\n'
' Backing store flags:\n'
' LUN: 1\n'
' Type: disk\n'
' SCSI ID: IET 00010001\n'
' SCSI SN: beaf11\n'
' Size: 1074 MB, Block size: 512\n'
' Online: Yes\n'
' Removable media: No\n'
' Prevent removal: No\n'
' Readonly: No\n'
' SWP: No\n'
' Thin-provisioning: No\n'
' Backing store type: rdwr\n'
' Backing store path: /dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa
' Backing store flags:\n'
' Account information:\n'
' mDVpzk8cZesdahJC9h73\n'
' ACL information:\n'
' ALL"\n')
def fake_safe_get(self, value):
if value == 'volumes_dir':
return self.fake_volumes_dir
elif value == 'iscsi_protocol':
return self.configuration.iscsi_protocol
def test_iscsi_protocol(self):
self.assertEqual(self.target.iscsi_protocol, 'iscsi')
def test_get_target(self):
def _fake_execute(*args, **kwargs):
return self.fake_iscsi_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute)
self.assertEqual('1',
self.target._get_target('iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45'))
def test_verify_backing_lun(self):
def _fake_execute(*args, **kwargs):
return self.fake_iscsi_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute)
self.assertTrue(self.target._verify_backing_lun(
'iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45', '1'))
# Test the failure case
bad_scan = self.fake_iscsi_scan.replace('LUN: 1', 'LUN: 3')
def _fake_execute_bad_lun(*args, **kwargs):
return bad_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute_bad_lun)
self.assertFalse(self.target._verify_backing_lun(
'iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45', '1'))
def test_get_target_chap_auth(self):
persist_file =\
'<target iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45>\n'\
' backing-store /dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45\n'\
' driver iscsi\n'\
' incominguser otzLy2UYbYfnP4zXLG5z 234Zweo38VGBBvrpK9nt\n'\
' write-cache on\n'\
'</target>'
test_vol =\
'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
with open(os.path.join(self.fake_volumes_dir,
test_vol.split(':')[1]),
'wb') as tmp_file:
tmp_file.write(persist_file)
expected = ('otzLy2UYbYfnP4zXLG5z', '234Zweo38VGBBvrpK9nt')
self.assertEqual(expected, self.target._get_target_chap_auth(test_vol))
def test_create_iscsi_target(self):
def _fake_execute(*args, **kwargs):
return '', ''
self.stubs.Set(utils,
'execute',
_fake_execute)
self.stubs.Set(self.target,
'_get_target',
lambda x: 1)
self.stubs.Set(self.target,
'_verify_backing_lun',
lambda x, y: True)
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
self.assertEqual(
1,
self.target.create_iscsi_target(
test_vol,
1,
0,
self.fake_volumes_dir))
def test_create_iscsi_target_already_exists(self):
def _fake_execute(*args, **kwargs):
if 'update' in args:
raise putils.ProcessExecutionError(
exit_code=1,
stdout='',
stderr='target already exists',
                    cmd='tgtadm --lld iscsi --op show --mode target')
else:
return 'fake out', 'fake err'
self.stubs.Set(utils,
'execute',
_fake_execute)
self.stubs.Set(self.target,
'_get_target',
lambda x: 1)
self.stubs.Set(self.target,
'_verify_backing_lun',
lambda x, y: True)
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
self.assertEqual(
1,
self.target.create_iscsi_target(
test_vol,
1,
0,
self.fake_volumes_dir))
def test_create_export(self):
def _fake_execute(*args, **kwargs):
return '', ''
self.stubs.Set(utils,
'execute',
_fake_execute)
self.stubs.Set(self.target,
'_get_target',
lambda x: 1)
self.stubs.Set(self.target,
'_verify_backing_lun',
lambda x, y: True)
self.stubs.Set(self.target,
'_get_target_chap_auth',
lambda x: None)
self.stubs.Set(vutils,
'generate_username',
lambda: 'QZJbisGmn9AL954FNF4D')
self.stubs.Set(vutils,
'generate_password',
lambda: 'P68eE7u9eFqDGexd28DQ')
expected_result = {'location': '10.9.8.7:3260,1 '
'iqn.2010-10.org.openstack:testvol 1',
'auth': 'CHAP '
'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'}
ctxt = context.get_admin_context()
self.assertEqual(expected_result,
self.target.create_export(ctxt,
self.testvol_1,
self.fake_volumes_dir))
self.stubs.Set(self.target,
'_get_target_chap_auth',
lambda x: ('otzLy2UYbYfnP4zXLG5z',
'234Zweo38VGBBvrpK9nt'))
expected_result['auth'] = ('CHAP '
'otzLy2UYbYfnP4zXLG5z 234Zweo38VGBBvrpK9nt')
self.assertEqual(expected_result,
self.target.create_export(ctxt,
self.testvol_1,
self.fake_volumes_dir))
def test_ensure_export(self):
ctxt = context.get_admin_context()
with mock.patch.object(self.target, 'create_iscsi_target'):
self.target.ensure_export(ctxt,
self.testvol_1,
self.fake_volumes_dir)
self.target.create_iscsi_target.assert_called_once_with(
'iqn.2010-10.org.openstack:testvol',
1, 0, self.fake_volumes_dir, None,
iscsi_write_cache='on',
check_exit_code=False,
old_name=None)
| Accelerite/cinder | cinder/tests/targets/test_tgt_driver.py | Python | apache-2.0 | 11,627 | 0.000172 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import os
import h5py
import json
import numpy as np
import tensorflow as tf
def log_sum_exp(x_k):
"""Computes log \sum exp in a numerically stable way.
log ( sum_i exp(x_i) )
log ( sum_i exp(x_i - m + m) ), with m = max(x_i)
log ( sum_i exp(x_i - m)*exp(m) )
log ( sum_i exp(x_i - m) + m
Args:
x_k - k -dimensional list of arguments to log_sum_exp.
Returns:
log_sum_exp of the arguments.
"""
m = tf.reduce_max(x_k)
x1_k = x_k - m
u_k = tf.exp(x1_k)
z = tf.reduce_sum(u_k)
return tf.log(z) + m
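# Example: for x_k = [1000., 1000.] this returns ~1000.693 (1000 + log 2),
# whereas the naive tf.log(tf.reduce_sum(tf.exp(x_k))) overflows exp() to
# inf in float32.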
def linear(x, out_size, do_bias=True, alpha=1.0, identity_if_possible=False,
normalized=False, name=None, collections=None):
"""Linear (affine) transformation, y = x W + b, for a variety of
configurations.
Args:
x: input The tensor to tranformation.
out_size: The integer size of non-batch output dimension.
do_bias (optional): Add a learnable bias vector to the operation.
alpha (optional): A multiplicative scaling for the weight initialization
of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}.
identity_if_possible (optional): just return identity,
if x.shape[1] == out_size.
normalized (optional): Option to divide out by the norms of the rows of W.
name (optional): The name prefix to add to variables.
collections (optional): List of additional collections. (Placed in
tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.)
Returns:
In the equation, y = x W + b, returns the tensorflow op that yields y.
"""
in_size = int(x.get_shape()[1]) # from Dimension(10) -> 10
stddev = alpha/np.sqrt(float(in_size))
mat_init = tf.random_normal_initializer(0.0, stddev)
wname = (name + "/W") if name else "/W"
if identity_if_possible and in_size == out_size:
# Sometimes linear layers are nothing more than size adapters.
return tf.identity(x, name=(wname+'_ident'))
W,b = init_linear(in_size, out_size, do_bias=do_bias, alpha=alpha,
normalized=normalized, name=name, collections=collections)
if do_bias:
return tf.matmul(x, W) + b
else:
return tf.matmul(x, W)
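# Usage sketch (shapes illustrative): with x of shape [batch, 64],
#   y = linear(x, 128, name='readout')
# creates variables readout/W with shape [64, 128] (and readout/b with
# shape [1, 128]) and returns a [batch, 128] tensor.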
def init_linear(in_size, out_size, do_bias=True, mat_init_value=None,
bias_init_value=None, alpha=1.0, identity_if_possible=False,
normalized=False, name=None, collections=None):
"""Linear (affine) transformation, y = x W + b, for a variety of
configurations.
Args:
in_size: The integer size of the non-batc input dimension. [(x),y]
out_size: The integer size of non-batch output dimension. [x,(y)]
do_bias (optional): Add a learnable bias vector to the operation.
mat_init_value (optional): numpy constant for matrix initialization, if None
, do random, with additional parameters.
alpha (optional): A multiplicative scaling for the weight initialization
of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}.
identity_if_possible (optional): just return identity,
if x.shape[1] == out_size.
normalized (optional): Option to divide out by the norms of the rows of W.
name (optional): The name prefix to add to variables.
collections (optional): List of additional collections. (Placed in
tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.)
Returns:
In the equation, y = x W + b, returns the pair (W, b).
"""
if mat_init_value is not None and mat_init_value.shape != (in_size, out_size):
raise ValueError(
'Provided mat_init_value must have shape [%d, %d].'%(in_size, out_size))
if bias_init_value is not None and bias_init_value.shape != (1,out_size):
raise ValueError(
'Provided bias_init_value must have shape [1,%d].'%(out_size,))
if mat_init_value is None:
stddev = alpha/np.sqrt(float(in_size))
mat_init = tf.random_normal_initializer(0.0, stddev)
wname = (name + "/W") if name else "/W"
if identity_if_possible and in_size == out_size:
return (tf.constant(np.eye(in_size).astype(np.float32)),
tf.zeros(in_size))
  # Note the use of get_variable vs. tf.Variable. This is because get_variable
  # does not allow initializing the variable with a value.
if normalized:
w_collections = [tf.GraphKeys.GLOBAL_VARIABLES, "norm-variables"]
if collections:
w_collections += collections
if mat_init_value is not None:
w = tf.Variable(mat_init_value, name=wname, collections=w_collections)
else:
w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
collections=w_collections)
w = tf.nn.l2_normalize(w, dim=0) # x W, so xW_j = \sum_i x_bi W_ij
else:
w_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
if collections:
w_collections += collections
if mat_init_value is not None:
w = tf.Variable(mat_init_value, name=wname, collections=w_collections)
else:
w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
collections=w_collections)
b = None
if do_bias:
b_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
if collections:
b_collections += collections
bname = (name + "/b") if name else "/b"
if bias_init_value is None:
b = tf.get_variable(bname, [1, out_size],
initializer=tf.zeros_initializer(),
collections=b_collections)
else:
b = tf.Variable(bias_init_value, name=bname,
collections=b_collections)
return (w, b)
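# Hedged usage sketch (not part of the original module), assuming TF1 graph
# mode, the module-level imports (tensorflow as tf, numpy as np) at the top of
# this file, and the `linear` signature documented above.
def _linear_usage_example():
  x = tf.placeholder(tf.float32, [None, 128])
  y = linear(x, 64, name="readout")             # y = x W + b, W shaped [128, 64]
  W, b = init_linear(128, 64, name="readout2")  # the bare variables, same init
  return y, W, b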
def write_data(data_fname, data_dict, use_json=False, compression=None):
"""Write data in HD5F format.
Args:
    data_fname: The filename of the file in which to write the data.
data_dict: The dictionary of data to write. The keys are strings
and the values are numpy arrays.
use_json (optional): human readable format for simple items
compression (optional): The compression to use for h5py (disabled by
default because the library borks on scalars, otherwise try 'gzip').
"""
dir_name = os.path.dirname(data_fname)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if use_json:
the_file = open(data_fname,'w')
json.dump(data_dict, the_file)
the_file.close()
else:
try:
with h5py.File(data_fname, 'w') as hf:
for k, v in data_dict.items():
clean_k = k.replace('/', '_')
        if clean_k != k:
print('Warning: saving variable with name: ', k, ' as ', clean_k)
else:
print('Saving variable with name: ', clean_k)
hf.create_dataset(clean_k, data=v, compression=compression)
except IOError:
print("Cannot open %s for writing.", data_fname)
raise
def read_data(data_fname):
""" Read saved data in HDF5 format.
Args:
data_fname: The filename of the file from which to read the data.
Returns:
A dictionary whose keys will vary depending on dataset (but should
always contain the keys 'train_data' and 'valid_data') and whose
values are numpy arrays.
"""
try:
with h5py.File(data_fname, 'r') as hf:
data_dict = {k: np.array(v) for k, v in hf.items()}
return data_dict
except IOError:
print("Cannot open %s for reading." % data_fname)
raise
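# Hedged round-trip sketch for write_data/read_data, assuming the module-level
# numpy/h5py imports at the top of this file (the path below is illustrative).
def _hdf5_roundtrip_example(fname="/tmp/lfads_utils_example.h5"):
  data = {"train_data": np.zeros((4, 3)), "valid_data": np.ones((2, 3))}
  write_data(fname, data)        # one HDF5 dataset per dictionary key
  restored = read_data(fname)    # numpy arrays keyed exactly as written
  return np.array_equal(restored["train_data"], data["train_data"])  # True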
def write_datasets(data_path, data_fname_stem, dataset_dict, compression=None):
"""Write datasets in HD5F format.
This function assumes the dataset_dict is a mapping ( string ->
to data_dict ). It calls write_data for each data dictionary,
post-fixing the data filename with the key of the dataset.
Args:
data_path: The path to the save directory.
data_fname_stem: The filename stem of the file in which to write the data.
dataset_dict: The dictionary of datasets. The keys are strings
and the values data dictionaries (str -> numpy arrays) associations.
compression (optional): The compression to use for h5py (disabled by
default because the library borks on scalars, otherwise try 'gzip').
"""
full_name_stem = os.path.join(data_path, data_fname_stem)
for s, data_dict in dataset_dict.items():
write_data(full_name_stem + "_" + s, data_dict, compression=compression)
def read_datasets(data_path, data_fname_stem):
"""Read dataset sin HD5F format.
This function assumes the dataset_dict is a mapping ( string ->
to data_dict ). It calls write_data for each data dictionary,
post-fixing the data filename with the key of the dataset.
Args:
data_path: The path to the save directory.
data_fname_stem: The filename stem of the file in which to write the data.
"""
dataset_dict = {}
fnames = os.listdir(data_path)
print ('loading data from ' + data_path + ' with stem ' + data_fname_stem)
for fname in fnames:
if fname.startswith(data_fname_stem):
data_dict = read_data(os.path.join(data_path,fname))
idx = len(data_fname_stem) + 1
key = fname[idx:]
data_dict['data_dim'] = data_dict['train_data'].shape[2]
data_dict['num_steps'] = data_dict['train_data'].shape[1]
dataset_dict[key] = data_dict
if len(dataset_dict) == 0:
raise ValueError("Failed to load any datasets, are you sure that the "
"'--data_dir' and '--data_filename_stem' flag values "
"are correct?")
print (str(len(dataset_dict)) + ' datasets loaded')
return dataset_dict
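# Hedged sketch of the on-disk layout read_datasets expects (file names below
# are illustrative): one HDF5 file per dataset, "<stem>_<key>", each holding
# at least 'train_data' with shape [batch, num_steps, data_dim].
#
#   data_dir/
#     chaotic_rnn_data_g1    -> dataset_dict['g1']
#     chaotic_rnn_data_g2    -> dataset_dict['g2']
#
#   dataset_dict = read_datasets("data_dir", "chaotic_rnn_data")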
# NUMPY utility functions
def list_t_bxn_to_list_b_txn(values_t_bxn):
"""Convert a length T list of BxN numpy tensors of length B list of TxN numpy
tensors.
Args:
values_t_bxn: The length T list of BxN numpy tensors.
Returns:
The length B list of TxN numpy tensors.
"""
T = len(values_t_bxn)
B, N = values_t_bxn[0].shape
values_b_txn = []
for b in range(B):
values_pb_txn = np.zeros([T,N])
for t in range(T):
values_pb_txn[t,:] = values_t_bxn[t][b,:]
values_b_txn.append(values_pb_txn)
return values_b_txn
def list_t_bxn_to_tensor_bxtxn(values_t_bxn):
"""Convert a length T list of BxN numpy tensors to single numpy tensor with
shape BxTxN.
Args:
values_t_bxn: The length T list of BxN numpy tensors.
Returns:
values_bxtxn: The BxTxN numpy tensor.
"""
T = len(values_t_bxn)
B, N = values_t_bxn[0].shape
values_bxtxn = np.zeros([B,T,N])
for t in range(T):
values_bxtxn[:,t,:] = values_t_bxn[t]
return values_bxtxn
def tensor_bxtxn_to_list_t_bxn(tensor_bxtxn):
"""Convert a numpy tensor with shape BxTxN to a length T list of numpy tensors
with shape BxT.
Args:
tensor_bxtxn: The BxTxN numpy tensor.
Returns:
    A length T list of numpy tensors with shape BxN.
"""
values_t_bxn = []
B, T, N = tensor_bxtxn.shape
for t in range(T):
values_t_bxn.append(np.squeeze(tensor_bxtxn[:,t,:]))
return values_t_bxn
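# Hedged round-trip sketch for the conversion helpers above.
def _conversion_roundtrip_example():
  values_t_bxn = [np.full((2, 5), t) for t in range(3)]      # T=3, B=2, N=5
  tensor_bxtxn = list_t_bxn_to_tensor_bxtxn(values_t_bxn)    # shape (2, 3, 5)
  back_t_bxn = tensor_bxtxn_to_list_t_bxn(tensor_bxtxn)      # 3 tensors of (2, 5)
  return np.allclose(back_t_bxn[1], values_t_bxn[1])         # True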
def flatten(list_of_lists):
"""Takes a list of lists and returns a list of the elements.
Args:
list_of_lists: List of lists.
Returns:
flat_list: Flattened list.
flat_list_idxs: Flattened list indices.
"""
flat_list = []
flat_list_idxs = []
start_idx = 0
for item in list_of_lists:
if isinstance(item, list):
flat_list += item
l = len(item)
idxs = range(start_idx, start_idx+l)
start_idx = start_idx+l
else: # a value
flat_list.append(item)
idxs = [start_idx]
start_idx += 1
flat_list_idxs.append(idxs)
return flat_list, flat_list_idxs
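# Hedged example of flatten's paired return values: scalars and sub-lists
# share one flat index space.
def _flatten_example():
  flat, idxs = flatten([1, [2, 3], 4])
  # flat == [1, 2, 3, 4]; idxs == [[0], [1, 2], [3]]
  # (the middle entry is a range object on Python 3, a list on Python 2)
  return flat, idxs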
| michellemorales/OpenMM | models/lfads/utils.py | Python | gpl-2.0 | 12,183 | 0.010671 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_0/ar_12/test_artificial_1024_Anscombe_PolyTrend_0_12_20.py | Python | bsd-3-clause | 265 | 0.086792 |
from sympy.external import import_module
from sympy.utilities.pytest import warns
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
def test_min_module_version_python3_basestring_error():
with warns(UserWarning):
import_module('mpmath', min_module_version='1000.0.1')
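# Hedged sketch of the pattern these tests exercise: import_module returns the
# module on success and None otherwise, so optional dependencies reduce to a
# plain truthiness check instead of try/except ImportError (the module name
# and version below are illustrative).
def _optional_import_example():
    numpy = import_module('numpy', min_module_version='1.5.0')
    return numpy.zeros(3) if numpy else None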
| kaushik94/sympy | sympy/external/tests/test_importtools.py | Python | bsd-3-clause | 1,405 | 0.004982 |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PrepaymentBonusResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, name=None, id_product=None, id_product_attribute=None, amount=None, type=None, date_add=None, date_upd=None):
"""
PrepaymentBonusResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'name': 'str',
'id_product': 'int',
'id_product_attribute': 'int',
'amount': 'float',
'type': 'str',
'date_add': 'str',
'date_upd': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'id_product': 'id_product',
'id_product_attribute': 'id_product_attribute',
'amount': 'amount',
'type': 'type',
'date_add': 'date_add',
'date_upd': 'date_upd'
}
self._id = id
self._name = name
self._id_product = id_product
self._id_product_attribute = id_product_attribute
self._amount = amount
self._type = type
self._date_add = date_add
self._date_upd = date_upd
@property
def id(self):
"""
Gets the id of this PrepaymentBonusResponse.
:return: The id of this PrepaymentBonusResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this PrepaymentBonusResponse.
:param id: The id of this PrepaymentBonusResponse.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this PrepaymentBonusResponse.
:return: The name of this PrepaymentBonusResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this PrepaymentBonusResponse.
:param name: The name of this PrepaymentBonusResponse.
:type: str
"""
self._name = name
@property
def id_product(self):
"""
Gets the id_product of this PrepaymentBonusResponse.
:return: The id_product of this PrepaymentBonusResponse.
:rtype: int
"""
return self._id_product
@id_product.setter
def id_product(self, id_product):
"""
Sets the id_product of this PrepaymentBonusResponse.
:param id_product: The id_product of this PrepaymentBonusResponse.
:type: int
"""
self._id_product = id_product
@property
def id_product_attribute(self):
"""
Gets the id_product_attribute of this PrepaymentBonusResponse.
:return: The id_product_attribute of this PrepaymentBonusResponse.
:rtype: int
"""
return self._id_product_attribute
@id_product_attribute.setter
def id_product_attribute(self, id_product_attribute):
"""
Sets the id_product_attribute of this PrepaymentBonusResponse.
:param id_product_attribute: The id_product_attribute of this PrepaymentBonusResponse.
:type: int
"""
self._id_product_attribute = id_product_attribute
@property
def amount(self):
"""
Gets the amount of this PrepaymentBonusResponse.
:return: The amount of this PrepaymentBonusResponse.
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this PrepaymentBonusResponse.
:param amount: The amount of this PrepaymentBonusResponse.
:type: float
"""
self._amount = amount
@property
def type(self):
"""
Gets the type of this PrepaymentBonusResponse.
:return: The type of this PrepaymentBonusResponse.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this PrepaymentBonusResponse.
:param type: The type of this PrepaymentBonusResponse.
:type: str
"""
self._type = type
@property
def date_add(self):
"""
Gets the date_add of this PrepaymentBonusResponse.
:return: The date_add of this PrepaymentBonusResponse.
:rtype: str
"""
return self._date_add
@date_add.setter
def date_add(self, date_add):
"""
Sets the date_add of this PrepaymentBonusResponse.
:param date_add: The date_add of this PrepaymentBonusResponse.
:type: str
"""
self._date_add = date_add
@property
def date_upd(self):
"""
Gets the date_upd of this PrepaymentBonusResponse.
:return: The date_upd of this PrepaymentBonusResponse.
:rtype: str
"""
return self._date_upd
@date_upd.setter
def date_upd(self, date_upd):
"""
Sets the date_upd of this PrepaymentBonusResponse.
:param date_upd: The date_upd of this PrepaymentBonusResponse.
:type: str
"""
self._date_upd = date_upd
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
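# Hedged usage sketch (hand-written, not generator output): construct a
# response object and serialize it via to_dict(); the keys follow the
# attribute_map defined above.
def _prepayment_bonus_example():
    bonus = PrepaymentBonusResponse(id=1, name="Welcome bonus",
                                    amount=9.99, type="fixed")
    return bonus.to_dict()  # {'id': 1, 'name': 'Welcome bonus', ...}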
| kinow-io/kinow-python-sdk | kinow_client/models/prepayment_bonus_response.py | Python | apache-2.0 | 7,163 | 0.000419 |
import pygame
from pygame.locals import *
import random
import itertools
import state
import block
import tetros
import states
from text import Text
from colors import Colors
from engine import Engine
from playfield import Playfield
from countdown import Countdown
class GameState(state.State):
tetro_classes = (tetros.Leftsnake, tetros.Rightsnake, tetros.Stick,
tetros.Square, tetros.Tee, tetros.Leftgun,
tetros.Rightgun)
tetro_colors = (Colors.ORANGE, Colors.RED, Colors.BLUE, Colors.YELLOW)
def __init__(self):
super(GameState, self).__init__()
self.falling_tetro = None
# nrows should be 22
self.playfield = Playfield(10, 15)
self.playfield.rect.centerx = Engine.screenrect.centerx
self.playfield.rect.bottom = Engine.screenrect.bottom - block.SIZE
self.members.append(self.playfield)
#
# self.kill()
# # start a countdown, and revive ourself when done
# self.intro = Countdown(3000, 256, self.revive)
# self.intro.rect.center = Engine.screenrect.center
# self.members.append(self.intro)
def update(self):
# escape back to main menu
if Engine.is_just_pressed(K_ESCAPE):
Engine.switch(states.MainMenuState())
if not self.alive:
super(GameState, self).update()
return
# update falling tetro
# X movements
if self.falling_tetro is not None:
dx = 0
#
if Engine.pressed(K_LEFT):
dx = -block.SIZE
if Engine.pressed(K_RIGHT):
dx = block.SIZE
#
if dx != 0:
self.falling_tetro.move(dx, 0)
# move it back if any of it's block are now outside the
# playfield
for tblock in self.falling_tetro.members:
if (tblock.rect.x < self.playfield.rect.x
or tblock.rect.right > self.playfield.rect.right):
self.falling_tetro.move(-dx, 0)
break
else:
# not colliding with "walls" check against well blocks
well_blocks = self.playfield.get_well_blocks()
for tblock, wblock in itertools.product(
self.falling_tetro.members, well_blocks):
if tblock.rect.colliderect(wblock.rect):
# move it back and land
self.falling_tetro.move(-dx, 0)
break
else:
self.falling_tetro.col += 1 if dx > 0 else -1
# Y movements
if (self.falling_tetro is not None and self.falling_tetro.dropping):
self.falling_tetro.drop_delay_counter += Engine.elapsed
if self.falling_tetro.drop_delay_counter > self.falling_tetro.drop_delay:
# move and check for collisions
dy = block.SIZE
self.falling_tetro.move(0, dy)
#
well_blocks = self.playfield.get_well_blocks()
# collision with well bottom
for tblock in self.falling_tetro.members:
if tblock.rect.bottom > self.playfield.rect.bottom:
# move it back and land
self.falling_tetro.move(0, -dy)
if self.falling_tetro.row < 0:
self.kill()
return
self.falling_tetro.land(self.playfield)
self.falling_tetro = None
break
else:
# collision with blocks in the well
for tblock, wblock in itertools.product(
self.falling_tetro.members, well_blocks):
if tblock.rect.colliderect(wblock.rect):
# move it back and land
self.falling_tetro.move(0, -dy)
if self.falling_tetro.row < 0:
self.kill()
return
self.falling_tetro.land(self.playfield)
self.falling_tetro = None
break
else:
# update row
self.falling_tetro.row += 1
# reset counter
self.falling_tetro.drop_delay_counter = 0
# new tetro if needed
if self.falling_tetro is None:
color = random.choice(self.tetro_colors)
tetro_cls = random.choice(self.tetro_classes)
#
            # not giving startx/starty can leave the tetromino and the
            # playfield out of sync, because both default to zero
startx = self.playfield.rect.x + block.SIZE * 4
starty = self.playfield.rect.y - block.SIZE * 4
self.falling_tetro = tetro_cls(color,
startx=startx,
starty=starty,
drop_delay=50)
#
self.members.append(self.falling_tetro)
self.falling_tetro.drop()
super(GameState, self).update()
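# Hedged sketch (not part of the original class) of the move-test-revert
# collision pattern used throughout update(): move the tetromino, test its
# blocks against the well, then move it back before deciding to commit.
# (The real code also keeps the row/col bookkeeping in sync after a commit.)
def _would_collide(tetro, well_blocks, dx, dy):
    tetro.move(dx, dy)
    hit = any(tblock.rect.colliderect(wblock.rect)
              for tblock in tetro.members for wblock in well_blocks)
    tetro.move(-dx, -dy)  # always revert; the caller commits the move itself
    return hit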
| viswimmer1/PythonGenerator | data/python_files/31890725/gamestate.py | Python | gpl-2.0 | 5,483 | 0.001094 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from .estestcase import ESTestCase
from pyes.facets import DateHistogramFacet
from pyes.filters import TermFilter, RangeFilter
from pyes.query import FilteredQuery, MatchAllQuery, Search
from pyes.utils import ESRange
import datetime
class FacetSearchTestCase(ESTestCase):
def setUp(self):
super(FacetSearchTestCase, self).setUp()
mapping = {u'parsedtext': {'boost': 1.0,
'index': 'analyzed',
'store': 'yes',
'type': u'string',
"term_vector": "with_positions_offsets"},
u'name': {'boost': 1.0,
'index': 'analyzed',
'store': 'yes',
'type': u'string',
"term_vector": "with_positions_offsets"},
u'title': {'boost': 1.0,
'index': 'analyzed',
'store': 'yes',
'type': u'string',
"term_vector": "with_positions_offsets"},
u'position': {'store': 'yes',
'type': u'integer'},
u'tag': {'store': 'yes',
'type': u'string'},
u'date': {'store': 'yes',
'type': u'date'},
u'uuid': {'boost': 1.0,
'index': 'not_analyzed',
'store': 'yes',
'type': u'string'}}
self.conn.create_index(self.index_name)
self.conn.put_mapping(self.document_type, {'properties': mapping}, self.index_name)
self.conn.index({"name": "Joe Tester",
"parsedtext": "Joe Testere nice guy",
"uuid": "11111",
"position": 1,
"tag": "foo",
"date": datetime.date(2011, 5, 16)},
self.index_name, self.document_type, 1)
self.conn.index({"name": " Bill Baloney",
"parsedtext": "Bill Testere nice guy",
"uuid": "22222",
"position": 2,
"tag": "foo",
"date": datetime.date(2011, 4, 16)},
self.index_name, self.document_type, 2)
self.conn.index({"name": "Bill Clinton",
"parsedtext": "Bill is not nice guy",
"uuid": "33333",
"position": 3,
"tag": "bar",
"date": datetime.date(2011, 4, 28)},
self.index_name, self.document_type, 3)
self.conn.refresh(self.index_name)
def test_terms_facet(self):
q = MatchAllQuery()
q = q.search()
q.facet.add_term_facet('tag')
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEquals(resultset.total, 3)
self.assertEquals(resultset.facets.tag.terms, [{u'count': 2, u'term': u'foo'},
{u'count': 1, u'term': u'bar'}])
q2 = MatchAllQuery()
q2 = q2.search()
q2.facet.add_term_facet('tag')
q3 = MatchAllQuery()
q3 = q3.search()
q3.facet.add_term_facet('tag')
self.assertEquals(q2, q3)
q4 = MatchAllQuery()
q4 = q4.search()
q4.facet.add_term_facet('bag')
self.assertNotEquals(q2, q4)
def test_terms_facet_filter(self):
q = MatchAllQuery()
q = FilteredQuery(q, TermFilter('tag', 'foo'))
q = q.search()
q.facet.add_term_facet('tag')
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEquals(resultset.total, 2)
self.assertEquals(resultset.facets['tag']['terms'], [{u'count': 2, u'term': u'foo'}])
self.assertEquals(resultset.facets.tag.terms, [{u'count': 2, u'term': u'foo'}])
q2 = MatchAllQuery()
q2 = FilteredQuery(q2, TermFilter('tag', 'foo'))
q2 = q2.search()
q2.facet.add_term_facet('tag')
q3 = MatchAllQuery()
q3 = FilteredQuery(q3, TermFilter('tag', 'foo'))
q3 = q3.search()
q3.facet.add_term_facet('tag')
self.assertEquals(q2, q3)
q4 = MatchAllQuery()
q4 = FilteredQuery(q4, TermFilter('tag', 'foo'))
q4 = q4.search()
q4.facet.add_term_facet('bag')
self.assertNotEquals(q3, q4)
def test_date_facet(self):
q = MatchAllQuery()
q = q.search()
q.facet.facets.append(DateHistogramFacet('date_facet',
field='date',
interval='month'))
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEquals(resultset.total, 3)
self.assertEquals(resultset.facets.date_facet.entries, [{u'count': 2, u'time': 1301616000000},
{u'count': 1, u'time': 1304208000000}])
        self.assertEquals(datetime.datetime.fromtimestamp(1301616000000 / 1000.).date(),
                          datetime.date(2011, 4, 1))
        self.assertEquals(datetime.datetime.fromtimestamp(1304208000000 / 1000.).date(),
                          datetime.date(2011, 5, 1))
def test_date_facet_filter(self):
q = MatchAllQuery()
q = FilteredQuery(q, RangeFilter(qrange=ESRange('date',
datetime.date(2011, 4, 1),
datetime.date(2011, 5, 1),
include_upper=False)))
q = q.search()
q.facet.facets.append(DateHistogramFacet('date_facet',
field='date',
interval='month'))
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEquals(resultset.total, 2)
self.assertEquals(resultset.facets['date_facet']['entries'], [{u'count': 2, u'time': 1301616000000}])
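# Hedged sketch of the same facet construction outside the test harness; the
# connection and index name are whatever your setup provides.
def _term_facet_search_example(conn, index_name):
    q = MatchAllQuery().search()
    q.facet.add_term_facet('tag')  # document counts per distinct 'tag' value
    return conn.search(query=q, indices=index_name)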
if __name__ == "__main__":
unittest.main()
| Yelp/pyes | tests/test_facets.py | Python | bsd-3-clause | 6,193 | 0.004198 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, date
from lxml import etree
import time
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'description': fields.text('Description', translate=True),
'sequence': fields.integer('Sequence'),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
'legend_priority': fields.char(
'Priority Management Explanation', translate=True,
help='Explanation text to help users using the star and priority mechanism on stages or issues that are in this stage.'),
'legend_blocked': fields.char(
'Kanban Blocked Explanation', translate=True,
help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.'),
'legend_done': fields.char(
'Kanban Valid Explanation', translate=True,
help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.'),
'legend_normal': fields.char(
'Kanban Ongoing Explanation', translate=True,
help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.'),
'fold': fields.boolean('Folded in Tasks Pipeline',
help='This stage is folded in the kanban view when '
'there are no records in that stage to display.'),
}
def _get_default_project_ids(self, cr, uid, ctx=None):
if ctx is None:
ctx = {}
default_project_id = ctx.get('default_project_id')
return [default_project_id] if default_project_id else None
_defaults = {
'sequence': 1,
'project_ids': _get_default_project_ids,
}
_order = 'sequence'
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
_period_number = 5
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
'project.task', self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
val = {}
if not part:
return {'value': val}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
analytic_account_to_delete = set()
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise UserError(_('You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
elif proj.alias_id:
alias_ids.append(proj.alias_id.id)
if proj.analytic_account_id and not proj.analytic_account_id.line_ids:
analytic_account_to_delete.add(proj.analytic_account_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
self.pool['account.analytic.account'].unlink(cr, uid, list(analytic_account_to_delete), context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.project'), ('res_id', '=', id)], context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)], context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
if context is None:
context = {}
res={}
for project in self.browse(cr, uid, ids, context=context):
res[project.id] = len(project.task_ids)
return res
def _task_needaction_count(self, cr, uid, ids, field_name, arg, context=None):
Task = self.pool['project.task']
res = dict.fromkeys(ids, 0)
projects = Task.read_group(cr, uid, [('project_id', 'in', ids), ('message_needaction', '=', True)], ['project_id'], ['project_id'], context=context)
res.update({project['project_id'][0]: int(project['project_id_count']) for project in projects})
return res
def _get_alias_models(self, cr, uid, context=None):
""" Overriden in project_issue to offer more options """
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('portal', _('Customer Project: visible in portal if the customer is a follower')),
('employees', _('All Employees Project: all employees can access')),
('followers', _('Private Project: followers only'))]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'help': _('''<p class="oe_view_nocontent_create">
Documents are attached to the tasks and issues of your project.</p><p>
Send messages or log internal notes with attachments to link
documents to your project.
</p>'''),
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the project without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
'analytic_account_id': fields.many2one(
'account.analytic.account', 'Contract/Analytic',
help="Link this project to an analytic account if you need financial management on projects. "
"It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.",
ondelete="cascade", required=True, auto_join=True),
'label_tasks': fields.char('Use Tasks as', help="Gives label to tasks on project's kanban view."),
'tasks': fields.one2many('project.task', 'project_id', "Task Activities"),
'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time', help="Timetable working hours to adjust the gantt diagram report", states={'close':[('readonly',True)]} ),
'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', 'Tasks Stages', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'task_count': fields.function(_task_count, type='integer', string="Tasks",),
'task_needaction_count': fields.function(_task_needaction_count, type='integer', string="Tasks",),
'task_ids': fields.one2many('project.task', 'project_id',
domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]),
'color': fields.integer('Color Index'),
'user_id': fields.many2one('res.users', 'Project Manager'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized "
"with Tasks (or optionally Issues if the Issue Tracker module is installed)."),
'alias_model': fields.selection(_alias_models, "Alias Model", select=True, required=True,
help="The kind of document created when an email is received on this project's email alias"),
'privacy_visibility': fields.selection(_visibility_selection, 'Privacy / Visibility', required=True,
help="Holds visibility of the tasks or issues that belong to the current project:\n"
"- Portal : employees see everything;\n"
" if portal is activated, portal users see the tasks or issues followed by\n"
" them or by someone of their company\n"
"- Employees Only: employees see all tasks or issues\n"
"- Followers Only: employees see only the followed tasks or issues; if portal\n"
" is activated, portal users see the followed tasks or issues."),
'state': fields.selection([('draft','New'),
('open','In Progress'),
('cancelled', 'Cancelled'),
('pending','Pending'),
('close','Closed')],
'Status', required=True, copy=False),
'doc_count': fields.function(
_get_attached_docs, string="Number of documents attached", type='integer'
),
'date_start': fields.date('Start Date'),
'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
}
_order = "sequence, name, id"
_defaults = {
'active': True,
'type': 'contract',
'label_tasks': 'Tasks',
'state': 'open',
'sequence': 10,
'user_id': lambda self,cr,uid,ctx: uid,
'alias_model': 'project.task',
'privacy_visibility': 'employees',
}
    # TODO: why not use a SQL constraint?
def _check_dates(self, cr, uid, ids, context=None):
for leave in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
if leave['date_start'] and leave['date']:
if leave['date_start'] > leave['date']:
return False
return True
_constraints = [
(_check_dates, 'Error! project start-date must be lower than project end-date.', ['date_start', 'date'])
]
def set_template(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=False, context=context)
def reset_project(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=True, context=context)
def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
""" copy and map tasks from old to new project """
if context is None:
context = {}
map_task_id = {}
task_obj = self.pool.get('project.task')
proj = self.browse(cr, uid, old_project_id, context=context)
for task in proj.tasks:
# preserve task name and stage, normally altered during copy
defaults = {'stage_id': task.stage_id.id,
'name': task.name}
map_task_id[task.id] = task_obj.copy(cr, uid, task.id, defaults, context=context)
self.write(cr, uid, [new_project_id], {'tasks':[(6,0, map_task_id.values())]})
task_obj.duplicate_task(cr, uid, map_task_id, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
context = dict(context or {})
context['active_test'] = False
proj = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default.update(name=_("%s (copy)") % (proj.name))
res = super(project, self).copy(cr, uid, id, default, context)
for follower in proj.message_follower_ids:
self.message_subscribe(cr, uid, res, partner_ids=[follower.partner_id.id], subtype_ids=[subtype.id for subtype in follower.subtype_ids])
self.map_tasks(cr, uid, id, res, context=context)
return res
def duplicate_template(self, cr, uid, ids, context=None):
context = dict(context or {})
data_obj = self.pool.get('ir.model.data')
result = []
for proj in self.browse(cr, uid, ids, context=context):
context.update({'analytic_project_copy': True})
new_date_start = time.strftime('%Y-%m-%d')
new_date_end = False
if proj.date_start and proj.date:
start_date = date(*time.strptime(proj.date_start,'%Y-%m-%d')[:3])
end_date = date(*time.strptime(proj.date,'%Y-%m-%d')[:3])
new_date_end = (datetime(*time.strptime(new_date_start,'%Y-%m-%d')[:3])+(end_date-start_date)).strftime('%Y-%m-%d')
context.update({'copy':True})
new_id = self.copy(cr, uid, proj.id, default = {
'name':_("%s (copy)") % (proj.name),
'state':'open',
'date_start':new_date_start,
'date':new_date_end}, context=context)
result.append(new_id)
if result and len(result):
res_id = result[0]
form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
return {
'name': _('Projects'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.project',
'view_id': False,
'res_id': res_id,
'views': [(form_view['res_id'],'form'),(tree_view['res_id'],'tree')],
'type': 'ir.actions.act_window',
'search_view_id': search_view['res_id'],
}
@api.multi
def setActive(self, value=True):
""" Set a project as active/inactive, and its tasks as well. """
self.write({'active': value})
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# Prevent double project creation when 'use_tasks' is checked + alias management
create_context = dict(context, project_creation_in_progress=True,
alias_model_name=vals.get('alias_model', 'project.task'),
alias_parent_model_name=self._name,
mail_create_nosubscribe=True)
ir_values = self.pool.get('ir.values').get_default(cr, uid, 'project.config.settings', 'generate_project_alias')
if ir_values:
vals['alias_name'] = vals.get('alias_name') or vals.get('name')
project_id = super(project, self).create(cr, uid, vals, context=create_context)
project_rec = self.browse(cr, uid, project_id, context=context)
values = {'alias_parent_thread_id': project_id, 'alias_defaults': {'project_id': project_id}}
self.pool.get('mail.alias').write(cr, uid, [project_rec.alias_id.id], values, context=context)
return project_id
def write(self, cr, uid, ids, vals, context=None):
# if alias_model has been changed, update alias_model_id accordingly
if vals.get('alias_model'):
model_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', vals.get('alias_model', 'project.task'))])
vals.update(alias_model_id=model_ids[0])
res = super(project, self).write(cr, uid, ids, vals, context=context)
if 'active' in vals:
# archiving/unarchiving a project does it on its tasks, too
projects = self.browse(cr, uid, ids, context)
tasks = projects.with_context(active_test=False).mapped('tasks')
tasks.write({'active': vals['active']})
return res
class task(osv.osv):
_name = "project.task"
_description = "Task"
_date_name = "date_start"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
def _get_default_partner(self, cr, uid, context=None):
if context is None:
context = {}
if 'default_project_id' in context:
project = self.pool.get('project.project').browse(cr, uid, context['default_project_id'], context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
if context is None:
context = {}
default_project_id = context.get('default_project_id')
if not default_project_id:
return False
return self.stage_find(cr, uid, [], default_project_id, [('fold', '=', False)], context=context)
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
if context is None:
context = {}
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
if 'default_project_id' in context:
search_domain = ['|', ('project_ids', '=', context['default_project_id']), ('id', 'in', ids)]
else:
search_domain = [('id', 'in', ids)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
_group_by_full = {
'stage_id': _read_group_stage_ids,
}
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned=0.0):
if remaining and not planned:
return {'value': {'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned=0.0, effective=0.0):
return {'value': {'remaining_hours': planned - effective}}
@api.cr_uid_ids_context
def onchange_project(self, cr, uid, id, project_id, context=None):
values = {}
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
values['partner_id'] = project.partner_id.id
values['stage_id'] = self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
else:
values['stage_id'] = False
values['partner_id'] = False
return {'value': values}
def onchange_user_id(self, cr, uid, ids, user_id, context=None):
vals = {}
if user_id:
vals['date_start'] = fields.datetime.now()
return {'value': vals}
def duplicate_task(self, cr, uid, map_ids, context=None):
mapper = lambda t: map_ids.get(t.id, t.id)
for task in self.browse(cr, uid, map_ids.values(), context):
new_child_ids = set(map(mapper, task.child_ids))
new_parent_ids = set(map(mapper, task.parent_ids))
if new_child_ids or new_parent_ids:
task.write({'parent_ids': [(6,0,list(new_parent_ids))],
'child_ids': [(6,0,list(new_child_ids))]})
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
current = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default['name'] = _("%s (copy)") % current.name
if 'remaining_hours' not in default:
default['remaining_hours'] = current.planned_hours
return super(task, self).copy_data(cr, uid, id, default, context)
_columns = {
'active': fields.boolean('Active'),
'name': fields.char('Task Title', track_visibility='onchange', size=128, required=True, select=True),
'description': fields.html('Description'),
'priority': fields.selection([('0','Normal'), ('1','High')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'tag_ids': fields.many2many('project.tags', string='Tags', oldname='categ_ids'),
'kanban_state': fields.selection([('normal', 'In Progress'),('done', 'Ready for next stage'),('blocked', 'Blocked')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
required=True, copy=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True), #not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date', select=True, copy=False),
'date_end': fields.datetime('Ending Date', select=True, copy=False),
'date_assign': fields.datetime('Assigning Date', select=True, copy=False, readonly=True),
'date_deadline': fields.date('Deadline', select=True, copy=False),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True, copy=False, readonly=True),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select=True, track_visibility='onchange', change_default=True),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'user_id': fields.many2one('res.users', 'Assigned to', select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'manager_id': fields.related('project_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'attachment_ids': fields.one2many('ir.attachment', 'res_id', domain=lambda self: [('res_model', '=', self._name)], auto_join=True, string='Attachments'),
        # In the domain of displayed_image_id, we couldn't use attachment_ids because a one2many is represented as a list of commands, so we used res_model & res_id
'displayed_image_id': fields.many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Displayed Image'),
'legend_blocked': fields.related("stage_id", "legend_blocked", type="char", string='Kanban Blocked Explanation'),
'legend_done': fields.related("stage_id", "legend_done", type="char", string='Kanban Valid Explanation'),
'legend_normal': fields.related("stage_id", "legend_normal", type="char", string='Kanban Ongoing Explanation'),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': lambda self, cr, uid, ctx=None: ctx.get('default_project_id') if ctx is not None else False,
'date_last_stage_update': fields.datetime.now,
'kanban_state': 'normal',
'priority': '0',
'sequence': 10,
'active': True,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
'date_start': fields.datetime.now,
}
_order = "priority desc, sequence, date_start, name, id"
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
if id in visited_branch: #Cycle
return False
if id in visited_node: #Already tested don't work one more time for nothing
return True
visited_branch.add(id)
visited_node.add(id)
#visit child using DFS
task = self.browse(cr, uid, id, context=context)
for child in task.child_ids:
res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
if not res:
return False
visited_branch.remove(id)
return True
def _check_dates(self, cr, uid, ids, context=None):
if context == None:
context = {}
obj_task = self.browse(cr, uid, ids[0], context=context)
start = obj_task.date_start or False
end = obj_task.date_end or False
if start and end :
if start > end:
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
(_check_dates, 'Error ! Task starting date must be lower than its ending date.', ['date_start','date_end'])
]
# Override view according to the company definition
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
users_obj = self.pool.get('res.users')
if context is None: context = {}
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
tm = obj_tm and obj_tm.name or 'Hours'
res = super(task, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
try:
# using get_object to get translation value
uom_hour = self.pool['ir.model.data'].get_object(cr, uid, 'product', 'product_uom_hour', context=context)
except ValueError:
uom_hour = False
if not obj_tm or not uom_hour or obj_tm.id == uom_hour.id:
return res
eview = etree.fromstring(res['arch'])
# if the project_time_mode_id is not in hours (so in days), display it as a float field
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
# replace reference of 'Hours' to 'Day(s)'
for f in res['fields']:
            # TODO: this does NOT work in languages other than English:
            # the field 'Initially Planned Hours' should become 'Initially Planned Days',
            # but the string 'Initially Planned Days' is not available in translation
if 'Hours' in res['fields'][f]['string']:
res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours', obj_tm.name)
return res
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_document_name'] = _("tasks")
return super(task, self).get_empty_list_help(cr, uid, help, context=context)
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
            Parameters of the stage search, taken from the task:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
search_domain = []
if section_ids:
search_domain = [('|')] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
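    # Worked example (illustration): with section_ids == [7, 8] and
    # domain == [('fold', '=', False)], the search domain built above is
    #   ['|', ('project_ids', '=', 7), ('project_ids', '=', 8),
    #    ('fold', '=', False)]
    # i.e. len(section_ids) - 1 leading '|' operators OR-ing the project
    # terms, followed by the extra domain; the first stage matching it
    # (in `order`) is returned.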
def _check_child_task(self, cr, uid, ids, context=None):
if context == None:
context = {}
tasks = self.browse(cr, uid, ids, context=context)
for task in tasks:
if task.child_ids:
for child in task.child_ids:
if child.stage_id and not child.stage_id.fold:
raise UserError(_("Child task still open.\nPlease cancel or complete child task first."))
return True
def _store_history(self, cr, uid, ids, context=None):
for task in self.browse(cr, uid, ids, context=context):
self.pool.get('project.task.history').create(cr, uid, {
'task_id': task.id,
'remaining_hours': task.remaining_hours,
'planned_hours': task.planned_hours,
'kanban_state': task.kanban_state,
'type_id': task.stage_id.id,
'user_id': task.user_id.id
}, context=context)
return True
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
# for default stage
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# user_id change: update date_assign
if vals.get('user_id'):
vals['date_assign'] = fields.datetime.now()
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
task_id = super(task, self).create(cr, uid, vals, context=create_context)
self._store_history(cr, uid, [task_id], context=context)
return task_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
# user_id change: update date_assign
if vals.get('user_id'):
vals['date_assign'] = fields.datetime.now()
# Overridden to reset the kanban_state to normal whenever
# the stage (stage_id) of the task changes.
if vals and not 'kanban_state' in vals and 'stage_id' in vals:
new_stage = vals.get('stage_id')
vals_reset_kstate = dict(vals, kanban_state='normal')
for t in self.browse(cr, uid, ids, context=context):
write_vals = vals_reset_kstate if t.stage_id.id != new_stage else vals
super(task, self).write(cr, uid, [t.id], write_vals, context=context)
result = True
else:
result = super(task, self).write(cr, uid, ids, vals, context=context)
if any(item in vals for item in ['stage_id', 'remaining_hours', 'user_id', 'kanban_state']):
self._store_history(cr, uid, ids, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
self._check_child_task(cr, uid, ids, context=context)
res = super(task, self).unlink(cr, uid, ids, context)
return res
def _get_total_hours(self):
return self.remaining_hours
def _generate_task(self, cr, uid, tasks, ident=4, context=None):
context = context or {}
result = ""
ident = ' '*ident
company = self.pool["res.users"].browse(cr, uid, uid, context=context).company_id
duration_uom = {
'day(s)': 'd', 'days': 'd', 'day': 'd', 'd': 'd',
'month(s)': 'm', 'months': 'm', 'month': 'month', 'm': 'm',
'week(s)': 'w', 'weeks': 'w', 'week': 'w', 'w': 'w',
'hour(s)': 'H', 'hours': 'H', 'hour': 'H', 'h': 'H',
}.get(company.project_time_mode_id.name.lower(), "hour(s)")
for task in tasks:
if task.stage_id and task.stage_id.fold:
continue
result += '''
%sdef Task_%s():
%s todo = \"%.2f%s\"
%s effort = \"%.2f%s\"''' % (ident, task.id, ident, task.remaining_hours, duration_uom, ident, task._get_total_hours(), duration_uom)
start = []
for t2 in task.parent_ids:
start.append("up.Task_%s.end" % (t2.id,))
if start:
result += '''
%s start = max(%s)
''' % (ident,','.join(start))
if task.user_id:
result += '''
%s resource = %s
''' % (ident, 'User_'+str(task.user_id.id))
result += "\n"
return result
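    # Illustrative output for one open task (id 42, remaining_hours 3.5,
    # company time mode in hours, ident=4); note that effort uses
    # _get_total_hours(), which here also returns remaining_hours:
    #
    #     def Task_42():
    #          todo = "3.50H"
    #          effort = "3.50H"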
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'kanban_state' in init_values and record.kanban_state == 'blocked':
return 'project.mt_task_blocked'
elif 'kanban_state' in init_values and record.kanban_state == 'done':
return 'project.mt_task_ready'
elif 'user_id' in init_values and record.user_id: # assigned -> new
return 'project.mt_task_new'
elif 'stage_id' in init_values and record.stage_id and record.stage_id.sequence <= 1: # start stage -> new
return 'project.mt_task_new'
elif 'stage_id' in init_values:
return 'project.mt_task_stage'
return super(task, self)._track_subtype(cr, uid, ids, init_values, context=context)
def _notification_group_recipients(self, cr, uid, ids, message, recipients, done_ids, group_data, context=None):
""" Override the mail.thread method to handle project users and officers
recipients. Indeed those will have specific action in their notification
emails: creating tasks, assigning it. """
group_project_user = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'project.group_project_user')
for recipient in recipients:
if recipient.id in done_ids:
continue
if recipient.user_ids and group_project_user in recipient.user_ids[0].groups_id.ids:
group_data['group_project_user'] |= recipient
done_ids.add(recipient.id)
return super(task, self)._notification_group_recipients(cr, uid, ids, message, recipients, done_ids, group_data, context=context)
def _notification_get_recipient_groups(self, cr, uid, ids, message, recipients, context=None):
res = super(task, self)._notification_get_recipient_groups(cr, uid, ids, message, recipients, context=context)
take_action = self._notification_link_helper(cr, uid, ids, 'assign', context=context)
new_action_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'project.action_view_task')
new_action = self._notification_link_helper(cr, uid, ids, 'new', context=context, action_id=new_action_id)
task_record = self.browse(cr, uid, ids[0], context=context)
actions = []
if not task_record.user_id:
actions.append({'url': take_action, 'title': _('I take it')})
else:
actions.append({'url': new_action, 'title': _('New Task')})
res['group_project_user'] = {
'actions': actions
}
return res
@api.cr_uid_context
def message_get_reply_to(self, cr, uid, ids, default=None, context=None):
""" Override to get the reply_to of the parent project. """
tasks = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([task.project_id.id for task in tasks if task.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), default=default, context=context)
return dict((task.id, aliases.get(task.project_id and task.project_id.id or 0, False)) for task in tasks)
def email_split(self, cr, uid, ids, msg, context=None):
email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or ''))
# check left-part is not already an alias
task_ids = self.browse(cr, uid, ids, context=context)
aliases = [task.project_id.alias_name for task in task_ids if task.project_id]
return filter(lambda x: x.split('@')[0] not in aliases, email_list)
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Override to updates the document according to the email. """
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject'),
'planned_hours': 0.0,
'partner_id': msg.get('author_id', False)
}
defaults.update(custom_values)
res = super(task, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
email_list = self.email_split(cr, uid, [res], msg, context=context)
partner_ids = filter(None, self._find_partner_from_emails(cr, uid, [res], email_list, force_create=False, context=context))
self.message_subscribe(cr, uid, [res], partner_ids, context=context)
return res
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Override to update the task according to the email. """
if update_vals is None:
update_vals = {}
maps = {
'cost': 'planned_hours',
}
for line in msg['body'].split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res:
match = res.group(1).lower()
field = maps.get(match)
if field:
try:
update_vals[field] = float(res.group(2).lower())
except (ValueError, TypeError):
pass
email_list = self.email_split(cr, uid, ids, msg, context=context)
partner_ids = filter(None, self._find_partner_from_emails(cr, uid, ids, email_list, force_create=False, context=context))
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
return super(task, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(task, self).message_get_suggested_recipients(cr, uid, ids, context=context)
for data in self.browse(cr, uid, ids, context=context):
if data.partner_id:
reason = _('Customer Email') if data.partner_id.email else _('Customer')
data._message_add_suggested_recipient(recipients, partner=data.partner_id, reason=reason)
return recipients
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
def _compute_project_count(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0)
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = len(account.project_ids)
return result
_columns = {
'use_tasks': fields.boolean('Tasks', help="Check this box to manage internal activities through this project"),
'company_uom_id': fields.related('company_id', 'project_time_mode_id', string="Company UOM", type='many2one', relation='product.uom'),
'project_ids': fields.one2many('project.project', 'analytic_account_id', 'Projects'),
'project_count': fields.function(_compute_project_count, 'Project Count', type='integer')
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_tasks'] = template.use_tasks
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
        '''
        Decide whether a project needs to be created automatically when an analytic account is created. Return True if it does, False otherwise.
        '''
if context is None: context = {}
return vals.get('use_tasks') and not 'project_creation_in_progress' in context
@api.cr_uid_id_context
def project_create(self, cr, uid, analytic_account_id, vals, context=None):
        '''
        Called at analytic account creation time; creates a project automatically linked to the account if the conditions are met.
        '''
project_pool = self.pool.get('project.project')
project_id = project_pool.search(cr, uid, [('analytic_account_id','=', analytic_account_id)])
if not project_id and self._trigger_project_creation(cr, uid, vals, context=context):
project_values = {
'name': vals.get('name'),
'analytic_account_id': analytic_account_id,
'use_tasks': True,
}
return project_pool.create(cr, uid, project_values, context=context)
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
vals['child_ids'] = []
analytic_account_id = super(account_analytic_account, self).create(cr, uid, vals, context=context)
self.project_create(cr, uid, analytic_account_id, vals, context=context)
return analytic_account_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
vals_for_project = vals.copy()
for account in self.browse(cr, uid, ids, context=context):
if not vals.get('name'):
vals_for_project['name'] = account.name
self.project_create(cr, uid, account.id, vals_for_project, context=context)
return super(account_analytic_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
proj_ids = self.pool['project.project'].search(cr, uid, [('analytic_account_id', 'in', ids)])
has_tasks = self.pool['project.task'].search(cr, uid, [('project_id', 'in', proj_ids)], count=True, context=context)
if has_tasks:
raise UserError(_('Please remove existing tasks in the project linked to the accounts you want to delete.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if context is None:
context={}
if context.get('current_model') == 'project.project':
project_ids = self.search(cr, uid, args + [('name', operator, name)], limit=limit, context=context)
return self.name_get(cr, uid, project_ids, context=context)
return super(account_analytic_account, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)
def projects_action(self, cr, uid, ids, context=None):
accounts = self.browse(cr, uid, ids, context=context)
project_ids = sum([account.project_ids.ids for account in accounts], [])
result = {
"type": "ir.actions.act_window",
"res_model": "project.project",
"views": [[False, "tree"], [False, "form"]],
"domain": [["id", "in", project_ids]],
"context": {"create": False},
"name": "Projects",
}
        if len(project_ids) == 1:
            result['views'] = [(False, "form")]
            result['res_id'] = project_ids[0]
        elif not project_ids:
            result = {'type': 'ir.actions.act_window_close'}
return result
class project_project(osv.osv):
_inherit = 'project.project'
_defaults = {
'use_tasks': True
}
class project_task_history(osv.osv):
"""
Tasks History, used for cumulative flow charts (Lean/Agile)
"""
_name = 'project.task.history'
_description = 'History of Tasks'
_rec_name = 'task_id'
_log_access = False
def _get_date(self, cr, uid, ids, name, arg, context=None):
result = {}
for history in self.browse(cr, uid, ids, context=context):
if history.type_id and history.type_id.fold:
result[history.id] = history.date
continue
cr.execute('''select
date
from
project_task_history
where
task_id=%s and
id>%s
order by id limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
result[history.id] = res and res[0] or False
return result
def _get_related_date(self, cr, uid, ids, context=None):
result = []
for history in self.browse(cr, uid, ids, context=context):
cr.execute('''select
id
from
project_task_history
where
task_id=%s and
id<%s
order by id desc limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
if res:
result.append(res[0])
return result
_columns = {
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
'type_id': fields.many2one('project.task.type', 'Stage'),
'kanban_state': fields.selection([('normal', 'Normal'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], 'Kanban State', required=False),
'date': fields.date('Date', select=True),
'end_date': fields.function(_get_date, string='End Date', type="date", store={
'project.task.history': (_get_related_date, None, 20)
}),
'remaining_hours': fields.float('Remaining Time', digits=(16, 2)),
'planned_hours': fields.float('Planned Time', digits=(16, 2)),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'date': fields.date.context_today,
}
class project_task_history_cumulative(osv.osv):
_name = 'project.task.history.cumulative'
_table = 'project_task_history_cumulative'
_inherit = 'project.task.history'
_auto = False
_columns = {
'end_date': fields.date('End Date'),
'nbr_tasks': fields.integer('# of Tasks', readonly=True),
'project_id': fields.many2one('project.project', 'Project'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_task_history_cumulative')
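        # The inner SELECT expands each history record into one row per day,
        # from its start date up to its end date (or today when still open),
        # so the outer view can be aggregated into cumulative flow charts.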
cr.execute(""" CREATE VIEW project_task_history_cumulative AS (
SELECT
history.date::varchar||'-'||history.history_id::varchar AS id,
history.date AS end_date,
*
FROM (
SELECT
h.id AS history_id,
h.date+generate_series(0, CAST((coalesce(h.end_date, DATE 'tomorrow')::date - h.date) AS integer)-1) AS date,
h.task_id, h.type_id, h.user_id, h.kanban_state,
count(h.task_id) as nbr_tasks,
greatest(h.remaining_hours, 1) AS remaining_hours, greatest(h.planned_hours, 1) AS planned_hours,
t.project_id
FROM
project_task_history AS h
JOIN project_task AS t ON (h.task_id = t.id)
GROUP BY
h.id,
h.task_id,
t.project_id
) AS history
)
""")
class project_tags(osv.Model):
""" Tags of project's tasks (or issues) """
_name = "project.tags"
_description = "Tags of project's tasks, issues..."
_columns = {
'name': fields.char('Name', required=True),
'color': fields.integer('Color Index'),
}
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
| be-cloud-be/horizon-addons | server/addons/project/project.py | Python | agpl-3.0 | 53,981 | 0.005131 |
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from oauth2client.django_orm import FlowField, CredentialsField
from crowdsourcing.utils import get_delimiter
import pandas as pd
import os
class RegistrationModel(models.Model):
user = models.OneToOneField(User)
activation_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class PasswordResetModel(models.Model):
user = models.OneToOneField(User)
reset_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Region(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the region!', })
code = models.CharField(max_length=16, error_messages={'required': 'Please specify the region code!', })
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Country(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the country!', })
code = models.CharField(max_length=8, error_messages={'required': 'Please specify the country code!', })
region = models.ForeignKey(Region)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class City(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the city!', })
country = models.ForeignKey(Country)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class Address(models.Model):
street = models.CharField(max_length=128, error_messages={'required': 'Please specify the street name!', })
country = models.ForeignKey(Country)
city = models.ForeignKey(City)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s, %s, %s' % (self.street, self.city, self.country)
class Role(models.Model):
name = models.CharField(max_length=32, unique=True, error_messages={'required': 'Please specify the role name!',
'unique': 'The role %(value)r already exists. Please provide another name!'})
is_active = models.BooleanField(default=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Language(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the language!'})
iso_code = models.CharField(max_length=8)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserProfile(models.Model):
user = models.OneToOneField(User)
gender_choices = (('M', 'Male'), ('F', 'Female'))
gender = models.CharField(max_length=1, choices=gender_choices)
address = models.ForeignKey(Address, null=True)
birthday = models.DateField(null=True, error_messages={'invalid': "Please enter a correct date format"})
nationality = models.ManyToManyField(Country, through='UserCountry')
verified = models.BooleanField(default=False)
picture = models.BinaryField(null=True)
friends = models.ManyToManyField('self', through='Friendship',
symmetrical=False)
roles = models.ManyToManyField(Role, through='UserRole')
deleted = models.BooleanField(default=False)
languages = models.ManyToManyField(Language, through='UserLanguage')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserCountry(models.Model):
country = models.ForeignKey(Country)
user = models.ForeignKey(UserProfile)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Skill(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the skill name!"})
description = models.CharField(max_length=512, error_messages={'required': "Please enter the skill description!"})
verified = models.BooleanField(default=False)
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Worker(models.Model):
profile = models.OneToOneField(UserProfile)
skills = models.ManyToManyField(Skill, through='WorkerSkill')
deleted = models.BooleanField(default=False)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class WorkerSkill(models.Model):
worker = models.ForeignKey(Worker)
skill = models.ForeignKey(Skill)
level = models.IntegerField(null=True)
verified = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'skill')
class Requester(models.Model):
profile = models.OneToOneField(UserProfile)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class UserRole(models.Model):
user_profile = models.ForeignKey(UserProfile)
role = models.ForeignKey(Role)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Friendship(models.Model):
user_source = models.ForeignKey(UserProfile, related_name='user_source')
user_target = models.ForeignKey(UserProfile, related_name='user_target')
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Category(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the category name!"})
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Project(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the project name!"})
start_date = models.DateTimeField(auto_now_add=True, auto_now=False)
end_date = models.DateTimeField(auto_now_add=True, auto_now=False)
owner = models.ForeignKey(Requester, related_name='project_owner')
description = models.CharField(max_length=1024, default='')
collaborators = models.ManyToManyField(Requester, through='ProjectRequester')
keywords = models.TextField(null=True)
save_to_drive = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
categories = models.ManyToManyField(Category, through='ProjectCategory')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ProjectRequester(models.Model):
"""
Tracks the list of requesters that collaborate on a specific project
"""
requester = models.ForeignKey(Requester)
project = models.ForeignKey(Project)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('requester', 'project')
class Template(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the template name!"})
owner = models.ForeignKey(UserProfile)
source_html = models.TextField(default=None, null=True)
price = models.FloatField(default=0)
share_with_others = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Module(models.Model):
"""
aka Milestone
This is a group of similar tasks of the same kind.
Fields
-repetition: number of times a task needs to be performed
"""
name = models.CharField(max_length=128, error_messages={'required': "Please enter the module name!"})
description = models.TextField(error_messages={'required': "Please enter the module description!"})
owner = models.ForeignKey(Requester)
project = models.ForeignKey(Project, related_name='modules')
categories = models.ManyToManyField(Category, through='ModuleCategory')
keywords = models.TextField(null=True)
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'In Review'),
(3, 'In Progress'),
(4, 'Completed')
)
status = models.IntegerField(choices=statuses, default=1)
price = models.FloatField()
repetition = models.IntegerField(default=1)
module_timeout = models.IntegerField(default=0)
has_data_set = models.BooleanField(default=False)
data_set_location = models.CharField(max_length=256, default='No data set', null=True)
task_time = models.FloatField(default=0) # in minutes
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
template = models.ManyToManyField(Template, through='ModuleTemplate')
is_micro = models.BooleanField(default=True)
is_prototype = models.BooleanField(default=False)
class ModuleCategory(models.Model):
module = models.ForeignKey(Module)
category = models.ForeignKey(Category)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('category', 'module')
class ProjectCategory(models.Model):
project = models.ForeignKey(Project)
category = models.ForeignKey(Category)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('project', 'category')
class TemplateItem(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the name of the template item!"})
template = models.ForeignKey(Template, related_name='template_items')
id_string = models.CharField(max_length=128)
role = models.CharField(max_length=16)
icon = models.CharField(max_length=256, null=True)
data_source = models.CharField(max_length=256, null=True)
layout = models.CharField(max_length=16, default='column')
type = models.CharField(max_length=16)
sub_type = models.CharField(max_length=16)
values = models.TextField(null=True)
position = models.IntegerField()
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
ordering = ['position']
class ModuleTemplate(models.Model):
module = models.ForeignKey(Module)
template = models.ForeignKey(Template)
class TemplateItemProperties(models.Model):
template_item = models.ForeignKey(TemplateItem)
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Task(models.Model):
module = models.ForeignKey(Module, related_name='module_tasks')
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'Accepted'),
(3, 'Assigned'),
(4, 'Finished')
)
status = models.IntegerField(choices=statuses, default=1)
data = models.TextField(null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
price = models.FloatField(default=0)
class TaskWorker(models.Model):
task = models.ForeignKey(Task, related_name='task_workers')
worker = models.ForeignKey(Worker)
statuses = ((1, 'Created'),
(2, 'In Progress'),
(3, 'Accepted'),
(4, 'Rejected'),
(5, 'Returned'),
(6, 'Skipped')
)
task_status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class TaskWorkerResult(models.Model):
task_worker = models.ForeignKey(TaskWorker, related_name='task_worker_results')
result = models.TextField()
template_item = models.ForeignKey(TemplateItem)
# TODO: To be refined
statuses = ((1, 'Created'),
(2, 'Accepted'),
(3, 'Rejected')
)
status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class WorkerModuleApplication(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'Accepted'),
(3, 'Rejected')
)
status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ActivityLog(models.Model):
"""
Track all user's activities: Create, Update and Delete
"""
activity = models.CharField(max_length=512)
author = models.ForeignKey(User)
created_timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
class Qualification(models.Model):
module = models.ForeignKey(Module)
# TODO: To be refined
types = ((1, "Strict"),
(2, 'Flexible'))
type = models.IntegerField(choices=types, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class QualificationItem(models.Model):
qualification = models.ForeignKey(Qualification)
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserLanguage(models.Model):
language = models.ForeignKey(Language)
user = models.ForeignKey(UserProfile)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Currency(models.Model):
name = models.CharField(max_length=32)
iso_code = models.CharField(max_length=8)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserPreferences(models.Model):
user = models.OneToOneField(User)
language = models.ForeignKey(Language)
currency = models.ForeignKey(Currency)
login_alerts = models.SmallIntegerField(default=0)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class RequesterRanking(models.Model):
requester_name = models.CharField(max_length=128)
requester_payRank = models.FloatField()
requester_fairRank = models.FloatField()
requester_speedRank = models.FloatField()
requester_communicationRank = models.FloatField()
requester_numberofReviews = models.IntegerField(default=0)
class ModuleRating(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
value = models.IntegerField()
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'module')
class ModuleReview(models.Model):
worker = models.ForeignKey(Worker)
anonymous = models.BooleanField(default=False)
module = models.ForeignKey(Module)
comments = models.TextField()
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'module')
class FlowModel(models.Model):
id = models.OneToOneField(User, primary_key=True)
flow = FlowField()
class AccountModel(models.Model):
name = models.CharField(max_length=128)
type = models.CharField(max_length=16)
email = models.EmailField()
access_token = models.TextField(max_length=2048)
root = models.CharField(max_length=256)
is_active = models.IntegerField()
quota = models.BigIntegerField()
used_space = models.BigIntegerField()
assigned_space = models.BigIntegerField()
    status = models.IntegerField(default=1)
owner = models.ForeignKey(User)
class CredentialsModel(models.Model):
account = models.ForeignKey(AccountModel)
credential = CredentialsField()
class TemporaryFlowModel(models.Model):
user = models.ForeignKey(User)
type = models.CharField(max_length=16)
email = models.EmailField()
class BookmarkedProjects(models.Model):
profile = models.ForeignKey(UserProfile)
project = models.ForeignKey(Project)
class Conversation(models.Model):
subject = models.CharField(max_length=64)
sender = models.ForeignKey(User, related_name='sender')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
deleted = models.BooleanField(default=False)
recipients = models.ManyToManyField(User, through='ConversationRecipient')
class Message(models.Model):
conversation = models.ForeignKey(Conversation, related_name='messages')
sender = models.ForeignKey(User)
body = models.TextField(max_length=8192)
deleted = models.BooleanField(default=False)
status = models.IntegerField(default=1) # 1:Sent 2:Delivered 3:Read
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ConversationRecipient(models.Model):
recipient = models.ForeignKey(User, related_name='recipients')
conversation = models.ForeignKey(Conversation, related_name='conversation_recipient')
date_added = models.DateTimeField(auto_now_add=True, auto_now=False)
class UserMessage(models.Model):
message = models.ForeignKey(Message)
user = models.ForeignKey(User)
deleted = models.BooleanField(default=False)
class RequesterInputFile(models.Model):
# TODO will need save files on a server rather than in a temporary folder
file = models.FileField(upload_to='tmp/')
deleted = models.BooleanField(default=False)
def parse_csv(self):
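        # Detect the delimiter from the file name, read the CSV with pandas
        # and return it as a list of row dicts (one dict per record).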
delimiter = get_delimiter(self.file.name)
        df = pd.read_csv(self.file, sep=delimiter)
return df.to_dict(orient='records')
def delete(self, *args, **kwargs):
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(root, self.file.url[1:])
os.remove(path)
super(RequesterInputFile, self).delete(*args, **kwargs)
class WorkerRequesterRating(models.Model):
origin = models.ForeignKey(UserProfile, related_name='rating_origin')
target = models.ForeignKey(UserProfile, related_name='rating_target')
module = models.ForeignKey(Module, related_name='rating_module')
weight = models.FloatField(default=2)
type = models.CharField(max_length=16)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True) | psykohack/crowdsource-platform | crowdsourcing/models.py | Python | mit | 21,348 | 0.001499 |
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import wizzat.testutil
import wizzat.pghelper
class DBTestCase(wizzat.testutil.TestCase):
db_info = {
'host' : 'localhost',
'port' : 5432,
'user' : 'wizzat',
'password' : 'wizzat',
'database' : 'wizzatpy_testdb',
'autocommit' : False,
}
db_mgr = wizzat.pghelper.ConnMgr(db_info,
max_objs = 3,
)
def conn(self, name = 'testconn'):
conn = self.db_mgr.name(name)
conn.autocommit = True
return conn
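    # Usage sketch (hypothetical; assumes wizzat.pghelper connections expose a
    # psycopg2-style cursor):
    #     with self.conn().cursor() as cur:
    #         cur.execute("SELECT 1")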
| wizzat/wizzat.py | tests/testcase.py | Python | mit | 630 | 0.022222 |
"""A block Davidson solver for finding a fixed number of eigenvalues.
Adapted from https://joshuagoings.com/2013/08/23/davidsons-method/
"""
import time
from typing import Optional, Tuple
import numpy as np
from tqdm import tqdm
def davidson(A: np.ndarray, k: int, eig: int, tol: float = 1e-8,
             mmax: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray]:
    assert len(A.shape) == 2
    assert A.shape[0] == A.shape[1]
    n = A.shape[0]
    if mmax is None:
        # cap on the subspace size; matches the driver script at the bottom
        mmax = n // 2
## set up subspace and trial vectors
# set of k unit vectors as guess
t = np.eye(n, k)
# hold guess vectors
V = np.zeros((n, n))
I = np.eye(n)
for m in tqdm(range(k, mmax, k)):
if m <= k:
for j in range(k):
V[:, j] = t[:, j] / np.linalg.norm(t[:, j])
theta_old = 1
elif m > k:
theta_old = theta[:eig]
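        # re-orthonormalize the accumulated guess vectors with a QR decomposition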
V, R = np.linalg.qr(V)
T = V[:, : (m + 1)].T @ A @ V[:, : (m + 1)]
THETA, S = np.linalg.eig(T)
idx = THETA.argsort()
theta = THETA[idx]
s = S[:, idx]
for j in range(k):
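            # residual of the j-th Ritz pair, then a diagonally preconditioned
            # correction vector appended to the search space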
w = (A - theta[j] * I) @ V[:, : (m + 1)] @ s[:, j]
q = w / (theta[j] - A[j, j])
V[:, (m + j + 1)] = q
norm = np.linalg.norm(theta[:eig] - theta_old)
if norm < tol:
break
return theta, V
if __name__ == "__main__":
# dimension of problem
n = 1200
# convergence tolerance
tol = 1e-8
# maximum number of iterations
mmax = n // 2
## set up fake Hamiltonian
sparsity = 1.0e-4
A = np.zeros((n, n))
for i in range(0, n):
A[i, i] = i + 1
A = A + sparsity * np.random.randn(n, n)
A = (A.T + A) / 2
# number of initial guess vectors
k = 8
# number of eigenvalues to solve
eig = 4
start_davidson = time.time()
    theta, V = davidson(A, k, eig, tol=tol, mmax=mmax)
end_davidson = time.time()
print(f"davidson = {theta[:eig]}; {end_davidson - start_davidson} seconds")
start_numpy = time.time()
E, Vec = np.linalg.eig(A)
E = np.sort(E)
end_numpy = time.time()
print(f"numpy = {E[:eig]}; {end_numpy - start_numpy} seconds")
| berquist/programming_party | eric/project12/davidson.py | Python | mpl-2.0 | 2,084 | 0.00144 |
import tests.units.tournaments
import lib.datalayer
import games
import games.settlers
import tournaments
import hruntime
from tests import *
from tests.units.tournaments import create_and_populate_tournament
class Tests(TestCase):
@classmethod
def setup_class(cls):
super(Tests, cls).setup_class()
hruntime.dbroot = lib.datalayer.Root()
hruntime.dbroot.users['SYSTEM'] = tests.DummyUser('SYSTEM')
def test_sanity(self):
patched_events = {
'tournament.Created': 2,
'tournament.PlayerJoined': 12,
'game.GameCreated': 8,
'game.PlayerJoined': 4,
'game.PlayerInvited': 8
}
with EventPatcherWithCounter(patched_events):
T = create_and_populate_tournament(engine = 'randomized')
| happz/settlers | tests/units/tournaments/randomized.py | Python | mit | 746 | 0.009383 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from novaideo.content.processes.smart_folder_management.behaviors import (
RemoveSmartFolder)
from novaideo.content.smart_folder import SmartFolder
from novaideo import _
class RemoveSmartFolderViewStudyReport(BasicView):
title = 'Alert for remove'
name = 'alertforremove'
template = 'novaideo:views/smart_folder_management/templates/alert_smartfolder_remove.pt'
def update(self):
result = {}
values = {'context': self.context}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
class RemoveSmartFolderView(FormView):
title = _('Remove')
name = 'removesmartfolderform'
formid = 'formremovesmartfolder'
behaviors = [RemoveSmartFolder, Cancel]
validate_behaviors = False
def before_update(self):
self.action = self.request.resource_url(
self.context, 'novaideoapi',
query={'op': 'update_action_view',
'node_id': RemoveSmartFolder.node_definition.id})
self.schema.widget = deform.widget.FormWidget(
css_class='deform novaideo-ajax-form')
@view_config(
name='removesmartfolder',
context=SmartFolder,
renderer='pontus:templates/views_templates/grid.pt',
)
class RemoveSmartFolderViewMultipleView(MultipleView):
title = _('Remove the topic of interest')
name = 'removesmartfolder'
viewid = 'removesmartfolder'
template = 'pontus:templates/views_templates/simple_multipleview.pt'
views = (RemoveSmartFolderViewStudyReport, RemoveSmartFolderView)
validators = [RemoveSmartFolder.get_validator()]
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{RemoveSmartFolder: RemoveSmartFolderViewMultipleView})
| ecreall/nova-ideo | novaideo/views/smart_folder_management/remove_smart_folder.py | Python | agpl-3.0 | 2,248 | 0.00089 |
from .variables import *
def Cell(node):
# cells must stand on own line
if node.parent.cls not in ("Assign", "Assigns"):
node.auxiliary("cell")
return "{", ",", "}"
def Assign(node):
if node.name == 'varargin':
out = "%(0)s = va_arg(varargin, " + node[0].type + ") ;"
else:
out = "%(0)s.clear() ;"
# append to cell, one by one
for elem in node[1]:
out = out + "\n%(0)s.push_back(" + str(elem) + ") ;"
return out
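# Rough illustration (not generated by this file alone): a MATLAB cell
# assignment such as `c = {1, x}` is emitted element by element as
#     c.clear() ;
#     c.push_back(1) ;
#     c.push_back(x) ;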
| jonathf/matlab2cpp | src/matlab2cpp/rules/_cell.py | Python | bsd-3-clause | 495 | 0.00404 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-26 09:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exercises', '0004_exercise_author'),
]
operations = [
migrations.AlterField(
model_name='exercise',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
| FlowFX/unkenmathe.de | src/um/exercises/migrations/0005_auto_20170826_0942.py | Python | agpl-3.0 | 578 | 0.00173 |
# Copyright (c) 2017 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gio
from eolie.define import PROXY_BUS, PROXY_PATH, PROXY_INTERFACE, El
class DBusHelper:
"""
Simpler helper for DBus
"""
def __init__(self):
self.__signals = {}
def call(self, call, page_id, dbus_args=None, callback=None, *args):
"""
Call function
@param call as str
@param page_id as int
@param dbus_args as GLib.Variant()/None
@param callback as function
"""
try:
bus = El().get_dbus_connection()
proxy_bus = PROXY_BUS % page_id
Gio.DBusProxy.new(bus, Gio.DBusProxyFlags.NONE, None,
proxy_bus,
PROXY_PATH,
PROXY_INTERFACE, None,
self.__on_get_proxy,
call, dbus_args, callback, *args)
except Exception as e:
print("DBusHelper::call():", e)
def connect(self, signal, callback, page_id):
"""
Connect callback to object signals
@param signal as str
@param callback as function
@param page_id as int
"""
try:
bus = El().get_dbus_connection()
proxy_bus = PROXY_BUS % page_id
subscribe_id = bus.signal_subscribe(None, proxy_bus, signal,
PROXY_PATH, None,
Gio.DBusSignalFlags.NONE,
callback)
self.__signals[page_id] = (bus, subscribe_id)
except Exception as e:
print("DBusHelper::connect():", e)
def disconnect(self, page_id):
"""
Disconnect signal
@param page_id as int
"""
        if page_id in self.__signals:
(bus, subscribe_id) = self.__signals[page_id]
bus.signal_unsubscribe(subscribe_id)
del self.__signals[page_id]
#######################
# PRIVATE #
#######################
def __on_get_proxy(self, source, result, call, dbus_args, callback, *args):
"""
Launch call and connect it to callback
@param source as GObject.Object
@param result as Gio.AsyncResult
@param call as str
@param dbus_args as GLib.Variant()/None
@param callback as function
"""
try:
proxy = source.new_finish(result)
proxy.call(call, dbus_args, Gio.DBusCallFlags.NO_AUTO_START,
1000, None, callback, *args)
except Exception as e:
print("DBusHelper::__on_get_proxy():", e)
callback(None, None, *args)
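# Usage sketch (hypothetical method name and page id; assumes a web extension
# is listening on the per-page PROXY_BUS):
#     helper = DBusHelper()
#     helper.call("GetSelection", page_id,
#                 callback=lambda source, result, *args: print(result))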
| gnumdk/eolie | eolie/helper_dbus.py | Python | gpl-3.0 | 3,499 | 0 |
from __future__ import print_function, unicode_literals, division, absolute_import
import datetime
import time
import ntplib
from pyotp import utils
from pyotp.otp import OTP
class TOTP(OTP):
systime_offset = None
def __init__(self, *args, **kwargs):
"""
        @option options [Integer] interval (30) the time interval in seconds
        for OTP. This defaults to 30, which is standard.
"""
self.interval = kwargs.pop('interval', 30)
if self.systime_offset is None:
try:
c = ntplib.NTPClient()
TOTP.systime_offset = int(c.request(
'pool.ntp.org', version=3).offset)
except Exception:
self.systime_offset = 0
super(TOTP, self).__init__(*args, **kwargs)
def at(self, for_time, counter_offset=0):
"""
        Accepts either a Unix timestamp integer or a datetime object.
        Naive datetime objects are interpreted in local time.
        @param [Time/Integer] for_time the time to generate an OTP for
        @param [Integer] counter_offset an amount of ticks to add to the time counter
"""
if not isinstance(for_time, datetime.datetime):
for_time = datetime.datetime.fromtimestamp(int(for_time))
return self.generate_otp(self.timecode(for_time) + counter_offset)
def now(self):
"""
Generate the current time OTP
@return [Integer] the OTP as an integer
"""
return self.generate_otp(self.timecode(datetime.datetime.now()))
def verify(self, otp, for_time=None, valid_window=0):
"""
Verifies the OTP passed in against the current time OTP
@param [String/Integer] otp the OTP to check against
@param [Integer] valid_window extends the validity to this many counter ticks before and after the current one
"""
if for_time is None:
for_time = datetime.datetime.now()
if valid_window:
for i in range(-valid_window, valid_window + 1):
if utils.strings_equal(str(otp), str(self.at(for_time, i))):
return True
return False
return utils.strings_equal(str(otp), str(self.at(for_time)))
def provisioning_uri(self, name, issuer_name=None):
"""
Returns the provisioning URI for the OTP
This can then be encoded in a QR Code and used
to provision the Google Authenticator app
@param [String] name of the account
@return [String] provisioning uri
"""
return utils.build_uri(self.secret, name, issuer_name=issuer_name)
def timecode(self, for_time):
i = time.mktime(for_time.timetuple()) + self.systime_offset
return int(i / self.interval)
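# Usage sketch (assumes a base32 secret, as expected by the OTP base class):
#     totp = TOTP('JBSWY3DPEHPK3PXP')
#     token = totp.now()
#     assert totp.verify(token, valid_window=1)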
| projectarkc/arkc-server | arkcserver/pyotp/totp.py | Python | gpl-2.0 | 2,787 | 0.001435 |
import csv
from . import WorksheetBase, WorkbookBase, CellMode
class CSVWorksheet(WorksheetBase):
def __init__(self, raw_sheet, ordinal):
super().__init__(raw_sheet, ordinal)
self.name = "Sheet 1"
self.nrows = len(self.raw_sheet)
        self.ncols = max((len(r) for r in self.raw_sheet), default=0)
def parse_cell(self, cell, coords, cell_mode=CellMode.cooked):
try:
return int(cell)
except ValueError:
pass
try:
return float(cell)
except ValueError:
pass
# TODO Check for dates?
return cell
def get_row(self, row_index):
return self.raw_sheet[row_index]
class CSVWorkbook(WorkbookBase):
def iterate_sheets(self):
with open(self.filename, "r") as rf:
reader = csv.reader(rf)
yield list(reader)
def get_worksheet(self, raw_sheet, index):
return CSVWorksheet(raw_sheet, index)
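# Usage sketch (hypothetical file name; assumes WorkbookBase keeps the
# filename it is constructed with):
#     wb = CSVWorkbook("data.csv")
#     for raw in wb.iterate_sheets():
#         ws = wb.get_worksheet(raw, 0)
#         print(ws.name, ws.nrows, ws.ncols)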
| treycucco/py-utils | idb/spreadsheet/csv.py | Python | bsd-3-clause | 862 | 0.012761 |
"""The tests for the Graphite component."""
import socket
import unittest
from unittest import mock
import blumate.core as ha
import blumate.components.graphite as graphite
from blumate.const import (
EVENT_STATE_CHANGED,
EVENT_BLUMATE_START, EVENT_BLUMATE_STOP,
STATE_ON, STATE_OFF)
from tests.common import get_test_home_assistant
class TestGraphite(unittest.TestCase):
"""Test the Graphite component."""
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.latitude = 32.87336
self.hass.config.longitude = 117.22743
self.gf = graphite.GraphiteFeeder(self.hass, 'foo', 123, 'bm')
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
@mock.patch('blumate.components.graphite.GraphiteFeeder')
def test_minimal_config(self, mock_gf):
"""Test setup with minimal configuration."""
self.assertTrue(graphite.setup(self.hass, {}))
mock_gf.assert_called_once_with(self.hass, 'localhost', 2003, 'bm')
@mock.patch('blumate.components.graphite.GraphiteFeeder')
def test_full_config(self, mock_gf):
"""Test setup with full configuration."""
config = {
'graphite': {
'host': 'foo',
'port': 123,
'prefix': 'me',
}
}
self.assertTrue(graphite.setup(self.hass, config))
mock_gf.assert_called_once_with(self.hass, 'foo', 123, 'me')
@mock.patch('blumate.components.graphite.GraphiteFeeder')
def test_config_bad_port(self, mock_gf):
"""Test setup with invalid port."""
config = {
'graphite': {
'host': 'foo',
'port': 'wrong',
}
}
self.assertFalse(graphite.setup(self.hass, config))
self.assertFalse(mock_gf.called)
def test_subscribe(self):
"""Test the subscription."""
fake_hass = mock.MagicMock()
gf = graphite.GraphiteFeeder(fake_hass, 'foo', 123, 'bm')
fake_hass.bus.listen_once.has_calls([
mock.call(EVENT_BLUMATE_START, gf.start_listen),
mock.call(EVENT_BLUMATE_STOP, gf.shutdown),
])
fake_hass.bus.listen.assert_called_once_with(
EVENT_STATE_CHANGED, gf.event_listener)
def test_start(self):
"""Test the start."""
with mock.patch.object(self.gf, 'start') as mock_start:
self.gf.start_listen('event')
mock_start.assert_called_once_with()
def test_shutdown(self):
"""Test the shutdown."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
self.gf.shutdown('event')
mock_queue.put.assert_called_once_with(self.gf._quit_object)
def test_event_listener(self):
"""Test the event listener."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
self.gf.event_listener('foo')
mock_queue.put.assert_called_once_with('foo')
@mock.patch('time.time')
def test_report_attributes(self, mock_time):
"""Test the reporting with attributes."""
mock_time.return_value = 12345
attrs = {'foo': 1,
'bar': 2.0,
'baz': True,
'bat': 'NaN',
}
expected = [
'bm.entity.state 0.000000 12345',
'bm.entity.foo 1.000000 12345',
'bm.entity.bar 2.000000 12345',
'bm.entity.baz 1.000000 12345',
]
state = mock.MagicMock(state=0, attributes=attrs)
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_report_with_string_state(self, mock_time):
"""Test the reporting with strings."""
mock_time.return_value = 12345
expected = [
'bm.entity.foo 1.000000 12345',
'bm.entity.state 1.000000 12345',
]
state = mock.MagicMock(state='above_horizon', attributes={'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_report_with_binary_state(self, mock_time):
"""Test the reporting with binary state."""
mock_time.return_value = 12345
state = ha.State('domain.entity', STATE_ON, {'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
expected = ['bm.entity.foo 1.000000 12345',
'bm.entity.state 1.000000 12345']
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
state.state = STATE_OFF
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
expected = ['bm.entity.foo 1.000000 12345',
'bm.entity.state 0.000000 12345']
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_send_to_graphite_errors(self, mock_time):
"""Test the sending with errors."""
mock_time.return_value = 12345
state = ha.State('domain.entity', STATE_ON, {'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
mock_send.side_effect = socket.error
self.gf._report_attributes('entity', state)
mock_send.side_effect = socket.gaierror
self.gf._report_attributes('entity', state)
@mock.patch('socket.socket')
def test_send_to_graphite(self, mock_socket):
"""Test the sending of data."""
self.gf._send_to_graphite('foo')
mock_socket.assert_called_once_with(socket.AF_INET,
socket.SOCK_STREAM)
sock = mock_socket.return_value
sock.connect.assert_called_once_with(('foo', 123))
sock.sendall.assert_called_once_with('foo'.encode('ascii'))
sock.send.assert_called_once_with('\n'.encode('ascii'))
sock.close.assert_called_once_with()
def test_run_stops(self):
"""Test the stops."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
mock_queue.get.return_value = self.gf._quit_object
self.assertEqual(None, self.gf.run())
mock_queue.get.assert_called_once_with()
mock_queue.task_done.assert_called_once_with()
def test_run(self):
"""Test the running."""
runs = []
event = mock.MagicMock(event_type=EVENT_STATE_CHANGED,
data={'entity_id': 'entity',
'new_state': mock.MagicMock()})
def fake_get():
if len(runs) >= 2:
return self.gf._quit_object
elif runs:
runs.append(1)
return mock.MagicMock(event_type='somethingelse',
data={'new_event': None})
else:
runs.append(1)
return event
with mock.patch.object(self.gf, '_queue') as mock_queue:
with mock.patch.object(self.gf, '_report_attributes') as mock_r:
mock_queue.get.side_effect = fake_get
self.gf.run()
# Twice for two events, once for the stop
self.assertEqual(3, mock_queue.task_done.call_count)
mock_r.assert_called_once_with(
'entity',
event.data['new_state'])
| bdfoster/blumate | tests/components/test_graphite.py | Python | mit | 8,152 | 0 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import signal
import eventlet
from st2common import log as logging
from st2reactor.container.process_container import ProcessSensorContainer
from st2common.services.sensor_watcher import SensorWatcher
from st2common.models.system.common import ResourceReference
LOG = logging.getLogger(__name__)
class SensorContainerManager(object):
def __init__(self, sensors_partitioner):
self._sensor_container = None
self._sensors_watcher = SensorWatcher(create_handler=self._handle_create_sensor,
update_handler=self._handle_update_sensor,
delete_handler=self._handle_delete_sensor,
queue_suffix='sensor_container')
self._container_thread = None
if not sensors_partitioner:
raise ValueError('sensors_partitioner should be non-None.')
self._sensors_partitioner = sensors_partitioner
def run_sensors(self):
"""
Run all sensors as determined by sensors_partitioner.
"""
sensors = self._sensors_partitioner.get_sensors()
if sensors:
LOG.info('Setting up container to run %d sensors.', len(sensors))
LOG.info('\tSensors list - %s.', [self._get_sensor_ref(sensor) for sensor in sensors])
sensors_to_run = []
for sensor in sensors:
# TODO: Directly pass DB object to the ProcessContainer
sensors_to_run.append(self._to_sensor_object(sensor))
LOG.info('(PID:%s) SensorContainer started.', os.getpid())
self._setup_sigterm_handler()
self._spin_container_and_wait(sensors_to_run)
def _spin_container_and_wait(self, sensors):
try:
self._sensor_container = ProcessSensorContainer(sensors=sensors)
self._container_thread = eventlet.spawn(self._sensor_container.run)
LOG.debug('Starting sensor CUD watcher...')
self._sensors_watcher.start()
exit_code = self._container_thread.wait()
LOG.error('Process container quit with exit_code %d.', exit_code)
LOG.error('(PID:%s) SensorContainer stopped.', os.getpid())
except (KeyboardInterrupt, SystemExit):
self._sensor_container.shutdown()
self._sensors_watcher.stop()
LOG.info('(PID:%s) SensorContainer stopped. Reason - %s', os.getpid(),
sys.exc_info()[0].__name__)
eventlet.kill(self._container_thread)
self._container_thread = None
return 0
def _setup_sigterm_handler(self):
def sigterm_handler(signum=None, frame=None):
            # This will cause SystemExit to be thrown and we call sensor_container.shutdown()
            # there, which cleans things up.
sys.exit(0)
# Register a SIGTERM signal handler which calls sys.exit which causes SystemExit to
# be thrown. We catch SystemExit and handle cleanup there.
signal.signal(signal.SIGTERM, sigterm_handler)
def _to_sensor_object(self, sensor_db):
file_path = sensor_db.artifact_uri.replace('file://', '')
class_name = sensor_db.entry_point.split('.')[-1]
sensor_obj = {
'pack': sensor_db.pack,
'file_path': file_path,
'class_name': class_name,
'trigger_types': sensor_db.trigger_types,
'poll_interval': sensor_db.poll_interval,
'ref': self._get_sensor_ref(sensor_db)
}
return sensor_obj
#################################################
# Event handler methods for the sensor CUD events
#################################################
def _handle_create_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring create.', self._get_sensor_ref(sensor))
return
if not sensor.enabled:
LOG.info('sensor %s is not enabled.', self._get_sensor_ref(sensor))
return
LOG.info('Adding sensor %s.', self._get_sensor_ref(sensor))
self._sensor_container.add_sensor(sensor=self._to_sensor_object(sensor))
def _handle_update_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring update.', self._get_sensor_ref(sensor))
return
sensor_ref = self._get_sensor_ref(sensor)
sensor_obj = self._to_sensor_object(sensor)
# Handle disabling sensor
if not sensor.enabled:
LOG.info('Sensor %s disabled. Unloading sensor.', sensor_ref)
self._sensor_container.remove_sensor(sensor=sensor_obj)
return
LOG.info('Sensor %s updated. Reloading sensor.', sensor_ref)
try:
self._sensor_container.remove_sensor(sensor=sensor_obj)
        except Exception:
LOG.exception('Failed to reload sensor %s', sensor_ref)
else:
self._sensor_container.add_sensor(sensor=sensor_obj)
LOG.info('Sensor %s reloaded.', sensor_ref)
def _handle_delete_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring delete.', self._get_sensor_ref(sensor))
return
LOG.info('Unloading sensor %s.', self._get_sensor_ref(sensor))
self._sensor_container.remove_sensor(sensor=self._to_sensor_object(sensor))
def _get_sensor_ref(self, sensor):
return ResourceReference.to_string_reference(pack=sensor.pack, name=sensor.name)
| punalpatel/st2 | st2reactor/st2reactor/container/manager.py | Python | apache-2.0 | 6,484 | 0.002159 |
"""Runs fast tests."""
import unittest
from tests.kernel_tests import SwiftKernelTests, OwnKernelTests
from tests.simple_notebook_tests import *
if __name__ == '__main__':
unittest.main()
| google/swift-jupyter | test/fast_test.py | Python | apache-2.0 | 196 | 0 |
# Copyright (C) 2014 Robby Zeitfuchs (@robbyFux)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Cridex(Signature):
name = "banker_cridex"
description = "Cridex banking trojan"
severity = 3
alert = True
categories = ["Banking", "Trojan"]
families = ["Cridex"]
authors = ["Robby Zeitfuchs", "@robbyFux"]
minimum = "0.5"
references = ["http://stopmalvertising.com/rootkits/analysis-of-cridex.html",
"http://sempersecurus.blogspot.de/2012/08/cridex-analysis-using-volatility.html",
"http://labs.m86security.com/2012/03/the-cridex-trojan-targets-137-financial-organizations-in-one-go/",
"https://malwr.com/analysis/NDU2ZWJjZTIwYmRiNGVmNWI3MDUyMGExMGQ0MmVhYTY/",
"https://malwr.com/analysis/MTA5YmU4NmIwMjg5NDAxYjlhYzZiZGIwYjZkOTFkOWY/"]
def run(self):
indicators = [".*Local.QM.*",
".*Local.XM.*"]
match_file = self.check_file(pattern=".*\\KB[0-9]{8}\.exe", regex=True)
match_batch_file = self.check_file(pattern=".*\\\\Temp\\\\\S{4}\.tmp\.bat", regex=True)
if match_file and match_batch_file:
self.data.append({"file": match_file})
self.data.append({"batchfile": match_batch_file})
for indicator in indicators:
match_mutex = self.check_mutex(pattern=indicator, regex=True)
if match_mutex:
self.data.append({"mutex": match_mutex})
return True
return False
| lixiangning888/whole_project | modules/signatures_orignal/banker_cridex.py | Python | lgpl-3.0 | 2,206 | 0.00544 |
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.models.functions import Distance
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.db.utils import NotSupportedError
from django.utils.functional import cached_property
class BaseSpatialOperations:
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = '%s'
@cached_property
def select_extent(self):
return self.select
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
# Aggregates
disallowed_aggregates = ()
geom_func_prefix = ''
# Mapping between Django function names and backend names, when names do not
# match; used in spatial_function_name().
function_names = {}
# Blacklist/set of known unsupported functions of the backend
unsupported_functions = {
'Area', 'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'Azimuth',
'BoundingCircle', 'Centroid', 'Difference', 'Distance', 'Envelope',
'GeoHash', 'GeometryDistance', 'Intersection', 'IsValid', 'Length',
'LineLocatePoint', 'MakeValid', 'MemSize', 'NumGeometries',
'NumPoints', 'Perimeter', 'PointOnSurface', 'Reverse', 'Scale',
'SnapToGrid', 'SymDifference', 'Transform', 'Translate', 'Union',
}
# Constructors
from_text = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box, srid):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box, srid):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Return the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value, compiler):
"""
Return the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
def transform_value(value, field):
return value is not None and value.srid != field.srid
if hasattr(value, 'as_sql'):
return (
'%s(%%s, %s)' % (self.spatial_function_name('Transform'), f.srid)
if transform_value(value.output_field, f)
else '%s'
)
if transform_value(value, f):
# Add Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (
self.spatial_function_name('Transform'),
self.from_text, value.srid, f.srid,
)
elif self.connection.features.has_spatialrefsys_table:
return '%s(%%s,%s)' % (self.from_text, f.srid)
else:
# For backwards compatibility on MySQL (#27464).
return '%s(%%s)' % self.from_text
def check_expression_support(self, expression):
if isinstance(expression, self.disallowed_aggregates):
raise NotSupportedError(
"%s spatial aggregation is not supported by this database backend." % expression.name
)
super().check_expression_support(expression)
def spatial_aggregate_name(self, agg_name):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_function_name(self, func_name):
if func_name in self.unsupported_functions:
raise NotSupportedError("This backend doesn't support the %s function." % func_name)
return self.function_names.get(func_name, self.geom_func_prefix + func_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError('Subclasses of BaseSpatialOperations must provide a geometry_columns() method.')
def spatial_ref_sys(self):
raise NotImplementedError('subclasses of BaseSpatialOperations must a provide spatial_ref_sys() method')
distance_expr_for_lookup = staticmethod(Distance)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
if isinstance(expression.output_field, GeometryField):
converters.append(self.get_geometry_converter(expression))
return converters
def get_geometry_converter(self, expression):
raise NotImplementedError(
'Subclasses of BaseSpatialOperations must provide a '
'get_geometry_converter() method.'
)
def get_area_att_for_field(self, field):
if field.geodetic(self.connection):
if self.connection.features.supports_area_geodetic:
return 'sq_m'
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
else:
units_name = field.units_name(self.connection)
if units_name:
return AreaMeasure.unit_attname(units_name)
def get_distance_att_for_field(self, field):
dist_att = None
if field.geodetic(self.connection):
if self.connection.features.supports_distance_geodetic:
dist_att = 'm'
else:
units = field.units_name(self.connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
return dist_att
| georgemarshall/django | django/contrib/gis/db/backends/base/operations.py | Python | bsd-3-clause | 6,371 | 0.002197 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-26 22:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parliament', '0002_auto_20161123_1157'),
]
operations = [
migrations.AlterField(
model_name='politicalparty',
name='name_short',
field=models.CharField(max_length=200),
),
]
| openkamer/openkamer | parliament/migrations/0003_auto_20161126_2342.py | Python | mit | 467 | 0 |
import re
import logging
import urllib
import csv
import os
import shutil
from datetime import datetime
import StringIO
from scrapy.spider import BaseSpider
from scrapy import signals
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.xlib.pydispatch import dispatcher
from scrapy.exceptions import CloseSpider
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class TomLeeMusicCaSpider(BaseSpider):
name = 'tomleemusic.ca'
allowed_domains = ['tomleemusic.ca', 'competitormonitor.com']
def __init__(self, *args, **kwargs):
super(TomLeeMusicCaSpider, self).__init__(*args, **kwargs)
dispatcher.connect(self.spider_closed, signals.spider_closed)
def start_requests(self):
if self.full_run_required():
start_req = self._start_requests_full()
log.msg('Full run')
else:
start_req = self._start_requests_simple()
log.msg('Simple run')
for req in start_req:
yield req
def spider_closed(self, spider):
if spider.name == self.name:
shutil.copy('data/%s_products.csv' % spider.crawl_id, os.path.join(HERE, 'tomleemusic_products.csv'))
def _start_requests_full(self):
yield Request('http://www.tomleemusic.ca/main/products.cfm', callback=self.parse_full)
def _start_requests_simple(self):
yield Request('http://competitormonitor.com/login.html?action=get_products_api&website_id=470333&matched=1',
callback=self.parse_simple)
def full_run_required(self):
if not os.path.exists(os.path.join(HERE, 'tomleemusic_products.csv')):
return True
        # run full only on Tuesdays (datetime.weekday() == 1; Monday is 0)
return datetime.now().weekday() == 1
def parse_full(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//a[@class="catLink"]/@href').extract():
yield Request(url, callback=self.parse_product_list)
def parse_product_list(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//a[@class="catLink"]/@href').extract():
yield Request(url, callback=self.parse_product_list)
for url in hxs.select(u'//a[@class="productListLink"]/@href').extract():
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product)
next_page = hxs.select(u'//a[@class="smallPrint" and contains(text(),"Next")]/@href').extract()
if next_page:
url = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(url, callback=self.parse_product_list)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
product_loader = ProductLoader(item=Product(), selector=hxs)
product_loader.add_value('url', response.url)
product_loader.add_xpath('name', u'//h1[@class="productDetailHeader"]/text()')
if hxs.select(u'//span[@class="productDetailSelling"]/text()'):
product_loader.add_xpath('price', u'//span[@class="productDetailSelling"]/text()')
else:
product_loader.add_value('price', '')
product_loader.add_xpath('sku', u'//input[@type="hidden" and (@name="hidProductId" or @name="inv")]/@value')
product_loader.add_xpath('category', u'//td[@class="smallPrint"]/a[position()=2 and contains(text(),"Products")]/../a[3]/text()')
img = hxs.select(u'//a[@class="smallPrint" and @rel="lightbox"]/@href').extract()
if img:
img = urljoin_rfc(get_base_url(response), img[0])
product_loader.add_value('image_url', img)
if hxs.select(u'//a[contains(@href,"BrandName")]/@href'):
product_loader.add_xpath('brand', u'substring-after(//a[contains(@href,"BrandName")]/@href,"=")')
else:
brands = hxs.select(u'//strong[@class="sideBarText"]/text()').extract()
brands = [b.strip() for b in brands]
for brand in brands:
if product_loader.get_output_value('name').startswith(brand):
product_loader.add_value('brand', brand)
break
else:
product_loader.add_xpath('brand', u'normalize-space(substring-before(substring-after(//title/text(), " - "), " - "))')
# product_loader.add_xpath('shipping_cost', u'//div[@class="DetailRow"]/div[contains(text(),"Shipping")]/../div[2]/text()')
yield product_loader.load_item()
def parse_simple(self, response):
f = StringIO.StringIO(response.body)
hxs = HtmlXPathSelector()
reader = csv.DictReader(f)
self.matched = set()
for row in reader:
self.matched.add(row['url'])
for url in self.matched:
yield Request(url, self.parse_product)
with open(os.path.join(HERE, 'tomleemusic_products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
if row['url'] not in self.matched:
loader = ProductLoader(selector=hxs, item=Product())
loader.add_value('url', row['url'])
loader.add_value('sku', row['sku'])
loader.add_value('identifier', row['identifier'])
loader.add_value('name', row['name'])
loader.add_value('price', row['price'])
loader.add_value('category', row['category'])
loader.add_value('brand', row['brand'])
loader.add_value('image_url', row['image_url'])
loader.add_value('shipping_cost', row['shipping_cost'])
yield loader.load_item()
| 0--key/lib | portfolio/Python/scrapy/axemusic/tomleemusic_ca.py | Python | apache-2.0 | 5,954 | 0.003359 |
import matplotlib.pyplot as plt
#stores information about laser structure
#saves refraction and electric field profiles in text and graphic form to HDD
class Laser:
    refraction = []
    field = []
    gridX = []
    gridN = []
def __init__(self, (wavelength, concentration, thickness)):
if isinstance(wavelength, (int, float)) == False:
raise TypeError("wavelength should be a number")
if isinstance(concentration, list) == False:
raise TypeError("concentration should be a list")
if isinstance( thickness, (list)) == False:
raise TypeError("thickness should be a list")
for i in range(5):
if isinstance(concentration[i], (int, float)) == False or isinstance( thickness[i], (int, float)) == False:
raise TypeError("concentration and thickness elements should be numbers")
if wavelength is None:
raise ValueError("wavelength is undefined")
if concentration is None:
raise ValueError("concentration is undefined")
if thickness is None:
raise ValueError("thickness is undefined")
if wavelength < 0.85 or wavelength > 1.5:
raise ValueError("wavelength out of range")
self.wavelength = wavelength
self.concentration = concentration
self.thickness = thickness
#refraction profile output
def plotRefraction(self):
if isinstance(self.gridX, list) == False:
raise TypeError("self.gridX should be a list")
if isinstance(self.gridN, list) == False:
raise TypeError("self.gridN should be a list")
if len(self.gridX) <= 20:
raise ValueError("len(self.gridX) out of range")
if len(self.gridN) <= 20:
raise ValueError("len(self.gridN) out of range")
if (len(self.gridX) == len(self.gridN)) == False:
raise IndexError("self.gridX should be the same dimension as self.gridN")
plt.plot(self.gridX, self.gridN)
plt.xlabel('position, micrometers')
plt.ylabel('refraction index, arb. units')
plt.title('Refraction Index Profile')
plt.savefig('refraction.png', format='png', dpi=100)
plt.clf()
refractionFile = open("refraction.txt", "w")
for i in range(len(self.gridN)):
refractionFile.write(str(self.gridX[i]) + ": " + str(self.gridN[i]) + "\n")
refractionFile.close()
#field profile output
def plotField(self):
if isinstance(self.gridX, list) == False:
raise TypeError("self.gridX should be a list")
if isinstance(self.field, list) == False:
raise TypeError("self.field should be a list")
if len(self.gridX) <= 20:
raise ValueError("len(self.gridX) out of range")
if len(self.field) <= 20:
raise ValueError("len(self.field) out of range")
if (len(self.gridX) == len(self.field)) == False:
raise TypeError("self.gridX should be the same dimension as self.field")
for i in range(len(self.field)):
self.field[i] = self.field[i] ** 2
plt.plot(self.gridX, self.field)
plt.xlabel('position, micrometers')
plt.ylabel('electric field, arb. units')
plt.title('Electric field in laser structure')
plt.savefig('field.png', format='png', dpi=100)
plt.clf()
fieldFile = open("field.txt", "w")
        for i in range(len(self.field)):
fieldFile.write(str(self.gridX[i]) + ": " + str(self.field[i]) + "\n")
fieldFile.close()
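# Illustrative usage sketch (assumed layer data, not part of the original
# class): a five-layer structure with arbitrary concentrations and thicknesses.
#   laser = Laser((1.06, [0.0, 0.1, 0.3, 0.1, 0.0], [1.0, 0.5, 0.2, 0.5, 1.0]))
# laser.gridX, laser.gridN and laser.field must be populated by a solver
# before laser.plotRefraction() or laser.plotField() can be called.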
| DQE-Polytech-University/Beamplex | src/laserstructure.py | Python | mit | 3,771 | 0.008221 |
"""
The main script
"""
import argparse
import summaryrank.features
import summaryrank.importers
import summaryrank.tools
DESCRIPTION = '''
SummaryRank is a set of tools that help produce
summary/sentence rankers. It supports a wide range of functions such
as generating judgments in trec_eval format or creating feature
vectors in the SVMLight format.
corpora tools:
{}
representations and features:
{}
commands:
{}
'''
IMPORTER_FUNCTIONS = [
("import_webap", summaryrank.importers.import_webap),
("import_trec_novelty", summaryrank.importers.import_trec_novelty),
("import_mobileclick", summaryrank.importers.import_mobileclick),
]
FEATURE_FUNCTIONS = [
("gen_term", summaryrank.features.gen_term),
("gen_freqstats", summaryrank.features.gen_freqstats),
("gen_esa", summaryrank.features.gen_esa),
("gen_tagme", summaryrank.features.gen_tagme),
("extract", summaryrank.features.extract),
("contextualize", summaryrank.features.contextualize),
]
GENERAL_FUNCTIONS = [
("describe", summaryrank.tools.describe),
("cut", summaryrank.tools.cut),
("join", summaryrank.tools.join),
("shuffle", summaryrank.tools.shuffle),
("split", summaryrank.tools.split),
("normalize", summaryrank.tools.normalize),
]
def _make_command_list(functions):
""" Prepare a formatted list of commands. """
return [' {:24}{}\n'.format(name, func.__doc__.strip().splitlines()[0])
for name, func in functions]
if __name__.endswith('__main__'):
importer_commands = ''.join(_make_command_list(IMPORTER_FUNCTIONS))
feature_commands = ''.join(_make_command_list(FEATURE_FUNCTIONS))
general_commands = ''.join(_make_command_list(GENERAL_FUNCTIONS))
parser = argparse.ArgumentParser(
prog='summaryrank',
formatter_class=argparse.RawDescriptionHelpFormatter,
usage='%(prog)s [options..] command [args..]',
add_help=False,
description=DESCRIPTION.format(
importer_commands, feature_commands, general_commands)
)
parser.add_argument('command', nargs='?', help=argparse.SUPPRESS)
parser.add_argument('argv', nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
args = parser.parse_args()
commands = dict()
commands.update(IMPORTER_FUNCTIONS)
commands.update(FEATURE_FUNCTIONS)
commands.update(GENERAL_FUNCTIONS)
if args.command in commands:
commands[args.command](args.argv)
else:
if args.command is not None:
parser.error("invalid command '{}'".format(args.command))
else:
parser.print_help()
| rmit-ir/SummaryRank | summaryrank/__main__.py | Python | mit | 2,623 | 0.000381 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for validator.api.middleware.ssl """
from __future__ import unicode_literals
import mock
from validator.api.middleware.ssl import SSLMiddleware
from validator.tests.base import ValidatorTestCase
class SSLMiddlewareTestCase(ValidatorTestCase):
""" Tests for class SSLMiddleware """
def setUp(self):
""" Create a SSLMiddleware instance """
super(SSLMiddlewareTestCase, self).setUp()
self.item = SSLMiddleware()
def test_process_request(self):
""" Tests for method process_request """
self.item.external = mock.MagicMock()
input = "MyInput"
expected = "OK"
self.item.external.return_value = "OK"
observed = self.item.process_request(input)
self.assertEqual(expected, observed)
def tearDown(self):
""" Cleanup the SSLMiddleware instance """
super(SSLMiddlewareTestCase, self).tearDown()
self.m.UnsetStubs()
self.m.ResetAll()
| pmverdugo/fiware-validator | validator/tests/api/middleware/test_ssl.py | Python | apache-2.0 | 1,568 | 0.000638 |
# Copyright (c) 2017 Nick Gashkov
#
# Distributed under MIT License. See LICENSE file for details.
class ValidationError(Exception):
def __init__(self, *args, **kwargs):
self.error_dict = kwargs.pop('error_dict')
super(ValidationError, self).__init__(*args, **kwargs)
| nickgashkov/virtualspace | virtualspace/utils/exceptions.py | Python | mit | 290 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt5.Qt import QWidget, pyqtSignal
import yali.util
import yali.context as ctx
from yali.gui import ScreenWidget
from yali.gui.Ui.bootloaderwidget import Ui_BootLoaderWidget
from yali.storage.bootloader import BOOT_TYPE_NONE, BOOT_TYPE_PARTITION, BOOT_TYPE_MBR, BOOT_TYPE_RAID
class Widget(QWidget, ScreenWidget):
name = "bootloadersetup"
def __init__(self):
QWidget.__init__(self)
self.ui = Ui_BootLoaderWidget()
self.ui.setupUi(self)
self.bootloader = None
self.default = None
self.device = None
self.boot_disk = None
self.boot_partition = None
self.ui.defaultSettings.toggled[bool].connect(self.showDefaultSettings)
self.ui.noInstall.toggled[bool].connect(self.deactivateBootloader)
# self.ui.installPartition.toggled[bool].connect(self.activateInstallPartition)
self.ui.drives.currentIndexChanged[int].connect(self.currentDeviceChanged)
self.ui.advancedSettingsBox.show()
self.ui.defaultSettings.setChecked(True)
def fillDrives(self):
self.ui.drives.clear()
for drive in self.bootloader.drives:
device = ctx.storage.devicetree.getDeviceByName(drive)
item = u"%s" % (device.name)
self.ui.drives.addItem(item, device)
def shown(self):
if ctx.flags.install_type == ctx.STEP_RESCUE:
ctx.mainScreen.disableBack()
self.bootloader = ctx.bootloader
self.bootloader.storage = ctx.storage
self.fillDrives()
self.activateChoices()
def backCheck(self):
if ctx.storage.doAutoPart:
ctx.mainScreen.step_increment = 2
ctx.storage.reset()
return True
def execute(self):
self.bootloader.stage1Device = self.device
if self.ui.noInstall.isChecked():
self.bootloader.bootType = BOOT_TYPE_NONE
# elif self.ui.installPartition.isChecked():
# self.bootloader.bootType = BOOT_TYPE_PARTITION
elif self.ui.installMBR.isChecked():
self.bootloader.bootType = BOOT_TYPE_MBR
if ctx.flags.install_type == ctx.STEP_RESCUE:
ctx.mainScreen.step_increment = 2
else:
if ctx.flags.collection:
ctx.collections = yali.util.get_collections()
if len(ctx.collections) <= 1:
ctx.flags.collection = False
ctx.mainScreen.step_increment = 2
else:
ctx.mainScreen.step_increment = 2
return True
def showDefaultSettings(self, state):
if state:
self.device = self.default
self.ui.advancedSettingsBox.hide()
else:
self.ui.advancedSettingsBox.show()
def activateChoices(self):
for choice in self.bootloader.choices.keys():
if choice == BOOT_TYPE_MBR:
self.ui.installMBR.setText(_("The first sector of"))
self.boot_disk = self.bootloader.choices[BOOT_TYPE_MBR][0]
# elif choice == BOOT_TYPE_RAID:
# self.ui.installPartition.setText("The RAID array where Pardus is installed")
# self.boot_partition = self.bootloader.choices[BOOT_TYPE_RAID][0]
# elif choice == BOOT_TYPE_PARTITION:
# self.ui.installPartition.setText(_("The partition where Pardus is installed"))
# self.boot_partition = self.bootloader.choices[BOOT_TYPE_PARTITION][0]
if self.boot_disk:
self.default = self.boot_disk
self.ui.installMBR.setChecked(True)
# else:
# self.default = self.boot_partition
# self.ui.installPartition.setChecked(True)
def deactivateBootloader(self):
self.device = None
def activateInstallPartition(self, state):
if state:
self.device = self.boot_partition
def currentDeviceChanged(self, index):
if index != -1:
self.device = self.ui.drives.itemData(index).name
| forYali/yali | yali/gui/ScrBootloader.py | Python | gpl-2.0 | 4,461 | 0.003362 |
#!/usr/bin/python
#
# to run an example
# python RunMakeFigures.py -p Demo -i 0 -j 1 -f 3FITC_4PE_004.fcs -h ./projects/Demo
#
import getopt,sys,os
import numpy as np
## important line to fix popup error in mac osx
import matplotlib
matplotlib.use('Agg')
from cytostream import Model
import matplotlib.pyplot as plt
## parse inputs
def bad_input():
print "\nERROR: incorrect args"
print sys.argv[0] + "-p projectID -i channel1 -j channel2 -f selectedFile -a alternateDirectory -s subset -t modelType -h homeDir"
print " projectID (-p) project name"
print " channel1 (-i) channel 1 name"
print " channel2 (-j) channel 2 name"
print " homeDir (-h) home directory of current project"
print " selectedFile (-f) name of selected file"
print " altDir (-a) alternative directory (optional)"
print " subset (-s) subsampling number (optional)"
print " modelName (-m) model name"
print " modelType (-t) model type"
print "\n"
sys.exit()
try:
optlist, args = getopt.getopt(sys.argv[1:],'i:j:s:a:p:f:m:t:h:')
except getopt.GetoptError:
print getopt.GetoptError
bad_input()
projectID = None
channel1 = None
channel2 = None
selectedFile = None
altDir = None
homeDir = None
modelType = None
modelName = None
subset = "all"
run = True
for o, a in optlist:
if o == '-i':
channel1 = a
if o == '-j':
channel2 = a
if o == '-f':
selectedFile = a
if o == '-a':
altDir = a
if o == '-p':
projectID = a
if o == '-s':
subset = a
if o == '-m':
modelName = a
if o == '-t':
modelType = a
if o == '-h':
homeDir = a
def make_scatter_plot(model,selectedFile,channel1Ind,channel2Ind,subset='all',labels=None,buff=0.02,altDir=None):
#fig = pyplot.figure(figsize=(7,7))
markerSize = 5
alphaVal = 0.5
fontName = 'arial'
fontSize = 12
plotType = 'png'
## prepare figure
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111)
## specify channels
fileChannels = model.get_file_channel_list(selectedFile)
index1 = int(channel1Ind)
index2 = int(channel2Ind)
channel1 = fileChannels[index1]
channel2 = fileChannels[index2]
data = model.pyfcm_load_fcs_file(selectedFile)
## subset give an numpy array of indices
if subset != "all":
subsampleIndices = model.get_subsample_indices(subset)
data = data[subsampleIndices,:]
## make plot
totalPoints = 0
if labels == None:
ax.scatter([data[:,index1]],[data[:,index2]],color='blue',s=markerSize)
else:
if type(np.array([])) != type(labels):
labels = np.array(labels)
numLabels = np.unique(labels).size
maxLabel = np.max(labels)
cmp = model.get_n_color_colorbar(maxLabel+1)
for l in np.sort(np.unique(labels)):
rgbVal = tuple([val * 256 for val in cmp[l,:3]])
hexColor = model.rgb_to_hex(rgbVal)[:7]
x = data[:,index1][np.where(labels==l)[0]]
y = data[:,index2][np.where(labels==l)[0]]
totalPoints+=x.size
if x.size == 0:
continue
ax.scatter(x,y,color=hexColor,s=markerSize)
#ax.scatter(x,y,color=hexColor,s=markerSize)
## handle data edge buffers
bufferX = buff * (data[:,index1].max() - data[:,index1].min())
bufferY = buff * (data[:,index2].max() - data[:,index2].min())
ax.set_xlim([data[:,index1].min()-bufferX,data[:,index1].max()+bufferX])
ax.set_ylim([data[:,index2].min()-bufferY,data[:,index2].max()+bufferY])
## save file
fileName = selectedFile
ax.set_title("%s_%s_%s"%(channel1,channel2,fileName),fontname=fontName,fontsize=fontSize)
ax.set_xlabel(channel1,fontname=fontName,fontsize=fontSize)
ax.set_ylabel(channel2,fontname=fontName,fontsize=fontSize)
if altDir == None:
fileName = os.path.join(model.homeDir,'figs',"%s_%s_%s.%s"%(selectedFile[:-4],channel1,channel2,plotType))
fig.savefig(fileName,transparent=False,dpi=50)
else:
fileName = os.path.join(altDir,"%s_%s_%s.%s"%(selectedFile[:-4],channel1,channel2,plotType))
fig.savefig(fileName,transparent=False,dpi=50)
## error checking
if altDir == 'None':
altDir = None
if homeDir == 'None':
homeDir = None
if modelName == 'None':
modelName = None
statModel,statModelClasses = None,None
if altDir == None and homeDir == None:
bad_input()
run = False
print "WARNING: RunMakeFigures failed errorchecking"
if projectID == None or channel1 == None or channel2 == None or selectedFile == None:
bad_input()
run = False
print "WARNING: RunMakeFigures failed errorchecking"
if os.path.isdir(homeDir) == False:
print "ERROR: homedir does not exist -- bad project name", projectID, homeDir
run = False
if altDir != None and os.path.isdir(altDir) == False:
print "ERROR: specified alternative dir does not exist\n", altDir
run = False
if run == True:
model = Model()
model.initialize(projectID,homeDir)
if modelName == None:
make_scatter_plot(model,selectedFile,channel1,channel2,subset=subset,altDir=altDir)
else:
statModel,statModelClasses = model.load_model_results_pickle(modelName,modelType)
make_scatter_plot(model,selectedFile,channel1,channel2,labels=statModelClasses,subset=subset,altDir=altDir)
| ajrichards/GenesDI | genesdi/RunMakeFigures.py | Python | gpl-3.0 | 5,628 | 0.022388 |
import json
import os
import socket
import sys
import uuid
import etcd
from tendrl.commons import objects
from tendrl.commons.utils import etcd_utils
from tendrl.commons.utils import event_utils
from tendrl.commons.utils import log_utils as logger
NODE_ID = None
class NodeContext(objects.BaseObject):
def __init__(self, node_id=None, fqdn=None, ipv4_addr=None,
tags=None, status=None, sync_status=None,
last_sync=None, pkey=None,
locked_by=None, *args, **kwargs):
super(NodeContext, self).__init__(*args, **kwargs)
self.node_id = node_id or self._get_node_id() or self._create_node_id()
self.fqdn = fqdn
self.ipv4_addr = ipv4_addr
if self.fqdn:
self.ipv4_addr = socket.gethostbyname(self.fqdn)
self.locked_by = locked_by
curr_tags = []
try:
_nc_data = etcd_utils.read(
"/nodes/%s/NodeContext/data" % self.node_id
).value
curr_tags = json.loads(_nc_data)['tags']
except etcd.EtcdKeyNotFound:
pass
try:
curr_tags = json.loads(curr_tags)
except (ValueError, TypeError):
# No existing tags
pass
self.tags = tags or []
self.tags += NS.config.data.get('tags', [])
self.tags += curr_tags
self.tags = list(set(self.tags))
self.status = status or "UP"
self.sync_status = sync_status
self.last_sync = last_sync
self.pkey = pkey or self.fqdn
self.value = 'nodes/{0}/NodeContext'
def _create_node_id(self):
node_id = str(uuid.uuid4())
try:
logger.log(
"debug",
NS.publisher_id,
{"message": "Registered Node (%s) with " % node_id}
)
except KeyError:
sys.stdout.write("message: Registered Node (%s) \n" % node_id)
local_node_id = "/var/lib/tendrl/node_id"
if not os.path.exists(os.path.dirname(local_node_id)):
os.makedirs(os.path.dirname(local_node_id))
with open(local_node_id, 'wb+') as f:
f.write(node_id)
global NODE_ID
NODE_ID = node_id
return node_id
def _get_node_id(self):
if NODE_ID:
return NODE_ID
local_node_id = "/var/lib/tendrl/node_id"
if os.path.isfile(local_node_id):
with open(local_node_id) as f:
node_id = f.read()
global NODE_ID
NODE_ID = node_id
return node_id
def render(self):
self.value = self.value.format(self.node_id or NS.node_context.node_id)
return super(NodeContext, self).render()
def save(self, update=True, ttl=None):
super(NodeContext, self).save(update)
status = self.value + "/status"
if ttl:
self._ttl = ttl
try:
etcd_utils.refresh(status, ttl)
except etcd.EtcdKeyNotFound:
pass
def on_change(self, attr, prev_value, current_value):
if attr == "status":
_tc = NS.tendrl.objects.TendrlContext(
node_id=self.node_id
).load()
if current_value is None:
self.status = "DOWN"
self.save()
msg = "Node {0} is DOWN".format(self.fqdn)
event_utils.emit_event(
"node_status",
self.status,
msg,
"node_{0}".format(self.fqdn),
"WARNING",
node_id=self.node_id,
integration_id=_tc.integration_id
)
# Load cluster_node_context will load node_context
# and it will be updated with latest values
cluster_node_context = NS.tendrl.objects.ClusterNodeContext(
node_id=self.node_id,
integration_id=_tc.integration_id
)
cluster_node_context.save()
del cluster_node_context
global_details = NS.tendrl.objects.GlobalDetails(
integration_id=_tc.integration_id).load()
if global_details.status.lower() == "healthy":
global_details.status = "unhealthy"
global_details.save()
_cluster = NS.tendrl.objects.Cluster(
integration_id=_tc.integration_id
).load()
msg = "Cluster:%s is %s" % (
_cluster.short_name, "unhealthy")
instance = "cluster_%s" % _tc.integration_id
event_utils.emit_event(
"cluster_health_status",
"unhealthy",
msg,
instance,
'WARNING',
integration_id=_tc.integration_id
)
_tag = "provisioner/%s" % _tc.integration_id
if _tag in self.tags:
_index_key = "/indexes/tags/%s" % _tag
self.tags.remove(_tag)
self.save()
etcd_utils.delete(_index_key)
_msg = "node_sync, STALE provisioner node "\
"found! re-configuring monitoring "\
"(job-id: %s) on this node"
payload = {
"tags": ["tendrl/node_%s" % self.node_id],
"run": "tendrl.flows.ConfigureMonitoring",
"status": "new",
"parameters": {
'TendrlContext.integration_id': _tc.integration_id
},
"type": "node"
}
_job_id = str(uuid.uuid4())
NS.tendrl.objects.Job(
job_id=_job_id,
status="new",
payload=payload
).save()
logger.log(
"debug",
NS.publisher_id,
{"message": _msg % _job_id}
)
if _tc.sds_name in ["gluster", "RHGS"]:
bricks = etcd_utils.read(
"clusters/{0}/Bricks/all/{1}".format(
_tc.integration_id,
self.fqdn
)
)
for brick in bricks.leaves:
try:
etcd_utils.write(
"{0}/status".format(brick.key),
"Stopped"
)
except (etcd.EtcdAlreadyExist, etcd.EtcdKeyNotFound):
pass
elif current_value == "UP":
msg = "{0} is UP".format(self.fqdn)
event_utils.emit_event(
"node_status",
"UP",
msg,
"node_{0}".format(self.fqdn),
"INFO",
node_id=self.node_id,
integration_id=_tc.integration_id
)
| r0h4n/commons | tendrl/commons/objects/node_context/__init__.py | Python | lgpl-2.1 | 7,441 | 0 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def addpkg(*args, **kwargs):
pass
| jrha/aquilon | build/bootstrap_ms/ms/version/__init__.py | Python | apache-2.0 | 726 | 0 |
#coding=utf8
'''
Created on 2012-9-19
@author: senon
'''
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('friendships.views',
url(r'^concerned_about_friends/', 'concerned_about_friends')
) | xlk521/cloudguantou | friendships/urls.py | Python | bsd-3-clause | 252 | 0.019841 |
import ply.yacc as yacc
from bsi_lexer import tokens
from bsi_object import BsiObject
from bsi_array import BsiArray
def p_object_pairs(p):
'obj : pairs'
p[0] = BsiObject()
for pair in p[1]:
p[0].set(pair[0], pair[1])
def p_pairs_pair(p):
'pairs : pair'
p[0] = [p[1]]
def p_pairs_pair_pairs(p):
'pairs : pair pairs'
p[0] = [p[1]] + p[2]
def p_pair_key_eq_value(p):
'pair : KEY EQ val'
p[0] = (p[1], p[3])
def p_val_num(p):
'val : NUM'
p[0] = p[1]
def p_val_string(p):
'val : STRING'
p[0] = p[1]
def p_val_array(p):
'val : L_SQ_BR vals R_SQ_BR'
p[0] = BsiArray(p[2])
def p_array_val(p):
'vals : val'
p[0] = [p[1]]
def p_array_vals(p):
'vals : val vals'
p[0] = [p[1]] + p[2]
def p_val_nested_obj(p):
'val : L_BRACE obj R_BRACE'
p[0] = p[2]
def p_error(p):
print p
print "Syntax error in input!"
bsi_parser = yacc.yacc()
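# Illustrative usage sketch (not part of the original module). The surface
# syntax -- KEY '=' value pairs, '[' ... ']' arrays and '{' ... '}' nested
# objects -- is inferred from the productions above; the concrete token
# regexes live in bsi_lexer, so the sample input below is hypothetical.
if __name__ == '__main__':
    result = bsi_parser.parse('width = 3 tags = [ 1 2 3 ] child = { depth = 1 }')
    print result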
| jshou/bsi | bsi/bsi_parser.py | Python | mit | 930 | 0.012903 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
_TASK_RUNNER = conf.get('core', 'TASK_RUNNER')
def get_task_runner(local_task_job):
"""
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
"""
if _TASK_RUNNER == "StandardTaskRunner":
return StandardTaskRunner(local_task_job)
elif _TASK_RUNNER == "CgroupTaskRunner":
from airflow.task.task_runner.cgroup_task_runner import CgroupTaskRunner
return CgroupTaskRunner(local_task_job)
else:
raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
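# Illustrative usage sketch (an assumption, not part of this module): the
# scheduler-side caller is expected to do roughly
#   runner = get_task_runner(local_task_job)
#   runner.start()
#   ...
#   runner.terminate()
# where start()/terminate() come from
# airflow.task.task_runner.base_task_runner.BaseTaskRunner.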
| Fokko/incubator-airflow | airflow/task/task_runner/__init__.py | Python | apache-2.0 | 1,828 | 0.001094 |
##################################################################################################
# $HeadURL$
##################################################################################################
"""Collection of DIRAC useful statistics related modules.
.. warning::
By default on Error they return None.
"""
__RCSID__ = "$Id$"
from math import sqrt # Mathematical functions.
def getMean( numbers ):
"""Returns the arithmetic mean of a numeric list.
:param list numbers: data sample
"""
if len(numbers):
numbers = sorted([float(x) for x in numbers])
return sum(numbers)/float(len(numbers))
def getMedian( numbers ):
""" Return the median of the list of numbers.
:param list numbers: data sample
"""
# Sort the list and take the middle element.
nbNum = len(numbers)
if not nbNum:
return
copy = sorted( [float(x) for x in numbers] )
if nbNum & 1: # There is an odd number of elements
return copy[nbNum//2]
else:
return 0.5*(copy[nbNum//2 - 1] + copy[nbNum//2])
def getVariance( numbers, posMean='Empty' ):
"""Determine the measure of the spread of the data set about the mean.
Sample variance is determined by default; population variance can be
determined by setting population attribute to True.
:param list numbers: data sample
:param mixed posMean: mean of a sample or 'Empty' str
"""
if not len(numbers):
return
if posMean == 'Empty':
mean = getMean(numbers)
else:
mean = posMean
numbers = sorted( [float(x) for x in numbers] )
# Subtract the mean from each data item and square the difference.
# Sum all the squared deviations.
return sum([(float(item)-mean)**2.0 for item in numbers ])/len(numbers)
def getStandardDeviation(numbers, variance='Empty', mean='Empty'):
"""Determine the measure of the dispersion of the data set based on the
variance.
  :param list numbers: data sample
:param mixed variance: variance or str 'Empty'
:param mixed mean: mean or str 'Empty'
"""
if not len(numbers):
return
# Take the square root of the variance.
if variance == 'Empty':
if mean == 'Empty':
variance = getVariance(numbers)
else:
variance = getVariance(numbers, posMean=mean)
return sqrt(variance)
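# Illustrative usage sketch, not part of the original module; the sample
# numbers below are arbitrary.
if __name__ == '__main__':
  sample = [2, 4, 4, 4, 5, 5, 7, 9]
  mean = getMean( sample )
  print 'mean:', mean  # 5.0
  print 'median:', getMedian( sample )  # 4.5
  variance = getVariance( sample, posMean = mean )
  print 'variance:', variance  # 4.0 (population variance)
  print 'stddev:', getStandardDeviation( sample, variance = variance )  # 2.0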
| avedaee/DIRAC | Core/Utilities/Statistics.py | Python | gpl-3.0 | 2,281 | 0.021043 |
#!/home/jojoriveraa/Dropbox/Capacitación/Platzi/Python-Django/NFCow/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| jojoriveraa/titulacion-NFCOW | venv/bin/django-admin.py | Python | apache-2.0 | 192 | 0 |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TransactionsRuleField(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'TRANSACTION_ID': "TRANSACTION_ID",
'NAME': "NAME",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""TransactionsRuleField - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): Transaction field for which the rule is defined.., must be one of ["TRANSACTION_ID", "NAME", ] # noqa: E501
Keyword Args:
value (str): Transaction field for which the rule is defined.., must be one of ["TRANSACTION_ID", "NAME", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
| plaid/plaid-python | plaid/model/transactions_rule_field.py | Python | mit | 6,963 | 0.000718 |
'''Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
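# Illustrative note (not in the original script): with preds = [0.5, 0.3, 0.2],
# temperature 0.2 re-weights the distribution to roughly [0.92, 0.07, 0.01],
# so the argmax is drawn almost always, while temperature 1.2 flattens it and
# lower-probability characters get sampled more often.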
# train the model, output generated text after each iteration
for iteration in range(1, 60):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=128, nb_epoch=1)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
model.save_weights('data/nietzsche_simple_TF.h5')
| sysid/nbs | lstm/lstm_text_generation.py | Python | mit | 3,355 | 0.000596 |
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block-relay-only anchors functionality"""
import os
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import check_node_connections
INBOUND_CONNECTIONS = 5
BLOCK_RELAY_CONNECTIONS = 2
class AnchorsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.disable_autoconnect = False
def run_test(self):
node_anchors_path = os.path.join(
self.nodes[0].datadir, "regtest", "anchors.dat"
)
self.log.info("When node starts, check if anchors.dat doesn't exist")
assert not os.path.exists(node_anchors_path)
self.log.info(f"Add {BLOCK_RELAY_CONNECTIONS} block-relay-only connections to node")
for i in range(BLOCK_RELAY_CONNECTIONS):
self.log.debug(f"block-relay-only: {i}")
self.nodes[0].add_outbound_p2p_connection(
P2PInterface(), p2p_idx=i, connection_type="block-relay-only"
)
self.log.info(f"Add {INBOUND_CONNECTIONS} inbound connections to node")
for i in range(INBOUND_CONNECTIONS):
self.log.debug(f"inbound: {i}")
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info("Check node connections")
check_node_connections(node=self.nodes[0], num_in=5, num_out=2)
# 127.0.0.1
ip = "7f000001"
# Since the ip is always 127.0.0.1 for this case,
# we store only the port to identify the peers
block_relay_nodes_port = []
inbound_nodes_port = []
for p in self.nodes[0].getpeerinfo():
addr_split = p["addr"].split(":")
if p["connection_type"] == "block-relay-only":
block_relay_nodes_port.append(hex(int(addr_split[1]))[2:])
else:
inbound_nodes_port.append(hex(int(addr_split[1]))[2:])
self.log.info("Stop node 0")
self.stop_node(0)
# It should contain only the block-relay-only addresses
self.log.info("Check the addresses in anchors.dat")
with open(node_anchors_path, "rb") as file_handler:
anchors = file_handler.read().hex()
for port in block_relay_nodes_port:
ip_port = ip + port
assert ip_port in anchors
for port in inbound_nodes_port:
ip_port = ip + port
assert ip_port not in anchors
self.log.info("Start node")
self.start_node(0)
self.log.info("When node starts, check if anchors.dat doesn't exist anymore")
assert not os.path.exists(node_anchors_path)
if __name__ == "__main__":
AnchorsTest().main()
| ajtowns/bitcoin | test/functional/feature_anchors.py | Python | mit | 2,924 | 0.000684 |
import ray
from ray._private.test_utils import run_string_as_driver
# This tests the queue transitions for infeasible tasks. This has been an issue
# in the past, e.g., https://github.com/ray-project/ray/issues/3275.
def test_infeasible_tasks(ray_start_cluster):
cluster = ray_start_cluster
@ray.remote
def f():
return
cluster.add_node(resources={str(0): 100})
ray.init(address=cluster.address)
# Submit an infeasible task.
x_id = f._remote(args=[], kwargs={}, resources={str(1): 1})
# Add a node that makes the task feasible and make sure we can get the
# result.
cluster.add_node(resources={str(1): 100})
ray.get(x_id)
# Start a driver that submits an infeasible task and then let it exit.
driver_script = """
import ray
ray.init(address="{}")
@ray.remote(resources={})
def f():
{}pass # This is a weird hack to insert some blank space.
f.remote()
""".format(
cluster.address, "{str(2): 1}", " "
)
run_string_as_driver(driver_script)
# Now add a new node that makes the task feasible.
cluster.add_node(resources={str(2): 100})
# Make sure we can still run tasks on all nodes.
ray.get([f._remote(args=[], kwargs={}, resources={str(i): 1}) for i in range(3)])
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| ray-project/ray | python/ray/tests/test_node_manager.py | Python | apache-2.0 | 1,376 | 0.000727 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Finds revisions from the Thunderbird migration that don't have based_on
set correctly, and are still relavent, and fixes that.
Run this script like `./manage.py runscript fix_tb_basedon`.
"""
import sys
from traceback import print_exc
from django.db.models import Q
from kitsune.wiki.models import Document, Revision
def run():
try:
run_()
except Exception:
print_exc()
raise
class Progress():
def __init__(self, total):
self.current = 0
self.total = total
def tick(self, incr=1):
self.current += incr
self.draw()
def draw(self):
self._wr('{0.current} / {0.total}\r'.format(self))
def _wr(self, s):
sys.stdout.write(s)
sys.stdout.flush()
def run_():
to_process = list(Document.objects.filter(
~Q(parent=None),
current_revision__based_on=None,
products__slug='thunderbird'))
    if len(to_process) == 0:
        print 'Nothing to do.'
        return
prog = Progress(len(to_process))
for doc in to_process:
prog.tick()
oldest_parent_rev = (Revision.objects.filter(document=doc.parent)
.order_by('id')[0])
# It has localizations, clearly it should be localizable.
if not doc.parent.is_localizable:
doc.parent.is_localizable = True
doc.parent.save()
doc.current_revision.based_on = oldest_parent_rev
doc.current_revision.save()
| feer56/Kitsune1 | scripts/fix_tb_basedon.py | Python | bsd-3-clause | 1,520 | 0 |
from pidWX import *
| adalke/rdkit | rdkit/sping/WX/__init__.py | Python | bsd-3-clause | 20 | 0 |
# -*- coding: utf-8 -*-
import os
from future.moves.urllib.parse import quote
import uuid
import ssl
from pymongo import MongoClient
import requests
from django.apps import apps
from addons.wiki import settings as wiki_settings
from addons.wiki.exceptions import InvalidVersionError
from osf.utils.permissions import ADMIN, READ, WRITE
# MongoDB forbids field names that begin with "$" or contain ".". These
# utilities map to and from Mongo field names.
mongo_map = {
'.': '__!dot!__',
'$': '__!dollar!__',
}
def to_mongo(item):
for key, value in mongo_map.items():
item = item.replace(key, value)
return item
def to_mongo_key(item):
return to_mongo(item).strip().lower()
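# For example (illustrative, not part of the original module):
#   to_mongo('total.cost')   -> 'total__!dot!__cost'
#   to_mongo_key(' Home.$ ') -> 'home__!dot!____!dollar!__'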
def generate_private_uuid(node, wname):
"""
Generate private uuid for internal use in sharejs namespacing.
Note that this will NEVER be passed to to the client or sharejs.
"""
private_uuid = str(uuid.uuid1())
wiki_key = to_mongo_key(wname)
node.wiki_private_uuids[wiki_key] = private_uuid
node.save()
return private_uuid
def get_sharejs_uuid(node, wname):
"""
Format private uuid into the form used in mongo and sharejs.
This includes node's primary ID to prevent fork namespace collision
"""
wiki_key = to_mongo_key(wname)
private_uuid = node.wiki_private_uuids.get(wiki_key)
return str(uuid.uuid5(
uuid.UUID(private_uuid),
str(node._id)
)) if private_uuid else None
def delete_share_doc(node, wname):
"""Deletes share document and removes namespace from model."""
db = share_db()
sharejs_uuid = get_sharejs_uuid(node, wname)
db['docs'].remove({'_id': sharejs_uuid})
db['docs_ops'].remove({'name': sharejs_uuid})
wiki_key = to_mongo_key(wname)
del node.wiki_private_uuids[wiki_key]
node.save()
def migrate_uuid(node, wname):
"""Migrates uuid to new namespace."""
db = share_db()
old_sharejs_uuid = get_sharejs_uuid(node, wname)
broadcast_to_sharejs('lock', old_sharejs_uuid)
generate_private_uuid(node, wname)
new_sharejs_uuid = get_sharejs_uuid(node, wname)
doc_item = db['docs'].find_one({'_id': old_sharejs_uuid})
if doc_item:
doc_item['_id'] = new_sharejs_uuid
db['docs'].insert(doc_item)
db['docs'].remove({'_id': old_sharejs_uuid})
ops_items = [item for item in db['docs_ops'].find({'name': old_sharejs_uuid})]
if ops_items:
for item in ops_items:
item['_id'] = item['_id'].replace(old_sharejs_uuid, new_sharejs_uuid)
item['name'] = new_sharejs_uuid
db['docs_ops'].insert(ops_items)
db['docs_ops'].remove({'name': old_sharejs_uuid})
write_contributors = [
user._id for user in node.contributors
if node.has_permission(user, WRITE)
]
broadcast_to_sharejs('unlock', old_sharejs_uuid, data=write_contributors)
def share_db():
"""Generate db client for sharejs db"""
client = MongoClient(wiki_settings.SHAREJS_DB_URL, ssl_cert_reqs=ssl.CERT_NONE)
return client[wiki_settings.SHAREJS_DB_NAME]
def get_sharejs_content(node, wname):
db = share_db()
sharejs_uuid = get_sharejs_uuid(node, wname)
doc_item = db['docs'].find_one({'_id': sharejs_uuid})
return doc_item['_data'] if doc_item else ''
def broadcast_to_sharejs(action, sharejs_uuid, node=None, wiki_name='home', data=None):
"""
Broadcast an action to all documents connected to a wiki.
Actions include 'lock', 'unlock', 'redirect', and 'delete'
'redirect' and 'delete' both require a node to be specified
'unlock' requires data to be a list of contributors with write permission
"""
url = 'http://{host}:{port}/{action}/{id}/'.format(
host=wiki_settings.SHAREJS_HOST,
port=wiki_settings.SHAREJS_PORT,
action=action,
id=sharejs_uuid
)
if action == 'redirect' or action == 'delete':
redirect_url = quote(
node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True),
safe='',
)
url = os.path.join(url, redirect_url)
try:
requests.post(url, json=data)
except requests.ConnectionError:
pass # Assume sharejs is not online
def format_wiki_version(version, num_versions, allow_preview):
"""
:param str version: 'preview', 'current', 'previous', '1', '2', ...
:param int num_versions:
:param allow_preview: True if view, False if compare
"""
if not version:
return
if version.isdigit():
version = int(version)
if version > num_versions or version < 1:
raise InvalidVersionError
elif version == num_versions:
return 'current'
elif version == num_versions - 1:
return 'previous'
elif version != 'current' and version != 'previous':
if allow_preview and version == 'preview':
return version
raise InvalidVersionError
elif version == 'previous' and num_versions == 0:
raise InvalidVersionError
return version
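# Illustrative behaviour of format_wiki_version (not part of the original
# module): with num_versions=3, '3' maps to 'current', '2' to 'previous', and
# 'preview' is returned only when allow_preview=True (otherwise it raises
# InvalidVersionError).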
def serialize_wiki_settings(user, nodes):
""" Format wiki data for project settings page
:param user: modular odm User object
:param nodes: list of parent project nodes
:return: treebeard-formatted data
"""
WikiPage = apps.get_model('addons_wiki.WikiPage')
items = []
for node in nodes:
assert node, '{} is not a valid Node.'.format(node._id)
can_read = node.has_permission(user, READ)
is_admin = node.has_permission(user, ADMIN)
include_wiki_settings = WikiPage.objects.include_wiki_settings(node)
if not include_wiki_settings:
continue
children = node.get_nodes(**{'is_deleted': False, 'is_node_link': False})
children_tree = []
wiki = node.get_addon('wiki')
if wiki:
children_tree.append({
'select': {
'title': 'permission',
'permission':
'public'
if wiki.is_publicly_editable
else 'private'
},
})
children_tree.extend(serialize_wiki_settings(user, children))
item = {
'node': {
'id': node._id,
'url': node.url if can_read else '',
'title': node.title if can_read else 'Private Project',
'is_public': node.is_public
},
'children': children_tree,
'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, READ) else 'node',
'nodeType': node.project_or_component,
'category': node.category,
'permissions': {
'view': can_read,
'admin': is_admin,
},
}
items.append(item)
return items
def serialize_wiki_widget(node):
from addons.wiki.models import WikiVersion
wiki = node.get_addon('wiki')
wiki_version = WikiVersion.objects.get_for_node(node, 'home')
# Show "Read more" link if there are multiple pages or has > 400 characters
more = node.wikis.filter(deleted__isnull=True).count() >= 2
MAX_DISPLAY_LENGTH = 400
rendered_before_update = False
if wiki_version and wiki_version.content:
if len(wiki_version.content) > MAX_DISPLAY_LENGTH:
more = True
rendered_before_update = wiki_version.rendered_before_update
# Content fetched and rendered by front-end
wiki_html = None
wiki_widget_data = {
'complete': True,
'wiki_content': wiki_html if wiki_html else None,
'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
'rendered_before_update': rendered_before_update,
'more': more,
'include': False,
}
wiki_widget_data.update(wiki.config.to_json())
return wiki_widget_data
| mfraezz/osf.io | addons/wiki/utils.py | Python | apache-2.0 | 7,955 | 0.001383 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('cfp', '0004_paperapplication_duration'),
]
operations = [
migrations.AlterField(
model_name='applicant',
name='user',
field=models.OneToOneField(related_name='applicant', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AlterField(
model_name='paperapplication',
name='applicant',
field=models.ForeignKey(related_name='applications', to='cfp.Applicant', on_delete=models.CASCADE),
preserve_default=True,
),
]
| WebCampZg/conference-web | cfp/migrations/0005_auto_20150319_0019.py | Python | bsd-3-clause | 767 | 0.002608 |
# BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
A trivial MapReduce application that counts the occurrence of each
vowel in a text input stream. It is more structured than would be
necessary because we want to test automatic distribution of a package
rather than a single module.
"""
| simleo/pydoop | examples/self_contained/vowelcount/__init__.py | Python | apache-2.0 | 850 | 0 |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import logging
from pyasn1 import __version__
from pyasn1 import error
from pyasn1.compat.octets import octs2ints
__all__ = ['Debug', 'setLogger', 'hexdump']
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'none': flagNone,
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Printer(object):
# noinspection PyShadowingNames
def __init__(self, logger=None, handler=None, formatter=None):
if logger is None:
logger = logging.getLogger('pyasn1')
logger.setLevel(logging.DEBUG)
if handler is None:
handler = logging.StreamHandler()
if formatter is None:
formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
self.__logger = logger
def __call__(self, msg):
self.__logger.debug(msg)
def __str__(self):
return '<python logging>'
if hasattr(logging, 'NullHandler'):
NullHandler = logging.NullHandler
else:
# Python 2.6 and older
class NullHandler(logging.Handler):
def emit(self, record):
pass
class Debug(object):
defaultPrinter = Printer()
def __init__(self, *flags, **options):
self._flags = flagNone
if 'loggerName' in options:
# route our logs to parent logger
self._printer = Printer(
logger=logging.getLogger(options['loggerName']),
handler=NullHandler()
)
elif 'printer' in options:
self._printer = options.get('printer')
else:
self._printer = self.defaultPrinter
self._printer('running pyasn1 %s, debug flags %s' % (__version__, ', '.join(flags)))
for flag in flags:
inverse = flag and flag[0] in ('!', '~')
if inverse:
flag = flag[1:]
try:
if inverse:
self._flags &= ~flagMap[flag]
else:
self._flags |= flagMap[flag]
except KeyError:
raise error.PyAsn1Error('bad debug flag %s' % flag)
self._printer("debug category '%s' %s" % (flag, inverse and 'disabled' or 'enabled'))
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer(msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
logger = 0
def setLogger(userLogger):
global logger
if userLogger:
logger = userLogger
else:
logger = 0
def hexdump(octets):
return ' '.join(
['%s%.2X' % (n % 16 == 0 and ('\n%.5d: ' % n) or '', x)
for n, x in zip(range(len(octets)), octs2ints(octets))]
)
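# A concrete example of the format produced: hexdump(b'\x30\x06\x01') yields
# '\n00000: 30 06 01'; a new zero-padded offset label starts every 16th octet.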
class Scope(object):
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)
def push(self, token):
self._list.append(token)
def pop(self):
return self._list.pop()
scope = Scope()
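# A minimal usage sketch (hypothetical caller; flag names follow flagMap
# above and a '!' or '~' prefix negates a flag):
#
#   from pyasn1 import debug
#   debug.setLogger(debug.Debug('decoder'))   # enable decoder tracing
#   debug.setLogger(0)                        # disable tracing again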
| catapult-project/catapult | third_party/gsutil/third_party/pyasn1/pyasn1/debug.py | Python | bsd-3-clause | 3,361 | 0.000595 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_perplex
---------------
This minimal example demonstrates how burnman can be used
to read and interrogate a PerpleX tab file
(as produced by burnman/misc/create_burnman_readable_perplex_table.py).
It also demonstrates how we can smooth a given property on a given P-T grid.
*Uses:*
* :doc:`PerplexMaterial`
* :func:`burnman.Material.evaluate`
* :func:`burnman.tools.math.smooth_array`
*Demonstrates:*
* Use of PerplexMaterial
* Smoothing gridded properties
"""
import numpy as np
import matplotlib.pyplot as plt
import burnman
from burnman.tools.math import smooth_array
if __name__ == "__main__":
rock = burnman.PerplexMaterial('../burnman/data/input_perplex/in23_1.tab')
P = 1.e9
T = 1650.
rock.set_state(P, T)
print('P: {0:.1f} GPa, T: {1:.1f} K, density: {2:.1f} kg/m^3'.format(P/1.e9, T, rock.rho))
pressures = np.linspace(10.e9, 25.e9, 151)
temperatures = [T] * len(pressures)
densities = rock.evaluate(['rho'], pressures, temperatures)[0]
plt.plot(pressures/1.e9, densities)
plt.xlabel('Pressure (GPa)')
plt.ylabel('Density (kg/m^3)')
plt.show()
pressures = np.linspace(10.e9, 25.e9, 151)
temperatures = np.linspace(1600., 1800., 3)
T = 1650.
entropies = rock.evaluate(['S'], pressures,
np.array([T] * len(pressures)))[0]
smoothed_entropies = smooth_array(array=entropies,
grid_spacing=np.array([pressures[1]
- pressures[0]]),
gaussian_rms_widths=np.array([5.e8]))
plt.plot(pressures/1.e9, entropies, label='entropies')
plt.plot(pressures/1.e9, smoothed_entropies, label='smoothed entropies')
plt.xlabel('Pressure (GPa)')
plt.ylabel('Entropy (J/K/mol)')
plt.legend(loc='upper right')
plt.show()
| bobmyhill/burnman | examples/example_perplex.py | Python | gpl-2.0 | 2,092 | 0.000478 |
"""
Testing the Intravoxel incoherent motion module
The values of the various parameters used in the tests are inspired by
the study of the IVIM model applied to MR images of the brain by
Federau, Christian, et al. [1].
References
----------
.. [1] Federau, Christian, et al. "Quantitative measurement
of brain perfusion with intravoxel incoherent motion
MR imaging." Radiology 265.3 (2012): 874-881.
"""
import warnings
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises, assert_array_less, run_module_suite,
assert_, assert_equal)
from dipy.testing import assert_greater_equal
import pytest
from dipy.reconst.ivim import ivim_prediction, IvimModel
from dipy.core.gradients import gradient_table, generate_bvecs
from dipy.sims.voxel import multi_tensor
from dipy.utils.optpkg import optional_package
cvxpy, have_cvxpy, _ = optional_package("cvxpy")
needs_cvxpy = pytest.mark.skipif(not have_cvxpy, reason="REQUIRES CVXPY")
def setup_module():
global gtab, ivim_fit_single, ivim_model_trr, data_single, params_trr, \
data_multi, ivim_params_trr, D_star, D, f, S0, gtab_with_multiple_b0, \
noisy_single, mevals, gtab_no_b0, ivim_fit_multi, ivim_model_VP, \
f_VP, D_star_VP, D_VP, params_VP
# Let us generate some data for testing.
bvals = np.array([0., 10., 20., 30., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300., 400.,
500., 600., 700., 800., 900., 1000.])
N = len(bvals)
bvecs = generate_bvecs(N)
gtab = gradient_table(bvals, bvecs.T, b0_threshold=0)
S0, f, D_star, D = 1000.0, 0.132, 0.00885, 0.000921
# params for a single voxel
params_trr = np.array([S0, f, D_star, D])
mevals = np.array(([D_star, D_star, D_star], [D, D, D]))
# This gives an isotropic signal.
signal = multi_tensor(gtab, mevals, snr=None, S0=S0,
fractions=[f * 100, 100 * (1 - f)])
# Single voxel data
data_single = signal[0]
data_multi = np.zeros((2, 2, 1, len(gtab.bvals)))
data_multi[0, 0, 0] = data_multi[0, 1, 0] = data_multi[
1, 0, 0] = data_multi[1, 1, 0] = data_single
ivim_params_trr = np.zeros((2, 2, 1, 4))
ivim_params_trr[0, 0, 0] = ivim_params_trr[0, 1, 0] = params_trr
ivim_params_trr[1, 0, 0] = ivim_params_trr[1, 1, 0] = params_trr
ivim_model_trr = IvimModel(gtab, fit_method='trr')
ivim_model_one_stage = IvimModel(gtab, fit_method='trr')
ivim_fit_single = ivim_model_trr.fit(data_single)
ivim_fit_multi = ivim_model_trr.fit(data_multi)
ivim_model_one_stage.fit(data_single)
ivim_model_one_stage.fit(data_multi)
bvals_no_b0 = np.array([5., 10., 20., 30., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300., 400.,
500., 600., 700., 800., 900., 1000.])
_ = generate_bvecs(N) # bvecs_no_b0
gtab_no_b0 = gradient_table(bvals_no_b0, bvecs.T, b0_threshold=0)
bvals_with_multiple_b0 = np.array([0., 0., 0., 0., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300.,
400., 500., 600., 700., 800., 900.,
1000.])
bvecs_with_multiple_b0 = generate_bvecs(N)
gtab_with_multiple_b0 = gradient_table(bvals_with_multiple_b0,
bvecs_with_multiple_b0.T,
b0_threshold=0)
noisy_single = np.array([4243.71728516, 4317.81298828, 4244.35693359,
4439.36816406, 4420.06201172, 4152.30078125,
4114.34912109, 4104.59375, 4151.61914062,
4003.58374023, 4013.68408203, 3906.39428711,
3909.06079102, 3495.27197266, 3402.57006836,
3163.10180664, 2896.04003906, 2663.7253418,
2614.87695312, 2316.55371094, 2267.7722168])
noisy_multi = np.zeros((2, 2, 1, len(gtab.bvals)))
noisy_multi[0, 1, 0] = noisy_multi[
1, 0, 0] = noisy_multi[1, 1, 0] = noisy_single
noisy_multi[0, 0, 0] = data_single
ivim_model_VP = IvimModel(gtab, fit_method='VarPro')
f_VP, D_star_VP, D_VP = 0.13, 0.0088, 0.000921
# params for a single voxel
params_VP = np.array([f, D_star, D])
ivim_params_VP = np.zeros((2, 2, 1, 3))
ivim_params_VP[0, 0, 0] = ivim_params_VP[0, 1, 0] = params_VP
ivim_params_VP[1, 0, 0] = ivim_params_VP[1, 1, 0] = params_VP
def single_exponential(S0, D, bvals):
return S0 * np.exp(-bvals * D)
def test_single_voxel_fit():
"""
Test the implementation of the fitting for a single voxel.
Here, we will use the multi_tensor function to generate a
bi-exponential signal. The multi_tensor generates a multi
tensor signal and expects eigenvalues of each tensor in mevals.
    Our basic test requires an isotropic signal and hence we set the
    same eigenvalue in all three directions to generate the required
    signal.
    The bvals, f, D_star and D are inspired by the paper by
    Federau, Christian, et al. We use the function "generate_bvecs"
    to simulate bvectors corresponding to the bvalues.
    In the two-stage fitting routine, initially we fit the signal
    values for bvals less than the specified split_b using the
    TensorModel and get an initial guess for f and D. Then, using
these parameters we fit the entire data for all bvalues.
"""
est_signal = ivim_prediction(ivim_fit_single.model_params, gtab)
assert_array_equal(est_signal.shape, data_single.shape)
assert_array_almost_equal(ivim_fit_single.model_params, params_trr)
assert_array_almost_equal(est_signal, data_single)
# Test predict function for single voxel
p = ivim_fit_single.predict(gtab)
assert_array_equal(p.shape, data_single.shape)
assert_array_almost_equal(p, data_single)
def test_multivoxel():
"""Test fitting with multivoxel data.
We generate a multivoxel signal to test the fitting for multivoxel data.
This is to ensure that the fitting routine takes care of signals packed as
1D, 2D or 3D arrays.
"""
ivim_fit_multi = ivim_model_trr.fit(data_multi)
est_signal = ivim_fit_multi.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(ivim_fit_multi.model_params, ivim_params_trr)
assert_array_almost_equal(est_signal, data_multi)
def test_ivim_errors():
"""
Test if errors raised in the module are working correctly.
    Scipy introduced bounded least-squares fitting in version 0.17,
    so it is not supported by older versions. Initializing an IvimModel
    with bounds under older Scipy versions should raise an error.
"""
ivim_model_trr = IvimModel(gtab, bounds=([0., 0., 0., 0.],
[np.inf, 1., 1., 1.]),
fit_method='trr')
ivim_fit = ivim_model_trr.fit(data_multi)
est_signal = ivim_fit.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(ivim_fit.model_params, ivim_params_trr)
assert_array_almost_equal(est_signal, data_multi)
def test_mask():
"""
    Test whether setting an incorrect mask raises an error
"""
mask_correct = data_multi[..., 0] > 0.2
mask_not_correct = np.array([[False, True, False], [True, False]],
                                dtype=bool)
ivim_fit = ivim_model_trr.fit(data_multi, mask_correct)
est_signal = ivim_fit.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(est_signal, data_multi)
assert_array_almost_equal(ivim_fit.model_params, ivim_params_trr)
assert_raises(ValueError, ivim_model_trr.fit, data_multi,
mask=mask_not_correct)
def test_with_higher_S0():
"""
Test whether fitting works for S0 > 1.
"""
# params for a single voxel
S0_2 = 1000.
params2 = np.array([S0_2, f, D_star, D])
mevals2 = np.array(([D_star, D_star, D_star], [D, D, D]))
# This gives an isotropic signal.
signal2 = multi_tensor(gtab, mevals2, snr=None, S0=S0_2,
fractions=[f * 100, 100 * (1 - f)])
# Single voxel data
data_single2 = signal2[0]
ivim_fit = ivim_model_trr.fit(data_single2)
est_signal = ivim_fit.predict(gtab)
assert_array_equal(est_signal.shape, data_single2.shape)
assert_array_almost_equal(est_signal, data_single2)
assert_array_almost_equal(ivim_fit.model_params, params2)
def test_b0_threshold_greater_than0():
"""
    Test case for the default b0_threshold of 50.
    Checks that the error is raised correctly.
"""
bvals_b0t = np.array([50., 10., 20., 30., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300., 400.,
500., 600., 700., 800., 900., 1000.])
N = len(bvals_b0t)
bvecs = generate_bvecs(N)
gtab = gradient_table(bvals_b0t, bvecs.T)
with assert_raises(ValueError) as vae:
_ = IvimModel(gtab, fit_method='trr')
b0_s = "The IVIM model requires a measurement at b==0. As of "
    assert b0_s in str(vae.exception)
def test_bounds_x0():
"""
    Check that fitting with bounds works for a signal whose initial value
    is higher than the subsequent values.
These values are from the IVIM dataset which can be obtained by using
the `read_ivim` function from dipy.data.fetcher. These are values from
the voxel [160, 98, 33] which can be obtained by :
.. code-block:: python
from dipy.data.fetcher import read_ivim
img, gtab = read_ivim()
data = load_nifti_data(img)
signal = data[160, 98, 33, :]
"""
x0_test = np.array([1., 0.13, 0.001, 0.0001])
test_signal = ivim_prediction(x0_test, gtab)
ivim_fit = ivim_model_trr.fit(test_signal)
est_signal = ivim_fit.predict(gtab)
assert_array_equal(est_signal.shape, test_signal.shape)
def test_predict():
"""
Test the model prediction API.
The predict method is already used in previous tests for estimation of the
    signal. But here, we will test it separately.
"""
assert_array_almost_equal(ivim_fit_single.predict(gtab),
data_single)
assert_array_almost_equal(ivim_model_trr.predict
(ivim_fit_single.model_params, gtab),
data_single)
ivim_fit_multi = ivim_model_trr.fit(data_multi)
assert_array_almost_equal(ivim_fit_multi.predict(gtab),
data_multi)
def test_fit_object():
"""
Test the method of IvimFit class
"""
assert_raises(IndexError, ivim_fit_single.__getitem__, (-.1, 0, 0))
# Check if the S0 called is matching
assert_array_almost_equal(
ivim_fit_single.__getitem__(0).model_params, 1000.)
ivim_fit_multi = ivim_model_trr.fit(data_multi)
# Should raise a TypeError if the arguments are not passed as tuple
assert_raises(TypeError, ivim_fit_multi.__getitem__, -.1, 0)
# Should return IndexError if invalid indices are passed
assert_raises(IndexError, ivim_fit_multi.__getitem__, (100, -0))
assert_raises(IndexError, ivim_fit_multi.__getitem__, (100, -0, 2))
assert_raises(IndexError, ivim_fit_multi.__getitem__, (-100, 0))
assert_raises(IndexError, ivim_fit_multi.__getitem__, [-100, 0])
assert_raises(IndexError, ivim_fit_multi.__getitem__, (1, 0, 0, 3, 4))
# Check if the get item returns the S0 value for voxel (1,0,0)
assert_array_almost_equal(
ivim_fit_multi.__getitem__((1, 0, 0)).model_params[0],
data_multi[1, 0, 0][0])
def test_shape():
"""
Test if `shape` in `IvimFit` class gives the correct output.
"""
assert_array_equal(ivim_fit_single.shape, ())
ivim_fit_multi = ivim_model_trr.fit(data_multi)
assert_array_equal(ivim_fit_multi.shape, (2, 2, 1))
def test_multiple_b0():
# Generate a signal with multiple b0
# This gives an isotropic signal.
signal = multi_tensor(gtab_with_multiple_b0, mevals, snr=None, S0=S0,
fractions=[f * 100, 100 * (1 - f)])
# Single voxel data
data_single = signal[0]
ivim_model_multiple_b0 = IvimModel(gtab_with_multiple_b0, fit_method='trr')
ivim_model_multiple_b0.fit(data_single)
# Test if all signals are positive
def test_no_b0():
assert_raises(ValueError, IvimModel, gtab_no_b0)
def test_noisy_fit():
"""
Test fitting for noisy signals. This tests whether the threshold condition
applies correctly and returns the linear fitting parameters.
    For older scipy versions, the returned value of `f` from a linear fit is
    around 135 and the D and D_star values are equal. Hence the test depends
    on the Scipy version.
"""
model_one_stage = IvimModel(gtab, fit_method='trr')
with warnings.catch_warnings(record=True) as w:
fit_one_stage = model_one_stage.fit(noisy_single)
assert_equal(len(w), 3)
for l_w in w:
assert_(issubclass(l_w.category, UserWarning))
assert_("" in str(w[0].message))
assert_("x0 obtained from linear fitting is not feasibile" in
str(w[0].message))
assert_("x0 is unfeasible" in str(w[1].message))
assert_("Bounds are violated for leastsq fitting" in str(w[2].message))
assert_array_less(fit_one_stage.model_params, [10000., 0.3, .01, 0.001])
def test_S0():
"""
Test if the `IvimFit` class returns the correct S0
"""
assert_array_almost_equal(ivim_fit_single.S0_predicted, S0)
assert_array_almost_equal(ivim_fit_multi.S0_predicted,
ivim_params_trr[..., 0])
def test_perfusion_fraction():
"""
Test if the `IvimFit` class returns the correct f
"""
assert_array_almost_equal(ivim_fit_single.perfusion_fraction, f)
assert_array_almost_equal(
ivim_fit_multi.perfusion_fraction, ivim_params_trr[..., 1])
def test_D_star():
"""
Test if the `IvimFit` class returns the correct D_star
"""
assert_array_almost_equal(ivim_fit_single.D_star, D_star)
assert_array_almost_equal(ivim_fit_multi.D_star, ivim_params_trr[..., 2])
def test_D():
"""
Test if the `IvimFit` class returns the correct D
"""
assert_array_almost_equal(ivim_fit_single.D, D)
assert_array_almost_equal(ivim_fit_multi.D, ivim_params_trr[..., 3])
def test_estimate_linear_fit():
"""
Test the linear estimates considering a single exponential fit.
"""
data_single_exponential_D = single_exponential(S0, D, gtab.bvals)
assert_array_almost_equal(ivim_model_trr.estimate_linear_fit(
data_single_exponential_D,
split_b=500.,
less_than=False),
(S0, D))
data_single_exponential_D_star = single_exponential(S0, D_star, gtab.bvals)
assert_array_almost_equal(ivim_model_trr.estimate_linear_fit(
data_single_exponential_D_star,
split_b=100.,
less_than=True),
(S0, D_star))
def test_estimate_f_D_star():
"""
Test if the `estimate_f_D_star` returns the correct parameters after a
non-linear fit.
"""
params_f_D = f + 0.001, D + 0.0001
assert_array_almost_equal(ivim_model_trr.estimate_f_D_star(params_f_D,
data_single, S0,
D),
(f, D_star))
def test_fit_one_stage():
"""
Test to check the results for the one_stage linear fit.
"""
model = IvimModel(gtab, two_stage=False)
fit = model.fit(data_single)
linear_fit_params = [9.88834140e+02, 1.19707191e-01, 7.91176970e-03,
9.30095210e-04]
linear_fit_signal = [988.83414044, 971.77122546, 955.46786293,
939.87125905, 924.93258982, 896.85182201,
870.90346447, 846.81187693, 824.34108781,
803.28900104, 783.48245048, 764.77297789,
747.03322866, 669.54798887, 605.03328304,
549.00852235, 499.21077611, 454.40299244,
413.83192296, 376.98072773, 343.45531017]
assert_array_almost_equal(fit.model_params, linear_fit_params)
assert_array_almost_equal(fit.predict(gtab), linear_fit_signal)
def test_leastsq_failing():
"""
Test for cases where leastsq fitting fails and the results from a linear
    fit are returned.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
fit_single = ivim_model_trr.fit(noisy_single)
assert_greater_equal(len(w), 3)
u_warn = [l_w for l_w in w if issubclass(l_w.category, UserWarning)]
assert_greater_equal(len(u_warn), 3)
message = ["x0 obtained from linear fitting is not feasibile",
"x0 is unfeasible",
"Bounds are violated for leastsq fitting"]
assert_greater_equal(len([lw for lw in u_warn for m in message
if m in str(lw.message)]), 3)
# Test for the S0 and D values
assert_array_almost_equal(fit_single.S0_predicted, 4356.268901117833)
assert_array_almost_equal(fit_single.D, 6.936684e-04)
def test_leastsq_error():
"""
    Test that the error handling of the `_leastsq` method works when an
    unfeasible x0 is passed. If an unfeasible x0 value that makes leastsq
    fail is passed, the x0 value is returned as it is.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
fit = ivim_model_trr._leastsq(data_single, [-1, -1, -1, -1])
assert_greater_equal(len(w), 1)
assert_(issubclass(w[-1].category, UserWarning))
assert_("" in str(w[-1].message))
assert_("x0 is unfeasible" in str(w[-1].message))
assert_array_almost_equal(fit, [-1, -1, -1, -1])
@needs_cvxpy
def test_perfusion_fraction_vp():
"""
Test if the `IvimFit` class returns the correct f
"""
ivim_fit_VP = ivim_model_VP.fit(data_single)
assert_array_almost_equal(ivim_fit_VP.perfusion_fraction, f_VP,
decimal=2)
@needs_cvxpy
def test_D_star_vp():
"""
Test if the `IvimFit` class returns the correct D_star
"""
ivim_fit_VP = ivim_model_VP.fit(data_single)
assert_array_almost_equal(ivim_fit_VP.D_star, D_star_VP, decimal=4)
@needs_cvxpy
def test_D_vp():
"""
Test if the `IvimFit` class returns the correct D
"""
ivim_fit_VP = ivim_model_VP.fit(data_single)
assert_array_almost_equal(ivim_fit_VP.D, D_VP, decimal=4)
if __name__ == '__main__':
run_module_suite()
| FrancoisRheaultUS/dipy | dipy/reconst/tests/test_ivim.py | Python | bsd-3-clause | 19,029 | 0 |
import os
import shutil
from cumulusci.core.exceptions import DependencyResolutionError
from cumulusci.core.github import get_github_api_for_repo
from cumulusci.core.github import find_latest_release
from cumulusci.core.github import find_previous_release
from cumulusci.utils import download_extract_github
class GitHubSource:
def __init__(self, project_config, spec):
self.project_config = project_config
self.spec = spec
self.url = spec["github"]
if self.url.endswith(".git"):
self.url = self.url[:-4]
repo_owner, repo_name = self.url.split("/")[-2:]
self.repo_owner = repo_owner
self.repo_name = repo_name
self.gh = get_github_api_for_repo(
project_config.keychain, repo_owner, repo_name
)
self.repo = self.gh.repository(self.repo_owner, self.repo_name)
self.resolve()
def __repr__(self):
return f"<GitHubSource {str(self)}>"
def __str__(self):
s = f"GitHub: {self.repo_owner}/{self.repo_name}"
if self.description:
s += f" @ {self.description}"
if self.commit != self.description:
s += f" ({self.commit})"
return s
def __hash__(self):
return hash((self.url, self.commit))
def resolve(self):
"""Resolve a github source into a specific commit.
The spec must include:
- github: the URL of the github repository
The spec may include one of:
- commit: a commit hash
- ref: a git ref
- branch: a git branch
- tag: a git tag
- release: "latest" | "previous" | "latest_beta"
If none of these are specified, CumulusCI will look for the latest release.
If there is no release, it will use the default branch.
"""
ref = None
if "commit" in self.spec:
self.commit = self.description = self.spec["commit"]
return
elif "ref" in self.spec:
ref = self.spec["ref"]
elif "tag" in self.spec:
ref = "tags/" + self.spec["tag"]
elif "branch" in self.spec:
ref = "heads/" + self.spec["branch"]
elif "release" in self.spec:
release_spec = self.spec["release"]
if release_spec == "latest":
release = find_latest_release(self.repo, include_beta=False)
elif release_spec == "latest_beta":
release = find_latest_release(self.repo, include_beta=True)
elif release_spec == "previous":
release = find_previous_release(self.repo)
else:
raise DependencyResolutionError(f"Unknown release: {release_spec}")
if release is None:
raise DependencyResolutionError(
f"Could not find release: {release_spec}"
)
ref = "tags/" + release.tag_name
if ref is None:
release = find_latest_release(self.repo, include_beta=False)
if release:
ref = "tags/" + release.tag_name
else:
ref = "heads/" + self.repo.default_branch
self.description = ref[6:] if ref.startswith("heads/") else ref
self.commit = self.repo.ref(ref).object.sha
def fetch(self, path=None):
"""Fetch the archive of the specified commit and construct its project config."""
# To do: copy this from a shared cache
if path is None:
path = (
self.project_config.cache_dir
/ "projects"
/ self.repo_name
/ self.commit
)
if not path.exists():
path.mkdir(parents=True)
zf = download_extract_github(
self.gh, self.repo_owner, self.repo_name, ref=self.commit
)
try:
zf.extractall(path)
except Exception:
# make sure we don't leave an incomplete cache
shutil.rmtree(path)
raise
assert path.is_dir()
project_config = self.project_config.construct_subproject_config(
repo_info={
"root": os.path.realpath(path),
"owner": self.repo_owner,
"name": self.repo_name,
"url": self.url,
"commit": self.commit,
}
)
return project_config
@property
def frozenspec(self):
"""Return a spec to reconstruct this source at the current commit"""
return {
"github": self.url,
"commit": self.commit,
"description": self.description,
}
| SalesforceFoundation/CumulusCI | cumulusci/core/source/github.py | Python | bsd-3-clause | 4,708 | 0.000637 |
# assign epitope fitness to each node in the phylogeny
import time
from io_util import *
from tree_util import *
from date_util import *
from seq_util import *
import numpy as np
from itertools import izip
from collections import defaultdict
def append_nonepitope_sites(viruses):
for virus in viruses:
sites_ne = nonepitope_sites(virus['seq'])
virus['sites_ne'] = sites_ne
def remove_nonepitope_sites(viruses):
for virus in viruses:
virus.pop("sites_ne", None)
def remove_nonepitope_distances(viruses):
for virus in viruses:
virus.pop("distance_ne", None)
def most_frequent(char_list):
d = defaultdict(int)
for i in char_list:
d[i] += 1
return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)[0][0]
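# For example, most_frequent(['A', 'T', 'A']) returns 'A'; ties between
# equally common characters are broken arbitrarily.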
def consensus_nonepitope(viruses):
"""Return consensus non-epitope sequence"""
consensus = ""
length = len(viruses[0]['sites_ne'])
for i in range(0, length):
column = [v['sites_ne'][i] for v in viruses]
consensus += most_frequent(column)
return consensus
def distance_to_consensus(virus, consensus_ne):
    """Return the distance of a virus to the consensus by comparing non-epitope sites"""
    virus_ne = virus['sites_ne']
    ne_distance = sum(a != b for a, b in izip(virus_ne, consensus_ne))
    return ne_distance
def compute(viruses):
    """Append non-epitope distances to each virus"""
    print "Computing non-epitope distances"
    consensus = consensus_nonepitope(viruses)
    for virus in viruses:
        distance = distance_to_consensus(virus, consensus)
        virus['distance_ne'] = distance
        print virus['strain'] + ": " + str(virus['distance_ne'])
def normalize(viruses):
"""Normalizing non-epitope distances to give non-epitope fitness"""
print "Normalizing non-epitope distances"
distances = [v['distance_ne'] for v in viruses]
mean = np.mean(distances)
sd = np.std(distances)
for virus in viruses:
virus['fitness_ne'] = -1 * ( ( virus['distance_ne'] - mean) / sd )
print virus['strain'] + ": " + str(virus['fitness_ne'])
def main(in_fname = None):
print "--- Non-epitope fitness at " + time.strftime("%H:%M:%S") + " ---"
if in_fname is None: in_fname='data/virus_epitope.json'
viruses = read_json(in_fname)
append_nonepitope_sites(viruses)
compute(viruses)
# normalize(viruses)
remove_nonepitope_sites(viruses)
# remove_nonepitope_distances(viruses)
out_fname = "data/virus_nonepitope.json"
write_json(viruses, out_fname)
return out_fname
if __name__ == "__main__":
main()
| doerlbh/Indie-nextflu | augur/scratch/fitness_nonepitope.py | Python | agpl-3.0 | 2,409 | 0.027397 |
from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
from django.contrib.gis import gdal
except ImportError:
# A 'dummy' gdal module.
class GDALInfo(object):
HAS_GDAL = False
gdal = GDALInfo()
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid; we don't
        # want to be passing NULL pointers to routines --
        # that's very bad.
if self._ptr:
return self._ptr
else:
raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
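# A minimal sketch of the intended subclass usage (FakeGeom is hypothetical;
# real geometry classes override ptr_type with their own ctypes pointer type):
#   class FakeGeom(GEOSBase):
#       pass
#   geom = FakeGeom()
#   geom.ptr = c_void_p(1)  # accepted, since it matches the default ptr_type
#   geom.ptr                # returns the pointer; raises GEOSException if NULL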
| diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/contrib/gis/geos/base.py | Python | mit | 1,714 | 0.000583 |
from tensorboardX import SummaryWriter
import unittest
from tensorboardX.record_writer import S3RecordWriter, make_valid_tf_name, GCSRecordWriter
import os
import boto3
from moto import mock_s3
os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
class RecordWriterTest(unittest.TestCase):
@mock_s3
def test_record_writer_s3(self):
client = boto3.client('s3', region_name='us-east-1')
client.create_bucket(Bucket='this')
writer = S3RecordWriter('s3://this/is/apen')
bucket, path = writer.bucket_and_path()
assert bucket == 'this'
assert path == 'is/apen'
writer.write(bytes(42))
writer.flush()
def test_make_valid_tf_name(self):
newname = make_valid_tf_name('$ave/&sound')
assert newname == '._ave/_sound'
def test_record_writer_gcs(self):
pass
        # We don't have a mock for GCS, so an error would be expected here.
        # However, the Travis CI env doesn't raise an exception for the
        # following code, so it is commented out.
# with self.assertRaises(Exception):
# writer = GCSRecordWriter('gs://this/is/apen')
# writer.write(bytes(42))
# writer.flush()
| lanpa/tensorboardX | tests/test_record_writer.py | Python | mit | 1,257 | 0.000796 |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 3 14:25:06 2014
@author: Matti Ropo
@author: Henrik Levämäki
"""
from __future__ import print_function
import time
import os
import sys
import numpy as np
import pyemto.common.common as common
class System:
"""The main class which provides the basis for the pyEMTO scripts.
Somewhere in the beginning of a pyEMTO script a new instance of
the system class should be created. All subsequent communication
with the newly created system should be through the class methods,
which are described below.
:param folder: Main folder where the input and output files will
be stored. Use of absolute paths is recommended
(Default value = current working directory)
:type folder: str
:param EMTOdir: Path to the folder of the EMTO installation.
This entry can and should be modified by the user
inside the System.__init__ function
(Default value = /home/user/EMTO5.8)
:type EMTOdir: str
:param xc: Choice for the xc-functional can be set here.
(Default value = PBE)
:type xc: str
:returns: None
:rtype: None
"""
def __init__(self, folder=None, EMTOdir=None, xc=None):
# Import necessary packages
from pyemto.latticeinputs.latticeinputs import Latticeinputs
from pyemto.emtoinputs.emtoinputs import Emtoinputs
# Check input arguments
if folder is None:
self.folder = os.getcwd() # Use current folder
else:
self.folder = folder
if EMTOdir is None:
self.EMTOdir = "/home/hpleva/EMTO5.8"
else:
self.EMTOdir = EMTOdir
# Initialize default parameters
self.ca_range_default = np.linspace(1.50, 1.70, 7)
self.elastic_constants_points = 6
self.elastic_constants_deltas = np.linspace(0.0, 0.05,
self.elastic_constants_points)
self.RyBohr3_to_GPa = 14710.5065722
self.kappaw_default = [0.0, -20.0]
self.hcpo_relax_points = 5
self.hcpm_relax_points = 5
if xc is None:
self.xc = 'PBE'
else:
self.xc = xc
# Create working folders
common.check_folders(self.folder + '/kgrn', self.folder + '/kgrn/tmp',
self.folder + '/kfcd', self.folder + '/fit')
# BMDL, KSTR, SHAPE, KGRN and KFCD class instances
self.lattice = Latticeinputs()
self.emto = Emtoinputs()
return
def bulk(self, jobname=None, lat=None, atoms=None, concs=None, splts=None, sws=None,
latname=None, latpath=None, emtopath=None, ibz=None, bmod=None, xc=None, ca=None,
**kwargs):
"""Initializes the basic parameters for bulk systems.
Basic information concerning the system,
such as the types of atoms and the crystal structure should be given to this function and
it should be called right after the class instance has been created.
:param jobname: Name of the system (Default value = None)
:type jobname:
:param lat: The type of lattice structure (Default value = None)
:type lat:
:param atoms: List of atoms in the system (Default value = None)
:type atoms:
:param concs: List of concentrations of the elements in the
'atoms' list. This information is only used in CPA
calculations (Default value = None)
:type concs:
:param splts: List of initial magnetic moments of the elements in the
'atoms' list (Default value = None)
:type splts:
:param sws: The Wigner-Seitz radius of the system (Default value = None)
:type sws: float
:param latname: The 'jobname' of the BMDL, KSTR and SHAPE output files. These
structure output files have to be located in the 'latpath'
                        directory and they have to be named jobname.extension
(Default value = None)
:type latname:
:param latpath: The absolute path to the folder where the 'bmdl', 'kstr' and 'shape'
folders are located, which in turn contain the output files of the
structure calculation (Default value = None)
:type latpath:
:param emtopath: The absolute path to the folder where the EMTO installation is
located (Default value = None)
:type emtopath:
:param ibz: The code number indicating the Bravais lattice that the crystal
structure of the system has. For a list of possible values, please consult the
EMTO manual (Default value = None)
:type ibz:
:param bmod: The bulk modulus can be inputed here and if it is given,
it will be used by the elastic modulus routines (Default value = None)
:type bmod:
:param xc: The choice of the xc-functional. If None, PBE will be used as default
(Default value = None)
:type xc:
:param ca: The c/a ratio of hcp structures can be inputed here and if it is given,
it will be used by the elastic modulus routines (Default value = None)
:type ca:
:param **kwargs: Arbitrary other KGRN and KFCD input parameters can be given here
as keyword arguments. They will be passed down to the
self.emto.set_values() function
:type **kwargs: str,int,float,list(str),list(int),list(float)
:returns: None
:rtype: None
"""
if lat is None:
sys.exit('System.bulk(): \'lat\' has to be given!')
else:
self.lat = lat
if latname is None:
self.latname = self.lat
else:
self.latname = latname
if latpath is None:
self.latpath = "./"
else:
self.latpath = latpath
if emtopath is None:
self.emtopath = self.folder
else:
self.emtopath = emtopath
if atoms is None:
sys.exit('System.bulk(): \'atoms\' has to be given!')
else:
self.atoms = atoms
if concs is None:
# Assume equal concentrations for each element
self.concs = np.zeros(len(atoms))
self.concs[:] = 1.0 / float(len(atoms))
else:
self.concs = concs
if splts is None:
self.splts = np.zeros(len(atoms))
else:
self.splts = np.asarray(splts)
if sws is None:
self.sws = 0.0
#sys.exit('System.bulk(): \'sws\' has to be given!')
else:
self.sws = sws
if jobname is None:
self.jobname, self.fulljobname = self.create_jobname()
else:
self.jobname = jobname
self.fulljobname = self.create_jobname(jobname)
if ibz is None:
self.ibz = common.lat_to_ibz(self.lat)
else:
self.ibz = ibz
# Knowledge of the c/a lattice parameter for hcp systems
if ca is not None:
self.ca = ca
else:
self.ca = None
# Knowledge of the xc-functional we want to use
if xc is None:
self.xc = 'PBE'
else:
self.xc = xc
# Knowledge of the value of the bulk modulus, which
# is mainly needed in the elastic constant functions
self.bmod = bmod
# hcp requires that we "double" the atoms array and
# create a non-trivial iqs-array because hcp has a
# non-trivial two-atom basis.
if self.lat == 'hcp':
self.atoms = np.array([self.atoms, self.atoms]).flatten()
if concs is None:
self.concs = np.zeros(len(self.atoms))
self.concs[:] = 2.0 / float(len(self.atoms))
else:
self.concs = np.array([self.concs, self.concs]).flatten()
self.iqs = np.zeros(len(self.atoms), dtype='int32')
self.iqs[:len(self.iqs) // 2] = 1
self.iqs[len(self.iqs) // 2:] = 2
if splts is None:
self.splts = np.zeros(len(self.atoms))
else:
self.splts = np.array([self.splts, self.splts]).flatten()
self.itas = np.arange(1, len(self.atoms) / 2 + 1, dtype='int32')
self.itas = np.array([self.itas, self.itas]).flatten()
self.emto.set_values(jobname=self.fulljobname, sws=self.sws, atoms=self.atoms,
iqs=self.iqs, itas=self.itas, concs=self.concs, splts=self.splts,
ibz=self.ibz, latname=self.latname, latpath=self.latpath,
emtopath=self.emtopath, EMTOdir=self.EMTOdir, **kwargs)
# Special settings for the B2 structure. CPA currently not supported!!!
elif self.lat == 'B2':
self.iqs = np.array([1,2],dtype='int32')
self.concs = np.array([1.0,1.0])
self.its = np.array([1,2],dtype='int32')
self.itas = np.array([1,1],dtype='int32')
self.emto.set_values(jobname=self.fulljobname, sws=self.sws, atoms=self.atoms,
iqs=self.iqs, its=self.its, itas=self.itas, concs=self.concs,
splts=self.splts, ibz=self.ibz, latname=self.latname, latpath=self.latpath,
emtopath=self.emtopath, EMTOdir=self.EMTOdir, **kwargs)
else:
self.emto.set_values(jobname=self.fulljobname, sws=self.sws, atoms=self.atoms,
concs=self.concs, splts=self.splts, ibz=self.ibz,
latname=self.latname, latpath=self.latpath, emtopath=self.emtopath,
EMTOdir=self.EMTOdir, **kwargs)
# Make sure that structure files also receive the new input options,
# such as slurm_options.
self.lattice.set_values(**kwargs)
return
def bulk_new(self, jobname=None, lat=None, atoms=None, concs=None, splts=None, sws=None,
latname=None, latpath=None, emtopath=None, ibz=None, bmod=None, xc=None, ca=None,
iqs=None, its=None, itas=None,
**kwargs):
"""Initializes the basic parameters for bulk systems.
!!!A NEW VERSION OF THE OLD "bulk" ROUTINE!!!
Basic information concerning the system,
such as the types of atoms and the crystal structure should be given to this function and
it should be called right after the class instance has been created.
:param jobname: Name of the system (Default value = None)
:type jobname:
:param lat: The type of lattice structure (Default value = None)
:type lat:
:param atoms: List of atoms in the system (Default value = None)
:type atoms:
:param concs: List of concentrations of the elements in the
'atoms' list. This information is only used in CPA
calculations (Default value = None)
:type concs:
:param splts: List of initial magnetic moments of the elements in the
'atoms' list (Default value = None)
:type splts:
:param sws: The Wigner-Seitz radius of the system (Default value = None)
:type sws: float
:param latname: The 'jobname' of the BMDL, KSTR and SHAPE output files. These
structure output files have to be located in the 'latpath'
directory and they have to be named jobname.extention
(Default value = None)
:type latname:
:param latpath: The absolute path to the folder where the 'bmdl', 'kstr' and 'shape'
folders are located, which in turn contain the output files of the
structure calculation (Default value = None)
:type latpath:
:param emtopath: The absolute path to the folder where the EMTO installation is
located (Default value = None)
:type emtopath:
:param ibz: The code number indicating the Bravais lattice that the crystal
structure of the system has. For a list of possible values, please consult the
EMTO manual (Default value = None)
:type ibz:
:param bmod: The bulk modulus can be inputed here and if it is given,
it will be used by the elastic modulus routines (Default value = None)
:type bmod:
:param xc: The choice of the xc-functional. If None, PBE will be used as default
(Default value = None)
:type xc:
:param ca: The c/a ratio of hcp structures can be inputed here and if it is given,
it will be used by the elastic modulus routines (Default value = None)
:type ca:
:param **kwargs: Arbitrary other KGRN and KFCD input parameters can be given here
as keyword arguments. They will be passed down to the
self.emto.set_values() function
:type **kwargs: str,int,float,list(str),list(int),list(float)
:returns: None
:rtype: None
"""
if lat is None:
sys.exit('System.bulk(): \'lat\' has to be given!')
else:
self.lat = lat
if latname is None:
self.latname = self.lat
else:
self.latname = latname
if latpath is None:
self.latpath = "./"
else:
self.latpath = latpath
if emtopath is None:
self.emtopath = self.folder
else:
self.emtopath = emtopath
if atoms is None:
            sys.exit('System.bulk_new(): \'atoms\' has to be given!')
else:
self.atoms = atoms
if concs is None:
# Assume equal concentrations for each element
self.concs = np.zeros(len(atoms))
self.concs[:] = 1.0 / float(len(atoms))
else:
self.concs = concs
if splts is None:
self.splts = np.zeros(len(atoms))
else:
self.splts = np.asarray(splts)
if sws is None:
self.sws = 0.0
#sys.exit('System.bulk(): \'sws\' has to be given!')
else:
self.sws = sws
if jobname is None:
self.jobname, self.fulljobname = self.create_jobname()
else:
self.jobname = jobname
self.fulljobname = self.create_jobname(jobname)
if ibz is None:
self.ibz = common.lat_to_ibz(self.lat)
else:
self.ibz = ibz
# Knowledge of the c/a lattice parameter for hcp systems
if ca is not None:
self.ca = ca
else:
self.ca = None
# Knowledge of the xc-functional we want to use
if xc is None:
self.xc = 'PBE'
else:
self.xc = xc
# Knowledge of the value of the bulk modulus, which
# is mainly needed in the elastic constant functions
self.bmod = bmod
# Construct input parameter arrays.
self.iqs = iqs
self.its = its
self.itas = itas
self.emto.set_values(jobname=self.fulljobname, sws=self.sws, atoms=self.atoms,
iqs=self.iqs, its=self.its, itas=self.itas, concs=self.concs,
splts=self.splts, ibz=self.ibz, latname=self.latname, latpath=self.latpath,
emtopath=self.emtopath, EMTOdir=self.EMTOdir, **kwargs)
return
def write_inputs(self,folder=None, batch=True):
"""Write kgrn and kfcd inputs files and possible batch file """
if folder == None:
folder = self.folder
self.emto.kgrn.write_input_file(folder=folder)
self.emto.kfcd.write_input_file(folder=folder)
if batch:
self.emto.batch.write_input_file(folder=folder)
    def lattice_constants_analyze(self, sws=None, ca=None, prn=True, debug=False, method='morse', return_error=False):
"""Analyzes the output files generated using the
lattice_constants_batch_generate function.
The results are printed on screen.
:param sws: List of WS-radii (Default value = None)
:type sws: list(float)
        :param ca: List of hcp c/a ratios (Default value = None)
:type ca: list(float)
:param prn: True if results should be printed on screen, False if not (Default value = True)
:type prn: boolean
:returns: Equilibrium WS-radius, c/a (only hcp), bulk modulus, energy,
R (only hcp) and cs (only hcp)
:rtype: float, float (only hcp), float, float, float (only hcp), float (only hcp)
"""
from pyemto.EOS.EOS import EOS
eos = EOS(name=self.jobname, xc=self.xc, method=method, units='bohr')
if prn:
print('')
print('*****lattice_constants_analyze*****')
print('')
if sws is None:
sys.exit('System.lattice_constants_analyze(): An array of' +
' WS-radii \'sws\' has to be given!')
else:
self.lc_analyze_sws_range = np.asarray(sws)
if ca is None:
self.lc_analyze_ca_range = self.ca_range_default
else:
self.lc_analyze_ca_range = np.asarray(ca)
if self.lat == 'bcc' or self.lat == 'fcc' or self.lat == 'trig' or self.lat == 'stric':
energies = []
swses = []
for j in range(len(self.lc_analyze_sws_range)):
self.sws = self.lc_analyze_sws_range[j]
job = self.create_jobname(self.jobname)
en = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en, type(None)):
print('System.lattice_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
energies.append(en)
swses.append(self.lc_analyze_sws_range[j])
if prn:
self.print_sws_ens(
'lattice_constants_analyze(cubic)', swses, energies)
sws0, e0, B0, grun, R_squared = eos.fit(swses, energies)
# These functions create files on disk about the data to be fitted
# as well as the results of the fit.
# eos.prepareData()
#sws0,e0,B,grun = eos.fit2file()
if prn:
print('lattice_constants_analyze(cubic):')
print('sws0 = {0:13.6f}'.format(sws0))
print('B0 = {0:13.6f}'.format(B0))
print('E0 = {0:13.6f}'.format(e0))
print('')
if return_error:
return sws0, B0, e0, grun, R_squared
else:
return sws0, B0, e0, grun
if self.lat == 'hcp':
# Fit an n'th order polynomial to the energy vs. c/a data.
ca_fit_order = 2
#latnames = ['hcp_ca1', 'hcp_ca2', 'hcp_ca3', 'hcp_ca4', 'hcp_ca5', 'hcp_ca6',
# 'hcp_ca7', 'hcp_ca8', 'hcp_ca9', 'hcp_ca10', 'hcp_ca11', 'hcp_ca12']
caname = 'hcp_ca'
# For the energies a 2D-array [i,j], where i = c/a axis and j = sws
# axis
energies = np.zeros(
(len(self.lc_analyze_ca_range), len(self.lc_analyze_sws_range)))
energies0 = [] # c/a optimized energy for a given WS-radius
swses = [] # List of WS-radii
cas0 = [] # Energetically optimized c/a's for a given WS-radius
# First collect all the output energies into the 2D array.
for i in range(len(self.lc_analyze_ca_range)):
for j in range(len(self.lc_analyze_sws_range)):
self.sws = self.lc_analyze_sws_range[j]
job = self.create_jobname(self.jobname + "_" + caname +str(i+1))
en = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en, type(None)):
print('System.lattice_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
energies[i, j] = en
if debug:
print('Energy matrix (y axis = c/a axis, x axis = sws axis):'+"\n")
formaatti = " "
for i in range(len(self.lc_analyze_sws_range)):
formaatti = formaatti + "{0}{1}{2}{3} ".format("{",i,":8.6f","}")
print(formaatti.format(*self.lc_analyze_sws_range))
formaatti = ""
for i in range(len(self.lc_analyze_sws_range)+1):
formaatti = formaatti + "{0}{1}{2}{3} ".format("{",i,":13.6f","}")
for i in range(len(energies[:,0])):
print(formaatti.format(self.lc_analyze_ca_range[i],*energies[i,:]))
# Now we can start processing the 2D energy array.
# There might be some calculations that didn't converge.
# For those the energy will be zero in the 2D array so
# we have to leave those points out.
for j in range(len(self.lc_analyze_sws_range)):
good_energies = []
good_cas = []
for i in range(len(self.lc_analyze_ca_range)):
if energies[i, j] < -1.0:
good_energies.append(energies[i, j])
good_cas.append(self.lc_analyze_ca_range[i])
if len(good_energies) >= 3:
ca0, en0 = eos.ca_fit(good_cas, good_energies, ca_fit_order,
debug=debug,title='sws{0}'.format(j+1))
cas0.append(ca0)
energies0.append(en0)
swses.append(self.lc_analyze_sws_range[j])
if prn:
self.print_sws_ens_hcp(
'lattice_constants_analyze(hcp)', swses, energies0, cas0)
print('#'*80)
print('# Ground state EOS fit:'+' '*56+'#')
print('#'*80)
sws0, e0, B0, grun, R_squared = eos.fit(swses, energies0)
print('*'*80)
print('*'*80)
print('*'*80+'\n')
# These functions create files on disk about the data to be fitted
# as well as the results of the fit.
# eos.prepareData()
#sws0,e0,B0,grun = eos.fit2file()
# Now that we have the ground state WS-radius sws0 we can use the cas0 array
# to compute the corresponding ground state ca0
ca0_vs_sws = np.polyfit(swses, cas0, 2)
ca0_vs_sws = np.poly1d(ca0_vs_sws)
c_over_a0 = ca0_vs_sws(sws0)
# In order to calculate R = (c33 - c11 - c12 + c13) / cs
# we use Eq. (6.56) p. 108 in Vitos' book.
dca0_vs_dsws = np.polyder(ca0_vs_sws)
dca0dsws0 = dca0_vs_dsws(sws0)
R0 = -sws0 / 3.0 / c_over_a0 * dca0dsws0
# In order to calculate cs = c11 + c12 + 2c33 - 4c13
# we use Eq. (6.68) p. 109 in Vitos' book.
# We need the total energies as a function of c/a
# where sws is fixed to sws0. These can be obtained
# by doing a Morse fit for each c/a and evaluating
# the fitting function at sws0.
energies_cs = []
# There might be some calculations that didn't converge.
# For those the energy will be zero in the 2D array so
# we have to leave those points out.
for i in range(len(self.lc_analyze_ca_range)):
good_energies = []
good_swses = []
for j in range(len(self.lc_analyze_sws_range)):
if energies[i, j] < -1.0:
good_energies.append(energies[i, j])
good_swses.append(self.lc_analyze_sws_range[j])
print('#'*80)
print('# c/a{0} EOS fit:'.format(i+1)+' '*64+'#')
print('#'*80)
# _tmp variables are just dummies, we only want to
# update the EOS parameters of the "eos" instance.
sws_tmp, e_tmp, B_tmp, grun_tmp, R_squared_tmp = eos.fit(
good_swses, good_energies)
print('*'*80)
print('*'*80)
print('*'*80+'\n')
#e_cs = eos.fit_eval(sws0)
#e_cs = eos.fit_eval(sws_tmp)
e_cs = e_tmp
energies_cs.append(e_cs)
e_vs_ca_at_sws0 = np.polyfit(
self.lc_analyze_ca_range, energies_cs, 2)
e_vs_ca_at_sws0 = np.poly1d(e_vs_ca_at_sws0)
d2e_vs_dca2_at_sws0 = np.polyder(e_vs_ca_at_sws0, 2)
d2edca02sws0 = d2e_vs_dca2_at_sws0(c_over_a0)
vol0 = 4.0 / 3.0 * np.pi * sws0**3
cs0 = 9.0 / 2.0 * c_over_a0**2 / vol0 * \
d2edca02sws0 * self.RyBohr3_to_GPa
if prn:
print('hcp_lattice_constants_analyze(hcp):')
print('sws0 = {0:13.6f}'.format(sws0))
print('c/a0 = {0:13.6f}'.format(c_over_a0))
print('B0 = {0:13.6f}'.format(B0))
print('E0 = {0:13.6f}'.format(e0))
print('R = {0:13.6f}'.format(R0))
print('cs = {0:13.6f}'.format(cs0))
print('')
if return_error:
return sws0, c_over_a0, B0, e0, R0, cs0, grun, R_squared
else:
return sws0, c_over_a0, B0, e0, R0, cs0, grun
def lattice_constants_batch_generate(self, sws=None, ca=None, auto_ca=False):
"""Generates input files and writes them to disk.
Based on the input *sws* and *ca* lists jobnames are created and
then corresponding input files are generated and written
on disk. List of jobnames are returned.
:param sws: List of WS-radii (Default value = None)
:type sws: list(float)
:param ca: List of hcp c/a ratios (Default value = None)
:type ca: list(float)
:returns: List of jobnames
:rtype: list(str)
"""
if sws is None:
sys.exit('System.lattice_constants_batch(): An array of' +
' WS-radii \'sws\' has to be given!')
else:
self.lc_batch_sws_range = np.asarray(sws)
if ca is None:
self.lc_batch_ca_range = self.ca_range_default
else:
self.lc_batch_ca_range = np.asarray(ca)
jobnames = []
if self.lat == 'bcc' or self.lat == 'fcc':
for j in range(len(self.lc_batch_sws_range)):
self.sws = self.lc_batch_sws_range[j]
job = self.create_jobname(self.jobname)
jobnames.append(job)
self.emto.set_values(sws=self.sws, jobname=job)
common.check_folders(
self.folder, self.folder + "/kgrn", self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
elif self.lat == 'hcp' and auto_ca is True:
for i in range(len(self.lc_batch_ca_range)):
for j in range(len(self.lc_batch_sws_range)):
caname = "hcp_ca"+str(i+1)
self.sws = self.lc_batch_sws_range[j]
job = self.create_jobname(self.jobname + "_" + caname)
jobnames.append(job)
self.emto.set_values(sws=self.sws, jobname=job)
self.emto.set_values(latname=caname)
common.check_folders(
self.folder, self.folder + "/kgrn", self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
else:
for j in range(len(self.lc_batch_sws_range)):
self.sws = self.lc_batch_sws_range[j]
job = self.create_jobname(self.jobname)
jobnames.append(job)
self.emto.set_values(sws=self.sws, jobname=job)
common.check_folders(
self.folder, self.folder + "/kgrn", self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
return jobnames
def lattice_constants_batch_calculate(self, sws=None, ca=None):
"""Calculates the ground state WS-radius using the parallelism of
the batch system by submitting one job for each entry in the *sws* list.
This is a combination of the batch_generate and batch_analyze functions.
At the end results are printed on screen.
:param sws: List of WS-radii (Default value = None)
:type sws: list(float)
        :param ca: hcp c/a ratio (Default value = None)
:type ca: float
:returns: WS-radius, c/a (only hcp), bulk modulus, energy,
R (only hcp), cs (only hcp)
:rtype: float, float (only hcp), float, float,
float (only hcp), float (only hcp)
"""
if sws is None:
sys.exit('System.lattice_constants_calculate(): An array of' +
' WS-radii \'sws\' has to be given!')
else:
self.lc_batch_sws_range = np.asarray(sws)
if ca is None:
self.lc_batch_ca_range = self.ca_range_default
else:
self.lc_batch_ca_range = np.asarray(ca)
# Create input files
jobnames = self.lattice_constants_batch_generate(
sws=self.lc_batch_sws_range, ca=self.lc_batch_ca_range)
# Submit calculations to the batch queue system
jobIDs = self.submit_jobs(jobnames, folder=self.folder)
# Wait until all the jobs have finished
self.wait_for_jobs(jobIDs)
# Now we can analyze the results
if self.lat == 'bcc' or self.lat == 'fcc':
sws0, B0, e0, grun = self.lattice_constants_analyze(
sws=sws, ca=self.lc_batch_ca_range)
return sws0, B0, e0, grun
elif self.lat == 'hcp':
sws0, c_over_a0, B0, e0, R0, cs0,grun = self.lattice_constants_analyze(
sws=sws, ca=self.lc_batch_ca_range)
return sws0, c_over_a0, B0, e0, R0, cs0, grun
def lattice_constants_serial_calculate(self, sws=None, stype="simple", rerun=False, skip=False,
delta=0.01, refine=True):
"""Calculates the equilibrium lattice constants on one CPU **WITHOUT** the batch system.
Also the eq. bulk modulus, c/a ratio and energy are returned.
:param sws: Initial guess for the eq. WS-radius (Default value = None)
:type sws: float
:param stype: Type of the energy minimisation algorithm (Default value = "simple")
:type stype: str
:param rerun: True if (Default value = False)
:type rerun: boolean
:param skip: True if (Default value = False)
:type skip: boolean
:param delta: Step size (Default value = 0.01)
:type delta: float
:param refine: True if an optimized WS-radius vs. energy curve
should be computed, False if not (Default value = True)
:type refine: boolean
:returns: WS-radius, c/a, bulk modulus, energy
:rtype: float, float, float, float
"""
if sws is None:
sys.exit('System.lattice_constants_serial_calculate():' +
' Starting point \'sws\' has to be given!')
else:
self.lc_initial_sws = sws
self.lc_stype = stype
self.lc_rerun = rerun
self.lc_skip = skip
        # Find the initial volume and bulk modulus and refine those results.
if self.lat == 'bcc' or self.lat == 'fcc':
c_over_a = 0.0 # sc,bcc and fcc structures only need a
print('Running self.finc_lc()')
v, b, energy = self.find_lc(delta=delta, xc=self.xc)
print('System.find_lc(): sws,Bmod,energy = ', v, b, energy)
if refine:
print('Running self.refine_lc()')
v, b, energy = self.refine_lc(v, delta=delta, xc=self.xc)
print('lattice_constants_serial_calculate: sws,c/a,Bmod,energy = ',
v, c_over_a, b, energy)
elif self.lat == 'hcp':
print('Running self.find_lc_hcp()')
v, b, c_over_a, energy = self.find_lc_hcp(delta=delta, xc=self.xc)
print('System.find_lc_hcp(): sws,c/a,Bmod,energy = ',
v, c_over_a, b, energy)
if refine:
print('Running self.refine_lc_hcp()')
v, b, c_over_a, energy = self.refine_lc_hcp(
v, c_over_a, delta=delta, xc=self.xc)
print('lattice_constants_serial_calculate: sws,c/a,Bmod,energy = ',
v, c_over_a, b, energy)
print()
return v, c_over_a, b, energy
def elastic_constants_analyze(
self, sws=None, bmod=None, ca=None, R=None, cs=None, relax=True, debug=False):
"""Analyzes the output files generated using the
elastic_constants_batch_generate function.
The results are printed on screen.
:param sws: WS-radius (Default value = None)
:type sws: float
:param bmod: Bulk modulus (Default value = None)
:type bmod: float
        :param ca: hcp c/a ratio (Default value = None)
:type ca: float
:param R: Dimensionless quantity of hcp systems (Default value = None)
:type R: float
        :param cs: Second order c/a-derivative of the energy (Default value = None)
        :type cs: float
        :param relax: True if the relaxed hcp distortion energies should be
                      used in the analysis (Default value = True)
        :type relax: boolean
        :param debug: True if debugging information about the relaxation
                      fits should be produced (Default value = False)
        :type debug: boolean
:returns: None
:rtype: None
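
        Example (hypothetical usage for a cubic system; assumes the jobs
        generated by elastic_constants_batch_generate have finished)::

            syst.elastic_constants_analyze(sws=3.0, bmod=250.0)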
"""
from pyemto.EOS.EOS import EOS
eos = EOS(name=self.jobname, xc=self.xc, method='morse', units='bohr')
# Mission critical parameters
if sws is None:
sys.exit(
'System.elastic_constants_analyze(): \'sws\' has not been specified!')
elif sws is not None:
self.sws = sws
        if bmod is None:
            sys.exit(
                'System.elastic_constants_analyze(): \'bmod\' has not been specified!')
elif bmod is not None:
self.bmod = bmod
if ca is None and self.lat == 'hcp':
sys.exit(
'System.elastic_constants_analyze(): \'ca\' (c/a) has not been specified!')
else:
self.ec_analyze_ca = ca
if R is None and self.lat == 'hcp':
sys.exit(
'System.elastic_constants_analyze(): \'R\' has not been specified!')
else:
self.ec_analyze_R = R
if cs is None and self.lat == 'hcp':
sys.exit(
'System.elastic_constants_analyze(): \'cs\' has not been specified!')
else:
self.ec_analyze_cs = cs
deltas = self.elastic_constants_deltas
if self.lat == 'bcc' or self.lat == 'fcc' or self.lat == 'B2':
# Orthorhombic distortion for c' first
if self.lat == 'bcc':
jobname_dist = [
'_bcco0', '_bcco1', '_bcco2', '_bcco3', '_bcco4', '_bcco5']
latname_dist = [
'bcco0', 'bcco1', 'bcco2', 'bcco3', 'bcco4', 'bcco5']
elif self.lat == 'fcc':
jobname_dist = [
'_fcco0', '_fcco1', '_fcco2', '_fcco3', '_fcco4', '_fcco5']
latname_dist = [
'fcco0', 'fcco1', 'fcco2', 'fcco3', 'fcco4', 'fcco5']
elif self.lat == 'B2':
jobname_dist = [
'_B2o0', '_B2o1', '_B2o2', '_B2o3', '_B2o4', '_B2o5']
latname_dist = [
'B2o0', 'B2o1', 'B2o2', 'B2o3', 'B2o4', 'B2o5']
en_cprime = []
good_deltas_cprime = []
for i in range(len(jobname_dist)):
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
en = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en, type(None)):
                    print('System.elastic_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
en_cprime.append(en)
good_deltas_cprime.append(deltas[i])
# Exit if we don't have enough points to do the fit
if len(en_cprime) < 3:
sys.exit(
'System.elastic_constants_analyze(): Not enough energy points to fit c\'!')
# Convert into a numpy array
en_cprime = np.asarray(en_cprime)
good_deltas_cprime = np.asarray(good_deltas_cprime)
# Next the monoclinic distortion for c44
if self.lat == 'bcc':
jobname_dist = [
'_bccm0', '_bccm1', '_bccm2', '_bccm3', '_bccm4', '_bccm5']
latname_dist = [
'bccm0', 'bccm1', 'bccm2', 'bccm3', 'bccm4', 'bccm5']
elif self.lat == 'fcc':
jobname_dist = [
'_fccm0', '_fccm1', '_fccm2', '_fccm3', '_fccm4', '_fccm5']
latname_dist = [
'fccm0', 'fccm1', 'fccm2', 'fccm3', 'fccm4', 'fccm5']
elif self.lat == 'B2':
jobname_dist = [
'_B2m0', '_B2m1', '_B2m2', '_B2m3', '_B2m4', '_B2m5']
latname_dist = [
'B2m0', 'B2m1', 'B2m2', 'B2m3', 'B2m4', 'B2m5']
en_c44 = []
good_deltas_c44 = []
for i in range(len(jobname_dist)):
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
en = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en, type(None)):
print('System.elastic_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
en_c44.append(en)
good_deltas_c44.append(deltas[i])
# Exit if we don't have enough points to do the fit
if len(en_c44) < 3:
sys.exit(
'System.elastic_constants_analyze(): Not enough energy points to fit c44!')
# Convert into a numpy array
en_c44 = np.asarray(en_c44)
good_deltas_c44 = np.asarray(good_deltas_c44)
# All calculations have been done, now it's time to fit the results
popt_cprime, cprime_rsq = eos.distortion_fit(
good_deltas_cprime, en_cprime,title='cprime')
popt_c44, c44_rsq = eos.distortion_fit(
good_deltas_c44, en_c44,title='c44')
volume = 4.0 / 3.0 * np.pi * self.sws**3
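            # popt[0] is assumed to be the quadratic coefficient of the
            # E(delta) fit in Ry; dividing by 2V (V = atomic volume in
            # Bohr^3) and converting Ry/Bohr^3 -> GPa gives the elastic
            # constant.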
cprime = popt_cprime[0] / 2.0 / volume * self.RyBohr3_to_GPa
c44 = popt_c44[0] / 2.0 / volume * self.RyBohr3_to_GPa
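            # c11 and c12 follow from inverting the cubic relations
            # B = (c11 + 2*c12)/3 and c' = (c11 - c12)/2.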
c11 = self.bmod + 4.0 / 3.0 * cprime
c12 = self.bmod - 2.0 / 3.0 * cprime
# Polycrystalline elastic constants
#
# B = bulk modulus
# G = shear modulus
# E = Young modulus
# v = Poisson ratio
# Voigt average
BV = (c11 + 2 * c12) / 3.0
#BV = self.bmod
GV = (c11 - c12 + 3 * c44) / 5.0
EV = 9 * BV * GV / (3 * BV + GV)
vV = (3 * BV - 2 * GV) / (6 * BV + 2 * GV)
# Reuss average
BR = BV
#BR = self.bmod
GR = 5 * (c11 - c12) * c44 / (4 * c44 + 3 * (c11 - c12))
ER = 9 * BR * GR / (3 * BR + GR)
vR = (3 * BR - 2 * GR) / (6 * BR + 2 * GR)
# Hill average
BH = (BV + BR) / 2.0
#BH = self.bmod
GH = (GV + GR) / 2.0
EH = 9 * BH * GH / (3 * BH + GH)
vH = (3 * BH - 2 * GH) / (6 * BH + 2 * GH)
# Elastic anisotropy
AVR = (GV - GR) / (GV + GR)
print("")
print('***cubic_elastic_constants***')
print("")
print(self.jobname)
print("")
print('sws(bohr) = {0:7.3f}'.format(self.sws))
print('B(GPa) = {0:6.2f}'.format(self.bmod))
print('c11(GPa) = {0:6.2f}'.format(c11))
print('c12(GPa) = {0:6.2f}'.format(c12))
print('c\'(GPa) = {0:6.2f}'.format(cprime))
print('c44(GPa) = {0:6.2f}'.format(c44))
print('R-squared(c\') = {0:8.6f}'.format(cprime_rsq))
print('R-squared(c44) = {0:8.6f}'.format(c44_rsq))
print("")
print('Voigt average:')
print("")
print('BV(GPa) = {0:6.2f}'.format(BV))
print('GV(GPa) = {0:6.2f}'.format(GV))
print('EV(GPa) = {0:6.2f}'.format(EV))
            print('vV       = {0:6.2f}'.format(vV))
print("")
print('Reuss average:')
print("")
print('BR(GPa) = {0:6.2f}'.format(BR))
print('GR(GPa) = {0:6.2f}'.format(GR))
print('ER(GPa) = {0:6.2f}'.format(ER))
            print('vR       = {0:6.2f}'.format(vR))
print("")
print('Hill average:')
print("")
print('BH(GPa) = {0:6.2f}'.format(BH))
print('GH(GPa) = {0:6.2f}'.format(GH))
print('EH(GPa) = {0:6.2f}'.format(EH))
            print('vH       = {0:6.2f}'.format(vH))
print("")
print('Elastic anisotropy:')
print("")
            print('AVR      = {0:6.2f}'.format(AVR))
return
elif self.lat == 'hcp':
# Orthorhombic distortion for c66 first
if relax:
jobname_dist = []
latname_dist = []
for i in range(self.elastic_constants_points):
tmp_array1 = []
tmp_array2 = []
for j in range(self.hcpo_relax_points):
tmp_str1 = 'hcpo{0}_r{1}'.format(i,j)
tmp_str2 = '_hcpo{0}_r{1}'.format(i,j)
tmp_array1.append(tmp_str1)
tmp_array2.append(tmp_str2)
# We don't need to relax the first structure
# because it's undistorted
if i == 0 and j == 0:
break
latname_dist.append(tmp_array1)
jobname_dist.append(tmp_array2)
else:
jobname_dist = ['_hcpo0_ur',
'_hcpo1_ur',
'_hcpo2_ur',
'_hcpo3_ur',
'_hcpo4_ur',
'_hcpo5_ur']
latname_dist = ['hcpo0_ur',
'hcpo1_ur',
'hcpo2_ur',
'hcpo3_ur',
'hcpo4_ur',
'hcpo5_ur']
en_c66 = []
good_deltas_c66 = []
if relax:
# First we have to determine how relaxation affects
# the energy. The first structure corresponds to the
# undistorted lattice, which means we don't need to
# relax it:
job = self.create_jobname(self.jobname + jobname_dist[0][0])
en_orig = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en_orig, type(None)):
print('System.elastic_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
en_c66.append(en_orig)
good_deltas_c66.append(deltas[0])
# For all the rest of the structures we compute the
# relaxed ground state energy by fitting a polynomial
# to the atomic pos. vs. energy data:
for i in range(1,self.elastic_constants_points):
ens_tmp = []
xind_tmp = []
for j in range(len(jobname_dist[i])):
already = False
job = self.create_jobname(self.jobname + jobname_dist[i][j])
en = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en, type(None)):
print('System.elastic_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
ens_tmp.append(en)
xind_tmp.append(j)
# Now we should find the relaxed energy by fitting
# a polynomial to the energy vs. atom coordinates data.
# Leave out the point if not enough calculations have
# converged.
if len(ens_tmp) >= 3:
relax_xmin, relax_emin = eos.relax_fit(xind_tmp,ens_tmp,2,debug=debug,
title='c66: hcpo{0}'.format(i))
en_c66.append(relax_emin)
# TEST: Use unrelaxed energies
#en_c66.append(ens_tmp[0])
good_deltas_c66.append(deltas[i])
else:
for i in range(self.elastic_constants_points):
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
en = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en, type(None)):
print('System.elastic_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
en_c66.append(en)
good_deltas_c66.append(deltas[i])
# Exit if we don't have enough points to do the fit
if len(en_c66) < 3:
sys.exit('System.elastic_constants_analyze():' +
' Not enough energy points to fit hcp c66!')
# Convert into a numpy array
en_c66 = np.asarray(en_c66)
good_deltas_c66 = np.asarray(good_deltas_c66)
# Next the monoclinic distortion for c44
# For the monoclinic distortion relaxation effects
# are negligible:
relax = False
if relax:
jobname_dist = []
latname_dist = []
for i in range(self.elastic_constants_points):
tmp_array1 = []
tmp_array2 = []
for j in range(self.hcpo_relax_points):
tmp_str1 = 'hcpm{0}_r{1}'.format(i,j)
tmp_str2 = '_hcpm{0}_r{1}'.format(i,j)
tmp_array1.append(tmp_str1)
tmp_array2.append(tmp_str2)
latname_dist.append(tmp_array1)
jobname_dist.append(tmp_array2)
else:
jobname_dist = ['_hcpm0_ur',
'_hcpm1_ur',
'_hcpm2_ur',
'_hcpm3_ur',
'_hcpm4_ur',
'_hcpm5_ur']
latname_dist = ['hcpm0_ur',
'hcpm1_ur',
'hcpm2_ur',
'hcpm3_ur',
'hcpm4_ur',
'hcpm5_ur']
en_c44 = []
good_deltas_c44 = []
if relax:
# First we have to determine how relaxation affects
# the energy.
for i in range(self.elastic_constants_points):
ens_tmp = []
xind_tmp = []
for j in range(self.hcpo_relax_points):
already = False
job = self.create_jobname(self.jobname + jobname_dist[i][j])
en = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en, type(None)):
print('System.elastic_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
ens_tmp.append(en)
xind_tmp.append(j)
# Now we should find the relaxed energy by fitting
# a polynomial to the energy vs. atom coordinates data.
# Leave out the point if not enough calculations have
# converged.
if len(ens_tmp) >= 3:
relax_xmin, relax_emin = eos.relax_fit(xind_tmp,ens_tmp,3,debug=debug,
title='c44: hcpm{0}'.format(i))
                        en_c44.append(relax_emin)
good_deltas_c44.append(deltas[i])
else:
for i in range(self.elastic_constants_points):
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
en = self.get_energy(job, folder=self.folder, func=self.xc)
if isinstance(en, type(None)):
print('System.elastic_constants_analyze(): Warning:' +
' No output energy found for {0}'.format(job))
else:
en_c44.append(en)
good_deltas_c44.append(deltas[i])
# Exit if we don't have enough points to do the fit
if len(en_c44) < 3:
sys.exit('System.elastic_constants_analyze():' +
' Not enough energy points to fit hcp c44!')
# Convert into a numpy array
en_c44 = np.asarray(en_c44)
good_deltas_c44 = np.asarray(good_deltas_c44)
# All calculations have been done, now it's time to fit the results
popt_c66, c66_rsq = eos.distortion_fit(good_deltas_c66, en_c66, title='c66')
popt_c44, c44_rsq = eos.distortion_fit(good_deltas_c44, en_c44, title='c44')
volume = 4.0 / 3.0 * np.pi * self.sws**3
c66 = popt_c66[0] / 2.0 / volume * self.RyBohr3_to_GPa
c44 = popt_c44[0] / 2.0 / volume * self.RyBohr3_to_GPa
c11 = self.bmod + c66 + self.ec_analyze_cs * \
(2 * self.ec_analyze_R - 1)**2 / 18.0
c12 = self.bmod - c66 + self.ec_analyze_cs * \
(2 * self.ec_analyze_R - 1)**2 / 18.0
c13 = self.bmod + 1.0 / 9.0 * self.ec_analyze_cs * \
(2 * self.ec_analyze_R**2 + self.ec_analyze_R - 1)
c33 = self.bmod + 2.0 / 9.0 * \
self.ec_analyze_cs * (self.ec_analyze_R + 1)**2
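            # The hexagonal constants above follow from B, c66 and the
            # c/a-related quantities R and cs; the combination
            # c2 = c33*(c11 + c12) - 2*c13**2 below enters the Reuss
            # shear modulus.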
c2 = c33 * (c11 + c12) - 2.0 * c13**2
# Polycrystalline elastic constants
#
# B = bulk modulus
# G = shear modulus
# E = Young modulus
# v = Poisson ratio
# Voigt average
BV = (2 * c11 + 2 * c12 + 4 * c13 + c33) / 9.0
GV = (12 * c44 + 12 * c66 + self.ec_analyze_cs) / 30.0
EV = 9 * BV * GV / (3 * BV + GV)
vV = (3 * BV - 2 * GV) / (6 * BV + 2 * GV)
# Reuss average
BR = self.bmod
GR = 5.0 / 2.0 * (c44 * c66 * c2) / \
((c44 + c66) * c2 + 3.0 * BV * c44 * c66)
ER = 9 * BR * GR / (3 * BR + GR)
vR = (3 * BR - 2 * GR) / (6 * BR + 2 * GR)
# Hill average
BH = (BV + BR) / 2.0
#BH = self.bmod
GH = (GV + GR) / 2.0
EH = 9 * BH * GH / (3 * BH + GH)
vH = (3 * BH - 2 * GH) / (6 * BH + 2 * GH)
# Elastic anisotropy
AVR = (GV - GR) / (GV + GR)
print("")
print('***hcp_elastic_constants***')
print("")
print(self.jobname)
print("")
print('sws(bohr) = {0:7.3f}'.format(self.sws))
print('B(GPa) = {0:6.2f}'.format(self.bmod))
print('c11(GPa) = {0:6.2f}'.format(c11))
print('c12(GPa) = {0:6.2f}'.format(c12))
print('c13(GPa) = {0:6.2f}'.format(c13))
print('c33(GPa) = {0:6.2f}'.format(c33))
print('c44(GPa) = {0:6.2f}'.format(c44))
print('c66(GPa) = {0:6.2f}'.format(c66))
print('R-squared(c44) = {0:8.6f}'.format(c44_rsq))
print('R-squared(c66) = {0:8.6f}'.format(c66_rsq))
print("")
print('Voigt average:')
print("")
print('BV(GPa) = {0:6.2f}'.format(BV))
print('GV(GPa) = {0:6.2f}'.format(GV))
print('EV(GPa) = {0:6.2f}'.format(EV))
            print('vV       = {0:6.2f}'.format(vV))
print("")
print('Reuss average:')
print("")
print('BR(GPa) = {0:6.2f}'.format(BR))
print('GR(GPa) = {0:6.2f}'.format(GR))
print('ER(GPa) = {0:6.2f}'.format(ER))
            print('vR       = {0:6.2f}'.format(vR))
print("")
print('Hill average:')
print("")
print('BH(GPa) = {0:6.2f}'.format(BH))
print('GH(GPa) = {0:6.2f}'.format(GH))
print('EH(GPa) = {0:6.2f}'.format(EH))
            print('vH       = {0:6.2f}'.format(vH))
print("")
print('Elastic anisotropy:')
print("")
            print('AVR      = {0:6.2f}'.format(AVR))
return
def elastic_constants_batch_generate(self, sws=None, ca=None, relax=True):
"""Generates all the necessary input files based on the class data.
:param sws: WS-radius (Default value = None)
:type sws: float
        :param ca: hcp c/a ratio (Default value = None)
        :type ca: float
        :param relax: True if relaxation jobs should be generated for the
                      hcp distortions (Default value = True)
        :type relax: boolean
:returns: List of jobnames
:rtype: list(str)
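
        Example (hypothetical usage; the returned jobnames can be passed
        directly to submit_jobs)::

            jobnames = syst.elastic_constants_batch_generate(sws=3.0)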
"""
# Mission critical parameters
if sws is None and self.sws is None:
sys.exit(
'System.elastic_constants_batch_generate(): \'sws\' has not been specified!')
elif sws is not None:
self.sws = sws
if ca is None and self.lat == 'hcp':
sys.exit('System.elastic_constants_batch_generate():' +
' \'ca\' (c/a) has not been specified!')
elif ca is not None:
self.ca = ca
jobnames = []
if self.lat == 'B2':
# Orthorhombic distortion input files for c' first
jobname_dist = ['_B2o0','_B2o1','_B2o2','_B2o3','_B2o4','_B2o5']
latname_dist = ['B2o0','B2o1','B2o2','B2o3','B2o4','B2o5']
self.emto.set_values(ibz=8,nkx=17,nky=17,nkz=17) # High-quality
#self.emto.set_values(ibz=10,nkx=15,nky=15,nkz=15) # Normal quality
for i in range(len(jobname_dist)):
job = self.create_jobname(self.jobname + jobname_dist[i])
jobnames.append(job)
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
common.check_folders(
self.folder, self.folder + "/kgrn", self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
# Next produce the input files of monoclinic distortion for c44
jobname_dist = ['_B2m0','_B2m1','_B2m2','_B2m3','_B2m4','_B2m5']
latname_dist = ['B2m0','B2m1','B2m2','B2m3','B2m4','B2m5']
self.emto.set_values(ibz=9,nkx=17,nky=17,nkz=17) # High-quality
#self.emto.set_values(ibz=11,nkx=15,nky=15,nkz=21) # Normal quality
for i in range(len(jobname_dist)):
job = self.create_jobname(self.jobname + jobname_dist[i])
jobnames.append(job)
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
return jobnames
if self.lat == 'bcc' or self.lat == 'fcc':
# Orthorhombic distortion input files for c' first
if self.lat == 'bcc':
jobname_dist = ['_bcco0','_bcco1','_bcco2','_bcco3','_bcco4','_bcco5']
latname_dist = ['bcco0','bcco1','bcco2','bcco3','bcco4','bcco5']
self.emto.set_values(ibz=10,nkx=31,nky=31,nkz=31) # High-quality
#self.emto.set_values(ibz=10,nkx=15,nky=15,nkz=15) # Normal quality
elif self.lat == 'fcc':
jobname_dist = ['_fcco0','_fcco1','_fcco2','_fcco3','_fcco4','_fcco5']
latname_dist = ['fcco0','fcco1','fcco2','fcco3','fcco4','fcco5']
#self.emto.set_values(ibz=11,nkx=41,nky=41,nkz=41) # Super-high-quality
self.emto.set_values(ibz=11,nkx=31,nky=31,nkz=31) # High-quality
#self.emto.set_values(ibz=11,nkx=17,nky=17,nkz=17) # Normal-quality
for i in range(len(jobname_dist)):
job = self.create_jobname(self.jobname + jobname_dist[i])
jobnames.append(job)
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
common.check_folders(
self.folder, self.folder + "/kgrn", self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
# Next produce the input files of monoclinic distortion for c44
if self.lat == 'bcc':
jobname_dist = ['_bccm0','_bccm1','_bccm2','_bccm3','_bccm4','_bccm5']
latname_dist = ['bccm0','bccm1','bccm2','bccm3','bccm4','bccm5']
self.emto.set_values(ibz=11,nkx=27,nky=27,nkz=37) # High-quality
#self.emto.set_values(ibz=11,nkx=15,nky=15,nkz=21) # Normal quality
elif self.lat == 'fcc':
jobname_dist = ['_fccm0','_fccm1','_fccm2','_fccm3','_fccm4','_fccm5']
latname_dist = ['fccm0','fccm1','fccm2','fccm3','fccm4','fccm5']
#self.emto.set_values(ibz=10,nkx=37,nky=53,nkz=37) # Super-high-quality
self.emto.set_values(ibz=10,nkx=27,nky=37,nkz=27) # High-quality
#self.emto.set_values(ibz=10,nkx=15,nky=21,nkz=15) # Normal quality
for i in range(len(jobname_dist)):
job = self.create_jobname(self.jobname + jobname_dist[i])
jobnames.append(job)
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
return jobnames
elif self.lat == 'hcp':
# Orthorhombic distortion input files for c66 first
# With hcp the structure depends on the c/a ratio. Therefore we also have
# to generate the corresponding structure files.
if relax:
jobname_dist = []
latname_dist = []
for i in range(self.elastic_constants_points):
tmp_array1 = []
tmp_array2 = []
for j in range(self.hcpo_relax_points):
tmp_str1 = 'hcpo{0}_r{1}'.format(i,j)
tmp_str2 = '_hcpo{0}_r{1}'.format(i,j)
tmp_array1.append(tmp_str1)
tmp_array2.append(tmp_str2)
# We don't need to relax the first structure
# because it's undistorted
if i == 0 and j == 0:
break
latname_dist.append(tmp_array1)
jobname_dist.append(tmp_array2)
else:
jobname_dist = ['_hcpo0_ur', '_hcpo1_ur',
'_hcpo2_ur', '_hcpo3_ur', '_hcpo4_ur', '_hcpo5_ur']
latname_dist = ['hcpo0_ur', 'hcpo1_ur',
'hcpo2_ur', 'hcpo3_ur', 'hcpo4_ur', 'hcpo5_ur']
# Check whether Two-center Taylor expansion is on/off
if self.emto.kgrn.expan == 'M':
kappaw = self.kappaw_default
self.lattice.set_values(kappaw=kappaw)
common.check_folders(self.folder)
common.check_folders(self.folder + '/bmdl')
common.check_folders(self.folder + '/kstr')
common.check_folders(self.folder + '/shape')
if relax:
for i in range(self.elastic_constants_points):
for j in range(len(jobname_dist[i])):
if i == 0:
do_i_relax = False
else:
do_i_relax = True
self.lattice.distortion(lat='hcp', dist='ortho', ca=self.ca, index=i,
deltas=self.elastic_constants_deltas,
relax=do_i_relax,relax_index=j)
self.lattice.set_values(jobname_lat=latname_dist[i][j],latpath=self.folder)
self.lattice.bmdl.write_input_file(folder=self.folder)
self.lattice.kstr.write_input_file(folder=self.folder)
self.lattice.shape.write_input_file(folder=self.folder)
self.lattice.batch.write_input_file(folder=self.folder)
else:
for i in range(self.elastic_constants_points):
self.lattice.distortion(lat='hcp', dist='ortho', ca=self.ca, index=i,
deltas=self.elastic_constants_deltas,
relax=False)
self.lattice.set_values(jobname_lat=latname_dist[i],latpath=self.folder)
self.lattice.bmdl.write_input_file(folder=self.folder)
self.lattice.kstr.write_input_file(folder=self.folder)
self.lattice.shape.write_input_file(folder=self.folder)
self.lattice.batch.write_input_file(folder=self.folder)
#self.emto.set_values(ibz=9, nkx=25, nky=25, nkz=21)
self.emto.set_values(ibz=9, nkx=31, nky=31, nkz=25)
if relax:
for i in range(self.elastic_constants_points):
for j in range(len(jobname_dist[i])):
job = self.create_jobname(self.jobname + jobname_dist[i][j])
jobnames.append(job)
self.emto.set_values(sws=self.sws, jobname=job, latname=latname_dist[i][j],
latpath=self.folder)
common.check_folders(self.folder, self.folder + "/kgrn")
common.check_folders(self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
else:
for i in range(self.elastic_constants_points):
job = self.create_jobname(self.jobname + jobname_dist[i])
jobnames.append(job)
self.emto.set_values(sws=self.sws, jobname=job, latname=latname_dist[i],
latpath=self.folder)
common.check_folders(self.folder, self.folder + "/kgrn")
common.check_folders(self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
# Monoclinic distortion input files for c44 next
# For the monoclinic distortion relaxation effects are
# negligibly small; therefore
relax = False
# With hcp the structure depends on the c/a ratio. Therefore we also have
# to generate the corresponding structure files.
if relax:
jobname_dist = []
latname_dist = []
for i in range(self.elastic_constants_points):
tmp_array1 = []
tmp_array2 = []
for j in range(self.hcpm_relax_points):
tmp_str1 = 'hcpm{0}_r{1}'.format(i,j)
tmp_str2 = '_hcpm{0}_r{1}'.format(i,j)
tmp_array1.append(tmp_str1)
tmp_array2.append(tmp_str2)
# We don't need to relax the first structure
# because it's undistorted
if i == 0 and j == 0:
break
latname_dist.append(tmp_array1)
jobname_dist.append(tmp_array2)
else:
jobname_dist = ['_hcpm0_ur', '_hcpm1_ur',
'_hcpm2_ur', '_hcpm3_ur', '_hcpm4_ur', '_hcpm5_ur']
latname_dist = ['hcpm0_ur', 'hcpm1_ur',
'hcpm2_ur', 'hcpm3_ur', 'hcpm4_ur', 'hcpm5_ur']
# Check whether Two-center Taylor expansion is on/off
if self.emto.kgrn.expan == 'M':
kappaw = self.kappaw_default
self.lattice.set_values(kappaw=kappaw)
common.check_folders(self.folder)
common.check_folders(self.folder + '/bmdl')
common.check_folders(self.folder + '/kstr')
common.check_folders(self.folder + '/shape')
if relax:
for i in range(self.elastic_constants_points):
for j in range(len(jobname_dist[i])):
if i == 0:
do_i_relax = False
else:
do_i_relax = True
self.lattice.distortion(lat='hcp', dist='mono', ca=self.ca, index=i,
deltas=self.elastic_constants_deltas,
relax=do_i_relax,relax_index=j)
self.lattice.set_values(jobname_lat=latname_dist[i][j],latpath=self.folder)
self.lattice.bmdl.write_input_file(folder=self.folder)
self.lattice.kstr.write_input_file(folder=self.folder)
self.lattice.shape.write_input_file(folder=self.folder)
self.lattice.batch.write_input_file(folder=self.folder)
else:
for i in range(self.elastic_constants_points):
self.lattice.distortion(lat='hcp', dist='mono', ca=self.ca, index=i,
deltas=self.elastic_constants_deltas,
relax=False)
self.lattice.set_values(jobname_lat=latname_dist[i],latpath=self.folder)
self.lattice.bmdl.write_input_file(folder=self.folder)
self.lattice.kstr.write_input_file(folder=self.folder)
self.lattice.shape.write_input_file(folder=self.folder)
self.lattice.batch.write_input_file(folder=self.folder)
# Two-atom basis
#self.emto.set_values(ibz=13, nkx=41, nky=41, nkz=41)
if relax:
for i in range(self.elastic_constants_points):
for j in range(len(jobname_dist[i])):
job = self.create_jobname(self.jobname + jobname_dist[i][j])
jobnames.append(job)
self.emto.set_values(sws=self.sws, jobname=job, latname=latname_dist[i][j],
latpath=self.folder)
common.check_folders(self.folder, self.folder + "/kgrn")
common.check_folders(self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
else:
# Four-atom basis
#self.emto.set_values(ibz=12, nkx=18, nky=18, nkz=12)
self.emto.set_values(ibz=12, nkx=22, nky=22, nkz=14)
###################################################################
# Atconf related arrays need to be modified because we now have #
# a four atom basis. #
###################################################################
self.atoms = np.array([self.atoms, self.atoms]).flatten()
self.concs = np.array([self.concs, self.concs]).flatten()
self.iqs = np.zeros(len(self.atoms), dtype='int32')
len_div = len(self.iqs) // 4
for i in range(4):
self.iqs[i * len_div:(i + 1) * len_div] = i + 1
self.splts = np.array([self.splts, self.splts]).flatten()
self.itas = np.array([self.itas, self.itas]).flatten()
self.emto.set_values(atoms=self.atoms, iqs=self.iqs, itas=self.itas,
concs=self.concs, splts=self.splts)
####################################################################
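                # After the doubling above each of the four basis sites has
                # its own IQ index (1..4) and carries the same alloy
                # components as the original two-atom basis.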
for i in range(self.elastic_constants_points):
job = self.create_jobname(self.jobname + jobname_dist[i])
jobnames.append(job)
self.emto.set_values(sws=self.sws, jobname=job, latname=latname_dist[i],
latpath=self.folder)
common.check_folders(self.folder, self.folder + "/kgrn")
common.check_folders(self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
            # The following lines are for the four-atom basis with a
            # simple monoclinic lattice.
"""
for i in range(len(self.elastic_constants_deltas)):
self.lattice.distortion(lat='hcp', dist='mono', ca=self.ca, index=i,
deltas=self.elastic_constants_deltas)
self.lattice.set_values(
jobname_lat=latname_dist[i], latpath=self.folder)
self.lattice.bmdl.write_input_file(folder=self.folder)
self.lattice.kstr.write_input_file(folder=self.folder)
self.lattice.shape.write_input_file(folder=self.folder)
self.lattice.batch.write_input_file(folder=self.folder)
self.emto.set_values(ibz=12, nkx=30, nky=20, nkz=20)
################################################################
# Atconf related arrays need to be modified because we now have
# a four atom basis.
################################################################
self.atoms = np.array([self.atoms, self.atoms]).flatten()
self.concs = np.array([self.concs, self.concs]).flatten()
self.iqs = np.zeros(len(self.atoms), dtype='int32')
len_div = len(self.iqs) // 4
for i in range(4):
self.iqs[i * len_div:(i + 1) * len_div] = i + 1
self.splts = np.array([self.splts, self.splts]).flatten()
self.itas = np.array([self.itas, self.itas]).flatten()
self.emto.set_values(atoms=self.atoms, iqs=self.iqs, itas=self.itas,
concs=self.concs, splts=self.splts)
for i in range(len(jobname_dist)):
job = self.create_jobname(self.jobname + jobname_dist[i])
jobnames.append(job)
self.emto.set_values(sws=self.sws, jobname=job, latname=latname_dist[i],
latpath=self.folder)
common.check_folders(
self.folder, self.folder + "/kgrn", self.folder + "/kgrn/tmp")
common.check_folders(self.folder + "/kfcd")
common.check_folders(self.folder + "/fit")
self.emto.kgrn.write_input_file(folder=self.folder)
self.emto.kfcd.write_input_file(folder=self.folder)
self.emto.batch.write_input_file(folder=self.folder)
"""
return jobnames
def elastic_constants_batch_calculate(
self, sws=None, bmod=None, ca=None, R=None, cs=None):
"""Calculates the elastic constants of a system using the parallelism
of the batch system.
This is a combination of the batch_generate and batch_analyze functions.
:param sws: WS-radius (Default value = None)
:type sws: float
:param bmod: Bulk modulus (Default value = None)
:type bmod: float
        :param ca: hcp c/a ratio (Default value = None)
:type ca: float
:param R: The dimensionless quantity of hcp systems (Default value = None)
:type R: float
:param cs: Second order c/a-derivative of the energy (Default value = None)
:type cs: float
:returns: None
:rtype: None
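
        Example (hypothetical usage for a cubic system)::

            syst.elastic_constants_batch_calculate(sws=3.0, bmod=250.0)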
"""
import time
# Mission critical parameters
if sws is None and self.sws is None:
sys.exit(
'System.elastic_constants_batch_calculate(): \'sws\' has not been specified!')
elif sws is not None:
self.sws = sws
if bmod is None and self.bmod is None:
sys.exit(
'System.elastic_constants_batch_calculate(): \'bmod\' has not been specified!')
elif bmod is not None:
self.bmod = bmod
if ca is None and self.lat == 'hcp':
sys.exit('System.elastic_constants_batch_calculate():' +
' \'ca\' (c/a) has not been specified!')
else:
self.ec_batch_calculate_ca = ca
if R is None and self.lat == 'hcp':
sys.exit(
'System.elastic_constants_batch_calculate(): \'R\' has not been specified!')
else:
self.ec_batch_calculate_R = R
if cs is None and self.lat == 'hcp':
sys.exit(
'System.elastic_constants_batch_calculate(): \'cs\' has not been specified!')
else:
self.ec_batch_calculate_cs = cs
# Generate input files
if self.lat == 'bcc' or self.lat == 'fcc':
jobnames = self.elastic_constants_batch_generate(sws=self.sws)
# Submit calculation to the batch system
jobIDs = self.submit_jobs(jobnames, folder=self.folder)
# Wait until all the jobs have finished
self.wait_for_jobs(jobIDs)
# Now we can analyze the results
self.elastic_constants_analyze(sws=self.sws, bmod=self.bmod)
return
elif self.lat == 'hcp':
jobnames = self.elastic_constants_batch_generate(
sws=self.sws, ca=self.ec_batch_calculate_ca)
jobnames_lat = ['hcpo0_ca0',
'hcpo1_ca0',
'hcpo2_ca0',
'hcpo3_ca0',
'hcpo4_ca0',
'hcpo5_ca0',
'hcpm0_ca0',
'hcpm1_ca0',
'hcpm2_ca0',
'hcpm3_ca0',
'hcpm4_ca0',
'hcpm5_ca0']
# First submit and run the lattice calculations
jobIDs_lat = self.submit_jobs(jobnames_lat, folder=self.folder)
# Wait until all the jobs have finished
self.wait_for_jobs(jobIDs_lat)
# Structure calculations have finished, now submit
# KGRN and KFCD calculation.
jobIDs = self.submit_jobs(jobnames, folder=self.folder)
# Wait until all the jobs have finished
self.wait_for_jobs(jobIDs)
# Now we can analyze the results
self.elastic_constants_analyze(
sws=self.sws, bmod=self.bmod, ca=self.ec_batch_calculate_ca,
R=self.ec_batch_calculate_R, cs=self.ec_batch_calculate_cs)
return
def elastic_constants_serial_calculate(self, sws=None, bmod=None, ca=None, R=None, cs=None):
"""Calculates elastic constants on one CPU without using the batch system.
At the end the results are printed on screen.
:param sws: WS-radius (Default value = None)
:type sws: float
:param bmod: Bulk modulus (Default value = None)
:type bmod: float
        :param ca: hcp c/a ratio (Default value = None)
        :type ca: float
        :param R: Dimensionless quantity of hcp systems (Default value = None)
        :type R: float
        :param cs: Second order c/a-derivative of the energy (Default value = None)
        :type cs: float
:returns: None
:rtype: None
"""
from pyemto.EOS.EOS import EOS
eos = EOS(name=self.jobname, xc=self.xc, method='morse', units='bohr')
# Mission critical parameters
if sws is None and self.sws is None:
sys.exit(
'System.elastic_constants_serial_calculate(): \'sws\' has not been specified!')
elif sws is not None:
self.sws = sws
if bmod is None and self.bmod is None:
sys.exit('System.elastic_constants_serial_calculate():' +
' \'bmod\' has not been specified!')
elif bmod is not None:
self.bmod = bmod
if ca is None and self.lat == 'hcp':
sys.exit('System.elastic_constants_serial_calculate():' +
' \'ca\' (c/a) has not been specified!')
elif ca is not None:
self.ca = ca
if R is None and self.lat == 'hcp':
sys.exit(
                'System.elastic_constants_serial_calculate(): \'R\' has not been specified!')
else:
self.ec_analyze_R = R
if cs is None and self.lat == 'hcp':
sys.exit(
                'System.elastic_constants_serial_calculate(): \'cs\' has not been specified!')
else:
self.ec_analyze_cs = cs
deltas = self.elastic_constants_deltas
if self.lat == 'bcc' or self.lat == 'fcc':
# Orthorhombic distortion for c' first
if self.lat == 'bcc':
jobname_dist = [
'_bcco0', '_bcco1', '_bcco2', '_bcco3', '_bcco4', '_bcco5']
latname_dist = [
'bcco0', 'bcco1', 'bcco2', 'bcco3', 'bcco4', 'bcco5']
self.emto.set_values(ibz=10, nkx=27, nky=27, nkz=27)
elif self.lat == 'fcc':
jobname_dist = [
'_fcco0', '_fcco1', '_fcco2', '_fcco3', '_fcco4', '_fcco5']
latname_dist = [
'fcco0', 'fcco1', 'fcco2', 'fcco3', 'fcco4', 'fcco5']
self.emto.set_values(ibz=11, nkx=27, nky=27, nkz=27)
en_cprime = []
for i in range(len(jobname_dist)):
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
if all(already):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
                if not conv[0]:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=self.xc)
en_cprime.append(en)
# Convert energies into a numpy array
en_cprime = np.asarray(en_cprime)
# Next calculate the monoclinic distortion for c44
if self.lat == 'bcc':
jobname_dist = [
'_bccm0', '_bccm1', '_bccm2', '_bccm3', '_bccm4', '_bccm5']
latname_dist = [
'bccm0', 'bccm1', 'bccm2', 'bccm3', 'bccm4', 'bccm5']
self.emto.set_values(ibz=11, nkx=27, nky=27, nkz=37)
elif self.lat == 'fcc':
jobname_dist = [
'_fccm0', '_fccm1', '_fccm2', '_fccm3', '_fccm4', '_fccm5']
latname_dist = [
'fccm0', 'fccm1', 'fccm2', 'fccm3', 'fccm4', 'fccm5']
self.emto.set_values(ibz=10, nkx=27, nky=37, nkz=27)
en_c44 = []
for i in range(len(jobname_dist)):
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
if all(already):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
                if not conv[0]:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=self.xc)
en_c44.append(en)
# Convert energies into a numpy array
en_c44 = np.asarray(en_c44)
# All calculations have been done, now it's time to fit the results
popt_cprime, cprime_rsq = eos.distortion_fit(deltas, en_cprime)
popt_c44, c44_rsq = eos.distortion_fit(deltas, en_c44)
volume = 4.0 / 3.0 * np.pi * self.sws**3
cprime = popt_cprime[0] / 2.0 / volume * self.RyBohr3_to_GPa
c44 = popt_c44[0] / 2.0 / volume * self.RyBohr3_to_GPa
c11 = self.bmod + 4.0 / 3.0 * cprime
c12 = self.bmod - 2.0 / 3.0 * cprime
# Polycrystalline elastic constants
#
# B = bulk modulus
# G = shear modulus
# E = Young modulus
# v = Poisson ratio
# Voigt average
BV = (c11 + 2 * c12) / 3.0
#BV = self.bmod
GV = (c11 - c12 + 3 * c44) / 5.0
EV = 9 * BV * GV / (3 * BV + GV)
vV = (3 * BV - 2 * GV) / (6 * BV + 2 * GV)
# Reuss average
BR = BV
#BR = self.bmod
GR = 5 * (c11 - c12) * c44 / (4 * c44 + 3 * (c11 - c12))
ER = 9 * BR * GR / (3 * BR + GR)
vR = (3 * BR - 2 * GR) / (6 * BR + 2 * GR)
# Hill average
BH = (BV + BR) / 2.0
#BH = self.bmod
GH = (GV + GR) / 2.0
EH = 9 * BH * GH / (3 * BH + GH)
vH = (3 * BH - 2 * GH) / (6 * BH + 2 * GH)
# Elastic anisotropy
AVR = (GV - GR) / (GV + GR)
print("")
print('***cubic_elastic_constants***')
print("")
print(self.jobname)
print("")
print('c11(GPa) = {0:6.2f}'.format(c11))
print('c12(GPa) = {0:6.2f}'.format(c12))
print(
'c44(GPa) = {0:6.2f}, R-squared = {1:8.6f}'.format(c44, c44_rsq))
print(
'c\' (GPa) = {0:6.2f}, R-squared = {1:8.6f}'.format(cprime, cprime_rsq))
print('B (GPa) = {0:6.2f}'.format(self.bmod))
print("")
print('Voigt average:')
print("")
print('BV(GPa) = {0:6.2f}'.format(BV))
print('GV(GPa) = {0:6.2f}'.format(GV))
print('EV(GPa) = {0:6.2f}'.format(EV))
            print('vV       = {0:6.2f}'.format(vV))
print("")
print('Reuss average:')
print("")
print('BR(GPa) = {0:6.2f}'.format(BR))
print('GR(GPa) = {0:6.2f}'.format(GR))
print('ER(GPa) = {0:6.2f}'.format(ER))
            print('vR       = {0:6.2f}'.format(vR))
print("")
print('Hill average:')
print("")
print('BH(GPa) = {0:6.2f}'.format(BH))
print('GH(GPa) = {0:6.2f}'.format(GH))
print('EH(GPa) = {0:6.2f}'.format(EH))
            print('vH       = {0:6.2f}'.format(vH))
print("")
print('Elastic anisotropy:')
print("")
            print('AVR      = {0:6.2f}'.format(AVR))
return
elif self.lat == 'hcp':
# Check whether Two-center Taylor expansion is on/off
if self.emto.kgrn.expan == 'M':
kappaw = self.kappaw_default
self.lattice.set_values(kappaw=kappaw)
# Check whether we need to create some folders
# for the structure output files.
common.check_folders(self.folder)
common.check_folders(self.folder + '/bmdl')
common.check_folders(self.folder + '/kstr')
common.check_folders(self.folder + '/shape')
# Orthorhombic distortion for c66 first
jobname_dist = ['_hcpo0_ca0', '_hcpo1_ca0',
'_hcpo2_ca0', '_hcpo3_ca0', '_hcpo4_ca0', '_hcpo5_ca0']
latname_dist = ['hcpo0_ca0', 'hcpo1_ca0',
'hcpo2_ca0', 'hcpo3_ca0', 'hcpo4_ca0', 'hcpo5_ca0']
self.emto.set_values(ibz=9, nkx=31, nky=19, nkz=19)
en_c66 = []
for i in range(len(jobname_dist)):
# With hcp the structure depends on the c/a ratio. Therefore we also have
# to generate the corresponding structure files.
self.lattice.distortion(lat='hcp', dist='ortho', ca=self.ca, index=i,
deltas=self.elastic_constants_deltas)
self.lattice.set_values(
jobname_lat=latname_dist[i], latpath=self.folder)
self.runlattice(jobname=latname_dist[i], folder=self.folder)
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
if all(already):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
                if not conv[0]:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=self.xc)
en_c66.append(en)
# Convert energies into a numpy array
            en_c66 = np.asarray(en_c66)
# Monoclinic distortion for c44 next
jobname_dist = ['_hcpm0_ca0',
'_hcpm1_ca0',
'_hcpm2_ca0',
'_hcpm3_ca0',
'_hcpm4_ca0',
'_hcpm5_ca0']
latname_dist = ['hcpm0_ca0',
'hcpm1_ca0',
'hcpm2_ca0',
'hcpm3_ca0',
'hcpm4_ca0',
'hcpm5_ca0']
self.emto.set_values(ibz=12, nkx=30, nky=20, nkz=20)
en_c44 = []
for i in range(len(jobname_dist)):
# With hcp the structure depends on the c/a ratio. Therefore we also have
# to generate the corresponding structure files.
self.lattice.distortion(lat='hcp', dist='mono', ca=self.ca, index=i,
deltas=self.elastic_constants_deltas)
self.lattice.set_values(
jobname_lat=latname_dist[i], latpath=self.folder)
self.runlattice(jobname=latname_dist[i], folder=self.folder)
                ###############################################################
                # Atconf related arrays need to be modified because we now have
                # a four atom basis. The arrays are doubled only on the first
                # pass through the loop, and a separate index is used for the
                # basis sites so that the outer loop index i is preserved.
                ###############################################################
                if i == 0:
                    self.atoms = np.array([self.atoms, self.atoms]).flatten()
                    self.concs = np.array([self.concs, self.concs]).flatten()
                    self.iqs = np.zeros(len(self.atoms), dtype='int32')
                    len_div = len(self.iqs) // 4
                    for isite in range(4):
                        self.iqs[isite * len_div:(isite + 1) * len_div] = isite + 1
                    self.splts = np.array([self.splts, self.splts]).flatten()
                    self.itas = np.array([self.itas, self.itas]).flatten()
                    self.emto.set_values(atoms=self.atoms, iqs=self.iqs, itas=self.itas,
                                         concs=self.concs, splts=self.splts)
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
if all(already):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
                if not conv[0]:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=self.xc)
en_c44.append(en)
# Convert energies into a numpy array
en_c44 = np.asarray(en_c44)
# All calculations have been done, now it's time to fit the results
popt_c66, c66_rsq = eos.distortion_fit(deltas, en_c66)
popt_c44, c44_rsq = eos.distortion_fit(deltas, en_c44)
volume = 4.0 / 3.0 * np.pi * self.sws**3
c66 = popt_c66[0] / 2.0 / volume * self.RyBohr3_to_GPa
c44 = popt_c44[0] / 2.0 / volume * self.RyBohr3_to_GPa
c11 = self.bmod + c66 + self.ec_analyze_cs * \
(2 * self.ec_analyze_R - 1)**2 / 18.0
c12 = self.bmod - c66 + self.ec_analyze_cs * \
(2 * self.ec_analyze_R - 1)**2 / 18.0
c13 = self.bmod + 1.0 / 9.0 * self.ec_analyze_cs * (
2 * self.ec_analyze_R**2 + self.ec_analyze_R - 1)
c33 = self.bmod + 2.0 / 9.0 * \
self.ec_analyze_cs * (self.ec_analyze_R + 1)**2
c2 = c33 * (c11 + c12) - 2.0 * c13**2
# Polycrystalline elastic constants
#
# B = bulk modulus
# G = shear modulus
# E = Young modulus
# v = Poisson ratio
# Voigt average
BV = (2 * c11 + 2 * c12 + 4 * c13 + c33) / 9.0
GV = (12 * c44 + 12 * c66 + self.ec_analyze_cs) / 30.0
EV = 9 * BV * GV / (3 * BV + GV)
vV = (3 * BV - 2 * GV) / (6 * BV + 2 * GV)
# Reuss average
BR = self.bmod
GR = 5.0 / 2.0 * (c44 * c66 * c2) / \
((c44 + c66) * c2 + 3.0 * BV * c44 * c66)
ER = 9 * BR * GR / (3 * BR + GR)
vR = (3 * BR - 2 * GR) / (6 * BR + 2 * GR)
# Hill average
BH = (BV + BR) / 2.0
#BH = self.bmod
GH = (GV + GR) / 2.0
EH = 9 * BH * GH / (3 * BH + GH)
vH = (3 * BH - 2 * GH) / (6 * BH + 2 * GH)
# Elastic anisotropy
AVR = (GV - GR) / (GV + GR)
print("")
print('***hcp_elastic_constants***')
print("")
print(self.jobname)
print("")
print('c11(GPa) = {0:6.2f}'.format(c11))
print('c12(GPa) = {0:6.2f}'.format(c12))
print('c13(GPa) = {0:6.2f}'.format(c13))
print('c33(GPa) = {0:6.2f}'.format(c33))
print(
'c44(GPa) = {0:6.2f}, R-squared = {1:8.6f}'.format(c44, c44_rsq))
print(
'c66(GPa) = {0:6.2f}, R-squared = {1:8.6f}'.format(c66, c66_rsq))
print('B (GPa) = {0:6.2f}'.format(self.bmod))
print("")
print('Voigt average:')
print("")
print('BV(GPa) = {0:6.2f}'.format(BV))
print('GV(GPa) = {0:6.2f}'.format(GV))
print('EV(GPa) = {0:6.2f}'.format(EV))
            print('vV       = {0:6.2f}'.format(vV))
print("")
print('Reuss average:')
print("")
print('BR(GPa) = {0:6.2f}'.format(BR))
print('GR(GPa) = {0:6.2f}'.format(GR))
print('ER(GPa) = {0:6.2f}'.format(ER))
            print('vR       = {0:6.2f}'.format(vR))
print("")
print('Hill average:')
print("")
print('BH(GPa) = {0:6.2f}'.format(BH))
print('GH(GPa) = {0:6.2f}'.format(GH))
print('EH(GPa) = {0:6.2f}'.format(EH))
            print('vH       = {0:6.2f}'.format(vH))
print("")
print('Elastic anisotropy:')
print("")
            print('AVR      = {0:6.2f}'.format(AVR))
return
##########################################################################
# #
# Internal routines start here #
# #
##########################################################################
def find_lc(self, delta=0.01, prn=True, xc='PBE'):
"""Computes initial estimates for the ground state quantities for cubic systems.
:param delta: Step size for the volume vs. energy array (Default value = 0.01)
:type delta: float
:param prn: True if results should be printed, False if not (Default value = True)
:type prn: boolean
:param xc: Choice of the xc-functional (Default value = 'PBE')
:type xc: str
        :returns: WS-radius, bulk modulus and energy
        :rtype: float, float, float
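
        Example (hypothetical usage; ``lc_initial_sws``, ``lc_skip`` and
        ``lc_rerun`` must already be set, e.g. by
        lattice_constants_serial_calculate)::

            sws0, B0, E0 = syst.find_lc(delta=0.01, xc='PBE')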
"""
from pyemto.EOS.EOS import EOS
eos = EOS(name=self.jobname, xc=xc, method='morse', units='bohr')
if prn:
print('')
print('*****find_lc*****')
print('')
#enough = False
energies = []
swses = []
# Compute first two points
# Start at the initial sws
already = False
self.sws = self.lc_initial_sws
job = self.create_jobname(self.jobname)
self.emto.set_values(sws=self.sws, jobname=job)
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
if self.lc_skip or (all(already) and not self.lc_rerun):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
        if not conv[0]:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=xc)
energies.append(en)
swses.append(self.sws)
# Compute second point at sws+delta
already = False
self.sws = self.lc_initial_sws + delta
job = self.create_jobname(self.jobname)
self.emto.set_values(sws=self.sws, jobname=job)
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
#print('already = ',already)
if self.lc_skip or (all(already) and not self.lc_rerun):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
#print('conv = ',conv)
# KGRN has crashed, find out why
        if not conv[0]:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=xc)
energies.append(en)
swses.append(self.sws)
# The initial 2 points are now ready and
        # we use them to predict where to calculate the next point
next_sws, minfound = self.predict_next_sws(swses, energies, delta)
        # Two points are not enough to calculate the equation of state
enough = False
# Loop until we have enough points to calculate initial sws
iteration = 0
while not enough:
iteration += 1
            # If 25 iterations are not enough, the initial sws guess should be checked
if iteration > 25:
print(
"SWS loop did not converge in {0} iterations!".format(iteration))
quit()
# Calculate next point
self.sws = next_sws
job = self.create_jobname(self.jobname)
self.emto.set_values(sws=self.sws, jobname=job)
# First check if calculations are already done
already = False
already = self.check_conv(job, folder=self.folder)
# Use existing calculations if available.
if self.lc_skip or (all(already) and not self.lc_rerun):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
            if not conv[0]:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=xc)
energies.append(en)
swses.append(self.sws)
            # Check whether we have a minimum and predict the next sws
            next_sws, minfound = self.predict_next_sws(swses, energies, delta)
            # Check if we have a minimum and enough points
# 9 points should be enough for EOS
if minfound and len(swses) > 9:
enough = True
#print('debug find_lc: ',swses)
if prn:
self.print_sws_ens('find_lc', swses, energies)
sws0, e0, B, grun = eos.fit(swses, energies)
# These functions create files on disk about the data to be fitted
# as well as the results of the fit.
# eos.prepareData()
#sws0,e0,B,grun = eos.fit2file()
return sws0, B, e0
def refine_lc(self, sws, delta=0.01, prn=True, xc='PBE'):
"""Calculates a more accurate equilibrium volume for cubic systems.
:param sws: WS-radius
:type sws: float
:param delta: Step size for the volume vs. energy array (Default value = 0.01)
:type delta: float
:param prn: True if results should be printed, False if not (Default value = True)
:type prn: boolean
:param xc: Choice of the xc-functional (Default value = 'PBE')
:type xc: str
        :returns: WS-radius, bulk modulus and energy
        :rtype: float, float, float
"""
from pyemto.EOS.EOS import EOS
eos = EOS(name=self.jobname, xc=xc, method='morse', units='bohr')
if prn:
print('')
print('*****refine_lc*****')
print('')
# make sws ranges around given sws
points = []
for i in range(-3, 7):
points.append(i * delta)
energies = []
swses = []
for p in points:
already = False
next_sws = sws + p
# Make inputs and job name
self.sws = next_sws
job = self.create_jobname(self.jobname)
self.emto.set_values(sws=self.sws, jobname=job)
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
if self.lc_skip or (all(already) and not self.lc_rerun):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
            if not conv[0]:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=xc)
energies.append(en)
swses.append(self.sws)
if prn:
self.print_sws_ens('refine_lc', swses, energies)
sws0, e0, B, grun = eos.fit(swses, energies)
# These functions create files on disk about the data to be fitted
# as well as the results of the fit.
# eos.prepareData()
#sws0,e0,B,grun = eos.fit2file()
return sws0, B, e0
def find_lc_hcp(self, delta=0.01, prn=True, xc='PBE'):
"""Computes initial estimates for the ground state quantities for hcp systems.
:param delta: Step size for the volume vs. energy array (Default value = 0.01)
:type delta: float
:param prn: True if results should be printed, False if not (Default value = True)
:type prn: boolean
:param xc: Choice of the xc-functional (Default value = 'PBE')
:type xc: str
:returns: WS-radius, bulk modulus, c/a and energy
:rtype: float, float, float, float
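
        Example (hypothetical usage; requires ``lc_initial_sws`` and the
        default c/a mesh ``ca_range_default`` to be set)::

            sws0, B0, ca0, E0 = syst.find_lc_hcp(delta=0.01, xc='PBE')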
"""
from pyemto.EOS.EOS import EOS
eos_hcp = EOS(name=self.jobname, xc=xc, method='morse', units='bohr')
if prn:
print('')
print('*****find_lc_hcp*****')
print('')
#enough = False
        # Before anything else happens we generate the structure output
        # files (BMDL, KSTR and SHAPE) for a reasonable c/a mesh.
ca_mesh = self.ca_range_default
ca_mesh_len = len(ca_mesh)
# sws-optimization is done using this particular c/a value,
ca_sws_ind = 2
# all the other c/a's will use these same volumes.
# Fit an n'th order polynomial to the energy vs. c/a data.
ca_fit_order = 2
ca_latpaths = []
ca_prefixes = []
# A 2D-array [i,j], where i = c/a axis and j = sws axis
energies = np.zeros((ca_mesh_len, 50))
energies0 = [] # c/a optimized energy for a given WS-radius
swses = [] # List of WS-radii
cas0 = [] # Energetically optimized c/a's for a given WS-radius
for i in range(0, ca_mesh_len):
ca_prefixes.append("/ca_{0:8.6f}".format(ca_mesh[i]))
ca_latpaths.append(self.latpath + ca_prefixes[i])
# Check whether structure files already exists
# and if not run the calculations.
for i in range(0, ca_mesh_len):
self.lattice.set_values(ca=ca_mesh[i])
# self.lattice.bmdl.write_input_file(folder=ca_latpaths[i])
str_exists = self.check_str(self.latname, folder=ca_latpaths[i])
            if not str_exists:
self.runlattice(jobname=self.latname, folder=ca_latpaths[i])
# Compute first two points
# Start at the initial sws
already = False
self.sws = self.lc_initial_sws
job = self.create_jobname(self.jobname)
self.emto.set_values(sws=self.sws, jobname=job)
# check if calculations are already done
for i in range(ca_mesh_len):
# We have to remember to update the lattice path every
# time we change c/a
self.emto.set_values(latpath=ca_latpaths[i])
hcp_subfolder = self.folder + "/{0}".format(ca_prefixes[i])
already = self.check_conv(job, folder=hcp_subfolder)
if self.lc_skip or (all(already) and not self.lc_rerun):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=hcp_subfolder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=hcp_subfolder)
# KGRN has crashed, find out why
            if not conv[0]:
self.which_error(job, folder=hcp_subfolder)
quit()
en = self.get_energy(job, folder=hcp_subfolder, func=xc)
energies[i, 0] = en
swses.append(self.sws)
# Calculate energetically optimized c/a and the corresponding energy
# print(energies[:,:15])
ca0, en0 = eos_hcp.ca_fit(ca_mesh, energies[:, 0], ca_fit_order)
cas0.append(ca0)
energies0.append(en0)
# Compute second point at initial sws + delta
already = False
self.sws = self.lc_initial_sws + delta
job = self.create_jobname(self.jobname)
self.emto.set_values(sws=self.sws, jobname=job)
# check if calculations are already done
for i in range(ca_mesh_len):
# We have to remember to update the lattice path every
# time we change c/a
self.emto.set_values(latpath=ca_latpaths[i])
hcp_subfolder = self.folder + "/{0}".format(ca_prefixes[i])
already = self.check_conv(job, folder=hcp_subfolder)
if self.lc_skip or (all(already) and not self.lc_rerun):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=hcp_subfolder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=hcp_subfolder)
# KGRN has crashed, find out why
            if not conv[0]:
self.which_error(job, folder=hcp_subfolder)
quit()
en = self.get_energy(job, folder=hcp_subfolder, func=xc)
energies[i, 1] = en
swses.append(self.sws)
# Calculate energetically optimized c/a and the corresponding energy
# print(energies[:,:15])
ca0, en0 = eos_hcp.ca_fit(ca_mesh, energies[:, 1], ca_fit_order)
cas0.append(ca0)
energies0.append(en0)
# The initial 2 points are now ready and
        # we use them to predict where to calculate the next point
next_sws, minfound = self.predict_next_sws(swses, energies0, delta)
        # Two points are not enough to calculate the equation of state
enough = False
# Loop until we have enough points to calculate initial sws
iteration = 0
while not enough:
iteration += 1
            # If 25 iterations are not enough, the initial sws guess should be checked
if iteration > 25:
print(
"SWS loop did not converge in {0} iterations!".format(iteration))
quit()
# Calculate next point
already = False
self.sws = next_sws
job = self.create_jobname(self.jobname)
self.emto.set_values(sws=self.sws, jobname=job)
# First check if calculations are already done
for i in range(ca_mesh_len):
# We have to remember to update the lattice path every
# time we change c/a
self.emto.set_values(latpath=ca_latpaths[i])
hcp_subfolder = self.folder + "/{0}".format(ca_prefixes[i])
already = self.check_conv(job, folder=hcp_subfolder)
if self.lc_skip or (all(already) and not self.lc_rerun):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=hcp_subfolder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=hcp_subfolder)
# KGRN has crashed, find out why
                if not conv[0]:
self.which_error(job, folder=hcp_subfolder)
quit()
en = self.get_energy(job, folder=hcp_subfolder, func=xc)
energies[i, iteration + 1] = en
swses.append(self.sws)
# Calculate energetically optimized c/a and the corresponding energy
# print(energies[:,:15])
ca0, en0 = eos_hcp.ca_fit(
ca_mesh, energies[:, iteration + 1], ca_fit_order)
cas0.append(ca0)
energies0.append(en0)
            # Check whether we have a minimum and predict the next sws
            next_sws, minfound = self.predict_next_sws(swses, energies0, delta)
            # Check if we have a minimum and enough points
# 9 points should be enough for EOS
if minfound and len(swses) > 9:
enough = True
#print('debug find_lc_hcp: ',swses)
if prn:
self.print_sws_ens_hcp('find_lc_hcp', swses, energies0, cas0)
sws0, e0, B0, grun = eos_hcp.fit(swses, energies0)
# These functions create files on disk about the data to be fitted
# as well as the results of the fit.
# eos.prepareData()
#sws0,e0,B0,grun = eos.fit2file()
# Now that we have the ground state WS-radius sws0 we can use the cas0 array
# to compute the corresponding ground state ca0
ca0_vs_sws = np.polyfit(swses, cas0, 2)
ca0_vs_sws = np.poly1d(ca0_vs_sws)
c_over_a0 = ca0_vs_sws(sws0)
        # Return order (sws, B, c/a, E) matches the docstring, refine_lc_hcp
        # and the caller in lattice_constants_serial_calculate.
        return sws0, B0, c_over_a0, e0
def refine_lc_hcp(self, sws, ca0, delta=0.01, prn=True, xc='PBE'):
"""Calculates a more accurate equilibrium volume for hcp systems.
:param sws: WS-radius
:type sws: float
:param ca0: Previously computed eq. c/a ratio
:type ca0: float
:param delta: Step size for the volume vs. energy array (Default value = 0.01)
:type delta: float
:param prn: True if results should be printed, False if not (Default value = True)
:type prn: boolean
:param xc: Choice of the xc-functional (Default value = 'PBE')
:type xc: str
        :returns: WS-radius, bulk modulus, c/a and energy
:rtype: float, float, float, float
"""
from pyemto.EOS.EOS import EOS
eos_hcp = EOS(name=self.jobname, xc=xc, method='morse', units='bohr')
if prn:
print('')
print('*****refine_lc_hcp*****')
print('')
# First compute the structure with the optimized
# c/a (found by the find_lc_hcp() function).
self.lattice.set_values(ca=ca0)
ca_prefix = "/ca_{0:8.6f}".format(ca0)
ca_latpath = self.latpath + ca_prefix
str_exists = self.check_str(self.latname, folder=ca_latpath)
if str_exists == False:
self.runlattice(jobname=self.latname, folder=ca_latpath)
# Make an sws mesh around the given sws
points = []
for i in range(-3, 7):
points.append(i * delta)
energies = []
swses = []
self.emto.set_values(latpath=ca_latpath)
hcp_subfolder = self.folder + "/{0}".format(ca_prefix)
for p in points:
already = False
next_sws = sws + p
# Make inputs and job name
self.sws = next_sws
job = self.create_jobname(self.jobname)
self.emto.set_values(sws=self.sws, jobname=job)
# check if calculations are already done
already = self.check_conv(job, folder=hcp_subfolder)
if self.lc_skip or (all(already) and not self.lc_rerun):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=hcp_subfolder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=hcp_subfolder)
# KGRN has crashed, find out why
if conv[0] == False:
self.which_error(job, folder=hcp_subfolder)
quit()
en = self.get_energy(job, folder=hcp_subfolder, func=xc)
energies.append(en)
swses.append(self.sws)
if prn:
self.print_sws_ens('refine_lc_hcp', swses, energies)
sws0, e0, B, grun = eos_hcp.fit(swses, energies)
c_over_a = ca0
# These functions create files on disk about the data to be fitted
# as well as the results of the fit.
# eos.prepareData()
#sws0,e0,B,grun = eos.fit2file()
return sws0, B, c_over_a, e0
def predict_next_sws(self, swses, en, maxdelta=0.05):
"""Predict next WS-radius based on a simple gradient descent algorithm.
:param swses: List of current WS-radii
:type swses: list(float)
:param en: List of current energies
:type en: list(float)
:param maxdelta: Maximum step size (Default value = 0.05)
:type maxdelta: float
:returns: Next WS-radius and True if the energy minimum has been found,
False if not yet found
:rtype: float, boolean
"""
# Check if we already have a minimum here and predict the direction for the next point
m = 0  # Assume the first point is the minimum at start
minsws = 10000.0
maxsws = 0.0
for i in range(len(swses)):
# Track the smallest and largest WS-radii seen so far. Two independent
# checks are needed: with if/elif an element that updates minsws could
# never update maxsws, leaving maxsws stuck at its initial 0.0.
if swses[i] < minsws:
minsws = swses[i]
if swses[i] > maxsws:
maxsws = swses[i]
if en[i] < en[m]:
m = i
wehaveminimum = False
# Possible cases
if swses[m] == min(swses):  # No minimum found; decrease sws
# One should make delta depend on energy slope at some point.
delta = -maxdelta
newsws = swses[m] + delta
elif swses[m] == max(swses):  # No minimum found; increase sws
delta = maxdelta
newsws = swses[m] + delta
else:
wehaveminimum = True
# Minimum is inside the sws range. Decide where to add new point
larger = [s for s in swses if swses[m] < s]
smaller = [s for s in swses if swses[m] > s]
if len(larger) > len(smaller):
# Decrease volume
sws = minsws
delta = -maxdelta
newsws = sws + delta
else:
# Increase volume
sws = maxsws
delta = maxdelta
newsws = sws + delta
return newsws, wehaveminimum
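# Illustration of the walk logic above (hypothetical values, not from the
# original module): with swses = [2.9, 3.0, 3.1] and energies
# en = [-0.10, -0.12, -0.11], the lowest energy sits at the interior point
# sws = 3.0, so the minimum has been bracketed and the (here equal-sized,
# therefore larger) side of the range is extended:
#
# >>> obj.predict_next_sws([2.9, 3.0, 3.1], [-0.10, -0.12, -0.11])
# (3.15, True)   # max(swses) + maxdelta, up to float rounding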
def print_sws_ens(self, string, swses, energies):
"""Prints the WS-radii and calculated energies of cubic systems
:param string: Header for the printout
:type string: str
:param swses: List of WS-radii
:type swses: list(float)
:param energies: List of energies
:type energies: list(float)
:returns: None
:rtype: None
"""
str_len = len(string)
print('' + '\n')
print('************')
print(str_len * '*')
print(string)
print(str_len * '*')
print('************')
print(' SWS Energy')
for i in range(len(swses)):
print('{0:8.6f} {1:12.6f}'.format(swses[i], energies[i]))
print('' + '\n')
return
def print_sws_ens_hcp(self, string, swses, energies, cas):
"""Prints the WS-radii, ca/a ratios and calculated energies of hcp systems
:param string: Header for the printout
:type string: str
:param swses: List of WS-radii
:type swses: list(float)
:param energies: List of energies
:type energies: list(float)
:param cas: List of c/a ratios
:type cas: list(float)
:returns: None
:rtype: None
"""
str_len = len(string)
print('' + '\n')
print(str_len * '*')
print(string)
print(str_len * '*')
print(' SWS Energy0 c/a0')
for i in range(len(swses)):
print('{0:8.6f} {1:12.6f} {2:8.6f}'.format(
swses[i], energies[i], cas[i]))
print('' + '\n')
return
def check_conv(self, jobname, folder="./"):
"""Checks the convergence of given KGRN and KFCD calculations by reading
their output files.
:param jobname: Name of the output files
:type jobname: str
:param folder: Name of the folder where the output files are located (Default value = "./")
:type folder: str
:returns: Convergence of KGRN (True/False), convergence of KFCD (True/False)
:rtype: boolean, boolean
"""
folderKGRN = folder + '/kgrn/'
# Check if we got convergence in KGRN
prntfile = jobname + ".prn"
convergedKGRN = False
fn = os.path.join(folderKGRN, prntfile)
if not os.path.isfile(fn):
pass
else:
pfile = open(fn, "r")
for line in pfile:
if "Converged in" in line:
convergedKGRN = True
break
pfile.close()
folderKFCD = folder + '/kfcd/'
# Check if we got convergence in KFCD
prntfile = jobname + ".prn"
convergedKFCD = False
fn = os.path.join(folderKFCD, prntfile)
if not os.path.isfile(fn):
pass
else:
pfile = open(fn, "r")
for line in pfile:
if "Finished at:" in line:
convergedKFCD = True
break
pfile.close()
return convergedKGRN, convergedKFCD
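# Typical use (hypothetical job and folder names): the two returned flags
# decide how much of the KGRN/KFCD pipeline has to be (re)run,
#
# conv_kgrn, conv_kfcd = self.check_conv('cu_2.800000', folder='./cu')
# if conv_kgrn and not conv_kfcd:
#     conv = self.runemto(jobname='cu_2.800000', folder='./cu', onlyKFCD=True)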
def runlattice(self, jobname=None, folder="./", EMTODIR=None):
"""Run BMDL, KSTR and SHAPE calculation **WITHOUT** using the batch system.
:param jobname: Name of the input files (Default value = None)
:type jobname: str
:param folder: Name of the folder where the input files are located (Default value = "./")
:type folder: str
:param EMTODIR: Path to the EMTO installation folder (Default value = None)
:type EMTODIR: str
:returns: None
:rtype: None
"""
if EMTODIR is None:
EMTODIR = self.EMTOdir
if jobname is None:
sys.exit("System.runlattice: \'jobname\' has to be given!")
# Make sure folders exist
common.check_folders(folder, folder + "/bmdl/")
# Create input file
self.lattice.bmdl.write_input_file(folder=folder)
# Run BMDL
job = os.path.join(folder, jobname)
command = "cd {0}; ".format(
folder) + EMTODIR + "/bmdl/source/bmdl < " + job + ".bmdl"
print("running: " + command)
starttime = time.time()
os.system(command)
endtime = time.time()
timetaken = endtime - starttime
timehours = int(timetaken // 3600)
timeminutes = int((timetaken - timehours * 3600) // 60)
timeseconds = (timetaken - timehours * 3600) - timeminutes * 60
print("Finished running: " + command + '. Time: {0:3}h {1:2}m {2:5.2f}s '
.format(timehours, timeminutes, timeseconds) + '\n')
# Make sure folders exist
common.check_folders(folder, folder + "/kstr/")
# Create input file
self.lattice.kstr.write_input_file(folder=folder)
# Run KSTR
job = os.path.join(folder, jobname)
command = "cd {0}; ".format(
folder) + EMTODIR + "/kstr/source/kstr < " + job + ".kstr"
print("running: " + command)
starttime = time.time()
os.system(command)
endtime = time.time()
timetaken = endtime - starttime
timehours = int(timetaken // 3600)
timeminutes = int((timetaken - timehours * 3600) // 60)
timeseconds = (timetaken - timehours * 3600) - timeminutes * 60
print("Finished running: " + command + '. Time: {0:3}h {1:2}m {2:5.2f}s '
.format(timehours, timeminutes, timeseconds) + '\n')
if self.lattice.kstr.twocenter == True:
command = "cd {0}; ".format(
folder) + EMTODIR + "/kstr/source/kstr < " + job + "2.kstr"
print("running: " + command)
starttime = time.time()
os.system(command)
endtime = time.time()
timetaken = endtime - starttime
timehours = int(timetaken // 3600)
timeminutes = int((timetaken - timehours * 3600) // 60)
timeseconds = (timetaken - timehours * 3600) - timeminutes * 60
print("Finished running: " + command + '. Time: {0:3}h {1:2}m {2:5.2f}s '
.format(timehours, timeminutes, timeseconds) + '\n')
# Make sure folders exist
common.check_folders(folder, folder + "/shape/")
# Create input file
self.lattice.shape.write_input_file(folder=folder)
# Run SHAPE
job = os.path.join(folder, jobname)
command = "cd {0}; ".format(
folder) + EMTODIR + "/shape/source/shape < " + job + ".shape"
print("running: " + command)
starttime = time.time()
os.system(command)
endtime = time.time()
timetaken = endtime - starttime
timehours = int(timetaken // 3600)
timeminutes = int((timetaken - timehours * 3600) // 60)
timeseconds = (timetaken - timehours * 3600) - timeminutes * 60
print("Finished running: " + command + '. Time: {0:3}h {1:2}m {2:5.2f}s '
.format(timehours, timeminutes, timeseconds) + '\n')
return
def runemto(self, jobname, folder="./", EMTODIR=None, onlyKFCD=False):
"""Run KGRN and KFCD **WITHOUT** using the batch system and check convergence.
:param jobname: Name of the input files
:type jobname: str
:param folder: Name of the folder where the input files are located (Default value = "./")
:type folder: str
:param EMTODIR: Path to the EMTO installation folder (Default value = None)
:type EMTODIR: str
:param onlyKFCD: True if only KFCD needs to be calculated,
False if also KGRN (Default value = False)
:type onlyKFCD: boolean
:returns: True if the calculations converged, False if not
:rtype: boolean
"""
if jobname is None:
sys.exit("System.runemto: \'jobname\' has to be given!")
if EMTODIR is None:
EMTODIR = self.EMTOdir
if onlyKFCD:
# Make sure folders exist
common.check_folders(folder, folder + "/kfcd")
# Run KFCD
self.emto.kfcd.write_input_file(folder=folder)
job = os.path.join(folder, jobname)
command = "cd {0}; ".format(
folder) + EMTODIR + "/kfcd/source/kfcd_cpa < " + job + ".kfcd"
print("running: " + command)
starttime = time.time()
os.system(command)
endtime = time.time()
timetaken = endtime - starttime
timehours = int(timetaken // 3600)
timeminutes = int((timetaken - timehours * 3600) // 60)
timeseconds = (timetaken - timehours * 3600) - timeminutes * 60
print("Finished running: " + command + '. Time: {0:3}h {1:2}m {2:5.2f}s '
.format(timehours, timeminutes, timeseconds) + '\n')
# Check if we got convergence
converged = self.check_conv(jobname, folder=folder)
else:
# Make sure folders exist
common.check_folders(folder, folder + "/kgrn", folder + "/kgrn/tmp")
# Run KGRN
self.emto.kgrn.write_input_file(folder=folder)
job = os.path.join(folder, jobname)
command = "cd {0}; ".format(
folder) + EMTODIR + "/kgrn/source/kgrn_cpa < " + job + ".kgrn"
print("running: " + command)
starttime = time.time()
os.system(command)
endtime = time.time()
timetaken = endtime - starttime
timehours = int(timetaken // 3600)
timeminutes = int((timetaken - timehours * 3600) // 60)
timeseconds = (timetaken - timehours * 3600) - timeminutes * 60
print("Finished running: " + command + '. Time: {0:3}h {1:2}m {2:5.2f}s '
.format(timehours, timeminutes, timeseconds))
# Check if we got convergence
converged = self.check_conv(jobname, folder=folder)
if converged[0]:
# Make sure folders exist
common.check_folders(folder, folder + "/kfcd")
# Run KFCD
self.emto.kfcd.write_input_file(folder=folder)
job = os.path.join(folder, jobname)
command = "cd {0}; ".format(folder) + EMTODIR +\
"/kfcd/source/kfcd_cpa < " + job + ".kfcd"
print("running: " + command)
starttime = time.time()
os.system(command)
endtime = time.time()
timetaken = endtime - starttime
timehours = int(timetaken // 3600)
timeminutes = int((timetaken - timehours * 3600) // 60)
timeseconds = (timetaken - timehours * 3600) - timeminutes * 60
print("Finished running: " + command + '. Time: {0:3}h {1:2}m {2:5.2f}s '
.format(timehours, timeminutes, timeseconds) + '\n')
converged = self.check_conv(jobname, folder=folder)
return converged
def which_error(self, jobname, folder="./"):
"""Tries to determine the reason why a given KGRN calculation did not converge.
The reason for the crash will be printed on screen.
:param jobname: Name of the KGRN output file
:type jobname: str
:param folder: Name of the folder where the output file is located (Default value = "./")
:type folder: str
:returns: None
:rtype: None
"""
folder = folder + '/kgrn/'
errdict = {'EFXMOM': 'EFXMOM:** Fermi level not found',
'NOTCONV': 'Not converged'}
prntfile = jobname + ".prn"
fn = os.path.join(folder, prntfile)
if not os.path.isfile(fn):
print('Function which_error: file {0} does not exist!'.format(fn))
quit()
pfile = open(fn, "r")
done = False
# knownError = failure caused by a known reason in the errdict.
# Otherwise cause is a more exotic KGRN error or the run crashed.
knownError = False
for line in pfile:
for key in errdict:
if errdict[key] in line:
errorkey = key
errormesg = line
done = True
knownError = True
break
if done:
break
pfile.close()
if knownError == True:
print('Problem in KGRN for {0}: {1}'.format(fn, errormesg))
else:
print(('Problem in KGRN for {0}: Program has crashed or stopped due to a' +
' less common KGRN error. Check the output file.').format(fn))
return
def get_energy(self, jobname=None, func="PBE", folder=None):
"""Extracts total energy from the KFCD output file.
Different total energies given by different xc-functionals can
be selected using the *func* input parameter. Default value is
'PBE'.
:param jobname: Name of the KFCD output file
:type jobname: str
:param func: Name of the xc-functional (Default value = "PBE")
:type func: str
:param folder: Name of the folder where the output file is located (Default value = "./")
:type folder: str
:returns: Total energy if it is found, otherwise return None
:rtype: float or None
"""
if folder == None:
folder = self.folder
if jobname == None:
jobname = self.fulljobname
fn = os.path.join(folder, "kfcd/")
fn = os.path.join(fn, jobname + ".prn")
try:
kfcdfile = open(fn, "r")
energy = "TOT-" + func
energyFound = False
for line in kfcdfile:
if energy in line:
energyFound = True
break
# Total energy of the unit cell
# return float(line.split()[1])
# Energy per WS-radius: Better to use this to get Bmod correctly
if energyFound:
return float(line.split()[3])
else:
return
except IOError:
print('System.get_energy(): {0} does not exist!'.format(fn))
return
def get_moments(self, jobname=None, func="PBE", folder=None):
"""Extracts magnetic moments from the KFCD output file.
:param jobname: Name of the KFCD output file
:type jobname: str
:param func: Name of the xc-functional (Default value = "PBE")
:type func: str
:param folder: Name of the folder where the output file is located (Default value = "./")
:type folder: str
:returns: List of magnetic moments if they are found, otherwise return None
:rtype: list(float) or None
"""
if folder == None:
folder = self.folder
if jobname == None:
jobname = self.fulljobname
fn = os.path.join(folder, "kfcd/")
fn = os.path.join(fn, jobname + ".prn")
readyTag = "KFCD: Finished at:"
momTag = "Magnetic moment for IQ ="
try:
kfcdfile = open(fn, "r")
lines = kfcdfile.readlines()
kfcdfile.close()
moms = []
ready = False
for line in lines:
if readyTag in line:
ready = True
if ready:
for line in lines:
#if enTag in line:
# linesplit = line.split()
# sws = float(linesplit[6])
if momTag in line:
linesplit = line.split()
moms.append(float(linesplit[7]))
return moms
else:
return None
except IOError:
print('System.get_moments(): {0} does not exist!'.format(fn))
return None
def get_fdos(self, jobname = None,folder=None):
""" Extract density of state (DOS) at fermi level from KGRN output
:param jobname: Name of the KGRN output file
:type jobname: str
:param folder: Name of the folder where the output file is located (Default value = "./")
:type folder: str
:returns: DOS at the Fermi level
:rtype: float
"""
if folder == None:
folder = self.folder
if jobname == None:
jobname = self.fulljobname
fn = os.path.join(folder, "kgrn/")
fn = os.path.join(fn, jobname + ".prn")
file = open(fn,'r')
lines = file.readlines()
file.close()
nxtsws_tag = " NXTSWS: IQ IT ITA MMT Type NZ ION ELN QTR SPLIT FIX CONC"
alat_tag = "Alat ="
dos_tag = " Dos(Ef) ="
mag_tag = " Magn. mom. ="
hop_tag = " Hopfield ="
for i in range(len(lines)):
if nxtsws_tag in lines[i]:
indMin = i+2
elif alat_tag in lines[i]:
indMax = i-2
break
#
concs = np.zeros(indMax + 1 - indMin)
doses = np.zeros(indMax + 1 - indMin)
its = np.zeros(indMax + 1 - indMin)
#
ind_tmp = 0
for i in range(indMin,indMax+1):
concs[ind_tmp] = float(lines[i].split()[-1])
its[ind_tmp] = int(lines[i].split()[1])
ind_tmp += 1
#
num_sites = np.max(its)
ind_tmp = len(doses) - 1
# Because the KGRN output file differs between non-magnetic and magnetic
# calculations, we have to do some additional checks to make sure we are
# reading the right values.
for i in range(len(lines)-1,indMax,-1):
if dos_tag in lines[i]:
#print(lines[i])
if mag_tag in lines[i+1] or hop_tag in lines[i+1]:
#print(lines[i+1])
doses[ind_tmp] = float(lines[i].split()[-1])
ind_tmp -= 1
if ind_tmp == -1:
break
#
#for i in range(len(doses)):
# print(doses[i])
#
ry2ev = 13.605698066
dos_tot = 0.0
for i in range(len(concs)):
dos_tot += concs[i]*doses[i]
dos_tot /= num_sites
dos_tot /= ry2ev
return dos_tot
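# Worked example of the averaging above (made-up numbers): two sublattices
# (num_sites = 2), four alloy components with concs = [0.5, 0.5, 0.5, 0.5]
# and per-atom doses = [20.0, 24.0, 18.0, 22.0] states/Ry give
# dos_tot = (0.5*20 + 0.5*24 + 0.5*18 + 0.5*22) / 2 / 13.605698066
#         = 21.0 / 13.605698066 ≈ 1.54 states/eV (per site).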
def create_jobname(self, jobname=None):
"""Creates jobnames based on system information.
Creates a jobname based on the optional input prefix *jobname*.
The format of the full jobname in this case will be:
jobname_3.000000, where 3.000000 is the sws.
If *jobname* prefix is not given, full jobname is generated automatically
based on system data self.sws, self.atoms and self.concs.
The format of the jobname is: au0.50_pd0.50_3.000000, where 0.50 are the
atomic concentrations and 3.000000 is the sws.
:param jobname: Optional prefix for the full jobname (Default value = None)
:type jobname: str
:returns: Newly created full jobname; if no prefix was given, a tuple
(prefix, full jobname) is returned instead
:rtype: str or (str, str)
"""
if jobname is None:
# Prefix not specified => create a default jobname based on
# the types of atoms and their concentrations.
jobname = ''
for i in range(len(self.atoms)):
if jobname == "":
pass
else:
jobname = jobname + "_"
jobname = jobname + \
self.atoms[i].lower() + "%4.2f" % (self.concs[i])
fulljobname = jobname + "_%8.6f" % (self.sws)
return jobname, fulljobname
else:
fulljobname = jobname + "_{0:8.6f}".format(self.sws)
return fulljobname
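# Examples of both branches (hypothetical system data): with
# self.atoms = ['Au', 'Pd'], self.concs = [0.5, 0.5] and self.sws = 3.0,
#
# self.create_jobname('fcc')  ->  'fcc_3.000000'
# self.create_jobname()       ->  ('au0.50_pd0.50', 'au0.50_pd0.50_3.000000')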
def check_str(self, jobname, folder="./"):
"""Checks if a KSTR calculation file exists and has converged.
Only KSTR file is checked because BMDL and SHAPE are fast to
rerun in any case.
:param jobname: Filename of the structure output file
:type jobname: str
:param folder: Folder where the output file is located (Default value = "./")
:type folder: str
:returns: True if KSTR has finished and False if not
:rtype: boolean
"""
folderKSTR = folder + '/kstr/'
# Check if we got convergence in KSTR
prntfile = jobname + ".prn"
convergedKSTR = False
fn = os.path.join(folderKSTR, prntfile)
if not os.path.isfile(fn):
pass
else:
pfile = open(fn, "r")
for line in pfile:
if "KSTR: Finished at :" in line:
convergedKSTR = True
break
pfile.close()
return convergedKSTR
def wait_for_jobs(self, jobsdict, restart_partition='general', sleeptime=60, restart_z=None,
restart_stragglers_after=0.75, kill_if_all_ssusp=False):
"""Loops checking status until no jobs are waiting or running / all are finished.
wait/run states:
======= =========== ==================================================================================================
**Key** **Meaning** **Description**
------- ----------- --------------------------------------------------------------------------------------------------
CF CONFIGURING Job has been allocated resources, but are waiting for them to become ready for use (e.g. booting).
CG COMPLETING Job is in the process of completing. Some processes on some nodes may still be active.
PD PENDING Job is awaiting resource allocation.
R RUNNING Job currently has an allocation.
RS RESIZING Job is about to change size.
S SUSPENDED Job has an allocation, but execution has been suspended.
======= =========== ==================================================================================================
done states:
======= =========== ==============================================================================================================
**Key** **Meaning** **Description**
------- ----------- --------------------------------------------------------------------------------------------------------------
CA CANCELLED Job was explicitly cancelled by the user or system administrator. The job may or may not have been initiated.
CD COMPLETED Job has terminated all processes on all nodes.
F FAILED Job terminated with non-zero exit code or other failure condition.
NF NODE_FAIL Job terminated due to failure of one or more allocated nodes.
PR PREEMPTED Job terminated due to preemption.
TO TIMEOUT Job terminated upon reaching its time limit.
======= =========== ==============================================================================================================
:param jobsdict: Mapping of job IDs to job data, as returned by the batch system
:type jobsdict: dict
:param restart_partition: Batch partition used when restarting straggler jobs (Default value = 'general')
:type restart_partition: str
:param sleeptime: Seconds between status polls (Default value = 60)
:type sleeptime: int
:param restart_z: (Default value = None)
:type restart_z:
:param restart_stragglers_after: Completion fraction after which stragglers may be restarted (Default value = 0.75)
:type restart_stragglers_after: float
:param kill_if_all_ssusp: (Default value = False)
:type kill_if_all_ssusp: boolean
:returns: None
:rtype: None
"""
run_status = ['CONFIGURING', 'COMPLETING',
'PENDING', 'RUNNING', 'RESIZING', 'SUSPENDED']
done_status = ['CANCELLED', 'COMPLETED',
'FAILED', 'NODE_FAIL', 'PREEMPTED', 'TIMEOUT']
import time
import datetime
jobs_amount = len(jobsdict)
print()
print('wait_for_jobs: Submitted {0} jobs'.format(jobs_amount))
status = self.get_status_counts(jobsdict)
t = time.time()
maxllen = 0
print('wait_for_jobs: Will be requesting job statuses' +
' every {0} seconds'.format(sleeptime) + "\n")
while any([k in run_status for k in status.keys()]):
time.sleep(sleeptime)
status = self.get_status_counts(jobsdict)
pctdone = sum([status.get(rs, 0)
for rs in done_status]) / float(sum(status.values()))
# CHECK SUSPENDED; RESTART STRAGGLERS, ETC
outl = '%s %s (%3d%% completion)' % (str(
datetime.timedelta(seconds=int(time.time() - t))), status.__repr__(), pctdone * 100)
# if len(outl) < maxllen:
# pad = maxllen - len(outl)
# outl += ' '*pad
# else:
# maxllen = len(outl)
print(outl)
# sys.stderr.write(outl)
# sys.stderr.flush()
print('completed {0} batch jobs in {1}'.format(
jobs_amount, str(datetime.timedelta(seconds=int(time.time() - t)))))
return
def get_status_counts(self, jobids=None):
"""Returns the counts of all jobs by status category.
:param jobids: jobsdict or list of job IDs; None means all jobs (Default value = None)
:type jobids: dict or list or None
:returns: Counts of jobs keyed by status category
:rtype: dict
"""
from collections import defaultdict
jobs_status = self.get_jobs_status(jobids)
status_counts = defaultdict(int)
for jd in jobs_status.values():
status_counts[jd['State'].split()[0]] += 1
return dict(status_counts)
def get_jobs_status(self, jobids=None, toplevel=True):
"""Returns status of the jobs indicated
(jobsdict or list of job ids) or all jobs if no jobids supplied.
Set toplevel=False for job step data.
:param jobids: List of job IDs (Default value = None)
:type jobids: dict or list
:param toplevel: (Default value = True)
:type toplevel: boolean
:returns: Mapping from job ID to a dict of its sacct fields
:rtype: dict
"""
import subprocess
if jobids is None:
sacct_return = subprocess.Popen(
'sacct -p -l', shell=True, stdout=subprocess.PIPE).stdout.readlines()
else:
if isinstance(jobids, dict):
qjobs = jobids.keys()
else:
qjobs = jobids
sacct_return = subprocess.Popen(
'sacct -j %s -p -l' % (
','.join(qjobs),), shell=True, stdout=subprocess.PIPE).stdout.readlines()
jobs_status = {}
for el in sacct_return[1:]:
d = dict(
zip(sacct_return[0].strip().split('|'), el.strip().split('|')))
if not '.' in d['JobID'] or not toplevel:
jobs_status[d['JobID']] = d
return jobs_status
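# Shape of the parsed result (hypothetical sacct output): every data row of
# `sacct -p -l` is split on '|' and zipped with the header row, so
#
# self.get_jobs_status(['123', '456'])
# # -> {'123': {'JobID': '123', 'State': 'RUNNING', ...},
# #     '456': {'JobID': '456', 'State': 'COMPLETED', ...}}
#
# Job steps such as '123.batch' are skipped while toplevel is True.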
def submit_jobs(self, jobnames, folder=None):
"""Takes a list of jobnames and submits the corresponding
batch scripts to the batch system.
:param jobnames: List of jobnames
:type jobnames: list(str)
:param folder: Folder where the batch scripts are located (Default value = None)
:type folder: str
:returns: List of job ID numbers
:rtype: list(str)
"""
import time
from pyemto.utilities.utils import run_emto
sleeptime = 10
if folder is None:
folder = self.folder
job_ids = []
for i in range(len(jobnames)):
job_ids.append(run_emto(jobnames[i], folder=self.folder))
# Flatten job_ids list and convert the integers into strings
job_ids = [item for sublist in job_ids for item in sublist]
for i in range(len(job_ids)):
job_ids[i] = str(job_ids[i])
# Give SLURM some time to register the jobs.
# If continued straight away the self.wait_for_jobs
# script will likely think all the jobs have finished
# since it cannot find them yet.
time.sleep(sleeptime)
return job_ids
| hpleva/pyemto | pyemto/system.py | Python | mit | 148,431 | 0.003099 |
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
from genomicode import mplgraph
from genomicode import filelib
in_data = antecedents
matrix = [x for x in filelib.read_cols(in_data.identifier)]
header = matrix[0]
index = header.index('Confidence')
matrix = matrix[1:]
confidence = [float(i[index]) for i in matrix]
sample = [i[0] for i in matrix]
if confidence == [''] * len(matrix) or 'Correct?' in header:
index = header.index('Predicted_class')
class_value = [i[index] for i in matrix]
label_dict = dict()
label_list = []
i = -1
for label in class_value:
if label not in label_dict.keys():
i = i + 1
label_dict[label] = i
label_list.append(label_dict[label])
yticks = label_dict.keys()
ytick_pos = [label_dict[i] for i in label_dict.keys()]
fig = mplgraph.barplot(label_list,
box_label=sample,
ylim=(-0.5, 1.5),
ytick_pos=ytick_pos,
yticks=yticks,
xtick_rotation='vertical',
ylabel='Prediction',
xlabel='Sample')
fig.savefig(outfile)
else:
fig = mplgraph.barplot(confidence,
box_label=sample,
ylim=(-1.5, 1.5),
xtick_rotation='vertical',
ylabel='Prediction',
xlabel='Sample')
fig.savefig(outfile)
assert filelib.exists_nz(outfile), (
'the output file %s for plot_prediction_bar fails' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
loocv = ''
if antecedents.data.attributes['loocv'] == 'yes':
loocv = 'loocv'
filename = ('prediction_' + original_file + '_' +
antecedents.data.attributes['classify_alg'] + loocv + '.png')
return filename
| jefftc/changlab | Betsy/Betsy/modules/plot_prediction.py | Python | mit | 2,613 | 0.003827 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlConfigGeneral(PerlPackage):
"""Config::General - Generic Config Module"""
homepage = "https://metacpan.org/pod/Config::General"
url = "https://cpan.metacpan.org/authors/id/T/TL/TLINDEN/Config-General-2.63.tar.gz"
version('2.63', sha256='0a9bf977b8aabe76343e88095d2296c8a422410fd2a05a1901f2b20e2e1f6fad')
| iulian787/spack | var/spack/repos/builtin/packages/perl-config-general/package.py | Python | lgpl-2.1 | 557 | 0.005386 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'openPathTool.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(457, 95)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.formLayout = QtGui.QFormLayout(self.centralwidget)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.pathInLineEdit = QtGui.QLineEdit(self.centralwidget)
self.pathInLineEdit.setObjectName(_fromUtf8("pathInLineEdit"))
self.formLayout.setWidget(0, QtGui.QFormLayout.SpanningRole, self.pathInLineEdit)
self.pathOutLineEdit = QtGui.QLineEdit(self.centralwidget)
self.pathOutLineEdit.setReadOnly(True)
self.pathOutLineEdit.setObjectName(_fromUtf8("pathOutLineEdit"))
self.formLayout.setWidget(1, QtGui.QFormLayout.SpanningRole, self.pathOutLineEdit)
self.buttonLayout = QtGui.QHBoxLayout()
self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
self.explorerButton = QtGui.QPushButton(self.centralwidget)
self.explorerButton.setObjectName(_fromUtf8("explorerButton"))
self.buttonLayout.addWidget(self.explorerButton)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.buttonLayout.addItem(spacerItem)
self.convertButton = QtGui.QPushButton(self.centralwidget)
self.convertButton.setObjectName(_fromUtf8("convertButton"))
self.buttonLayout.addWidget(self.convertButton)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.buttonLayout.addItem(spacerItem1)
self.closeButton = QtGui.QPushButton(self.centralwidget)
self.closeButton.setObjectName(_fromUtf8("closeButton"))
self.buttonLayout.addWidget(self.closeButton)
self.formLayout.setLayout(2, QtGui.QFormLayout.SpanningRole, self.buttonLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.pathInLineEdit.setPlaceholderText(_translate("MainWindow", "Input Path", None))
self.pathOutLineEdit.setPlaceholderText(_translate("MainWindow", "Output Path", None))
self.explorerButton.setText(_translate("MainWindow", "Open In Explorer", None))
self.convertButton.setText(_translate("MainWindow", "Convert", None))
self.closeButton.setText(_translate("MainWindow", "Close", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| david2777/DavidsTools | Standalone/openPathTool/UI_openPathTool.py | Python | gpl-2.0 | 3,656 | 0.003282 |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Autoregressive State Space Model Tests."""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import AutoregressiveStateSpaceModel
from tensorflow_probability.python.sts import LocalLevelStateSpaceModel
def ar_explicit_logp(y, coefs, level_scale):
"""Manual log-prob computation for an autoregressive process."""
num_coefs = len(coefs)
lp = 0.
# For the first few steps of y, where previous values
# are not available, we model them as zero-mean with
# stddev `prior_scale`.
for i in range(num_coefs):
zero_padded_y = np.zeros([num_coefs])
zero_padded_y[num_coefs - i:num_coefs] = y[:i]
pred_y = np.dot(zero_padded_y, coefs[::-1])
lp += tfd.Normal(pred_y, level_scale).log_prob(y[i])
for i in range(num_coefs, len(y)):
pred_y = np.dot(y[i - num_coefs:i], coefs[::-1])
lp += tfd.Normal(pred_y, level_scale).log_prob(y[i])
return lp
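# Minimal self-contained check of the recursion above (made-up data): an
# AR(1) with coefficient 0.5 predicts y[i] from 0.5 * y[i-1], and the first
# sample, with no history available, is modelled as zero-mean:
#
# y = np.array([1.0, 0.4, 0.3])
# lp = ar_explicit_logp(y, coefs=np.array([0.5]), level_scale=1.0)
# # lp = Normal(0.0, 1).log_prob(1.0) + Normal(0.5, 1).log_prob(0.4)
# #      + Normal(0.2, 1).log_prob(0.3)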
class _AutoregressiveStateSpaceModelTest(test_util.TestCase):
def testEqualsLocalLevel(self):
# An AR1 process with coef 1 is just a random walk, equivalent to a local
# level model. Test that both models define the same distribution
# (log-prob).
num_timesteps = 10
observed_time_series = self._build_placeholder(
np.random.randn(num_timesteps, 1))
level_scale = self._build_placeholder(0.1)
# We'll test an AR1 process, and also (just for kicks) that the trivial
# embedding as an AR2 process gives the same model.
coefficients_order1 = np.array([1.]).astype(self.dtype)
coefficients_order2 = np.array([1., 0.]).astype(self.dtype)
ar1_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order1,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale]))
ar2_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order2,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
local_level_ssm = LocalLevelStateSpaceModel(
num_timesteps=num_timesteps,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale]))
ar1_lp, ar2_lp, ll_lp = self.evaluate(
(ar1_ssm.log_prob(observed_time_series),
ar2_ssm.log_prob(observed_time_series),
local_level_ssm.log_prob(observed_time_series)))
self.assertAllClose(ar1_lp, ll_lp)
self.assertAllClose(ar2_lp, ll_lp)
def testLogprobCorrectness(self):
# Compare the state-space model's log-prob to an explicit implementation.
num_timesteps = 10
observed_time_series_ = np.random.randn(num_timesteps)
coefficients_ = np.array([.7, -.1]).astype(self.dtype)
level_scale_ = 1.0
observed_time_series = self._build_placeholder(observed_time_series_)
level_scale = self._build_placeholder(level_scale_)
expected_logp = ar_explicit_logp(
observed_time_series_, coefficients_, level_scale_)
ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 0.]))
lp = ssm.log_prob(observed_time_series[..., tf.newaxis])
self.assertAllClose(self.evaluate(lp), expected_logp)
def testBatchShape(self):
seed = test_util.test_seed(sampler_type='stateless')
# Check that the model builds with batches of parameters.
order = 3
batch_shape = [4, 2]
# No `_build_placeholder`, because coefficients must have static shape.
coefficients = np.random.randn(*(batch_shape + [order])).astype(self.dtype)
level_scale = self._build_placeholder(
np.exp(np.random.randn(*batch_shape)))
ssm = AutoregressiveStateSpaceModel(
num_timesteps=10,
coefficients=coefficients,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=self._build_placeholder(np.ones([order]))))
if self.use_static_shape:
self.assertAllEqual(
tensorshape_util.as_list(ssm.batch_shape), batch_shape)
else:
self.assertAllEqual(self.evaluate(ssm.batch_shape_tensor()), batch_shape)
y = ssm.sample(seed=seed)
if self.use_static_shape:
self.assertAllEqual(tensorshape_util.as_list(y.shape)[:-2], batch_shape)
else:
self.assertAllEqual(self.evaluate(tf.shape(y))[:-2], batch_shape)
def _build_placeholder(self, ndarray):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype`, and shape specified
statically only if `self.use_static_shape` is `True`.
"""
ndarray = np.asarray(ndarray).astype(self.dtype)
return tf1.placeholder_with_default(
ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestStaticShape32(
_AutoregressiveStateSpaceModelTest):
dtype = np.float32
use_static_shape = True
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestDynamicShape32(
_AutoregressiveStateSpaceModelTest):
dtype = np.float32
use_static_shape = False
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestStaticShape64(
_AutoregressiveStateSpaceModelTest):
dtype = np.float64
use_static_shape = True
del _AutoregressiveStateSpaceModelTest # Don't run tests for the base class.
if __name__ == '__main__':
test_util.main()
| tensorflow/probability | tensorflow_probability/python/sts/components/autoregressive_test.py | Python | apache-2.0 | 6,809 | 0.004112 |
# Removing stop words
# What to do with the Retweets (RT)?
# Make adjust so that the # and @ are attached to their associated word (i.e. #GOP, @twitter)
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import sys
def remove_stopwords(tweets):
with open(tweets, 'r', buffering=1028) as read_tweet:
for tweet in read_tweet:
#Use stop word method
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(tweet)
filtered_tweet = []
for word in word_tokens:
if word not in stop_words:
# Capture only words not listed in the stop word list
filtered_tweet.append(word)
print(filtered_tweet)
def main():
tweets = "/Users/alanseciwa/Desktop/Independent_Study/Sep16-GOP-TweetsONLY/clean_data-TWEETONLY.csv"
remove_stopwords(tweets)
if __name__ == '__main__':
main()
sys.exit() | aseciwa/independent-study | scripts/stopWords.py | Python | mit | 971 | 0.008239 |
"""
WSGI config for Texas LAN Web project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| TexasLAN/texaslan.org | config/wsgi.py | Python | mit | 1,450 | 0 |
"""
PySAR
Polarimetric SAR decomposition
Contents
--------
decomp_fd(hhhh,vvvv,hvhv,hhvv,null=None,numthrd=None)        : Freeman-Durden 3-component decomposition
decomp_haa(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C')        : Cloude-Pottier H/A/alpha decomposition
"""
from __future__ import print_function, division
import sys,os
import numpy as np
###===========================================================================================
def decomp_fd(hhhh,vvvv,hvhv,hhvv,null=None,numthrd=None,maxthrd=8):
"""
Freeman-Durden 3-component decomposition
Parameters
----------
hhhh : ndarray
horizontally polarized power
vvvv : ndarray
vertically polarized power
hvhv : ndarray
cross-polarized power
hhvv : ndarray
co-polarized cross product (complex-valued)
null : float or None
null value to exclude from decomposition
numthrd : int or None
number of pthreads; None sets numthrd based on the data array size [None]
maxthrd : int or None
maximum allowable numthrd [8]
Returns
-------
ps : ndarray
surface-scattered power
pd : ndarray
double-bounce power
pv : ndarray
volume-scattered power
Notes
-----
* arrays are returned with the same type as hhhh data
Reference
---------
1. Freeman, A. and Durden, S., "A three-component scattering model for polarimetric SAR data", *IEEE Trans. Geosci. Remote Sensing*, vol. 36, no. 3, pp. 963-973, May 1998.
"""
from pysar.polsar._decomp_modc import free_durden
if not numthrd:
numthrd = np.max([len(hhhh)//1e5, 1])
if numthrd > maxthrd: numthrd = maxthrd
elif numthrd < 1:
raise ValueError('numthrd must be >= 1')
if null:
nullmask = np.abs(hhhh-null) < 1.e-7
nullmask += np.abs(vvvv-null) < 1.e-7
nullmask += np.abs(hvhv-null) < 1.e-7
nullmask += np.abs(hhvv-null) < 1.e-7
hhvv[nullmask] = 0.
hhhhtype = None
if hhhh.dtype != np.float32:
hhhhtype = hhhh.dtype
hhhh = hhhh.astype(np.float32)
vvvv = vvvv.astype(np.float32)
hvhv = hvhv.astype(np.float32)
hhvv = hhvv.astype(np.complex64)
if not all({2-x for x in [hhhh.ndim, vvvv.ndim, hvhv.ndim, hhvv.ndim]}):
hhhh, vvvv = hhhh.flatten(), vvvv.flatten()
hvhv, hhvv = hvhv.flatten(), hhvv.flatten()
P = free_durden(hhhh, vvvv, hvhv, hhvv, numthrd)
if hhhhtype: P = P.astype(hhhhtype)
P = P.reshape(3,-1)
if null: P[0,nullmask], P[1,nullmask], P[2,nullmask] = null, null, null
return P[0,:], P[1,:], P[2,:]
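# Usage sketch with synthetic covariance data (real inputs would come from
# multilooked PolSAR products; the values below are made up):
#
# import numpy as np
# hhhh = np.full(4, 2.0, dtype=np.float32)
# vvvv = np.full(4, 1.5, dtype=np.float32)
# hvhv = np.full(4, 0.2, dtype=np.float32)
# hhvv = np.full(4, 0.9 + 0.1j, dtype=np.complex64)
# ps, pd, pv = decomp_fd(hhhh, vvvv, hvhv, hhvv)
# # ps, pd and pv together account for the scattered power of each pixel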
###---------------------------------------------------------------------------------
def decomp_haa(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C',null=None,numthrd=None,maxthrd=8):
"""
Cloude-Pottier H/A/alpha polarimetric decomposition
Parameters
----------
hhhh : ndarray
horizontal co-polarized power (or 0.5|HH + VV|^2 if matform = 'T')
vvvv : ndarray
vertical co-polarized power (or 0.5|HH - VV|^2 if matform = 'T')
hvhv : ndarray
cross-polarized power (2|HV|^2 for matform = 'T')
hhhv : ndarray
HH.HV* cross-product (or 0.5(HH+VV)(HH-VV)* for matform = 'T')
hhvv : ndarray
HH.VV* cross-product (or HV(HH+VV)* for matform = 'T')
hvvv : ndarray
HV.VV* cross-product (or HV(HH-VV)* for matform = 'T')
matform : str {'C' or 'T'}
form of input matrix entries: 'C' for covariance matrix and
'T' for coherency matrix ['C'] (see ref. 1)
null : float or None
null value to exclude from decomposition
numthrd : int or None
number of pthreads; None sets numthrd based on the data array size [None]
maxthrd : int or None
maximum allowable numthrd [8]
Returns
-------
H : ndarray
entropy (H = -(p1*log_3(p1) + p2*log_3(p2) + p3*log_3(p3))
where pi = lam_i/(hhhh+vvvv+hvhv)) and lam is an eigenvalue
A : ndarray
anisotropy (A = (lam_2-lam_3)/(lam_2+lam_3) --> lam_1 >= lam_2 >= lam_3
alpha : ndarray
alpha angle in degrees (see ref. 1)
Notes
-----
* arrays are returned with the same type as hhhh data
* if covariance matrix form is used, do not multiply entries by any constants
Reference
---------
1. Cloude, S. and Pottier, E., "An entropy based classification scheme for land applications of polarimetric SAR", *IEEE Trans. Geosci. Remote Sensing*, vol. 35, no. 1, pp. 68-78, Jan. 1997.
"""
from pysar.polsar._decomp_modc import cloude_pot
if matform == 'C' or matform == 'c':
mtf = 1
elif matform == 'T' or matform == 't':
mtf = 0
else:
raise ValueError("matform must be 'C' or 'T'")
if not numthrd:
numthrd = np.max([len(hhhh)//1e5, 1])
if numthrd > maxthrd: numthrd = maxthrd
elif numthrd < 1:
raise ValueError('numthrd must be >= 1')
if null:
nullmask = np.abs(hhhh-null) < 1.e-7
nullmask += np.abs(vvvv-null) < 1.e-7
nullmask += np.abs(hvhv-null) < 1.e-7
nullmask += np.abs(hhhv-null) < 1.e-7
nullmask += np.abs(hhvv-null) < 1.e-7
nullmask += np.abs(hvvv-null) < 1.e-7
hhhh[nullmask], vvvv[nullmask] = 0., 0.
hvhv[nullmask] = 0.
hhhhtype = None
if hhhh.dtype != np.float32:
hhhhtype = hhhh.dtype
hhhh = hhhh.astype(np.float32)
vvvv = vvvv.astype(np.float32)
hvhv = hvhv.astype(np.float32)
hhhv = hhhv.astype(np.complex64)
hhvv = hhvv.astype(np.complex64)
hvvv = hvvv.astype(np.complex64)
if not all({2-x for x in [hhhh.ndim, vvvv.ndim, hvhv.ndim, hhhv.ndim, hhvv.ndim, hvvv.ndim]}):
hhhh, vvvv = hhhh.flatten(), vvvv.flatten()
hvhv, hhvv = hvhv.flatten(), hhvv.flatten()
hhhv, hvvv = hhhv.flatten(), hvvv.flatten()
P = cloude_pot(hhhh, vvvv, hvhv, hhhv, hhvv, hvvv, mtf, numthrd)
if hhhhtype: P = P.astype(hhhhtype)
P = P.reshape(3,-1)
if null: P[0,nullmask], P[1,nullmask], P[2,nullmask] = null, null, null
return P[0,:], P[1,:], P[2,:]
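# Worked example of the returned quantities (hypothetical eigenvalues of the
# coherency matrix): lam = (3.0, 1.5, 0.5) gives pseudo-probabilities
# p = (0.6, 0.3, 0.1), so
# H = -(0.6*log3(0.6) + 0.3*log3(0.3) + 0.1*log3(0.1)) ≈ 0.817
# A = (1.5 - 0.5) / (1.5 + 0.5) = 0.5
# and alpha follows from the eigenvectors (see ref. 1).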
def decomp_cp(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C',null=None,numthrd=None,maxthrd=8):
return decomp_haa(hhhh=hhhh,vvvv=vvvv,hvhv=hvhv,hhhv=hhhv,hhvv=hhvv,hvvv=hvvv,
matform=matform,null=null,numthrd=numthrd,maxthrd=maxthrd)
# Assigning __doc__ inside the body would only create a local variable;
# attach the shared docstring to the function object instead.
decomp_cp.__doc__ = decomp_haa.__doc__
| bminchew/PySAR | pysar/polsar/decomp.py | Python | gpl-3.0 | 6,649 | 0.025417 |
# sAsync:
# An enhancement to the SQLAlchemy package that provides persistent
# item-value stores, arrays, and dictionaries, and an access broker for
# conveniently managing database access, table setup, and
# transactions. Everything can be run in an asynchronous fashion using
# the Twisted framework and its deferred processing capabilities.
#
# Copyright (C) 2006, 2015 by Edwin A. Suominen, http://edsuom.com
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Unit tests for sasync.items.py.
"""
from twisted.internet.defer import Deferred, DeferredList
from sqlalchemy import *
from sasync.database import transact, AccessBroker
import sasync.items as items
from sasync.test.testbase import MockThing, TestCase
GROUP_ID = 123
VERBOSE = False
db = 'items.db'
class TestableItemsTransactor(items.Transactor):
@transact
def pre(self):
# Group 123
self.sasync_items.insert().execute(
group_id=123, name='foo', value='OK')
# Set up an experienced MockThing to have pickled
thing = MockThing()
thing.method(1)
self.sasync_items.insert().execute(
group_id=123, name='bar', value=thing)
# Group 124
self.sasync_items.insert().execute(
group_id=124, name='foo', value='bogus')
self.sasync_items.insert().execute(
group_id=124, name='invalid', value='bogus')
@transact
def post(self):
self.sasync_items.delete().execute()
class ItemsMixin:
def tearDown(self):
def _tearDown():
si = self.i.t.sasync_items
si.delete(si.c.group_id == GROUP_ID).execute()
d = self.i.t.deferToQueue(_tearDown, niceness=20)
d.addCallback(lambda _: self.i.shutdown())
return d
class TestItemsTransactor(ItemsMixin, TestCase):
def setUp(self):
url = "sqlite:///%s" % db
self.i = items.Items(GROUP_ID, url)
self.i.t = TestableItemsTransactor(self.i.groupID, url)
return self.i.t.pre()
def tearDown(self):
return self.i.t.post()
def test_load(self):
def gotValue(value, name):
if name == 'foo':
self.failUnlessEqual(value, 'OK')
else:
self.failUnless(
isinstance(value, MockThing),
"Item 'bar' is a '%s', not an instance of 'MockThing'" \
% value)
self.failUnless(
value.beenThereDoneThat,
"Class instance wasn't properly persisted with its state")
self.failUnlessEqual(
value.method(2.5), 5.0,
"Class instance wasn't properly persisted with its method")
dList = []
for name in ('foo', 'bar'):
dList.append(self.i.t.load(name).addCallback(gotValue, name))
return DeferredList(dList)
def test_loadAbsent(self):
def gotValue(value):
self.failUnless(
isinstance(value, items.Missing),
"Should have returned 'Missing' object, not '%s'!" % \
str(value))
def gotExpectedError(failure):
self.fail("Shouldn't have raised error on missing value")
return self.i.t.load('invalid').addCallbacks(
gotValue, gotExpectedError)
def test_loadAll(self):
def loaded(items):
itemKeys = items.keys()
itemKeys.sort()
self.failUnlessEqual(itemKeys, ['bar', 'foo'])
return self.i.t.loadAll().addCallback(loaded)
def insertLots(self, callback):
noviceThing = MockThing()
experiencedThing = MockThing()
experiencedThing.method(0)
self.whatToInsert = {
'alpha':5937341,
'bravo':'abc',
'charlie':-3.1415,
'delta':(1,2,3),
'echo':True,
'foxtrot':False,
'golf':noviceThing,
'hotel':experiencedThing,
'india':MockThing
}
dList = []
for name, value in self.whatToInsert.iteritems():
dList.append(self.i.t.insert(name, value))
return DeferredList(dList).addCallback(
callback, self.whatToInsert.copy())
def test_insert(self):
def done(null, items):
def check():
table = self.i.t.sasync_items
for name, inserted in items.iteritems():
value = table.select(
and_(table.c.group_id == 123,
table.c.name == name)
).execute().fetchone()['value']
msg = "Inserted '{}:{}' ".format(name, inserted) +\
"but read '{}' back from the database!".format(value)
self.failUnlessEqual(value, inserted, msg)
for otherName, otherValue in items.iteritems():
if otherName != name and value == otherValue:
self.fail(
"Inserted item '%s' is equal to item '%s'" % \
(name, otherName))
return self.i.t.deferToQueue(check)
return self.insertLots(done)
def test_deleteOne(self):
def gotOriginal(value):
self.failUnlessEqual(value, 'OK')
return self.i.t.delete('foo').addCallback(getAfterDeleted)
def getAfterDeleted(null):
return self.i.t.load('foo').addCallback(checkIfDeleted)
def checkIfDeleted(value):
self.failUnless(isinstance(value, items.Missing))
return self.i.t.load('foo').addCallback(gotOriginal)
def test_deleteMultiple(self):
def getAfterDeleted(null):
return self.i.t.loadAll().addCallback(checkIfDeleted)
def checkIfDeleted(values):
self.failUnlessEqual(values, {})
return self.i.t.delete('foo', 'bar').addCallback(getAfterDeleted)
def test_namesFew(self):
def got(names):
names.sort()
self.failUnlessEqual(names, ['bar', 'foo'])
return self.i.t.names().addCallback(got)
def test_namesMany(self):
def get(null, items):
return self.i.t.names().addCallback(got, items.keys())
def got(names, shouldHave):
shouldHave += ['foo', 'bar']
names.sort()
shouldHave.sort()
self.failUnlessEqual(names, shouldHave)
return self.insertLots(get)
def test_update(self):
def update(null, items):
return DeferredList([
self.i.t.update('alpha', 1),
self.i.t.update('bravo', 2),
self.i.t.update('charlie', 3)
]).addCallback(check, items)
def check(null, items):
return self.i.t.loadAll().addCallback(loaded, items)
def loaded(loadedItems, controlItems):
controlItems.update({'alpha':1, 'bravo':2, 'charlie':3})
for name, value in controlItems.iteritems():
self.failUnlessEqual(
value, loadedItems.get(name, 'Impossible Value'))
return self.insertLots(update)
class TestItems(ItemsMixin, TestCase):
def setUp(self):
self.i = items.Items(GROUP_ID, "sqlite:///%s" % db)
def test_insertAndLoad(self):
nouns = ('lamp', 'rug', 'chair')
def first(null):
return self.i.loadAll().addCallback(second)
def second(items):
self.failUnlessEqual(items['Nouns'], nouns)
return self.i.insert('Nouns', nouns).addCallback(first)
def test_insertAndDelete(self):
items = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4}
def first(null):
return self.i.delete('c').addCallback(second)
def second(null):
return self.i.names().addCallback(third)
def third(nameList):
desiredList = [x for x in items.keys() if x != 'c']
desiredList.sort()
nameList.sort()
self.failUnlessEqual(nameList, desiredList)
dL = []
for name, value in items.iteritems():
dL.append(self.i.insert(name, value))
return DeferredList(dL).addCallback(first)
def test_insertAndLoadAll(self):
items = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4}
def first(null):
return self.i.loadAll().addCallback(second)
def second(loadedItems):
self.failUnlessEqual(loadedItems, items)
dL = []
for name, value in items.iteritems():
dL.append(self.i.insert(name, value))
return DeferredList(dL).addCallback(first)
def test_insertAndUpdate(self):
items = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4}
def first(null):
return self.i.update('b', 10).addCallback(second)
def second(null):
return self.i.loadAll().addCallback(third)
def third(loadedItems):
expectedItems = {'a':0, 'b':10, 'c':2, 'd':3, 'e':4}
self.failUnlessEqual(loadedItems, expectedItems)
dL = []
for name, value in items.iteritems():
dL.append(self.i.insert(name, value))
return DeferredList(dL).addCallback(first)
class TestItemsIntegerNames(ItemsMixin, TestCase):
def setUp(self):
self.items = {'1':'a', 2:'b', 3:'c', '04':'d'}
self.i = items.Items(GROUP_ID, "sqlite:///%s" % db, nameType=int)
def insertStuff(self):
dL = []
for name, value in self.items.iteritems():
dL.append(self.i.insert(name, value))
return DeferredList(dL)
def test_names(self):
def first(null):
return self.i.names().addCallback(second)
def second(names):
names.sort()
self.failUnlessEqual(names, [1, 2, 3, 4])
return self.insertStuff().addCallback(first)
def test_loadAll(self):
def first(null):
return self.i.loadAll().addCallback(second)
def second(loaded):
self.failUnlessEqual(loaded, {1:'a', 2:'b', 3:'c', 4:'d'})
return self.insertStuff().addCallback(first)
class TestItemsStringNames(ItemsMixin, TestCase):
def setUp(self):
self.items = {'1':'a', 2:'b', u'3':'c', "4":'d'}
self.i = items.Items(GROUP_ID, "sqlite:///%s" % db, nameType=str)
def insertStuff(self):
dL = []
for name, value in self.items.iteritems():
dL.append(self.i.insert(name, value))
return DeferredList(dL)
def test_names(self):
def first(null):
return self.i.names().addCallback(second)
def second(names):
names.sort()
self.failUnlessEqual(names, ['1', '2', '3', '4'])
return self.insertStuff().addCallback(first)
def test_loadAll(self):
def first(null):
return self.i.loadAll().addCallback(second)
def second(loaded):
self.failUnlessEqual(loaded, {'1':'a', '2':'b', '3':'c', '4':'d'})
return self.insertStuff().addCallback(first)
| edsuom/sAsync | sasync/test/test_items.py | Python | apache-2.0 | 12,595 | 0.006431 |
import json
import time
from _md5 import md5
import requests
import RolevPlayer as r
def now_playing_last_fm(artist, track):
update_now_playing_sig = md5(("api_key" + r.API_KEY +
"artist" + artist +
"method" + "track.updateNowPlaying" +
"sk" + r.SK +
"track" + track +
r.SECRET).encode('utf-8')).hexdigest()
url = "http://ws.audioscrobbler.com/2.0/?method=track.updateNowPlaying" + \
"&api_key=" + r.API_KEY + \
"&api_sig=" + update_now_playing_sig + \
"&artist=" + artist + \
"&format=json" + \
"&sk=" + r.SK + \
"&track=" + track
req = requests.post(url).text
json_obj = json.loads(req)
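# The api_sig above follows the Last.fm signing rule: concatenate the signed
# request parameters (everything except 'format') sorted by name as
# name+value, append the shared secret, and take the md5 hex digest.
# A generic helper (a sketch, not part of this module) would be:
#
# def sign(params, secret):
#     raw = ''.join(k + str(params[k]) for k in sorted(params)) + secret
#     return md5(raw.encode('utf-8')).hexdigest()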
def scrobble(artist, track):
# this gives us a timestamp, casted to integer
ts = time.time()
scrobbling_sig = md5(("api_key" + r.API_KEY +
"artist" + artist +
"method" + "track.scrobble" +
"sk" + r.SK +
"timestamp" + str(ts) +
"track" + track +
r.SECRET).encode('utf-8')).hexdigest()
req = requests.post(
"http://ws.audioscrobbler.com/2.0/?method=track.scrobble" +
"&api_key=" + r.API_KEY +
"&api_sig=" + scrobbling_sig +
"&artist=" + artist +
"&format=json" +
"&sk=" + r.SK +
"×tamp=" + str(ts) +
"&track=" + track).text
json_obj = json.loads(req)
| SimeonRolev/RolevPlayerQT | RolevPlayer/Scrobbler.py | Python | gpl-3.0 | 1,642 | 0.003654 |
#!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_REMOVE_FILES_",
"example_value": """
[
"/tmp/foo",
"/bar/to/delete"
]
""",
"description": """* Verify files and file trees are uninstalled
""",
"form_definition": """
Desc: |
A rule defining a set of files to remove, fed to the 'remove_files' compliance object.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: remove_files
Type: json
Format: list
Inputs:
-
Id: path
Label: File path
DisplayModeLabel: ""
LabelCss: edit16
Mandatory: Yes
Help: You must set paths in fully qualified form.
Type: string
""",
}
import os
import sys
import re
import json
from glob import glob
import shutil
sys.path.append(os.path.dirname(__file__))
from comp import *
blacklist = [
"/",
"/root"
]
class CompRemoveFiles(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
patterns = self.get_rules()
patterns = sorted(list(set(patterns)))
self.files = self.expand_patterns(patterns)
if len(self.files) == 0:
pinfo("no files matching patterns")
raise NotApplicable
def expand_patterns(self, patterns):
l = []
for pattern in patterns:
l += glob(pattern)
return l
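# Example (hypothetical patterns): glob() only returns paths that currently
# exist, so
#
# self.expand_patterns(["/tmp/foo", "/var/log/app*.log"])
#
# yields "/tmp/foo" only if it is present, plus any files matching the
# wildcard; a pattern with no matches contributes nothing.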
def fixable(self):
return RET_NA
def check_file(self, _file):
if not os.path.exists(_file):
pinfo(_file, "does not exist. on target.")
return RET_OK
perror(_file, "exists. shouldn't")
return RET_ERR
def fix_file(self, _file):
if not os.path.exists(_file):
return RET_OK
try:
if os.path.isdir(_file) and not os.path.islink(_file):
shutil.rmtree(_file)
else:
os.unlink(_file)
pinfo(_file, "deleted")
except Exception as e:
perror("failed to delete", _file, "(%s)"%str(e))
return RET_ERR
return RET_OK
def check(self):
r = 0
for _file in self.files:
r |= self.check_file(_file)
return r
def fix(self):
r = 0
for _file in self.files:
r |= self.fix_file(_file)
return r
if __name__ == "__main__":
main(CompRemoveFiles)
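# Hedged usage sketch (the variable name suffix "RULE" is illustrative): rules
# reach the compliance object through environment variables carrying the
# prefix declared above, e.g.
#
#   OSVC_COMP_REMOVE_FILES_RULE='["/tmp/foo", "/bar/to/delete"]' \
#       python remove_files.py check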
| tanji/replication-manager | share/opensvc/compliance/com.replication-manager/remove_files.py | Python | gpl-3.0 | 2,367 | 0.005492 |
conanfile = """from conans import ConanFile, CMake, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
def source(self):
self.run("git clone https://github.com/memsharded/hello.git")
self.run("cd hello && git checkout static_shared")
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
tools.replace_in_file("hello/CMakeLists.txt", "PROJECT(MyHello)", '''PROJECT(MyHello)
include(${{CMAKE_BINARY_DIR}}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self.settings)
shared = "-DBUILD_SHARED_LIBS=ON" if self.options.shared else ""
self.run('cmake hello %s %s' % (cmake.command_line, shared))
self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="hello")
self.copy("*hello.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_header = """from conans import ConanFile, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
# No settings/options are necessary, this is header only
def source(self):
'''retrieval of the source code here. Remember you can also put the code in the folder and
use exports instead of retrieving it with this source() method
'''
#self.run("git clone ...") or
#tools.download("url", "file.zip")
#tools.unzip("file.zip" )
def package(self):
self.copy("*.h", "include")
"""
test_conanfile = """from conans import ConanFile, CMake
import os
channel = os.getenv("CONAN_CHANNEL", "{channel}")
username = os.getenv("CONAN_USERNAME", "{user}")
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "{name}/{version}@%s/%s" % (username, channel)
generators = "cmake"
def build(self):
cmake = CMake(self.settings)
self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def imports(self):
self.copy("*.dll", "bin", "bin")
self.copy("*.dylib", "bin", "bin")
def test(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
"""
test_cmake = """PROJECT(PackageTest)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
ADD_EXECUTABLE(example example.cpp)
TARGET_LINK_LIBRARIES(example ${CONAN_LIBS})
"""
test_main = """#include <iostream>
#include "hello.h"
int main() {
hello();
std::cout<<"*** Running example, will fail by default, implement yours! ***\\n";
return -1; // fail by default, remember to implement your test
}
"""
| AversivePlusPlus/AversivePlusPlus | tools/conan/conans/client/new.py | Python | bsd-3-clause | 3,503 | 0.001427 |
"""Tests for the Linky config flow."""
from pylinky.exceptions import (
PyLinkyAccessException,
PyLinkyEnedisException,
PyLinkyException,
PyLinkyWrongLoginException,
)
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.linky.const import DEFAULT_TIMEOUT, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
USERNAME = "username@hotmail.fr"
USERNAME_2 = "username@free.fr"
PASSWORD = "password"
TIMEOUT = 20
@pytest.fixture(name="login")
def mock_controller_login():
"""Mock a successful login."""
with patch(
"homeassistant.components.linky.config_flow.LinkyClient"
) as service_mock:
service_mock.return_value.login = Mock(return_value=True)
service_mock.return_value.close_session = Mock(return_value=None)
yield service_mock
@pytest.fixture(name="fetch_data")
def mock_controller_fetch_data():
"""Mock a successful get data."""
with patch(
"homeassistant.components.linky.config_flow.LinkyClient"
) as service_mock:
service_mock.return_value.fetch_data = Mock(return_value={})
service_mock.return_value.close_session = Mock(return_value=None)
yield service_mock
async def test_user(hass: HomeAssistantType, login, fetch_data):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME
assert result["title"] == USERNAME
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == DEFAULT_TIMEOUT
async def test_import(hass: HomeAssistantType, login, fetch_data):
"""Test import step."""
# import with username and password
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME
assert result["title"] == USERNAME
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == DEFAULT_TIMEOUT
# import with all
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: USERNAME_2,
CONF_PASSWORD: PASSWORD,
CONF_TIMEOUT: TIMEOUT,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME_2
assert result["title"] == USERNAME_2
assert result["data"][CONF_USERNAME] == USERNAME_2
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == TIMEOUT
async def test_abort_if_already_setup(hass: HomeAssistantType, login, fetch_data):
"""Test we abort if Linky is already setup."""
MockConfigEntry(
domain=DOMAIN,
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
unique_id=USERNAME,
).add_to_hass(hass)
# Should fail, same USERNAME (import)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same USERNAME (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_login_failed(hass: HomeAssistantType, login):
"""Test when we have errors during login."""
login.return_value.login.side_effect = PyLinkyAccessException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "access"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.login.side_effect = PyLinkyWrongLoginException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "wrong_login"}
hass.config_entries.flow.async_abort(result["flow_id"])
async def test_fetch_failed(hass: HomeAssistantType, login):
"""Test when we have errors during fetch."""
login.return_value.fetch_data.side_effect = PyLinkyAccessException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "access"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.fetch_data.side_effect = PyLinkyEnedisException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "enedis"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.fetch_data.side_effect = PyLinkyException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
hass.config_entries.flow.async_abort(result["flow_id"])
| nkgilley/home-assistant | tests/components/linky/test_config_flow.py | Python | apache-2.0 | 6,904 | 0.000145 |
"""Helps build config packages for installer-specific templates.
Takes in a bunch of configuration files, as well as functions to calculate the values/strings which
need to be put into the configuration.
Operates strictly:
- All parameters are strings. All calculated / derived values are strings.
- Every given parameter must map to some real config option.
- Every config option must be given only once.
- Defaults can be overridden. If no default is given, the parameter must be specified
- empty string is not the same as "not specified"
"""
import importlib.machinery
import json
import logging as log
import os
import os.path
import pprint
import textwrap
from copy import copy, deepcopy
from typing import List
import yaml
import gen.calc
import gen.internals
import gen.template
import gen.util
from gen.exceptions import ValidationError
from pkgpanda import PackageId
from pkgpanda.util import (
hash_checkout,
json_prettyprint,
load_string,
split_by_token,
write_json,
write_string,
write_yaml,
)
# List of all roles all templates should have.
role_names = {"master", "slave", "slave_public"}
role_template = '/etc/mesosphere/roles/{}'
CLOUDCONFIG_KEYS = {'coreos', 'runcmd', 'apt_sources', 'root', 'mounts', 'disk_setup', 'fs_setup', 'bootcmd'}
PACKAGE_KEYS = {'package', 'root'}
# Allow overriding calculators with a `gen_extra/calc.py` if it exists
gen_extra_calc = None
if os.path.exists('gen_extra/calc.py'):
gen_extra_calc = importlib.machinery.SourceFileLoader('gen_extra.calc', 'gen_extra/calc.py').load_module()
def stringify_configuration(configuration: dict):
"""Create a stringified version of the complete installer configuration
to send to gen.generate()"""
gen_config = {}
for key, value in configuration.items():
if isinstance(value, list) or isinstance(value, dict):
log.debug("Caught %s for genconf configuration, transforming to JSON string: %s", type(value), value)
value = json.dumps(value)
elif isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
elif isinstance(value, int):
log.debug("Caught int for genconf configuration, transforming to string: %s", value)
value = str(value)
elif isinstance(value, str):
pass
else:
log.error("Invalid type for value of %s in config. Got %s, only can handle list, dict, "
"int, bool, and str", key, type(value))
raise Exception()
gen_config[key] = value
log.debug('Stringified configuration: \n{}'.format(gen_config))
return gen_config
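# Illustrative example (hypothetical config): an input of
#   {'master_list': ['10.0.0.1'], 'oauth_enabled': True, 'num_masters': 1}
# is stringified to
#   {'master_list': '["10.0.0.1"]', 'oauth_enabled': 'true', 'num_masters': '1'}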
def add_roles(cloudconfig, roles):
for role in roles:
cloudconfig['write_files'].append({
"path": role_template.format(role),
"content": ""})
return cloudconfig
def add_units(cloudconfig, services, cloud_init_implementation='coreos'):
'''
Takes a services dict in the format of CoreOS cloud-init 'units' and
injects into cloudconfig a transformed version appropriate for the
cloud_init_implementation. See:
https://coreos.com/os/docs/latest/cloud-config.html for the CoreOS 'units'
specification. See: https://cloudinit.readthedocs.io/en/latest/index.html
for the Canonical implementation.
Parameters:
* cloudconfig is a dict
* services is a list of dict's
* cloud_init_implementation is a string: 'coreos' or 'canonical'
'''
if cloud_init_implementation == 'canonical':
cloudconfig.setdefault('write_files', [])
cloudconfig.setdefault('runcmd', [])
for unit in services:
unit_name = unit['name']
if 'content' in unit:
write_files_entry = {'path': '/etc/systemd/system/{}'.format(unit_name),
'content': unit['content'],
'permissions': '0644'}
cloudconfig['write_files'].append(write_files_entry)
if 'enable' in unit and unit['enable']:
runcmd_entry = ['systemctl', 'enable', unit_name]
cloudconfig['runcmd'].append(runcmd_entry)
if 'command' in unit:
opts = []
if 'no_block' in unit and unit['no_block']:
opts.append('--no-block')
if unit['command'] in ['start', 'stop', 'reload', 'restart', 'try-restart', 'reload-or-restart',
'reload-or-try-restart']:
runcmd_entry = ['systemctl'] + opts + [unit['command'], unit_name]
else:
raise Exception("Unsupported unit command: {}".format(unit['command']))
cloudconfig['runcmd'].append(runcmd_entry)
elif cloud_init_implementation == 'coreos':
cloudconfig.setdefault('coreos', {}).setdefault('units', [])
cloudconfig['coreos']['units'] += services
else:
raise Exception("Parameter value '{}' is invalid for cloud_init_implementation".format(
cloud_init_implementation))
return cloudconfig
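# Illustrative example (hypothetical unit) for the 'canonical' implementation:
#   {'name': 'dcos-foo.service', 'content': '[Unit]\n...', 'enable': True,
#    'command': 'start', 'no_block': True}
# yields one write_files entry for /etc/systemd/system/dcos-foo.service plus
#   ['systemctl', 'enable', 'dcos-foo.service'] and
#   ['systemctl', '--no-block', 'start', 'dcos-foo.service'] in runcmd.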
# For converting a plain dict -> attribute-access namespace only.
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
def render_cloudconfig(data):
return "#cloud-config\n" + render_yaml(data)
utils = Bunch({
"role_template": role_template,
"add_roles": add_roles,
"role_names": role_names,
"add_services": None,
"add_stable_artifact": None,
"add_channel_artifact": None,
"add_units": add_units,
"render_cloudconfig": render_cloudconfig
})
def render_yaml(data):
return yaml.dump(data, default_style='|', default_flow_style=False)
# Recursively merge two Python dictionaries.
# If both base and addition contain the same key, that key's value will be
# merged if it is a dictionary.
# This is unlike the python dict.update() method which just overwrites matching
# keys.
def merge_dictionaries(base, additions):
base_copy = base.copy()
for k, v in additions.items():
try:
if k not in base:
base_copy[k] = v
continue
if isinstance(v, dict) and isinstance(base_copy[k], dict):
base_copy[k] = merge_dictionaries(base_copy.get(k, dict()), v)
continue
# Append arrays
if isinstance(v, list) and isinstance(base_copy[k], list):
base_copy[k].extend(v)
continue
# Merge sets
if isinstance(v, set) and isinstance(base_copy[k], set):
base_copy[k] |= v
continue
# Unknown types
raise ValueError("Can't merge type {} into type {}".format(type(v), type(base_copy[k])))
except ValueError as ex:
raise ValueError("{} inside key {}".format(ex, k)) from ex
return base_copy
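# Example (illustrative):
#   merge_dictionaries({'a': {'x': 1}, 'l': [1]}, {'a': {'y': 2}, 'l': [2]})
#   == {'a': {'x': 1, 'y': 2}, 'l': [1, 2]}
# Mismatched types under the same key (e.g. dict vs. list) raise ValueError.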
def load_templates(template_dict):
result = dict()
for name, template_list in template_dict.items():
result_list = list()
for template_name in template_list:
result_list.append(gen.template.parse_resources(template_name))
extra_filename = "gen_extra/" + template_name
if os.path.exists(extra_filename):
result_list.append(gen.template.parse_str(
load_string(extra_filename)))
result[name] = result_list
return result
# Render the Jinja/YAML into YAML, then load the YAML and merge it to make the
# final configuration files.
def render_templates(template_dict, arguments):
rendered_templates = dict()
templates = load_templates(template_dict)
for name, templates in templates.items():
full_template = None
for template in templates:
rendered_template = template.render(arguments)
# If not yaml, just treat opaquely.
if not name.endswith('.yaml'):
# No merging support currently.
assert len(templates) == 1
full_template = rendered_template
continue
template_data = yaml.safe_load(rendered_template)
if full_template:
full_template = merge_dictionaries(full_template, template_data)
else:
full_template = template_data
rendered_templates[name] = full_template
return rendered_templates
# Collect the un-bound / un-set variables from all the given templates to build
# the schema / configuration target. The templates and their structure serve
# as the schema for what configuration a user must provide.
def target_from_templates(template_dict):
# NOTE: the individual yaml template targets are merged into one target
# since we never want to target just one template at a time for now (they
# all merge into one config package).
target = gen.internals.Target()
templates = load_templates(template_dict)
for template_list in templates.values():
for template in template_list:
target += template.target_from_ast()
return [target]
def write_to_non_taken(base_filename, json):
number = 0
filename = base_filename
    while os.path.exists(filename):
number += 1
filename = base_filename + '.{}'.format(number)
write_json(filename, json)
return filename
def do_gen_package(config, package_filename):
# Generate the specific dcos-config package.
# Version will be setup-{sha1 of contents}
with gen.util.pkgpanda_package_tmpdir() as tmpdir:
# Only contains package, root
assert config.keys() == {"package"}
# Write out the individual files
for file_info in config["package"]:
assert file_info.keys() <= {"path", "content", "permissions"}
if file_info['path'].startswith('/'):
path = tmpdir + file_info['path']
else:
path = tmpdir + '/' + file_info['path']
try:
if os.path.dirname(path):
os.makedirs(os.path.dirname(path), mode=0o755)
except FileExistsError:
pass
with open(path, 'w') as f:
f.write(file_info['content'] or '')
# the file has special mode defined, handle that.
if 'permissions' in file_info:
assert isinstance(file_info['permissions'], str)
os.chmod(path, int(file_info['permissions'], 8))
else:
os.chmod(path, 0o644)
gen.util.make_pkgpanda_package(tmpdir, package_filename)
def render_late_content(content, late_values):
def _dereference_placeholders(parts):
for part, is_placeholder in parts:
if is_placeholder:
if part not in late_values:
log.debug('Found placeholder for unknown value "{}" in late config: {}'.format(part, repr(content)))
raise Exception('Bad late config file: Found placeholder for unknown value "{}"'.format(part))
yield late_values[part]
else:
yield part
return ''.join(_dereference_placeholders(split_by_token(
gen.internals.LATE_BIND_PLACEHOLDER_START,
gen.internals.LATE_BIND_PLACEHOLDER_END,
content,
strip_token_decoration=True,
)))
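# Illustrative example (markers shown symbolically): with
#   late_values == {'exhibitor_address': '10.0.0.1'}
# content of the form "ADDR=<START>exhibitor_address<END>" renders to
# "ADDR=10.0.0.1", where <START>/<END> stand for
# gen.internals.LATE_BIND_PLACEHOLDER_START/_END.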
def _late_bind_placeholder_in(string_):
return gen.internals.LATE_BIND_PLACEHOLDER_START in string_ or gen.internals.LATE_BIND_PLACEHOLDER_END in string_
def resolve_late_package(config, late_values):
resolved_config = {
'package': [
{k: render_late_content(v, late_values) if k == 'content' else v for k, v in file_info.items()}
for file_info in config['package']
]
}
assert not any(
_late_bind_placeholder_in(v) for file_info in resolved_config['package'] for v in file_info.values()
), 'Resolved late package must not contain late value placeholder: {}'.format(resolved_config)
return resolved_config
def extract_files_containing_late_variables(start_files):
found_files = []
left_files = []
for file_info in deepcopy(start_files):
assert not any(_late_bind_placeholder_in(v) for k, v in file_info.items() if k != 'content'), (
'File info must not contain late config placeholder in fields other than content: {}'.format(file_info)
)
if file_info['content'] and _late_bind_placeholder_in(file_info['content']):
found_files.append(file_info)
else:
left_files.append(file_info)
# All files still belong somewhere
assert len(found_files) + len(left_files) == len(start_files)
return found_files, left_files
# Validate that all arguments passed in actually correspond to parameters, to
# catch human typo errors.
# This includes all possible sub scopes (including config for things you don't use is fine).
def flatten_parameters(scoped_parameters):
flat = copy(scoped_parameters.get('variables', set()))
for name, possible_values in scoped_parameters.get('sub_scopes', dict()).items():
flat.add(name)
for sub_scope in possible_values.values():
flat |= flatten_parameters(sub_scope)
return flat
def validate_all_arguments_match_parameters(parameters, setters, arguments):
errors = dict()
# Gather all possible parameters from templates as well as setter parameters.
all_parameters = flatten_parameters(parameters)
for setter_list in setters.values():
for setter in setter_list:
all_parameters |= setter.parameters
all_parameters.add(setter.name)
all_parameters |= {name for name, value in setter.conditions}
# Check every argument is in the set of parameters.
for argument in arguments:
if argument not in all_parameters:
errors[argument] = 'Argument {} given but not in possible parameters {}'.format(argument, all_parameters)
if len(errors):
raise ValidationError(errors, set())
def validate(
arguments,
extra_templates=list(),
extra_sources=list()):
sources, targets, _ = get_dcosconfig_source_target_and_templates(arguments, extra_templates, extra_sources)
return gen.internals.resolve_configuration(sources, targets).status_dict
def user_arguments_to_source(user_arguments) -> gen.internals.Source:
"""Convert all user arguments to be a gen.internals.Source"""
# Make sure all user provided arguments are strings.
# TODO(cmaloney): Loosen this restriction / allow arbitrary types as long
# as they all have a gen specific string form.
gen.internals.validate_arguments_strings(user_arguments)
user_source = gen.internals.Source(is_user=True)
for name, value in user_arguments.items():
user_source.add_must(name, value)
return user_source
# TODO(cmaloney): This function should dissolve away like the ssh one did and just become a big
# static dictionary, or be passed in / constructed on the fly at the various template call sites.
def get_dcosconfig_source_target_and_templates(
user_arguments: dict,
extra_templates: List[str],
extra_sources: List[gen.internals.Source]):
log.info("Generating configuration files...")
# TODO(cmaloney): Make these all just defined by the base calc.py
config_package_names = ['dcos-config', 'dcos-metadata']
template_filenames = ['dcos-config.yaml', 'cloud-config.yaml', 'dcos-metadata.yaml', 'dcos-services.yaml']
# TODO(cmaloney): Check there are no duplicates between templates and extra_template_files
template_filenames += extra_templates
# Re-arrange templates to be indexed by common name. Only allow multiple for one key if the key
# is yaml (ends in .yaml).
templates = dict()
for filename in template_filenames:
key = os.path.basename(filename)
templates.setdefault(key, list())
templates[key].append(filename)
if len(templates[key]) > 1 and not key.endswith('.yaml'):
raise Exception(
"Internal Error: Only know how to merge YAML templates at this point in time. "
"Can't merge template {} in template_list {}".format(filename, templates[key]))
targets = target_from_templates(templates)
base_source = gen.internals.Source(is_user=False)
base_source.add_entry(gen.calc.entry, replace_existing=False)
if gen_extra_calc:
base_source.add_entry(gen_extra_calc.entry, replace_existing=True)
def add_builtin(name, value):
base_source.add_must(name, json_prettyprint(value))
sources = [base_source, user_arguments_to_source(user_arguments)] + extra_sources
# Add builtin variables.
# TODO(cmaloney): Hash the contents of all the templates rather than using the list of filenames
# since the filenames might not live in this git repo, or may be locally modified.
add_builtin('template_filenames', template_filenames)
add_builtin('config_package_names', list(config_package_names))
# Add placeholders for builtin variables whose values will be calculated after all others, so that we won't get
    # unset argument errors. The placeholder value will be replaced with the actual value after all other variables are
# calculated.
temporary_str = 'DO NOT USE THIS AS AN ARGUMENT TO OTHER ARGUMENTS. IT IS TEMPORARY'
add_builtin('user_arguments_full', temporary_str)
add_builtin('user_arguments', temporary_str)
add_builtin('config_yaml_full', temporary_str)
add_builtin('config_yaml', temporary_str)
add_builtin('expanded_config', temporary_str)
add_builtin('expanded_config_full', temporary_str)
    # Note: must come last so the hash of the "base_source" this is being added to contains all the
    # variables but this one.
add_builtin('sources_id', hash_checkout([hash_checkout(source.make_id()) for source in sources]))
return sources, targets, templates
def build_late_package(late_files, config_id, provider):
if not late_files:
return None
    # Add an empty pkginfo.json to the late package after validating there
    # isn't already one.
for file_info in late_files:
assert file_info['path'] != '/pkginfo.json'
assert file_info['path'].startswith('/')
late_files.append({
"path": "/pkginfo.json",
"content": "{}"})
return {
'package': late_files,
'name': 'dcos-provider-{}-{}--setup'.format(config_id, provider)
}
def validate_and_raise(sources, targets):
# TODO(cmaloney): Make it so we only get out the dcosconfig target arguments not all the config target arguments.
resolver = gen.internals.resolve_configuration(sources, targets)
status = resolver.status_dict
if status['status'] == 'errors':
raise ValidationError(errors=status['errors'], unset=status['unset'])
return resolver
def get_late_variables(resolver, sources):
# Gather out the late variables. The presence of late variables changes
# whether or not a late package is created
late_variables = dict()
# TODO(branden): Get the late vars and expressions from resolver.late
for source in sources:
for setter_list in source.setters.values():
for setter in setter_list:
if not setter.is_late:
continue
if setter.name not in resolver.late:
continue
# Skip late vars that aren't referenced by config.
if not resolver.arguments[setter.name].is_finalized:
continue
# Validate a late variable should only have one source.
assert setter.name not in late_variables
late_variables[setter.name] = setter.late_expression
log.debug('Late variables:\n{}'.format(pprint.pformat(late_variables)))
return late_variables
def get_secret_variables(sources):
return list(set(var_name for source in sources for var_name in source.secret))
def get_final_arguments(resolver):
return {k: v.value for k, v in resolver.arguments.items() if v.is_finalized}
def format_expanded_config(config):
return textwrap.indent(json_prettyprint(config), prefix=(' ' * 3))
def user_arguments_to_yaml(user_arguments: dict):
return textwrap.indent(
yaml.dump(user_arguments, default_style='|', default_flow_style=False, indent=2),
prefix=(' ' * 3),
)
def generate(
arguments,
extra_templates=list(),
extra_sources=list(),
extra_targets=list()):
# To maintain the old API where we passed arguments rather than the new name.
user_arguments = arguments
arguments = None
sources, targets, templates = get_dcosconfig_source_target_and_templates(
user_arguments, extra_templates, extra_sources)
resolver = validate_and_raise(sources, targets + extra_targets)
argument_dict = get_final_arguments(resolver)
late_variables = get_late_variables(resolver, sources)
secret_builtins = ['expanded_config_full', 'user_arguments_full', 'config_yaml_full']
secret_variables = set(get_secret_variables(sources) + secret_builtins)
masked_value = '**HIDDEN**'
# Calculate values for builtin variables.
user_arguments_masked = {k: (masked_value if k in secret_variables else v) for k, v in user_arguments.items()}
argument_dict['user_arguments_full'] = json_prettyprint(user_arguments)
argument_dict['user_arguments'] = json_prettyprint(user_arguments_masked)
argument_dict['config_yaml_full'] = user_arguments_to_yaml(user_arguments)
argument_dict['config_yaml'] = user_arguments_to_yaml(user_arguments_masked)
# The expanded_config and expanded_config_full variables contain all other variables and their values.
# expanded_config is a copy of expanded_config_full with secret values removed. Calculating these variables' values
# must come after the calculation of all other variables to prevent infinite recursion.
# TODO(cmaloney): Make this late-bound by gen.internals
expanded_config_full = {
k: v for k, v in argument_dict.items()
# Omit late-bound variables whose values have not yet been calculated.
if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)
}
expanded_config_scrubbed = {k: v for k, v in expanded_config_full.items() if k not in secret_variables}
argument_dict['expanded_config_full'] = format_expanded_config(expanded_config_full)
argument_dict['expanded_config'] = format_expanded_config(expanded_config_scrubbed)
log.debug(
"Final arguments:" + json_prettyprint({
# Mask secret config values.
k: (masked_value if k in secret_variables else v) for k, v in argument_dict.items()
})
)
# Fill in the template parameters
# TODO(cmaloney): render_templates should ideally take the template targets.
rendered_templates = render_templates(templates, argument_dict)
# Validate there aren't any unexpected top level directives in any of the files
# (likely indicates a misspelling)
for name, template in rendered_templates.items():
if name == 'dcos-services.yaml': # yaml list of the service files
assert isinstance(template, list)
elif name == 'cloud-config.yaml':
assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
elif isinstance(template, str): # Not a yaml template
pass
else: # yaml template file
log.debug("validating template file %s", name)
assert template.keys() <= PACKAGE_KEYS, template.keys()
stable_artifacts = []
channel_artifacts = []
# Find all files which contain late bind variables and turn them into a "late bind package"
# TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
late_files, regular_files = extract_files_containing_late_variables(
rendered_templates['dcos-config.yaml']['package'])
# put the regular files right back
rendered_templates['dcos-config.yaml'] = {'package': regular_files}
# Render cluster package list artifact.
cluster_package_list_filename = 'package_lists/{}.package_list.json'.format(
argument_dict['cluster_package_list_id']
)
os.makedirs(os.path.dirname(cluster_package_list_filename), mode=0o755, exist_ok=True)
write_string(cluster_package_list_filename, argument_dict['cluster_packages'])
log.info('Cluster package list: {}'.format(cluster_package_list_filename))
stable_artifacts.append(cluster_package_list_filename)
def make_package_filename(package_id, extension):
return 'packages/{0}/{1}{2}'.format(
package_id.name,
repr(package_id),
extension)
# Render all the cluster packages
cluster_package_info = {}
# Prepare late binding config, if any.
late_package = build_late_package(late_files, argument_dict['config_id'], argument_dict['provider'])
if late_variables:
# Render the late binding package. This package will be downloaded onto
# each cluster node during bootstrap and rendered into the final config
# using the values from the late config file.
late_package_id = PackageId(late_package['name'])
late_package_filename = make_package_filename(late_package_id, '.dcos_config')
os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
write_yaml(late_package_filename, {'package': late_package['package']}, default_flow_style=False)
log.info('Package filename: {}'.format(late_package_filename))
stable_artifacts.append(late_package_filename)
# Add the late config file to cloud config. The expressions in
# late_variables will be resolved by the service handling the cloud
# config (e.g. Amazon CloudFormation). The rendered late config file
# on a cluster node's filesystem will contain the final values.
rendered_templates['cloud-config.yaml']['root'].append({
'path': '/etc/mesosphere/setup-flags/late-config.yaml',
'permissions': '0644',
'owner': 'root',
# TODO(cmaloney): don't prettyprint to save bytes.
# NOTE: Use yaml here simply to make avoiding painful escaping and
# unescaping easier.
'content': render_yaml({
'late_bound_package_id': late_package['name'],
'bound_values': late_variables
})})
# Collect metadata for cluster packages.
for package_id_str in json.loads(argument_dict['cluster_packages']):
package_id = PackageId(package_id_str)
package_filename = make_package_filename(package_id, '.tar.xz')
cluster_package_info[package_id.name] = {
'id': package_id_str,
'filename': package_filename
}
# Render config packages.
config_package_ids = json.loads(argument_dict['config_package_ids'])
for package_id_str in config_package_ids:
package_id = PackageId(package_id_str)
package_filename = cluster_package_info[package_id.name]['filename']
do_gen_package(rendered_templates[package_id.name + '.yaml'], cluster_package_info[package_id.name]['filename'])
stable_artifacts.append(package_filename)
# Convert cloud-config to just contain write_files rather than root
cc = rendered_templates['cloud-config.yaml']
# Shouldn't contain any packages. Providers should pull what they need to
# late bind out of other packages via cc_package_file.
assert 'package' not in cc
cc_root = cc.pop('root', [])
# Make sure write_files exists.
assert 'write_files' not in cc
cc['write_files'] = []
# Do the transform
for item in cc_root:
assert item['path'].startswith('/')
cc['write_files'].append(item)
rendered_templates['cloud-config.yaml'] = cc
# Add utils that need to be defined here so they can be bound to locals.
def add_services(cloudconfig, cloud_init_implementation):
return add_units(cloudconfig, rendered_templates['dcos-services.yaml'], cloud_init_implementation)
utils.add_services = add_services
def add_stable_artifact(filename):
assert filename not in stable_artifacts + channel_artifacts
stable_artifacts.append(filename)
utils.add_stable_artifact = add_stable_artifact
def add_channel_artifact(filename):
assert filename not in stable_artifacts + channel_artifacts
channel_artifacts.append(filename)
utils.add_channel_artifact = add_channel_artifact
return Bunch({
'arguments': argument_dict,
'cluster_packages': cluster_package_info,
'stable_artifacts': stable_artifacts,
'channel_artifacts': channel_artifacts,
'templates': rendered_templates,
'utils': utils
})
| branden/dcos | gen/__init__.py | Python | apache-2.0 | 29,065 | 0.002408 |
# disk.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Utilities relating to disk management
from kano.utils.shell import run_cmd
def get_free_space(path="/"):
"""
    Return the amount of free space at a given location, in MB.
:param path: The location to measure the free space at.
:type path: str
:return: Number of free megabytes.
:rtype: int
"""
out, dummy_err, dummy_rv = run_cmd("df {}".format(path))
dummy_device, dummy_size, dummy_used, free, dummy_percent, dummy_mp = \
out.split('\n')[1].split()
return int(free) / 1024
def get_partition_info():
    """
    Return the sizes, in bytes, of the block devices on the SD card.

    :return: List of partition sizes in bytes; empty list on error.
    :rtype: list
    """
    device = '/dev/mmcblk0'
try:
cmd = 'lsblk -n -b {} -o SIZE'.format(device)
stdout, dummy_stderr, returncode = run_cmd(cmd)
if returncode != 0:
from kano.logging import logger
logger.warning("error running lsblk")
return []
lines = stdout.strip().split('\n')
sizes = map(int, lines)
return sizes
except Exception:
return []
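# Hedged usage sketch:
#
#   from kano.utils.disk import get_free_space, get_partition_info
#   if get_free_space('/') < 500:
#       # less than 500 MB free on the root partition
#       ...
#   sizes = get_partition_info()  # e.g. [31914983424] on a 32 GB card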
| KanoComputing/kano-toolset | kano/utils/disk.py | Python | gpl-2.0 | 1,133 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20150819_0050'),
]
operations = [
migrations.AlterUniqueTogether(
name='test',
unique_together=set([('owner', 'name')]),
),
]
| swarmer/tester | core/migrations/0009_auto_20150821_0243.py | Python | mit | 376 | 0 |
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# All the memory versions need to use LOCK, regardless of if it was set
def macroop XCHG_R_R
{
# Use the xor trick instead of moves to reduce register pressure.
# This probably doesn't make much of a difference, but it's easy.
xor reg, reg, regm
xor regm, regm, reg
xor reg, reg, regm
};
def macroop XCHG_R_M
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_R_P
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
def macroop XCHG_M_R
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_P_R
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
def macroop XCHG_LOCKED_M_R
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_LOCKED_P_R
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
'''
| koparasy/faultinjection-gem5 | src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py | Python | bsd-3-clause | 3,156 | 0 |
import EightPuzzleWithHeuristics as Problem
# puzzle0:
CREATE_INITIAL_STATE = lambda: Problem.State([0, 1, 2, 3, 4, 5, 6, 7, 8])
| vaibhavi-r/CSE-415 | Assignment3/puzzle0.py | Python | mit | 129 | 0.015504 |
import datetime
import feedparser
import json
import logging
import random
import re
import string # pylint: disable=W0402
import urllib
import uuid
import time
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.core.cache import cache
from django.core.context_processors import csrf
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, validate_slug, ValidationError
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotAllowed, Http404
from django.shortcuts import redirect
from django_future.csrf import ensure_csrf_cookie
from django.utils.http import cookie_date
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _
from ratelimitbackend.exceptions import RateLimitException
from mitxmako.shortcuts import render_to_response, render_to_string
from bs4 import BeautifulSoup
from student.models import (Registration, UserProfile, TestCenterUser, TestCenterUserForm,
TestCenterRegistration, TestCenterRegistrationForm,
PendingNameChange, PendingEmailChange,
CourseEnrollment, unique_id_for_user,
get_testcenter_registration, CourseEnrollmentAllowed)
from student.forms import PasswordResetFormNoActive
from certificates.models import CertificateStatuses, certificate_status_for_student
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement
from courseware.access import has_access
from external_auth.models import ExternalAuthMap
from statsd import statsd
from pytz import UTC
log = logging.getLogger("mitx.student")
AUDIT_LOG = logging.getLogger("audit")
Article = namedtuple('Article', 'title url author image deck publication publish_date')
def csrf_token(context):
''' A csrf token that can be included in a form.
'''
csrf_token = context.get('csrf_token', '')
if csrf_token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (csrf_token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context={}, user=None):
'''
Render the edX main page.
    extra_context is used to allow immediate display of certain modal windows, e.g. signup,
as used by external_auth.
'''
# The course selection work is done in courseware.courses.
domain = settings.MITX_FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
# do explicit check, because domain=None is valid
    if domain is False:
domain = request.META.get('HTTP_HOST')
courses = get_courses(None, domain=domain)
courses = sort_by_announcement(courses)
context = {'courses': courses}
context.update(extra_context)
return render_to_response('index.html', context)
def course_from_id(course_id):
"""Return the CourseDescriptor corresponding to this course_id"""
course_loc = CourseDescriptor.id_to_location(course_id)
return modulestore().get_instance(course_id, course_loc)
day_pattern = re.compile(r'\s\d+,\s')
multimonth_pattern = re.compile(r'\s?\-\s?\S+\s')
def _get_date_for_press(publish_date):
# strip off extra months, and just use the first:
date = re.sub(multimonth_pattern, ", ", publish_date)
if re.search(day_pattern, date):
date = datetime.datetime.strptime(date, "%B %d, %Y").replace(tzinfo=UTC)
else:
date = datetime.datetime.strptime(date, "%B, %Y").replace(tzinfo=UTC)
return date
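# Illustrative examples of the normalization above (hypothetical inputs):
#   "January 5, 2013"   -> datetime(2013, 1, 5)  (day present)
#   "June - July, 2012" -> datetime(2012, 6, 1)  (extra months stripped)
#   "March, 2012"       -> datetime(2012, 3, 1)
# All returned datetimes are UTC-aware.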
def press(request):
json_articles = cache.get("student_press_json_articles")
if json_articles is None:
        if hasattr(settings, 'PRESS_URL'):
content = urllib.urlopen(settings.PRESS_URL).read()
json_articles = json.loads(content)
else:
content = open(settings.PROJECT_ROOT / "templates" / "press.json").read()
json_articles = json.loads(content)
cache.set("student_press_json_articles", json_articles)
articles = [Article(**article) for article in json_articles]
articles.sort(key=lambda item: _get_date_for_press(item.publish_date), reverse=True)
return render_to_response('static_templates/press.html', {'articles': articles})
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
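# Illustrative example (hypothetical URL): a survey_link of
#   "https://example.com/survey?id={UNIQUE_ID}"
# becomes "https://example.com/survey?id=<sha1-of-username>" for the user.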
def cert_info(user, course):
"""
Get the certificate info needed to render the dashboard section for the given
student and course. Returns a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
"""
if not course.has_ended():
return {}
return _cert_info(user, course, certificate_status_for_student(user, course.id))
def _cert_info(user, course, cert_status):
"""
Implements the logic for cert_info -- split out for testing.
"""
default_status = 'processing'
default_info = {'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False}
if cert_status is None:
return default_info
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.regenerating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
}
status = template_state.get(cert_status['status'], default_status)
d = {'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating', }
if (status in ('generating', 'ready', 'notpassing', 'restricted') and
course.end_of_course_survey_url is not None):
d.update({
'show_survey_button': True,
'survey_url': process_survey_link(course.end_of_course_survey_url, user)})
else:
d['show_survey_button'] = False
if status == 'ready':
if 'download_url' not in cert_status:
log.warning("User %s has a downloadable cert for %s, but no download url",
user.username, course.id)
return default_info
else:
d['download_url'] = cert_status['download_url']
if status in ('generating', 'ready', 'notpassing', 'restricted'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
d['grade'] = cert_status['grade']
return d
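# Illustrative return value (hypothetical course/user) once a cert is ready
# and the course defines no survey:
#   {'status': 'ready', 'show_download_url': True,
#    'download_url': 'https://...', 'show_disabled_download_button': False,
#    'show_survey_button': False, 'grade': '0.95'}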
@ensure_csrf_cookie
def signin_user(request):
"""
This view will display the non-modal login form
"""
if request.user.is_authenticated():
return redirect(reverse('dashboard'))
context = {
'course_id': request.GET.get('course_id'),
'enrollment_action': request.GET.get('enrollment_action')
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""
This view will display the non-modal registration form
"""
if request.user.is_authenticated():
return redirect(reverse('dashboard'))
context = {
'course_id': request.GET.get('course_id'),
'enrollment_action': request.GET.get('enrollment_action')
}
if extra_context is not None:
context.update(extra_context)
return render_to_response('register.html', context)
@login_required
@ensure_csrf_cookie
def dashboard(request):
user = request.user
# Build our courses list for the user, but ignore any courses that no longer
# exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
courses = []
for enrollment in CourseEnrollment.enrollments_for_user(user):
try:
courses.append(course_from_id(enrollment.course_id))
except ItemNotFoundError:
log.error("User {0} enrolled in non-existent course {1}"
.format(user.username, enrollment.course_id))
message = ""
if not user.is_active:
message = render_to_string('registration/activate_account_notice.html', {'email': user.email})
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'global', 'staff'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(course.id for course in courses
if has_access(request.user, course, 'load'))
cert_statuses = {course.id: cert_info(request.user, course) for course in courses}
exam_registrations = {course.id: exam_registration_info(request.user, course) for course in courses}
# get info w.r.t ExternalAuthMap
external_auth_map = None
try:
external_auth_map = ExternalAuthMap.objects.get(user=user)
except ExternalAuthMap.DoesNotExist:
pass
context = {'courses': courses,
'message': message,
'external_auth_map': external_auth_map,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'cert_statuses': cert_statuses,
'exam_registrations': exam_registrations,
}
return render_to_response('dashboard.html', context)
def try_change_enrollment(request):
"""
This method calls change_enrollment if the necessary POST
parameters are present, but does not return anything. It
simply logs the result or exception. This is usually
called after a registration or login, as secondary action.
It should not interrupt a successful registration or login.
"""
if 'enrollment_action' in request.POST:
try:
enrollment_response = change_enrollment(request)
# There isn't really a way to display the results to the user, so we just log it
# We expect the enrollment to be a success, and will show up on the dashboard anyway
log.info(
"Attempted to automatically enroll after login. Response code: {0}; response body: {1}".format(
enrollment_response.status_code,
enrollment_response.content
)
)
except Exception, e:
log.exception("Exception automatically enrolling after login: {0}".format(str(e)))
def change_enrollment(request):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request or
as a post-login/registration helper, so the error messages in the responses
should never actually be user-visible.
"""
if request.method != "POST":
return HttpResponseNotAllowed(["POST"])
user = request.user
if not user.is_authenticated():
return HttpResponseForbidden()
action = request.POST.get("enrollment_action")
course_id = request.POST.get("course_id")
if course_id is None:
return HttpResponseBadRequest(_("Course id not specified"))
if action == "enroll":
# Make sure the course exists
# We don't do this check on unenroll, or a bad course id can't be unenrolled from
try:
course = course_from_id(course_id)
except ItemNotFoundError:
log.warning("User {0} tried to enroll in non-existent course {1}"
.format(user.username, course_id))
return HttpResponseBadRequest(_("Course id is invalid"))
if not has_access(user, course, 'enroll'):
return HttpResponseBadRequest(_("Enrollment is closed"))
org, course_num, run = course_id.split("/")
statsd.increment("common.student.enrollment",
tags=["org:{0}".format(org),
"course:{0}".format(course_num),
"run:{0}".format(run)])
CourseEnrollment.enroll(user, course.id)
return HttpResponse()
elif action == "unenroll":
try:
CourseEnrollment.unenroll(user, course_id)
org, course_num, run = course_id.split("/")
statsd.increment("common.student.unenrollment",
tags=["org:{0}".format(org),
"course:{0}".format(course_num),
"run:{0}".format(run)])
return HttpResponse()
except CourseEnrollment.DoesNotExist:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
@ensure_csrf_cookie
def accounts_login(request, error=""):
return render_to_response('login.html', {'error': error})
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""):
''' AJAX request to log in the user. '''
if 'email' not in request.POST or 'password' not in request.POST:
return HttpResponse(json.dumps({'success': False,
'value': _('There was an error receiving your login information. Please email us.')})) # TODO: User error message
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
user = None
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return HttpResponse(json.dumps({'success': False,
'value': _('Too many failed login attempts. Try again later.')}))
if user is None:
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return HttpResponse(json.dumps({'success': False,
'value': _('Email or password is incorrect.')}))
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
                request.session.set_expiry(604800)  # one week, in seconds
                log.debug("Setting user session to expire in one week")
else:
request.session.set_expiry(0)
except Exception as e:
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(e)
raise
try_change_enrollment(request)
statsd.increment("common.student.successful_login")
response = HttpResponse(json.dumps({'success': True}))
# set the login cookie for the edx marketing site
# we want this cookie to be accessed via javascript
# so httponly is set to None
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
response.set_cookie(settings.EDXMKTG_COOKIE_NAME,
'true', max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path='/',
secure=None,
httponly=None)
return response
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your e-mail for the activation instructions.")
return HttpResponse(json.dumps({'success': False,
'value': not_activated_msg}))
@ensure_csrf_cookie
def logout_user(request):
'''
HTTP request to log out the user. Redirects to marketing page.
Deletes both the CSRF and sessionid cookies so the marketing
site can determine the logged in state of the user
'''
# We do not log here, because we have a handler registered
# to perform logging on successful logouts.
logout(request)
response = redirect('/')
response.delete_cookie(settings.EDXMKTG_COOKIE_NAME,
path='/',
domain=settings.SESSION_COOKIE_DOMAIN)
return response
@login_required
@ensure_csrf_cookie
def change_setting(request):
    ''' JSON call to change a profile setting. Currently only location.
    '''
# TODO (vshnayder): location is no longer used
up = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
up.location = request.POST['location']
up.save()
return HttpResponse(json.dumps({'success': True,
'location': up.location, }))
def _do_create_account(post_vars):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
user = User(username=post_vars['username'],
email=post_vars['email'],
is_active=False)
user.set_password(post_vars['password'])
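    # set_password stores a salted hash of the password, never the plain text.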
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
user.save()
except IntegrityError:
js = {'success': False}
# Figure out the cause of the integrity error
if len(User.objects.filter(username=post_vars['username'])) > 0:
js['value'] = _("An account with the Public Username '{username}' already exists.").format(username=post_vars['username'])
js['field'] = 'username'
return HttpResponse(json.dumps(js))
if len(User.objects.filter(email=post_vars['email'])) > 0:
js['value'] = _("An account with the Email '{email}' already exists.").format(email=post_vars['email'])
js['field'] = 'email'
return HttpResponse(json.dumps(js))
raise
registration.register(user)
profile = UserProfile(user=user)
profile.name = post_vars['name']
profile.level_of_education = post_vars.get('level_of_education')
profile.gender = post_vars.get('gender')
profile.mailing_address = post_vars.get('mailing_address')
profile.goals = post_vars.get('goals')
try:
profile.year_of_birth = int(post_vars['year_of_birth'])
except (ValueError, KeyError):
# If they give us garbage, just ignore it instead
# of asking them to put an integer.
profile.year_of_birth = None
try:
profile.save()
except Exception:
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
return (user, profile, registration)
@ensure_csrf_cookie
def create_account(request, post_override=None):
'''
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
'''
js = {'success': False}
post_vars = post_override if post_override else request.POST
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
DoExternalAuth = 'ExternalAuthMap' in request.session
if DoExternalAuth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
email = eamap.external_email
except ValidationError:
email = post_vars.get('email', '')
if eamap.external_name.strip() == '':
name = post_vars.get('name', '')
else:
name = eamap.external_name
password = eamap.internal_password
post_vars = dict(post_vars.items())
post_vars.update(dict(email=email, name=name, password=password))
log.debug(u'In create_account with external_auth: user = %s, email=%s', name, email)
# Confirm we have a properly formed request
for a in ['username', 'email', 'password', 'name']:
if a not in post_vars:
js['value'] = _("Error (401 {field}). E-mail us.").format(field=a)
js['field'] = a
return HttpResponse(json.dumps(js))
if post_vars.get('honor_code', 'false') != u'true':
js['value'] = _("To enroll, you must follow the honor code.").format(field=a)
js['field'] = 'honor_code'
return HttpResponse(json.dumps(js))
# Can't have terms of service for certain SHIB users, like at Stanford
tos_not_required = settings.MITX_FEATURES.get("AUTH_USE_SHIB") \
and settings.MITX_FEATURES.get('SHIB_DISABLE_TOS') \
and DoExternalAuth and ("shib" in eamap.external_domain)
if not tos_not_required:
if post_vars.get('terms_of_service', 'false') != u'true':
js['value'] = _("You must accept the terms of service.").format(field=a)
js['field'] = 'terms_of_service'
return HttpResponse(json.dumps(js))
# Confirm appropriate fields are there.
# TODO: Check e-mail format is correct.
# TODO: Confirm e-mail is not from a generic domain (mailinator, etc.)? Not sure if
# this is a good idea
# TODO: Check password is sane
required_post_vars = ['username', 'email', 'name', 'password', 'terms_of_service', 'honor_code']
if tos_not_required:
required_post_vars = ['username', 'email', 'name', 'password', 'honor_code']
for a in required_post_vars:
if len(post_vars[a]) < 2:
            error_str = {'username': 'Username must be a minimum of two characters long.',
'email': 'A properly formatted e-mail is required.',
'name': 'Your legal name must be a minimum of two characters long.',
'password': 'A valid password is required.',
'terms_of_service': 'Accepting Terms of Service is required.',
'honor_code': 'Agreeing to the Honor Code is required.'}
js['value'] = error_str[a]
js['field'] = a
return HttpResponse(json.dumps(js))
try:
validate_email(post_vars['email'])
except ValidationError:
js['value'] = _("Valid e-mail is required.").format(field=a)
js['field'] = 'email'
return HttpResponse(json.dumps(js))
try:
validate_slug(post_vars['username'])
except ValidationError:
js['value'] = _("Username should only consist of A-Z and 0-9, with no spaces.").format(field=a)
js['field'] = 'username'
return HttpResponse(json.dumps(js))
# Ok, looks like everything is legit. Create the account.
ret = _do_create_account(post_vars)
if isinstance(ret, HttpResponse): # if there was an error then return that
return ret
(user, profile, registration) = ret
d = {'name': post_vars['name'],
'key': registration.activation_key,
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', d)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', d)
    # don't send email if we are doing load testing or random user generation
if not (settings.MITX_FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING')):
try:
if settings.MITX_FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.MITX_FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [dest_addr], fail_silently=False)
else:
_res = user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:
log.warning('Unable to send activation email to user', exc_info=True)
js['value'] = _('Could not send activation e-mail.')
return HttpResponse(json.dumps(js))
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
login_user = authenticate(username=post_vars['username'], password=post_vars['password'])
login(request, login_user)
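    # An expiry of 0 ends the session when the browser closes; the user must activate via email to log in again.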
request.session.set_expiry(0)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if login_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(login_user.username))
if DoExternalAuth:
eamap.user = login_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info("User registered with external_auth %s", post_vars['username'])
AUDIT_LOG.info('Updated ExternalAuthMap for %s to be %s', post_vars['username'], eamap)
if settings.MITX_FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
login_user.is_active = True
login_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(login_user.username, login_user.email))
try_change_enrollment(request)
statsd.increment("common.student.account_created")
    response = HttpResponse(json.dumps({'success': True}))
# set the login cookie for the edx marketing site
# we want this cookie to be accessed via javascript
# so httponly is set to None
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
response.set_cookie(settings.EDXMKTG_COOKIE_NAME,
'true', max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path='/',
secure=None,
httponly=None)
return response
def exam_registration_info(user, course):
""" Returns a Registration object if the user is currently registered for a current
exam of the course. Returns None if the user is not registered, or if there is no
current exam for the course.
"""
exam_info = course.current_test_center_exam
if exam_info is None:
return None
exam_code = exam_info.exam_series_code
registrations = get_testcenter_registration(user, course.id, exam_code)
if registrations:
registration = registrations[0]
else:
registration = None
return registration
@login_required
@ensure_csrf_cookie
def begin_exam_registration(request, course_id):
""" Handles request to register the user for the current
test center exam of the specified course. Called by form
in dashboard.html.
"""
user = request.user
try:
course = course_from_id(course_id)
except ItemNotFoundError:
log.error("User {0} enrolled in non-existent course {1}".format(user.username, course_id))
raise Http404
# get the exam to be registered for:
# (For now, we just assume there is one at most.)
# if there is no exam now (because someone bookmarked this stupid page),
# then return a 404:
exam_info = course.current_test_center_exam
if exam_info is None:
raise Http404
# determine if the user is registered for this course:
registration = exam_registration_info(user, course)
# we want to populate the registration page with the relevant information,
# if it already exists. Create an empty object otherwise.
try:
testcenteruser = TestCenterUser.objects.get(user=user)
except TestCenterUser.DoesNotExist:
testcenteruser = TestCenterUser()
testcenteruser.user = user
context = {'course': course,
'user': user,
'testcenteruser': testcenteruser,
'registration': registration,
'exam_info': exam_info,
}
return render_to_response('test_center_register.html', context)
@ensure_csrf_cookie
def create_exam_registration(request, post_override=None):
'''
JSON call to create a test center exam registration.
Called by form in test_center_register.html
'''
post_vars = post_override if post_override else request.POST
# first determine if we need to create a new TestCenterUser, or if we are making any update
# to an existing TestCenterUser.
username = post_vars['username']
user = User.objects.get(username=username)
course_id = post_vars['course_id']
course = course_from_id(course_id) # assume it will be found....
# make sure that any demographic data values received from the page have been stripped.
# Whitespace is not an acceptable response for any of these values
demographic_data = {}
for fieldname in TestCenterUser.user_provided_fields():
if fieldname in post_vars:
demographic_data[fieldname] = (post_vars[fieldname]).strip()
try:
testcenter_user = TestCenterUser.objects.get(user=user)
needs_updating = testcenter_user.needs_update(demographic_data)
log.info("User {0} enrolled in course {1} {2}updating demographic info for exam registration".format(user.username, course_id, "" if needs_updating else "not "))
except TestCenterUser.DoesNotExist:
# do additional initialization here:
testcenter_user = TestCenterUser.create(user)
needs_updating = True
log.info("User {0} enrolled in course {1} creating demographic info for exam registration".format(user.username, course_id))
# perform validation:
if needs_updating:
# first perform validation on the user information
# using a Django Form.
form = TestCenterUserForm(instance=testcenter_user, data=demographic_data)
if form.is_valid():
form.update_and_save()
else:
response_data = {'success': False}
# return a list of errors...
response_data['field_errors'] = form.errors
response_data['non_field_errors'] = form.non_field_errors()
return HttpResponse(json.dumps(response_data), mimetype="application/json")
# create and save the registration:
needs_saving = False
exam = course.current_test_center_exam
exam_code = exam.exam_series_code
registrations = get_testcenter_registration(user, course_id, exam_code)
if registrations:
registration = registrations[0]
# NOTE: we do not bother to check here to see if the registration has changed,
# because at the moment there is no way for a user to change anything about their
# registration. They only provide an optional accommodation request once, and
# cannot make changes to it thereafter.
# It is possible that the exam_info content has been changed, such as the
# scheduled exam dates, but those kinds of changes should not be handled through
# this registration screen.
else:
accommodation_request = post_vars.get('accommodation_request', '')
registration = TestCenterRegistration.create(testcenter_user, exam, accommodation_request)
needs_saving = True
log.info("User {0} enrolled in course {1} creating new exam registration".format(user.username, course_id))
if needs_saving:
# do validation of registration. (Mainly whether an accommodation request is too long.)
form = TestCenterRegistrationForm(instance=registration, data=post_vars)
if form.is_valid():
form.update_and_save()
else:
response_data = {'success': False}
# return a list of errors...
response_data['field_errors'] = form.errors
response_data['non_field_errors'] = form.non_field_errors()
return HttpResponse(json.dumps(response_data), mimetype="application/json")
# only do the following if there is accommodation text to send,
# and a destination to which to send it.
# TODO: still need to create the accommodation email templates
# if 'accommodation_request' in post_vars and 'TESTCENTER_ACCOMMODATION_REQUEST_EMAIL' in settings:
# d = {'accommodation_request': post_vars['accommodation_request'] }
#
# # composes accommodation email
# subject = render_to_string('emails/accommodation_email_subject.txt', d)
# # Email subject *must not* contain newlines
# subject = ''.join(subject.splitlines())
# message = render_to_string('emails/accommodation_email.txt', d)
#
# try:
# dest_addr = settings['TESTCENTER_ACCOMMODATION_REQUEST_EMAIL']
# from_addr = user.email
# send_mail(subject, message, from_addr, [dest_addr], fail_silently=False)
# except:
# log.exception(sys.exc_info())
# response_data = {'success': False}
# response_data['non_field_errors'] = [ 'Could not send accommodation e-mail.', ]
# return HttpResponse(json.dumps(response_data), mimetype="application/json")
js = {'success': True}
return HttpResponse(json.dumps(js), mimetype="application/json")
def auto_auth(request):
"""
    Automatically logs the user in with generated random credentials.
    This view is only accessible when
    settings.MITX_FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
"""
def get_dummy_post_data(username, password, email, name):
"""
Return a dictionary suitable for passing to post_vars of _do_create_account or post_override
of create_account, with specified values.
"""
return {'username': username,
'email': email,
'password': password,
'name': name,
'honor_code': u'true',
'terms_of_service': u'true', }
# generate random user credentials from a small name space (determined by settings)
name_base = 'USER_'
pass_base = 'PASS_'
max_users = settings.MITX_FEATURES.get('MAX_AUTO_AUTH_USERS', 200)
number = random.randint(1, max_users)
# Get the params from the request to override default user attributes if specified
qdict = request.GET
# Use the params from the request, otherwise use these defaults
username = qdict.get('username', name_base + str(number))
password = qdict.get('password', pass_base + str(number))
email = qdict.get('email', '%s_dummy_test@mitx.mit.edu' % username)
name = qdict.get('name', '%s Test' % username)
# if they already are a user, log in
try:
user = User.objects.get(username=username)
user = authenticate(username=username, password=password, request=request)
login(request, user)
# else create and activate account info
except ObjectDoesNotExist:
post_override = get_dummy_post_data(username, password, email, name)
create_account(request, post_override=post_override)
request.user.is_active = True
request.user.save()
# return empty success
return HttpResponse('')
@ensure_csrf_cookie
def activate_account(request, key):
''' When link in activation e-mail is clicked
'''
r = Registration.objects.filter(activation_key=key)
if len(r) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not r[0].user.is_active:
r[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
student = User.objects.filter(id=r[0].user_id)
if student:
ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
for cea in ceas:
if cea.auto_enroll:
CourseEnrollment.enroll(student[0], cea.course_id)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(r) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponse(_("Unknown error. Please e-mail us to let us know how it happened."))
@ensure_csrf_cookie
def password_reset(request):
''' Attempts to send a password reset e-mail. '''
if request.method != "POST":
raise Http404
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=settings.DEFAULT_FROM_EMAIL,
request=request,
domain_override=request.get_host())
return HttpResponse(json.dumps({'success': True,
'value': render_to_string('registration/password_reset_done.html', {})}))
else:
return HttpResponse(json.dumps({'success': False,
'error': _('Invalid e-mail or user')}))
def password_reset_confirm_wrapper(
request,
uidb36=None,
token=None,
):
''' A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
'''
# cribbed from django.contrib.auth.views.password_reset_confirm
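    # uidb36 is the user's primary key encoded in base 36, as embedded in Django's reset URLs.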
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
user.is_active = True
user.save()
except (ValueError, User.DoesNotExist):
pass
# we also want to pass settings.PLATFORM_NAME in as extra_context
extra_context = {"platform_name": settings.PLATFORM_NAME}
return password_reset_confirm(
request, uidb36=uidb36, token=token, extra_context=extra_context
)
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return HttpResponse(json.dumps({'success': False,
'error': _('No inactive user with this e-mail exists')}))
d = {'name': user.profile.name,
'key': reg.activation_key}
subject = render_to_string('emails/activation_email_subject.txt', d)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', d)
try:
_res = user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
    except Exception:
log.warning('Unable to send reactivation email', exc_info=True)
return HttpResponse(json.dumps({'success': False, 'error': _('Unable to send reactivation email')}))
return HttpResponse(json.dumps({'success': True}))
@ensure_csrf_cookie
def change_email_request(request):
''' AJAX call from the profile page. User wants a new e-mail.
'''
## Make sure it checks for existing e-mail conflicts
    if not request.user.is_authenticated():
raise Http404
user = request.user
if not user.check_password(request.POST['password']):
return HttpResponse(json.dumps({'success': False,
'error': _('Invalid password')}))
new_email = request.POST['new_email']
try:
validate_email(new_email)
except ValidationError:
return HttpResponse(json.dumps({'success': False,
'error': _('Valid e-mail address required.')}))
if User.objects.filter(email=new_email).count() != 0:
## CRITICAL TODO: Handle case sensitivity for e-mails
return HttpResponse(json.dumps({'success': False,
'error': _('An account with this e-mail already exists.')}))
pec_list = PendingEmailChange.objects.filter(user=request.user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
pec.new_email = request.POST['new_email']
pec.activation_key = uuid.uuid4().hex
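    # uuid4().hex provides an unguessable one-time key for the confirmation link.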
pec.save()
if pec.new_email == user.email:
pec.delete()
return HttpResponse(json.dumps({'success': False,
'error': _('Old email is the same as the new email.')}))
d = {'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email}
subject = render_to_string('emails/email_change_subject.txt', d)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', d)
_res = send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [pec.new_email])
return HttpResponse(json.dumps({'success': True}))
@ensure_csrf_cookie
@transaction.commit_manually
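# Transactions are committed by hand so every database change can be rolled back if sending an email fails.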
def confirm_email_change(request, key):
''' User requested a new e-mail. This is called when the activation
link is clicked. We confirm with the old e-mail, and update
'''
try:
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
transaction.rollback()
return render_to_response("invalid_email_key.html", {})
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
transaction.rollback()
return render_to_response("email_exists.html", {})
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
up = UserProfile.objects.get(user=user)
meta = up.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
up.set_meta(meta)
up.save()
# Send it to the old email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception:
transaction.rollback()
log.warning('Unable to send confirmation email to old address', exc_info=True)
return render_to_response("email_change_failed.html", {'email': user.email})
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception:
transaction.rollback()
log.warning('Unable to send confirmation email to new address', exc_info=True)
return render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.commit()
return render_to_response("email_change_successful.html", address_context)
except Exception:
# If we get an unexpected exception, be sure to rollback the transaction
transaction.rollback()
raise
@ensure_csrf_cookie
def change_name_request(request):
''' Log a request for a new name. '''
    if not request.user.is_authenticated():
raise Http404
try:
pnc = PendingNameChange.objects.get(user=request.user)
except PendingNameChange.DoesNotExist:
pnc = PendingNameChange()
pnc.user = request.user
pnc.new_name = request.POST['new_name']
pnc.rationale = request.POST['rationale']
if len(pnc.new_name) < 2:
return HttpResponse(json.dumps({'success': False, 'error': _('Name required')}))
pnc.save()
# The following automatically accepts name change requests. Remove this to
# go back to the old system where it gets queued up for admin approval.
accept_name_change_by_id(pnc.id)
return HttpResponse(json.dumps({'success': True}))
@ensure_csrf_cookie
def pending_name_changes(request):
''' Web page which allows staff to approve or reject name changes. '''
if not request.user.is_staff:
raise Http404
changes = list(PendingNameChange.objects.all())
js = {'students': [{'new_name': c.new_name,
'rationale': c.rationale,
'old_name': UserProfile.objects.get(user=c.user).name,
'email': c.user.email,
'uid': c.user.id,
'cid': c.id} for c in changes]}
return render_to_response('name_changes.html', js)
@ensure_csrf_cookie
def reject_name_change(request):
''' JSON: Name change process. Course staff clicks 'reject' on a given name change '''
if not request.user.is_staff:
raise Http404
try:
pnc = PendingNameChange.objects.get(id=int(request.POST['id']))
except PendingNameChange.DoesNotExist:
return HttpResponse(json.dumps({'success': False, 'error': _('Invalid ID')}))
pnc.delete()
return HttpResponse(json.dumps({'success': True}))
def accept_name_change_by_id(pnc_id):
    try:
        pnc = PendingNameChange.objects.get(id=pnc_id)
except PendingNameChange.DoesNotExist:
return HttpResponse(json.dumps({'success': False, 'error': _('Invalid ID')}))
u = pnc.user
up = UserProfile.objects.get(user=u)
# Save old name
meta = up.get_meta()
if 'old_names' not in meta:
meta['old_names'] = []
meta['old_names'].append([up.name, pnc.rationale, datetime.datetime.now(UTC).isoformat()])
up.set_meta(meta)
up.name = pnc.new_name
up.save()
pnc.delete()
return HttpResponse(json.dumps({'success': True}))
@ensure_csrf_cookie
def accept_name_change(request):
''' JSON: Name change process. Course staff clicks 'accept' on a given name change
We used this during the prototype but now we simply record name changes instead
of manually approving them. Still keeping this around in case we want to go
back to this approval method.
'''
if not request.user.is_staff:
raise Http404
return accept_name_change_by_id(int(request.POST['id']))
| wwj718/edx-video | common/djangoapps/student/views.py | Python | agpl-3.0 | 50,206 | 0.002769 |
#!/usr/bin/env python3
from os import environ, system
from subprocess import Popen
print('\nUltimate Doom (Classic)')
print('Link: https://store.steampowered.com/app/2280/Ultimate_Doom/\n')
home = environ['HOME']
core = home + '/bin/games/steam-connect/steam-connect-core.py'
logo = home + '/bin/games/steam-connect/doom-logo.txt'
game = 'doom-1'
stid = '2280'
proc = 'gzdoom'
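# dmflags is a bitmask of gameplay options passed to gzdoom via "+set".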
flag = ' +set dmflags 4521984'
conf = ' -config ' + home + '/.config/gzdoom/gzdoom-classic.ini'
save = ' -savedir ' + home + '/.config/gzdoom/saves/' + game
iwad = ' -iwad DOOM.WAD'
mods = ' -file music-doom.zip sprite-fix-6-d1.zip doom-sfx-high.zip speed-weapons.zip'
args = proc + flag + conf + save + iwad + mods
system('cat ' + logo)
Popen([core, stid, args]).wait()
| noirhat/bin | games/doom/doom-1.py | Python | gpl-2.0 | 755 | 0.003974 |
import csv
from django.db import transaction
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class CSVImportError(Exception):
pass
class ImportCSVForm(forms.Form):
csv_file = forms.FileField(required=True, label=_('CSV File'))
has_headers = forms.BooleanField(
label=_('Has headers'),
help_text=_('Check this if your CSV file '
'has a row with column headers.'),
initial=True,
required=False,
)
def __init__(self, *args, **kwargs):
self.importer_class = kwargs.pop('importer_class')
self.dialect = kwargs.pop('dialect')
super(ImportCSVForm, self).__init__(*args, **kwargs)
self.fields['csv_file'].help_text = "Expected fields: {}".format(self.expected_fields)
def clean_csv_file(self):
if six.PY3:
# DictReader expects a str, not bytes in Python 3.
csv_text = self.cleaned_data['csv_file'].read()
csv_decoded = six.StringIO(csv_text.decode('utf-8'))
return csv_decoded
else:
return self.cleaned_data['csv_file']
@property
def expected_fields(self):
fields = self.importer_class._meta.fields
return ', '.join(fields)
@transaction.atomic
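    # The whole import runs in one transaction; raising CSVImportError aborts it and rolls back every row.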
def import_csv(self):
try:
reader = csv.DictReader(
self.cleaned_data['csv_file'],
fieldnames=self.importer_class._meta.fields,
dialect=self.dialect,
)
reader_iter = enumerate(reader, 1)
if self.cleaned_data['has_headers']:
six.advance_iterator(reader_iter)
self.process_csv(reader_iter)
if not self.is_valid():
raise CSVImportError() # Abort the transaction
except csv.Error:
self.append_import_error(_("Bad CSV format"))
raise CSVImportError()
def process_csv(self, reader):
for i, row in reader:
self.process_row(i, row)
def append_import_error(self, error, rownumber=None, column_name=None):
if rownumber is not None:
if column_name is not None:
# Translators: "{row}", "{column}" and "{error}"
# should not be translated
fmt = _("Could not import row #{row}: {column} - {error}")
else:
# Translators: "{row}" and "{error}" should not be translated
fmt = _("Could not import row #{row}: {error}")
else:
if column_name is not None:
raise ValueError("Cannot raise a CSV import error on a specific "
"column with no row number.")
else:
# Translators: "{error}" should not be translated
fmt = _("Could not import the CSV document: {error}")
if NON_FIELD_ERRORS not in self._errors:
self._errors[NON_FIELD_ERRORS] = self.error_class()
self._errors[NON_FIELD_ERRORS].append(
fmt.format(error=error, row=rownumber, column=column_name))
def process_row(self, i, row):
importer = self.importer_class(data=row)
if importer.is_valid():
importer.save()
else:
for error in importer.non_field_errors():
self.append_import_error(rownumber=i, error=error)
for field in importer:
for error in field.errors:
self.append_import_error(rownumber=i, column_name=field.label,
error=error)
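# A hypothetical usage sketch (PersonImporter and Person are illustrative names,
# not part of this module). The importer is expected to be a ModelForm-like class
# whose Meta.fields give the CSV column order:
#
#     class PersonImporter(forms.ModelForm):
#         class Meta:
#             model = Person
#             fields = ('first_name', 'last_name', 'email')
#
#     form = ImportCSVForm(data=request.POST, files=request.FILES,
#                          importer_class=PersonImporter, dialect='excel')
#     if form.is_valid():
#         try:
#             form.import_csv()
#         except CSVImportError:
#             pass  # row-level errors are collected on the form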
| fusionbox/django-importcsvadmin | importcsvadmin/forms.py | Python | bsd-2-clause | 3,694 | 0.000812 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.json import dumps
import MaKaC
def jsonDescriptor(obj):
    # TODO: Merge with locators?
    if isinstance(obj, MaKaC.conference.Conference):
        return {'conference': obj.getId()}
    elif isinstance(obj, MaKaC.conference.Contribution):
        return {'conference': obj.getConference().getId(),
                'contribution': obj.getId()}
    elif isinstance(obj, MaKaC.conference.Session):
        return {'conference': obj.getConference().getId(),
                'session': obj.getId()}
    elif isinstance(obj, MaKaC.conference.SessionSlot):
        return {'conference': obj.getConference().getId(),
                'session': obj.getSession().getId(),
                'slot': obj.getId()}
    elif isinstance(obj, MaKaC.schedule.BreakTimeSchEntry):
        info = {'conference': obj.getOwner().getConference().getId(),
                'break': obj.getId()}
        if isinstance(obj.getOwner(), MaKaC.conference.SessionSlot):
            info['slot'] = obj.getOwner().getId()
            info['session'] = obj.getOwner().getSession().getId()
        return info
    return None
def jsonDescriptorType(descriptor):
if 'break' in descriptor:
return MaKaC.schedule.BreakTimeSchEntry
elif 'slot' in descriptor:
return MaKaC.conference.SessionSlot
elif 'contribution' in descriptor:
return MaKaC.conference.Contribution
elif 'session' in descriptor:
return MaKaC.conference.Session
elif 'conference' in descriptor:
return MaKaC.conference.Conference
else:
return None
def decideInheritanceText(event):
if isinstance(event, MaKaC.conference.SessionSlot):
text = _("Inherit from parent slot")
elif isinstance(event, MaKaC.conference.Session):
text = _("Inherit from parent session")
elif isinstance(event, MaKaC.conference.Conference):
text = _("Inherit from parent event")
    else:
        text = repr(event)
return text
def roomInfo(event, level='real'):
# gets inherited/real/own location/room properties
if level == 'inherited':
room = event.getInheritedRoom()
location = event.getInheritedLocation()
text = decideInheritanceText(event.getLocationParent())
elif level == 'real':
room = event.getRoom()
location = event.getLocation()
text = decideInheritanceText(event)
elif level == 'own':
room = event.getOwnRoom()
location = event.getOwnLocation()
text = ''
locationName, roomName, address = None, None, None
if location:
locationName = location.getName()
address = location.getAddress()
if room:
roomName = room.getName()
return {'location': locationName,
'room': roomName,
'address': address,
'text': text}
| XeCycle/indico | indico/MaKaC/services/interface/rpc/offline.py | Python | gpl-3.0 | 3,627 | 0.001103 |
#
# Here is a more complicated example that loads a .csv file and
# then creates a plot from the x,y data in it.
# The data file is the saved curve from partsim.com of the low pass filter.
# It was saved as xls file and then opened in Excel and exported to csv
#
# First import the csv parser, the numeric tools and plotting tools
import csv
import numpy as np # This gives numpy the shorthand np
import matplotlib.pyplot as plt
#
# Open the file
#
f = open("low_pass_filter.csv")
#
# Pass the file to the csv parser
#
data = csv.reader(f)
headers = data.next()
units = data.next()
#
# Here is a "wicked" way in Python that does quicker what the
# the more verbose code does below. It is "Matlab" like.
# dat = np.array([ [float(z) for z in x] for x in data ]) # put the data in dat as floats.
# x_ar = dat[:,0] # select the first column
# y1_ar = dat[:,1] # select the second column
# y2_ar = dat[:,2] # select the third column
x_ar = [] # Create a new list (array) called dat to hold the data.
y1_ar = []
y2_ar = []
for (x,y1,y2) in data: # Unpack the csv data into x,y1,y2 variables.
x_ar.append( float(x))
y1_ar.append(float(y1))
    y2_ar.append(float(y2))              # Convert each string to float and append to the lists.
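#
# All rows have been consumed at this point, so the input file can be closed.
f.close()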
#
# Now plot the data. plt.plot returns a tuple (plot, )
#
(p1,) = plt.plot(x_ar,y1_ar,color='green',label=headers[1])
(p2,) = plt.plot(x_ar,y2_ar,color='blue',label=headers[2])
plt.legend(handles=[p1,p2]) # make sure the legend is drawn
plt.xscale('log') # plot with a log x axis
plt.yscale('log')
plt.grid(True) # and a grid.
plt.title('Low pass filter')
plt.xlabel('F[Hz]',position=(0.9,1))
plt.ylabel('Amplitude [Volt]')
plt.show() # show the plot.
| mholtrop/Phys605 | Python/Getting_Started/CSV_Plot.py | Python | gpl-3.0 | 1,762 | 0.009081 |
import numpy as np
import logging
import glob
import bandwise_features as BF
import time
import mir3.modules.features.stats as feat_stats
import mir3.modules.tool.to_texture_window as texture_window
import remove_random_noise as rrn
from multiprocessing import Pool
logger = logging.getLogger("birdclef_tza_bands")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
class BandJob:
"""
:type filename: string
:type band_iterator: string
:type band_step: int
:type lnf_use: bool
:type lnf_compensation: string
:type lnf_passes: int
"""
def __init__(self, filename, band_iterator='mel', band_step=500, band_nbands=None, also_one_band=False, lnf_use=False, lnf_compensation='log10', lnf_passes=1):
self.filename = filename
self.band_iterator = band_iterator
self.band_step = band_step
self.band_nbands = band_nbands
self.also_one_band=also_one_band
self.lnf_use = lnf_use
self.lnf_compensation = lnf_compensation
self.lnf_passes = lnf_passes
class BandExperiment:
def __init__(self, mirex_list_file, mirex_scratch_folder,
output_file,
band_iterator='mel',
band_step=500,
band_nbands=None,
also_one_band=False,
lnf_use=False,
lnf_compensation='log10',
lnf_passes=1,
mean=True, variance=True, slope=False, limits=False, csv=False, normalize=True):
self.mirex_list_file=mirex_list_file
self.mirex_scratch_folder=mirex_scratch_folder
self.output_file=output_file
self.band_iterator=band_iterator
self.band_step=band_step
self.band_nbands=band_nbands
self.also_one_band=also_one_band
self.lnf_use=lnf_use
self.lnf_compensation=lnf_compensation
self.lnf_passes=lnf_passes
self.mean=mean
self.variance=variance
self.slope=slope
self.limits=limits
self.csv=csv
self.normalize=normalize
# def tza_sep_bands_parallel(experiment, n_processes = 1):
# """
# :type experiment: BandExperiment
# :type n_processes: int
# """
#
# files = sorted(glob.glob(experiment.wav_path + "*.wav"))
# jobs = []
# for f in files:
# jobs.append(BandJob(f, experiment.band_iterator, experiment.band_step, experiment.band_nbands,
# lnf_use=experiment.lnf_use,
# lnf_compensation=experiment.lnf_compensation,
# lnf_passes=experiment.lnf_passes))
#
# pool = Pool(processes=n_processes)
#
# features = pool.map(tza_sep_bands, jobs)
#
# pool.close()
# pool.join()
#
# n_bands = (len(features[0]) - 2) / 6
#
# print "number of bands: ", n_bands, len(features[0])
#
# bands = dict()
#
# for band in features:
# for i in range(0, len(band)-2, 6):
# track_feats = []
# for k in range(6):
# track_feats.append(band[i+k])
# key = band[i].metadata.feature.split("_")[1]
# if not bands.has_key(key):
# bands[key] = []
# bands[key].append(track_feats)
#
# for band in bands:
# print band
# for track in bands[band]:
# print track[0].metadata.filename
# for feature in track:
# print feature.metadata.feature
#
# #TODO: tenho que fazer o feats.join.... pra fazer o join precisa de um objeto Bandwise features
# #for band in bands:
#
#
# #
# # stats = feat_stats.Stats()
# # m = stats.stats(features,
# # mean=experiment.mean,
# # variance=experiment.variance,
# # slope=experiment.slope,
# # limits=experiment.limits,
# # csv=experiment.csv,
# # normalize=experiment.normalize)
# #
# # f = open(experiment.output_file, "wb")
# #
# # m.save(f)
# #
# # f.close()
#
# def tza_sep_bands(job):
# """
# :type job: BandJob
# """
#
# if job.lnf_use:
# feats = BF.BandwiseFeatures(job.filename, db_spec=False)
# rrn.remove_random_noise(feats.spectrogram, filter_compensation=job.lnf_compensation, passes=job.lnf_passes)
# feats.spec_to_db()
# else:
# feats = BF.BandwiseFeatures(job.filename)
#
# if job.band_iterator == 'one':
# a = BF.OneBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq))
#
# if job.band_iterator == 'linear':
# a = BF.LinearBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq),
# step=job.band_step,
# nbands=job.band_nbands)
# if job.band_iterator == 'mel':
# a = BF.MelBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq),
# step=job.band_step,
# nbands=job.band_nbands)
#
# logger.debug("Extracting features for %s", job.filename)
# T0 = time.time()
# feats.calculate_features_per_band(a)
# T1 = time.time()
# logger.debug("Feature extraction took %f seconds", T1 - T0)
#
# return feats.band_features
def tza_bands_parallel(experiment, n_processes = 1):
"""
:type experiment: BandExperiment
:type n_processes: int
"""
jobs = []
with open(experiment.mirex_list_file) as f:
files = f.read().splitlines()
for f in files:
jobs.append(BandJob(f, experiment.band_iterator, experiment.band_step, experiment.band_nbands,
also_one_band=experiment.also_one_band,
lnf_use=experiment.lnf_use,
lnf_compensation=experiment.lnf_compensation,
lnf_passes=experiment.lnf_passes))
    # calculate features
pool = Pool(processes=n_processes)
features = pool.map(tza_bands, jobs)
pool.close()
pool.join()
jobs = []
for f in features:
jobs.append((f, 100))
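    # each job pairs a feature track with the texture window size (100) consumed by tza_calc_textures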
    # calculate texture windows
pool = Pool(processes=n_processes)
textures = pool.map(tza_calc_textures, jobs)
pool.close()
pool.join()
stats = feat_stats.Stats()
m = stats.stats(textures,
mean=experiment.mean,
variance=experiment.variance,
slope=experiment.slope,
limits=experiment.limits,
csv=experiment.csv,
normalize=experiment.normalize)
f = open(experiment.mirex_scratch_folder + "/" + experiment.output_file, "wb")
m.save(f, restore_state=True)
f.close()
return m
def tza_calc_textures(args):
tw = texture_window.ToTextureWindow()
feature = args[0]
logger.debug("calculating textures for %s", feature.metadata.filename)
T0 = time.time()
results = tw.to_texture(feature, args[1])
T1 = time.time()
logger.debug("texture calculation took %f seconds", T1-T0)
return results
def tza_bands(job):
"""
:type job: BandJob
"""
if job.lnf_use:
feats = BF.BandwiseFeatures(job.filename, db_spec=False)
rrn.remove_random_noise(feats.spectrogram, filter_compensation=job.lnf_compensation, passes=job.lnf_passes)
feats.spec_to_db()
else:
feats = BF.BandwiseFeatures(job.filename)
if job.band_iterator == 'one':
a = BF.OneBand(low=int(feats.spectrogram.metadata.min_freq),
high=int(feats.spectrogram.metadata.max_freq))
if job.band_iterator == 'linear':
a = BF.LinearBand(low=int(feats.spectrogram.metadata.min_freq),
high=int(feats.spectrogram.metadata.max_freq),
step=job.band_step,
nbands=job.band_nbands)
if job.band_iterator == 'mel':
a = BF.MelBand(low=int(feats.spectrogram.metadata.min_freq),
high=int(feats.spectrogram.metadata.max_freq),
step=job.band_step,
nbands=job.band_nbands)
logger.debug("Extracting features for %s", job.filename)
T0 = time.time()
feats.calculate_features_per_band(a, also_one_band=job.also_one_band, discard_bin_zero=True)
T1 = time.time()
logger.debug("Feature extraction took %f seconds", T1 - T0)
feats.join_bands(crop=True)
return feats.joined_features
def MIREX_ExtractFeatures(scratch_folder, feature_extraction_list, n_processes,**kwargs):
    also_one_band = kwargs.get("also_one_band") is True
exp = BandExperiment(feature_extraction_list, scratch_folder,
output_file=kwargs['output_file'],
band_iterator=kwargs['band_iterator'],
band_nbands=kwargs['band_nbands'],
also_one_band=also_one_band)
if also_one_band:
print 'also running fullband'
return tza_bands_parallel(exp, n_processes=n_processes)
if __name__ == "__main__":
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_bands_500.fm", band_iterator='linear', band_step=500)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_bands_1000.fm", band_iterator='linear', band_step=1000)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_bands_2000.fm", band_iterator='linear', band_step=2000)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_bands_100.fm", band_iterator='mel', band_step=100)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_bands_300.fm", band_iterator='mel', band_step=300)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_bands_500.fm", band_iterator='mel', band_step=500)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_bands_1000.fm", band_iterator='mel', band_step=1000)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_one_band.fm", band_iterator='one')
# tza_bands_parallel(exp, n_processes=4)
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_10b.fm", band_iterator='linear', band_nbands=10)
# tza_bands_parallel(exp, n_processes=4)
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_30b.fm", band_iterator='linear', band_nbands=30)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_50b.fm", band_iterator='linear', band_nbands=50)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_10b.fm", band_iterator='mel', band_nbands=10)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_30b.fm", band_iterator='mel', band_nbands=30)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_50b.fm", band_iterator='mel', band_nbands=50)
# tza_bands_parallel(exp, n_processes=4)
####WITHTEXTURES############
MIREX_ExtractFeatures("fm/genres", "mirex/file_lists/gtzan_small2.txt", 4, also_one_band=True, output_file="teste.fm", band_iterator='mel', band_nbands=10)
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_one_band_tex.fm", band_iterator='one')
# tza_bands_parallel(exp, n_processes=4)
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_10b_tex.fm", band_iterator='linear', band_nbands=10)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_30b_tex.fm", band_iterator='linear', band_nbands=30)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_50b_tex.fm", band_iterator='linear', band_nbands=50)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_10b_tex.fm", band_iterator='mel', band_nbands=10)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_30b_tex.fm", band_iterator='mel', band_nbands=30)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_50b_tex.fm", band_iterator='mel', band_nbands=50)
# tza_bands_parallel(exp, n_processes=4)
#exp = BandExperiment("./lesslinks/", "sepbands", band_iterator='mel', band_nbands=10)
#tza_sep_bands_parallel(exp, n_processes=4) | pymir3/pymir3 | scripts/ismir2016/birdclef_tza_bands.py | Python | mit | 13,712 | 0.004959 |
# -*- coding=utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import os
import sys
import unittest
try:
import mock
except ImportError:
from unittest import mock
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.common.types import InvalidCredsError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.oss import OSSConnection
from libcloud.storage.drivers.oss import OSSStorageDriver
from libcloud.storage.drivers.oss import CHUNK_SIZE
from libcloud.storage.drivers.dummy import DummyIterator
from libcloud.test import MockHttp, generate_random_data, make_response # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_OSS_PARAMS
class OSSConnectionTestCase(unittest.TestCase):
def setUp(self):
self.conn = OSSConnection('44CF9590006BF252F707',
'OtxrzxIsfpFjA7SwPzILwy8Bw21TLhquhboDYROV')
def test_signature(self):
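        # Verify header canonicalization and signing against a precomputed reference digest.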
expected = b('26NBxoKdsyly4EDv6inkoDft/yA=')
headers = {
'Content-MD5': 'ODBGOERFMDMzQTczRUY3NUE3NzA5QzdFNUYzMDQxNEM=',
'Content-Type': 'text/html',
'Expires': 'Thu, 17 Nov 2005 18:49:58 GMT',
'X-OSS-Meta-Author': 'foo@bar.com',
'X-OSS-Magic': 'abracadabra',
'Host': 'oss-example.oss-cn-hangzhou.aliyuncs.com'
}
action = '/oss-example/nelson'
actual = OSSConnection._get_auth_signature('PUT', headers, {},
headers['Expires'],
self.conn.key,
action,
'x-oss-')
self.assertEqual(expected, actual)
class ObjectTestCase(unittest.TestCase):
def test_object_with_chinese_name(self):
driver = OSSStorageDriver(*STORAGE_OSS_PARAMS)
obj = Object(name='中文', size=0, hash=None, extra=None,
meta_data=None, container=None, driver=driver)
self.assertTrue(obj.__repr__() is not None)
class OSSMockHttp(MockHttp, unittest.TestCase):
fixtures = StorageFileFixtures('oss')
base_headers = {}
def _unauthorized(self, method, url, body, headers):
return (httplib.UNAUTHORIZED,
'',
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers_empty(self, method, url, body, headers):
body = self.fixtures.load('list_containers_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers(self, method, url, body, headers):
body = self.fixtures.load('list_containers.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_empty(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_chinese(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects_chinese.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_prefix(self, method, url, body, headers):
params = {'prefix': self.test.prefix}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('list_container_objects_prefix.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _get_container(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _get_object(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _notexisted_get_object(self, method, url, body, headers):
return (httplib.NOT_FOUND,
body,
self.base_headers,
httplib.responses[httplib.NOT_FOUND])
def _test_get_object(self, method, url, body, headers):
self.base_headers.update(
{'accept-ranges': 'bytes',
'connection': 'keep-alive',
'content-length': '0',
'content-type': 'application/octet-stream',
'date': 'Sat, 16 Jan 2016 15:38:14 GMT',
'etag': '"D41D8CD98F00B204E9800998ECF8427E"',
'last-modified': 'Fri, 15 Jan 2016 14:43:15 GMT',
'server': 'AliyunOSS',
'x-oss-object-type': 'Normal',
'x-oss-request-id': '569A63E6257784731E3D877F',
'x-oss-meta-rabbits': 'monkeys'})
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _invalid_name(self, method, url, body, headers):
# test_create_container_bad_request
return (httplib.BAD_REQUEST,
body,
headers,
httplib.responses[httplib.OK])
def _already_exists(self, method, url, body, headers):
# test_create_container_already_existed
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _create_container(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
self.assertEqual('', body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _create_container_location(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
location_constraint = ('<CreateBucketConfiguration>'
'<LocationConstraint>%s</LocationConstraint>'
'</CreateBucketConfiguration>' %
self.test.ex_location)
self.assertEqual(location_constraint, body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_doesnt_exist(self, method, url, body, headers):
# test_delete_container_doesnt_exist
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_not_empty(self, method, url, body, headers):
# test_delete_container_not_empty
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container(self, method, url, body, headers):
return (httplib.NO_CONTENT,
body,
self.base_headers,
httplib.responses[httplib.NO_CONTENT])
def _foo_bar_object_not_found(self, method, url, body, headers):
# test_delete_object_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object_delete(self, method, url, body, headers):
# test_delete_object
return (httplib.NO_CONTENT,
body,
headers,
httplib.responses[httplib.OK])
def _list_multipart(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
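        # key-marker is only sent on continuation requests, so it selects the second fixture page.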
if 'key-marker' not in query:
body = self.fixtures.load('ex_iterate_multipart_uploads_p1.xml')
else:
body = self.fixtures.load('ex_iterate_multipart_uploads_p2.xml')
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object(self, method, url, body, headers):
# test_download_object_success
body = generate_random_data(1000)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object_invalid_size(self, method, url, body, headers):
# test_upload_object_invalid_file_size
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_stream_data_multipart(self, method, url, body, headers):
        body = ''
        headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
class OSSStorageDriverTestCase(unittest.TestCase):
driver_type = OSSStorageDriver
driver_args = STORAGE_OSS_PARAMS
mock_response_klass = OSSMockHttp
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args)
def setUp(self):
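        # Route all driver connections through the mock HTTP class so no real network calls are made.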
self.driver_type.connectionCls.conn_class = self.mock_response_klass
self.mock_response_klass.type = None
self.mock_response_klass.test = self
self.driver = self.create_driver()
def tearDown(self):
self._remove_test_file()
def _remove_test_file(self):
file_path = os.path.abspath(__file__) + '.temp'
try:
os.unlink(file_path)
except OSError:
pass
def test_invalid_credentials(self):
self.mock_response_klass.type = 'unauthorized'
self.assertRaises(InvalidCredsError, self.driver.list_containers)
def test_list_containers_empty(self):
self.mock_response_klass.type = 'list_containers_empty'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
def test_list_containers_success(self):
self.mock_response_klass.type = 'list_containers'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 2)
container = containers[0]
self.assertEqual('xz02tphky6fjfiuc0', container.name)
self.assertTrue('creation_date' in container.extra)
self.assertEqual('2014-05-15T11:18:32.000Z',
container.extra['creation_date'])
self.assertTrue('location' in container.extra)
self.assertEqual('oss-cn-hangzhou-a', container.extra['location'])
self.assertEqual(self.driver, container.driver)
def test_list_container_objects_empty(self):
self.mock_response_klass.type = 'list_container_objects_empty'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
def test_list_container_objects_success(self):
self.mock_response_klass.type = 'list_container_objects'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 2)
obj = objects[0]
self.assertEqual(obj.name, 'en/')
self.assertEqual(obj.hash, 'D41D8CD98F00B204E9800998ECF8427E')
self.assertEqual(obj.size, 0)
self.assertEqual(obj.container.name, 'test_container')
self.assertEqual(
obj.extra['last_modified'], '2016-01-15T14:43:15.000Z')
self.assertTrue('owner' in obj.meta_data)
def test_list_container_objects_with_chinese(self):
self.mock_response_klass.type = 'list_container_objects_chinese'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 2)
obj = [o for o in objects
if o.name == 'WEB控制台.odp'][0]
self.assertEqual(obj.hash, '281371EA1618CF0E645D6BB90A158276')
self.assertEqual(obj.size, 1234567)
self.assertEqual(obj.container.name, 'test_container')
self.assertEqual(
obj.extra['last_modified'], '2016-01-15T14:43:06.000Z')
self.assertTrue('owner' in obj.meta_data)
def test_list_container_objects_with_prefix(self):
self.mock_response_klass.type = 'list_container_objects_prefix'
container = Container(name='test_container', extra={},
driver=self.driver)
self.prefix = 'test_prefix'
objects = self.driver.list_container_objects(container=container,
prefix=self.prefix)
self.assertEqual(len(objects), 2)
def test_get_container_doesnt_exist(self):
self.mock_response_klass.type = 'get_container'
self.assertRaises(ContainerDoesNotExistError,
self.driver.get_container,
container_name='not-existed')
def test_get_container_success(self):
self.mock_response_klass.type = 'get_container'
container = self.driver.get_container(
container_name='xz02tphky6fjfiuc0')
self.assertTrue(container.name, 'xz02tphky6fjfiuc0')
def test_get_object_container_doesnt_exist(self):
self.mock_response_klass.type = 'get_object'
self.assertRaises(ObjectDoesNotExistError,
self.driver.get_object,
container_name='xz02tphky6fjfiuc0',
object_name='notexisted')
def test_get_object_success(self):
self.mock_response_klass.type = 'get_object'
obj = self.driver.get_object(container_name='xz02tphky6fjfiuc0',
object_name='test')
self.assertEqual(obj.name, 'test')
self.assertEqual(obj.container.name, 'xz02tphky6fjfiuc0')
self.assertEqual(obj.size, 0)
self.assertEqual(obj.hash, 'D41D8CD98F00B204E9800998ECF8427E')
self.assertEqual(obj.extra['last_modified'],
'Fri, 15 Jan 2016 14:43:15 GMT')
self.assertEqual(obj.extra['content_type'], 'application/octet-stream')
self.assertEqual(obj.meta_data['rabbits'], 'monkeys')
def test_create_container_bad_request(self):
# invalid container name, returns a 400 bad request
self.mock_response_klass.type = 'invalid_name'
self.assertRaises(ContainerError,
self.driver.create_container,
container_name='invalid_name')
def test_create_container_already_exists(self):
# container with this name already exists
self.mock_response_klass.type = 'already_exists'
self.assertRaises(InvalidContainerNameError,
self.driver.create_container,
container_name='new-container')
def test_create_container_success(self):
# success
self.mock_response_klass.type = 'create_container'
name = 'new_container'
container = self.driver.create_container(container_name=name)
self.assertEqual(container.name, name)
def test_create_container_with_ex_location(self):
self.mock_response_klass.type = 'create_container_location'
name = 'new_container'
self.ex_location = 'oss-cn-beijing'
container = self.driver.create_container(container_name=name,
ex_location=self.ex_location)
self.assertEqual(container.name, name)
        self.assertEqual(container.extra['location'], self.ex_location)
def test_delete_container_doesnt_exist(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
self.mock_response_klass.type = 'delete_container_doesnt_exist'
self.assertRaises(ContainerDoesNotExistError,
self.driver.delete_container,
container=container)
def test_delete_container_not_empty(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
self.mock_response_klass.type = 'delete_container_not_empty'
self.assertRaises(ContainerIsNotEmptyError,
self.driver.delete_container,
container=container)
def test_delete_container_success(self):
self.mock_response_klass.type = 'delete_container'
container = Container(name='new_container', extra=None,
driver=self.driver)
self.assertTrue(self.driver.delete_container(container=container))
def test_download_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_invalid_file_size(self):
self.mock_response_klass.type = 'invalid_size'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertFalse(result)
def test_download_object_not_found(self):
self.mock_response_klass.type = 'not_found'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
self.assertRaises(ObjectDoesNotExistError,
self.driver.download_object,
obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
def test_download_object_as_stream_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
stream = self.driver.download_object_as_stream(obj=obj,
chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_upload_object_invalid_hash1(self):
def upload_file(self, object_name=None, content_type=None,
request_path=None, request_method=None,
headers=None, file_path=None, stream=None):
return {'response': make_response(200, headers={'etag': '2345'}),
'bytes_transferred': 1000,
'data_hash': 'hash343hhash89h932439jsaa89'}
self.mock_response_klass.type = 'INVALID_HASH1'
old_func = self.driver_type._upload_object
self.driver_type._upload_object = upload_file
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
try:
self.driver.upload_object(file_path=file_path, container=container,
object_name=object_name,
verify_hash=True)
except ObjectHashMismatchError:
pass
else:
self.fail(
'Invalid hash was returned but an exception was not thrown')
finally:
self.driver_type._upload_object = old_func
def test_upload_object_success(self):
def upload_file(self, object_name=None, content_type=None,
request_path=None, request_method=None,
headers=None, file_path=None, stream=None):
return {'response': make_response(200,
headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
'bytes_transferred': 1000,
'data_hash': '0cc175b9c0f1b6a831c399e269772661'}
self.mock_response_klass.type = None
old_func = self.driver_type._upload_object
self.driver_type._upload_object = upload_file
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, 1000)
self.assertTrue('some-value' in obj.meta_data)
self.driver_type._upload_object = old_func
def test_upload_object_with_acl(self):
def upload_file(self, object_name=None, content_type=None,
request_path=None, request_method=None,
headers=None, file_path=None, stream=None):
return {'response': make_response(200, headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
'bytes_transferred': 1000,
'data_hash': '0cc175b9c0f1b6a831c399e269772661'}
self.mock_response_klass.type = None
old_func = self.driver_type._upload_object
self.driver_type._upload_object = upload_file
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'acl': 'public-read'}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, 1000)
self.assertEqual(obj.extra['acl'], 'public-read')
self.driver_type._upload_object = old_func
def test_upload_object_with_invalid_acl(self):
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'acl': 'invalid-acl'}
self.assertRaises(AttributeError,
self.driver.upload_object,
file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
def test_upload_empty_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_response_klass.type = 'multipart'
else:
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(data=[''])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 0)
def test_upload_small_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_response_klass.type = 'multipart'
else:
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(data=['2', '3', '5'])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 3)
def test_upload_big_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_response_klass.type = 'multipart'
else:
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(
data=['2' * CHUNK_SIZE, '3' * CHUNK_SIZE, '5'])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, CHUNK_SIZE * 2 + 1)
def test_upload_object_via_stream_abort(self):
if not self.driver.supports_multipart_upload:
return
self.mock_response_klass.type = 'MULTIPART'
def _faulty_iterator():
for i in range(0, 5):
yield str(i)
raise RuntimeError('Error in fetching data')
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = _faulty_iterator()
extra = {'content_type': 'text/plain'}
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
except Exception:
pass
return
def test_ex_iterate_multipart_uploads(self):
if not self.driver.supports_multipart_upload:
return
self.mock_response_klass.type = 'list_multipart'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
for upload in self.driver.ex_iterate_multipart_uploads(container,
max_uploads=2):
self.assertTrue(upload.key is not None)
self.assertTrue(upload.id is not None)
self.assertTrue(upload.initiated is not None)
def test_ex_abort_all_multipart_uploads(self):
if not self.driver.supports_multipart_upload:
return
self.mock_response_klass.type = 'list_multipart'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
with mock.patch('libcloud.storage.drivers.oss.OSSStorageDriver'
'._abort_multipart', autospec=True) as mock_abort:
self.driver.ex_abort_all_multipart_uploads(container)
self.assertEqual(3, mock_abort.call_count)
def test_delete_object_not_found(self):
self.mock_response_klass.type = 'not_found'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
self.assertRaises(ObjectDoesNotExistError,
self.driver.delete_object,
obj=obj)
def test_delete_object_success(self):
self.mock_response_klass.type = 'delete'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
result = self.driver.delete_object(obj=obj)
self.assertTrue(result)
if __name__ == '__main__':
sys.exit(unittest.main())
| Kami/libcloud | libcloud/test/storage/test_oss.py | Python | apache-2.0 | 31,715 | 0.000126 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
Задание 1: классный Человек.
УСЛОВИЕ:
Реализовать класс Person, который отображает запись в книге контактов.
Класс имеет 4 атрибута:
- surname - строка - фамилия контакта (обязательный)
- first_name - строка - имя контакта (обязательный)
- nickname - строка - псевдоним (опциональный)
- birth_date - объект datetime.date (обязательный)
Каждый вызов класса должен создавать экземпляр (инстанс) класса с указанными
атрибутами.
Также класс имеет 2 метода:
- get_age() - считает возраст контакта в полных годах на дату вызова и
возвращает строку вида: "27";
- get_fullname() - возвращает строку, отражающую полное имя (фамилия + имя)
контакта;
"""
__author__ = "Sergei Shybkoi"
__copyright__ = "Copyright 2014, The Homework Project"
__email__ = "heap_@mail.ru"
__status__ = "Production"
__date__ = "2014-11-18"
import datetime
class Person(object):
u"""Класс Person"""
def __init__(self, surname, first_name, birth_date, nickname=None):
u"""Инишн класса"""
try:
var_date = datetime.datetime.strptime(birth_date, "%Y-%m-%d")
res_date = datetime.date(var_date.year,
var_date.month, var_date.day)
except TypeError:
print "Incorrect type of birthday date!"
res_date = None
except ValueError:
print "Wrong value of birthday date!"
res_date = None
self.surname = surname
self.first_name = first_name
self.birth_date = res_date
if nickname is not None:
self.nickname = nickname
def get_age(self):
u"""Метод класса подсчитывает и выводит количество полных лет"""
if self.birth_date is not None:
today_date = datetime.date.today()
delta = today_date.year - self.birth_date.year
            if (today_date.month, today_date.day) < \
                    (self.birth_date.month, self.birth_date.day):
                delta -= 1
print "Age:", delta
return str(delta)
else:
print "No correct data about person's birthday."
return "0"
def get_fullname(self):
u"""Метод выводит и возвращаем полное имя экземпляра класса Person"""
print self.surname, self.first_name
return self.surname + " " + self.first_name
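
# A minimal usage sketch (the sample data below is hypothetical):
if __name__ == "__main__":
    person = Person("Ivanov", "Ivan", "1987-06-15", nickname="vanya")
    person.get_fullname()  # prints and returns "Ivanov Ivan"
    person.get_age()       # prints and returns the age in full years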
| pybursa/homeworks | s_shybkoy/hw5/hw5_task1.py | Python | gpl-2.0 | 2,975 | 0.00041 |
# -*- coding:utf-8 -*-
'''
Execution script for the glxgears test tool
'''
import os, shutil, re
from test import BaseTest
from lpt.lib.error import *
from lpt.lib import lptxml
from lpt.lib import lptlog
from lpt.lib.share import utils
from lpt.lib import lptreport
import glob
glxgears_keys = ["gears"]
class TestControl(BaseTest):
    '''
    Inherits the attributes and methods of BaseTest
    '''
def __init__(self, jobs_xml, job_node, tool, tarball='UnixBench5.1.3-1.tar.bz2'):
super(TestControl, self).__init__(jobs_xml, job_node, tool, tarball)
def check_deps(self):
        '''Building ubgears requires the libX11-devel and libGL-devel packages
        '''
utils.has_gcc()
utils.has_file("libX11-devel", "/usr/include/X11/Xlib.h")
utils.has_file("libGL-devel", "/usr/include/GL/gl.h")
utils.has_file("libXext-devel","/usr/include/X11/extensions/Xext.h")
def setup(self):
        '''Compile the sources and set up the program
        '''
if not self.check_bin(self.processBin):
self.tar_src_dir = self.extract_bar()
os.chdir(self.tar_src_dir)
utils.make(extra='clean', make='make')
            # modify the Makefile
            lptlog.info("Modifying the Makefile: uncommenting '#GRAPHIC_TESTS = defined'")
cmd = '''sed -i "s/^#GRAPHIC_TESTS/GRAPHIC_TESTS/g" Makefile '''
utils.system(cmd)
self.compile(make_status=True)
os.chdir(self.lpt_root)
def run(self):
tool_node = self.check_tool_result_node()
lptlog.info("----------开始获取测试参数")
self.times = self.get_config_value(tool_node, "times", 10, valueType=int)
lptlog.info("测试次数: %d" % self.times)
self.parallels = [1]
cmd = "./Run"
args_list = ["ubgears", "-i", "%d" % self.times]
self.mainParameters["parameters"] = " ".join([cmd]+args_list)
        # run the unixbench program; change into the unixbench root directory
os.chdir(self.tar_src_dir)
utils.system("rm -rf results/*")
lptlog.info("---------运行测试脚本")
utils.run_shell2(cmd, args_list=args_list, file=os.devnull)
os.chdir(self.lpt_root)
def create_result(self):
        # process the result data
os.chdir(self.tar_src_dir)
temp_result_list = glob.glob("./results/*[0-9]")
if not temp_result_list:
raise NameError, "% result data not found.." % self.tool
else:
temp_result_file = temp_result_list[0]
self.__match_index(temp_result_file)
        # return to the root directory
os.chdir(self.lpt_root)
def __match_index(self, file):
        '''Parse the unixbench screen output
        '''
self.parallels = [1]
self.times = 3
result_dic = {}.fromkeys(glxgears_keys, 0)
result_lines = utils.read_all_lines(file)
for parallel in self.parallels:
re_match = "[\d]+ CPUs in system; running %d parallel copy of tests" % parallel
parallel_result_dic = result_dic.copy()
for line in result_lines:
if re.search(re_match, line, re.I):
parallel_index = result_lines.index(line)
paralell_result_list = [ self.__get_value(result_lines, parallel_index+index) for index in (5,) ]
for l,v in zip(tuple(glxgears_keys), tuple([utils.change_type(i) for i in paralell_result_list])):
parallel_result_dic[l] = "%.1f" % v
parallel_result_attrib = self.create_result_node_attrib("Average", self.times, parallel, self.parallels)
self.result_list.append([parallel_result_attrib, parallel_result_dic])
def __get_value(self, lines, index):
return lines[index].split()[-2]
| ShaolongHu/lpts | tests/glxgears.py | Python | gpl-2.0 | 3,938 | 0.01248 |
"""The tests for hls streams."""
from datetime import timedelta
from io import BytesIO
from unittest.mock import patch
from homeassistant.setup import async_setup_component
from homeassistant.components.stream.core import Segment
from homeassistant.components.stream.recorder import recorder_save_worker
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import (
generate_h264_video, preload_stream)
async def test_record_stream(hass, hass_client):
"""
Test record stream.
Purposefully not mocking anything here to test full
integration with the stream component.
"""
await async_setup_component(hass, 'stream', {
'stream': {}
})
with patch(
'homeassistant.components.stream.recorder.recorder_save_worker'):
# Setup demo track
source = generate_h264_video()
stream = preload_stream(hass, source)
recorder = stream.add_provider('recorder')
stream.start()
segments = 0
while True:
segment = await recorder.recv()
if not segment:
break
segments += 1
stream.stop()
assert segments > 1
async def test_recorder_timeout(hass, hass_client):
"""Test recorder timeout."""
await async_setup_component(hass, 'stream', {
'stream': {}
})
with patch(
'homeassistant.components.stream.recorder.RecorderOutput.cleanup'
) as mock_cleanup:
# Setup demo track
source = generate_h264_video()
stream = preload_stream(hass, source)
recorder = stream.add_provider('recorder')
stream.start()
await recorder.recv()
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert mock_cleanup.called
async def test_recorder_save():
"""Test recorder save."""
# Setup
source = generate_h264_video()
output = BytesIO()
output.name = 'test.mp4'
# Run
recorder_save_worker(output, [Segment(1, source, 4)])
# Assert
assert output.getvalue()
| auduny/home-assistant | tests/components/stream/test_recorder.py | Python | apache-2.0 | 2,235 | 0 |
#!/usr/bin/env python3
"""hello with args"""
import sys
import os
args = sys.argv
if len(args) != 2:
script = os.path.basename(args[0])
print('Usage: {} NAME'.format(script))
sys.exit(1)
name = args[1]
print('Hello, {}!'.format(name))
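
# Expected behaviour (assuming the script is saved as hello_arg3.py):
#   $ ./hello_arg3.py
#   Usage: hello_arg3.py NAME
#   $ ./hello_arg3.py World
#   Hello, World!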
| kyclark/metagenomics-book | python/hello/hello_arg3.py | Python | gpl-3.0 | 251 | 0 |
import os
import platform
import subprocess
import cat_service
from apps.general import headers
def main():
headers.print_header('LOLCAT FACTORY')
# look for a directory if not there create it
dir_path = get_or_create_output_folder()
n_cats = get_number_cats()
# contact the lol cat api, get binary
download_cats(dir_path, n_cats)
# launch explorer
display_cats(dir_path)
def get_or_create_output_folder():
    dir_path = os.path.join(os.path.expanduser('~'), 'Desktop', 'Lolcats')
if not os.path.exists(dir_path) or not os.path.isdir(dir_path):
os.mkdir(dir_path)
return dir_path
def get_number_cats():
    while True:
        number_files = input('On a scale of 1 to 10 how much cheering up do you need?')
        if number_files.isnumeric():
            return int(number_files)
        print('That was not a valid number, please try again')
def download_cats(dir_path, n_cats):
for i in range(n_cats):
cat_service.get_cat(dir_path, 'lol_cat{}.jpg'.format(i))
def display_cats(folder):
print('Opening folder: {}'.format(folder))
if platform.system() == 'Darwin':
subprocess.call(['open', folder])
elif platform.system() == 'Windows':
print('with windows')
subprocess.call(['explorer', folder])
elif platform.system() == 'Linux':
subprocess.call(['xdg-open', folder])
else:
print('Do not support your os "{}"'.format(platform.system()))
if __name__ == '__main__':
main()
| egancatriona1/python-jumpstart | apps/06_lolcat_factory/you_try/program.py | Python | mit | 1,553 | 0.000644 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twit', '0006_auto_20160419_0248'),
]
operations = [
migrations.CreateModel(
name='Retweet',
fields=[
('id', models.BigIntegerField(serialize=False, help_text='Unique id that comes from Twitter', primary_key=True)),
('created_at', models.DateTimeField(help_text='Time tweet was created')),
('tweet', models.ForeignKey(to='twit.Tweet')),
('user', models.ForeignKey(to='twit.User')),
],
),
]
| arunchaganty/presidential-debates | django/twit/migrations/0007_retweet.py | Python | mit | 697 | 0.002869 |
# -*- coding: utf-8 -*-
import socket
import struct
import signal
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
class Connection(object):
"""
サーバとの通信用クラス
"""
BINARY_INT = '!1I'
BINARY_TABLE = '!120I'
def __init__(self, addr='127.0.0.1', port=42485):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.sock.connect((addr, port))
def __enter__(self):
return self
def __exit__(self, *exc):
self.sock.close()
def recv_int(self):
unpacked_value = self._recv_msg(byte_length=4)
s = struct.Struct(self.BINARY_INT)
integer = s.unpack(unpacked_value)
return integer[0]
def recv_table(self):
unpacked_value = self._recv_msg(byte_length=480)
s = struct.Struct(self.BINARY_TABLE)
ls = s.unpack(unpacked_value)
        table = [ls[15 * i: 15 * (i + 1)][:] for i in range(8)]  # convert to an 8x15 list
return table
def _recv_msg(self, byte_length):
unpacked_data = b''
while len(unpacked_data) < byte_length:
chunk = self.sock.recv(byte_length - len(unpacked_data), 0)
if chunk == b'':
raise RuntimeError('socket connection broken')
unpacked_data += chunk
return unpacked_data
    # The table should not be built inside this method; the construction
    # should be split out and the resulting table passed to send_table.
def send_name(self, name, protocol=20070):
table = [[0] * 15 for i in range(8)]
table[0][0] = protocol
for i, ch in enumerate(name):
table[1][i] = ord(ch)
self.send_table(table)
def send_table(self, table):
        ls = [item for inner in table for item in inner]  # flatten the 2-D list into 1-D
s = struct.Struct(self.BINARY_TABLE)
packed_value = s.pack(*ls)
self._send_msg(packed_value)
def _send_msg(self, msg):
self.sock.sendall(msg)
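
# A minimal usage sketch (default address/port; the call sequence is an
# assumption for illustration, not the full client protocol):
if __name__ == '__main__':
    with Connection() as conn:
        conn.send_name('player1')
        table = conn.recv_table()  # an 8x15 table of unsigned ints
        print(table)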
| Hironsan/uecda-pyclient | src/connection.py | Python | mit | 1,996 | 0 |
from share.transform.chain.exceptions import * # noqa
from share.transform.chain.links import * # noqa
from share.transform.chain.parsers import * # noqa
from share.transform.chain.transformer import ChainTransformer # noqa
from share.transform.chain.links import Context
# Context singleton to be used for parser definitions
# Class SHOULD be thread safe
# Accessing subattributes will result in a new copy of the context
# to avoid leaking data between chains
ctx = Context()
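
# Illustrative sketch of how a parser module might use the singleton (the
# class and attribute names below are hypothetical placeholders):
#
#     from share.transform.chain import ctx, Parser
#
#     class CreativeWork(Parser):
#         title = ctx.title  # each attribute access yields a fresh chain copy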
| CenterForOpenScience/SHARE | share/transform/chain/__init__.py | Python | apache-2.0 | 484 | 0 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['checksum', 'logger', 'view']
db.delete_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
# Removing unique constraint on 'MessageFilterValue', fields ['group', 'value', 'key']
db.delete_unique('sentry_messagefiltervalue', ['group_id', 'value', 'key'])
# Removing unique constraint on 'FilterValue', fields ['key', 'value']
db.delete_unique('sentry_filtervalue', ['key', 'value'])
# Removing unique constraint on 'MessageCountByMinute', fields ['date', 'group']
db.delete_unique('sentry_messagecountbyminute', ['date', 'group_id'])
# Adding field 'Message.project'
db.add_column('sentry_message', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding field 'MessageCountByMinute.project'
db.add_column('sentry_messagecountbyminute', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding unique constraint on 'MessageCountByMinute', fields ['project', 'date', 'group']
db.create_unique('sentry_messagecountbyminute', ['project_id', 'date', 'group_id'])
# Adding field 'FilterValue.project'
db.add_column('sentry_filtervalue', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding unique constraint on 'FilterValue', fields ['project', 'value', 'key']
db.create_unique('sentry_filtervalue', ['project_id', 'value', 'key'])
# Adding field 'MessageFilterValue.project'
db.add_column('sentry_messagefiltervalue', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding unique constraint on 'MessageFilterValue', fields ['project', 'group', 'value', 'key']
db.create_unique('sentry_messagefiltervalue', ['project_id', 'group_id', 'value', 'key'])
# Adding field 'GroupedMessage.project'
db.add_column('sentry_groupedmessage', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding unique constraint on 'GroupedMessage', fields ['project', 'checksum', 'logger', 'view']
db.create_unique('sentry_groupedmessage', ['project_id', 'checksum', 'logger', 'view'])
def backwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['project', 'checksum', 'logger', 'view']
db.delete_unique('sentry_groupedmessage', ['project_id', 'checksum', 'logger', 'view'])
# Removing unique constraint on 'MessageFilterValue', fields ['project', 'group', 'value', 'key']
db.delete_unique('sentry_messagefiltervalue', ['project_id', 'group_id', 'value', 'key'])
# Removing unique constraint on 'FilterValue', fields ['project', 'value', 'key']
db.delete_unique('sentry_filtervalue', ['project_id', 'value', 'key'])
# Removing unique constraint on 'MessageCountByMinute', fields ['project', 'date', 'group']
db.delete_unique('sentry_messagecountbyminute', ['project_id', 'date', 'group_id'])
# Deleting field 'Message.project'
db.delete_column('sentry_message', 'project_id')
# Deleting field 'MessageCountByMinute.project'
db.delete_column('sentry_messagecountbyminute', 'project_id')
# Adding unique constraint on 'MessageCountByMinute', fields ['date', 'group']
db.create_unique('sentry_messagecountbyminute', ['date', 'group_id'])
# Deleting field 'FilterValue.project'
db.delete_column('sentry_filtervalue', 'project_id')
# Adding unique constraint on 'FilterValue', fields ['key', 'value']
db.create_unique('sentry_filtervalue', ['key', 'value'])
# Deleting field 'MessageFilterValue.project'
db.delete_column('sentry_messagefiltervalue', 'project_id')
# Adding unique constraint on 'MessageFilterValue', fields ['group', 'value', 'key']
db.create_unique('sentry_messagefiltervalue', ['group_id', 'value', 'key'])
# Deleting field 'GroupedMessage.project'
db.delete_column('sentry_groupedmessage', 'project_id')
# Adding unique constraint on 'GroupedMessage', fields ['checksum', 'logger', 'view']
db.create_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('project', 'logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['sentry']
| beni55/sentry | src/sentry/migrations/0015_auto__add_field_message_project__add_field_messagecountbyminute_projec.py | Python | bsd-3-clause | 15,392 | 0.007926 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# Connector for Wupload
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
logger.info("[wupload.py] test_video_exists(page_url='%s')" % page_url)
    # Exists: http://www.wupload.com/file/2666595132
    # Does not exist: http://www.wupload.es/file/2668162342
location = scrapertools.get_header_from_response(page_url,header_to_get="location")
logger.info("location="+location)
if location!="":
page_url = location
data = scrapertools.downloadpageWithoutCookies(page_url)
logger.info("data="+data)
patron = '<p class="fileInfo filename"><span>Filename: </span> <strong>([^<]+)</strong></p>'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
return True,""
else:
patron = '<p class="deletedFile">(Sorry, this file has been removed.)</p>'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
return False,matches[0]
patron = '<div class="section CL3 regDownloadMessage"> <h3>(File does not exist)</h3> </div>'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
return False,matches[0]
return True,""
# Returns an array of possible video url's from the page_url
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[wupload.py] get_video_url( page_url='%s' , user='%s' , password='%s', video_password=%s)" % (page_url , user , "**************************"[0:len(password)] , video_password) )
if not premium:
#return get_free_url(page_url)
logger.info("[wupload.py] free no soportado")
else:
        # Log in and obtain the session cookie
#login_url = "http://www.wupload.es/account/login"
login_url = "http://www.wupload.com/account/login"
post = "email="+user.replace("@","%40")+"&redirect=%2F&password="+password+"&rememberMe=1"
location = scrapertools.get_header_from_response( url=login_url, header_to_get="location", post=post)
logger.info("location="+location)
if location!="":
login_url = location
data = scrapertools.cache_page(url=login_url, post=post)
        # Get the final URL
headers = scrapertools.get_headers_from_response(page_url)
location1 = ""
for header in headers:
logger.info("header1="+str(header))
if header[0]=="location":
location1 = header[1]
logger.info("location1="+str(header))
        # Get the final URL
headers = scrapertools.get_headers_from_response(location1)
location2 = ""
content_disposition = ""
for header in headers:
logger.info("header2="+str(header))
if header[0]=="location":
location2 = header[1]
location = location2
if location=="":
location = location1
return [ ["(Premium) [wupload]",location + "|" + "User-Agent="+urllib.quote("Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12") ] ]
return []
def get_free_url(page_url):
location = scrapertools.get_header_from_response(page_url,header_to_get="location")
if location!="":
page_url = location
logger.info("[wupload.py] location=%s" % page_url)
video_id = extract_id(page_url)
logger.info("[wupload.py] video_id=%s" % video_id)
data = scrapertools.cache_page(url=page_url)
patron = 'href="(.*?start=1.*?)"'
matches = re.compile(patron).findall(data)
scrapertools.printMatches(matches)
if len(matches)==0:
logger.error("[wupload.py] No encuentra el enlace Free")
return []
# Obtiene link de descarga free
download_link = matches[0]
if not download_link.startswith("http://"):
download_link = urlparse.urljoin(page_url,download_link)
logger.info("[wupload.py] Link descarga: "+ download_link)
# Descarga el enlace
headers = []
headers.append( ["X-Requested-With", "XMLHttpRequest"] )
headers.append( ["Referer" , page_url ])
headers.append( ["User-Agent" , "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12" ])
headers.append( ["Content-Type" , "application/x-www-form-urlencoded; charset=UTF-8"])
headers.append( ["Accept-Encoding" , "gzip, deflate"])
headers.append( ["Accept","*/*"])
headers.append( ["Accept-Language","es-es,es;q=0.8,en-us;q=0.5,en;q=0.3"])
headers.append( ["Accept-Charset","ISO-8859-1,utf-8;q=0.7,*;q=0.7"])
headers.append( ["Connection","keep-alive"])
headers.append( ["Pragma","no-cache"])
headers.append( ["Cache-Control","no-cache"])
data = scrapertools.cache_page( download_link , headers=headers, post="" )
logger.info(data)
while True:
        # Detect the wait time
patron = "countDownDelay = (\d+)"
matches = re.compile(patron).findall(data)
if len(matches)>0:
tiempo_espera = int(matches[0])
logger.info("[wupload.py] tiempo de espera %d segundos" % tiempo_espera)
#import time
#time.sleep(tiempo_espera)
from platformcode.xbmc import xbmctools
            resultado = xbmctools.handle_wait(tiempo_espera+5,"Progress","Connecting to the Wupload server (Free)")
if resultado == False:
break
tm = get_match(data,"name='tm' value='([^']+)'")
tm_hash = get_match(data,"name='tm_hash' value='([^']+)'")
post = "tm=" + tm + "&tm_hash=" + tm_hash
data = scrapertools.cache_page( download_link , headers=headers, post=post )
logger.info(data)
else:
logger.info("[wupload.py] no encontrado tiempo de espera")
        # Detect the captcha
patron = "Recaptcha\.create"
matches = re.compile(patron).findall(data)
if len(matches)>0:
logger.info("[wupload.py] est� pidiendo el captcha")
recaptcha_key = get_match( data , 'Recaptcha\.create\("([^"]+)"')
logger.info("[wupload.py] recaptcha_key="+recaptcha_key)
data_recaptcha = scrapertools.cache_page("http://www.google.com/recaptcha/api/challenge?k="+recaptcha_key)
patron="challenge.*?'([^']+)'"
challenges = re.compile(patron, re.S).findall(data_recaptcha)
if(len(challenges)>0):
challenge = challenges[0]
image = "http://www.google.com/recaptcha/api/image?c="+challenge
#CAPTCHA
exec "import seriesly.captcha as plugin"
tbd = plugin.Keyboard("","",image)
tbd.doModal()
confirmed = tbd.isConfirmed()
if (confirmed):
tecleado = tbd.getText()
#logger.info("")
#tecleado = raw_input('Grab ' + image + ' : ')
post = "recaptcha_challenge_field=%s&recaptcha_response_field=%s" % (challenge,tecleado.replace(" ","+"))
data = scrapertools.cache_page( download_link , headers=headers, post=post )
logger.info(data)
else:
logger.info("[wupload.py] no encontrado captcha")
        # Detect the final download link
patron = '<p><a href="(http\:\/\/.*?wupload[^"]+)">'
matches = re.compile(patron).findall(data)
if len(matches)>0:
final_url = matches[0]
'''
'GET /download/2616019677/4f0391ba/9bed4add/0/1/580dec58/3317afa30905a31794733c6a32da1987719292ff
HTTP/1.1
Accept-Language: es-es,es;q=0.8,en-us;q=0.5,en;q=0.3
Accept-Encoding: gzip, deflate
Connection: close\r\nAccept: */*\r\nUser-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12
Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7
Host: s107.wupload.es
Referer: http://www.wupload.es/file/2616019677
Pragma: no-cache
Cache-Control: no-cache
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
00:39:39 T:2956623872 NOTICE: reply:
00:39:39 T:2956623872 NOTICE: 'HTTP/1.1 200 OK\r\n'
00:39:39 T:2956623872 NOTICE: header:
00:39:39 T:2956623872 NOTICE: Server: nginx
00:39:39 T:2956623872 NOTICE: header:
00:39:39 T:2956623872 NOTICE: Date: Tue, 03 Jan 2012 23:39:39 GMT
00:39:39 T:2956623872 NOTICE: header:
00:39:39 T:2956623872 NOTICE: Content-Type: "application/octet-stream"
00:39:39 T:2956623872 NOTICE: header:
00:39:39 T:2956623872 NOTICE: Content-Length: 230336429
00:39:39 T:2956623872 NOTICE: header:
00:39:39 T:2956623872 NOTICE: Last-Modified: Tue, 06 Sep 2011 01:07:26 GMT
00:39:39 T:2956623872 NOTICE: header:
00:39:39 T:2956623872 NOTICE: Connection: close
00:39:39 T:2956623872 NOTICE: header:
00:39:39 T:2956623872 NOTICE: Set-Cookie: dlc=1; expires=Thu, 02-Feb-2012 23:39:39 GMT; path=/; domain=.wupload.es
00:39:39 T:2956623872 NOTICE: header:
00:39:39 T:2956623872 NOTICE: : attachment; filename="BNS609.mp4"
'''
logger.info("[wupload.py] link descarga " + final_url)
return [["(Free)",final_url + '|' + 'Referer=' + urllib.quote(page_url) + "&Content-Type=" + urllib.quote("application/x-www-form-urlencoded; charset=UTF-8")+"&Cookie="+urllib.quote("lastUrlLinkId="+video_id)]]
else:
logger.info("[wupload.py] no detectado link descarga")
def extract_id(url):
return get_match(url, 'wupload.*?/file/(\d+)')
def get_match(data, regex) :
match = "";
m = re.search(regex, data)
if m != None :
match = m.group(1)
return match
def find_videos(data):
encontrados = set()
devuelve = []
patronvideos = '(http://www.wupload.*?/file/\d+)'
logger.info("[wupload.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos).findall(data)
for match in matches:
titulo = "[wupload]"
url = match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'wupload' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# Encontrado en animeflv
#s=mediafire.com%2F%3F7fsmmq2144fx6t4|-|wupload.com%2Ffile%2F2653904582
patronvideos = 'wupload.com\%2Ffile\%2F(\d+)'
logger.info("[wupload.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos).findall(data)
for match in matches:
titulo = "[wupload]"
url = "http://www.wupload.com/file/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'wupload' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
    return devuelve
| conejoninja/xbmc-seriesly | servers/wupload.py | Python | gpl-3.0 | 11,622 | 0.015835 |
import urllib
import urllib2
import cookielib
import logging
class GISTokenGenerator:
def __init__(self, email, password):
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.email = email
self.login_data = urllib.urlencode({'user[email]': email, 'user[password]': password})
def generate_token(self):
logging.info('Generating a token for {0}...'.format(self.email))
self.opener.open('https://auth.aiesec.org/users/sign_in', self.login_data)
token = None
for cookie in self.cj:
if cookie.name == 'expa_token':
token = cookie.value
if token is None:
raise Exception('Unable to generate a token for {0}!'.format(self.email))
return token
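
# A minimal usage sketch (the credentials below are placeholders):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    generator = GISTokenGenerator('user@example.com', 'secret')
    print generator.generate_token()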
| AIESECGermany/gis-hubspot-sync | gis_token_generator.py | Python | gpl-2.0 | 825 | 0.004848 |
import sys, os
from pythonparser import diagnostic
from ...language.environment import ProcessArgumentManager
from ...master.databases import DeviceDB, DatasetDB
from ...master.worker_db import DeviceManager, DatasetManager
from ..module import Module
from ..embedding import Stitcher
from ..targets import OR1KTarget
from . import benchmark
def main():
if not len(sys.argv) == 2:
print("Expected exactly one module filename", file=sys.stderr)
exit(1)
def process_diagnostic(diag):
print("\n".join(diag.render()), file=sys.stderr)
if diag.level in ("fatal", "error"):
exit(1)
engine = diagnostic.Engine()
engine.process = process_diagnostic
with open(sys.argv[1]) as f:
testcase_code = compile(f.read(), f.name, "exec")
testcase_vars = {'__name__': 'testbench'}
exec(testcase_code, testcase_vars)
device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
device_mgr = DeviceManager(DeviceDB(device_db_path))
dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))
argument_mgr = ProcessArgumentManager({})
def embed():
experiment = testcase_vars["Benchmark"]((device_mgr, dataset_mgr, argument_mgr))
stitcher = Stitcher(core=experiment.core, dmgr=device_mgr)
stitcher.stitch_call(experiment.run, (), {})
stitcher.finalize()
return stitcher
stitcher = embed()
module = Module(stitcher)
target = OR1KTarget()
llvm_ir = target.compile(module)
elf_obj = target.assemble(llvm_ir)
elf_shlib = target.link([elf_obj])
benchmark(lambda: embed(),
"ARTIQ embedding")
benchmark(lambda: Module(stitcher),
"ARTIQ transforms and validators")
benchmark(lambda: target.compile(module),
"LLVM optimizations")
benchmark(lambda: target.assemble(llvm_ir),
"LLVM machine code emission")
benchmark(lambda: target.link([elf_obj]),
"Linking")
benchmark(lambda: target.strip(elf_shlib),
"Stripping debug information")
if __name__ == "__main__":
main()
| JQIamo/artiq | artiq/compiler/testbench/perf_embedding.py | Python | lgpl-3.0 | 2,231 | 0.001793 |
import json
import os
import urllib
from ..storage import WatchLaterList, FunctionCache, SearchHistory, FavoriteList, AccessManager
from .abstract_settings import AbstractSettings
from .. import utils
class AbstractContext(object):
CACHE_ONE_MINUTE = 60
CACHE_ONE_HOUR = 60 * CACHE_ONE_MINUTE
CACHE_ONE_DAY = 24 * CACHE_ONE_HOUR
CACHE_ONE_WEEK = 7 * CACHE_ONE_DAY
CACHE_ONE_MONTH = 4 * CACHE_ONE_WEEK
SORT_METHOD_ALBUM = 'album'
SORT_METHOD_ALBUM_IGNORE_THE = 'album_ignore_the'
SORT_METHOD_ARTIST = 'artist'
SORT_METHOD_ARTIST_IGNORE_THE = 'artist_ignore_the'
SORT_METHOD_BIT_RATE = 'bit_rate'
SORT_METHOD_CHANNEL = 'channel'
SORT_METHOD_COUNTRY = 'country'
SORT_METHOD_DATE = 'date'
SORT_METHOD_DATE_ADDED = 'date_added'
SORT_METHOD_DATE_TAKEN = 'date_taken'
SORT_METHOD_DRIVE_TYPE = 'drive_type'
SORT_METHOD_DURATION = 'duration'
SORT_METHOD_EPISODE = 'episode'
SORT_METHOD_FILE = 'file'
SORT_METHOD_FULL_PATH = 'full_path'
SORT_METHOD_GENRE = 'genre'
SORT_METHOD_LABEL = 'label'
SORT_METHOD_LABEL_IGNORE_FOLDERS = 'label_ignore_folders'
SORT_METHOD_LABEL_IGNORE_THE = 'label_ignore_the'
SORT_METHOD_LAST_PLAYED = 'last_played'
SORT_METHOD_LISTENERS = 'listeners'
SORT_METHOD_MPAA_RATING = 'mpaa_rating'
SORT_METHOD_NONE = 'none'
SORT_METHOD_PLAY_COUNT = 'play_count'
SORT_METHOD_PLAYLIST_ORDER = 'playlist_order'
SORT_METHOD_PRODUCTION_CODE = 'production_code'
SORT_METHOD_PROGRAM_COUNT = 'program_count'
SORT_METHOD_SIZE = 'size'
SORT_METHOD_SONG_RATING = 'song_rating'
SORT_METHOD_STUDIO = 'studio'
SORT_METHOD_STUDIO_IGNORE_THE = 'studio_ignore_the'
SORT_METHOD_TITLE = 'title'
SORT_METHOD_TITLE_IGNORE_THE = 'title_ignore_the'
SORT_METHOD_TRACK_NUMBER = 'track_number'
SORT_METHOD_UNSORTED = 'unsorted'
SORT_METHOD_VIDEO_RATING = 'video_rating'
SORT_METHOD_VIDEO_RUNTIME = 'video_runtime'
SORT_METHOD_VIDEO_SORT_TITLE = 'video_sort_title'
SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = 'video_sort_title_ignore_the'
SORT_METHOD_VIDEO_TITLE = 'video_title'
SORT_METHOD_VIDEO_YEAR = 'video_year'
CONTENT_TYPE_FILES = 'files'
CONTENT_TYPE_SONGS = 'songs'
CONTENT_TYPE_ARTISTS = 'artists'
CONTENT_TYPE_ALBUMS = 'albums'
CONTENT_TYPE_MOVIES = 'movies'
CONTENT_TYPE_TV_SHOWS = 'tvshows'
CONTENT_TYPE_EPISODES = 'episodes'
CONTENT_TYPE_MUSIC_VIDEOS = 'musicvideos'
LOG_DEBUG = 0
LOG_INFO = 1
LOG_WARNING = 2
LOG_ERROR = 3
def __init__(self, path=u'/', params=None, plugin_name=u'', plugin_id=u''):
if not params:
params = {}
pass
self._path_match = None
self._python_version = None
self._cache_path = None
self._function_cache = None
self._search_history = None
self._favorite_list = None
self._watch_later_list = None
self._access_manager = None
self._plugin_name = unicode(plugin_name)
self._version = 'UNKNOWN'
self._plugin_id = plugin_id
self._path = path
self._params = params
self._utils = None
self._view_mode = None
# create valid uri
self._uri = self.create_uri(self._path, self._params)
pass
def set_path_match(self, path_match):
"""
Sets the current regular expression match for a navigated path
:param path_match: regular expression match
"""
self._path_match = path_match
pass
def get_path_match(self):
"""
        Returns the current regular expression match for the navigated path
        :return: regular expression match
"""
return self._path_match
def format_date_short(self, date_obj):
raise NotImplementedError()
def format_time(self, time_obj):
raise NotImplementedError()
def get_language(self):
raise NotImplementedError()
def _get_cache_path(self):
if not self._cache_path:
self._cache_path = os.path.join(self.get_data_path(), 'kodion')
pass
return self._cache_path
def get_function_cache(self):
if not self._function_cache:
settings = self.get_settings()
max_cache_size_mb = settings.get_int(AbstractSettings.ADDON_CACHE_SIZE, 5)
self._function_cache = FunctionCache(os.path.join(self._get_cache_path(), 'cache'),
max_file_size_kb=max_cache_size_mb * 1024)
if settings.is_clear_cache_enabled():
self.log_info('Clearing cache...')
settings.disable_clear_cache()
self._function_cache.remove_file()
self.log_info('Clearing cache done')
pass
pass
return self._function_cache
def cache_function(self, seconds, func, *args, **keywords):
return self.get_function_cache().get(seconds, func, *args, **keywords)
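    # Illustrative sketch (fetch_channels below is a hypothetical callable):
    #   items = context.cache_function(context.CACHE_ONE_HOUR, fetch_channels)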
def get_search_history(self):
if not self._search_history:
max_search_history_items = self.get_settings().get_int(AbstractSettings.ADDON_SEARCH_SIZE, 50,
lambda x: x * 10)
self._search_history = SearchHistory(os.path.join(self._get_cache_path(), 'search'),
max_search_history_items)
pass
return self._search_history
def get_favorite_list(self):
if not self._favorite_list:
self._favorite_list = FavoriteList(os.path.join(self._get_cache_path(), 'favorites'))
pass
return self._favorite_list
def get_watch_later_list(self):
if not self._watch_later_list:
self._watch_later_list = WatchLaterList(os.path.join(self._get_cache_path(), 'watch_later'))
pass
return self._watch_later_list
def get_access_manager(self):
if not self._access_manager:
self._access_manager = AccessManager(self.get_settings())
pass
return self._access_manager
def get_video_playlist(self):
raise NotImplementedError()
def get_audio_playlist(self):
raise NotImplementedError()
def get_video_player(self):
raise NotImplementedError()
def get_audio_player(self):
raise NotImplementedError()
def get_ui(self):
raise NotImplementedError()
def get_system_version(self):
raise NotImplementedError()
def get_system_name(self):
raise NotImplementedError()
def get_python_version(self):
if not self._python_version:
try:
import platform
python_version = str(platform.python_version())
python_version = python_version.split('.')
self._python_version = tuple(map(lambda x: int(x), python_version))
except Exception, ex:
self.log_error('Unable to get the version of python')
self.log_error(ex.__str__())
self._python_version = [0, 0]
pass
pass
return self._python_version
def create_uri(self, path=u'/', params=None):
if not params:
params = {}
pass
uri_path = utils.path.to_uri(path)
if uri_path:
uri = "%s://%s%s" % ('plugin', utils.strings.to_utf8(self._plugin_id), uri_path)
else:
uri = "%s://%s/" % ('plugin', utils.strings.to_utf8(self._plugin_id))
pass
if len(params) > 0:
# make a copy of the map
uri_params = {}
uri_params.update(params)
# encode in utf-8
for key in uri_params:
param = params[key]
# convert dict to string via json
if isinstance(param, dict):
param = json.dumps(param)
pass
uri_params[key] = utils.strings.to_utf8(param)
pass
uri += '?' + urllib.urlencode(uri_params)
pass
return uri
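    # Illustrative behaviour sketch (the plugin id below is a placeholder):
    #   create_uri(u'/search/', {'q': u'foo'})
    #   returns roughly 'plugin://plugin.video.example/search/?q=foo'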
def get_path(self):
return self._path
def get_params(self):
return self._params
def get_param(self, name, default=None):
return self.get_params().get(name, default)
def get_data_path(self):
"""
        Returns the path for reading and writing addon data files
:return:
"""
raise NotImplementedError()
def get_native_path(self):
raise NotImplementedError()
def get_icon(self):
return os.path.join(self.get_native_path(), 'icon.png')
def get_fanart(self):
return os.path.join(self.get_native_path(), 'fanart.jpg')
def create_resource_path(self, relative_path):
relative_path = utils.path.normalize(relative_path)
path_comps = relative_path.split('/')
return os.path.join(self.get_native_path(), 'resources', *path_comps)
def get_uri(self):
return self._uri
def get_name(self):
return self._plugin_name
def get_version(self):
return self._version
def get_id(self):
return self._plugin_id
def get_handle(self):
raise NotImplementedError()
def get_settings(self):
raise NotImplementedError()
def localize(self, text_id, default=u''):
raise NotImplementedError()
def set_content_type(self, content_type):
raise NotImplementedError()
def add_sort_method(self, *sort_methods):
raise NotImplementedError()
def log(self, text, log_level):
raise NotImplementedError()
def log_debug(self, text):
self.log(text, self.LOG_DEBUG)
pass
def log_info(self, text):
self.log(text, self.LOG_INFO)
pass
def log_warning(self, text):
self.log(text, self.LOG_WARNING)
pass
def log_error(self, text):
self.log(text, self.LOG_ERROR)
pass
def clone(self, new_path=None, new_params=None):
raise NotImplementedError()
def execute(self, command):
raise NotImplementedError()
def sleep(self, milli_seconds):
raise NotImplementedError()
def resolve_item(self, item):
raise NotImplementedError()
def add_item(self, item):
raise NotImplementedError()
def end_of_content(self, succeeded=True):
        raise NotImplementedError()
| listamilton/supermilton.repository | plugin.audio.soundcloud/resources/lib/nightcrawler/core/abstract_context.py | Python | gpl-2.0 | 10,518 | 0.001236 |