#!/usr/bin/env python
"""This is for cleaning up binary files improperly added to CVS. This script
scans the given path to find binary files; checks with CVS to see if the sticky
options are set to -kb; finally, if the sticky options are not -kb, it uses
'cvs admin' to set the -kb option.

This script ignores CVS directories, symbolic links, and files not known under
CVS control (cvs status is 'Unknown').

Run this on a CHECKED OUT module sandbox, not on the repository itself. After
it fixes the sticky options on any files you should manually do a 'cvs commit'
to accept the changes. Then be sure to have all users do a 'cvs up -A' to
update the Sticky Option status.

Noah Spurrier
20030426
"""
import os, sys, time

import pexpect

VERBOSE = 1

def is_binary (filename):
    """Assume that any file with a character where the 8th bit is set is
    binary. """
    fin = open(filename, 'rb')
    wholething = fin.read()
    fin.close()
    for c in wholething:
        if c & 0x80:
            return 1
    return 0

def is_kb_sticky (filename):
    """This checks if 'cvs status' reports '-kb' for Sticky Options. If the
    Sticky Options status is '-kb' then this returns 1. If the status is
    'Unknown' then it also returns 1. Otherwise 0 is returned. """
    try:
        # encoding='utf-8' is an assumption for Python 3 pexpect, so that
        # expect patterns and match groups are str rather than bytes.
        s = pexpect.spawn ('cvs status %s' % filename, encoding='utf-8')
        i = s.expect ([r'Sticky Options:\s*(\S*)\s*\r\n', 'Status: Unknown'])
        if i==1 and VERBOSE:
            print('File not part of CVS repository:', filename)
            return 1 # Pretend it's OK.
        if s.match.group(1) == '-kb':
            return 1
        s = None
    except Exception:
        print('Something went wrong trying to run external cvs command.')
        print('    cvs status %s' % filename)
        print('The cvs command returned:')
        print(s.before)
    return 0

def cvs_admin_kb (filename):
    """This uses 'cvs admin' to set the '-kb' sticky option. """
    s = pexpect.run ('cvs admin -kb %s' % filename)
    print('cvs admin -kb %s' % filename)
    # There is a timing issue. If I run 'cvs admin' too quickly
    # cvs sometimes has trouble obtaining the directory lock.
    time.sleep(1)

def walk_and_clean_cvs_binaries (dirpath, dirnames, filenames):
    """This contains the logic for processing files. It is called for each
    directory that os.walk visits. This skips dirpaths that end in CVS. """
    if dirpath.endswith('CVS'):
        return
    for n in filenames:
        fullpath = os.path.join (dirpath, n)
        if os.path.islink(fullpath):
            continue
        if is_binary(fullpath):
            if not is_kb_sticky (fullpath):
                if VERBOSE: print(fullpath)
                cvs_admin_kb (fullpath)

def main ():
    if len(sys.argv) == 1:
        root = '.'
    else:
        root = sys.argv[1]
    # CVS doesn't like absolute pathnames.
    os.chdir(root)
    for dirpath, dirnames, filenames in os.walk ('.'):
        walk_and_clean_cvs_binaries(dirpath, dirnames, filenames)

if __name__ == '__main__':
    main ()
# ---- Source: yuzhichang/pexpect :: examples/fix_cvs_files.py (Python, MIT license) ----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mk-docktest-01.py 2014-10-30 1.1
# (C) Mikhail (myke) Kolodin, 2014
# Tests my own tests against the official docker-py docs - OK

import time
import docker
import psutil  # imported in the original but not used below
from pprint import pprint as pp
def main1():
    try:
        print "making client, ",
        c = docker.Client(base_url='unix://var/run/docker.sock',
                          version='1.14',
                          timeout=10)
        print "client ready, "

        print "create container, ",
        ctr = c.create_container('ubuntu:14.04', name="hello-2", command='env', environment={'vasya': 'pupking'}, ports=[80, 8080])
        print "created container, ",

        print "starting, ",
        c.start(ctr, port_bindings={80:6080, 8080:6081})
        print "started, ",

        pp(c.info())
        pp(c.inspect_container(ctr))
        # print c.info()
        # print c.inspect_container(ctr)

        print "killing, ",
        # c.kill(ctr)
        c.stop(ctr)
        c.remove_container(ctr)
        print "killed, ",

        v = c.version()
        print v
    except:
        print "bad docker"
    return 0

if __name__ == '__main__':
    main1()
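# Note on the port plumbing above: ports=[80, 8080] declares the exposed
# container ports at create time, and port_bindings={80: 6080, 8080: 6081}
# publishes them on host ports 6080/6081 at start time; this is docker-py's
# split of what "docker run -p 6080:80 -p 6081:8080" does in one step.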
#http://serverascode.com/2014/06/05/docker-python.html
#~ making client, client ready,
#~ create container, created container, starting, started, {u'Containers': 1,
#~ u'Debug': 0,
#~ u'Driver': u'aufs',
#~ u'DriverStatus': [[u'Root Dir', u'/var/lib/docker/aufs'], [u'Dirs', u'257']],
#~ u'ExecutionDriver': u'native-0.2',
#~ u'IPv4Forwarding': 1,
#~ u'Images': 255,
#~ u'IndexServerAddress': u'https://index.docker.io/v1/',
#~ u'InitPath': u'/usr/bin/docker',
#~ u'InitSha1': u'',
#~ u'KernelVersion': u'3.13.0-24-generic',
#~ u'MemoryLimit': 1,
#~ u'NEventsListener': 0,
#~ u'NFd': 15,
#~ u'NGoroutines': 13,
#~ u'OperatingSystem': u'Ubuntu 14.04.1 LTS',
#~ u'SwapLimit': 0}
#~ {u'Args': [],
#~ u'Config': {u'AttachStderr': True,
#~ u'AttachStdin': False,
#~ u'AttachStdout': True,
#~ u'Cmd': [u'env'],
#~ u'CpuShares': 0,
#~ u'Cpuset': u'',
#~ u'Domainname': u'',
#~ u'Entrypoint': None,
#~ u'Env': [u'vasya=pupking',
#~ u'HOME=/',
#~ u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
#~ u'ExposedPorts': {u'80/tcp': {}, u'8080/tcp': {}},
#~ u'Hostname': u'0000582920a3',
#~ u'Image': u'ubuntu:14.04',
#~ u'Memory': 0,
#~ u'MemorySwap': 0,
#~ u'NetworkDisabled': False,
#~ u'OnBuild': None,
#~ u'OpenStdin': False,
#~ u'PortSpecs': None,
#~ u'StdinOnce': False,
#~ u'Tty': False,
#~ u'User': u'',
#~ u'Volumes': None,
#~ u'WorkingDir': u''},
#~ u'Created': u'2014-10-29T18:08:28.152346779Z',
#~ u'Driver': u'aufs',
#~ u'ExecDriver': u'native-0.2',
#~ u'HostConfig': {u'Binds': None,
#~ u'CapAdd': None,
#~ u'CapDrop': None,
#~ u'ContainerIDFile': u'',
#~ u'Devices': None,
#~ u'Dns': None,
#~ u'DnsSearch': None,
#~ u'Links': None,
#~ u'LxcConf': None,
#~ u'NetworkMode': u'',
#~ u'PortBindings': {u'80/tcp': [{u'HostIp': u'',
#~ u'HostPort': u'6080'}],
#~ u'8080/tcp': [{u'HostIp': u'',
#~ u'HostPort': u'6081'}]},
#~ u'Privileged': False,
#~ u'PublishAllPorts': False,
#~ u'RestartPolicy': {u'MaximumRetryCount': 0, u'Name': u''},
#~ u'VolumesFrom': None},
#~ u'HostnamePath': u'/var/lib/docker/containers/0000582920a3e3ee9059cb9ed057e131709a4e0c2d507e68a331c80055be70ab/hostname',
#~ u'HostsPath': u'/var/lib/docker/containers/0000582920a3e3ee9059cb9ed057e131709a4e0c2d507e68a331c80055be70ab/hosts',
#~ u'Id': u'0000582920a3e3ee9059cb9ed057e131709a4e0c2d507e68a331c80055be70ab',
#~ u'Image': u'c4ff7513909dedf4ddf3a450aea68cd817c42e698ebccf54755973576525c416',
#~ u'MountLabel': u'',
#~ u'Name': u'/hello-2',
#~ u'NetworkSettings': {u'Bridge': u'docker0',
#~ u'Gateway': u'172.17.42.1',
#~ u'IPAddress': u'172.17.6.194',
#~ u'IPPrefixLen': 16,
#~ u'PortMapping': None,
#~ u'Ports': {u'80/tcp': [{u'HostIp': u'0.0.0.0',
#~ u'HostPort': u'6080'}],
#~ u'8080/tcp': [{u'HostIp': u'0.0.0.0',
#~ u'HostPort': u'6081'}]}},
#~ u'Path': u'env',
#~ u'ProcessLabel': u'',
#~ u'ResolvConfPath': u'/var/lib/docker/containers/0000582920a3e3ee9059cb9ed057e131709a4e0c2d507e68a331c80055be70ab/resolv.conf',
#~ u'State': {u'ExitCode': 0,
#~ u'FinishedAt': u'0001-01-01T00:00:00Z',
#~ u'Paused': False,
#~ u'Pid': 26476,
#~ u'Restarting': False,
#~ u'Running': True,
#~ u'StartedAt': u'2014-10-29T18:08:28.302271237Z'},
#~ u'Volumes': {},
#~ u'VolumesRW': {}}
#~ killing, killed, {u'KernelVersion': u'3.13.0-24-generic', u'Arch': u'amd64', u'ApiVersion': u'1.14', u'Version': u'1.2.0', u'GitCommit': u'fa7b24f', u'Os': u'linux', u'GoVersion': u'go1.3.1'}
#~
#~
#~ ------------------
#~ (program exited with code: 0)
#~ Press return to continue
# ---- Source: mykespb/docking :: tests/test-01/mk-docktest-01.py (Python, LGPL-3.0 license) ----
"""This module defines the classes BalMask and Dla used in the
masking of DLAs"""
import logging
from astropy.table import Table
import fitsio
import numpy as np
from picca.delta_extraction.astronomical_objects.forest import Forest
from picca.delta_extraction.errors import MaskError
from picca.delta_extraction.mask import Mask
from picca.delta_extraction.utils import SPEED_LIGHT
defaults = {
"bal index type": "ai",
"los_id name": "THING_ID",
}
accepted_options = ["bal index type", "filename", "los_id name"]
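# A hypothetical config section consumed by BalMask (option names follow
# accepted_options above; the section name and filename path are examples only):
#
#   [mask arguments]
#   filename = /path/to/bal_catalog.fits
#   los_id name = TARGETID
#   bal index type = ai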
# Wavelengths in Angstroms
lines = {
    "lCIV": 1549,
    "lNV": 1240.81,
    "lLya": 1216.1,
    "lCIII": 1175,
    "lPV1": 1117,
    "lPV2": 1128,
    "lSIV1": 1062,
    "lSIV2": 1074,
    "lLyb": 1020,
    "lOIV": 1031,
    "lOVI": 1037,
    "lOI": 1039
}
class BalMask(Mask):
    """Class to mask BALs

    Methods
    -------
    __init__
    apply_mask

    Attributes
    ----------
    los_ids: dict (from Mask)
    A dictionary with the BALs contained in each line of sight. Keys are the
    identifier for the line of sight and values are astropy Tables with the
    wavelength intervals to be masked

    bal_index_type: str
    BAL index type, choose either 'ai' or 'bi'. This will set which velocity
    the BAL mask uses.

    cat: dict
    Dictionary with the BAL catalogue

    logger: logging.Logger
    Logger object

    mask: astropy.Table
    Table containing specific intervals of wavelength to be masked for BALs
    """
    def __init__(self, config):
        """Initializes class instance.

        Arguments
        ---------
        config: configparser.SectionProxy
        Parsed options to initialize class

        Raise
        -----
        MaskError if there are missing variables
        MaskError if input file does not have the expected extension
        MaskError if input file does not have the expected fields
        MaskError upon OsError when reading the mask file
        """
        self.logger = logging.getLogger(__name__)

        super().__init__()

        filename = config.get("filename")
        if filename is None:
            raise MaskError("Missing argument 'filename' required by BalMask")

        los_id_name = config.get("los_id name")
        if los_id_name is None:
            raise MaskError("Missing argument 'los_id name' required by BalMask")
        elif los_id_name == "THING_ID":
            ext_name = 'BALCAT'
        elif los_id_name == "TARGETID":
            ext_name = 'ZCATALOG'
        else:
            raise MaskError("Unrecognized los_id name. Expected one of 'THING_ID' "
                            f" or 'TARGETID'. Found {los_id_name}")

        # setup bal index limit
        self.bal_index_type = config.get("bal index type")  # 'ai' or 'bi'; config.getfloat was a bug since the value is a string
        if self.bal_index_type is None:
            raise MaskError("Missing argument 'bal index type' "
                            "required by BalMask")
        if self.bal_index_type == "ai":
            columns_list = [
                los_id_name, 'VMIN_CIV_450', 'VMAX_CIV_450'
            ]
        elif self.bal_index_type == "bi":
            columns_list = [
                los_id_name, 'VMIN_CIV_2000', 'VMAX_CIV_2000'
            ]
        else:
            raise MaskError("In BalMask, unrecognized value "
                            "for 'bal_index_type'. Expected one "
                            "of 'ai' or 'bi'. Found "
                            f"{self.bal_index_type}")

        self.logger.progress(f"Reading BAL catalog from: {filename}")

        hdul = None  # so the finally clause is safe if fitsio.FITS itself fails
        try:
            hdul = fitsio.FITS(filename)
            self.cat = {col: hdul[ext_name][col][:] for col in columns_list}
        except OSError:
            raise MaskError(f"Error loading BalMask. File {filename} does "
                            f"not have extension '{ext_name}'")
        except ValueError:
            aux = "', '".join(columns_list)
            raise MaskError(f"Error loading BalMask. File {filename} does "
                            f"not have fields '{aux}' in HDU '{ext_name}'")
        finally:
            if hdul is not None:
                hdul.close()

        # compute info for each line of sight
        self.los_ids = {}
        for los_id in np.unique(self.cat[los_id_name]):
            self.los_ids[los_id] = self.add_bal_rest_frame(los_id, los_id_name)

        num_bals = np.sum([len(mask_table) for mask_table in self.los_ids.values()])
        self.logger.progress('In catalog: {} BAL quasars'.format(num_bals))

    def add_bal_rest_frame(self, los_id, los_id_name):
        """Creates a list of wavelengths to be masked out by forest.mask

        Arguments
        ---------
        los_id: str
        Line-of-sight id

        los_id_name: str
        Name of the line-of-sight id
        """
        if self.bal_index_type == 'bi':
            velocity_list = ['VMIN_CIV_2000', 'VMAX_CIV_2000']
        else:  # AI, the default
            velocity_list = ['VMIN_CIV_450', 'VMAX_CIV_450']

        mask_rest_frame_bal = Table(names=['log_lambda_min', 'log_lambda_max',
                                           'lambda_min', 'lambda_max'],
                                    dtype=['f4', 'f4', 'f4', 'f4'])
        min_velocities = []  # list of minimum velocities
        max_velocities = []  # list of maximum velocities

        # Match thing_id of object to BAL catalog index
        match_index = np.where(self.cat[los_id_name] == los_id)[0][0]

        # Store the min/max velocity pairs from the BAL catalog
        for col in velocity_list:
            velocities = self.cat[col]  # renamed from velocity_list to avoid shadowing the loop source
            if col.find('VMIN') == 0:
                for vel in velocities[match_index]:
                    if vel > 0:
                        min_velocities.append(vel)
            else:
                for vel in velocities[match_index]:
                    if vel > 0:
                        max_velocities.append(vel)

        # Calculate mask width for each velocity pair, for each emission line
        for min_vel, max_vel in zip(min_velocities, max_velocities):
            for line in lines.values():
                log_lambda_min = np.log10(line * (1 - min_vel / SPEED_LIGHT))
                log_lambda_max = np.log10(line * (1 - max_vel / SPEED_LIGHT))
                mask_rest_frame_bal.add_row([log_lambda_min, log_lambda_max,
                                             10**log_lambda_min, 10**log_lambda_max,
                                            ])

        return mask_rest_frame_bal
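    # Example of the window computed in add_bal_rest_frame (illustrative
    # numbers): for the CIV line at 1549 Angstroms, a trough velocity of
    # 2000 km/s maps to 1549 * (1 - 2000 / 299792.458) ~ 1538.7 Angstroms,
    # so each (min_vel, max_vel) pair brackets a blueshifted window below
    # the rest-frame wavelength of every entry in `lines`.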
    def apply_mask(self, forest):
        """Apply the mask. The mask is done by removing the affected
        pixels from the arrays in Forest.mask_fields

        Arguments
        ---------
        forest: Forest
        A Forest instance to which the correction is applied

        Raise
        -----
        MaskError if Forest.wave_solution is not 'log' or 'lin'
        """
        mask_table = self.los_ids.get(forest.los_id)
        if (mask_table is not None) and len(mask_table) > 0:

            # find out which pixels to mask
            if Forest.wave_solution == "log":
                w = np.ones(forest.log_lambda.size, dtype=bool)
                rest_frame_log_lambda = forest.log_lambda - np.log10(1. + forest.z)
                for mask_range in mask_table:
                    w &= ((rest_frame_log_lambda < mask_range['log_lambda_min']) |
                          (rest_frame_log_lambda > mask_range['log_lambda_max']))
            elif Forest.wave_solution == "lin":
                w = np.ones(forest.lambda_.size, dtype=bool)
                rest_frame_lambda = forest.lambda_ / (1. + forest.z)
                for mask_range in mask_table:
                    w &= ((rest_frame_lambda < mask_range['lambda_min']) |
                          (rest_frame_lambda > mask_range['lambda_max']))
            else:
                raise MaskError("Forest.wave_solution must be either 'log' or 'lin'")

            # do the actual masking
            for param in Forest.mask_fields:
                setattr(forest, param, getattr(forest, param)[w])
# ---- Source: igmhub/picca :: py/picca/delta_extraction/masks/bal_mask.py (Python, GPL-3.0 license) ----
import math, pdb

def load_counts(ifile):
    counts = {}
    total_kmers = 0.0
    with open(ifile) as iinput:
        for line in iinput:
            word, count = line.strip().split('\t')
            word = word[1:-1]
            count = int(count)
            counts[word] = float(count + 1) # the +1 implements add-one smoothing
            total_kmers += float(count + 1)
    return counts, total_kmers

def score(counts_pt, total_trimers_pt, counts_en, total_trimers_en, test_sentence):
    val = 0.
    # use len-2 so the final trigram is scored too (len-3 skipped the last one)
    for i in xrange(len(test_sentence)-2):
        tri = test_sentence[i:i+3]
        tri_pt = counts_pt.get(tri, 1.0) # get the count from the dictionary; on a miss, fall back to the smoothed count of 1.0
        log_prob_tri_pt = math.log10(tri_pt/total_trimers_pt)
        tri_en = counts_en.get(tri, 1.0)
        log_prob_tri_en = math.log10(tri_en/total_trimers_en)
        val += log_prob_tri_pt-log_prob_tri_en
    if val >= 0:
        language = "PT"
    else:
        language = "EN"
    if abs(val) >= 5:
        print "This is a", language, "sentence."
    else:
        print "This seems to be a", language, "sentence, but I'm not sure."
    print "Log-ratio:", abs(val)
counts_pt, total_trimers_pt = load_counts('pt.counts.txt')
counts_en, total_trimers_en = load_counts('en.counts.txt')

while True:
    test_sentence = raw_input("Type a test sentence and press ENTER:\n")
    if not test_sentence: break
    score(counts_pt, total_trimers_pt, counts_en, total_trimers_en, test_sentence)
# ---- Source: iarroyof/lxmls-toolkit :: lxmls/big_data/postprocess.py (Python, MIT license) ----
import urllib,urllib2,re,string
import xbmc,xbmcplugin,xbmcgui,xbmcaddon
import os
import sys  # used throughout (sys.argv, sys.version_info) but missing from the original imports
import unicodedata
import time
from xml.dom.minidom import parse
from time import strftime,sleep
from datetime import date

if sys.version_info >= (2, 7):
    import json as _json
    print "****XBMC python version (sys.version_info)=" + str(sys.version_info) + "****"
    print "****using import json as _json****"
else:
    import simplejson as _json
    print "****XBMC python version (sys.version_info)=" + str(sys.version_info) + "****"
    print "****using import simplejson as _json****"
__settings__ = xbmcaddon.Addon(id='plugin.video.sagetv')
__language__ = __settings__.getLocalizedString
__cwd__ = __settings__.getAddonInfo('path')

sage_mac = __settings__.getSetting("sage_mac")

# SageTV recording directories for path replacement
sage_rec = __settings__.getSetting("sage_rec")
sage_unc = __settings__.getSetting("sage_unc")
sage_rec2 = __settings__.getSetting("sage_rec2")
sage_unc2 = __settings__.getSetting("sage_unc2")
sage_rec3 = __settings__.getSetting("sage_rec3")
sage_unc3 = __settings__.getSetting("sage_unc3")
sage_rec4 = __settings__.getSetting("sage_rec4")
sage_unc4 = __settings__.getSetting("sage_unc4")
sage_rec5 = __settings__.getSetting("sage_rec5")
sage_unc5 = __settings__.getSetting("sage_unc5")

sagemappings = [ (sage_rec, sage_unc) ]
if ( sage_unc2 != '' and sage_unc2 != None ):
    sagemappings.append( (sage_rec2, sage_unc2) )
if ( sage_unc3 != '' and sage_unc3 != None ):
    sagemappings.append( (sage_rec3, sage_unc3) )
if ( sage_unc4 != '' and sage_unc4 != None ):
    sagemappings.append( (sage_rec4, sage_unc4) )
if ( sage_unc5 != '' and sage_unc5 != None ):
    sagemappings.append( (sage_rec5, sage_unc5) )

# Map a file's recording path to the first matching UNC path
def filemap(filepath):
    for (rec, unc) in sagemappings:
        if ( filepath.find(rec) != -1 ):
            # If the user didn't specify a trailing \ or / in the recording path
            # setting, add one, as that's critical to mapping the path correctly
            if(rec.find("\\") != -1):
                if(rec.rfind("\\") != (len(rec)-1)):
                    rec = rec + "\\"
            elif(rec.find("/") != -1):
                if(rec.rfind("/") != (len(rec)-1)):
                    rec = rec + "/"
            return filepath.replace(rec, unc)
    return filepath
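# A hypothetical example of the mapping above: with sage_rec = "D:\Recordings\"
# and sage_unc = "\\htpc\Recordings\" in the addon settings,
#   filemap("D:\Recordings\Show-1234-0.mpg")
# returns "\\htpc\Recordings\Show-1234-0.mpg"; paths that match no configured
# recording directory are returned unchanged.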
# SageTV URL based on user settings
strUrl = 'http://' + __settings__.getSetting("sage_user") + ':' + __settings__.getSetting("sage_pass") + '@' + __settings__.getSetting("sage_ip") + ':' + __settings__.getSetting("sage_port")
IMAGE_POSTER = xbmc.translatePath(os.path.join(__cwd__,'resources','media','poster.jpg'))
IMAGE_THUMB = xbmc.translatePath(os.path.join(__cwd__,'resources','media','thumb.jpg'))
DEFAULT_CHARSET = 'utf-8'
MIN_VERSION_SAGEX_REQUIRED = "7.1.9.12"
VERSION_XBMCJS_REQUIRED = "1.4.2"
# 500-THUMBNAIL 501/502/505/506/507/508-LIST 503-MINFO2 504-MINFO 515-MINFO3
confluence_views = [500,501,502,503,504,508]
def TOPLEVELCATEGORIES():
    url = strUrl + '/sagex/api?c=xbmc:GetPluginVersion&1=sagex-api-services&encoder=json'
    sagexVersion = executeSagexAPIJSONCall(url, "Result")

    #First check that the sagex-services plugin exists in SageTV and can be called
    if(sagexVersion == None or sagexVersion.find("Exception") != -1):
        #If no plugins were returned, first check that the user has the appropriate xbmc.js which has the required GetPluginVersion method
        print "************errorMsg=" + str(sagexVersion)
        if(sagexVersion == "Exception: Problem accessing /sagex/api"):
            print "Sagex API not installed on the SageTV server"
            xbmcgui.Dialog().ok(__language__(30104),__language__(30105) + " " + MIN_VERSION_SAGEX_REQUIRED, __language__(30106),__language__(30107))
            xbmc.executebuiltin('ActivateWindow(Home)')
            return
        elif(sagexVersion.find("javax.script.ScriptException: sun.org.mozilla.javascript.internal.EvaluatorException") != -1):
            print "xbmc.js file found but does not appear to be a valid .js file and is likely corrupt"
            xbmcgui.Dialog().ok(__language__(30104),__language__(30148),__language__(30146),__language__(30147))
            xbmc.executebuiltin('ActivateWindow(Home)')
            return
        else:
            print "SageTV not detected, or required plugins not installed"
            xbmcgui.Dialog().ok(__language__(30100),__language__(30101),__language__(30102),__language__(30103))
            xbmc.executebuiltin('ActivateWindow(Home)')
            return

    print "Successfully able to connect to the SageTV server @ " + __settings__.getSetting("sage_ip") + ':' + __settings__.getSetting("sage_port")

    #Second check that the version of the sagex-services plugin matches the minimum version required by this addon
    if(sagexVersion == ""):
        xbmcgui.Dialog().ok(__language__(30104),__language__(30105) + " " + MIN_VERSION_SAGEX_REQUIRED, __language__(30106),__language__(30107))
        xbmc.executebuiltin('ActivateWindow(Home)')
        return
    if(comparePluginVersions(sagexVersion, MIN_VERSION_SAGEX_REQUIRED) < 0):
        xbmcgui.Dialog().ok(__language__(30104),__language__(30105) + " " + MIN_VERSION_SAGEX_REQUIRED, __language__(30108) + " " + sagexVersion,__language__(30109) + " " + MIN_VERSION_SAGEX_REQUIRED)
        xbmc.executebuiltin('ActivateWindow(Home)')
        return

    #Third check that the version of xbmc.js file matches the minimum version required by this addon
    url = strUrl + '/sagex/api?c=xbmc:GetXBMCJSVersionNumber&encoder=json'
    xbmcjsVersion = executeSagexAPIJSONCall(url, "Result")
    if(xbmcjsVersion != VERSION_XBMCJS_REQUIRED):
        print "***xbmc.js version found=" + xbmcjsVersion + "; user must make sure they have the latest xbmc.js installed on their SageTV server (VERSION_XBMCJS_REQUIRED=" + VERSION_XBMCJS_REQUIRED + ")"
        xbmcgui.Dialog().ok(__language__(30104),__language__(30145),__language__(30146),__language__(30147))
        xbmc.executebuiltin('ActivateWindow(Home)')
        return

    print "TOPLEVELCATEGORIES STARTED; xbmc.js file version=" + xbmcjsVersion + ";sagex-api-services version=" + sagexVersion
    print "*****SETTINGS*****"
    print "*****sage_rec=" + sage_rec + ";sage_unc=" + sage_unc + "*****"
    print "*****sage_rec2=" + sage_rec2 + ";sage_unc2=" + sage_unc2 + "*****"
    print "*****sage_rec3=" + sage_rec3 + ";sage_unc3=" + sage_unc3 + "*****"
    print "*****sage_rec4=" + sage_rec4 + ";sage_unc4=" + sage_unc4 + "*****"
    print "*****sage_rec5=" + sage_rec5 + ";sage_unc5=" + sage_unc5 + "*****"
    print "*****sage_mac=" + sage_mac + "*****"

    #Watch Recordings
    addTopLevelDir(__language__(30030), strUrl + '/sagex/api?c=xbmc:GetTVMediaFilesGroupedByTitle&size=500&encoder=json',1,IMAGE_POSTER,__language__(30036))
    #View Upcoming Recordings
    addTopLevelDir(__language__(30031), strUrl + '/sagex/api?command=GetScheduledRecordings&encoder=json',2,IMAGE_POSTER,__language__(30037))
    #Browse Airings (by time)
    addTopLevelDir(__language__(30032), strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByBoolMethod(GetAllChannels(), "IsChannelViewable", true)&size=1000&encoder=json',3,IMAGE_POSTER,__language__(30038))
    #Browse Airings (by channel)
    addTopLevelDir(__language__(30033), strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByBoolMethod(GetAllChannels(), "IsChannelViewable", true)&size=1000&encoder=json',4,IMAGE_POSTER,__language__(30039))
    #Search for Recordings
    addTopLevelDir(__language__(30034), strUrl + '/',5,IMAGE_POSTER,__language__(30040))
    #Search for Airings
    addTopLevelDir(__language__(30035), strUrl + '/',6,IMAGE_POSTER,__language__(30041))

    xbmc.executebuiltin("Container.SetViewMode(535)")
def VIEWLISTOFRECORDEDSHOWS(url,name):
    #Get the list of Recorded shows
    now = time.time()
    strNowObject = date.fromtimestamp(now)
    now = "%02d.%02d.%s" % (strNowObject.day+1, strNowObject.month, strNowObject.year)  # note: day+1 (tomorrow) can overflow at month end
    titleObjects = executeSagexAPIJSONCall(url, "Result")
    titles = titleObjects.keys()
    totalEpisodesForAllShows = 0
    totalEpisodesWatchedForAllShows = 0
    for title in titles:
        mfsForTitle = titleObjects.get(title)
        # Only the first episode's metadata is needed to describe the show folder
        for mfSubset in mfsForTitle:
            strTitle = mfSubset.get("ShowTitle")
            strTitleEncoded = strTitle.encode("utf8")
            strMediaFileID = mfSubset.get("MediaFileID")
            strExternalID = mfSubset.get("ShowExternalID")
            strGenre = mfSubset.get("ShowGenre")
            startTime = float(mfSubset.get("AiringStartTime") // 1000)
            strAiringdateObject = date.fromtimestamp(startTime)
            strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
            totalEpisodesForShow = mfSubset.get("TotalEpisodes")
            totalEpisodesWatchedForShow = mfSubset.get("TotalWatchedEpisodes")
            totalEpisodesForAllShows = totalEpisodesForAllShows + totalEpisodesForShow
            totalEpisodesWatchedForAllShows = totalEpisodesWatchedForAllShows + totalEpisodesWatchedForShow
            break
        urlToShowEpisodes = strUrl + '/sagex/api?c=xbmc:GetMediaFilesForShowWithSubsetOfProperties&1=' + urllib2.quote(strTitleEncoded) + '&size=500&encoder=json'
        #urlToShowEpisodes = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByMethod(GetMediaFiles("T"),"GetMediaTitle","' + urllib2.quote(strTitle.encode("utf8")) + '",true)&size=500&encoder=json'
        #urlToShowEpisodes = strUrl + '/sage/Search?searchType=TVFiles&SearchString=' + urllib2.quote(strTitle.encode("utf8")) + '&DVD=on&sort2=airdate_asc&partials=both&TimeRange=0&pagelen=100&sort1=title_asc&filename=&Video=on&search_fields=title&xml=yes'
        print "ADDING strTitleEncoded=" + strTitleEncoded + "; urlToShowEpisodes=" + urlToShowEpisodes
        imageUrl = strUrl + "/sagex/media/poster/" + strMediaFileID
        fanartUrl = strUrl + "/sagex/media/background/" + strMediaFileID
        #print "ADDING imageUrl=" + imageUrl
        addDir(strTitleEncoded, urlToShowEpisodes,11,imageUrl,'',strExternalID,strAiringdate,fanartUrl,totalEpisodesForShow,totalEpisodesWatchedForShow,strGenre)
    addDir('[All Shows]',strUrl + '/sagex/api?c=xbmc:GetMediaFilesForShowWithSubsetOfProperties&1=&size=500&encoder=json',11,IMAGE_POSTER,IMAGE_THUMB,'',now,'',totalEpisodesForAllShows,totalEpisodesWatchedForAllShows,'')

def VIEWLISTOFEPISODESFORSHOW(url,name):
    mfs = executeSagexAPIJSONCall(url, "Result")
    print "# of EPISODES for " + name + "=" + str(len(mfs))
    if(mfs == None or len(mfs) == 0):
        print "NO EPISODES FOUND FOR SHOW=" + name
        xbmcplugin.endOfDirectory(int(sys.argv[1]), updateListing=True)
        return
    for mfSubset in mfs:
        strTitle = mfSubset.get("ShowTitle")
        strTitleEncoded = strTitle.encode("utf8")
        strMediaFileID = mfSubset.get("MediaFileID")
        strEpisode = mfSubset.get("EpisodeTitle")
        strDescription = mfSubset.get("EpisodeDescription")
        strGenre = mfSubset.get("ShowGenre")
        strAiringID = mfSubset.get("AiringID")
        seasonNum = int(mfSubset.get("SeasonNumber"))
        episodeNum = int(mfSubset.get("EpisodeNumber"))
        studio = mfSubset.get("AiringChannelName")
        isFavorite = mfSubset.get("IsFavorite")
        watchedDuration = mfSubset.get("WatchedDuration", 0) // 1000
        fileDuration = mfSubset.get("FileDuration", 0) // 1000
        isWatched = mfSubset.get("IsWatched")
        isArchived = mfSubset.get("IsLibraryFile")
        startTime = float(mfSubset.get("AiringStartTime") // 1000)
        strAiringdateObject = date.fromtimestamp(startTime)
        airTime = strftime('%H:%M', time.localtime(startTime))
        strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
        strOriginalAirdate = strAiringdate
        if(mfSubset.get("OriginalAiringDate") > 0):
            startTime = float(mfSubset.get("OriginalAiringDate") // 1000)
            strOriginalAirdateObject = date.fromtimestamp(startTime)
            strOriginalAirdate = "%02d.%02d.%s" % (strOriginalAirdateObject.day, strOriginalAirdateObject.month, strOriginalAirdateObject.year)

        # if there is no episode name use the description in the title
        if(strGenre.find("Movie")<0 and strGenre.find("Movies")<0 and strGenre.find("Film")<0 and strGenre.find("Shopping")<0 and strGenre.find("Consumer")<0):
            strDisplayText = strEpisode
            if(strEpisode == ""):
                if(strDescription != ""):
                    strDisplayText = strDescription
                else:
                    if(strGenre.find("News")>=0):
                        strDisplayText = studio + " News - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
                    elif(strGenre.find("Sports")>=0):
                        strDisplayText = strTitleEncoded + " - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
            if(name == "[All Shows]"):
                strDisplayText = strTitleEncoded + " - " + strDisplayText
        else:
            strDisplayText = strTitleEncoded

        segs = mfSubset.get("SegmentFiles")
        if(len(segs) == 1):
            strMappedFilepath = filemap(mfSubset.get("SegmentFiles")[0])
        else:
            #If a recording has multiple segments, stack them to group the segments together such that during playback it's transparent to the user
            strMappedFilepath = "stack://"
            for seg in segs:
                strMappedFilepath = strMappedFilepath + filemap(seg) + " , "
            #Once the stack:// is generated, remove the extraneous " , " at the end of it
            strMappedFilepath = strMappedFilepath[0:len(strMappedFilepath)-3]
            #strMappedFilepath = strUrl + '/sagex/api?c=xbmc:GetPlaylistOfSegmentsForMediafile&1=%s&2=%s&3=%s&raw_content_type=audio/mpegurl&encoder=raw' % (strMediaFileID, urllib2.quote(sage_rec.encode("utf8")), urllib2.quote(sage_unc.encode("utf8")))
        print "************SEGS=" + str(segs)
        print "************strMappedFilepath=" + str(strMappedFilepath)
        imageUrl = strUrl + "/sagex/media/poster/" + strMediaFileID
        fanartUrl = strUrl + "/sagex/media/background/" + strMediaFileID
        addMediafileLink(strDisplayText,strMappedFilepath,strDescription,imageUrl,strGenre,strOriginalAirdate,strAiringdate,strTitleEncoded,strMediaFileID,strAiringID,seasonNum,episodeNum,studio,isFavorite,isWatched,watchedDuration,fileDuration,fanartUrl,isArchived)
    xbmc.executebuiltin("Container.SetViewMode(504)")
def VIEWUPCOMINGRECORDINGS(url,name):
    #req = urllib.urlopen(url)
    airings = executeSagexAPIJSONCall(url, "Result")
    for airing in airings:
        show = airing.get("Show")
        strTitle = airing.get("AiringTitle")
        strTitleEncoded = strTitle.encode("utf8")
        strEpisode = show.get("ShowEpisode")
        if(strEpisode == None):
            strEpisode = ""
        strDescription = show.get("ShowDescription")
        if(strDescription == None):
            strDescription = ""
        strGenre = show.get("ShowCategoriesString")
        strAiringID = str(airing.get("AiringID"))
        seasonNum = int(show.get("ShowSeasonNumber"))
        episodeNum = int(show.get("ShowEpisodeNumber"))
        studio = airing.get("AiringChannelName")
        isFavorite = airing.get("IsFavorite")
        startTime = float(airing.get("AiringStartTime") // 1000)
        strAiringdateObject = date.fromtimestamp(startTime)
        airTime = strftime('%H:%M', time.localtime(startTime))
        strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
        strOriginalAirdate = strAiringdate
        if(airing.get("OriginalAiringDate")):
            startTime = float(airing.get("OriginalAiringDate") // 1000)
            strOriginalAirdateObject = date.fromtimestamp(startTime)
            strOriginalAirdate = "%02d.%02d.%s" % (strOriginalAirdateObject.day, strOriginalAirdateObject.month, strOriginalAirdateObject.year)

        # if there is no episode name use the description in the title
        strDisplayText = strTitleEncoded
        if(strGenre.find("Movie")<0 and strGenre.find("Movies")<0 and strGenre.find("Film")<0 and strGenre.find("Shopping")<0 and strGenre.find("Consumer")<0):
            if(strEpisode == ""):
                if(strDescription != ""):
                    strDisplayText = strTitleEncoded + ' - ' + strDescription
                else:
                    if(strGenre.find("News")>=0):
                        strDisplayText = studio + " News - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
                    elif(strGenre.find("Sports")>=0):
                        strDisplayText = strTitleEncoded + " - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
            else:
                strDisplayText = strTitleEncoded + ' - ' + strEpisode
        strDisplayText = strftime('%m-%d', time.localtime(startTime)) + " @ " + airTime + ": " + strDisplayText
        addAiringLink(strDisplayText,'',strDescription,IMAGE_THUMB,strGenre,strOriginalAirdate,strAiringdate,strTitleEncoded,strAiringID,seasonNum,episodeNum,studio,isFavorite, airing.get("AiringStartTime"), airing.get("AiringEndTime"))
    xbmc.executebuiltin("Container.SetViewMode(504)")

def VIEWTIMESLOTLISTING(url,name):
    #Show time slots as far out as 7 days from now
    rangeSizeHours = 7 * 24
    rangeSizeSeconds = 1 * 60 * 60
    tempStartTime = time.time()
    #Take the start time and round the minutes down ... e.g. if it's 9:07AM, round it down so that the start range is 9:00AM
    tempStartTimeLocalTime = time.localtime(tempStartTime)
    l = list(tempStartTimeLocalTime)
    l[4] = 0
    tempStartTimeLocalTime = time.struct_time(l)
    tempStartTime = time.mktime(tempStartTimeLocalTime)
    tempEndTime = tempStartTime + rangeSizeSeconds-60
    tempHours = 1
    while(tempHours <= rangeSizeHours):
        startRange = str(long(tempStartTime * 1000))
        endRange = str(long(tempEndTime * 1000))
        # USE GetAiringsOnViewableChannelsAtTime
        urlToAiringsInTimeslot = strUrl + '/sagex/api?command=EvaluateExpression&1=GetAiringsOnViewableChannelsAtTime("' + startRange + '","' + endRange + '",false)&encoder=json'
        tempStartTimeLocalTime = time.localtime(tempStartTime)
        tempEndTimeLocalTime = time.localtime(tempEndTime)
        airStartTime = strftime('%H:%M', tempStartTimeLocalTime)
        airEndTime = strftime('%H:%M', tempEndTimeLocalTime)
        strDisplayText = strftime('%m-%d', tempStartTimeLocalTime) + " @ " + airStartTime + "-" + airEndTime
        tempHours = tempHours + 1
        tempStartTime = tempStartTime + rangeSizeSeconds
        tempEndTime = tempStartTime + rangeSizeSeconds-60
        addTimeslotDir(strDisplayText, urlToAiringsInTimeslot,31)
    xbmc.executebuiltin("Container.SetViewMode(535)")

def VIEWAIRINGSONTIMESLOT(url,name):
    airings = executeSagexAPIJSONCall(url, "Result")
    for airing in airings:
        show = airing.get("Show")
        strTitle = airing.get("AiringTitle")
        strTitleEncoded = strTitle.encode("utf8")
        strEpisode = show.get("ShowEpisode")
        if(strEpisode == None):
            strEpisode = ""
        strDescription = show.get("ShowDescription")
        if(strDescription == None):
            strDescription = ""
        strGenre = show.get("ShowCategoriesString")
        strAiringID = str(airing.get("AiringID"))
        seasonNum = int(show.get("ShowSeasonNumber"))
        episodeNum = int(show.get("ShowEpisodeNumber"))
        channelName = airing.get("AiringChannelName")
        channelNumber = airing.get("AiringChannelNumber")
        isFavorite = airing.get("IsFavorite")
        startTime = float(airing.get("AiringStartTime") // 1000)
        strAiringdateObject = date.fromtimestamp(startTime)
        airTime = strftime('%H:%M', time.localtime(startTime))
        strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
        strOriginalAirdate = strAiringdate
        if(airing.get("OriginalAiringDate")):
            startTime = float(airing.get("OriginalAiringDate") // 1000)
            strOriginalAirdateObject = date.fromtimestamp(startTime)
            strOriginalAirdate = "%02d.%02d.%s" % (strOriginalAirdateObject.day, strOriginalAirdateObject.month, strOriginalAirdateObject.year)

        # if there is no episode name use the description in the title
        strDisplayText = strTitleEncoded
        if(strGenre.find("Movie")<0 and strGenre.find("Movies")<0 and strGenre.find("Film")<0 and strGenre.find("Shopping")<0 and strGenre.find("Consumer")<0):
            if(strEpisode == ""):
                if(strDescription != ""):
                    strDisplayText = strTitleEncoded + ' - ' + strDescription
                else:
                    if(strGenre.find("News")>=0):
                        strDisplayText = channelName + " News - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
                    elif(strGenre.find("Sports")>=0):
                        strDisplayText = strTitleEncoded + " - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
            else:
                strDisplayText = strTitleEncoded + ' - ' + strEpisode
        strDisplayText = channelNumber + "-" + channelName + " @ " + strftime('%H:%M', time.localtime(startTime)) + ": " + strDisplayText
        addAiringLink(strDisplayText,'',strDescription,IMAGE_THUMB,strGenre,strOriginalAirdate,strAiringdate,strTitleEncoded,strAiringID,seasonNum,episodeNum,channelName,isFavorite, airing.get("AiringStartTime"), airing.get("AiringEndTime"))
    xbmc.executebuiltin("Container.SetViewMode(504)")
def VIEWCHANNELLISTING(url,name):
    print "************url=" + str(url)
    channels = executeSagexAPIJSONCall(url, "Result")
    for channel in channels:
        channelNumber = channel.get("ChannelNumber")
        channelName = channel.get("ChannelName")
        channelDescription = channel.get("ChannelDescription")
        channelNetwork = channel.get("ChannelNetwork")
        channelStationID = channel.get("StationID")
        now = time.time()
        startRange = str(long(now * 1000))
        rangeSizeDays = 7
        rangeSizeSeconds = rangeSizeDays * 24 * 60 * 60
        endRange = str(long((now + rangeSizeSeconds) * 1000))
        urlToAiringsOnChannel = strUrl + '/sagex/api?command=EvaluateExpression&1=GetAiringsOnChannelAtTime(GetChannelForStationID("' + str(channelStationID) + '"),"' + startRange + '","' + endRange + '",false)&encoder=json'
        logoUrl = strUrl + "/sagex/media/logo/" + str(channelStationID)
        strDisplayText = channelNumber + "-" + channelName
        addChannelDir(strDisplayText, urlToAiringsOnChannel,41,logoUrl,channelDescription)
    xbmc.executebuiltin("Container.SetViewMode(535)")

def VIEWAIRINGSONCHANNEL(url,name):
    airings = executeSagexAPIJSONCall(url, "Result")
    for airing in airings:
        show = airing.get("Show")
        strTitle = airing.get("AiringTitle")
        strTitleEncoded = strTitle.encode("utf8")
        strEpisode = show.get("ShowEpisode")
        if(strEpisode == None):
            strEpisode = ""
        strDescription = show.get("ShowDescription")
        if(strDescription == None):
            strDescription = ""
        strGenre = show.get("ShowCategoriesString")
        strAiringID = str(airing.get("AiringID"))
        seasonNum = int(show.get("ShowSeasonNumber"))
        episodeNum = int(show.get("ShowEpisodeNumber"))
        studio = airing.get("AiringChannelName")
        isFavorite = airing.get("IsFavorite")
        startTime = float(airing.get("AiringStartTime") // 1000)
        strAiringdateObject = date.fromtimestamp(startTime)
        airTime = strftime('%H:%M', time.localtime(startTime))
        strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
        strOriginalAirdate = strAiringdate
        if(airing.get("OriginalAiringDate")):
            startTime = float(airing.get("OriginalAiringDate") // 1000)
            strOriginalAirdateObject = date.fromtimestamp(startTime)
            strOriginalAirdate = "%02d.%02d.%s" % (strOriginalAirdateObject.day, strOriginalAirdateObject.month, strOriginalAirdateObject.year)

        # if there is no episode name use the description in the title
        strDisplayText = strTitleEncoded
        if(strGenre.find("Movie")<0 and strGenre.find("Movies")<0 and strGenre.find("Film")<0 and strGenre.find("Shopping")<0 and strGenre.find("Consumer")<0):
            if(strEpisode == ""):
                if(strDescription != ""):
                    strDisplayText = strTitleEncoded + ' - ' + strDescription
                else:
                    if(strGenre.find("News")>=0):
                        strDisplayText = studio + " News - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
                    elif(strGenre.find("Sports")>=0):
                        strDisplayText = strTitleEncoded + " - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
            else:
                strDisplayText = strTitleEncoded + ' - ' + strEpisode
        strDisplayText = strftime('%m-%d', time.localtime(startTime)) + " @ " + airTime + ": " + strDisplayText
        addAiringLink(strDisplayText,'',strDescription,IMAGE_THUMB,strGenre,strOriginalAirdate,strAiringdate,strTitleEncoded,strAiringID,seasonNum,episodeNum,studio,isFavorite, airing.get("AiringStartTime"), airing.get("AiringEndTime"))
    xbmc.executebuiltin("Container.SetViewMode(504)")
def SEARCHFORRECORDINGS(url,name):
    keyboard = xbmc.Keyboard('', __language__(30110))
    keyboard.doModal()
    if (keyboard.isConfirmed()):
        titleToSearchFor = keyboard.getText()
        if(titleToSearchFor == "" or titleToSearchFor == None):
            return
        url = strUrl + '/sagex/api?c=xbmc:SearchForMediaFiles&1=%s&size=100&encoder=json' % urllib2.quote(titleToSearchFor.encode("utf8"))
        #url = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByMethod(GetMediaFiles("T"), "GetMediaTitle", "' + urllib2.quote(titleToSearchFor.encode("utf8")) + '", true)&size=100&encoder=json'
        mfs = executeSagexAPIJSONCall(url, "Result")
        print "# of EPISODES for " + titleToSearchFor + "=" + str(len(mfs))
        if(mfs == None or len(mfs) == 0):
            print "NO EPISODES FOUND FOR SEARCH=" + titleToSearchFor
            xbmcplugin.endOfDirectory(int(sys.argv[1]), updateListing=True)
            return
        for mfSubset in mfs:
            strTitle = mfSubset.get("ShowTitle")
            strTitleEncoded = strTitle.encode("utf8")
            strMediaFileID = mfSubset.get("MediaFileID")
            strEpisode = mfSubset.get("EpisodeTitle")
            strDescription = mfSubset.get("EpisodeDescription")
            strGenre = mfSubset.get("ShowGenre")
            strAiringID = mfSubset.get("AiringID")
            seasonNum = int(mfSubset.get("SeasonNumber"))
            episodeNum = int(mfSubset.get("EpisodeNumber"))
            studio = mfSubset.get("AiringChannelName")
            isFavorite = mfSubset.get("IsFavorite")
            watchedDuration = mfSubset.get("WatchedDuration", 0) // 1000
            fileDuration = mfSubset.get("FileDuration", 0) // 1000
            isWatched = mfSubset.get("IsWatched")
            isArchived = mfSubset.get("IsLibraryFile")
            startTime = float(mfSubset.get("AiringStartTime") // 1000)
            strAiringdateObject = date.fromtimestamp(startTime)
            airTime = strftime('%H:%M', time.localtime(startTime))
            strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
            strOriginalAirdate = strAiringdate
            if(mfSubset.get("OriginalAiringDate") > 0):
                startTime = float(mfSubset.get("OriginalAiringDate") // 1000)
                strOriginalAirdateObject = date.fromtimestamp(startTime)
                strOriginalAirdate = "%02d.%02d.%s" % (strOriginalAirdateObject.day, strOriginalAirdateObject.month, strOriginalAirdateObject.year)

            # if there is no episode name use the description in the title
            strDisplayText = strTitleEncoded
            if(strGenre.find("Movie")<0 and strGenre.find("Movies")<0 and strGenre.find("Film")<0 and strGenre.find("Shopping")<0 and strGenre.find("Consumer")<0):
                if(strEpisode != "" and strDescription != ""):
                    strDisplayText = strTitleEncoded + ' - ' + strDescription
                elif(strEpisode != ""):
                    strDisplayText = strTitleEncoded + ' - ' + strEpisode
                else:
                    if(strGenre.find("News")>=0):
                        strDisplayText = studio + " News - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
                    elif(strGenre.find("Sports")>=0):
                        strDisplayText = strTitleEncoded + " - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre

            segs = mfSubset.get("SegmentFiles")
            if(len(segs) == 1):
                strMappedFilepath = filemap(mfSubset.get("SegmentFiles")[0])
            else:
                #If a recording has multiple segments, stack them to group the segments together such that during playback it's transparent to the user
                strMappedFilepath = "stack://"
                for seg in segs:
                    strMappedFilepath = strMappedFilepath + filemap(seg) + " , "
                #Once the stack:// is generated, remove the extraneous " , " at the end of it
                strMappedFilepath = strMappedFilepath[0:len(strMappedFilepath)-3]
                #strMappedFilepath = strUrl + '/sagex/api?c=xbmc:GetPlaylistOfSegmentsForMediafile&1=%s&2=%s&3=%s&raw_content_type=audio/mpegurl&encoder=raw' % (strMediaFileID, urllib2.quote(sage_rec.encode("utf8")), urllib2.quote(sage_unc.encode("utf8")))
            print "************SEGS=" + str(segs)
            print "************strMappedFilepath=" + str(strMappedFilepath)
            imageUrl = strUrl + "/sagex/media/poster/" + strMediaFileID
            fanartUrl = strUrl + "/sagex/media/background/" + strMediaFileID
            addMediafileLink(strDisplayText,strMappedFilepath,strDescription,imageUrl,strGenre,strOriginalAirdate,strAiringdate,strTitleEncoded,strMediaFileID,strAiringID,seasonNum,episodeNum,studio,isFavorite,isWatched,watchedDuration,fileDuration,fanartUrl,isArchived)
        xbmc.executebuiltin("Container.SetViewMode(504)")

def SEARCHFORAIRINGS(url,name):
    keyboard = xbmc.Keyboard('', __language__(30110))
    keyboard.doModal()
    if (keyboard.isConfirmed()):
        titleToSearchFor = keyboard.getText()
        if(titleToSearchFor == "" or titleToSearchFor == None):
            return
        now = time.time()
        startRange = str(long(now * 1000))
        #url = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByRange(SearchByTitle("%s","T"),"GetAiringStartTime","%s",java_lang_Long_MAX_VALUE,true)&encoder=json' % (urllib2.quote(titleToSearchFor.encode("utf8")), startRange)
        #url = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByRange(SearchByTitle("%s","T"),"GetAiringStartTime",java_lang_Long_parseLong("%d"),java_lang_Long_MAX_VALUE,true)&encoder=json' % (urllib2.quote(titleToSearchFor.encode("utf8")), int(time.time()) * 1000)
        url = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByRange(SearchSelectedFields("%s",false,true,true,false,false,false,false,false,false,false,"T"),"GetAiringStartTime",java_lang_Long_parseLong("%d"),java_lang_Long_MAX_VALUE,true)&size=100&encoder=json' % (urllib2.quote(titleToSearchFor.encode("utf8")), int(time.time()) * 1000)
        airings = executeSagexAPIJSONCall(url, "Result")
        for airing in airings:
            show = airing.get("Show")
            strTitle = airing.get("AiringTitle")
            strTitleEncoded = strTitle.encode("utf8")
            strEpisode = show.get("ShowEpisode")
            if(strEpisode == None):
                strEpisode = ""
            strDescription = show.get("ShowDescription")
            if(strDescription == None):
                strDescription = ""
            strGenre = show.get("ShowCategoriesString")
            strAiringID = str(airing.get("AiringID"))
            seasonNum = int(show.get("ShowSeasonNumber"))
            episodeNum = int(show.get("ShowEpisodeNumber"))
            studio = airing.get("AiringChannelName")
            isFavorite = airing.get("IsFavorite")
            startTime = float(airing.get("AiringStartTime") // 1000)
            strAiringdateObject = date.fromtimestamp(startTime)
            airTime = strftime('%H:%M', time.localtime(startTime))
            strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
            strOriginalAirdate = strAiringdate
            if(airing.get("OriginalAiringDate")):
                startTime = float(airing.get("OriginalAiringDate") // 1000)
                strOriginalAirdateObject = date.fromtimestamp(startTime)
                strOriginalAirdate = "%02d.%02d.%s" % (strOriginalAirdateObject.day, strOriginalAirdateObject.month, strOriginalAirdateObject.year)

            # if there is no episode name use the description in the title
            strDisplayText = strTitleEncoded
            if(strGenre.find("Movie")<0 and strGenre.find("Movies")<0 and strGenre.find("Film")<0 and strGenre.find("Shopping")<0 and strGenre.find("Consumer")<0):
                if(strEpisode == ""):
                    if(strDescription != ""):
                        strDisplayText = strTitleEncoded + ' - ' + strDescription
                    else:
                        strDisplayText = studio + " News - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                        strDescription = strGenre
                else:
                    strDisplayText = strTitleEncoded + ' - ' + strEpisode
            strDisplayText = strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime + ": " + strDisplayText
            addAiringLink(strDisplayText,'',strDescription,IMAGE_THUMB,strGenre,strOriginalAirdate,strAiringdate,strTitleEncoded,strAiringID,seasonNum,episodeNum,studio,isFavorite, airing.get("AiringStartTime"), airing.get("AiringEndTime"))
        xbmc.executebuiltin("Container.SetViewMode(504)")
def get_params():
    param=[]
    paramstring=sys.argv[2]
    if len(paramstring)>=2:
        params=sys.argv[2]
        cleanedparams=params.replace('?','')
        if (params[len(params)-1]=='/'):
            params=params[0:len(params)-2]
        pairsofparams=cleanedparams.split('&')
        param={}
        for i in range(len(pairsofparams)):
            splitparams={}
            splitparams=pairsofparams[i].split('=')
            if (len(splitparams))==2:
                param[splitparams[0]]=splitparams[1]
    return param
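# For example (values hypothetical), if XBMC invokes the plugin with
# sys.argv[2] == "?url=http%3A%2F%2Fserver&mode=1&name=Foo", get_params()
# returns {'url': 'http%3A%2F%2Fserver', 'mode': '1', 'name': 'Foo'}.
# Values stay URL-quoted here; they are unquoted at the bottom of the script.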
def addMediafileLink(name,url,plot,iconimage,genre,originalairingdate,airingdate,showtitle,mediafileid,airingid,seasonnum,episodenum,studio,isfavorite,iswatched,resumetime,totaltime,fanartimage,isArchived):
    ok=True
    liz=xbmcgui.ListItem(name)
    scriptToRun = "special://home/addons/plugin.video.sagetv/contextmenuactions.py"
    actionDelete = "delete|" + strUrl + '/sagex/api?command=DeleteFile&1=mediafile:' + mediafileid
    actionDeleteSetWatched = "deletesetwatched|" + strUrl + '/sagex/api?command=SetWatched&1=mediafile:' + mediafileid + "|" + strUrl + '/sagex/api?command=DeleteFile&1=mediafile:' + mediafileid
    actionDeleteClearWatched = "deleteclearwatched|" + strUrl + '/sagex/api?command=ClearWatched&1=mediafile:' + mediafileid + "|" + strUrl + '/sagex/api?command=DeleteFile&1=mediafile:' + mediafileid
    actionDeleteDontLike = "deletedontlike|" + strUrl + '/sagex/api?command=SetDontLike&1=mediafile:' + mediafileid + "|" + strUrl + '/sagex/api?command=DeleteFile&1=mediafile:' + mediafileid
    actionDeleteWrongRecording = "deletewrongrecording|" + strUrl + '/sagex/api?command=DeleteFileWithoutPrejudice&1=mediafile:' + mediafileid
    actionSetWatched = "setwatched|" + strUrl + '/sagex/api?command=SetWatched&1=mediafile:' + mediafileid
    actionClearWatched = "clearwatched|" + strUrl + '/sagex/api?command=ClearWatched&1=mediafile:' + mediafileid
    actionSetArchived = "setarchived|" + strUrl + '/sagex/api?command=MoveFileToLibrary&1=mediafile:' + mediafileid
    actionClearArchived = "cleararchived|" + strUrl + '/sagex/api?command=MoveTVFileOutOfLibrary&1=mediafile:' + mediafileid
    actionCancelRecording = "cancelrecording|" + strUrl + '/sagex/api?command=CancelRecord&1=mediafile:' + mediafileid
    actionAddFavorite = "addfavorite|" + strUrl + '/sagex/api?command=AddFavorite&1=%s&2=true&3=true&4=&5=&6=&7=&8=&9=&10=&11=&12=&13=&14=' % showtitle
    actionRemoveFavorite = "removefavorite|" + strUrl + '/sagex/api?command=EvaluateExpression&1=RemoveFavorite(GetFavoriteForAiring(GetAiringForID(' + airingid + ')))'
    actionWatchStream = "watchstream|" + strUrl + "|" + mediafileid

    bisAiringRecording = isAiringRecording(airingid)

    contextMenuItems = []
    if(iswatched):
        contextMenuItems.append((__language__(30123), 'XBMC.RunScript(' + scriptToRun + ', ' + actionClearWatched + ')'))
        liz.setInfo( type="Video", infoLabels={ "Title": name, "Plot": plot, "Genre": genre, "date": airingdate, "premiered": originalairingdate, "aired": originalairingdate, "TVShowTitle": showtitle, "season": seasonnum, "episode": episodenum, "studio": studio, "overlay": 7, "playcount": 1 } )
    else:
        contextMenuItems.append((__language__(30122), 'XBMC.RunScript(' + scriptToRun + ', ' + actionSetWatched + ')'))
        if(resumetime != 0 and totaltime != 0):
            liz.setProperty("resumetime",str(resumetime))
            liz.setProperty("totaltime",str(totaltime))
        liz.setInfo( type="Video", infoLabels={ "Title": name, "Plot": plot, "Genre": genre, "date": airingdate, "premiered": originalairingdate, "aired": originalairingdate, "TVShowTitle": showtitle, "season": seasonnum, "episode": episodenum, "studio": studio, "overlay": 6, "playcount": 0 } )

    if(bisAiringRecording):
        contextMenuItems.append((__language__(30117), 'XBMC.RunScript(' + scriptToRun + ', ' + actionCancelRecording + ')'))
        if(isfavorite):
            contextMenuItems.append((__language__(30118), 'XBMC.RunScript(' + scriptToRun + ', ' + actionRemoveFavorite + ')'))
        else:
            contextMenuItems.append((__language__(30130), 'XBMC.RunScript(' + scriptToRun + ', ' + actionAddFavorite + ')'))
    else:
        if(isfavorite):
            contextMenuItems.append((__language__(30118), 'XBMC.RunScript(' + scriptToRun + ', ' + actionRemoveFavorite + ')'))
        else:
            contextMenuItems.append((__language__(30130), 'XBMC.RunScript(' + scriptToRun + ', ' + actionAddFavorite + ')'))
        if(isArchived):
            contextMenuItems.append((__language__(30125), 'XBMC.RunScript(' + scriptToRun + ', ' + actionClearArchived + ')'))
        else:
            contextMenuItems.append((__language__(30124), 'XBMC.RunScript(' + scriptToRun + ', ' + actionSetArchived + ')'))
        contextMenuItems.append((__language__(30116), 'XBMC.RunScript(' + scriptToRun + ', ' + actionDelete + ')'))
        contextMenuItems.append((__language__(30126), 'XBMC.RunScript(' + scriptToRun + ', ' + actionDeleteSetWatched + ')'))
        contextMenuItems.append((__language__(30127), 'XBMC.RunScript(' + scriptToRun + ', ' + actionDeleteClearWatched + ')'))
        contextMenuItems.append((__language__(30128), 'XBMC.RunScript(' + scriptToRun + ', ' + actionDeleteDontLike + ')'))
        contextMenuItems.append((__language__(30129), 'XBMC.RunScript(' + scriptToRun + ', ' + actionDeleteWrongRecording + ')'))
    contextMenuItems.append((__language__(30137), 'XBMC.RunScript(' + scriptToRun + ', ' + actionWatchStream + ')'))

    liz.addContextMenuItems(contextMenuItems, True)
    liz.setIconImage(iconimage)
    liz.setThumbnailImage(iconimage)
    liz.setProperty("fanart_image",fanartimage)
    xbmcplugin.setContent(int(sys.argv[1]),'episodes')
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz,isFolder=False)
    return ok
def addAiringLink(name,url,plot,iconimage,genre,originalairingdate,airingdate,showtitle,airingid,seasonnum,episodenum,studio,isfavorite,starttime,endtime):
    ok=True
    liz=xbmcgui.ListItem(name)
    scriptToRun = "special://home/addons/plugin.video.sagetv/contextmenuactions.py"
    actionCancelRecording = "cancelrecording|" + strUrl + '/sagex/api?command=CancelRecord&1=airing:' + airingid
    actionAddFavorite = "addfavorite|" + strUrl + '/sagex/api?command=AddFavorite&1=%s&2=true&3=true&4=&5=&6=&7=&8=&9=&10=&11=&12=&13=&14=' % showtitle
    actionRemoveFavorite = "removefavorite|" + strUrl + '/sagex/api?command=EvaluateExpression&1=RemoveFavorite(GetFavoriteForAiring(GetAiringForID(' + airingid + ')))'
    actionRecord = "record|" + strUrl + '/sagex/api?command=Record&1=airing:' + airingid
    actionWatchNow = "watchnow|" + strUrl + "|" + airingid

    bisAiringScheduledToRecord = isAiringScheduledToRecord(airingid)

    contextMenuItems = []
    if(bisAiringScheduledToRecord):
        contextMenuItems.append((__language__(30117), 'XBMC.RunScript(' + scriptToRun + ', ' + actionCancelRecording + ')'))
        if(isfavorite):
            contextMenuItems.append((__language__(30118), 'XBMC.RunScript(' + scriptToRun + ', ' + actionRemoveFavorite + ')'))
        else:
            contextMenuItems.append((__language__(30130), 'XBMC.RunScript(' + scriptToRun + ', ' + actionAddFavorite + ')'))
    else:
        if(isfavorite):
            contextMenuItems.append((__language__(30119), 'XBMC.RunScript(' + scriptToRun + ', ' + actionRecord + ')'))
            contextMenuItems.append((__language__(30118), 'XBMC.RunScript(' + scriptToRun + ', ' + actionRemoveFavorite + ')'))
        else:
            #Check if an airing is airing live right now; if it is, provide the ability to watch it live
            bisAiringLiveNow = isAiringLiveNow(starttime, endtime)
            print "bisAiringLiveNow=" + str(bisAiringLiveNow)
            if(bisAiringLiveNow):
                contextMenuItems.append((__language__(30120), 'XBMC.RunScript(' + scriptToRun + ', ' + actionWatchNow + ')'))
            contextMenuItems.append((__language__(30119), 'XBMC.RunScript(' + scriptToRun + ', ' + actionRecord + ')'))
            contextMenuItems.append((__language__(30130), 'XBMC.RunScript(' + scriptToRun + ', ' + actionAddFavorite + ')'))
    liz.addContextMenuItems(contextMenuItems, True)
    liz.setInfo( type="Video", infoLabels={ "Title": name, "Plot": plot, "Genre": genre, "date": airingdate, "premiered": originalairingdate, "aired": originalairingdate, "TVShowTitle": showtitle, "season": seasonnum, "episode": episodenum, "studio": studio } )
    liz.setIconImage(iconimage)
    liz.setThumbnailImage(iconimage)
    xbmcplugin.setContent(int(sys.argv[1]),'episodes')
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz,isFolder=False)
    return ok
# Checks if an airing is among the scheduled recordings
def isAiringScheduledToRecord(airingid):
    sageApiUrl = strUrl + '/sagex/api?command=EvaluateExpression&1=java_util_HashSet_contains(new_java_util_HashSet(java_util_Arrays_asList(GetScheduledRecordings())),GetAiringForID(' + airingid + '))&encoder=json'
    return executeSagexAPIJSONCall(sageApiUrl, "Result")

# Checks if an airing is currently recording
def isAiringRecording(airingid):
    sageApiUrl = strUrl + '/sagex/api?command=IsFileCurrentlyRecording&1=airing:' + airingid + '&encoder=json'
    return executeSagexAPIJSONCall(sageApiUrl, "Result")

def getFavoriteIDForShowTitle(showtitle):
    sageApiUrl = strUrl + '/sagex/api?c=xbmc:GetFavoriteIDForShowTitle&1=%s&encoder=json' % urllib2.quote(showtitle)
    return executeSagexAPIJSONCall(sageApiUrl, "Result")

def getShowSeriesDescription(showexternalid):
    sageApiUrl = strUrl + '/sagex/api?command=EvaluateExpression&1=GetSeriesDescription(GetShowSeriesInfo(GetShowForExternalID("' + showexternalid + '")))&encoder=json'
    return executeSagexAPIJSONCall(sageApiUrl, "Result")

def isAiringLiveNow(starttime, endtime):
    now = int(time.time()) * 1000
    if(now >= starttime and now < endtime):
        return True
    return False
def executeSagexAPIJSONCall(url, resultToGet):
    print "*** sagex request URL:" + url
    url_error = False
    input = ""
    try:
        input = urllib.urlopen(url)
    except IOError, i:
        print "ERROR in executeSagexAPIJSONCall: Unable to connect to SageTV server"
        xbmc.executebuiltin('WakeOnLan(%s)'% sage_mac)
        xbmc.sleep(15000)
        url_error = True
    if url_error:
        input = urllib.urlopen(url)

    fileData = input.read()
    if(fileData.find("Problem accessing /sagex/api") != -1):
        return "Exception: Problem accessing /sagex/api"

    resp = unicodeToStr(_json.JSONDecoder().decode(fileData))
    objKeys = resp.keys()
    numKeys = len(objKeys)
    if(numKeys == 1):
        return resp.get(resultToGet)
    elif(numKeys > 1):
        error = resp.get("error")
        if(error != None and error != ""):
            return error
        else:
            return None
    else:
        return None
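# Typical call (host, port and credentials hypothetical): fetch the "Result"
# field of a sagex JSON response, e.g.
#   executeSagexAPIJSONCall('http://user:pass@192.168.1.10:8080/sagex/api?command=GetScheduledRecordings&encoder=json', 'Result')
# returns the decoded list of scheduled airings, or the response's 'error'
# field text when the server reported a failure.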
def addTopLevelDir(name,url,mode,iconimage,dirdescription):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name)
liz.setInfo(type="video", infoLabels={ "Title": name, "Plot": dirdescription } )
liz.setIconImage(iconimage)
liz.setThumbnailImage(iconimage)
#liz.setIconImage(xbmc.translatePath(os.path.join(__cwd__,'resources','media',iconimage)))
#liz.setThumbnailImage(xbmc.translatePath(os.path.join(__cwd__,'resources','media',iconimage)))
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addDir(name,url,mode,iconimage,thumbimage,showexternalid,airingdate,fanartimage,totalepisodesforshow,totalepisodeswatchedforshow,strgenre):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name)
strSeriesDescription = ""
strSeriesDescription = getShowSeriesDescription(showexternalid)
liz.setInfo(type="video", infoLabels={ "Title": name, "Plot": strSeriesDescription, "date": airingdate, "aired": airingdate, "Genre": strgenre, "episode": totalepisodesforshow } )
liz.setIconImage(iconimage)
if(thumbimage != ""):
liz.setThumbnailImage(thumbimage)
else:
liz.setThumbnailImage(iconimage)
liz.setProperty("fanart_image",fanartimage)
liz.setProperty("WatchedEpisodes",str(totalepisodeswatchedforshow))
liz.setProperty("UnWatchedEpisodes",str(totalepisodesforshow-totalepisodeswatchedforshow))
if(name != "[All Shows]"):
scriptToRun = "special://home/addons/plugin.video.sagetv/contextmenuactions.py"
actionSetAllWatched = "setallwatched|" + strUrl + '|' + name
actionClearAllWatched = "clearallwatched|" + strUrl + '|' + name
actionDeleteAll = "deleteall|" + strUrl + '|' + name
contextMenuItems = []
contextMenuItems.append((__language__(30142), 'XBMC.RunScript(' + scriptToRun + ', ' + actionSetAllWatched + ')'))
contextMenuItems.append((__language__(30143), 'XBMC.RunScript(' + scriptToRun + ', ' + actionClearAllWatched + ')'))
favID = getFavoriteIDForShowTitle(name)
if(favID != ""):
actionRemoveFavorite = "removefavorite|" + strUrl + '/sagex/api?command=EvaluateExpression&1=RemoveFavorite(GetFavoriteForID(' + favID + '))'
contextMenuItems.append((__language__(30118), 'XBMC.RunScript(' + scriptToRun + ', ' + actionRemoveFavorite + ')'))
else:
actionAddFavorite = "addfavorite|" + strUrl + '/sagex/api?command=AddFavorite&1=%s&2=true&3=true&4=&5=&6=&7=&8=&9=&10=&11=&12=&13=&14=' % name
contextMenuItems.append((__language__(30130), 'XBMC.RunScript(' + scriptToRun + ', ' + actionAddFavorite + ')'))
contextMenuItems.append((__language__(30144), 'XBMC.RunScript(' + scriptToRun + ', ' + actionDeleteAll + ')'))
liz.addContextMenuItems(contextMenuItems, True)
xbmcplugin.setContent(int(sys.argv[1]),'tvshows')
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True,totalItems=totalepisodesforshow)
return ok
def addTimeslotDir(name,url,mode):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name)
liz.setInfo(type="video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addChannelDir(name,url,mode,iconimage,channeldescription):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name)
liz.setInfo(type="video", infoLabels={ "Title": name, "Plot": channeldescription } )
liz.setIconImage(iconimage)
liz.setThumbnailImage(iconimage)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def unicodeToStr(obj):
    t = type(obj)  # compare against the type, not the instance
    if(t is unicode):
        return obj.encode(DEFAULT_CHARSET)
    elif(t is list):
        for i in range(0, len(obj)):
            obj[i] = unicodeToStr(obj[i])
        return obj
    elif(t is dict):
        for k in obj.keys():
            v = obj[k]
            del obj[k]
            obj[k.encode(DEFAULT_CHARSET)] = unicodeToStr(v)
        return obj
    else:
        return obj # leave numbers and booleans alone
def comparePluginVersions(s1, s2):
# See if they are equal.
if s1 == s2:
return 0
# Make sure they are the same length.
str1 = normalizePluginString(s1, len(string.split(s2, '.')))
str2 = normalizePluginString(s2, len(string.split(s1, '.')))
# Split into parts separated by '.'
p1 = string.split(str1, '.')
p2 = string.split(str2, '.')
for i in range(len(p1)):
int1 = int(p1[i])
int2 = int(p2[i])
if int1 < int2:
return -1
elif int2 < int1:
return 1
return 0
def normalizePluginString(s, l):
while len(string.split(s, '.')) < l:
s += ".0"
return s
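# Examples (hypothetical version strings):
#   comparePluginVersions("1.2", "1.2.0")  -> 0   ("1.2" is padded to "1.2.0")
#   comparePluginVersions("1.10", "1.9")   -> 1   (parts compare as ints, not text)
#   comparePluginVersions("1.4", "2.0")    -> -1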
params=get_params()
url=None
name=None
mode=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
if mode is None or url is None or len(url)<1:
print ""
TOPLEVELCATEGORIES()
#Watch Recordings
elif mode==1:
print ""+url
VIEWLISTOFRECORDEDSHOWS(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
#View List of Episodes for a show
elif mode==11:
print ""+url
VIEWLISTOFEPISODESFORSHOW(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_EPISODE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
#View upcoming recordings
elif mode==2:
print ""+url
VIEWUPCOMINGRECORDINGS(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_EPISODE)
#View airings by time (view list of time slots)
elif mode==3:
print ""+url
VIEWTIMESLOTLISTING(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
#View airings for a specific time slot
elif mode==31:
print ""+url
VIEWAIRINGSONTIMESLOT(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_EPISODE)
#View airings by channel (view list of channels)
elif mode==4:
print ""+url
VIEWCHANNELLISTING(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
#View airings on channel
elif mode==41:
print ""+url
VIEWAIRINGSONCHANNEL(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_EPISODE)
#Search for recordings
elif mode==5:
print ""+url
SEARCHFORRECORDINGS(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_EPISODE)
#Search for airings
elif mode==6:
print ""+url
SEARCHFORAIRINGS(url,name)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_EPISODE)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gasinger/plugin.video.sagetv | default.py | Python | gpl-2.0 | 55,730 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class UsagePaged(Paged):
"""
    A paging container for iterating over a list of :class:`Usage <azure.mgmt.network.v2017_03_01.models.Usage>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Usage]'}
}
def __init__(self, *args, **kwargs):
super(UsagePaged, self).__init__(*args, **kwargs)
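# Minimal usage sketch (assumes a configured NetworkManagementClient named
# `network_client`; its usages operations group returns this paged container,
# which follows nextLink transparently during iteration):
#
#   for usage in network_client.usages.list('westus'):
#       print(usage.name.value, usage.current_value, usage.limit)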
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/usage_paged.py | Python | mit | 917 |
from typing import Callable, List, Optional, Tuple
import numpy as np
import phidl.device_layout as pd
from gdsfactory.component import Component, ComponentReference
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.taper import taper as taper_function
from gdsfactory.cross_section import strip
from gdsfactory.port import select_ports_optical
from gdsfactory.routing.get_route import get_route
from gdsfactory.routing.utils import direction_ports_from_list_ports, flip
from gdsfactory.types import ComponentFactory, CrossSectionFactory, Number, Routes
def route_south(
component: Component,
optical_routing_type: int = 1,
excluded_ports: Optional[Tuple[str, ...]] = None,
straight_separation: Number = 4.0,
io_gratings_lines: Optional[List[List[ComponentReference]]] = None,
    gc_port_name: str = "1",
bend: ComponentFactory = bend_euler,
straight: ComponentFactory = straight_function,
taper: Optional[ComponentFactory] = taper_function,
auto_widen: bool = True,
select_ports: Callable = select_ports_optical,
cross_section: CrossSectionFactory = strip,
**kwargs,
) -> Routes:
"""Returns Routes
Args:
component: component to route
optical_routing_type: routing heuristic `1` or `2`
`1` uses the component size info to estimate the box size.
`2` only looks at the optical port positions to estimate the size
excluded_ports=[]: list of port names to NOT route
straight_separation
io_gratings_lines: list of ports to which the ports produced by this
function will be connected. Supplying this information helps
avoiding straight collisions
gc_port_name: grating port name
Returns:
list of references, list of ports
Works well if the component looks rougly like a rectangular box with
north ports on the north of the box
south ports on the south of the box
east ports on the east of the box
west ports on the west of the box
"""
excluded_ports = excluded_ports or []
assert optical_routing_type in [
1,
2,
], f"optical_routing_type = {optical_routing_type}, not supported "
optical_ports = list(select_ports(component.ports).values())
optical_ports = [p for p in optical_ports if p.name not in excluded_ports]
csi = component.size_info
references = []
lengths = []
bend90 = bend(cross_section=cross_section, **kwargs) if callable(bend) else bend
dy = abs(bend90.info["dy"])
    # Handle an empty port list gracefully, honoring the declared return type
    if not optical_ports:
        return Routes(references=[], ports=[], lengths=[])
conn_params = dict(
bend=bend,
straight=straight,
taper=taper,
auto_widen=auto_widen,
cross_section=cross_section,
**kwargs,
)
# Used to avoid crossing between straights in special cases
# This could happen when abs(x_port - x_grating) <= 2 * dy
delta_gr_min = 2 * dy + 1
sep = straight_separation
# Get lists of optical ports by orientation
direction_ports = direction_ports_from_list_ports(optical_ports)
north_ports = direction_ports["N"]
north_start = north_ports[0 : len(north_ports) // 2]
north_finish = north_ports[len(north_ports) // 2 :]
west_ports = direction_ports["W"]
west_ports.reverse()
east_ports = direction_ports["E"]
south_ports = direction_ports["S"]
north_finish.reverse() # Sort right to left
north_start.reverse() # Sort right to left
ordered_ports = north_start + west_ports + south_ports + east_ports + north_finish
def get_index_port_closest_to_x(x, list_ports):
return np.array([abs(x - p.ports[gc_port_name].x) for p in list_ports]).argmin()
def gen_port_from_port(x, y, p):
new_p = pd.Port(name=p.name, midpoint=(x, y), orientation=90.0, width=p.width)
return new_p
west_ports.reverse()
y0 = min([p.y for p in ordered_ports]) - dy - 0.5
ports_to_route = []
i = 0
optical_xs_tmp = [p.x for p in ordered_ports]
x_optical_min = min(optical_xs_tmp)
x_optical_max = max(optical_xs_tmp)
# Set starting ``x`` on the west side
# ``x`` is the x-coord of the waypoint where the current component port is connected.
# x starts as close as possible to the component.
# For each new port, the distance is increased by the separation.
# The starting x depends on the heuristic chosen : ``1`` or ``2``
if optical_routing_type == 1:
# use component size to know how far to route
x = csi.west - dy - 1
elif optical_routing_type == 2:
# use optical port to know how far to route
x = x_optical_min - dy - 1
else:
raise ValueError("Invalid optical routing type")
# First route the ports facing west
# In case we have to connect these ports to a line of gratings,
# Ensure that the port is aligned with the grating port or
# has enough space for manhattan routing (at least two bend radius)
for p in west_ports:
if io_gratings_lines:
i_grating = get_index_port_closest_to_x(x, io_gratings_lines[-1])
x_gr = io_gratings_lines[-1][i_grating].ports[gc_port_name].x
if abs(x - x_gr) < delta_gr_min:
if x > x_gr:
x = x_gr
elif x < x_gr:
x = x_gr - delta_gr_min
tmp_port = gen_port_from_port(x, y0, p)
ports_to_route.append(tmp_port)
route = get_route(input_port=p, output_port=tmp_port, **conn_params)
references.extend(route.references)
lengths.append(route.length)
x -= sep
i += 1
start_straight_length = 0.5
# First-half of north ports
# This ensures that north ports are routed above the top west one
north_start.reverse() # We need them from left to right
if len(north_start) > 0:
y_max = max([p.y for p in west_ports + north_start])
for p in north_start:
tmp_port = gen_port_from_port(x, y0, p)
route = get_route(
input_port=p,
output_port=tmp_port,
start_straight_length=start_straight_length + y_max - p.y,
**conn_params,
)
references.extend(route.references)
lengths.append(route.length)
ports_to_route.append(tmp_port)
x -= sep
start_straight_length += sep
# Set starting ``x`` on the east side
if optical_routing_type == 1:
# use component size to know how far to route
x = csi.east + dy + 1
elif optical_routing_type == 2:
# use optical port to know how far to route
x = x_optical_max + dy + 1
else:
        raise ValueError(
            f"Invalid optical routing type. Got {optical_routing_type}, only 1 and 2 are supported"
        )
i = 0
# Route the east ports
# In case we have to connect these ports to a line of gratings,
# Ensure that the port is aligned with the grating port or
# has enough space for manhattan routing (at least two bend radius)
start_straight_length = 0.5
for p in east_ports:
if io_gratings_lines:
i_grating = get_index_port_closest_to_x(x, io_gratings_lines[-1])
x_gr = io_gratings_lines[-1][i_grating].ports[gc_port_name].x
if abs(x - x_gr) < delta_gr_min:
if x < x_gr:
x = x_gr
elif x > x_gr:
x = x_gr + delta_gr_min
tmp_port = gen_port_from_port(x, y0, p)
route = get_route(
p, tmp_port, start_straight_length=start_straight_length, **conn_params
)
references.extend(route.references)
lengths.append(route.length)
ports_to_route.append(tmp_port)
x += sep
i += 1
# Route the remaining north ports
start_straight_length = 0.5
if len(north_finish) > 0:
y_max = max([p.y for p in east_ports + north_finish])
for p in north_finish:
tmp_port = gen_port_from_port(x, y0, p)
ports_to_route.append(tmp_port)
route = get_route(
input_port=p,
output_port=tmp_port,
start_straight_length=start_straight_length + y_max - p.y,
**conn_params,
)
references.extend(route.references)
lengths.append(route.length)
x += sep
start_straight_length += sep
# Add south ports
ports = [flip(p) for p in ports_to_route] + south_ports
return Routes(references=references, ports=ports, lengths=lengths)
if __name__ == "__main__":
import gdsfactory as gf
# c = gf.components.mmi2x2()
# c = gf.components.ring_single()
c = gf.components.ring_double()
layer = (2, 0)
c = gf.components.ring_double(layer=layer)
r = route_south(c, bend=gf.components.bend_euler, layer=layer)
for e in r.references:
if isinstance(e, list):
print(len(e))
print(e)
# print(e)
c.add(e)
print(r.lengths)
c.show()
| gdsfactory/gdsfactory | gdsfactory/routing/route_south.py | Python | mit | 9,266 |
from .AchievementListener import AchievementListener
from models.popups import Popup
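# Minimal usage sketch (hypothetical popup queue and achievement objects):
#
#   manager = AchievementManager(popups)
#   manager.add_achievement(achievement)
#   manager.unlock_achievement(achievement.id)
#   assert manager.is_achievement_unlocked(achievement.id)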
class AchievementManager(object):
def __init__(self, popups):
self._achievements = {}
self._listeners = []
self._popups = popups
def __iter__(self):
for achievement in self._achievements.values():
yield achievement
@property
def achievements(self):
return self._achievements.values()
    def add_achievement(self, achievement):
        _id = achievement.id
        listener = AchievementListener(achievement)
        self._listeners.append(listener)
        listener.listen_unlock(self._on_unlock)
        listener.listen_reveal(self._on_reveal)
        # setdefault keeps the first registration if the same id is added twice
        self._achievements.setdefault(_id, achievement)
def unlock_achievement(self, _id):
achievement = self.get_achievement(_id)
achievement.is_complete = True
achievement.is_hidden = False
def reveal_achievement(self, _id):
achievement = self.get_achievement(_id)
achievement.is_hidden = False
def is_achievement_unlocked(self, _id):
achievement = self.get_achievement(_id)
return achievement.is_complete
def get_achievement(self, _id):
achievement = self._achievements.get(_id)
if achievement is None:
raise KeyError("Unknown achievement {0}".format(_id))
return achievement
    def _on_reveal(self, achievement):
        # Revealing is currently silent; only unlocks trigger a popup below.
        pass
def _on_unlock(self, achievement):
self._popups.add(Popup(
message="Unlocked achievement\n{0}".format(achievement.id),
            icon=achievement.icon))
| kfcpaladin/sze-the-game | game/models/achievements/AchievementManager.py | Python | mit | 1,635 |
from __future__ import absolute_import
import pytest
import logging
import re
import mock
from tornado import gen
from tornado.ioloop import PeriodicCallback, IOLoop
from tornado.httpclient import HTTPError
import bokeh.server.server as server
from bokeh.application import Application
from bokeh.application.handlers import Handler
from bokeh.model import Model
from bokeh.core.properties import List, String
from bokeh.client import pull_session
from bokeh.util.session_id import check_session_id_signature
from .utils import ManagedServerLoop, url, ws_url, http_get, websocket_open
logging.basicConfig(level=logging.DEBUG)
def test__create_hosts_whitelist_no_host():
hosts = server._create_hosts_whitelist(None, 1000)
assert hosts == ["localhost:1000"]
hosts = server._create_hosts_whitelist([], 1000)
assert hosts == ["localhost:1000"]
def test__create_hosts_whitelist_host_value_with_port_use_port():
hosts = server._create_hosts_whitelist(["foo:1000"], 1000)
assert hosts == ["foo:1000"]
hosts = server._create_hosts_whitelist(["foo:1000","bar:2100"], 1000)
assert hosts == ["foo:1000","bar:2100"]
def test__create_hosts_whitelist_host_without_port_use_port_80():
hosts = server._create_hosts_whitelist(["foo"], 1000)
assert hosts == ["foo:80"]
hosts = server._create_hosts_whitelist(["foo","bar"], 1000)
assert hosts == ["foo:80","bar:80"]
def test__create_hosts_whitelist_host_non_int_port_raises():
with pytest.raises(ValueError):
server._create_hosts_whitelist(["foo:xyz"], 1000)
def test__create_hosts_whitelist_bad_host_raises():
with pytest.raises(ValueError):
server._create_hosts_whitelist([""], 1000)
with pytest.raises(ValueError):
server._create_hosts_whitelist(["a:b:c"], 1000)
with pytest.raises(ValueError):
server._create_hosts_whitelist([":80"], 1000)
@gen.coroutine
def async_value(value):
yield gen.moment # this ensures we actually return to the loop
raise gen.Return(value)
class HookListModel(Model):
hooks = List(String)
class HookTestHandler(Handler):
def __init__(self):
super(HookTestHandler, self).__init__()
self.load_count = 0
self.unload_count = 0
self.session_creation_async_value = 0
self.hooks = []
self.server_periodic_remover = None
self.session_periodic_remover = None
def modify_document(self, doc):
# this checks that the session created hook has run
# and session destroyed has not.
assert self.session_creation_async_value == 3
doc.title = "Modified"
doc.roots[0].hooks.append("modify")
self.hooks.append("modify")
def on_server_loaded(self, server_context):
assert len(server_context.sessions) == 0
self.load_count += 1
self.hooks.append("server_loaded")
server_context.add_next_tick_callback(self.on_next_tick_server)
server_context.add_timeout_callback(self.on_timeout_server, 2)
server_context.add_periodic_callback(self.on_periodic_server, 3)
def remover():
server_context.remove_periodic_callback(self.on_periodic_server)
self.server_periodic_remover = remover
def on_server_unloaded(self, server_context):
self.unload_count += 1
self.hooks.append("server_unloaded")
# important to test that this can be async
@gen.coroutine
def on_session_created(self, session_context):
@gen.coroutine
def setup_document(doc):
# session creation hook is allowed to init the document
# before any modify_document() handlers kick in
from bokeh.document import DEFAULT_TITLE
hook_list = HookListModel()
assert doc.title == DEFAULT_TITLE
assert len(doc.roots) == 0
hook_list.hooks.append("session_created")
doc.add_root(hook_list)
self.session_creation_async_value = yield async_value(1)
self.session_creation_async_value = yield async_value(2)
self.session_creation_async_value = yield async_value(3)
yield session_context.with_locked_document(setup_document)
server_context = session_context.server_context
server_context.add_next_tick_callback(self.on_next_tick_session)
server_context.add_timeout_callback(self.on_timeout_session, 2)
server_context.add_periodic_callback(self.on_periodic_session, 3)
def remover():
server_context.remove_periodic_callback(self.on_periodic_session)
self.session_periodic_remover = remover
self.hooks.append("session_created")
# this has to be async too
@gen.coroutine
def on_session_destroyed(self, session_context):
@gen.coroutine
def shutdown_document(doc):
doc.roots[0].hooks.append("session_destroyed")
self.session_creation_async_value = yield async_value(4)
self.session_creation_async_value = yield async_value(5)
self.session_creation_async_value = yield async_value(6)
yield session_context.with_locked_document(shutdown_document)
self.hooks.append("session_destroyed")
def on_next_tick_server(self):
self.hooks.append("next_tick_server")
def on_timeout_server(self):
self.hooks.append("timeout_server")
def on_periodic_server(self):
self.hooks.append("periodic_server")
self.server_periodic_remover()
def on_next_tick_session(self):
self.hooks.append("next_tick_session")
def on_timeout_session(self):
self.hooks.append("timeout_session")
def on_periodic_session(self):
self.hooks.append("periodic_session")
self.session_periodic_remover()
def test__lifecycle_hooks():
application = Application()
handler = HookTestHandler()
application.add(handler)
with ManagedServerLoop(application, check_unused_sessions_milliseconds=30) as server:
        # wait for server callbacks to run before we mix in the
        # session; this keeps the test deterministic
def check_done():
if len(handler.hooks) == 4:
server.io_loop.stop()
server_load_checker = PeriodicCallback(check_done, 1,
io_loop=server.io_loop)
server_load_checker.start()
server.io_loop.start()
server_load_checker.stop()
# now we create a session
client_session = pull_session(session_id='test__lifecycle_hooks',
url=url(server),
io_loop=server.io_loop)
client_doc = client_session.document
assert len(client_doc.roots) == 1
server_session = server.get_session('/', client_session.id)
server_doc = server_session.document
assert len(server_doc.roots) == 1
client_session.close()
# expire the session quickly rather than after the
# usual timeout
server_session.request_expiration()
def on_done():
server.io_loop.stop()
server.io_loop.call_later(0.1, on_done)
server.io_loop.start()
assert handler.hooks == ["server_loaded",
"next_tick_server",
"timeout_server",
"periodic_server",
"session_created",
"next_tick_session",
"modify",
"timeout_session",
"periodic_session",
"session_destroyed",
"server_unloaded"]
client_hook_list = client_doc.roots[0]
server_hook_list = server_doc.roots[0]
assert handler.load_count == 1
assert handler.unload_count == 1
assert handler.session_creation_async_value == 6
assert client_doc.title == "Modified"
assert server_doc.title == "Modified"
# the client session doesn't see the event that adds "session_destroyed" since
# we shut down at that point.
assert client_hook_list.hooks == ["session_created", "modify"]
assert server_hook_list.hooks == ["session_created", "modify", "session_destroyed"]
# examples:
# "sessionid" : "NzlNoPfEYJahnPljE34xI0a5RSTaU1Aq1Cx5"
# 'sessionid':'NzlNoPfEYJahnPljE34xI0a5RSTaU1Aq1Cx5'
sessionid_in_json = re.compile("""["']sessionid["'] *: *["']([^"]+)["']""")
def extract_sessionid_from_json(html):
from six import string_types
if not isinstance(html, string_types):
import codecs
html = codecs.decode(html, 'utf-8')
match = sessionid_in_json.search(html)
return match.group(1)
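# e.g. (hypothetical embedded config):
#   extract_sessionid_from_json('{"sessionid": "abc123"}')  returns  'abc123'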
def autoload_url(server):
return url(server) + \
"autoload.js?bokeh-protocol-version=1.0&bokeh-autoload-element=foo"
def test_use_xheaders():
application = Application()
with ManagedServerLoop(application, use_xheaders=True) as server:
assert server._http.xheaders == True
def test__autocreate_session_autoload():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
autoload_url(server))
js = response.body
sessionid = extract_sessionid_from_json(js)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert sessionid == sessions[0].id
def test__autocreate_session_doc():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
url(server))
html = response.body
sessionid = extract_sessionid_from_json(html)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert sessionid == sessions[0].id
def test__no_autocreate_session_websocket():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
websocket_open(server.io_loop,
ws_url(server) + "?bokeh-protocol-version=1.0")
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__use_provided_session_autoload():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
response = http_get(server.io_loop,
autoload_url(server) + "&bokeh-session-id=" + expected)
js = response.body
sessionid = extract_sessionid_from_json(js)
assert expected == sessionid
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert expected == sessions[0].id
def test__use_provided_session_doc():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
response = http_get(server.io_loop,
url(server) + "?bokeh-session-id=" + expected)
html = response.body
sessionid = extract_sessionid_from_json(html)
assert expected == sessionid
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert expected == sessions[0].id
def test__use_provided_session_websocket():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
url = ws_url(server) + \
"?bokeh-protocol-version=1.0" + \
"&bokeh-session-id=" + expected
websocket_open(server.io_loop,
url)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert expected == sessions[0].id
def test__autocreate_signed_session_autoload():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='foo') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
autoload_url(server))
js = response.body
sessionid = extract_sessionid_from_json(js)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert sessionid == sessions[0].id
assert check_session_id_signature(sessionid, signed=True, secret_key='foo')
def test__autocreate_signed_session_doc():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='foo') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
url(server))
html = response.body
sessionid = extract_sessionid_from_json(html)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert sessionid == sessions[0].id
assert check_session_id_signature(sessionid, signed=True, secret_key='foo')
def test__reject_unsigned_session_autoload():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
with (pytest.raises(HTTPError)) as info:
http_get(server.io_loop,
autoload_url(server) + "&bokeh-session-id=" + expected)
assert 'Invalid session ID' in repr(info.value)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__reject_unsigned_session_doc():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
with (pytest.raises(HTTPError)) as info:
http_get(server.io_loop, url(server) + "?bokeh-session-id=" + expected)
assert 'Invalid session ID' in repr(info.value)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__reject_unsigned_session_websocket():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
url = ws_url(server) + \
"?bokeh-protocol-version=1.0" + \
"&bokeh-session-id=" + expected
websocket_open(server.io_loop,
url)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__no_generate_session_autoload():
application = Application()
with ManagedServerLoop(application, generate_session_ids=False) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
with (pytest.raises(HTTPError)) as info:
http_get(server.io_loop, autoload_url(server))
assert 'No bokeh-session-id provided' in repr(info.value)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__no_generate_session_doc():
application = Application()
with ManagedServerLoop(application, generate_session_ids=False) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
with (pytest.raises(HTTPError)) as info:
http_get(server.io_loop, url(server))
assert 'No bokeh-session-id provided' in repr(info.value)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__server_multiple_processes():
with mock.patch('tornado.process.fork_processes') as tornado_fp:
application = Application()
with ManagedServerLoop(application, num_procs=3):
pass
tornado_fp.assert_called_with(3)
def test__existing_ioloop_with_multiple_processes_exception():
application = Application()
ioloop_instance = IOLoop.instance() ; ioloop_instance # silence flake8
with pytest.raises(RuntimeError):
with ManagedServerLoop(application, num_procs=3):
pass
| phobson/bokeh | bokeh/server/tests/test_server.py | Python | bsd-3-clause | 16,586 |
# -*- coding: utf-8 -*-
import csv
import urllib
import json
from collections import defaultdict
import pickle
import re
files = {
    "male":
    {
        "old": "UN_POP/UN_POP_MALE/ESTIMATES-Table 1.csv",
        "prev": "UN_POP/UN_POP_MALE_PREV/MEDIUM-Table 1.csv"
    },
    "female":
    {
        "old": "UN_POP/UN_POP_FEMALE/ESTIMATES-Table 1.csv",
        "prev": "UN_POP/UN_POP_FEMALE_PREV/MEDIUM-Table 1.csv",
    }
}
# infinitely nested defaultdict ("autovivification"): res_dict[a][b][c] works
# without explicitly initializing the intermediate levels
f = lambda: defaultdict(f)
res_dict = defaultdict(f)
year_set = set()
countries_dict = {}
count =0
for sex in files:
for typ in files[sex]:
reader = csv.reader(open(files[sex][typ], 'rb'), delimiter=';')
for row in reader:
year = int(row[5])
if (year % 5== 0):
year_set.add(year)
country = row[2]
encoded_country = urllib.quote_plus(re.sub(r'[^A-Za-z\ ]', "",
country.decode('ascii','ignore')))
print encoded_country
countries_dict[encoded_country]=unicode(country,'utf-8')
i=0
for v in row[6:22]+row[23:]:
try:
res_dict[encoded_country][year][sex][i]=int(v)
except:
if v!="…":
print "failed on :'%s' " % v
res_dict[encoded_country][year][sex][i]=0
if country=="WORLD" and year==2100:
print sex, year, i, v
i = i+1
count = count +1
year_list = sorted(list(year_set))
#print year_list
#print countries_dict
encoded_countries_list= sorted(countries_dict.keys())
alphabet = map(chr, range(97, 123))
letters_to_countries_list_dict = defaultdict(list)
for c in encoded_countries_list:
first_letter = c[0]
letters_to_countries_list_dict[first_letter].append(c)
#print letters_to_countries_list_dict
pop_dict = defaultdict(f)
for encoded_country in encoded_countries_list:
    for year in year_list:
        total = 0  # avoid shadowing the builtin sum()
        for sex in ('male','female'):
            for i in res_dict[encoded_country][year][sex]:
                total += res_dict[encoded_country][year][sex][i]
        pop_dict[encoded_country][year]=total
print res_dict['WORLD'][2100]
#print pop_dict
def age_to_int(age):
if not age.strip().startswith("100"):
return int(age.split('-')[0])
else:
return 100
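# Examples (inputs in the age_labels format defined below):
#   age_to_int(' 0-4')   -> 0
#   age_to_int('95-99')  -> 95
#   age_to_int('100+')   -> 100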
for encoded_country in encoded_countries_list:
f = open("generated/%s.json" % encoded_country,'w')
f.write(json.dumps(res_dict[encoded_country]))
f.close()
age_labels = [' 0-4', ' 5-9', ' 10-14', ' 15-19', ' 20-24', ' 25-29', ' 30-34', ' 35-39', ' 40-44', ' 45-49', ' 50-54', ' 55-59', ' 60-64', ' 65-69', ' 70-74', ' 75-79', ' 80-84', ' 85-89', ' 90-94', ' 95-99', ' 100+'];
main_data_dict ={
'alphabet':alphabet,
'lettersToCountriesList':letters_to_countries_list_dict,
'countriesHumanNames': countries_dict,
'populations':pop_dict,
'years':year_list,
'ageLabels':age_labels
}
f = open('letters_to_countries_list_dict.pickle','w')
pickle.dump(letters_to_countries_list_dict,f)
f.close()
f = open('countries_dict.pickle','w')
pickle.dump(countries_dict,f)
f.close()
f = open('mainData.json','w')
f.write(json.dumps(main_data_dict))
f.close()
| bctclc/PopulationPyramid.net | 2010/parser.py | Python | mit | 3,367 |
import os, stat
from flask_defaults import *
from base import *
### Import configuration from the current environment
env = os.environ.get('SECUREDROP_ENV')
# default env is 'production'
env = env or DEFAULT_ENV
if env == 'test':
from test import *
if env == 'development':
from development import *
if env == 'production':
from production import *
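# Example: selecting the test configuration from the shell (hypothetical
# session; falls back to 'production' when SECUREDROP_ENV is unset):
#
#   $ SECUREDROP_ENV=test python -c "import config"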
# data directories - should be on secure media
STORE_DIR=os.path.join(SECUREDROP_ROOT, 'store')
GPG_KEY_DIR=os.path.join(SECUREDROP_ROOT, 'keys')
# create the data directories
for d in (SECUREDROP_ROOT, STORE_DIR, GPG_KEY_DIR):
if not os.path.isdir(d):
os.mkdir(d)
# restrict permissions on the GPG key directory to avoid warnings from GPG
def has_perms(path, mode):
return oct(stat.S_IMODE(os.stat(path).st_mode)) == oct(mode)
safe_perms = 0700
if not has_perms(GPG_KEY_DIR, safe_perms):
os.chmod(GPG_KEY_DIR, safe_perms)
### Flask base configuration
class FlaskConfig(object):
DEBUG = FLASK_DEBUG
TESTING = FLASK_TESTING
SECRET_KEY = FLASK_SECRET_KEY
WTF_CSRF_ENABLED = FLASK_CSRF_ENABLED
| beni55/securedrop | securedrop/config/__init__.py | Python | gpl-2.0 | 1,089 |
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2009 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from __future__ import print_function
import os
import sys
import time
import optparse
import subprocess
import threading
import signal
import xml.dom.minidom
import shutil
import re
from utils import get_list_from_file
try:
import queue
except ImportError:
import Queue as queue
#
# XXX This should really be part of a waf command to list the configuration
# items relative to optional ns-3 pieces.
#
# A list of interesting configuration items in the waf configuration
# cache which we may be interested in when deciding on which examples
# to run and how to run them. These are set by waf during the
# configuration phase and the corresponding assignments are usually
# found in the associated subdirectory wscript files.
#
interesting_config_items = [
"NS3_ENABLED_MODULES",
"NS3_ENABLED_CONTRIBUTED_MODULES",
"NS3_MODULE_PATH",
"NSC_ENABLED",
"ENABLE_REAL_TIME",
"ENABLE_THREADING",
"ENABLE_EXAMPLES",
"ENABLE_TESTS",
"EXAMPLE_DIRECTORIES",
"ENABLE_PYTHON_BINDINGS",
"NSCLICK",
"ENABLE_BRITE",
"ENABLE_OPENFLOW",
"APPNAME",
"BUILD_PROFILE",
"VERSION",
"PYTHON",
"VALGRIND_FOUND",
]
NSC_ENABLED = False
ENABLE_REAL_TIME = False
ENABLE_THREADING = False
ENABLE_EXAMPLES = True
ENABLE_TESTS = True
NSCLICK = False
ENABLE_BRITE = False
ENABLE_OPENFLOW = False
EXAMPLE_DIRECTORIES = []
APPNAME = ""
BUILD_PROFILE = ""
BUILD_PROFILE_SUFFIX = ""
VERSION = ""
PYTHON = ""
VALGRIND_FOUND = True
#
# This will be given a prefix and a suffix when the waf config file is
# read.
#
test_runner_name = "test-runner"
#
# If the user has constrained us to run certain kinds of tests, we can tell waf
# to only build
#
core_kinds = ["bvt", "core", "performance", "system", "unit"]
#
# There are some special cases for test suites that kill valgrind. This is
# because NSC causes illegal instruction crashes when run under valgrind.
#
core_valgrind_skip_tests = [
"ns3-tcp-cwnd",
"nsc-tcp-loss",
"ns3-tcp-interoperability",
"routing-click",
"lte-rr-ff-mac-scheduler",
"lte-tdmt-ff-mac-scheduler",
"lte-fdmt-ff-mac-scheduler",
"lte-pf-ff-mac-scheduler",
"lte-tta-ff-mac-scheduler",
"lte-fdbet-ff-mac-scheduler",
"lte-ttbet-ff-mac-scheduler",
"lte-fdtbfq-ff-mac-scheduler",
"lte-tdtbfq-ff-mac-scheduler",
"lte-pss-ff-mac-scheduler",
]
#
# There are some special cases for test suites that fail when NSC is
# missing.
#
core_nsc_missing_skip_tests = [
"ns3-tcp-cwnd",
"nsc-tcp-loss",
"ns3-tcp-interoperability",
]
#
# Parse the examples-to-run file if it exists.
#
# This function adds any C++ examples or Python examples that are to be run
# to the lists in example_tests and python_tests, respectively.
#
def parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests):
    # Proceed only if the examples-to-run file exists.
if os.path.exists(examples_to_run_path):
# Each tuple in the C++ list of examples to run contains
#
# (example_name, do_run, do_valgrind_run)
#
# where example_name is the executable to be run, do_run is a
# condition under which to run the example, and do_valgrind_run is
# a condition under which to run the example under valgrind. This
# is needed because NSC causes illegal instruction crashes with
# some tests when they are run under valgrind.
#
# Note that the two conditions are Python statements that
# can depend on waf configuration variables. For example,
#
# ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
#
cpp_examples = get_list_from_file(examples_to_run_path, "cpp_examples")
for example_name, do_run, do_valgrind_run in cpp_examples:
# Separate the example name from its arguments.
example_name_original = example_name
example_name_parts = example_name.split(' ', 1)
if len(example_name_parts) == 1:
example_name = example_name_parts[0]
example_arguments = ""
else:
example_name = example_name_parts[0]
example_arguments = example_name_parts[1]
# Add the proper prefix and suffix to the example name to
# match what is done in the wscript file.
example_path = "%s%s-%s%s" % (APPNAME, VERSION, example_name, BUILD_PROFILE_SUFFIX)
# Set the full path for the example.
example_path = os.path.join(cpp_executable_dir, example_path)
example_name = os.path.join(
os.path.relpath(cpp_executable_dir, NS3_BUILDDIR),
example_name)
# Add all of the C++ examples that were built, i.e. found
# in the directory, to the list of C++ examples to run.
if os.path.exists(example_path):
# Add any arguments to the path.
if len(example_name_parts) != 1:
example_path = "%s %s" % (example_path, example_arguments)
example_name = "%s %s" % (example_name, example_arguments)
# Add this example.
example_tests.append((example_name, example_path, do_run, do_valgrind_run))
example_names_original.append(example_name_original)
# Each tuple in the Python list of examples to run contains
#
# (example_name, do_run)
#
# where example_name is the Python script to be run and
# do_run is a condition under which to run the example.
#
# Note that the condition is a Python statement that can
# depend on waf configuration variables. For example,
#
# ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
#
python_examples = get_list_from_file(examples_to_run_path, "python_examples")
for example_name, do_run in python_examples:
# Separate the example name from its arguments.
example_name_parts = example_name.split(' ', 1)
if len(example_name_parts) == 1:
example_name = example_name_parts[0]
example_arguments = ""
else:
example_name = example_name_parts[0]
example_arguments = example_name_parts[1]
# Set the full path for the example.
example_path = os.path.join(python_script_dir, example_name)
# Add all of the Python examples that were found to the
# list of Python examples to run.
if os.path.exists(example_path):
# Add any arguments to the path.
if len(example_name_parts) != 1:
example_path = "%s %s" % (example_path, example_arguments)
# Add this example.
python_tests.append((example_path, do_run))
#
# The test suites are going to want to output status. They are running
# concurrently. This means that unless we are careful, the output of
# the test suites will be interleaved. Rather than introducing a lock
# file that could unintentionally start serializing execution, we ask
# the tests to write their output to a temporary directory and then
# put together the final output file when we "join" the test tasks back
# to the main thread. In addition to this issue, the example programs
# often write lots and lots of trace files which we will just ignore.
# We put all of them into the temp directory as well, so they can be
# easily deleted.
#
TMP_OUTPUT_DIR = "testpy-output"
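# The test runner writes its results as XML fragments shaped roughly like
# (abridged, hypothetical values):
#
#   <Test>
#     <Name>sample-test-suite</Name>
#     <Result>PASS</Result>
#     <Time real="0.30"/>
#   </Test>
#
# read_test() below extracts the interesting fields from one such element.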
def read_test(test):
result = test.find('Result').text
name = test.find('Name').text
if not test.find('Reason') is None:
reason = test.find('Reason').text
else:
reason = ''
if not test.find('Time') is None:
time_real = test.find('Time').get('real')
else:
time_real = ''
return (result, name, reason, time_real)
#
# A simple example of writing a text file with a test result summary. It is
# expected that this output will be fine for developers looking for problems.
#
def node_to_text (test, f):
(result, name, reason, time_real) = read_test(test)
if reason:
reason = " (%s)" % reason
output = "%s: Test Suite \"%s\" (%s)%s\n" % (result, name, time_real, reason)
f.write(output)
for details in test.findall('FailureDetails'):
f.write(" Details:\n")
f.write(" Message: %s\n" % details.find('Message').text)
f.write(" Condition: %s\n" % details.find('Condition').text)
f.write(" Actual: %s\n" % details.find('Actual').text)
f.write(" Limit: %s\n" % details.find('Limit').text)
f.write(" File: %s\n" % details.find('File').text)
f.write(" Line: %s\n" % details.find('Line').text)
for child in test.findall('Test'):
node_to_text(child, f)
def translate_to_text(results_file, text_file):
text_file += '.txt'
print('Writing results to text file \"%s\"...' % text_file, end='')
f = open(text_file, 'w')
import xml.etree.ElementTree as ET
et = ET.parse (results_file)
for test in et.findall('Test'):
node_to_text (test, f)
for example in et.findall('Example'):
result = example.find('Result').text
name = example.find('Name').text
if not example.find('Time') is None:
time_real = example.find('Time').get('real')
else:
time_real = ''
output = "%s: Example \"%s\" (%s)\n" % (result, name, time_real)
f.write(output)
f.close()
print('done.')
#
# A simple example of writing an HTML file with a test result summary. It is
# expected that this will eventually be made prettier as time progresses and
# we have time to tweak it. This may end up being moved to a separate module
# since it will probably grow over time.
#
def translate_to_html(results_file, html_file):
html_file += '.html'
print('Writing results to html file %s...' % html_file, end='')
f = open(html_file, 'w')
f.write("<html>\n")
f.write("<body>\n")
f.write("<center><h1>ns-3 Test Results</h1></center>\n")
#
# Read and parse the whole results file.
#
import xml.etree.ElementTree as ET
et = ET.parse(results_file)
#
# Iterate through the test suites
#
f.write("<h2>Test Suites</h2>\n")
for suite in et.findall('Test'):
#
# For each test suite, get its name, result and execution time info
#
(result, name, reason, time) = read_test (suite)
#
# Print a level three header with the result, name and time. If the
# test suite passed, the header is printed in green. If the suite was
# skipped, print it in orange, otherwise assume something bad happened
# and print in red.
#
if result == "PASS":
f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
elif result == "SKIP":
f.write("<h3 style=\"color:#ff6600\">%s: %s (%s) (%s)</h3>\n" % (result, name, time, reason))
else:
f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))
#
# The test case information goes in a table.
#
f.write("<table border=\"1\">\n")
#
# The first column of the table has the heading Result
#
f.write("<th> Result </th>\n")
#
# If the suite crashed or is skipped, there is no further information, so just
# declare a new table row with the result (CRASH or SKIP) in it. Looks like:
#
# +--------+
# | Result |
# +--------+
# | CRASH |
# +--------+
#
# Then go on to the next test suite. Valgrind and skipped errors look the same.
#
if result in ["CRASH", "SKIP", "VALGR"]:
f.write("<tr>\n")
if result == "SKIP":
f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
else:
f.write("<td style=\"color:red\">%s</td>\n" % result)
f.write("</tr>\n")
f.write("</table>\n")
continue
#
# If the suite didn't crash, we expect more information, so fill out
# the table heading row. Like,
#
# +--------+----------------+------+
# | Result | Test Case Name | Time |
# +--------+----------------+------+
#
f.write("<th>Test Case Name</th>\n")
f.write("<th> Time </th>\n")
#
# If the test case failed, we need to print out some failure details
# so extend the heading row again. Like,
#
# +--------+----------------+------+-----------------+
# | Result | Test Case Name | Time | Failure Details |
# +--------+----------------+------+-----------------+
#
if result == "FAIL":
f.write("<th>Failure Details</th>\n")
#
# Now iterate through all of the test cases.
#
for case in suite.findall('Test'):
#
# Get the name, result and timing information from xml to use in
# printing table below.
#
(result, name, reason, time) = read_test(case)
#
# If the test case failed, we iterate through possibly multiple
# failure details
#
if result == "FAIL":
#
# There can be multiple failures for each test case. The first
# row always gets the result, name and timing information along
# with the failure details. Remaining failures don't duplicate
# this information but just get blanks for readability. Like,
#
# +--------+----------------+------+-----------------+
# | Result | Test Case Name | Time | Failure Details |
# +--------+----------------+------+-----------------+
# | FAIL | The name | time | It's busted |
# +--------+----------------+------+-----------------+
# | | | | Really broken |
# +--------+----------------+------+-----------------+
# | | | | Busted bad |
# +--------+----------------+------+-----------------+
#
first_row = True
for details in case.findall('FailureDetails'):
#
# Start a new row in the table for each possible Failure Detail
#
f.write("<tr>\n")
if first_row:
first_row = False
f.write("<td style=\"color:red\">%s</td>\n" % result)
f.write("<td>%s</td>\n" % name)
f.write("<td>%s</td>\n" % time)
else:
f.write("<td></td>\n")
f.write("<td></td>\n")
f.write("<td></td>\n")
f.write("<td>")
f.write("<b>Message: </b>%s, " % details.find('Message').text)
f.write("<b>Condition: </b>%s, " % details.find('Condition').text)
f.write("<b>Actual: </b>%s, " % details.find('Actual').text)
f.write("<b>Limit: </b>%s, " % details.find('Limit').text)
f.write("<b>File: </b>%s, " % details.find('File').text)
f.write("<b>Line: </b>%s" % details.find('Line').text)
f.write("</td>\n")
#
# End the table row
#
f.write("</td>\n")
else:
#
# If this particular test case passed, then we just print the PASS
# result in green, followed by the test case name and its execution
# time information. These go off in <td> ... </td> table data.
# The details table entry is left blank.
#
# +--------+----------------+------+---------+
# | Result | Test Case Name | Time | Details |
# +--------+----------------+------+---------+
# | PASS | The name | time | |
# +--------+----------------+------+---------+
#
f.write("<tr>\n")
f.write("<td style=\"color:green\">%s</td>\n" % result)
f.write("<td>%s</td>\n" % name)
f.write("<td>%s</td>\n" % time)
f.write("<td>%s</td>\n" % reason)
f.write("</tr>\n")
#
# All of the rows are written, so we need to end the table.
#
f.write("</table>\n")
#
# That's it for all of the test suites. Now we have to do something about
# our examples.
#
f.write("<h2>Examples</h2>\n")
#
# Example status is rendered in a table just like the suites.
#
f.write("<table border=\"1\">\n")
#
# The table headings look like,
#
# +--------+--------------+--------------+---------+
# | Result | Example Name | Elapsed Time | Details |
# +--------+--------------+--------------+---------+
#
f.write("<th> Result </th>\n")
f.write("<th>Example Name</th>\n")
f.write("<th>Elapsed Time</th>\n")
f.write("<th>Details</th>\n")
#
# Now iterate through all of the examples
#
for example in et.findall("Example"):
#
# Start a new row for each example
#
f.write("<tr>\n")
#
# Get the result and name of the example in question
#
(result, name, reason, time) = read_test(example)
#
# If the example either failed or crashed, print its result status
# in red; otherwise green. This goes in a <td> ... </td> table data
#
if result == "PASS":
f.write("<td style=\"color:green\">%s</td>\n" % result)
elif result == "SKIP":
f.write("<td style=\"color:#ff6600\">%s</fd>\n" % result)
else:
f.write("<td style=\"color:red\">%s</td>\n" % result)
#
# Write the example name as a new tag data.
#
f.write("<td>%s</td>\n" % name)
#
# Write the elapsed time as a new tag data.
#
f.write("<td>%s</td>\n" % time)
#
# Write the reason, if it exist
#
f.write("<td>%s</td>\n" % reason)
#
# That's it for the current example, so terminate the row.
#
f.write("</tr>\n")
#
# That's it for the table of examples, so terminate the table.
#
f.write("</table>\n")
#
# And that's it for the report, so finish up.
#
f.write("</body>\n")
f.write("</html>\n")
f.close()
print('done.')
#
# Python Control-C handling is broken in the presence of multiple threads.
# Signals get delivered to the runnable/running thread by default and if
# it is blocked, the signal is simply ignored. So we hook sigint and set
# a global variable telling the system to shut down gracefully.
#
thread_exit = False
def sigint_hook(signal, frame):
global thread_exit
thread_exit = True
return 0
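# The hook is installed from the main thread (e.g. with
# signal.signal(signal.SIGINT, sigint_hook)); worker loops are expected to
# poll thread_exit and wind down gracefully.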
#
# In general, the build process itself naturally takes care of figuring out
# which tests are built into the test runner. For example, if waf configure
# determines that ENABLE_EMU is false due to some missing dependency,
# the tests for the emu net device simply will not be built and will
# therefore not be included in the built test runner.
#
# Examples, however, are a different story. In that case, we are just given
# a list of examples that could be run. Instead of just failing, for example,
# nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
# for relevant configuration items.
#
# XXX This function pokes around in the waf internal state file. To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_config():
for line in open(".lock-waf_" + sys.platform + "_build", "rt"):
if line.startswith("top_dir ="):
key, val = line.split('=')
top_dir = eval(val.strip())
if line.startswith("out_dir ="):
key, val = line.split('=')
out_dir = eval(val.strip())
global NS3_BASEDIR
NS3_BASEDIR = top_dir
global NS3_BUILDDIR
NS3_BUILDDIR = out_dir
for line in open("%s/c4che/_cache.py" % out_dir).readlines():
for item in interesting_config_items:
if line.startswith(item):
exec(line, globals())
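    # Each matching cache line is a plain Python assignment, e.g.
    # (hypothetical): NS3_ENABLED_MODULES = ['ns3-core', 'ns3-network']
    # so exec'ing it sets the corresponding module-level variable above.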
if options.verbose:
for item in interesting_config_items:
print("%s ==" % item, eval(item))
#
# It seems pointless to fork a process to run waf to fork a process to run
# the test runner, so we just run the test runner directly. The main thing
# that waf would do for us would be to sort out the shared library path but
# we can deal with that easily and do here.
#
# There can be many different ns-3 repositories on a system, and each has
# its own shared libraries, so ns-3 doesn't hardcode a shared library search
# path -- it is cooked up dynamically, so we do that too.
#
def make_paths():
have_DYLD_LIBRARY_PATH = False
have_LD_LIBRARY_PATH = False
have_PATH = False
have_PYTHONPATH = False
keys = list(os.environ.keys())
for key in keys:
if key == "DYLD_LIBRARY_PATH":
have_DYLD_LIBRARY_PATH = True
if key == "LD_LIBRARY_PATH":
have_LD_LIBRARY_PATH = True
if key == "PATH":
have_PATH = True
if key == "PYTHONPATH":
have_PYTHONPATH = True
    pypath = os.path.join (NS3_BUILDDIR, "bindings", "python")
if not have_PYTHONPATH:
os.environ["PYTHONPATH"] = pypath
else:
os.environ["PYTHONPATH"] += ":" + pypath
if options.verbose:
print("os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"])
if sys.platform == "darwin":
if not have_DYLD_LIBRARY_PATH:
os.environ["DYLD_LIBRARY_PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["DYLD_LIBRARY_PATH"] += ":" + path
if options.verbose:
print("os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"])
elif sys.platform == "win32":
if not have_PATH:
os.environ["PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["PATH"] += ';' + path
if options.verbose:
print("os.environ[\"PATH\"] == %s" % os.environ["PATH"])
elif sys.platform == "cygwin":
if not have_PATH:
os.environ["PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["PATH"] += ":" + path
if options.verbose:
print("os.environ[\"PATH\"] == %s" % os.environ["PATH"])
else:
if not have_LD_LIBRARY_PATH:
os.environ["LD_LIBRARY_PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["LD_LIBRARY_PATH"] += ":" + str(path)
if options.verbose:
print("os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"])
#
# Short note on generating suppressions:
#
# See the valgrind documentation for a description of suppressions. The easiest
# way to generate a suppression expression is by using the valgrind
# --gen-suppressions option. To do that you have to figure out how to run the
# test in question.
#
# If you do "test.py -v -g -s <suitename> then test.py will output most of what
# you need. For example, if you are getting a valgrind error in the
# devices-mesh-dot11s-regression test suite, you can run:
#
# ./test.py -v -g -s devices-mesh-dot11s-regression
#
# You should see in the verbose output something that looks like:
#
# Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output/2010-01-12-22-47-50-CUT
# --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
#
# You need to pull out the useful pieces, and so could run the following to
# reproduce your error:
#
# valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output
#
# Hint: Use the first part of the command as is, and point the "tempdir" to
# somewhere real. You don't need to specify an "out" file.
#
# When you run the above command you should see your valgrind error. The
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
# option to valgrind. Use something like:
#
# valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output
#
# Now when valgrind detects an error it will ask:
#
# ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
#
# to which you just enter 'y'<ret>.
#
# You will be provided with a suppression expression that looks something like
# the following:
# {
# <insert_a_suppression_name_here>
# Memcheck:Addr8
# fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
# fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
# fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
# ...
# the rest of the stack frame
# ...
# }
#
# You need to add a suppression name which will only be printed out by valgrind in
# verbose mode (but it needs to be there in any case). The entire stack frame is
# shown to completely characterize the error, but in most cases you won't need
# all of that info. For example, if you want to turn off all errors that happen
# when the function (fun:) is called, you can just delete the rest of the stack
# frame. You can also use wildcards to make the mangled signatures more readable.
#
# I added the following to the testpy.supp file for this particular error:
#
# {
# Suppress invalid read size errors in SendPreq() when using HwmpProtocolMac
# Memcheck:Addr8
# fun:*HwmpProtocolMac*SendPreq*
# }
#
# Now, when you run valgrind the error will be suppressed.
#
VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"
def run_job_synchronously(shell_command, directory, valgrind, is_python, build_path=""):
suppressions_path = os.path.join (NS3_BASEDIR, VALGRIND_SUPPRESSIONS_FILE)
if is_python:
path_cmd = PYTHON[0] + " " + os.path.join (NS3_BASEDIR, shell_command)
else:
if len(build_path):
path_cmd = os.path.join (build_path, shell_command)
else:
path_cmd = os.path.join (NS3_BUILDDIR, shell_command)
if valgrind:
cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 --errors-for-leak-kinds=all %s" % (suppressions_path,
path_cmd)
else:
cmd = path_cmd
if options.verbose:
print("Synchronously execute %s" % cmd)
start_time = time.time()
proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
elapsed_time = time.time() - start_time
retval = proc.returncode
try:
stdout_results = stdout_results.decode()
except UnicodeDecodeError:
print("Non-decodable character in stdout output of %s" % cmd)
print(stdout_results)
retval = 1
try:
stderr_results = stderr_results.decode()
except UnicodeDecodeError:
print("Non-decodable character in stderr output of %s" % cmd)
print(stderr_results)
retval = 1
if options.verbose:
print("Return code = ", retval)
print("stderr = ", stderr_results)
return (retval, stdout_results, stderr_results, elapsed_time)
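#
# A typical invocation, for illustration only (the path and suite name here
# are hypothetical):
#
#   (rc, out, err, elapsed) = run_job_synchronously(
#       "utils/ns3-dev-test-runner-debug --suite=some-test-suite",
#       os.getcwd(), False, False)
#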
#
# This class defines a unit of testing work. It will typically refer to
# a test suite to run using the test-runner, or an example to run directly.
#
class Job:
def __init__(self):
self.is_break = False
self.is_skip = False
self.skip_reason = ""
self.is_example = False
self.is_pyexample = False
self.shell_command = ""
self.display_name = ""
self.basedir = ""
self.tempdir = ""
self.cwd = ""
self.tmp_file_name = ""
self.returncode = False
self.elapsed_time = 0
self.build_path = ""
#
# A job is either a standard job or a special job indicating that a worker
    # thread should exit. This special job is indicated by setting is_break
# to true.
#
def set_is_break(self, is_break):
self.is_break = is_break
#
# If a job is to be skipped, we actually run it through the worker threads
# to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
#
def set_is_skip(self, is_skip):
self.is_skip = is_skip
#
# If a job is to be skipped, log the reason.
#
def set_skip_reason(self, skip_reason):
self.skip_reason = skip_reason
#
# Examples are treated differently than standard test suites. This is
# mostly because they are completely unaware that they are being run as
# tests. So we have to do some special case processing to make them look
# like tests.
#
def set_is_example(self, is_example):
self.is_example = is_example
#
    # Python examples are likewise unaware that they are being run as tests,
    # so the same special case processing applies to make them look like
    # tests.
#
def set_is_pyexample(self, is_pyexample):
self.is_pyexample = is_pyexample
#
# This is the shell command that will be executed in the job. For example,
#
# "utils/ns3-dev-test-runner-debug --test-name=some-test-suite"
#
def set_shell_command(self, shell_command):
self.shell_command = shell_command
#
# This is the build path where ns-3 was built. For example,
#
# "/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build/debug"
#
def set_build_path(self, build_path):
self.build_path = build_path
#
# This is the display name of the job, typically the test suite or example
# name. For example,
#
# "some-test-suite" or "udp-echo"
#
def set_display_name(self, display_name):
self.display_name = display_name
#
# This is the base directory of the repository out of which the tests are
# being run. It will be used deep down in the testing framework to determine
# where the source directory of the test was, and therefore where to find
# provided test vectors. For example,
#
# "/home/user/repos/ns-3-dev"
#
def set_basedir(self, basedir):
self.basedir = basedir
#
# This is the directory to which a running test suite should write any
# temporary files.
#
def set_tempdir(self, tempdir):
self.tempdir = tempdir
#
# This is the current working directory that will be given to an executing
# test as it is being run. It will be used for examples to tell them where
# to write all of the pcap files that we will be carefully ignoring. For
# example,
#
# "/tmp/unchecked-traces"
#
def set_cwd(self, cwd):
self.cwd = cwd
#
# This is the temporary results file name that will be given to an executing
# test as it is being run. We will be running all of our tests in parallel
# so there must be multiple temporary output files. These will be collected
# into a single XML file at the end and then be deleted.
#
def set_tmp_file_name(self, tmp_file_name):
self.tmp_file_name = tmp_file_name
#
# The return code received when the job process is executed.
#
def set_returncode(self, returncode):
self.returncode = returncode
#
# The elapsed real time for the job execution.
#
def set_elapsed_time(self, elapsed_time):
self.elapsed_time = elapsed_time
#
# The worker thread class that handles the actual running of a given test.
# Once spawned, it receives requests for work through its input_queue and
# ships the results back through the output_queue.
#
class worker_thread(threading.Thread):
def __init__(self, input_queue, output_queue):
threading.Thread.__init__(self)
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
while True:
job = self.input_queue.get()
#
# Worker threads continue running until explicitly told to stop with
# a special job.
#
if job.is_break:
return
#
# If the global interrupt handler sets the thread_exit variable,
            # we stop doing real work and just report back that a "break" in
            # the normal command processing has happened.
#
if thread_exit == True:
job.set_is_break(True)
self.output_queue.put(job)
continue
#
# If we are actually supposed to skip this job, do so. Note that
# if is_skip is true, returncode is undefined.
#
if job.is_skip:
if options.verbose:
print("Skip %s" % job.shell_command)
self.output_queue.put(job)
continue
#
# Otherwise go about the business of running tests as normal.
#
else:
if options.verbose:
print("Launch %s" % job.shell_command)
if job.is_example or job.is_pyexample:
#
# If we have an example, the shell command is all we need to
# know. It will be something like "examples/udp/udp-echo" or
# "examples/wireless/mixed-wireless.py"
#
(job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command,
job.cwd, options.valgrind, job.is_pyexample, job.build_path)
else:
#
# If we're a test suite, we need to provide a little more info
# to the test runner, specifically the base directory and temp
# file name
#
if options.update_data:
update_data = '--update-data'
else:
update_data = ''
(job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command +
" --xml --tempdir=%s --out=%s %s" % (job.tempdir, job.tmp_file_name, update_data),
job.cwd, options.valgrind, False)
job.set_elapsed_time(et)
if options.verbose:
print("returncode = %d" % job.returncode)
print("---------- begin standard out ----------")
print(standard_out)
print("---------- begin standard err ----------")
print(standard_err)
print("---------- end standard err ----------")
self.output_queue.put(job)
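#
# A minimal sketch of how the pieces above fit together (illustrative only;
# the real dispatch loop lives in run_tests() below):
#
#   input_queue, output_queue = queue.Queue(0), queue.Queue(0)
#   thread = worker_thread(input_queue, output_queue)
#   thread.start()
#   input_queue.put(some_job)               # dispatch a unit of work
#   stop = Job(); stop.set_is_break(True)
#   input_queue.put(stop)                   # tell the thread to exit
#   finished_job = output_queue.get()       # collect results as they finish
#   thread.join()
#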
#
# This is the main function that does the work of interacting with the
# test-runner itself.
#
def run_tests():
#
# Pull some interesting configuration information out of waf, primarily
# so we can know where executables can be found, but also to tell us what
# pieces of the system have been built. This will tell us what examples
# are runnable.
#
read_waf_config()
#
# Set the proper suffix.
#
global BUILD_PROFILE_SUFFIX
if BUILD_PROFILE == 'release':
BUILD_PROFILE_SUFFIX = ""
else:
BUILD_PROFILE_SUFFIX = "-" + BUILD_PROFILE
#
# Add the proper prefix and suffix to the test-runner name to
# match what is done in the wscript file.
#
test_runner_name = "%s%s-%s%s" % (APPNAME, VERSION, "test-runner", BUILD_PROFILE_SUFFIX)
#
# Run waf to make sure that everything is built, configured and ready to go
# unless we are explicitly told not to. We want to be careful about causing
# our users pain while waiting for extraneous stuff to compile and link, so
    # we allow users who know what they're doing to not invoke waf at all.
#
if not options.nowaf:
#
# If the user is running the "kinds" or "list" options, there is an
# implied dependency on the test-runner since we call that program
# if those options are selected. We will exit after processing those
# options, so if we see them, we can safely only build the test-runner.
#
# If the user has constrained us to running only a particular type of
# file, we can only ask waf to build what we know will be necessary.
# For example, if the user only wants to run BVT tests, we only have
# to build the test-runner and can ignore all of the examples.
#
# If the user only wants to run a single example, then we can just build
# that example.
#
# If there is no constraint, then we have to build everything since the
# user wants to run everything.
#
        if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
            waf_cmd = sys.executable + " waf --target=test-runner"
        elif len(options.example):
            waf_cmd = sys.executable + " waf --target=%s" % os.path.basename(options.example)
        else:
            waf_cmd = sys.executable + " waf"
if options.verbose:
print("Building: %s" % waf_cmd)
proc = subprocess.Popen(waf_cmd, shell = True)
proc.communicate()
if proc.returncode:
print("Waf died. Not running tests", file=sys.stderr)
return proc.returncode
#
# Dynamically set up paths.
#
make_paths()
#
# Get the information from the build status file.
#
build_status_file = os.path.join (NS3_BUILDDIR, 'build-status.py')
if os.path.exists(build_status_file):
ns3_runnable_programs = get_list_from_file(build_status_file, "ns3_runnable_programs")
ns3_runnable_scripts = get_list_from_file(build_status_file, "ns3_runnable_scripts")
else:
        print('The build status file was not found. You must run "waf build" before running test.py.', file=sys.stderr)
sys.exit(2)
#
# Make a dictionary that maps the name of a program to its path.
#
ns3_runnable_programs_dictionary = {}
for program in ns3_runnable_programs:
# Remove any directory names from path.
program_name = os.path.basename(program)
ns3_runnable_programs_dictionary[program_name] = program
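    # For example, a runnable program listed as "build/utils/ns3-dev-test-runner-debug"
    # (path illustrative) is keyed here as "ns3-dev-test-runner-debug".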
# Generate the lists of examples to run as smoke tests in order to
# ensure that they remain buildable and runnable over time.
#
example_tests = []
example_names_original = []
python_tests = []
for directory in EXAMPLE_DIRECTORIES:
# Set the directories and paths for this example.
example_directory = os.path.join("examples", directory)
examples_to_run_path = os.path.join(example_directory, "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this example directory's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests)
for module in NS3_ENABLED_MODULES:
# Remove the "ns3-" from the module name.
module = module[len("ns3-"):]
# Set the directories and paths for this example.
module_directory = os.path.join("src", module)
example_directory = os.path.join(module_directory, "examples")
examples_to_run_path = os.path.join(module_directory, "test", "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this module's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests)
for module in NS3_ENABLED_CONTRIBUTED_MODULES:
# Remove the "ns3-" from the module name.
module = module[len("ns3-"):]
# Set the directories and paths for this example.
module_directory = os.path.join("contrib", module)
example_directory = os.path.join(module_directory, "examples")
examples_to_run_path = os.path.join(module_directory, "test", "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this module's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests)
#
# If lots of logging is enabled, we can crash Python when it tries to
# save all of the text. We just don't allow logging to be turned on when
# test.py runs. If you want to see logging output from your tests, you
# have to run them using the test-runner directly.
#
os.environ["NS_LOG"] = ""
#
    # There are a couple of options that imply we want to exit before starting
# up a bunch of threads and running tests. Let's detect these cases and
# handle them without doing all of the hard work.
#
if options.kinds:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-type-list")
(rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if isinstance(standard_out, bytes):
            standard_out = standard_out.decode()
        print(standard_out)
if options.list:
if len(options.constrain):
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --print-test-types --test-type=%s" % options.constrain)
else:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --print-test-types")
(rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
if rc != 0:
# This is usually a sign that ns-3 crashed or exited uncleanly
            print('test.py error: test-runner returned {}'.format(rc))
            print('To debug, try running {}\n'.format('\'./waf --run "test-runner --print-test-name-list"\''))
return
if isinstance(standard_out, bytes):
standard_out = standard_out.decode()
list_items = standard_out.split('\n')
list_items.sort()
print("Test Type Test Name")
print("--------- ---------")
for item in list_items:
if len(item.strip()):
print(item)
example_names_original.sort()
for item in example_names_original:
print("example ", item)
print()
if options.kinds or options.list:
return
#
# We communicate results in two ways. First, a simple message relating
# PASS, FAIL, CRASH or SKIP is always written to the standard output. It
# is expected that this will be one of the main use cases. A developer can
# just run test.py with no options and see that all of the tests still
# pass.
#
# The second main use case is when detailed status is requested (with the
# --text or --html options). Typically this will be text if a developer
# finds a problem, or HTML for nightly builds. In these cases, an
# XML file is written containing the status messages from the test suites.
# This file is then read and translated into text or HTML. It is expected
# that nobody will really be interested in the XML, so we write it somewhere
# with a unique name (time) to avoid collisions. In case an error happens, we
# provide a runtime option to retain the temporary files.
#
# When we run examples as smoke tests, they are going to want to create
# lots and lots of trace files. We aren't really interested in the contents
# of the trace files, so we also just stash them off in the temporary dir.
# The retain option also causes these unchecked trace files to be kept.
#
date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())
if not os.path.exists(TMP_OUTPUT_DIR):
os.makedirs(TMP_OUTPUT_DIR)
    testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time)
if not os.path.exists(testpy_output_dir):
os.makedirs(testpy_output_dir)
#
# Create the main output file and start filling it with XML. We need to
# do this since the tests will just append individual results to this file.
#
xml_results_file = os.path.join(testpy_output_dir, "results.xml")
f = open(xml_results_file, 'w')
f.write('<?xml version="1.0"?>\n')
f.write('<Results>\n')
f.close()
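    # By the time we finish, the file will look roughly like:
    #
    #   <?xml version="1.0"?>
    #   <Results>
    #     <Test> <Name>...</Name> <Result>PASS</Result> ... </Test>
    #     <Example> <Name>...</Name> <Result>PASS</Result> ... </Example>
    #   </Results>
    #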
#
# We need to figure out what test suites to execute. We are either given one
# suite or example explicitly via the --suite or --example/--pyexample option,
# or we need to call into the test runner and ask it to list all of the available
# test suites. Further, we need to provide the constraint information if it
# has been given to us.
#
# This translates into allowing the following options with respect to the
# suites
#
    # ./test.py: run all of the suites and examples
    # ./test.py --constrain=core: run all of the suites of the core kind
# ./test.py --constrain=unit: run all unit suites
# ./test.py --suite=some-test-suite: run a single suite
# ./test.py --example=examples/udp/udp-echo: run single example
# ./test.py --pyexample=examples/wireless/mixed-wireless.py: run python example
# ./test.py --suite=some-suite --example=some-example: run the single suite
#
# We can also use the --constrain option to provide an ordering of test
# execution quite easily.
#
# Flag indicating a specific suite was explicitly requested
single_suite = False
if len(options.suite):
# See if this is a valid test suite.
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
if isinstance(suites, bytes):
suites = suites.decode()
if options.suite in suites.split('\n'):
suites = options.suite + "\n"
single_suite = True
else:
print('The test suite was not run because an unknown test suite name was requested.', file=sys.stderr)
sys.exit(2)
elif len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain):
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % options.constrain)
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
else:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
else:
suites = ""
#
    # suite_list will contain either the single test suite name that the user
    # has indicated they want to run or a list of test suites provided by
    # the test-runner, possibly according to user provided constraints.
# We go through the trouble of setting up the parallel execution
# even in the case of a single suite to avoid having to process the
# results in two different places.
#
if isinstance(suites, bytes):
suites = suites.decode()
suite_list = suites.split('\n')
#
# Performance tests should only be run when they are requested,
# i.e. they are not run by default in test.py.
# If a specific suite was requested we run it, even if
# it is a performance test.
if not single_suite and options.constrain != 'performance':
# Get a list of all of the performance tests.
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % "performance")
(rc, performance_tests, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
if isinstance(performance_tests, bytes):
performance_tests = performance_tests.decode()
performance_test_list = performance_tests.split('\n')
# Remove any performance tests from the suites list.
for performance_test in performance_test_list:
if performance_test in suite_list:
suite_list.remove(performance_test)
# We now have a possibly large number of test suites to run, so we want to
# run them in parallel. We're going to spin up a number of worker threads
# that will run our test jobs for us.
#
input_queue = queue.Queue(0)
output_queue = queue.Queue(0)
jobs = 0
    threads = []
#
    # In Python 2.6 and later you could just use the multiprocessing module,
    # but we don't want to introduce that dependency yet; so we jump through
    # a few hoops.
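    # For reference, the direct route would be roughly (not used here):
    #
    #   import multiprocessing
    #   processors = multiprocessing.cpu_count()
    #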
#
processors = 1
if sys.platform != "win32":
        if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
processors = os.sysconf('SC_NPROCESSORS_ONLN')
else:
proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
stdout_results = stdout_results.decode()
stderr_results = stderr_results.decode()
if len(stderr_results) == 0:
processors = int(stdout_results)
#
# Now, spin up one thread per processor which will eventually mean one test
# per processor running concurrently.
#
for i in range(processors):
thread = worker_thread(input_queue, output_queue)
threads.append(thread)
thread.start()
#
# Keep track of some summary statistics
#
total_tests = 0
skipped_tests = 0
skipped_testnames = []
#
# We now have worker threads spun up, and a list of work to do. So, run
# through the list of test suites and dispatch a job to run each one.
#
# Dispatching will run with unlimited speed and the worker threads will
# execute as fast as possible from the queue.
#
# Note that we actually dispatch tests to be skipped, so all of the
# PASS, FAIL, CRASH and SKIP processing is done in the same place.
#
for test in suite_list:
test = test.strip()
if len(test):
job = Job()
job.set_is_example(False)
job.set_is_pyexample(False)
job.set_display_name(test)
job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
job.set_cwd(os.getcwd())
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
            if options.multiple:
multiple = ""
else:
multiple = " --stop-on-failure"
            if len(options.fullness):
fullness = options.fullness.upper()
fullness = " --fullness=%s" % fullness
else:
fullness = " --fullness=QUICK"
path_cmd = os.path.join("utils", test_runner_name + " --test-name=%s%s%s" % (test, multiple, fullness))
job.set_shell_command(path_cmd)
if options.valgrind and test in core_valgrind_skip_tests:
job.set_is_skip(True)
job.set_skip_reason("crashes valgrind")
# Skip tests that will fail if NSC is missing.
if not NSC_ENABLED and test in core_nsc_missing_skip_tests:
job.set_is_skip(True)
job.set_skip_reason("requires NSC")
if options.verbose:
print("Queue %s" % test)
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# We've taken care of the discovered or specified test suites. Now we
# have to deal with examples run as smoke tests. We have a list of all of
# the example programs it makes sense to try and run. Each example will
# have a condition associated with it that must evaluate to true for us
# to try and execute it. This is used to determine if the example has
# a dependency that is not satisfied. For example, if an example depends
# on NSC being configured by waf, that example should have a condition
# that evaluates to true if NSC is enabled. For example,
#
# ("tcp-nsc-zoo", "NSC_ENABLED == True"),
#
# In this case, the example "tcp-nsc-zoo" will only be run if we find the
# waf configuration variable "NSC_ENABLED" to be True.
#
# We don't care at all how the trace files come out, so we just write them
# to a single temporary directory.
#
# XXX As it stands, all of the trace files have unique names, and so file
# collisions can only happen if two instances of an example are running in
# two versions of the test.py process concurrently. We may want to create
# uniquely named temporary traces directories to avoid this problem.
#
# We need to figure out what examples to execute. We are either given one
# suite or example explicitly via the --suite or --example option, or we
# need to walk the list of examples looking for available example
# conditions.
#
# This translates into allowing the following options with respect to the
# suites
#
# ./test.py: run all of the examples
    # ./test.py --constrain=unit: run no examples
    # ./test.py --constrain=example: run all of the examples
# ./test.py --suite=some-test-suite: run no examples
# ./test.py --example=some-example: run the single example
# ./test.py --suite=some-suite --example=some-example: run the single example
#
#
if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain) == 0 or options.constrain == "example":
if ENABLE_EXAMPLES:
for name, test, do_run, do_valgrind_run in example_tests:
# Remove any arguments and directory names from test.
test_name = test.split(' ', 1)[0]
test_name = os.path.basename(test_name)
# Don't try to run this example if it isn't runnable.
if test_name in ns3_runnable_programs_dictionary:
if eval(do_run):
job = Job()
job.set_is_example(True)
job.set_is_pyexample(False)
job.set_display_name(name)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(test)
job.set_build_path(options.buildpath)
if options.valgrind and not eval(do_valgrind_run):
job.set_is_skip (True)
job.set_skip_reason("skip in valgrind runs")
if options.verbose:
print("Queue %s" % test)
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
elif len(options.example):
# Add the proper prefix and suffix to the example name to
# match what is done in the wscript file.
example_name = "%s%s-%s%s" % (APPNAME, VERSION, options.example, BUILD_PROFILE_SUFFIX)
# Don't try to run this example if it isn't runnable.
if example_name not in ns3_runnable_programs_dictionary:
print("Example %s is not runnable." % example_name)
else:
#
# If you tell me to run an example, I will try and run the example
# irrespective of any condition.
#
example_path = ns3_runnable_programs_dictionary[example_name]
example_path = os.path.abspath(example_path)
job = Job()
job.set_is_example(True)
job.set_is_pyexample(False)
job.set_display_name(example_path)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(example_path)
job.set_build_path(options.buildpath)
if options.verbose:
print("Queue %s" % example_name)
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# Run some Python examples as smoke tests. We have a list of all of
# the example programs it makes sense to try and run. Each example will
# have a condition associated with it that must evaluate to true for us
# to try and execute it. This is used to determine if the example has
# a dependency that is not satisfied.
#
# We don't care at all how the trace files come out, so we just write them
# to a single temporary directory.
#
# We need to figure out what python examples to execute. We are either
# given one pyexample explicitly via the --pyexample option, or we
# need to walk the list of python examples
#
# This translates into allowing the following options with respect to the
# suites
#
    # ./test.py --constrain=pyexample: run all of the python examples
# ./test.py --pyexample=some-example.py: run the single python example
#
if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain) == 0 or options.constrain == "pyexample":
if ENABLE_EXAMPLES:
for test, do_run in python_tests:
# Remove any arguments and directory names from test.
test_name = test.split(' ', 1)[0]
test_name = os.path.basename(test_name)
# Don't try to run this example if it isn't runnable.
if test_name in ns3_runnable_scripts:
if eval(do_run):
job = Job()
job.set_is_example(False)
job.set_is_pyexample(True)
job.set_display_name(test)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(test)
job.set_build_path("")
#
# Python programs and valgrind do not work and play
# well together, so we skip them under valgrind.
# We go through the trouble of doing all of this
# work to report the skipped tests in a consistent
# way through the output formatter.
#
if options.valgrind:
job.set_is_skip (True)
job.set_skip_reason("skip in valgrind runs")
#
# The user can disable python bindings, so we need
# to pay attention to that and give some feedback
# that we're not testing them
#
if not ENABLE_PYTHON_BINDINGS:
job.set_is_skip (True)
job.set_skip_reason("requires Python bindings")
if options.verbose:
print("Queue %s" % test)
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
elif len(options.pyexample):
# Don't try to run this example if it isn't runnable.
example_name = os.path.basename(options.pyexample)
if example_name not in ns3_runnable_scripts:
print("Example %s is not runnable." % example_name)
else:
#
# If you tell me to run a python example, I will try and run the example
# irrespective of any condition.
#
job = Job()
job.set_is_pyexample(True)
job.set_display_name(options.pyexample)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(options.pyexample)
job.set_build_path("")
if options.verbose:
print("Queue %s" % options.pyexample)
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# Tell the worker threads to pack up and go home for the day. Each one
    # will exit when it sees its is_break task.
#
for i in range(processors):
job = Job()
job.set_is_break(True)
input_queue.put(job)
#
# Now all of the tests have been dispatched, so all we have to do here
# in the main thread is to wait for them to complete. Keyboard interrupt
# handling is broken as mentioned above. We use a signal handler to catch
# sigint and set a global variable. When the worker threads sense this
# they stop doing real work and will just start throwing jobs back at us
# with is_break set to True. In this case, there are no real results so we
# ignore them. If there are real results, we always print PASS or FAIL to
# standard out as a quick indication of what happened.
#
passed_tests = 0
failed_tests = 0
failed_testnames = []
crashed_tests = 0
crashed_testnames = []
valgrind_errors = 0
valgrind_testnames = []
for i in range(jobs):
job = output_queue.get()
if job.is_break:
continue
if job.is_example or job.is_pyexample:
kind = "Example"
else:
kind = "TestSuite"
if job.is_skip:
status = "SKIP"
skipped_tests = skipped_tests + 1
skipped_testnames.append(job.display_name + (" (%s)" % job.skip_reason))
else:
if job.returncode == 0:
status = "PASS"
passed_tests = passed_tests + 1
elif job.returncode == 1:
failed_tests = failed_tests + 1
failed_testnames.append(job.display_name)
status = "FAIL"
elif job.returncode == 2:
valgrind_errors = valgrind_errors + 1
valgrind_testnames.append(job.display_name)
status = "VALGR"
else:
crashed_tests = crashed_tests + 1
crashed_testnames.append(job.display_name)
status = "CRASH"
if options.duration or options.constrain == "performance":
print("%s (%.3f): %s %s" % (status, job.elapsed_time, kind, job.display_name))
else:
print("%s: %s %s" % (status, kind, job.display_name))
if job.is_example or job.is_pyexample:
#
# Examples are the odd man out here. They are written without any
# knowledge that they are going to be run as a test, so we need to
# cook up some kind of output for them. We're writing an xml file,
# so we do some simple XML that says we ran the example.
#
# XXX We could add some timing information to the examples, i.e. run
# them through time and print the results here.
#
f = open(xml_results_file, 'a')
f.write('<Example>\n')
example_name = " <Name>%s</Name>\n" % job.display_name
f.write(example_name)
if status == "PASS":
f.write(' <Result>PASS</Result>\n')
elif status == "FAIL":
f.write(' <Result>FAIL</Result>\n')
elif status == "VALGR":
f.write(' <Result>VALGR</Result>\n')
elif status == "SKIP":
f.write(' <Result>SKIP</Result>\n')
else:
f.write(' <Result>CRASH</Result>\n')
f.write(' <Time real="%.3f"/>\n' % job.elapsed_time)
f.write('</Example>\n')
f.close()
else:
#
# If we're not running an example, we're running a test suite.
# These puppies are running concurrently and generating output
# that was written to a temporary file to avoid collisions.
#
# Now that we are executing sequentially in the main thread, we can
# concatenate the contents of the associated temp file to the main
# results file and remove that temp file.
#
# One thing to consider is that a test suite can crash just as
# well as any other program, so we need to deal with that
# possibility as well. If it ran correctly it will return 0
# if it passed, or 1 if it failed. In this case, we can count
# on the results file it saved being complete. If it crashed, it
# will return some other code, and the file should be considered
# corrupt and useless. If the suite didn't create any XML, then
# we're going to have to do it ourselves.
#
# Another issue is how to deal with a valgrind error. If we run
# a test suite under valgrind and it passes, we will get a return
# code of 0 and there will be a valid xml results file since the code
# ran to completion. If we get a return code of 1 under valgrind,
# the test case failed, but valgrind did not find any problems so the
# test case return code was passed through. We will have a valid xml
# results file here as well since the test suite ran. If we see a
# return code of 2, this means that valgrind found an error (we asked
# it to return 2 if it found a problem in run_job_synchronously) but
# the suite ran to completion so there is a valid xml results file.
# If the suite crashes under valgrind we will see some other error
# return code (like 139). If valgrind finds an illegal instruction or
# some other strange problem, it will die with its own strange return
# code (like 132). However, if the test crashes by itself, not under
# valgrind we will also see some other return code.
#
# If the return code is 0, 1, or 2, we have a valid xml file. If we
# get another return code, we have no xml and we can't really say what
# happened -- maybe the TestSuite crashed, maybe valgrind crashed due
# to an illegal instruction. If we get something beside 0-2, we assume
# a crash and fake up an xml entry. After this is all done, we still
# need to indicate a valgrind error somehow, so we fake up an xml entry
# with a VALGR result. Thus, in the case of a working TestSuite that
# fails valgrind, we'll see the PASS entry for the working TestSuite
# followed by a VALGR failing test suite of the same name.
#
if job.is_skip:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>SKIP</Result>\n')
f.write(" <Reason>%s</Reason>\n" % job.skip_reason)
f.write("</Test>\n")
f.close()
else:
if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
f_to = open(xml_results_file, 'a')
f_from = open(job.tmp_file_name)
f_to.write(f_from.read())
f_to.close()
f_from.close()
else:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>CRASH</Result>\n')
f.write("</Test>\n")
f.close()
#
# We have all of the tests run and the results written out. One final
# bit of housekeeping is to wait for all of the threads to close down
# so we can exit gracefully.
#
for thread in threads:
thread.join()
#
# Back at the beginning of time, we started the body of an XML document
# since the test suites and examples were going to just write their
# individual pieces. So, we need to finish off and close out the XML
# document
#
f = open(xml_results_file, 'a')
f.write('</Results>\n')
f.close()
#
# Print a quick summary of events
#
print("%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests,
total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors))
#
# Repeat summary of skipped, failed, crashed, valgrind events
#
if skipped_testnames:
skipped_testnames.sort()
print('List of SKIPped tests:\n %s' % '\n '.join(map(str, skipped_testnames)))
if failed_testnames:
failed_testnames.sort()
print('List of FAILed tests:\n %s' % '\n '.join(map(str, failed_testnames)))
if crashed_testnames:
crashed_testnames.sort()
print('List of CRASHed tests:\n %s' % '\n '.join(map(str, crashed_testnames)))
if valgrind_testnames:
valgrind_testnames.sort()
print('List of VALGR failures:\n %s' % '\n '.join(map(str, valgrind_testnames)))
#
# The last things to do are to translate the XML results file to "human
# readable form" if the user asked for it (or make an XML file somewhere)
#
if len(options.html) + len(options.text) + len(options.xml):
print()
if len(options.html):
translate_to_html(xml_results_file, options.html)
if len(options.text):
translate_to_text(xml_results_file, options.text)
if len(options.xml):
xml_file = options.xml + '.xml'
print('Writing results to xml file %s...' % xml_file, end='')
shutil.copyfile(xml_results_file, xml_file)
print('done.')
#
# Let the user know if they need to turn on tests or examples.
#
if not ENABLE_TESTS or not ENABLE_EXAMPLES:
print()
if not ENABLE_TESTS:
print('*** Note: ns-3 tests are currently disabled. Enable them by adding')
print('*** "--enable-tests" to ./waf configure or modifying your .ns3rc file.')
print()
if not ENABLE_EXAMPLES:
print('*** Note: ns-3 examples are currently disabled. Enable them by adding')
print('*** "--enable-examples" to ./waf configure or modifying your .ns3rc file.')
print()
#
# Let the user know if they tried to use valgrind but it was not
# present on their machine.
#
if options.valgrind and not VALGRIND_FOUND:
print()
print('*** Note: you are trying to use valgrind, but valgrind could not be found')
print('*** on your machine. All tests and examples will crash or be skipped.')
print()
#
# If we have been asked to retain all of the little temporary files, we
# don't delete tm. If we do delete the temporary files, delete only the
# directory we just created. We don't want to happily delete any retained
# directories, which will probably surprise the user.
#
if not options.retain:
shutil.rmtree(testpy_output_dir)
if passed_tests + skipped_tests == total_tests:
return 0 # success
else:
return 1 # catchall for general errors
def main(argv):
parser = optparse.OptionParser()
parser.add_option("-b", "--buildpath", action="store", type="string", dest="buildpath", default="",
metavar="BUILDPATH",
help="specify the path where ns-3 was built (defaults to the build directory for the current variant)")
parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
metavar="KIND",
help="constrain the test-runner by kind of test")
parser.add_option("-d", "--duration", action="store_true", dest="duration", default=False,
help="print the duration of each test suite and example")
parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
metavar="EXAMPLE",
help="specify a single example to run (no relative path is needed)")
parser.add_option("-u", "--update-data", action="store_true", dest="update_data", default=False,
help="If examples use reference data files, get them to re-generate them")
parser.add_option("-f", "--fullness", action="store", type="choice", dest="fullness", default="QUICK",
metavar="FULLNESS", choices=["QUICK", "EXTENSIVE", "TAKES_FOREVER"],
help="choose the duration of tests to run: QUICK, EXTENSIVE, or TAKES_FOREVER, where EXTENSIVE includes QUICK and TAKES_FOREVER includes QUICK and EXTENSIVE (only QUICK tests are run by default)")
parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False,
help="run the test suites and examples using valgrind")
parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False,
help="print the kinds of tests available")
parser.add_option("-l", "--list", action="store_true", dest="list", default=False,
help="print the list of known tests")
parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False,
help="report multiple failures from test suites and test cases")
parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False,
help="do not run waf before starting testing")
parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="",
metavar="PYEXAMPLE",
help="specify a single python example to run (with relative path)")
parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False,
help="retain all temporary files (which are normally deleted)")
parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="",
metavar="TEST-SUITE",
help="specify a single test suite to run")
parser.add_option("-t", "--text", action="store", type="string", dest="text", default="",
metavar="TEXT-FILE",
help="write detailed test results into TEXT-FILE.txt")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="print progress and informational messages")
parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="",
metavar="HTML-FILE",
help="write detailed test results into HTML-FILE.html")
parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="",
metavar="XML-FILE",
help="write detailed test results into XML-FILE.xml")
global options
options = parser.parse_args()[0]
signal.signal(signal.SIGINT, sigint_hook)
return run_tests()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| kronat/ns-3-dev-git | test.py | Python | gpl-2.0 | 80,507 |
"""
FEniCS tutorial demo program: Poisson equation with Dirichlet conditions.
As d2_p2D.py, but LinearVariationalProblem and LinearVariationalSolver
are used instead of the solve(a == L, u, bc) call in d2_p2D.py.
-Laplace(u) = f on the unit square.
u = u0 on the boundary.
u0 = u = 1 + x^2 + 2y^2, f = -6.
"""
from __future__ import print_function
import os
from dolfin import *
# Create mesh and define function space
#mesh = UnitSquareMesh(600, 400)
mesh = UnitSquareMesh(60, 40)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define boundary conditions
u0 = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')
def u0_boundary(x, on_boundary):
return on_boundary
bc = DirichletBC(V, u0, u0_boundary)
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(-6.0)
a = inner(nabla_grad(u), nabla_grad(v))*dx
L = f*v*dx
# Compute solution
u = Function(V)
problem = LinearVariationalProblem(a, L, u, bc)
solver = LinearVariationalSolver(problem)
solver.parameters['linear_solver'] = 'gmres'
solver.parameters['preconditioner'] = 'ilu'
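# For comparison, the one-shot call used in d2_p2D.py would be roughly
# equivalent to:
#
# solve(a == L, u, bc,
#       solver_parameters={'linear_solver': 'gmres', 'preconditioner': 'ilu'})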
info(solver.parameters, True)
print(parameters['linear_algebra_backend'])
cg_prm = solver.parameters['krylov_solver'] # short form
cg_prm['absolute_tolerance'] = 1E-7
cg_prm['relative_tolerance'] = 1E-4
cg_prm['maximum_iterations'] = 10000
#cg_prm['preconditioner']['ilu']['fill_level'] = 0
set_log_level(DEBUG)
solver.solve()
# Plot solution and mesh
#plot(u)
#plot(mesh)
# Dump solution to file in VTK format
file = File('poisson.pvd')
file << u
# Hold plot
interactive()
| MiroK/dolfin | test/unit/python/book/chapter_1_files/stationary/poisson/d3_p2D.py | Python | gpl-3.0 | 1,535 |
# -*- coding: utf-8 -*-
#
# Sensor Widgets documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 4 12:48:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
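# For example, enabling a couple of common built-in extensions would look
# like this (not enabled for this project):
#extensions = ['sphinx.ext.todo', 'sphinx.ext.intersphinx']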
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sensor Widgets'
copyright = u'2015, Oscar Fonts'
author = u'Oscar Fonts'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ca'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'es'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SensorWidgetsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SensorWidgets.tex', u'Sensor Widgets Documentation',
u'Oscar Fonts', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sensorwidgets', u'Sensor Widgets Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SensorWidgets', u'Sensor Widgets Documentation',
author, 'SensorWidgets', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| oscarfonts/sensor-widgets | doc/ca/conf.py | Python | mit | 9,203 |
from __future__ import (absolute_import, division, print_function)
import glob, os, sys
test_files = glob.glob('*.py')
test_files.remove('run_all.py')
test_files.remove('allskymap.py')
test_files.remove('fcstmaps.py')
test_files.remove('fcstmaps_axesgrid.py')
test_files.remove('testgdal.py')
test_files.remove('animate.py')
test_files.remove('geos_demo_2.py')
test_files.remove('plotsst.py')
test_files.remove('embedding_map_in_wx.py') # requires wx
test_files.remove('plothighsandlows.py') # requires scipy
test_files.remove('lic_demo.py')
test_files.remove('testwmsimage.py')
try:
from netCDF4 import Dataset
except ImportError:
# remove tests requiring netCDF4
sys.stdout.write("Could not import netCDF4, skipping tests that require netCDF4.\n")
test_files.remove('streamplot_demo.py')
test_files.remove('plotprecip.py')
test_files.remove('test_rotpole.py')
test_files.remove('ccsm_popgrid.py')
test_files.remove('ploticos.py')
py_path = os.environ.get('PYTHONPATH')
if py_path is None:
py_path = '.'
else:
py_path = os.pathsep.join(['.',py_path])
os.environ['PYTHONPATH'] = py_path
for f in test_files:
sys.stdout.write( "**********************************************\n")
ff = os.path.join(sys.path[0],f)
args = [sys.executable,ff]
sys.stdout.write("Running %s\n" % f)
status = os.spawnve(os.P_WAIT,sys.executable,args,os.environ)
if status:
sys.stdout.write('TEST FAILURE (status=%s)\n' % (status))
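# Note: the os.spawnve() call above is roughly equivalent to the more modern
#   subprocess.call([sys.executable, ff], env=os.environ)
# (shown for illustration only).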
| matplotlib/basemap | examples/run_all.py | Python | mit | 1,481 |
# -*- coding: utf-8 -*-
''' Used to extract attachment ids from html files in ./html folder.
Extracted ids are saved in ./pickle folder'''
from urllib import parse
import os, re, pickle
from config import path
attachment = re.compile(r'''<a href="attachment\.php\?aid=(\d+)" target="_blank">''')
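# The pattern above captures the numeric attachment id, e.g. from a line like
# (illustrative): <a href="attachment.php?aid=12345" target="_blank">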
failcnt = 0
aids_list = []
for htmlfile in os.listdir('%s/html/'%path):
with open('%s/html/%s'%(path,htmlfile),'r',errors='ignore') as f:
print('Extracting from',parse.unquote_plus(htmlfile))
flag = True
lines = f.readlines()
for line in lines:
p = re.search('<title> SexInSex! Board </title>', line)
if p:
flag = False
print('Failed to extract from',parse.unquote_plus(htmlfile))
failcnt += 1
break
if flag:
for line in lines:
a = attachment.search(line)
if a:
aids_list.append(a.group(1))
print('OK')
s_aids_list = list(set(aids_list))
print(s_aids_list)
print('%d attachment ids found'%len(s_aids_list))
print('saving attachment ids to pickle/aids_list.p')
with open('%s/pickle/aids_list.p'%path,'wb') as a:
pickle.dump(s_aids_list, a)
print('Finished. %d files failed to extract.'%(failcnt))
| Nymphet/sexinsex-crawler | page_aids_extractor.py | Python | apache-2.0 | 1,319 |
#!/usr/bin/env python
# encoding: utf-8
"""
remove_dups_from_sorted_list_ii.py
Created by Shengwei on 2014-07-05.
"""
# https://oj.leetcode.com/problems/remove-duplicates-from-sorted-list-ii/
# tags: medium, linked-list, pointer, dups, edge cases
"""
Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.
For example,
Given 1->2->3->3->4->4->5, return 1->2->5.
Given 1->1->1->2->3, return 2->3.
"""
# TODO: other solutions
# 1. use counter to distinguish the situation where current value is not the same as the next
# if counter is 0, current node is unique; otherwise, start from the next node
#    (changejian's code and https://oj.leetcode.com/discuss/5743/how-can-i-improve-my-code)
# 2. use two pointers, move prior one when necessary
# https://github.com/MaskRay/LeetCode/blob/master/remove-duplicates-from-sorted-list-ii.cc
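# A rough sketch of approach 2 (two pointers; illustrative, untested):
#
#   pre, cur = dummy, head                # with dummy.next == head
#   while cur:
#       while cur.next and cur.next.val == cur.val:
#           cur = cur.next                # cur ends on the last node of a run
#       if pre.next is cur:               # run of length 1 -> keep the node
#           pre = pre.next
#       else:                             # run of duplicates -> splice it out
#           pre.next = cur.next
#       cur = cur.next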
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @return a ListNode
def deleteDuplicates(self, head):
if head is None:
return head
pre = dummy_head = ListNode(0)
dummy_head.next = head
walker, runner = head, head.next
while walker and walker.next:
if walker.val != walker.next.val:
pre.next = walker
pre = walker
walker = walker.next
else:
# look for next unique node or None
runner = walker.next
while runner.next and runner.next.val == walker.val:
runner = runner.next
# runner.next can be either None or a node with different value
walker = runner.next
# walker is either None (last node is also a dup) or last unique node
pre.next = walker
return dummy_head.next
| CodingVault/LeetCodeInPython | remove_dups_from_sorted_list_ii.py | Python | apache-2.0 | 2,026 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from mxnet.test_utils import *
from mxnet.base import MXNetError
from common import setup_module, with_seed, teardown, assertRaises
import random
import warnings
def is_scalar(var):
    return not hasattr(var, "__len__")
def get_result_type(call, dflt_stype):
"""Try to infer result storage type for a sparse matrix and a given unary operation"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
result = do_normalize(call(zero))
if not almost_equal(result, zero, equal_nan=True):
expected_result_type = 'default'
        elif dflt_stype is not None:
            expected_result_type = dflt_stype
        else:
            expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
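# Added sketch (not part of the original suite): a quick self-check of the
# zero-preservation probe above. Zero-preserving ops such as np.sin may keep a
# sparse result type, while ops such as np.cos force a 'default' (dense) result.
def _demo_zero_preservation():
    zero = np.zeros((1,))
    assert almost_equal(np.sin(zero), zero)       # sin(0) == 0 -> sparse-friendly
    assert not almost_equal(np.cos(zero), zero)   # cos(0) == 1 -> needs 'default'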
def get_result_type_with_scalar(call, dflt_stype):
"""Try to infer result storage type when operating a sparse matrices and a scalar"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
result = call(zero, 5)
if not almost_equal(result, zero, equal_nan=True):
expected_result_type = 'default'
        elif dflt_stype is not None:
            expected_result_type = dflt_stype
        else:
            expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_2(call, dflt_stype):
"""Try to infer result storage type when operating on two sparse matrices"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
need_default = False
for outer in [zero, np.ones(zero.shape)]:
for inner in [zero, np.ones(zero.shape)]:
result = do_normalize(call(outer, inner))
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
if need_default is True:
break
if not need_default and dflt_stype is not None:
expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_3(call, dflt_stype):
"""Try to infer result storage type when operating on three sparse matrices"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
need_default = False
for moon in [zero]:
for outer in [zero]:
for inner in [zero]:
res_1, res_2 = call(moon, outer, inner)
result = do_normalize(res_1)
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
result = do_normalize(res_2)
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
if need_default is True:
break
if need_default is True:
break
if not need_default and dflt_stype is not None:
expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_fw_bw_result_types(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type(forward_numpy_call, fwd_res_dflt),
get_result_type(backward_numpy_call, bwd_res_dflt))
def get_fw_bw_result_types_2(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type(forward_numpy_call, fwd_res_dflt),
get_result_type_2(backward_numpy_call, bwd_res_dflt))
def get_fw_bw_result_types_with_scalar(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type_with_scalar(forward_numpy_call, fwd_res_dflt),
get_result_type_with_scalar(backward_numpy_call, bwd_res_dflt))
def gen_rsp_random_indices(shape, density=.5, force_indices=None):
assert density >= 0 and density <= 1
indices = set()
if force_indices is not None:
for val in force_indices:
indices.add(int(val))
if not np.isclose(density, .0, rtol=1.e-3, atol=1.e-3, equal_nan=True) and len(shape) > 0:
row_count = shape[0]
for i in range(row_count):
r = random.uniform(0, 1)
if r <= density and len(indices) < shape[0]:
indices.add(i)
assert len(indices) <= shape[0]
return list(indices)
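# Added illustration: with density=1.0 every row index is selected, so for a
# (4, 3) shape the helper always returns all four row indices.
def _demo_rsp_indices():
    assert sorted(gen_rsp_random_indices((4, 3), density=1.0)) == [0, 1, 2, 3]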
def all_zero(var):
return 0
@with_seed()
def test_elemwise_binary_ops():
def test_elemwise_binary_op(name, lhs_stype, rhs_stype, shape,
forward_mxnet_call, forward_numpy_call, backward_numpy_call,
lhs_grad_stype,
rhs_grad_stype,
expected_result_storage_type=None,
modifier_func=None,
lhs_density=.5,
rhs_density=.5,
force_lr_overlap=False,
force_grad_overlap=False,
ograd_density=0.0,
skip_gradient_check=False,
shuffle_csr_indices=True,
verbose=False):
if lhs_grad_stype is None:
lhs_grad_stype = lhs_stype
if rhs_grad_stype is None:
rhs_grad_stype = rhs_stype
lhs_grad_stype = get_result_type_3(backward_numpy_call, lhs_grad_stype)
rhs_grad_stype = get_result_type_3(backward_numpy_call, rhs_grad_stype)
if verbose is True:
print("testing: {} lhs={}, rhs={}, lhs_grad_stype={}, rhs_grad_stype={}"
.format(name, lhs_stype, rhs_stype, lhs_grad_stype, rhs_grad_stype))
# Output type should be same as lvalue type, unless otherwise specified
if expected_result_storage_type is None:
if lhs_stype == 'default' or rhs_stype == 'default':
expected_result_storage_type = 'default'
else:
expected_result_storage_type = lhs_stype
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
grad_stypes = dict()
grad_stypes['lhs'] = lhs_grad_stype
grad_stypes['rhs'] = rhs_grad_stype
if lhs_stype == 'default':
lhs_nd = rand_ndarray(shape, 'default')
if abs(lhs_density) < 1e-4:
func = all_zero
else:
func = modifier_func
lhs_nd = mx.nd.array(assign_each(lhs_nd.asnumpy(), func))
else:
lhs_nd = create_sparse_array_zd(
shape, lhs_stype, density=lhs_density,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=lhs_density,
force_indices=[(shape[0]/2)] if force_lr_overlap is True else None
))
if rhs_stype == 'default':
rhs_nd = rand_ndarray(shape, 'default')
if abs(rhs_density) < 1e-4:
func = all_zero
else:
func = modifier_func
rhs_nd = mx.nd.array(assign_each(rhs_nd.asnumpy(), func))
else:
rhs_nd = create_sparse_array_zd(
shape, rhs_stype, density=rhs_density,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=rhs_density,
force_indices=[(shape[0]/2)] if force_lr_overlap is True else None
))
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd.asnumpy()
if verbose is True:
print("lhs input: {}".format(lhs_np))
print("rhs input: {}".format(rhs_np))
out_np = forward_numpy_call(lhs_np, rhs_np)
if verbose is True:
print("out_np: {}".format(out_np))
test = forward_mxnet_call(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
outputs = check_symbolic_forward(test, location, [out_np], equal_nan=True)
assert len(outputs) == 1
assert outputs[0].stype == expected_result_storage_type
if verbose is True:
            print("mx forward output: ", outputs[0].asnumpy())
            print("lhs_nd: ", lhs_nd.stype)
            print("rhs_nd: ", rhs_nd.stype)
            print("forward output: ", outputs[0].stype)
if outputs[0].stype != 'default':
out_grad = create_sparse_array_zd(
shape, outputs[0].stype, density=ograd_density,
data_init=1,
modifier_func=lambda x: 2,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=ograd_density,
force_indices=[(shape[0]/2)] if force_grad_overlap is True else None
))
else:
if abs(ograd_density) < 1e-4:
out_grad = mx.nd.array(np.zeros(shape))
else:
out_grad = mx.nd.array(np.ones(shape))
out_grad_np = out_grad.asnumpy()
if verbose is True:
print("out_grad_np", out_grad_np)
ingrad_lhs_np, ingrad_rhs_np = backward_numpy_call(out_grad_np, lhs_np, rhs_np)
if verbose is True:
print("out_grad", out_grad.asnumpy())
print("ingrad_lhs_np", ingrad_lhs_np)
print("ingrad_rhs_np", ingrad_rhs_np)
igrads_result = check_symbolic_backward(test, location, [out_grad],
[ingrad_lhs_np, ingrad_rhs_np],
grad_stypes=grad_stypes,
equal_nan=True)
if verbose is True:
print("ingrad_lhs", igrads_result['lhs'].asnumpy())
print("ingrad_rhs", igrads_result['rhs'].asnumpy())
assert len(igrads_result) == 2
if lhs_grad_stype is not None:
assert igrads_result['lhs'].stype == lhs_grad_stype
if rhs_grad_stype is not None:
assert igrads_result['rhs'].stype == rhs_grad_stype
if skip_gradient_check is not True:
check_numeric_gradient(test, location,
grad_stype_dict=grad_stypes)
def check_all(l, r, check_function):
assert l.shape == r.shape
return check_function(l, r)
def gt(l, r):
return check_all(l, r, lambda a, b: a > b)
def ge(l, r):
return check_all(l, r, lambda a, b: a >= b)
def lt(l, r):
return check_all(l, r, lambda a, b: a < b)
def le(l, r):
return check_all(l, r, lambda a, b: a <= b)
def elemwise_mul_stype(lstype, rstype):
if lstype == rstype:
return lstype
elif lstype == 'default' and rstype == 'row_sparse':
return 'row_sparse'
elif lstype == 'row_sparse' and rstype == 'default':
return 'row_sparse'
elif lstype == 'default' and rstype == 'csr':
return 'csr'
elif lstype == 'csr' and rstype == 'default':
return 'csr'
else:
return 'default'
def elemwise_mul_lhs_grad_stype(lstype, rstype):
return elemwise_mul_stype(elemwise_mul_stype(lstype, rstype), rstype)
def elemwise_mul_rhs_grad_stype(lstype, rstype):
return elemwise_mul_stype(elemwise_mul_stype(lstype, rstype), lstype)
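    # For reference (added note): these helpers encode the storage-type
    # promotion rules for elemwise_mul, e.g.
    #   elemwise_mul_stype('row_sparse', 'default') -> 'row_sparse'
    #   elemwise_mul_stype('csr', 'default')        -> 'csr'
    #   elemwise_mul_stype('csr', 'row_sparse')     -> 'default'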
def check_elemwise_binary_ops(lhs_stype, rhs_stype, shape,
lhs_grad_stype=None, rhs_grad_stype=None,
lhs_density=.5, rhs_density=.5,
force_lr_overlap=False,
force_grad_overlap=False,
ograd_density=0.0):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
        # note: use '==' (not 'is') for string comparison; 'and' binds tighter than 'or'
        if ((lhs_stype == 'default' and rhs_stype == 'row_sparse') or
                (lhs_stype == 'default' and rhs_stype == 'csr') or
                ((lhs_stype == 'row_sparse' and rhs_stype == 'row_sparse') and (rhs_density == 0.0))):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r, out=l),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_sub", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_sub(l, r, out=l),
lambda l, r: l - r,
lambda outg, l, r: (outg, -outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
        if ((lhs_stype == 'row_sparse' and rhs_stype == 'row_sparse') and (lhs_density == 0.0)):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r, out=r),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_sub", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_sub(l, r, out=l),
lambda l, r: l - r,
lambda outg, l, r: (outg, -outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_sub", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_sub(l, r),
lambda l, r: l - r,
lambda outg, l, r: (outg, -outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density,
rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_mul", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_mul(l, r),
lambda l, r: l * r,
lambda outg, l, r: (outg * r, outg * l),
elemwise_mul_lhs_grad_stype(lhs_stype, rhs_stype),
elemwise_mul_rhs_grad_stype(lhs_stype, rhs_stype),
expected_result_storage_type=elemwise_mul_stype(lhs_stype, rhs_stype),
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_div", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_div(l, r),
lambda l, r: l / r,
lambda outg, l, r: (outg * (1/r), outg * (-l/(r*r))),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
expected_result_storage_type='default',
skip_gradient_check=True,
verbose=False)
test_elemwise_binary_op("maximum", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._maximum(l, r),
lambda l, r: np.maximum(l, r),
lambda outg, l, r: (outg * ge(l, r), outg * lt(l, r)),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
skip_gradient_check=True,
ograd_density=ograd_density,
verbose=False)
test_elemwise_binary_op("minimum", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._minimum(l, r),
lambda l, r: np.minimum(l, r),
lambda outg, l, r: (outg * le(l, r), outg * gt(l, r)),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
skip_gradient_check=True,
verbose=False)
test_elemwise_binary_op("hypot", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._hypot(l, r),
lambda l, r: np.hypot(l, r),
lambda outg, l, r: (
outg * assign_each2(
l, r, lambda a, b: a/np.sqrt(a * a + b * b)),
outg * assign_each2(
l, r, lambda a, b: b/np.sqrt(a * a + b * b))
),
lhs_grad_stype, rhs_grad_stype,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
skip_gradient_check=True,
verbose=False)
# Run basic tests
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for ii in range(1):
# Run defaults
shape = rand_shape_2d()
print("testing defaults with shape: {}".format(shape))
check_elemwise_binary_ops('default', 'default', shape)
# Try different densities
shape = rand_shape_2d()
for lhs_density in [0.0, random.uniform(0, 1), 1.0]:
for rhs_density in [0.0, random.uniform(0, 1), 1.0]:
for ograd_density in [0.0, random.uniform(0, 1), 1.0]:
print("lhs_density={}, rhs_density={}, ograd_density={}, shape: {}".format(
lhs_density, rhs_density, ograd_density, shape))
# Try row_sparse overlaps
for force_lr_overlap in [False, True]:
for force_grad_overlap in [False, True]:
print(" force_lr_overlap={}, force_grad_overlap={}, shape={}".
format(force_lr_overlap, force_grad_overlap, shape))
# Left and right always overlap when one is default storage
# (assuming the row_sparse one has some entries in it)
if force_lr_overlap is False:
check_elemwise_binary_ops('default', 'row_sparse', shape,
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
check_elemwise_binary_ops('row_sparse', 'default', shape,
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
                                # Back to left-right overlap possibilities
check_elemwise_binary_ops('row_sparse', 'row_sparse', shape,
lhs_grad_stype='row_sparse',
rhs_grad_stype='row_sparse',
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
# No overlap flags for CSR
check_elemwise_binary_ops('csr', 'csr', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('csr', 'csr', shape,
lhs_grad_stype='default',
rhs_grad_stype='default',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('default', 'csr', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('csr', 'default', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
@with_seed()
def test_elemwise_csr_same_zeros():
# Zeroes
a = mx.nd.sparse.zeros('csr', (1,1))
b = mx.nd.elemwise_add(a,a)
res = a.asnumpy() + a.asnumpy()
assert_almost_equal(b.asnumpy(), res)
def as_dense(arr):
if arr.stype != 'default':
return mx.nd.cast_storage(arr, stype='default')
else:
        return arr
# Make sure that 0's look like 0's when we do a comparison
def do_normalize(arr):
ret = arr.copy()
idx = np.isclose(arr, -0, rtol=1.e-3, atol=1.e-3, equal_nan=True)
ret[idx] = 0
return ret
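# Added example: values within atol=1e-3 of zero (including -0.0) are snapped
# to exactly 0 before comparison, e.g.
#   do_normalize(np.array([-0.0, 1e-4, 0.5])) -> array([0., 0., 0.5])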
def check_sparse_mathematical_core(name, stype,
forward_mxnet_call, forward_numpy_call, backward_numpy_call=None,
rhs_arg=None, data_init=9., grad_init=2., output_grad_stype=None,
input_grad_stype=None, force_overlap=False, density=.5,
ograd_density=.5, verbose=False, shuffle_csr_indices=True):
if verbose is True:
print("TESTING: " + name)
data = mx.symbol.Variable('data', stype=stype)
temp_input_grad_stype = input_grad_stype
if temp_input_grad_stype is None:
temp_input_grad_stype = stype
if rhs_arg is not None:
if is_scalar(rhs_arg):
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_with_scalar(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
else:
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_2(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
else:
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
if input_grad_stype is not None and input_grad_stype != expected_grad_result_type:
print("{}: explicit override of deduced input grad type '{}' with '{}'".format(
name, expected_grad_result_type, input_grad_stype))
expected_grad_result_type = input_grad_stype
shape = rand_shape_2d()
if verbose is True:
print("Shape: ", shape, "density: ", density, "force_overlap", force_overlap)
if stype == 'default':
data_tmp = np.zeros(shape)
if abs(density) >= 1e-4:
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
else:
arr_data = create_sparse_array_zd(
shape, stype, density=density,
data_init=data_init,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0]/2)] if force_overlap is True else None
)
)
data_tmp = arr_data.asnumpy()
if verbose is True:
print("arr_data indices", arr_data.indices.asnumpy())
if verbose is True:
print("input", data_tmp)
if backward_numpy_call is None:
arr_grad = None
elif expected_grad_result_type == 'default':
if abs(density) < 1e-4:
arr_grad = mx.nd.zeros(shape)
else:
arr_grad = mx.nd.ones(shape)
else:
arr_grad = create_sparse_array_zd(
shape,
expected_grad_result_type,
density=density,
data_init=1,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0]/2)] if force_overlap is True else None
)
)
if rhs_arg is not None:
test = forward_mxnet_call(data, rhs_arg)
else:
test = forward_mxnet_call(data)
args = list()
args.append(arr_data)
if arr_grad is not None:
exe_test = test.bind(default_context(), args=args, args_grad=[arr_grad])
else:
exe_test = test.bind(default_context(), args=args)
exe_test.forward(is_train=True)
assert exe_test.outputs[0].stype == expected_result_type
out = exe_test.outputs[0].asnumpy()
if rhs_arg is not None:
npout = forward_numpy_call(data_tmp, rhs_arg)
else:
npout = forward_numpy_call(data_tmp)
if verbose is True:
print("out", out)
print("npout", npout)
assert_almost_equal(out, npout, equal_nan=True)
if backward_numpy_call is not None:
if output_grad_stype == 'default' or output_grad_stype is None:
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
else:
out_grad = create_sparse_array_zd(
shape, output_grad_stype,
density=density,
data_init=grad_init,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=ograd_density,
force_indices=[(shape[0]/2)] if force_overlap is True else None))
npout_grad = out_grad.asnumpy()
if verbose is True:
print("npout_grad", npout_grad)
if rhs_arg is not None:
temp = backward_numpy_call(data_tmp, rhs_arg)
else:
temp = backward_numpy_call(data_tmp)
input_grad = npout_grad * temp
if verbose is True:
print(arr_grad.asnumpy())
exe_test.backward(out_grad)
if verbose is True:
print(arr_grad.asnumpy())
assert arr_grad.stype == expected_grad_result_type
arr_grad = arr_grad.asnumpy()
if verbose is True:
print(name)
print("arr_grad", arr_grad)
print("input_grad", input_grad)
assert_almost_equal(arr_grad, input_grad, equal_nan=True)
@with_seed()
def test_sparse_mathematical_core():
def util_sign(a):
if np.isclose(a, -0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
return 0
elif np.isclose(a, 0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
return 0
elif a < 0.0:
return -1
else: # a > 0.0:
return 1
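    # Added examples: util_sign(-5e-4) == 0 (inside the zero tolerance),
    # util_sign(-2.0) == -1, util_sign(3.0) == 1.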
# Check scalar binary operators
def check_binary_op_with_scalar(stype,
output_grad_stype=None,
input_grad_stype=None,
density=.5, ograd_density=.5,
force_overlap=False,):
# mul_scalar
check_sparse_mathematical_core("mul_scalar", stype,
lambda x, y: x * y,
lambda x, y: x * y,
lambda input, rhs: rhs,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# plus_scalar
check_sparse_mathematical_core("plus_scalar", stype,
lambda x, y: x + y,
lambda x, y: x + y,
lambda input, rhs: 1,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# minus_scalar
check_sparse_mathematical_core("minus_scalar", stype,
lambda x, y: x - y,
lambda x, y: x - y,
lambda input, rhs: 1,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# Check many basic unary operators
def check_mathematical_core(stype, output_grad_stype=None,
input_grad_stype=None, force_overlap=False,
density=.5, ograd_density=.5):
# negative
check_sparse_mathematical_core("negative", stype,
lambda x: mx.sym.sparse.negative(x),
lambda x: np.negative(x),
force_overlap=force_overlap,
density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# square
check_sparse_mathematical_core("square", stype,
lambda x: mx.sym.sparse.square(x),
lambda x: np.square(x),
lambda x: 2 * x,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
# sqrt
check_sparse_mathematical_core("sqrt", stype,
lambda x: mx.sym.sparse.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 1.0/(2.0 * np.sqrt(x)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
# cbrt
check_sparse_mathematical_core("cbrt", stype,
lambda x: mx.sym.sparse.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1.0/(3.0 * np.cbrt(x) * np.cbrt(x)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
# rint
check_sparse_mathematical_core("rint", stype,
lambda x: mx.sym.sparse.rint(x),
lambda x: np.rint(x),
force_overlap=force_overlap, density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# fix
check_sparse_mathematical_core("fix", stype,
lambda x: mx.sym.sparse.fix(x),
lambda x: np.fix(x),
force_overlap=force_overlap, density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# floor
check_sparse_mathematical_core("floor", stype, lambda x: mx.sym.sparse.floor(x),
lambda x: np.floor(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# ceil
check_sparse_mathematical_core("ceil", stype,
lambda x: mx.sym.sparse.ceil(x),
lambda x: np.ceil(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# round
check_sparse_mathematical_core("round", stype,
lambda x: mx.sym.sparse.round(x),
lambda x: np.round(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# trunc
check_sparse_mathematical_core("trunc", stype,
lambda x: mx.sym.sparse.trunc(x),
lambda x: np.trunc(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# sign
check_sparse_mathematical_core("sign", stype,
lambda x: mx.sym.sparse.sign(x),
lambda x: np.sign(x),
lambda x: np.zeros(x.shape),
output_grad_stype=output_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# log1p
check_sparse_mathematical_core("log1p", stype,
lambda x: mx.sym.sparse.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# expm1
check_sparse_mathematical_core("expm1", stype,
lambda x: mx.sym.sparse.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# sin
check_sparse_mathematical_core("sin", stype,
lambda x: mx.sym.sparse.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# tan
check_sparse_mathematical_core("tan", stype,
lambda x: mx.sym.sparse.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density,
ograd_density=ograd_density)
# arcsin
check_sparse_mathematical_core("arcsin", stype,
lambda x: mx.sym.sparse.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arctan
check_sparse_mathematical_core("arctan", stype,
lambda x: mx.sym.sparse.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# degrees
check_sparse_mathematical_core("degrees", stype,
lambda x: mx.sym.sparse.degrees(x),
lambda x: np.degrees(x),
lambda x: assign_each(x, lambda a: 180./np.pi),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# radians
check_sparse_mathematical_core("radians", stype,
lambda x: mx.sym.sparse.radians(x),
lambda x: np.radians(x),
lambda x: assign_each(x, lambda a: np.pi / 180.),
data_init=0.6, grad_init=1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# sinh
check_sparse_mathematical_core("sinh", stype,
lambda x: mx.sym.sparse.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# tanh
check_sparse_mathematical_core("tanh", stype,
lambda x: mx.sym.sparse.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
data_init=0.5, grad_init=1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arcsinh
check_sparse_mathematical_core("arcsinh", stype,
lambda x: mx.sym.sparse.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arctanh
check_sparse_mathematical_core("arctanh", stype,
lambda x: mx.sym.sparse.arctanh(x),
lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.),
data_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# abs
check_sparse_mathematical_core("abs", stype,
lambda x: mx.sym.sparse.abs(x),
lambda x: np.abs(x),
lambda x: assign_each(x, function=util_sign),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
if stype != "csr":
# rsqrt
check_sparse_mathematical_core("rsqrt", stype,
lambda x: mx.sym.sparse.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# cos
check_sparse_mathematical_core("cos", stype,
lambda x: mx.sym.sparse.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arccos
check_sparse_mathematical_core("arccos", stype,
lambda x: mx.sym.sparse.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# cosh
check_sparse_mathematical_core("cosh", stype,
lambda x: mx.sym.sparse.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
data_init=5, grad_init=5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arccosh
check_sparse_mathematical_core("arccosh", stype,
lambda x: mx.sym.sparse.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log10
check_sparse_mathematical_core("log10", stype,
lambda x: mx.sym.sparse.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log2
check_sparse_mathematical_core("log2", stype,
lambda x: mx.sym.sparse.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
try:
from scipy import special as scipy_special
# On scipy v1.0, psi([0, -1, -2, -3, ...]) = [ inf, inf, inf, inf, ...]
# On scipy v1.1, psi([0, -1, -2, -3, ...]) = [-inf, nan, nan, nan, ...]
# Map the behavior of v1.1 psi() to that of v1.0 for ints <= 0 for consistency
scipy_psi = np.vectorize(lambda x: np.inf if float(x).is_integer() and x <= 0 else
scipy_special.psi(x))
# gamma
check_sparse_mathematical_core("gamma", stype,
lambda x: mx.sym.sparse.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_psi(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# gammaln
check_sparse_mathematical_core("gammaln", stype,
lambda x: mx.sym.sparse.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_psi(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
for i in range(1):
print("pass", i)
for density in [0.0, random.uniform(0, 1), 1.0]:
for ograd_density in [0.0, random.uniform(0, 1), 1.0]:
for force_overlap in [False, True]:
print("{}, {}, {}".format(density, ograd_density, force_overlap))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Check unary ops (unary fwd, binary bwd)
check_mathematical_core('default', force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', output_grad_stype='default',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', output_grad_stype='row_sparse',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('csr', output_grad_stype='default',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('csr', output_grad_stype='csr',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# Check binary with scalar ops
check_binary_op_with_scalar('default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse', output_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse',
output_grad_stype='row_sparse',
density=density, ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='csr',
input_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='csr',
input_grad_stype='csr',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
@with_seed()
def test_elemwise_add_ex():
def check_elemwise_add_ex(lhs_stype, rhs_stype, shape, lhs_grad_stype=None, rhs_grad_stype=None):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
lhs_nd = rand_ndarray(shape, lhs_stype)
rhs_nd = rand_ndarray(shape, rhs_stype)
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd.asnumpy()
out_np = lhs_np + rhs_np
test = mx.symbol.sparse.elemwise_add(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(test, location, [out_np])
check_numeric_gradient(test, location)
grad_stypes = {}
if lhs_grad_stype is not None and lhs_grad_stype != 'default':
grad_stypes['lhs'] = lhs_grad_stype
if rhs_grad_stype is not None and rhs_grad_stype != 'default':
grad_stypes['rhs'] = rhs_grad_stype
check_symbolic_backward(test, location, [out_np], [out_np, out_np],
grad_stypes=grad_stypes)
shapes = [rand_shape_2d(), rand_shape_3d()]
for shape in shapes:
check_elemwise_add_ex('default', 'default', shape)
check_elemwise_add_ex('row_sparse', 'row_sparse', shape,
lhs_grad_stype='row_sparse', rhs_grad_stype='row_sparse')
@with_seed()
def test_cast_storage_ex():
def check_cast_storage(shape, density, from_stype, to_stype, check_numeric_grad=True):
x = mx.symbol.Variable('x', stype=from_stype)
x_nd = rand_ndarray(shape, from_stype, density=density)
x_np = x_nd.asnumpy()
out_np = x_np
test = mx.symbol.cast_storage(x, stype=to_stype)
location = {'x': x_nd}
check_symbolic_forward(test, location, [out_np])
        # consider disabling the numeric grad check for the gpu block kernel since the input is large
if check_numeric_grad:
check_numeric_gradient(test, location)
grad_stypes = {'x': to_stype}
check_symbolic_backward(test, location, [out_np], [out_np], grad_stypes=grad_stypes)
density = [1.00, 0.50, 0.01]
for d in density:
shape_2d = rand_shape_2d()
shape_3d = rand_shape_3d()
check_cast_storage(shape_2d, d, 'csr', 'default')
check_cast_storage(shape_2d, d, 'default', 'csr')
check_cast_storage(shape_2d, d, 'csr', 'csr')
check_cast_storage(shape_2d, d, 'row_sparse', 'default')
check_cast_storage(shape_2d, d, 'default', 'row_sparse')
check_cast_storage(shape_2d, d, 'row_sparse', 'row_sparse')
check_cast_storage(shape_3d, d, 'row_sparse', 'default')
check_cast_storage(shape_3d, d, 'default', 'row_sparse')
check_cast_storage(shape_3d, d, 'row_sparse', 'row_sparse')
for i in range(4, 6):
shape = rand_shape_nd(i, 5)
check_cast_storage(shape, d, 'default', 'row_sparse')
check_cast_storage(shape, d, 'row_sparse', 'default')
# Test specific gpu kernels
        if default_context().device_type == 'gpu':
dim0 = rnd.randint(1, 10)
# test gpu thread kernel
check_cast_storage((dim0, rnd.randint( 1, 32)), d, 'default', 'csr')
# test gpu warp kernel
check_cast_storage((dim0, rnd.randint( 32, 512)), d, 'default', 'csr')
# test gpu block kernel
check_cast_storage((dim0, rnd.randint(512, 1024)), d, 'default', 'csr',
check_numeric_grad=False)
# check race condition in block kernel
check_cast_storage((200, 128 * 2 + 1), d, 'default', 'csr',
check_numeric_grad=False)
# test gpu thread kernel
check_cast_storage((dim0, rnd.randint( 1, 32)), d, 'default', 'row_sparse')
# test gpu warp kernel
check_cast_storage((dim0, rnd.randint( 32, 512)), d, 'default', 'row_sparse')
# test gpu block kernel
check_cast_storage((dim0, rnd.randint(512, 1024)), d, 'default', 'row_sparse',
check_numeric_grad=False)
@with_seed()
def test_sparse_dot():
def test_infer_forward_stype(lhs_shape, rhs_shape, lhs_density, rhs_density, trans_a, trans_b):
all_stypes = ["default", "csr", "row_sparse"]
lhs_nd = rand_ndarray(lhs_shape, 'default', density=lhs_density)
rhs_nd = rand_ndarray(rhs_shape, 'default', density=rhs_density)
out_nd = mx.nd.dot(lhs_nd, rhs_nd, transpose_a=trans_a, transpose_b=trans_b)
out_np = out_nd.asnumpy()
for lhs_stype in all_stypes:
for rhs_stype in all_stypes:
for forward_stype in all_stypes:
lhs = lhs_nd.tostype(lhs_stype)
rhs = rhs_nd.tostype(rhs_stype)
out = mx.nd.dot(lhs, rhs, forward_stype=forward_stype,
transpose_a=trans_a, transpose_b=trans_b)
assert_almost_equal(out.tostype('default').asnumpy(), out_np, rtol=1e-3, atol=1e-4)
lhs_var = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs_var = mx.symbol.Variable('rhs', stype=rhs_stype)
out = mx.symbol.sparse.dot(lhs_var, rhs_var,
forward_stype=forward_stype,
transpose_a=trans_a, transpose_b=trans_b)
location = {'lhs': lhs, 'rhs': rhs}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
def test_dot_csr(lhs_shape, rhs_shape, rhs_stype, trans_lhs, lhs_density, rhs_density):
lhs_nd = rand_ndarray(lhs_shape, 'csr', density=lhs_density, shuffle_csr_indices=False)
lhs_dns = lhs_nd.tostype('default')
rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_density)
rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.tostype('default')
out = mx.nd.dot(lhs_nd, rhs_nd, transpose_a=trans_lhs)
out_dns = mx.nd.dot(lhs_dns, rhs_dns, transpose_a=trans_lhs)
out_np = out_dns.asnumpy()
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-3, atol=1e-5)
# test symbolic forward
lhs = mx.symbol.Variable('lhs', stype='csr')
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
out = mx.symbol.sparse.dot(lhs, rhs, transpose_a=trans_lhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
# test symbolic backward
backward_trans = not trans_lhs
rhs_backward_grad = mx.nd.dot(lhs_dns, out_dns, transpose_a=backward_trans).asnumpy()
expected = {'rhs': rhs_backward_grad}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'null', 'rhs': 'write'},
rtol=1e-3, atol=1e-4)
def test_dot_dns_csr(lhs_shape, rhs_shape, lhs_density, rhs_density, trans_lhs=False, trans_rhs=False):
lhs_nd = rand_ndarray(lhs_shape, stype='default', density=lhs_density)
rhs_nd = rand_ndarray(rhs_shape, stype='csr', density=rhs_density)
rhs_dns = rhs_nd.tostype('default')
if default_context() == mx.cpu():
forward_stype = 'csr'
else:
forward_stype = 'default'
out = mx.nd.sparse.dot(lhs_nd, rhs_nd, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
out_dns = mx.nd.dot(lhs_nd, rhs_dns, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
out_np = out_dns.asnumpy()
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-3, atol=1e-5)
# test symbolic forward
lhs = mx.symbol.Variable('lhs', stype='default')
rhs = mx.symbol.Variable('rhs', stype='csr')
out = mx.symbol.sparse.dot(lhs, rhs, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
if default_context() == mx.cpu():
# test symbolic backward
backward_trans = not trans_lhs
rhs_backward_grad = mx.nd.dot(lhs_nd, out_dns, transpose_a=backward_trans).asnumpy()
if trans_rhs is True:
rhs_backward_grad = rhs_backward_grad.T
expected = {'rhs': rhs_backward_grad}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'null', 'rhs': 'write'},
rtol=1e-3, atol=1e-4)
else:
transpose_b = not trans_rhs
lhs_backward_grad = mx.nd.dot(out_dns, rhs_dns, transpose_b=transpose_b)
expected = {'lhs': lhs_backward_grad.asnumpy()}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'write', 'rhs': 'null'},
rtol=1e-3, atol=1e-4)
def test_sparse_dot_zero_output(lhs_shape, trans_lhs, rhs_num_cols):
"""Test for nnr_out = 0. Before the fix, the test would fail."""
lhs = mx.nd.zeros(lhs_shape)
irow = np.random.randint(0, lhs_shape[0])
icol = np.random.randint(0, lhs_shape[1])
lhs[irow, icol] = 1.0
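        # (added note) lhs now has a single nonzero at (irow, icol); the
        # matching rhs row is zeroed below, so the dense product is exactly
        # zero and the sparse output has no non-empty rows (nnr_out == 0).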
if trans_lhs:
rhs = rand_ndarray(shape=(lhs_shape[0], rhs_num_cols), stype='default')
rhs[irow, :] = 0
else:
rhs = rand_ndarray(shape=(lhs_shape[1], rhs_num_cols), stype='default')
rhs[icol, :] = 0
dns_out = mx.nd.dot(lhs, rhs, transpose_a=trans_lhs)
assert mx.nd.sum(mx.nd.abs(dns_out)).asscalar() == 0
sps_out = mx.nd.sparse.dot(lhs.tostype('csr'), rhs.tostype('row_sparse'), transpose_a=trans_lhs)
assert same(dns_out.asnumpy(), sps_out.asnumpy())
density = [1.00, 0.5, 0.01]
for lhs_d in density:
lhs_shape = rand_shape_2d(50, 200)
rhs_d = 1
test_dot_csr(lhs_shape, (lhs_shape[1], 1), 'default', False, lhs_d, rhs_d) # test gpu SpMV
test_dot_csr(lhs_shape, (lhs_shape[0], 1), 'default', True, lhs_d, rhs_d) # (vector kernel)
test_dot_csr(lhs_shape, (lhs_shape[1], rnd.randint(5, 10)), 'default', False, lhs_d, rhs_d) # test gpu SpMM
test_dot_csr(lhs_shape, (lhs_shape[0], rnd.randint(5, 10)), 'default', True, lhs_d, rhs_d) # (scalar kernel)
test_dot_dns_csr(lhs_shape, (lhs_shape[1], rnd.randint(50, 200)), lhs_d, lhs_d)
test_dot_dns_csr(lhs_shape, (rnd.randint(50, 200), lhs_shape[1]), lhs_d, lhs_d, trans_rhs=True)
for rhs_d in density:
test_dot_csr(lhs_shape, (lhs_shape[1], rnd.randint(1, 10)), 'row_sparse', False, lhs_d, rhs_d)
test_dot_csr(lhs_shape, (lhs_shape[0], rnd.randint(1, 10)), 'row_sparse', True, lhs_d, rhs_d)
test_infer_forward_stype(lhs_shape, (lhs_shape[1], rnd.randint(10, 20)),
lhs_d, rhs_d, False, False)
test_infer_forward_stype(lhs_shape, (rnd.randint(10, 20), lhs_shape[1]),
lhs_d, rhs_d, False, True)
test_infer_forward_stype(lhs_shape, (lhs_shape[0], rnd.randint(10, 20)),
lhs_d, rhs_d, True, False)
test_infer_forward_stype(lhs_shape, (rnd.randint(10, 20), lhs_shape[0]),
lhs_d, rhs_d, True, True)
test_sparse_dot_zero_output(rand_shape_2d(50, 200), False, 40)
test_sparse_dot_zero_output(rand_shape_2d(50, 200), True, 40)
@with_seed()
def test_sparse_dot_determinism():
def check_dot_determinism(lhs_stype, rhs_stype, lhs_density, rhs_density, transpose_a, transpose_b, forward_stype):
lhs_row = rnd.randint(50, 100)
lhs_col = rnd.randint(50, 100)
if transpose_a:
if transpose_b:
rhs_shape = (rnd.randint(50, 100), lhs_row)
else:
rhs_shape = (lhs_row, rnd.randint(50, 100))
else:
if transpose_b:
rhs_shape = (rnd.randint(50, 100), lhs_col)
else:
rhs_shape = (lhs_col, rnd.randint(50, 100))
lhs_shape = (lhs_row, lhs_col)
lhs = rand_ndarray(lhs_shape, lhs_stype, density=lhs_density)
rhs = rand_ndarray(rhs_shape, rhs_stype, density=rhs_density)
res1 = mx.nd.sparse.dot(lhs, rhs, transpose_a=transpose_a, transpose_b=transpose_b, forward_stype=forward_stype)
res2 = mx.nd.sparse.dot(lhs, rhs, transpose_a=transpose_a, transpose_b=transpose_b, forward_stype=forward_stype)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.0, atol=0.0)
check_dot_determinism('csr', 'default', 0.1, 1.0, True, False, 'row_sparse')
forward_stype = 'csr' if default_context() == mx.cpu() else 'default'
check_dot_determinism('default', 'csr', 1.0, 0.1, False, False, forward_stype)
check_dot_determinism('default', 'csr', 1.0, 0.1, False, True, forward_stype)
check_dot_determinism('csr', 'default', 0.1, 1.0, True, False, 'default')
@with_seed()
def test_sparse_slice():
    def check_csr_slice(shape, slice_input):  # note: slice_input is currently unused
storage_type = 'csr'
B, _ = rand_sparse_ndarray(shape, storage_type)
        dense = B.asnumpy()  # distinct name so the numpy module 'np' is not shadowed
        begin = rnd.randint(0, B.shape[0] - 1)
        end = rnd.randint(begin + 1, B.shape[0])
        nd_slice = mx.nd.crop(B, begin=begin, end=end)
        assert same(nd_slice.asnumpy(), dense[begin:end]), (nd_slice.asnumpy(), dense[begin:end])
shape = (rnd.randint(7, 15), rnd.randint(1, 10))
check_csr_slice(shape, True)
check_csr_slice(shape, False)
@with_seed()
def test_sparse_retain():
def check_sparse_retain(shape, density, index_type=np.int64):
num_rows = shape[0]
rsp, _ = rand_sparse_ndarray(shape=shape, stype='row_sparse', density=density)
length = np.random.randint(1, num_rows + 1)
idx = random_sample(list(range(0, num_rows)), length)
idx.sort()
dns = rsp.asnumpy()
tensor_retained_expected = np.zeros(shape)
for i in idx:
tensor_retained_expected[i][:] = dns[i]
indices = mx.nd.array(idx, dtype=index_type)
rsp_retained = mx.nd.sparse.retain(rsp, indices=indices)
assert same(tensor_retained_expected, rsp_retained.asnumpy())
# check numeric gradient
data = mx.symbol.Variable('data')
idx = mx.symbol.Variable('indices')
sym = mx.sym.sparse.retain(data=data, indices=idx)
check_numeric_gradient(sym, [rsp, indices], grad_nodes=['data'],
grad_stype_dict={'data': 'row_sparse'})
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
densities = [0.01, 0.5, 1.0]
index_types = [np.float32, np.int32, np.int64]
for density in densities:
for itype in index_types:
check_sparse_retain(shape, density, itype)
check_sparse_retain(shape_3d, density, itype)
@with_seed()
def test_sparse_unary_with_numerics():
def check_sparse_simple(name, stype, mxnet_func, forward_numpy_call,
backward_numpy_call, output_grad_stype=None,
backward_is_use_output=False):
if output_grad_stype is None:
output_grad_stype = stype
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_2(forward_numpy_call, stype, backward_numpy_call, output_grad_stype)
if backward_is_use_output is True:
expected_grad_result_type = expected_result_type
shape = (3, 4)
data = mx.symbol.Variable("data")
grad_stypes = {'data' : expected_grad_result_type}
y = mxnet_func(data)
if stype == 'default':
xa = np.random.uniform(low=-1.0, high=1.0, size=shape)
xa_np = xa
else:
xa = create_sparse_array(shape, stype, data_init=None, rsp_indices=[1],
modifier_func=lambda a: a - 0.5,
shuffle_csr_indices=True)
xa_np = xa.asnumpy()
if output_grad_stype != 'default':
out_grad = create_sparse_array(shape, output_grad_stype, data_init=None,
rsp_indices=[1, 2],
modifier_func=lambda a: a - 0.5,
shuffle_csr_indices=True)
out_grad_np = out_grad.asnumpy()
else:
out_grad_np = np.ones(xa.shape)
out_grad = mx.nd.array(out_grad_np)
output_np = forward_numpy_call(xa_np)
input_grad_np = backward_numpy_call(output_np, out_grad_np)
outputs = check_symbolic_forward(y, [xa], [output_np])
output = outputs[0]
assert output.stype == expected_result_type
input_grad_dict = check_symbolic_backward(y, location=[xa], out_grads=[out_grad],
expected=[input_grad_np],
grad_stypes=grad_stypes)
inp_grad = input_grad_dict["data"]
assert inp_grad.stype == expected_grad_result_type
def check_sparse_function(name, mxnet_func, forward_numpy_call, backward_numpy_call,
backward_is_use_output=False):
check_sparse_simple(name, 'default', mxnet_func, forward_numpy_call, backward_numpy_call)
for output_grad_stype in [None, "row_sparse", "default"]:
check_sparse_simple(name, 'row_sparse', mxnet_func, forward_numpy_call, backward_numpy_call,
output_grad_stype=output_grad_stype,
backward_is_use_output=backward_is_use_output)
for output_grad_stype in [None, "csr", "default"]:
check_sparse_simple(name, 'csr', mxnet_func, forward_numpy_call, backward_numpy_call,
output_grad_stype=output_grad_stype,
backward_is_use_output=backward_is_use_output)
check_sparse_function('relu',
lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.0),
lambda output, outg: outg * assign_each(output, lambda x: x > 0.0), backward_is_use_output=True)
check_sparse_function('sigmoid',
lambda x: mx.sym.sigmoid(x),
lambda x: np.divide(1.0, (1.0 + np.exp(-x))),
lambda output, outg: outg * assign_each(output, lambda x: x * (1.0 - x)),
backward_is_use_output=True)
@with_seed()
def test_sparse_nd_zeros():
def check_sparse_nd_zeros(stype, shape):
zero = mx.nd.zeros(shape)
sparse_zero = mx.nd.zeros(shape=shape, stype=stype)
assert_almost_equal(sparse_zero.asnumpy(), zero.asnumpy())
shape = rand_shape_2d()
check_sparse_nd_zeros('row_sparse', shape)
check_sparse_nd_zeros('csr', shape)
check_sparse_nd_zeros('default', shape)
@with_seed()
def test_sparse_nd_zeros_like():
def check_sparse_nd_zeros_like(stype, shape):
zero = mx.nd.zeros(shape, stype=stype)
zero_like = mx.nd.sparse.zeros_like(zero)
assert_almost_equal(zero.asnumpy(), zero_like.asnumpy())
shape = rand_shape_2d()
check_sparse_nd_zeros_like('row_sparse', shape)
check_sparse_nd_zeros_like('csr', shape)
@with_seed()
def test_sparse_axis_operations():
def test_variations(func_name):
dim0 = 30
dim1 = 100
axes = [0, 1]
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
csr_array = rand_ndarray(shape=shape, stype='csr', density=density)
dns = csr_array.tostype('default')
for axis in axes:
ret = func_name(csr_array, axis=axis)
assert ret.stype == 'default'
ret_expected = func_name(dns, axis=axis)
assert_almost_equal(ret.asnumpy(), ret_expected.asnumpy())
def test_fallback(func_name, axis=0, keepdims=True, exclude=True):
dim0 = 30
dim1 = 100
shape = rand_shape_2d(dim0, dim1)
csr_array = rand_ndarray(shape=shape, stype='csr', density=0.01)
        ret = func_name(csr_array, axis=axis, keepdims=keepdims,
exclude=exclude)
test_variations(mx.nd.sum)
test_fallback(mx.nd.sum, axis=0, keepdims=True, exclude=True)
test_variations(mx.nd.mean)
test_fallback(mx.nd.mean, axis=0, keepdims=True, exclude=True)
@with_seed()
def test_sparse_square_sum():
dim0 = 30
dim1 = 30
axes = [0, 1]
keepdims = [False, True]
densities = [0, 0.01, 0.2, 0.5, 1.0]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
rsp = rand_ndarray(shape, 'row_sparse', density)
dns = rsp.tostype('default')
for axis in axes:
for keepdim in keepdims:
ret = mx.nd._internal._square_sum(rsp, axis=axis, keepdims=keepdim)
if axis == 1 and keepdim:
assert ret.stype == 'row_sparse'
else:
assert ret.stype == 'default'
ret_expected = mx.nd.sum(dns*dns, axis=axis, keepdims=keepdim)
# check forward result
assert_almost_equal(ret.asnumpy(), ret_expected.asnumpy())
rsp_data = mx.sym.Variable('data', stype='row_sparse')
test = mx.symbol._internal._square_sum(rsp_data, axis=axis, keepdims=keepdim)
# check symbolic backward since ograd can be an rsp
# and cannot be checked through check_numeric_gradient
# because it will add a loss layer as the output layer
# which makes ograd of the square_sum dense
if axis == 1 and keepdim:
dns_data = mx.sym.Variable('data')
baseline = mx.sym.sum(mx.sym.square(dns_data), axis=axis, keepdims=keepdim)
igrad_expected = mx.nd.empty(dns.shape)
baseline_exec = baseline.bind(default_context(), args=[dns],
args_grad=[igrad_expected])
baseline_exec.forward(is_train=True)
baseline_exec.backward([ret_expected])
# check backward when ograd is row sparse
check_symbolic_backward(test, [rsp], [ret_expected.tostype('row_sparse')],
[igrad_expected.asnumpy()], grad_stypes={'data': 'row_sparse'})
# check backward when ograd is dense
                # the stype of the square_sum output is determined at symbol binding stage.
# The ograd stype of the last layer is the same as the output stype of the last layer.
# Need to add one more layer after square_sum to trigger the kernel for ograd
# with default stype in square_sum op.
baseline1 = baseline + 1
baseline_exec1 = baseline1.bind(default_context(), args=[dns],
args_grad=[igrad_expected])
baseline_exec1.forward(is_train=True)
baseline_exec1.backward([ret_expected])
test1 = test + 1
check_symbolic_backward(test1, [rsp], [ret_expected], [igrad_expected.asnumpy()],
grad_stypes={'data': 'row_sparse'})
# check numeric gradient
check_numeric_gradient(test, [rsp], grad_stype_dict={'data': 'row_sparse'},
atol=1e-2, rtol=0.1)
@with_seed()
def test_sparse_storage_fallback():
""" test operators which don't implement FComputeEx or FStatefulComputeEx """
def check_broadcast_add(shape, lhs_stype, rhs_stype):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
lhs_nd = rand_ndarray(shape, lhs_stype)
rhs_nd = rand_ndarray(shape, rhs_stype)
lhs_dns = mx.nd.cast_storage(lhs_nd, stype='default')
rhs_dns = mx.nd.cast_storage(rhs_nd, stype='default')
out_dns = (lhs_dns + rhs_dns).asnumpy()
test = mx.symbol.broadcast_add(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(test, location, [out_dns])
check_numeric_gradient(test, location)
check_symbolic_backward(test, location, [out_dns], [out_dns, out_dns])
def np_softmax(x, axis=-1):
# fix for old numpy on Travis not supporting keepdims
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x)
x /= np.sum(x, axis=axis, keepdims=True)
return x
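    # Worked example (added note): np_softmax(np.array([[1., 2., 3.]]))
    # ~= [[0.0900, 0.2447, 0.6652]]; subtracting the row max first only
    # rescales numerator and denominator, leaving the result unchanged.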
def check_softmax_with_shape(lhs_stype, rhs_stype, shape, preserve_shape=False):
# bind with label
ctx = default_context()
X = mx.symbol.Variable('X', stype=lhs_stype)
L = mx.symbol.Variable('L', stype=rhs_stype)
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = rand_ndarray(shape, lhs_stype)
l = rand_ndarray(shape, rhs_stype)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx=ctx)
exec1 = Y.bind(ctx, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=1e-4)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(),
rtol=1e-3, atol=1e-4)
def check_concat(shape, lhs_stype, rhs_stype):
x = mx.symbol.Variable('x', stype=lhs_stype)
w = mx.symbol.Variable('w', stype=rhs_stype)
test = mx.sym.Concat(x, w)
x_nd = rand_ndarray(shape, lhs_stype)
w_nd = rand_ndarray(shape, rhs_stype)
location = {'x': x_nd, 'w': w_nd}
check_numeric_gradient(test, location)
def check_operator_with_temp_resource(shape, stype):
x = mx.symbol.Variable('x', stype=stype)
test = mx.sym.sum(x)
x_nd = rand_ndarray(shape, stype)
location = {'x': x_nd}
check_numeric_gradient(test, location)
shape = rand_shape_2d()
stypes = ['default', 'csr', 'row_sparse']
for lhs in stypes:
check_operator_with_temp_resource(shape, lhs)
for rhs in stypes:
check_broadcast_add(shape, lhs, rhs)
check_concat(shape, lhs, rhs)
check_softmax_with_shape(lhs, rhs, shape, preserve_shape=False)
check_softmax_with_shape(rhs, rhs, shape, preserve_shape=True)
@with_seed()
def test_sparse_elementwise_sum():
def check_sparse_elementwise_sum_with_shape(stypes, shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.sparse.add_n(*inputs, name='esum')
arr = []
arr_grad = [mx.nd.empty(shape, stype=stype) for stype in stypes]
densities = [0, 0.01, 0.5, 1.0]
for stype in stypes:
arr.append(rand_ndarray(shape, stype, densities[np.random.randint(0, len(densities))]))
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), atol=1e-5)
all_stypes = ['default', 'csr', 'row_sparse']
for dim in range(2, 4):
shape = tuple(np.random.randint(5, 10, size=dim))
rsp_test_cnt = np.random.randint(1, 9)
check_sparse_elementwise_sum_with_shape(['row_sparse' for i in range(rsp_test_cnt)], shape, rsp_test_cnt)
if dim == 2:
check_sparse_elementwise_sum_with_shape(['default', 'csr', 'default'], shape, 3)
test_len = np.random.randint(5, 10)
# at least one default type
stypes = ['default']
for i in range(test_len):
pick_side = np.random.randint(2)
pick_type = np.random.randint(3)
stypes = ([all_stypes[pick_type]] if pick_side == 0 else []) + stypes + ([all_stypes[pick_type]] if pick_side == 1 else [])
check_sparse_elementwise_sum_with_shape(stypes, shape, test_len+1)
@with_seed()
def test_contrib_sparse_embedding():
''' test sparse embedding operator '''
def check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, weight_stype):
# init executor
data = mx.sym.Variable("data")
weight = mx.sym.Variable("embed_weight", stype=weight_stype)
embed = mx.sym.contrib.SparseEmbedding(data=data, weight=weight, input_dim=in_dim,
output_dim=out_dim, deterministic=deterministic,
name="embed")
grad_req = {'data': 'null', 'embed_weight': 'write'}
exe_test = embed.simple_bind(default_context(), grad_req=grad_req, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
# init data
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_onehot = np.zeros((batch, in_dim)).astype(np.float32)
np_onehot[np.arange(batch), np_data] = 1.0
arg_map["data"][:] = np_data
# init grad
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
# weight
weight = arg_map["embed_weight"]
for density in densities:
# update weight based on density
weight[:] = rand_ndarray(weight.shape, weight_stype, density=density)
# check forward
exe_test.forward(is_train=True)
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, weight.asnumpy()), atol=1e-4)
# check backward
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, grad.asnumpy()), atol=1e-4)
# run twice to check if the result is deterministic when passing "deterministic=True" to SparseEmbedding
if deterministic:
grad_ref = grad_map["embed_weight"].asnumpy()
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), grad_ref, atol=0, rtol=0)
densities = [0, 0.5, 1]
in_dim = 50
out_dim = 3
batch = 8
stypes = ['default', 'row_sparse']
deterministics = [True, False]
for stype in stypes:
for deterministic in deterministics:
check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, stype)
check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, stype)
@with_seed()
def test_sparse_embedding():
''' test sparse embedding operator '''
def check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad, weight_stype):
target_stype = 'row_sparse' if sparse_grad else 'default'
# init executor
data = mx.sym.Variable("data")
weight = mx.sym.Variable("embed_weight", stype=weight_stype)
embed = mx.sym.sparse.Embedding(data=data, weight=weight, input_dim=in_dim,
sparse_grad=sparse_grad, output_dim=out_dim, name='embed')
grad_req = {'data': 'null', 'embed_weight': 'write'}
exe_test = embed.simple_bind(default_context(), grad_req=grad_req, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
# init data
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_onehot = np.zeros((batch, in_dim)).astype(np.float32)
np_onehot[np.arange(batch), np_data] = 1.0
arg_map["data"][:] = np_data
# init grad
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
# weight
weight = arg_map["embed_weight"]
for density in densities:
# update weight based on density
weight[:] = rand_ndarray(weight.shape, weight_stype, density=density)
# check forward
exe_test.forward(is_train=True)
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, weight.asnumpy()), atol=1e-4)
# check backward
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, grad.asnumpy()), atol=1e-4)
# check grad stype
assert(grad_map["embed_weight"].stype == target_stype)
densities = [0, 0.5, 1]
in_dim = 50
out_dim = 3
batch = 8
weight_stypes = ['default', 'row_sparse']
sparse_grads = [True, False]
for weight_stype in weight_stypes:
for sparse_grad in sparse_grads:
check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad, weight_stype)
check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad, weight_stype)
@with_seed()
def test_sparse_broadcast_add_sub():
def check_broadcast_add(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.add(mx_lhs, mx_rhs).asnumpy(), np.add(np_lhs, np_rhs), atol=1e-4)
def check_broadcast_sub(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.subtract(mx_lhs, mx_rhs).asnumpy(), np.subtract(np_lhs, np_rhs), atol=1e-4)
stype = 'csr'
shape = rand_shape_2d()
num_rows = shape[0]
num_cols = shape[1]
for density in [0.1 * i for i in range(10)]:
mx_lhs = rand_ndarray(shape, stype, density)
np_lhs = mx_lhs.asnumpy()
mx_rhs_row_2D = rand_ndarray((1, num_cols), 'default')
mx_rhs_row_1D = mx_rhs_row_2D.reshape((num_cols))
mx_rhs_col = rand_ndarray((num_rows, 1), 'default')
mx_rhs_scalar_2D = rand_ndarray((1, 1), 'default')
mx_rhs_scalar_1D = mx_rhs_scalar_2D.reshape((1, ))
for mx_rhs in [mx_rhs_row_2D, mx_rhs_row_1D, mx_rhs_col, mx_rhs_scalar_2D, mx_rhs_scalar_1D]:
np_rhs = mx_rhs.asnumpy()
check_broadcast_add(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_sub(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_add(mx_rhs, mx_lhs, np_rhs, np_lhs, np.float32)
check_broadcast_sub(mx_rhs, mx_lhs, np_rhs, np_lhs, np.float32)
@with_seed()
def test_sparse_broadcast_mul_div():
def check_broadcast_mul(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.multiply(mx_lhs, mx_rhs).asnumpy(), np.multiply(np_lhs, np_rhs), atol=1e-4)
def check_broadcast_div(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.divide(mx_lhs, mx_rhs).asnumpy(), np.divide(np_lhs, np_rhs), atol=1e-4)
stype = 'csr'
shape = rand_shape_2d()
num_rows = shape[0]
num_cols = shape[1]
for density in [0.1 * i for i in range(10)]:
mx_lhs = rand_ndarray(shape, stype, density)
np_lhs = mx_lhs.asnumpy()
mx_rhs_row_2D = rand_ndarray((1, num_cols), 'default')
mx_rhs_row_1D = mx_rhs_row_2D.reshape((num_cols))
mx_rhs_col = rand_ndarray((num_rows, 1), 'default')
mx_rhs_scalar_2D = rand_ndarray((1, 1), 'default')
mx_rhs_scalar_1D = mx_rhs_scalar_2D.reshape((1, ))
for mx_rhs in [mx_rhs_row_2D, mx_rhs_row_1D, mx_rhs_col, mx_rhs_scalar_2D, mx_rhs_scalar_1D]:
np_rhs = mx_rhs.asnumpy()
check_broadcast_mul(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_div(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
@with_seed()
def test_scatter_ops():
def csr_get_seen_points(name, csr_array, verbose=False):
"""Get a unique set of points in the CSR array, as well as a
corresponding parallel list of points and values"""
seen_points = set()
seen_point_list = list()
values = list()
row_count = csr_array.shape[0]
row_pointers = csr_array.indptr.asnumpy()
col_indexes = csr_array.indices.asnumpy()
data = csr_array.data.asnumpy()
for row in range(row_count):
start_pos = row_pointers[row]
end_pos = row_pointers[row + 1]
for col_index in range(start_pos, end_pos):
col = col_indexes[col_index]
val = data[col_index]
if verbose is True:
print("{}: (row, col) = ({}, {}) = {}".format(name, row, col, val))
seen_points.add((row, col))
seen_point_list.append((row, col))
values.append(val)
return seen_points, values, seen_point_list
def check_scatter_ops(name, shape, lhs_stype, rhs_stype, forward_mxnet_call, forward_numpy_call,
density=0.25, rhs_is_scalar=False, verbose=False):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
if rhs_is_scalar is False:
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
if verbose is True:
print(name)
if lhs_stype != 'default':
lhs_nd = create_sparse_array_zd(
shape, lhs_stype, density=density,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0] // 2)]  # force at least one overlap (integer row index)
))
else:
lhs_nd = rand_ndarray(shape, 'default')
if rhs_is_scalar is False:
if rhs_stype != 'default':
rhs_nd = create_sparse_array_zd(
shape, rhs_stype, density=density,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0] // 2)]  # force at least one overlap (integer row index)
))
else:
rhs_nd = rand_ndarray(shape, 'default')
else:
rhs_nd = 9
rhs = rhs_nd
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd if rhs_is_scalar is True else rhs_nd.asnumpy()
if verbose is True:
print("lhs = {}".format(lhs_np))
print("rhs = {}".format(rhs_np))
out_np = forward_numpy_call(lhs_np, rhs_np)
if verbose is True:
print("Numpy: out_np = {}".format(out_np))
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
out = forward_mxnet_call(lhs, rhs)
exe_test = out.bind(default_context(), args=location)
exe_test.forward(is_train=False)
out_nd = exe_test.outputs[0]
if verbose is True:
print("Sym: out_nd = {}".format(out_nd.asnumpy()))
# For row_sparse, check that rows only exist for rows that are
# either in lhs or rhs, and if they exist, they should equal
# the numpy values
if lhs_stype == 'default':
almost_equal(out_nd.asnumpy(), out_np, equal_nan=True)
elif lhs_stype == 'row_sparse':
seen_rows = set()
indices = lhs_nd.indices.asnumpy()
for i in range(len(indices)):
seen_rows.add(indices[i])
assert len(out_nd.indices.asnumpy()) == len(seen_rows)
out_nd_np = out_nd.asnumpy()
for row in seen_rows:
row_nd = out_nd_np[row]
row_np = out_np[row]
almost_equal(row_nd, row_np, equal_nan=True)
elif lhs_stype == 'csr' and rhs_is_scalar is False:
almost_equal(out_nd.asnumpy(), out_np, equal_nan=True)
else:
assert rhs_is_scalar
lhs_seen_points, _, _ = csr_get_seen_points("lhs", lhs_nd, verbose)
if rhs_is_scalar is False:
rhs_seen_points, _, _ = csr_get_seen_points("rhs", rhs_nd, verbose)
else:
rhs_seen_points = set()
input_seen_points = lhs_seen_points.union(rhs_seen_points)
out_seen_points, out_values, seen_point_list = csr_get_seen_points("out_nd", out_nd, verbose)
# Some may have been zero
assert len(out_seen_points) <= len(input_seen_points)
out_nd_np = out_nd.asnumpy()
val_index = 0
for row_col in seen_point_list:
row = row_col[0]
col = row_col[1]
val = out_values[val_index]
val_np = out_nd_np[row, col]
almost_equal(val, val_np, equal_nan=True)
val_index += 1
shape = (10, 5)
for lhs_stype in ['row_sparse', 'default', 'csr']:
for rhs_stype in ['row_sparse', 'default', 'csr']:
print("op: {}, lhs_stype: {}, rhs_stype: {}".format('_scatter_elemwise_div',
lhs_stype, rhs_stype))
check_scatter_ops('_scatter_elemwise_div', shape, lhs_stype, rhs_stype,
lambda l, r: mx.sym._internal._scatter_elemwise_div(l, r),
lambda l, r: l / r,
verbose=False)
for lhs_stype in ['row_sparse', 'default', 'csr']:
print("op: {}, lhs_stype: {}".format('_scatter_plus', lhs_stype))
check_scatter_ops('_scatter_plus', shape, lhs_stype, 'scalar',
lambda l, r: mx.sym._internal._scatter_plus_scalar(l, r),
lambda l, r: l + r,
rhs_is_scalar=True, verbose=False)
print("op: {}, lhs_stype: {}".format('_scatter_minus', lhs_stype))
check_scatter_ops('_scatter_minus', shape, lhs_stype, 'scalar',
lambda l, r: mx.sym._internal._scatter_minus_scalar(l, r),
lambda l, r: l - r,
rhs_is_scalar=True, verbose=False, density=0.5)
@with_seed()
def test_batchnorm_fallback():
# same test as test_operator.test_batchnorm_training, but tests fallback logic of batchnorm
stype = 'row_sparse'
for shape in [(2, 3), (2, 3, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
assertRaises(MXNetError, check_numeric_gradient, test, in_location, mean_std, numeric_eps=1e-3, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
assertRaises(MXNetError, check_numeric_gradient, test, in_location, mean_std, numeric_eps=1e-3, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-3, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-3, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
assertRaises(MXNetError, check_numeric_gradient, test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
assertRaises(MXNetError, check_numeric_gradient, test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01)
@with_seed()
def test_mkldnn_sparse():
# This test is trying to create a race condition described in
# https://github.com/apache/incubator-mxnet/issues/10189
arr = mx.nd.random.uniform(shape=(10, 10, 32, 32))
weight1 = mx.nd.random.uniform(shape=(10, 10, 3, 3))
arr = mx.nd.Convolution(data=arr, weight=weight1, no_bias=True, kernel=(3, 3), num_filter=10)
rs_arr = mx.nd.sparse.row_sparse_array((mx.nd.zeros_like(arr), np.arange(arr.shape[0])))
weight2 = mx.nd.random.uniform(shape=(10, np.prod(arr.shape[1:4])))
fc_res = mx.nd.FullyConnected(data=arr, weight=weight2, no_bias=True, num_hidden=10)
sum_res = mx.nd.elemwise_sub(arr, rs_arr)
res1 = np.dot(mx.nd.flatten(sum_res).asnumpy(), weight2.asnumpy().T)
print(res1 - fc_res.asnumpy())
almost_equal(res1, fc_res.asnumpy())
@with_seed()
def test_sparse_nd_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape):
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y \
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition', stype='csr')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
cond_nd = mx.nd.array(condition_np).tostype('csr')
outputs = where_exe_write.forward(is_train=True, \
condition=cond_nd, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=cond_nd.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=cond_nd, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
def test_where_numeric_gradient(shape):
condition = mx.sym.Variable('condition', stype='csr')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
test_where_helper((5, 9))
test_where_numeric_gradient((5, 9))
@with_seed()
def test_sparse_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
def check_sparse_quadratic_function(a, b, c, expected_stype):
# check forward and compare the result with dense op
ndim = 2
shape = rand_shape_nd(ndim, 5)
data = rand_ndarray(shape=shape, stype='csr')
data_np = data.asnumpy()
expected = f(data_np, a, b, c)
output = mx.nd.contrib.quadratic(data, a=a, b=b, c=c)
assert(output.stype == expected_stype)
assert_almost_equal(output.asnumpy(), expected)
a = np.random.random_sample()
b = np.random.random_sample()
check_sparse_quadratic_function(a, b, 0.0, 'csr')
check_sparse_quadratic_function(a, b, 1.0, 'default')
if __name__ == '__main__':
import nose
nose.runmodule()
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/tests/python/unittest/test_sparse_operator.py | Python | apache-2.0 | 113,121 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='IframePlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin', on_delete=models.CASCADE)),
('src', models.URLField()),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| satyrius/cmsplugin-iframe | cmsplugin_iframe/migrations/0001_initial.py | Python | mit | 674 |
class MockConn:
def __init__(self, name, config):
self.name = name
self.config = config
def test_manager_load():
import cloudbot.permissions
from cloudbot.permissions import PermissionManager
manager = PermissionManager(MockConn('testconn', {}))
assert not manager.group_perms
assert not manager.group_users
assert not manager.perm_users
assert not manager.get_groups()
assert not manager.get_group_permissions('foobar')
assert not manager.get_group_users('foobar')
assert not manager.get_user_permissions('foo!bar@baz')
assert not manager.get_user_groups('foo!bar@baz')
assert not manager.group_exists('baz')
assert not manager.user_in_group('foo!bar@baz', 'bing')
cloudbot.permissions.backdoor = "*!user@host"
assert manager.has_perm_mask("test!user@host", 'foo', False)
assert not manager.has_perm_mask("test!otheruser@host", 'foo', False)
user = 'user!a@host.com'
user_mask = 'user!*@host??om'
other_user = 'user1!b@hosaacom'
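# Note (added for clarity, not in the original test): user_mask is matched with
# fnmatch-style globbing, so '*' covers 'a' and '??' covers '.c' in 'host.com';
# user matches the mask while other_user ('user1', 'hosaacom') does not.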
cloudbot.permissions.backdoor = None
manager = PermissionManager(MockConn('testconn', {
'permissions': {
'admins': {
'users': [
user_mask
],
'perms': [
'testperm'
]
}
}
}))
assert manager.group_exists('admins')
assert manager.get_groups() == {'admins'}
assert manager.get_user_groups(user) == ['admins']
assert not manager.get_user_groups(other_user)
assert manager.get_group_users('admins') == [user_mask]
assert manager.get_group_permissions('admins') == ['testperm']
assert manager.get_user_permissions(user) == {'testperm'}
assert not manager.get_user_permissions(other_user)
assert manager.has_perm_mask(user, 'testperm')
assert manager.user_in_group(user, 'admins')
assert not manager.user_in_group(other_user, 'admins')
assert manager.remove_group_user('admins', user) == [user_mask]
manager.reload()
assert 'admins' not in manager.get_user_groups(user)
assert user_mask not in manager.get_group_users('admins')
assert 'testperm' not in manager.get_user_permissions(user)
assert not manager.has_perm_mask(user, 'testperm')
assert not manager.user_in_group(user, 'admins')
def test_mix_case_group():
from cloudbot.permissions import PermissionManager
manager = PermissionManager(MockConn('testconn', {
'permissions': {
'Admins': {
'users': [
'*!*@host'
],
'perms': [
'testperm'
]
}
}
}))
assert manager.group_exists('admins')
manager.remove_group_user('admins', 'user!name@host')
manager.reload()
assert manager.user_in_group('user!name@host', 'admins')
def test_add_user_to_group():
from cloudbot.permissions import PermissionManager
manager = PermissionManager(MockConn('testconn', {}))
manager.add_user_to_group('*!*@host', 'admins')
manager.add_user_to_group('*!*@mask', 'admins')
manager.reload()
assert manager.user_in_group('user!name@host', 'admins')
assert manager.user_in_group('otheruser!name@mask', 'admins')
manager.add_user_to_group('*!*@mask', 'admins')
manager.reload()
assert len(manager.get_group_users('admins')) == 2
| tiredtyrant/CloudBot | tests/core_tests/test_permission_manager.py | Python | gpl-3.0 | 3,424 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import SparkContext
"""
Create data in Cassandra fist
(following: https://wiki.apache.org/cassandra/GettingStarted)
cqlsh> CREATE KEYSPACE test
... WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
cqlsh> use test;
cqlsh:test> CREATE TABLE users (
... user_id int PRIMARY KEY,
... fname text,
... lname text
... );
> cassandra_outputformat <host> test users 1745 john smith
> cassandra_outputformat <host> test users 1744 john doe
> cassandra_outputformat <host> test users 1746 john smith
cqlsh:test> SELECT * FROM users;
user_id | fname | lname
---------+-------+-------
1745 | john | smith
1744 | john | doe
1746 | john | smith
"""
if __name__ == "__main__":
if len(sys.argv) != 7:
print >> sys.stderr, """
Usage: cassandra_outputformat <host> <keyspace> <cf> <user_id> <fname> <lname>
Run with example jar:
./bin/spark-submit --driver-class-path /path/to/example/jar \
/path/to/examples/cassandra_outputformat.py <args>
Assumes you have created the following table <cf> in Cassandra already,
running on <host>, in <keyspace>.
cqlsh:<keyspace>> CREATE TABLE <cf> (
... user_id int PRIMARY KEY,
... fname text,
... lname text
... );
"""
exit(-1)
host = sys.argv[1]
keyspace = sys.argv[2]
cf = sys.argv[3]
sc = SparkContext(appName="CassandraOutputFormat")
conf = {"cassandra.output.thrift.address": host,
"cassandra.output.thrift.port": "9160",
"cassandra.output.keyspace": keyspace,
"cassandra.output.partitioner.class": "Murmur3Partitioner",
"cassandra.output.cql": "UPDATE " + keyspace + "." + cf + " SET fname = ?, lname = ?",
"mapreduce.output.basename": cf,
"mapreduce.outputformat.class": "org.apache.cassandra.hadoop.cql3.CqlOutputFormat",
"mapreduce.job.output.key.class": "java.util.Map",
"mapreduce.job.output.value.class": "java.util.List"}
key = {"user_id": int(sys.argv[4])}
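# Each RDD element below becomes one CQL update: the key dict supplies the
# partition key ("user_id") and the value list fills the '?' placeholders
# (fname, lname) declared in cassandra.output.cql above.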
sc.parallelize([(key, sys.argv[5:])]).saveAsNewAPIHadoopDataset(
conf=conf,
keyConverter="org.apache.spark.examples.pythonconverters.ToCassandraCQLKeyConverter",
valueConverter="org.apache.spark.examples.pythonconverters.ToCassandraCQLValueConverter")
sc.stop()
| obulpathi/spark | python/io/cassandra_outputformat.py | Python | apache-2.0 | 3,245 |
import time
import os
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver  # as Observer
from watchdog import events
from watchdog.tricks import LoggerTrick
import argparse
import logging
import random
logger = logging.getLogger('watchntouch')
class PollingHandler(events.FileSystemEventHandler):
def __init__(self, options):
self.options = options
self.skip_next = set()
def touch_file(self, event):
if event.src_path == self.options.watchdir:
logger.debug("Ignoring change to root watchdir...")
return
if event in self.skip_next:
logger.debug("Event on skiplist: %s" % event)
self.skip_next.remove(event)
return
logger.debug("Re-touching file for event: %s" % event)
os.utime(event.src_path, None)
on_modified = touch_file
on_created = touch_file
def on_deleted(self, event):
if not self.options.simulate_rm:
return
if event.is_directory:
logger.debug("Simulating native rmdir: %s" % event)
os.mkdir(event.src_path)
os.rmdir(event.src_path)
else:
logger.debug("Simulating native rm: %s" % event)
if not os.path.isdir(os.path.dirname(event.src_path)): os.makedirs(os.path.dirname(event.src_path))  # makedirs raises OSError if the directory already exists
open(event.src_path, "a").close()
os.remove(event.src_path)
def on_moved(self, event):
if not self.options.simulate_mv:
return
logger.debug("Simulating move: %s" % event)
os.rename(event.dest_path, event.src_path)
os.rename(event.src_path, event.dest_path)
class NativeHandler(events.FileSystemEventHandler):
def __init__(self, other, options):
self.other = other
self.options = options
def on_modified(self, event):
logger.debug("Adding native event to skiplist: %s" % event)
self.other.skip_next.add(event)
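# Rationale (added comment): the polling observer also reports the changes this
# tool makes itself via os.utime(); recording natively observed events here
# lets PollingHandler.touch_file skip them instead of re-touching in a loop.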
def run():
parser = argparse.ArgumentParser(
description='Poll a directory for changes and re-touch changed paths '
'so that inotify-incapable mounts (like CIFS) receive inotify '
'events anyway.')
parser.add_argument('-i', '--polling-interval',
default=1.0,
help="Polling interval in seconds",
type=float,
dest='interval'
)
parser.add_argument('-l', '--log-level',
default=11,
help="Logger verbosity level",
type=int,
dest='loglevel'
)
parser.add_argument("-r", "--simulate-rm",
default=False,
action='store_true',
dest='simulate_rm',
help="Simulate rm operations by natively flashing a path in/out of "
"existence. Only use if you find your tools get confused when a file "
"disappeared from under them."
)
parser.add_argument("-m", "--simulate-mv",
default=False,
action='store_true',
dest='simulate_mv',
help="Simulate mv operations by natively moving a path back and forth."
" Only use if you find your tools require specific handling of"
" move events."
)
parser.add_argument('-w', '--watchdir',
default=".",
required=False,
help="the directory to watch for changes",
dest="watchdir"
)
args = parser.parse_args()
args.watchdir = os.path.realpath(os.path.abspath(args.watchdir))
logging.basicConfig(level=args.loglevel, format="%(message)s (%(levelname)s)\n")
logger.info("Watching %r", args.watchdir)
polling_handler = PollingHandler(args)
native_handler = NativeHandler(polling_handler, args)
polling_observer = PollingObserver()
native_observer = Observer()
native_observer.schedule(native_handler, path=args.watchdir, recursive=True)
native_observer.start()
polling_observer.schedule(polling_handler, path=args.watchdir, recursive=True)
polling_observer.start()
try:
while True:
time.sleep(args.interval)
except KeyboardInterrupt:
logger.info("Shutdown")
native_observer.stop()
polling_observer.stop()
native_observer.join()
polling_observer.join()
if __name__ == "__main__":
run() | rubyruy/watchntouch | watchntouch.py | Python | bsd-2-clause | 4,238 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
RUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import re
import os
import stat
import subprocess
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import *
class RUtils:
RSCRIPTS_FOLDER = 'R_SCRIPTS_FOLDER'
R_FOLDER = 'R_FOLDER'
R_USE64 = 'R_USE64'
@staticmethod
def RFolder():
folder = ProcessingConfig.getSetting(RUtils.R_FOLDER)
if folder is None:
folder = ''
return os.path.abspath(unicode(folder))
@staticmethod
def RScriptsFolder():
folder = ProcessingConfig.getSetting(RUtils.RSCRIPTS_FOLDER)
if folder is None:
folder = unicode(os.path.join(userFolder(), 'rscripts'))
mkdir(folder)
return os.path.abspath(folder)
@staticmethod
def createRScriptFromRCommands(commands):
scriptfile = open(RUtils.getRScriptFilename(), 'w')
for command in commands:
scriptfile.write(command + '\n')
scriptfile.close()
@staticmethod
def getRScriptFilename():
return userFolder() + os.sep + 'processing_script.r'
@staticmethod
def getConsoleOutputFilename():
return RUtils.getRScriptFilename() + '.Rout'
@staticmethod
def executeRAlgorithm(alg, progress):
RUtils.verboseCommands = alg.getVerboseCommands()
RUtils.createRScriptFromRCommands(alg.getFullSetOfRCommands())
if isWindows():
if ProcessingConfig.getSetting(RUtils.R_USE64):
execDir = 'x64'
else:
execDir = 'i386'
command = [
RUtils.RFolder() + os.sep + 'bin' + os.sep + execDir + os.sep
+ 'R.exe',
'CMD',
'BATCH',
'--vanilla',
RUtils.getRScriptFilename(),
RUtils.getConsoleOutputFilename(),
]
else:
os.chmod(RUtils.getRScriptFilename(), stat.S_IEXEC | stat.S_IREAD
| stat.S_IWRITE)
command = 'R CMD BATCH --vanilla ' + RUtils.getRScriptFilename() \
+ ' ' + RUtils.getConsoleOutputFilename()
proc = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
proc.wait()
RUtils.createConsoleOutput()
loglines = []
loglines.append('R execution console output')
loglines += RUtils.allConsoleResults
for line in loglines:
progress.setConsoleInfo(line)
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
@staticmethod
def createConsoleOutput():
RUtils.consoleResults = []
RUtils.allConsoleResults = []
add = False
if os.path.exists(RUtils.getConsoleOutputFilename()):
lines = open(RUtils.getConsoleOutputFilename())
for line in lines:
line = line.strip('\n').strip(' ')
if line.startswith('>'):
line = line[1:].strip(' ')
if line in RUtils.verboseCommands:
add = True
else:
add = False
elif add:
RUtils.consoleResults.append('<p>' + line + '</p>\n')
RUtils.allConsoleResults.append(line)
@staticmethod
def getConsoleOutput():
s = '<font face="courier">\n'
s += '<h2> R Output</h2>\n'
for line in RUtils.consoleResults:
s += line
s += '</font>\n'
return s
@staticmethod
def checkRIsInstalled(ignoreRegistrySettings=False):
if isWindows():
path = RUtils.RFolder()
if path == '':
return 'R folder is not configured.\nPlease configure it \
before running R scripts.'
R_INSTALLED = 'R_INSTALLED'
settings = QSettings()
if not ignoreRegistrySettings:
if settings.contains(R_INSTALLED):
return
if isWindows():
if ProcessingConfig.getSetting(RUtils.R_USE64):
execDir = 'x64'
else:
execDir = 'i386'
command = [RUtils.RFolder() + os.sep + 'bin' + os.sep + execDir
+ os.sep + 'R.exe', '--version']
else:
command = ['R --version']
proc = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
).stdout
for line in iter(proc.readline, ''):
if 'R version' in line:
settings.setValue(R_INSTALLED, True)
return
html = '<p>This algorithm requires R to be run. Unfortunately, it \
seems that R is not installed in your system, or it is not \
correctly configured to be used from QGIS</p> \
<p><a href= "http://docs.qgis.org/2.0/html/en/docs/user_manual/processing/3rdParty.html">Click here</a>to know more about how to install and configure R to be used with QGIS</p>'
return html
@staticmethod
def getRequiredPackages(code):
regex = re.compile(r'library\("?(.*?)"?\)')
return regex.findall(code)
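# Illustrative example (added note, not part of the original class): the regex
# matches quoted and unquoted calls alike, so
# RUtils.getRequiredPackages('library("sp")\nlibrary(raster)')
# returns ['sp', 'raster'].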
| mweisman/QGIS | python/plugins/processing/r/RUtils.py | Python | gpl-2.0 | 6,647 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('employee', '0003_position_owner'),
]
operations = [
migrations.AlterField(
model_name='position',
name='owner',
field=models.OneToOneField(blank=True, null=True, related_name='permanent_position', to='employee.Employee'),
),
]
| luiscberrocal/homeworkpal | homeworkpal_project/employee/migrations/0004_auto_20151026_1823.py | Python | mit | 471 |
#!/usr/bin/env python
#
# Copyright 2004,2007,2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_boolean_operators (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def help_ss (self, src_data, exp_data, op):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_s (s[1])
self.tb.connect (src, (op, s[0]))
dst = gr.vector_sink_s ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def help_bb (self, src_data, exp_data, op):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_b (s[1])
self.tb.connect (src, (op, s[0]))
dst = gr.vector_sink_b ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def help_ii (self, src_data, exp_data, op):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_i (s[1])
self.tb.connect (src, (op, s[0]))
dst = gr.vector_sink_i ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def test_xor_ss (self):
src1_data = (1, 2, 3, 0x5004, 0x1150)
src2_data = (8, 2, 1 , 0x0508, 0x1105)
expected_result = (9, 0, 2, 0x550C, 0x0055)
op = gr.xor_ss ()
self.help_ss ((src1_data, src2_data),
expected_result, op)
def test_xor_bb (self):
src1_data = (1, 2, 3, 4, 0x50)
src2_data = (8, 2, 1 , 8, 0x05)
expected_result = (9, 0, 2, 0xC, 0x55)
op = gr.xor_bb ()
self.help_bb ((src1_data, src2_data),
expected_result, op)
def test_xor_ii (self):
src1_data = (1, 2, 3, 0x5000004, 0x11000050)
src2_data = (8, 2, 1 , 0x0500008, 0x11000005)
expected_result = (9, 0, 2, 0x550000C, 0x00000055)
op = gr.xor_ii ()
self.help_ii ((src1_data, src2_data),
expected_result, op)
def test_and_ss (self):
src1_data = (1, 2, 3, 0x5004, 0x1150)
src2_data = (8, 2, 1 , 0x0508, 0x1105)
expected_result = (0, 2, 1, 0x0000, 0x1100)
op = gr.and_ss ()
self.help_ss ((src1_data, src2_data),
expected_result, op)
def test_and_bb (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
src2_data = (8, 2, 2, 1, 0x08, 0x05)
src3_data = (8, 2, 1, 1, 0x08, 0x05)
expected_result = (0, 2, 0, 1, 0x00, 0x00)
op = gr.and_bb ()
self.help_bb ((src1_data, src2_data, src3_data),
expected_result, op)
def test_and_ii (self):
src1_data = (1, 2, 3, 0x50005004, 0x11001150)
src2_data = (8, 2, 1 , 0x05000508, 0x11001105)
expected_result = (0, 2, 1, 0x00000000, 0x11001100)
op = gr.and_ii ()
self.help_ii ((src1_data, src2_data),
expected_result, op)
def test_or_ss (self):
src1_data = (1, 2, 3, 0x5004, 0x1150)
src2_data = (8, 2, 1 , 0x0508, 0x1105)
expected_result = (9, 2, 3, 0x550C, 0x1155)
op = gr.or_ss ()
self.help_ss ((src1_data, src2_data),
expected_result, op)
def test_or_bb (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
src2_data = (8, 2, 2, 1 , 0x08, 0x05)
src3_data = (8, 2, 1, 1 , 0x08, 0x05)
expected_result = (9, 2, 3, 3, 0x0C, 0x55)
op = gr.or_bb ()
self.help_bb ((src1_data, src2_data, src3_data),
expected_result, op)
def test_or_ii (self):
src1_data = (1, 2, 3, 0x50005004, 0x11001150)
src2_data = (8, 2, 1 , 0x05000508, 0x11001105)
expected_result = (9, 2, 3, 0x5500550C, 0x11001155)
op = gr.or_ii ()
self.help_ii ((src1_data, src2_data),
expected_result, op)
def test_not_ss (self):
src1_data = (1, 2, 3, 0x5004, 0x1150)
expected_result = (~1, ~2, ~3, ~0x5004, ~0x1150)
op = gr.not_ss ()
self.help_ss ((src1_data,),
expected_result, op)
def test_not_bb (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
expected_result = (0xFE, 0xFD, 0xFD, 0xFC, 0xFB, 0xAF)
op = gr.not_bb ()
self.help_bb ((src1_data,),
expected_result, op)
def test_not_ii (self):
src1_data = (1, 2, 3, 0x50005004, 0x11001150)
expected_result = (~1 , ~2, ~3, ~0x50005004, ~0x11001150)
op = gr.not_ii ()
self.help_ii ((src1_data,),
expected_result, op)
if __name__ == '__main__':
gr_unittest.run(test_boolean_operators, "test_boolean_operators.xml")
| manojgudi/sandhi | modules/gr36/gnuradio-core/src/python/gnuradio/gr/qa_boolean_operators.py | Python | gpl-3.0 | 6,062 |
# -*- coding: utf-8 -*-
'''
The 24 Game Player
Given any four digits in the range 1 to 9, which may have repetitions,
Using just the +, -, *, and / operators; and the possible use of
brackets, (), show how to make an answer of 24.
An answer of "q" will quit the game.
An answer of "!" will generate a new set of four digits.
An answer of "!!" will ask you for a new set of four digits.
An answer of "?" will compute an expression for the current digits.
Otherwise you are repeatedly asked for an expression until it evaluates to 24
Note: you cannot form multiple digit numbers from the supplied digits,
so an answer of 12+12 when given 1, 2, 2, and 1 would not be allowed.
'''
from __future__ import division, print_function
import os
from itertools import permutations, combinations, product, \
chain
from pprint import pprint as pp
from fractions import Fraction as F
import random
import ast
import re
import sys
if sys.version_info[0] < 3:
input = raw_input
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
def choose4():
'four random digits >0 as characters'
return [str(random.randint(1, 9)) for i in range(4)]
def ask4():
'get four random digits >0 from the player'
digits = ''
while len(digits) != 4 or not all(d in '123456789' for d in digits):
digits = input('Enter the digits to solve for: ')
digits = ''.join(digits.strip().split())
return list(digits)
def welcome(digits):
print(__doc__)
print("Your four digits: " + ' '.join(digits))
def check(answer, digits):
allowed = set('() +-*/\t' + ''.join(digits))
ok = all(ch in allowed for ch in answer) and \
all(digits.count(dig) == answer.count(dig) for dig in set(digits)) \
and not re.search(r'\d\d', answer)
if ok:
try:
ast.parse(answer)
except:
ok = False
return ok
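# Illustrative behaviour (added note, not in the original): with digits
# ['1', '2', '2', '1'], check('1+2*2+1', digits) is True, but
# check('12+12', digits) is False: the digit counts match, yet the r'\d\d'
# search rejects multi-digit numbers formed from the supplied digits.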
def solve(digits):
"""\
>>> for digits in '3246 4788 1111 123456 1127 3838'.split():
...     solve(list(digits))
Solution found: 2 + 3 * 6 + 4
'2 + 3 * 6 + 4'
Solution found: ( 4 + 7 - 8 ) * 8
'( 4 + 7 - 8 ) * 8'
No solution found for: 1 1 1 1
'!'
Solution found: 1 + 2 + 3 * ( 4 + 5 ) - 6
'1 + 2 + 3 * ( 4 + 5 ) - 6'
Solution found: ( 1 + 2 ) * ( 1 + 7 )
'( 1 + 2 ) * ( 1 + 7 )'
Solution found: 8 / ( 3 - 8 / 3 )
'8 / ( 3 - 8 / 3 )'
>>> """
digilen = len(digits)
# length of an exp without brackets
exprlen = 2 * digilen - 1
# permute all the digits
digiperm = sorted(set(permutations(digits)))
# All the possible operator combinations
opcomb = list(product('+-*/', repeat=digilen - 1))
# All the bracket insertion points:
brackets = ([()] + [(x, y)
for x in range(0, exprlen, 2)
for y in range(x + 4, exprlen + 2, 2)
if (x, y) != (0, exprlen + 1)]
+ [(0, 3 + 1, 4 + 2, 7 + 3)]) # double brackets case
for d in digiperm:
for ops in opcomb:
if '/' in ops:
d2 = [('F(%s)' % i) for i in d] # Use Fractions for accuracy
else:
d2 = d
ex = list(chain.from_iterable(zip_longest(d2, ops, fillvalue='')))
for b in brackets:
exp = ex[::]
for insertpoint, bracket in zip(b, '()' * (len(b) // 2)):
exp.insert(insertpoint, bracket)
txt = ''.join(exp)
try:
num = eval(txt)
except ZeroDivisionError:
continue
if num == 24:
if '/' in ops:
exp = [(term if not term.startswith('F(') else term[2])
for term in exp]
ans = ' '.join(exp).rstrip()
print("Solution found:", ans)
return ans
print("No solution found for:", ' '.join(digits))
return '!'
def main():
digits = choose4()
welcome(digits)
trial = 0
answer = ''
chk = ans = False
while not (chk and ans == 24):
trial += 1
answer = input("Expression %i: " % trial)
chk = check(answer, digits)
if answer == '?':
solve(digits)
answer = '!'
if answer.lower() == 'q':
break
if answer == '!':
digits = choose4()
trial = 0
print("\nNew digits:", ' '.join(digits))
continue
if answer == '!!':
digits = ask4()
trial = 0
print("\nNew digits:", ' '.join(digits))
continue
if not chk:
print("The input '%s' was wonky!" % answer)
else:
if '/' in answer:
# Use Fractions for accuracy in divisions
answer = ''.join((('F(%s)' % char) if char in '123456789' else char)
for char in answer)
ans = eval(answer)
print(" = ", ans)
if ans == 24:
print("That's right!")
print("Thank you and goodbye")
main()
os.system("pause")
| NicovincX2/Python-3.5 | Problèmes divers/24 game/24-game_solver.py | Python | gpl-3.0 | 5,244 |
#-------------------------------------------------------------------------------
# elftools: elf/structs.py
#
# Encapsulation of Construct structs for parsing an ELF file, adjusted for
# correct endianness and word-size.
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
from ..construct import (
UBInt8, UBInt16, UBInt32, UBInt64,
ULInt8, ULInt16, ULInt32, ULInt64,
SBInt32, SLInt32, SBInt64, SLInt64,
Struct, Array, Enum, Padding, BitStruct, BitField, Value,
)
from .enums import *
class ELFStructs(object):
""" Accessible attributes:
Elf_{byte|half|word|word64|addr|offset|sword|xword|xsword}:
Data chunks, as specified by the ELF standard, adjusted for
correct endianness and word-size.
Elf_Ehdr:
ELF file header
Elf_Phdr:
Program header
Elf_Shdr:
Section header
Elf_Sym:
Symbol table entry
Elf_Rel, Elf_Rela:
Entries in relocation sections
"""
def __init__(self, little_endian=True, elfclass=32):
assert elfclass == 32 or elfclass == 64
self.little_endian = little_endian
self.elfclass = elfclass
self._create_structs()
def _create_structs(self):
if self.little_endian:
self.Elf_byte = ULInt8
self.Elf_half = ULInt16
self.Elf_word = ULInt32
self.Elf_word64 = ULInt64
self.Elf_addr = ULInt32 if self.elfclass == 32 else ULInt64
self.Elf_offset = self.Elf_addr
self.Elf_sword = SLInt32
self.Elf_xword = ULInt32 if self.elfclass == 32 else ULInt64
self.Elf_sxword = SLInt32 if self.elfclass == 32 else SLInt64
else:
self.Elf_byte = UBInt8
self.Elf_half = UBInt16
self.Elf_word = UBInt32
self.Elf_word64 = UBInt64
self.Elf_addr = UBInt32 if self.elfclass == 32 else UBInt64
self.Elf_offset = self.Elf_addr
self.Elf_sword = SBInt32
self.Elf_xword = UBInt32 if self.elfclass == 32 else UBInt64
self.Elf_sxword = SBInt32 if self.elfclass == 32 else SBInt64
self._create_ehdr()
self._create_phdr()
self._create_shdr()
self._create_sym()
self._create_rel()
self._create_dyn()
self._create_sunw_syminfo()
self._create_gnu_verneed()
self._create_gnu_verdef()
self._create_gnu_versym()
self._create_note()
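# Illustrative usage (added sketch, not part of the original module): the
# generated Construct structs parse raw bytes directly, e.g.
#   structs = ELFStructs(little_endian=True, elfclass=64)
#   ehdr = structs.Elf_Ehdr.parse(open('/bin/ls', 'rb').read())
#   print(ehdr['e_machine'])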
def _create_ehdr(self):
self.Elf_Ehdr = Struct('Elf_Ehdr',
Struct('e_ident',
Array(4, self.Elf_byte('EI_MAG')),
Enum(self.Elf_byte('EI_CLASS'), **ENUM_EI_CLASS),
Enum(self.Elf_byte('EI_DATA'), **ENUM_EI_DATA),
Enum(self.Elf_byte('EI_VERSION'), **ENUM_E_VERSION),
Enum(self.Elf_byte('EI_OSABI'), **ENUM_EI_OSABI),
self.Elf_byte('EI_ABIVERSION'),
Padding(7)
),
Enum(self.Elf_half('e_type'), **ENUM_E_TYPE),
Enum(self.Elf_half('e_machine'), **ENUM_E_MACHINE),
Enum(self.Elf_word('e_version'), **ENUM_E_VERSION),
self.Elf_addr('e_entry'),
self.Elf_offset('e_phoff'),
self.Elf_offset('e_shoff'),
self.Elf_word('e_flags'),
self.Elf_half('e_ehsize'),
self.Elf_half('e_phentsize'),
self.Elf_half('e_phnum'),
self.Elf_half('e_shentsize'),
self.Elf_half('e_shnum'),
self.Elf_half('e_shstrndx'),
)
def _create_phdr(self):
if self.elfclass == 32:
self.Elf_Phdr = Struct('Elf_Phdr',
Enum(self.Elf_word('p_type'), **ENUM_P_TYPE),
self.Elf_offset('p_offset'),
self.Elf_addr('p_vaddr'),
self.Elf_addr('p_paddr'),
self.Elf_word('p_filesz'),
self.Elf_word('p_memsz'),
self.Elf_word('p_flags'),
self.Elf_word('p_align'),
)
else: # 64
self.Elf_Phdr = Struct('Elf_Phdr',
Enum(self.Elf_word('p_type'), **ENUM_P_TYPE),
self.Elf_word('p_flags'),
self.Elf_offset('p_offset'),
self.Elf_addr('p_vaddr'),
self.Elf_addr('p_paddr'),
self.Elf_xword('p_filesz'),
self.Elf_xword('p_memsz'),
self.Elf_xword('p_align'),
)
def _create_shdr(self):
self.Elf_Shdr = Struct('Elf_Shdr',
self.Elf_word('sh_name'),
Enum(self.Elf_word('sh_type'), **ENUM_SH_TYPE),
self.Elf_xword('sh_flags'),
self.Elf_addr('sh_addr'),
self.Elf_offset('sh_offset'),
self.Elf_xword('sh_size'),
self.Elf_word('sh_link'),
self.Elf_word('sh_info'),
self.Elf_xword('sh_addralign'),
self.Elf_xword('sh_entsize'),
)
def _create_rel(self):
# r_info is also taken apart into r_info_sym and r_info_type.
# This is done in Value to avoid endianity issues while parsing.
if self.elfclass == 32:
r_info_sym = Value('r_info_sym',
lambda ctx: (ctx['r_info'] >> 8) & 0xFFFFFF)
r_info_type = Value('r_info_type',
lambda ctx: ctx['r_info'] & 0xFF)
else: # 64
r_info_sym = Value('r_info_sym',
lambda ctx: (ctx['r_info'] >> 32) & 0xFFFFFFFF)
r_info_type = Value('r_info_type',
lambda ctx: ctx['r_info'] & 0xFFFFFFFF)
self.Elf_Rel = Struct('Elf_Rel',
self.Elf_addr('r_offset'),
self.Elf_xword('r_info'),
r_info_sym,
r_info_type,
)
self.Elf_Rela = Struct('Elf_Rela',
self.Elf_addr('r_offset'),
self.Elf_xword('r_info'),
r_info_sym,
r_info_type,
self.Elf_sxword('r_addend'),
)
def _create_dyn(self):
self.Elf_Dyn = Struct('Elf_Dyn',
Enum(self.Elf_sxword('d_tag'), **ENUM_D_TAG),
self.Elf_xword('d_val'),
Value('d_ptr', lambda ctx: ctx['d_val']),
)
def _create_sym(self):
# st_info is hierarchical. To access the type, use
# container['st_info']['type']
st_info_struct = BitStruct('st_info',
Enum(BitField('bind', 4), **ENUM_ST_INFO_BIND),
Enum(BitField('type', 4), **ENUM_ST_INFO_TYPE))
# st_other is hierarchical. To access the visibility,
# use container['st_other']['visibility']
st_other_struct = BitStruct('st_other',
Padding(5),
Enum(BitField('visibility', 3), **ENUM_ST_VISIBILITY))
if self.elfclass == 32:
self.Elf_Sym = Struct('Elf_Sym',
self.Elf_word('st_name'),
self.Elf_addr('st_value'),
self.Elf_word('st_size'),
st_info_struct,
st_other_struct,
Enum(self.Elf_half('st_shndx'), **ENUM_ST_SHNDX),
)
else:
self.Elf_Sym = Struct('Elf_Sym',
self.Elf_word('st_name'),
st_info_struct,
st_other_struct,
Enum(self.Elf_half('st_shndx'), **ENUM_ST_SHNDX),
self.Elf_addr('st_value'),
self.Elf_xword('st_size'),
)
def _create_sunw_syminfo(self):
self.Elf_Sunw_Syminfo = Struct('Elf_Sunw_Syminfo',
Enum(self.Elf_half('si_boundto'), **ENUM_SUNW_SYMINFO_BOUNDTO),
self.Elf_half('si_flags'),
)
def _create_gnu_verneed(self):
# Structure of "version needed" entries is documented in
# Oracle "Linker and Libraries Guide", Chapter 7 Object File Format
self.Elf_Verneed = Struct('Elf_Verneed',
self.Elf_half('vn_version'),
self.Elf_half('vn_cnt'),
self.Elf_word('vn_file'),
self.Elf_word('vn_aux'),
self.Elf_word('vn_next'),
)
self.Elf_Vernaux = Struct('Elf_Vernaux',
self.Elf_word('vna_hash'),
self.Elf_half('vna_flags'),
self.Elf_half('vna_other'),
self.Elf_word('vna_name'),
self.Elf_word('vna_next'),
)
def _create_gnu_verdef(self):
# Structure of "version definition" entries is documented in
# Oracle "Linker and Libraries Guide", Chapter 7 Object File Format
self.Elf_Verdef = Struct('Elf_Verdef',
self.Elf_half('vd_version'),
self.Elf_half('vd_flags'),
self.Elf_half('vd_ndx'),
self.Elf_half('vd_cnt'),
self.Elf_word('vd_hash'),
self.Elf_word('vd_aux'),
self.Elf_word('vd_next'),
)
self.Elf_Verdaux = Struct('Elf_Verdaux',
self.Elf_word('vda_name'),
self.Elf_word('vda_next'),
)
def _create_gnu_versym(self):
# Structure of "version symbol" entries is documented in
# Oracle "Linker and Libraries Guide", Chapter 7 Object File Format
self.Elf_Versym = Struct('Elf_Versym',
Enum(self.Elf_half('ndx'), **ENUM_VERSYM),
)
def _create_note(self):
# Structure of "PT_NOTE" section
self.Elf_Nhdr = Struct('Elf_Nhdr',
self.Elf_word('n_namesz'),
self.Elf_word('n_descsz'),
Enum(self.Elf_word('n_type'), **ENUM_NOTE_N_TYPE),
)
self.Elf_Nhdr_abi = Struct('Elf_Nhdr_abi',
Enum(self.Elf_word('abi_os'), **ENUM_NOTE_ABI_TAG_OS),
self.Elf_word('abi_major'),
self.Elf_word('abi_minor'),
self.Elf_word('abi_tiny'),
)
| g0t3n/Ksymhunter-gui | utils/elftools/elf/structs.py | Python | gpl-2.0 | 10,047 |
"""
evaluate the gamma distribution with different shape settings
"""
import pandas as pd
import os
from localutils import changedetect as dc, benchmark as bch, misc as ms
import logging
import ConfigParser
import traceback
import multiprocessing
import argparse
import numpy as np
METHOD = ['cpt_gamma%1', 'cpt_gamma%10', 'cpt_gamma%20', 'cpt_gamma%30', 'cpt_gamma%50', 'cpt_gamma%80',
'cpt_gamma%adpt', 'cpt_np', 'cpt_poisson']
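# Naming convention: each 'cpt_gamma%<shape>' entry encodes the gamma shape
# parameter after the '%' separator; '%adpt' selects a data-adaptive shape
# (the square root of the mean valid RTT), see worker() below.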
PENALTY = ["AIC", "BIC", "MBIC", "Hannan-Quinn"]
WINDOW = 2 # perform evaluation with window size equaling 2
MINSEGLEN = 3
def worker(f):
f_base = os.path.basename(f)
r = []
logging.info("handling %s" % f)
trace = pd.read_csv(f, sep=';')
if type(trace['rtt'][0]) is str:
trace = pd.read_csv(f, sep=';', decimal=',')
fact = trace['cp']
fact = [i for i, v in enumerate(fact) if v == 1] # fact in format of data index
logging.debug("%s : change counts %d" % (f_base, len(fact)))
for m, p in [(x, y) for x in METHOD for y in PENALTY]:
logging.info("%s: evaluating %s with %s" % (f_base, m, p))
if 'gamma' in m:
mm = m.split('%')
method_caller = getattr(dc, 'cpt_gamma')
if 'adpt' in mm[1]:
shape = np.sqrt(np.mean([i for i in trace['rtt'] if 0 < i < 1000]))
detect = method_caller(trace['rtt'], p, MINSEGLEN, shape=shape)
else:
shape = ms.type_convert(mm[1])
detect = method_caller(trace['rtt'], p, MINSEGLEN, shape=shape)
else:
method_caller = getattr(dc, m)
detect = method_caller(trace['rtt'], p, MINSEGLEN)
b = bch.evaluation_window_weighted(trace['rtt'], fact, detect, WINDOW)
r.append((os.path.basename(f), len(trace), len(fact),
b['tp'], b['fp'], b['fn'],
b['precision'], b['recall'], b['score'], b['dis'], m, p))
logging.debug('%r' % b)
return r
def worker_wrapper(args):
try:
return worker(args)
except Exception:
logging.critical("Exception in worker.")
traceback.print_exc()
raise
def main():
# logging setting
logging.basicConfig(filename='cpt_evaluation.log', level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S %z')
# load data collection configuration from config file in the same folder
config = ConfigParser.ConfigParser()
if not config.read('./config'):
logging.critical("Config file ./config is missing.")
return
# load the configured directory where collected data shall be saved
try:
data_dir = config.get("dir", "data")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
logging.critical("config for data storage is not right.")
return
# check if the directory is there
if not os.path.exists(data_dir):
logging.critical("data folder %s does not exist." % data_dir)
return
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory",
help="benchmark changepoint methods using the traces from the specified directory.",
action="store")
parser.add_argument("-f", "--filename",
help="file name for output.",
action="store")
args = parser.parse_args()
if not args.directory or not args.filename:
parser.print_help()
return
else:
trace_dir = args.directory
outfile = args.filename
if not os.path.exists(trace_dir):
print "%s doesn't exist." % trace_dir
return
files = []
for f in os.listdir(trace_dir):
if f.endswith('.csv') and not f.startswith('~'):
files.append(os.path.join(trace_dir,f))
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
res = pool.map(worker_wrapper, files)
with open(os.path.join(data_dir, outfile), 'w') as fp:
fp.write(';'.join(
['file', 'len', 'changes', 'tp', 'fp', 'fn', 'precision', 'recall', 'score', 'dis', 'method', 'penalty']) + '\n')
for ck in res:
for line in ck:
fp.write(";".join([str(i) for i in line]) + '\n')
if __name__ == '__main__':
main()
| WenqinSHAO/rtt | eval_gamma.py | Python | mit | 4,356 |
'''
Created on 04.03.2012
@author: michi
'''
from datetime import date
from abc import ABCMeta, abstractmethod, abstractproperty
import datetime
class XType:
__metaclass__ = ABCMeta
CUSTOM = 1
NUMBER = 2
STRING = 3
BOOL = 4
COMPLEX = 5
TEMPORAL = 6
MIXED = 7
def __init__(self, canBeNone=None, defaultValue=None):
if canBeNone is None:
canBeNone = True
self.canBeNone = canBeNone
self.defaultValue = defaultValue
self.canBeEdited = True
self.forceInteraction = False
@abstractproperty
def group(self):
pass
def value2String(self, value):
return unicode(value)
class BoolType(XType):
def __init__(self, boolNames=None):
XType.__init__(self)
self.defaultValue = False
@property
def group(self):
return XType.BOOL
@staticmethod
def castToBool(value):
if isinstance(value, (float, int)):
return bool(value)
if isinstance(value, basestring):
if value.lower() in ('true','yes'):
return True
if value.lower() in ('false','no'):
return False
try:
numeric = float(value)
return bool(numeric)
except ValueError:
pass
if value:
return True
return False
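# Illustrative examples of the coercion rules above (not part of the
# original module):
#   BoolType.castToBool('yes')   -> True
#   BoolType.castToBool('0.0')   -> False  (numeric strings go through float())
#   BoolType.castToBool([])      -> False  (falls back to Python truthiness)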
class NumberType(XType):
def __init__(self, pyTypeOfNumber):
XType.__init__(self)
self.pyType = pyTypeOfNumber
self.strNumberFormat = ''
self.strPrefix = ''
self.strSuffix = ''
self.minValue = -100000
self.maxValue = 100000
self.decimalsSeparator = '.'
self.thousandsSeparator = None
self.decimalsCount = None
if self.pyType is float:
self.defaultValue = 0.0
if self.pyType is int:
self.defaultValue = 0
def value2String(self, value):
if self.strNumberFormat:
number = ('{0:' + self.strNumberFormat + '}').format(value)
else:
number = NumberType.formatNumber(value,
self.decimalsCount,
self.decimalsSeparator,
self.thousandsSeparator)
return (self.strPrefix + number + self.strSuffix)
@property
def group(self):
return XType.NUMBER
@staticmethod
def intWithThousandsSeparator(x, sep=None):
if sep is None:
sep = ','
if type(x) not in [type(0), type(0L)]:
raise TypeError("Parameter must be an integer.")
if x < 0:
return '-' + NumberType.intWithThousandsSeparator(-x, sep)
result = ''
while x >= 1000:
x, r = divmod(x, 1000)
result = "%s%03d%s" % (sep, r, result)
return "%d%s" % (x, result)
def viewToModel(self, viewValue):
return viewValue
def modelToView(self, modelValue):
return modelValue
@staticmethod
def formatNumber(x, decimalsCount=None, decimalsSeparator=None,
thousandsSeparator=None, zeroFill=None, decimalsZeroFill=None):
if not isinstance(x, (float, int)):
raise TypeError("formatNumber needs float|int")
if zeroFill is not None or decimalsZeroFill is not None:
raise NotImplementedError("Zerofill and decimalsZeroFill currently not supported")
preDecimals = '0'
decimals = ''
if decimalsCount is None:
strVersion = str(x)
else:
strVersion = ("{0:." + str(decimalsCount) + "f}").format(x)
if "." in strVersion:
preDecimals, decimals = strVersion.split('.')
else:
preDecimals = strVersion
if decimalsCount is None:
decimalsCount = 0
if decimalsSeparator is None:
decimalsSeparator = '.'
if thousandsSeparator:
preDecimals = NumberType.intWithThousandsSeparator(int(preDecimals),
thousandsSeparator)
if not decimals:
return preDecimals
else:
return "{0}{1}{2}".format(preDecimals,decimalsSeparator,decimals)
class StringType(XType):
def __init__(self):
XType.__init__(self)
self.minLength = 0
self.maxLength = 10000000
self.defaultValue = unicode()
self.hints = []
@property
def group(self):
return XType.STRING
def value2String(self, value):
return value
class ColorType(StringType):
pass
class FilesystemPathType(StringType):
def __init__(self):
super(FilesystemPathType, self).__init__()
self.mustExist = False
class FilePathType(FilesystemPathType):
pass
class DirectoryPathType(FilesystemPathType):
pass
class ImagePathType(FilePathType):
pass
class UnitType(NumberType):
PREPEND = 1
APPEND = 2
VALUE_2_UNIT_SPACE = ' '
def __init__(self, unit=None, pyTypeOfNumber=None):
if pyTypeOfNumber is None:
pyTypeOfNumber = float
super(UnitType, self).__init__(pyTypeOfNumber)
self._unit = unit
self._unitStrPosition = UnitType.APPEND
self._value2UnitSpace = 0
if unit is not None:
self.unit = unit
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, unit):
self._unit = unit
if self._unitStrPosition == UnitType.APPEND:
self.strPrefix = ''
self.strSuffix = self.getUnitString(unit, self._value2UnitSpace,
self._unitStrPosition)
elif self._unitStrPosition == UnitType.PREPEND:
self.strSuffix = ''
self.strPrefix = self.getUnitString(unit, self.value2UnitSpace,
self._unitStrPosition)
@staticmethod
def getUnitString(unit,value2UnitSpace, position):
if value2UnitSpace == 0:
return unit
if position == UnitType.APPEND:
parts = []
for i in range(value2UnitSpace):
parts.append(' ')
parts.append(unit)
else:
parts = []
parts.append(unit)
for i in range(value2UnitSpace):
parts.append(' ')
return unicode("").join(parts)
@property
def unitStrPosition(self):
return self._unitStrPosition
@unitStrPosition.setter
def unitStrPosition(self, position):
self._unitStrPosition = position
self.unit = self.unit
@property
def value2UnitSpace(self):
return self._value2UnitSpace
@value2UnitSpace.setter
def value2UnitSpace(self, space):
self._value2UnitSpace = space
self.unit = self.unit
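# Minimal usage sketch for UnitType (illustrative; the currency unit is just
# an example):
#   priceType = UnitType(u'EUR')
#   priceType.value2UnitSpace = 1
#   priceType.value2String(12.5)  -> u'12.5 EUR'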
class DateType(XType):
def __init__(self):
XType.__init__(self)
self.minDate = None
self.maxDate = None
self.defaultValue = date.today()
@property
def group(self):
return XType.TEMPORAL
def value2String(self, value):
return unicode(value)
class ComplexType(XType):
def __init__(self, canBeNone=None, defaultValue=None):
XType.__init__(self, canBeNone=canBeNone, defaultValue=defaultValue)
@property
def group(self):
return XType.COMPLEX
class OneOfAListType(XType):
def __init__(self, canBeNone=None, defaultValue=None):
XType.__init__(self, canBeNone=canBeNone, defaultValue=defaultValue)
self.possibleValues = ()
self.xTypeOfItems = None
@property
def group(self):
return XType.MIXED
@property
def itemType(self):
if self.xTypeOfItems:
return self.xTypeOfItems
        return native2XType(type(self.possibleValues[0]))
class NamedFieldType(ComplexType):
def __init__(self, canBeNone=None, defaultValue=None):
ComplexType.__init__(self, canBeNone=canBeNone,
defaultValue=defaultValue)
self.defaultValue = {}
self.__xTypeMap = {}
self.__keys = []
def addKey(self, name, xType):
self.__keys.append(name)
self.__xTypeMap[self.__keys.index(name)] = xType
def keyType(self, key):
if isinstance(key, basestring):
return self.__xTypeMap[self.__keys.index(key)]
elif isinstance(key, int):
return self.__xTypeMap[key]
def keys(self):
return self.__keys
def keyName(self, index):
return self.__keys[index]
@property
def xTypeMap(self):
return self.__xTypeMap
def __getitem__(self, key):
return self.__xTypeMap[self.__keys.index(key)]
def __setitem__(self, name, xType):
self.__keys.append(name)
self.__xTypeMap[self.__keys.index(name)] = xType
def __contains__(self, item):
if isinstance(item, XType):
return item in self.__xTypeMap
return item in self.__keys
def __len__(self):
return len(self.__keys)
def __iter__(self):
return self.__keys.__iter__()
@classmethod
def create(cls, keys=None, **kwargs):
keys = kwargs if keys is None else keys
xtype = cls.__new__(cls)
xtype.__init__()
for key in keys:
xtype.addKey(key, keys[key])
return xtype
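# Illustrative use of the create() factory (field names are made up):
#   addressType = NamedFieldType.create(street=StringType(),
#                                       number=NumberType(int))
#   'street' in addressType            -> True
#   addressType['number'].group == XType.NUMBER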
class SequenceType(ComplexType):
def __init__(self, itemType, canBeNone=None, defaultValue=None):
ComplexType.__init__(self, canBeNone=canBeNone,
defaultValue=defaultValue)
self.defaultValue = []
self.maxLength = None
self.minLength = None
self.defaultLength = 0
self.defaultItem = None
self.itemType = itemType
class DictType(NamedFieldType):
pass
class ObjectInstanceType(NamedFieldType):
def __init__(self, cls, canBeNone=None, defaultValue=None):
NamedFieldType.__init__(self, canBeNone=canBeNone,
                                defaultValue=defaultValue)
self.cls = cls
def native2XType(type_):
if type_ in (int, float):
return NumberType(type_)
if type_ is bool:
return BoolType()
if type_ in (str, unicode):
return StringType()
if type_ in (dict, list, tuple, set):
return ComplexType()
if type_ in (datetime.datetime, datetime.date):
return DateType() | mtils/ems | ems/xtype/base.py | Python | mit | 10,775 |
'''test_classifypixels - test the ClassifyPixels module
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
'''
import base64
import numpy as np
import os
from cStringIO import StringIO
import tempfile
import unittest
import zlib
import cellprofiler.pipeline as cpp
import cellprofiler.cpimage as cpi
import cellprofiler.measurements as cpmeas
import cellprofiler.objects as cpo
import cellprofiler.workspace as cpw
try:
import cellprofiler.modules.classifypixels as C
#
# This tests for a version of Vigra that doesn't work with
# Ilastik.
#
import vigra
vigra.arraytypes._VigraArray # throws on latest version of Vigra
has_ilastik = True
except:
has_ilastik = False
INPUT_IMAGE_NAME = "inputimage"
def get_output_image_name(index):
return "outputimage%d" % index
if has_ilastik:
class TestClassifyPixels(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.classifier_fd, cls.classifier_file = tempfile.mkstemp(".h5")
binary_data = zlib.decompress(base64.b64decode(classifier_data))
f = os.fdopen(cls.classifier_fd, 'wb')
f.write(binary_data)
f.flush()
f.close()
@classmethod
def tearDownClass(cls):
#os.remove(cls.classifier_file)
pass
def test_01_01_load_v1(self):
data = """CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:11710
ClassifyPixels:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:1|show_window:True|notes:\x5B\x5D]
Select the input image:Color
Name of the output probability map:WhiteColonies
Class to choose:2
Input classifier file location:Default Input Folder\x7CNone
Classfier File:classifier.h5
"""
pipeline = cpp.Pipeline()
def callback(caller, event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO(data))
self.assertEqual(len(pipeline.modules()), 1)
module = pipeline.modules()[0]
self.assertTrue(isinstance(module, C.ClassifyPixels))
self.assertEqual(len(module.probability_maps), 1)
self.assertEqual(module.h5_directory.dir_choice,
C.DEFAULT_INPUT_FOLDER_NAME)
self.assertEqual(module.classifier_file_name, "classifier.h5")
self.assertEqual(module.probability_maps[0].output_image, "WhiteColonies")
self.assertEqual(module.probability_maps[0].class_sel, 2)
def test_01_02_load_v2(self):
data = """CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:11710
ClassifyPixels:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:2|show_window:True|notes:\x5B\x5D]
Select the input image:Color
Input classifier file location:Default Input Folder\x7CNone
Classfier File:classifier.h5
Probability map count:2
Name of the output probability map:BlueColonies
Class to choose:1
Name of the output probability map:WhiteColonies
Class to choose:2
"""
pipeline = cpp.Pipeline()
def callback(caller, event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO(data))
self.assertEqual(len(pipeline.modules()), 1)
module = pipeline.modules()[0]
self.assertTrue(isinstance(module, C.ClassifyPixels))
self.assertEqual(len(module.probability_maps), 2)
self.assertEqual(module.h5_directory.dir_choice,
C.DEFAULT_INPUT_FOLDER_NAME)
self.assertEqual(module.classifier_file_name, "classifier.h5")
self.assertEqual(module.probability_maps[0].output_image, "BlueColonies")
self.assertEqual(module.probability_maps[0].class_sel, 1)
self.assertEqual(module.probability_maps[1].output_image, "WhiteColonies")
self.assertEqual(module.probability_maps[1].class_sel, 2)
def make_workspace(self, classes, scale=255):
module = C.ClassifyPixels()
module.module_num = 1
module.image_name.value = INPUT_IMAGE_NAME
path, filename = os.path.split(self.classifier_file)
module.h5_directory.dir_choice = C.ABSOLUTE_FOLDER_NAME
module.h5_directory.custom_path = path
module.classifier_file_name.value = filename
module.probability_maps[0].output_image.value = get_output_image_name(0)
module.probability_maps[0].class_sel.value = classes[0]
for i, class_sel in enumerate(classes):
module.add_probability_map()
module.probability_maps[i+1].output_image.value = get_output_image_name(i+1)
module.probability_maps[i+1].class_sel.value = class_sel
pipeline = cpp.Pipeline()
def callback(caller, event):
self.assertFalse(isinstance(event, cpp.RunExceptionEvent))
pipeline.add_listener(callback)
pipeline.add_module(module)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
r = np.random.RandomState()
r.seed(0)
pixels = r.uniform(size=(64, 72))
image_set.add(INPUT_IMAGE_NAME, cpi.Image(pixels, scale=scale))
workspace = cpw.Workspace(
pipeline,
module,
image_set,
cpo.ObjectSet(),
cpmeas.Measurements(),
image_set_list)
return workspace, module
def test_02_01_run_one(self):
workspace, module = self.make_workspace([1])
module.run(workspace)
image = workspace.image_set.get_image(get_output_image_name(0))
pixels = image.pixel_data
self.assertEqual(pixels.shape[0], 64)
self.assertEqual(pixels.shape[1], 72)
def test_02_02_run_two(self):
workspace, module = self.make_workspace([1, 2])
module.run(workspace)
for i in range(2):
image = workspace.image_set.get_image(get_output_image_name(i))
pixels = image.pixel_data
self.assertEqual(pixels.shape[0], 64)
self.assertEqual(pixels.shape[1], 72)
def test_02_03_run_no_scale(self):
#
# Handle missing scale (e.g. derived image) gracefully
#
workspace, module = self.make_workspace([1, 2], None)
module.run(workspace)
for i in range(2):
image = workspace.image_set.get_image(get_output_image_name(i))
pixels = image.pixel_data
self.assertEqual(pixels.shape[0], 64)
self.assertEqual(pixels.shape[1], 72)
def test_03_01_prepare_to_create_batch(self):
module = C.ClassifyPixels()
module.h5_directory.dir_choice = C.ABSOLUTE_FOLDER_NAME
module.h5_directory.custom_path = "/foo"
def fn_alter_path(path, *args, **kwargs):
self.assertEqual(path, "/foo")
return "/bar"
module.prepare_to_create_batch(None, fn_alter_path)
self.assertEqual(module.h5_directory.custom_path, "/bar")
classifier_data = (
'eJztfQt8XEX1/+0LlncKLQSodJtCCfwKLFAgKP64ImAUpJEfSOSVDW3KBtp0'
'TVK6oNDlvQJCeCgBUVcFfwH5afAHGv+grFgkPA2CGP3xWAQkQIXwjqD0n7sz'
'Z3dm9s5jk52ws3fO55NO771n7+PMmfOdx5lzvtF4+JFbbb7T5o5HoZAz06lx'
'SNqIKbz1LOoYrkdxOQ2XKVz2TYfz03LXavH52fj+LN9xxx5xhMe9kSF4ThY9'
'Pn8fS8GixiM+0+SVzfi4HpeD02m+ZStbu7raV7S3dXbljle0tXav6Wzrguug'
'pxHF5/L0t3EGOo5hvUT6O52rvyOboTKMjwdeRTdoUnwPS2YT6G8/PgY9DmE9'
'4OmZswFdD78+g7jb6KH+T6l12TP/dczSw6eN62UIH4dnsBzMHYDv+ZlixoAR'
'r36aNkXHcdy+ZXZgdCtUuvg4shKVUD+WqptYO5DAZbIFlTw9621HxwNnivmy'
'cXwij3iIkB2Ymdez+k3F7wm9wJqLxHzw/PhGIVse9xq+Pk3EVvHEk3vzFug4'
'gds3sgMzuHagEQu4AR+Hd0VlGh/37YHKerPFZYlDYAeiuH4bcNm/QPQrwP2Q'
'Sx+z14F2xXyfwqXjgh2A/vPAEvF7Qj955DAxH+hxzefFfPAiDUvFfJVO05xN'
'0FgC19usccvq/Xf69Om5MzPGLYT3n9nhQ7HNdZ0lWzhLNs6A3yMDMG0aHCMK'
'4d/NmNYzG86QtC0+9nh6dnpsXIrJcn/ahIjFl4ZtxPxhXPbsKOYDPU3vJOYD'
'/evfWcxnBsn693C9UHpqRXbtF4//eRAPeuUNk7ce/9uycM6FHr5XZ/PG/7Yj'
'fu9NCHkiryX4t8Ql9A3mjP/tOf63gHjO9g6qs0X4mR4/xkp3Hi7DuNyV/t74'
'jc9vvJc4Dh/75ffx981xaTn4271jT3mBlEv0T23PA5/r/zugGpX7j963cQMl'
'd/eEdUX14H9/mZ1m65WlsOt/vrjed3BQvQOR9Q71Qdb7XAfVO9Tx5rjcGvNs'
'6yBdmkfc06v3hQ6qd0zu9vg/ns54OlFPPE9W72dumOESx05m5Q1YDxa5fl/N'
'yDv55pVzMvR5lq/wngr3i4WcLHlce8gIXFfSE8e5eK2i3rHvw/KB3hf/vpLr'
'Ha4x9e549b7/+N++BM8i/B/vt590kB2Hd1qMy8JcHZLDtdMZfekCfZnnOr40'
'UX2RtWdfu5N66KC3S7Fb/bMOfE3RjkC/T3i/+Ld3yDLPF35H7IAPnuU8lz3m'
'9TvV9NHTOQJ/HE8fPR2cXWBzN8Glnz7u4lD66M7BJcIo/3FZ9BT8xvgCbx65'
'humHdK+Or165+vSz4Tje2tm6qq0bppXLNo/cuwwdD7QBn+5+Za9R/Ur98khb'
'eTikPPqsPByqvWwFZ0iqXHn425nRPdHx2CdQyc4/9MENDiHvxsMDmE8YPTQn'
'a4e2/XICHAm5U9C+Das/7e3bysMh5dFvlDyAVrV3tHTFV7Z3t3SsXt7W0tV+'
'TltuBWFVd+fZLVCuWNOxDK8r5I671rZ3L4vlzsQ725a3L+tuWdvWfnqsu205'
'ZvPOj3d12lpWd6xs72hrWdnW2tnR3nE6XHa6WlfFV7a1jN8o1jLOurJ1Wduq'
'to7uwvXuztbu9hXty8b/Xd3RMt5liq32bj5+uj13o6627pZlrSuXkS/jMNeJ'
'96bOxzvHe2WduRs7fr/LS8E739bWsmz1Gng1qHdifOJL+vVtwCh9g3m9Rnyc'
'mSdkz/sDJXcR8yVx2TNfzJeGMizmq3TSr1cZo/RKvzwGrTwcUh5DVh4OKY9h'
'o+SB7PAsZwAf1ywU8w/jMrabmG8Ul4lFQjanBgswtbuYD9are2VA+zGRzD8n'
'vDcqYT2pF/+nFx/bdfnqJv12J2ug3Sms69bsJeaHeYfYPmI+MA8JyQQqtMPU'
'vkK2iif9ejVilF7pl0d8FzhDUlDkAfNxxDwHnteld1AERR7F+pEItH4Uy6PW'
'MHlw1vs+jY6z2K+Nt97nHEDfr9zrfcXvq7v+wobVn2551Ft5OKQ8IkbJA/U7'
'N83PO/buJ+aH9t23v5gvjcvRT4r5YlAeIGTL++8nDhTzwX7H1EFiPhhnZg8W'
'86mSfr1qMEqv5PKoyR17X438pcL4d7L+lYvlsCX1vGI5pJxKpNZl3WtaV7as'
'wus7+WO8nIT2VRILKugYrR51jZ9ZtnrlmlUdBYb2LnppKd7Ztqy9q7ByE+9c'
'fdrKtlUt3WfHc0s2navXkss1zpou9MOVrae1rewq6N2AYxbJ9jX3uKiE1mPn'
'm4JFyvYoJLZHeNqSXP/A4z26xVS6PYL9qtCvH/6MmB/aTZLnYRtQKjfuw7ip'
'oF/JbdEZl7JMlYv7/uPFms+i48gRqJTZ64EjUWntdTCJN5/QdATNV37/YV04'
'kcHbfszGifxAkUPQXvuOELIFjvTjRALc9I3GiQa8r7X5KFRK4xUdjUqLE8Ek'
'Hk4kjqL5KhUnwA2c8Fuci86Axwgi03Ci/hgxP7TXoaOEbIGjcuMEhB0g/NLx'
'VsK00TgRxXENkl9CpQwnQv+FS3xscSJYxMOJ9JdovkrFiTC+X6EdN+ImkaSe'
'ZxpONB0n5of2OvYlIVvgqNw4AdsNCvoVPR6dyVLPrVScQJTbNzcy//1pLnEc'
'vXnpzAxxPHbBiduS13n38bnOHu/pKvy+hLgGEH9Cvu/bqz1eHAJcpRCHwIU2'
'5F3zFs/mEr/zfuPV/U7E73DcEWrft//35MrszlGQL8RB4n2fUtwAdp9/+qmx'
'9WWK88CP/1Eu+Xr78ecSPHhfvbst807MNxIxWeol38DqEMvnH0OFfz+oM7lM'
'PN3hxbrBpBzrBviZWDc8nWNj1hCxH9jYJb7fy8YacfcYe7lMMWVksR/qXP/z'
'/JgypHwhlgNQCbEcWJ1jZcjqYAl2kY2Tw/JDnfDuw7cDrEy8bRVlkElOT72t'
'HGR4K0/nFoz/EVvjQGYQb8XF/G5hm50vpiTXv//KBGVZ5/qfp46HntuU0l8i'
'9ojMJsr0F+wJ7/esTS/ch60rLzQrYTNdCBnt1YUnv60JXkFsnBIxCZFs/Oe2'
'Fp7rkR3/BZNaVse9fc35qK0tbYnultwgDx0f5+1njuByX1zuh8v9cbkElwfg'
'8kBcHoTLBlweXLBbDT7vIiJ940b3y+hMknqeaePG1GlifmjnkajGlzKQ9I8b'
'M1i/stRzK3XcKMWN01FpcSOYxJs3HG6j+Sp13hDCrRFxek5AZ5ZTzzPO/seE'
'7AX7v0LnW5lH5bb/EJeyoF8zcVOByPuITLX/qQ5UWvsfTOLa/zNpvnLbf2Tv'
'Qnl3q0FJQi7IbxSTxPmG7+jZTMwXxeWAhA/McPYsMR+kywidK+aDzxz6hJgv'
'b9+3EPPpG0fdiUWZpJ5nGo4OrRbzg5wTq3S+lXmkfxy15CvoTJZ6rrE42o1K'
'i6PBJB6Ohpn8SeaMo+aciM6YPY4aWiPmz9v/LiFb4Ej/OCqB9cuMcRTPTy+0'
'Fh3X4xYtw4n+c1BpcSKYxMOJxrNpvkrFCQjbR8THOAmduYl6nmk4MfY1MT+0'
'1/TZQrbAUblxog7ft6Bf67F+1VAW0TSciOB5gaZ1qJThxHASlRYngkk8nIiv'
'o/kqFSfC+H6FdvzkyehMknqeaTgRvkDMD+11cJ2QLXCkfz4pciE6k6WeW6k4'
'gQj51vWHt3DRcZ0r4HM4vnKjt/W8qui3iPIh1vzoPkXfOpk/N9+3jrv/FeeB'
'br4ElbI4yj3foF7UiY+gko4SZalaiZfnPPMEKnl6NvxndDz2FwnfS5jvZfq5'
'PP4xrLe1WC9l+blHLkcl+Mdlvo/KNHzfLai0/bjqJF5+7qbvin5V/vzczfQ0'
'WxFB/zD1GzFfPj/yejEfvMjwQ2K+SqcpiKf5TThDUqX2W9g47sOXifnDuIx8'
'S8wHeup+W8wH+td0vZjPDJp8fm7YswQk27MEexIgl6/fngS4JtmzlO08ntpf'
'Q/Rj+XmmydK98Wq8X4fdL+Nv7zLnXn+v/3X/+/PvV549YfhcyfLFVOqesJLz'
'mCevW82MC4RyI8YVsvvLxg3q+59Ee2o8+XhyAzn77fME/gnIl8j/zN8DRJSE'
'/NXyd2fOXUvW38Dm54wp6med638eiJ+/mae/ICfVPY2g7+SeRtBpzp7GHI+3'
'n21h4fe8/WWOc+o6UjbRl954hdHV4m8jjpsO/pDaj+a4J6wr035H/h5a4nzR'
'XkP+/dT3l4lyXQORewGhPlRyXUN9KO55nmxudJ/zQGr1e8bxD+Dn8W217HkT'
'sTUgU789z/A7cv8pb5wY/gdmesP/racgnrZR/Ur98nCtPBwqvoqVh0O1l2/A'
'GZIqVx7+dqfnx+i49wZU8vJzp39J3o1nQ03Kz+0aVn/a27eVh0PKo8koeQDZ'
'/Nw+143Iz91slL6x+bmjvUL2/P6Z8I1iviQuI98R86Vx6d4k4qp80q9XUaP0'
'Sr88YlYeDpWv0srDofzMjZIHm5+7/3tifsjPHfqBmA+iLdf+UMwHXpb1PxLz'
'wXp1w81ivo+LZH4lmVtRCetJDXeiEmDPrstXN+m3O0kD7U5hXbe/T8ien3cI'
'/UTMB/3w2tvFfNAO6/9HzFfppF+vUkbplX551NwIZ0gKijyK80c24Xldm58b'
'yaM20PpRLI+BG+AMSZUrD/95budXqEhivzZu/HvG7838/NwZw+pPtzwGrTwc'
'Uh5DRsmDzc/d8FMxP7Tvxp+J+dK47PmFmA/i+4TuEPOB33ntz8V8sHup/n/F'
'fDDOTN4l5lMl/Xo1bJRe6cvPncVysPm5PQabnxuRbD9v5B5UQuux803BIn37'
'amN4vJeknlfp9ojdV5v4tZgf2k34bp1vZR7p31c7gvUrSz23cnHff7zYj/c1'
'DWVQKbPXzfeh0trrYBJvPmE0Q/OZE39h8Ep0Jkk9zzScSP9OzA/ttfG3Ot/K'
'PNKPE41XoTNZ6rmm4cQw3tc69ntUynAiOYhKixPBJB5O1D5A85mDE/U96EyS'
'ep5pODH4oJgf2mv8ASFb4Eg/TvRh/cpSzzUNJ5yHURF+FJUynOh7DJUWJ4JJ'
'PJxwH6X5KhUniuN+pq5GZ8yO+zn6BzF/Pv7io0K2wJH+uJ+7XoPOmB33s/5x'
'dNyI41VJxxN/QqXFiWASDydiT9B8uvxYbP7QJdeiM0nqeabh2uBTYv78+OdJ'
'nW9lHukf/9yJ9StLPbdScU2KV/+HSotXwSTu/Ndfab5KHdcU57257jp0xuy8'
'N4NPi/nz9v+vQrbAkf68N8PPoDNm5L1BlIsHMjL//WkuOl7kCvjyZfi8S0uJ'
'V0fEn3Mk/HWu//nimE/jXdKi+H1efDI2ppYXp2w2wefFhPLa0vYFNhe3GYh/'
'lqvpBeN/8wu/y8WE8kr4nXff8W5yzi7Cvb0+wT7jf3s5hXhTOD6ai8fB7iJc'
'7pl/vH/csWNPoWIwRmff+hR5TMT1UouxKIuz9fxpN+Lns3XE3ocfo444Hr1v'
'44aPKY4bXC++b7libMK1ScSAZOs3fuaGGa7s/YmSqH+2zfrKr3/Wga+VSf78'
'mJO8uHdkGxXFvcPEi3sHbdDhtBk2fv7YBSduy31X+vxi1/88EC9eM1Cd63+e'
'r3OkTDzd4dktMpadxG6VGneUlRcRV5TVscnGDWVjJbJ8cH/e79nnc9/DkiLJ'
'xn+Dr6LSjv+CSdzx3wjNpzt/uPO4mB/yhycuEfPBd6QlfFFcjl0n5oP9ReG3'
'xXyQP7zxn2I+sNXxG8R80B6HJN9RrnH0Afnn5tcHn0Vn6AlG08bRNRvE/CDn'
'gVd0vpV5VO5x9N74vgX9Ovk5dCZuxPqgFEdxgB6Lo8EkHo5GmfjRlTqPGsb3'
'K7TPR7LoTJJ6nnH2/y0xf97+c+J8B5X0r6Md/Tw6k6WeW6n2n+cf0oj7hbF3'
'USnDidH3UGlxIpjEw4nUuzRfpeJE8Xrb4r+hM2avtzWMifmhvWbfFbIFjvSv'
't/Vg/TJjvY2HE3E8L9DzISplOFH7b1RanAgm8XCi/0Oar1JxIozvR+R3eAGd'
'SVLPMw0noh+J+aG9hv6l863MI/3jiQzWryz1XNNwohcjwAD+UFkc5NGZiM/F'
'x0PL0DEd5clStRIvv3Z4KdIDbh73/0LXm4+X8LVivmV0R4PH34yas5OYSeov'
'P79246aID/zHwzuj4zQ+7gujY9vPqU7i5dfurxVVePnzaw/8h1jBoP80crCY'
'D/S4xhXzwYs0HGm2YuuPW9gT8kO0SsV1Ng57wybi+g3jsqdGzJePhztbTf/6'
'tzVbrxBNPr+2p1083z/w+cS+f+BnReUMBR6Obxqbd5XwvZL56yn5/zmPhf7B'
'8eObuE+tSCbgz+onE8jZDJql4A9J5RQGHl5OYX7OWvg2llRz1vrKMhJywC+O'
'rYsJ+dZGn//264r+z+q+lX6+q35+hJycwlT+Z3g3lfzPcB+oW+89Fo//FfyZ'
'HTeMy13zp3zbBOF3ztarUp756EEdqjgv8/9UrOeLwT8SfK6F9y3yr+Tfn+9f'
'yta7Vw9l8Kml2ijUKa53x6t3r64XFJ4Dfu+8Nhq9eenMDHGcat3w9gR9cbXU'
'ReyAD55V9F+X+fry82RP1NeXU1eltFHW1zd5wP7vinx9ibZYx3zPZP3HayT3'
'k/nvi/OQ6+9X9hrVr9Qvj7SVh0PFObLycKj2MtMsefjPT40uQF8xNgeVvPza'
'zv7k+Kka8munDas/7e3bysMh5dFvlDyAbH5tn+tG5NceMErf2Pzame3E82tR'
'XCbnivmSuOzZXsyXhnIHs+f1piC/llF6NQX5taw8HCq/lpWHQ8WzMEoebH7t'
'mp3E9hDya8fmifkgv3biE2I+WKZJ7SLmg/Xq3vmVaa9lfiXh3dB7w3pS7z7o'
'GPwP7bp8dZN+u5M10O4U1nVrdhUrPsw7xBZJ7AQuE7uL+aAdpgxvcPr1asQo'
'vdIvj/jcIMujOP9jP57Xtfm1kTwSgdaPYnnUGiYP/3nu6BL0FVns18bbT+As'
'pvHE/PzaYcPqT7c86q08HFIeEaPkwebX7t1D3P+D9t23p5gvjcvRfcV8EP8m'
'tljMB37nib3EfLC7J7W3mA/GmdlIefq7+vWqwSi90pdf28VysPm1PQabXxuR'
'bL9rz0F067HzTcEiffEJBvF4z+z4BMMNYsWHdpM80DYQkvTHJ1i8GbpkdnyC'
'mk+ir4gcgkqZvR74tLXXQSbefELTIXrnE/Tlw3sat2Oz8+E5h6rhRN8htmGS'
'pD8fXmJzdMnsfHgNLnr95sPUcCL7WYsTQSYeTiQOMwMniuPWprZAl+gRuGk4'
'UX+EGk4MHWYbJknlxgloD8T82Zbokms0TkRxXINkoxpOhL5gcSLIxMOJdKMZ'
'OFE8nkhuVQ3jiaaj1HBirNE2TJL0jyfmbV0N44nU0ej1+45RHE80WZwIMvFw'
'YvAYvTgBZPNrL94G9+yo55mGa/XHKo5/llpDQpL+OJ63Y/3KUs+tVFyT4tWX'
'LV4FmbjzX8ebOq6pb66GcU19VNH+H28bJkm8OFWQv5SXn1kl9hGWtG9+Zohd'
'xeRn5saSSy6+ioyLFCvEeptQXuXR23peVYwrhWOc7bP2XvhWfz7x7wmS/L7e'
'9T9fUXGq2Jy02c7jqbzIfXuc8F4pccFKiFMli8UHcRonHwcM8iADkTHbQOak'
'fEHfOXmQHU/XvTqDPpLDxOrz4vEtJO7t1Y0Xn6+euDf+rRvGJROrL1H3JhWb'
'j62n5EcnvcboMUuqMRjrlH5fyJetFh+OjY9598K3yqQX/PhlpcTThGeVKZ4m'
'266K4o+q5xdXyyfPj5k5WbvFb3eV0q6AyHYFOMRvV9VN+uc11zebNK8pG//V'
'r7DjvyATd/zH5FPQnV87erRYwSC/9vA0MR98hzNdzBfFZTIm5oP9RX0dYj7I'
'rz3ULeYDfByaoza+i0i+Q9886pNfqYZ51PjpanKubbMGjiT986jHnWjSPCpv'
'fbAP24/BM9TWB5tWWrwNMvHwduQMM+Zbi/0Nl5yELpntb9i7Sg0n3DNtwyRJ'
'v7/hBqxfZvsbDuH+42hcDScSnRYngkw8nKj5qhk4Ecb3I/wyTqmG8USmSw0n'
'Yl+1DZOkKfDLOKUaxhNjeP6gdq0aTqQTFieCTDycaFhrBk4UjyeuOLUaxhMj'
'Z6vhRM9a2zBJ0j+eGDvHpPEEIt/8gxrXwiEfrfD3hI+E0lptUR665HWrmfeZ'
'qA/KHJf3e+7+r6/j3uh5JM7y400PnI+1Bh9nsjTuWqpu4uUxH3kY6UEZ8rq6'
'dEQyRDz9Da/D+ng+qb/8POY1FyE+8NMfuR4dp/Fx/Lu2n1jNxMtjnviWqMLB'
'npYvj3myT6xg0P/s/4WYD/R4+B4xH7yI8zuzFVt/fMjmi/0QrVL7RWy8e+dC'
'cf2Gcdn8TTEf6GnsSjX9S1xltl4h4vW71POYez5pKr67nq8akQfa8XzgPGzb'
'vvBbFg+pfMCen9sC4p7e77w6W1Q4BT6IMt9Sd+FXNskQx6yvouPcspY8Tl7x'
'4uuMPaTuRxB7neWrk/zet7/s7jH2MvC5/r/Lk+T+kBtXNXdu4T68egcS+Jbm'
'2qrnt7idU6h/Vf96uMb415fo+8j68KJ67Q9vgb8XxhHF302W7o1Xk+OuR0IX'
'QH3Lcg7njtNPja1XHPco+vKfe/29/tf9359/P/DVl/u8ito7nJtk3ndpe4d7'
'KrZ3Nhf16H0bN1ByCZ93KT5WqkfC11jW3pTGt9Hnv/06zUfbH97vHLmeQB57'
'3n34ee5V8r6L2rtXJ9sR/Kr1DnnfoY4l+2liB7z8Dlm3I7/ffRv8TTJbLPMj'
'Z20Cz9aL6yh69oX4/fhtzP+8OrbwxorOMJJ++K/FfRX9/cqoUf1K/fKIWXk4'
'VP4fKw+Hai/nmyUPf5sz8D30FZmrUcnLYz74M9ImVUMe85hh9ae9fVt5OFSc'
'VaPkAWTzmPtcNyKPedIofWPzmKd6xPNrUVw2XiPmS+Ky+VoxXxqXsevMntfT'
'r1cpo/RKvzx6rDwcUh69Vh4OKY+0UfJg85gPf1tsDyGPeeQGMR/kMXdvFPPB'
'7uqm74j5YL06elNl2muZX8nID9B7w3pS9HZ0DP6bdl2+ukm/3ekz0O4U1nWH'
'02LFh3mHyI8kdgKX7s1iPmiHTbeY3eD061W/UXo1Bfk1rwmyPIrzbCbwvK7N'
'Y47k4QZaP4rlkb3aLHn4z3PX34G+og/7tXHjPzN+b+bnMR8xrP50y2PUysOh'
'9lkYJQ82j3n0x+L+H7Tv+H+L+dK4HPipmA/iDEVuFfOB37l7m5gPdkc1/UTM'
'l98n+D/l6e/q1yunynB0onnMQ1gONo+5x2DzmCOS7RduvpNuPXa+KVikb99u'
'Dx7vmb1vN32XWPGh3TT+r20gJOnft9t4CbrkUoKvXNz3Hy8O431NYwOolNnr'
'5P+z9jrIxJtPqP2V3vkEjflpL60GnBi8Ww0n4r+yDZOkKchPm6oGnHB+jV4/'
'fK8aTvRlLE4EmXg44d5rKk4kv1ENODH6WzWc6L3XNkyS9ONEw2XVgBP1OK5B'
'4/1qODH0e4sTQSYeTsTuNwMnivP9JS5Hl8zO91c7qIYTmfttwyRJf76fOVeg'
'S2bk++HhRNOD6PXjDyuOJx61OBFk4uFEz8N6cQIoaHnMi3Ft9LFqwLXRpxTH'
'P49YQ0KSStwNUY5BTGyOwVysDi+2BpHDkYqzA79TzGPLxiAVxNFgY4xMKEZK'
'+qh1VIyUsQtO3Bbu6/r+TDlGCj+fJiJ+nlWyZGILEfKRff8i5jrLx4/hwsuj'
'CzSZPLpYt3h5dHM8nm4udAqxWji6ktp12XuUbAq5QNk4TPz4UyT/m1fOydDn'
'WT7eMXteFuOFjeGyt+vPJ39+KTmPIUbWJOMoqeY8djLnrqWOC/GQlOIb9S15'
'bzPXn5/9HbQl1bbG8tVx78/KdzOHagsuGXfOk8/WxDXAIk9+nk579hX4ZXYT'
'2omi3RyZ/z7kNZa1e9R2Hjro7Qnqqm8suaLYV/z7yeIbyeymelsoNY8uJqU8'
'utBu/OwW1F1xXX18pH9ce8UfTBrXSser/2fHq0Em7voXE/OsUuc1MW6Q+8Vx'
'ZHrwSAc+w8Z/TyuO/3xi0wWZym3/IbZiQb9uGkKXktRzK9X+c9e/nsWtJKs2'
'r1n7gsWJIBN3/SurFyfYfOf1D4oVDPKd7yqJV57Pd/6smC+Ky/hLYj7Yh9T7'
'mpgP8p1n3hTz5fOrXq2GA2Pnifn0zbfOe7wa5lujL6rJOfQ3a+BI0j/eGnjc'
'pPEWD297sf0YeFkNb91XLN4GmXh4O/yyGeOyML5foR0P/lGt31zZOJF6VQ0n'
'IiO2YZKkP49t4xPoUpZ6rmk4kcH9x+w/1HAi9obFiSATDyec183AieLxRP2T'
'1TCe6B9Vw4nm123DJEn/eGL9k9UwnhjB8wehd9RwouddixNBJh5O1L9jKk48'
'+adqwInh99RwIvmObZgk6ceJkfdNwglEvv5hrL8S4fs32bx3SvnOHediyFcu'
'y6eG3ve9yIOKvof8fIzc/M//RNXpfkjiJj/OdOIjzI+Pm49HHjF0FC1L1Uq8'
'/OV9jUgPuOP4o9H17DESvmMx33HgMecI+bNYb52PSP3l5y8fwh5c4J/fNwcd'
'p/FxZCd0bPt91Um8/OUN207n/cQp9v1WxQF+/nJ3N9HzCv3J+P5iPtDj3oPF'
'fPAimUPFfJVO+uNC1k73Q7RK7eewce4zG8WGK4zL2s3FepAfD22hpn8NW5qt'
'V4hUfZMLpWbfY/ADzz+T2RcSvXnpzAxxXEJfts71Pw/Ez+VL/16+j8RrcYq5'
'fsF33i1c8825TOQ4lvmxg188Tx58v/mPOzc9PIfMVQ2/n2CualZ/BLnpVXNR'
'swTnlXJfs/u8nLsXvsU8n/cc2fuVL1c17P0A2ZP16vn4b+2QOFLIVQ1E7q2B'
'/QOKe2sI+bBjS195svVN7PcoNRf4ZOULNrP4/jz7AETaB5A5Yx9y1zzZzy3w'
'5H7jyZ7YWyOTb+0hI5RNJ+YGZHZFaR8fuz+G2MeouteMJ989Xd71UvYueXjF'
'6q8nU0J/ZZhE7Lerc/3flfqWzCUvhzLEcawgE8hpzv4OiD+/Ae8qfj6f9Pcr'
'w0b1K/XLo97KwyHlEbHycKj28hG9QoeocuXhPz+V2BnVanJrVPLyl6cWk+On'
'ashfXm9Y/Wlv31YeDhU3zyh5ANn85T7Xjchf7hqlb2z+8satxPNrUVyGthHz'
'JXFZWyPmS+OyfrbZ83r69arRKL3SL48mKw+HlEezlYdDyiNqlDzY/OW924nt'
'IeQvH50r5oP85c4OYj7wtqmplazXYL7wjpVpr2V+JX270Ovy4T3QMfhj2nX5'
'6ib9didmoN0prOv2fkLcrvNxMuarres6C9TWdWvqKtOeqJJ+vYobpVdTkK91'
'6yDPYxbn12zA87o2fzmSh7NNkPWjWB5pw9oLZ//NXugrYrvR89zs/ooU4/dm'
'fv7yPsPqT7c8+q08HCpeh1HyYPOXhxeK+3/QviO7qs0nJv5DzAdxg0Yl/rHg'
'd+7sLuaD3U419WI+GGfG9ixPf1e/XmWM0it9+csHsRxs/nKPweYvRySN37cv'
'3XrsfFOwqFz7cHfE9yPWP/B4bzn1vEq3R0Xx3/YT42A+/lvE7PmhclO5cR/H'
'fyf0606sX7ADD1Hl4j4n/hve1zRwACql8d8OsvY6yMSN/3aA3vkEfXG5l8/A'
'MyXU80zDiVSDGk5EDrQ4QZL+uNy3Y/1KUs81DScyeF9r9lNqOBH7tMWJIBM3'
'/tshZuBEcf7yxTNxT4l6nmk40f+fajjRfIjFCZL05y9/EeuXS1lE03BiBMc1'
'CB2mhhM9n7U4EWTixn87zAycCOP7Fdrxrpuo9fcqGyeGD1fDieRhFidI0h9P'
'+masX1nquabhRM2R6CsijYrjiS9YnAgy8XCiqXFq/FiClr88jO9XsDsXbVoN'
'uNZ/lOL45/MW10jSj2szQybhmhSvmixeBZm4819LzRjXFK+nvxiqhvX0/i8p'
'2v+l1v6TpH89vXEzk9bTZfa//wRr/4NMXPt/vF77z+YlHTlCbMcgL2n0Q7Ei'
'wnekJHxRXIZOU/MvjsTEfJCXtLlDbd9d89Zq9n1A8h36xlFjzdUwjhqLqsk5'
'/WWLoySxMQG92Im8mItYchBzMSdTiGkJ2kvGtMTnVGNaplo3MDEpb71QMb+B'
'b8zb5EcnvYZjNjrM79n7yGIuymI6Lnb9f1cs392c4lixC53iWLGcOMAi+eZa'
'+oLxv/nE77B/kYv7Ny6OgenuQn8DGx80xsaj/O6O+bitruNLvPiWbLxPf1kX'
'4s/K4sPC83mxG+dIrodd//NqcX29GLxEW8jVlYelswk+r668mLzbOwXy2ooX'
'g3dn4rdeXdY5yCYrthVZXF934Vc2yRDHTubctWS99sz5/INkPRXFHU1et5o8'
'7t1wDht3l3oeSS7nPCZZ3FReDH0gWYxP2f3V485CrG6y3iFWN1nvE4zVzdop'
'to6iL73ximJbALskkxnv92xcVR6fpWol/fOoia+YNI/KWx+M4PFD03K19cHh'
'NjveDjLxxtvx5WbMtxbnEWw+Eb252XkEw6erjRMHl9txIkn68wi+g/XLjDyC'
'PJxoxvNHiTPUcMJZaXEiyMTDid4zzMCJYr/0mSdXg1964yo1nBg9w+IESfr9'
'0u85uRr80pN4/SAdV8OJ+k6LE0EmHk5k4mbgRLH/xr9OqQb/jXiXGk7UftXi'
'BEn6/Teip5rkv8HDib5u9BWDZ5E4wY+X7JyD0REfxoZp3LBU3cTLwx1Zj/SA'
'p2eNQ+h6+GIx39Cf0PXRP9P2jMcfW4v4UmeT+svPw938dcQHfuaRHtxPwscD'
'37L9nGomXh7uzDdF+Fn+PNyDaTFeQ/9p7Kdivnzc+rvEfPAijXeb3U+Ygri4'
'5/ohWqXiOhuvvfFr4voN4zJ9sZgP9LT/EjX9y1xqtl4hmnweblHOU8jZC+TV'
'GSfnKZVTFnw3GJ+NHBZ6PiQLiXt6/J4PST3xO1mu5kIeVb7vClFmO4+ncjkT'
'uX7ZnNa+djL91Nj6Ijn6P1fmE5I7Dr809Bx5n6aDP3yFc99SfUr411Xyr4v8'
'2qDeoY4Yn45c3CavbmG92yOv3r26XkC8H4xLyHqHa5J6T9S9OY36vkzXDdhP'
'hM35TP0OyuSbV87J0OdZvsK7KNyP9Qkj9Eopp3fPmvnwe9V65d2Pbe8FUvG3'
'g9zspC+PKDc7vBO24b652Tn1zvO3K/LTci5eTbeJ+DryeOT3u2/D/WbmvMs5'
'j0nmM5Q7P/TcplBXdZL7yeqydL8rILKNwrP82ihc82ujXv3Pcwr1StYVtG1J'
'XbHtkLDHSu0welCHar9UzU+V8RfLRs76t2LdA36U7i/Gm390mX6Krv3OUxCv'
'36h+5RTE67fycKh4/VYeDtVezjZLHv7zU871+CsuQyUvD3dNH2nnqiEPd79h'
'9ae9fVt5OFQeCaPkAWTzcPtcNyIP96BR+sbm4R5KiefXorjsuUzMl8Rl+nIx'
'XxqX/VeYPa+nX6+GjNIr/fIYtvJwSHlkrTwcUh4jRsmDzcMdvkpsDyEPd+Jq'
'MR/k4U5dI+YDb/XeayXrNTCeuK4y7bXMryRyI70u33cz7X9o1+Wrm/TbnVED'
'7U5hXTd8g7hdw7xD4jtq67qpm9TWdXu/W5n2RJWmIO+0UXqlXx7Jy4Isj+I8'
'kRk8r2vzcCN5pAKtH8XyqDdMHv7z3PFbsd9mmp7nZtfzan4wNet5hffVXX8R'
'w+pPtzwarDwcUh6uUfJg83D3fU/c/4P2PfB9tflE57/FfBAnL/EDMR/4nad+'
'KOaD3T29PxLzwThz9Jby9Hf161WjUXqlLw93E5aDzcPtMdg83Ihk+13Tt9Ot'
'x843BYv0xScYxuM9s+MTjPyPGAeh3fT8xOz5oXKT/vgEPefhHZiUZapc3Pcf'
'L4Z/hr/iDnLfHt9eD/7c2usgE28+IXqH3vkEfTgxtK4acKLmTjWcGLjD4gRJ'
'+nEilawGnGjE+1pjv1TDidEBixNBJh5OpH5pBk4Ux8UcPB+9udlxMRv+nxpO'
'ZH9pcYIk/XExl1+AJG52XMw4jmvQ82s1nKi91+JEkImHE/2/NhUnui+sBpyI'
'ZtRwIvQbixMk6ceJmouqASd6f4tH3b9THE/cb3EiyMTDieHfTY0fS9DycBfP'
'kzU8UA3zZA1/UBz/rLe4RtJk88epxFeCa6XGV4LnSeLs1B4ywsQ5yuedk+WU'
'QvFAluwO8UD4sU6IkogH4qjcX3Y/IlYLG0tGljNL9nx+rBi23r26YePscGK3'
'uNCWvGuevswl7qNQ72zeQOex0D/I7ydymoUl3yeLb8XGOUJ8106fge/Lxkth'
'7yvLWwjxuNTjV5HyhZxkQJPISRa/8fmNZEwZIh9f7njsghO35b4rfZ6NicPy'
'10nuw8+1qCIT0Dl8ztfWgF2QyITNidj88I1jpdgFIk6PLOaSTE/4edp49ldV'
'JpC/E4jMLwlyUszfWaRDhXapJi9G5xzn3LVM+5yo/EAnJ58TcqLxA0H2fvgG'
'cubLt3JJ/zrZ6AMmrZPJxquJJ+x4NcikL4935EGkOUnqeaaNwzJPqo3DYn+0'
'4zCS9Ofx7Mf6laWea6odzvzV2uEgE3f/27DeeUNk70L5/Tvx34rtWBMuR85S'
'2+dTs1bMF8Vl7TNq+3zcF8R8ceB/RW3/e/YbavbdlXyHvrxEPQ+hJ5udlyj0'
'tJqc+/9icZQk/XmJ3sH6ZXZeIhfbj+hzaut0I1mLt0EmHt4mnzPDn6MYJ25+'
'uBpwIvI3NZwYfs7iBEn6cSL0SDXgRAz3H1MvqeFEzcsWJ4JMPJzoe8kMnAjj'
'+xFxpR+phnm55hE1nHD+bnGCJP3zcg2PmjQvx8OJHjx/0P+aGk40/MPiRJCJ'
'hxNDr5mKE7WPVQNOJF9Xw4n6DRYnSNKPEz1vmIQTiFDus/nvQ/4tvp8GUbL+'
'cKO39bxaoj9YnlzOecXfw/OKf8/1fx/F3rRvkTjIj8s8+u508kWdoeOQQtBR'
'pyxVK/HyfYddpAc8PWs4HF1vPlLCdyzmO47OZMTjb34b6WPiXVJ/+fm+G8cQ'
'Xz5P8pboOWl83DcbHdt+XHUSL993/2aizFnlz/c9ME+cqQv6hyN7ivlAj2v2'
'FfPBizQcVFqGsEoj/XEUe/5Jj/QQVWq/hY0L3/C+uJ8bxmXPNLEegJ6mp6vp'
'X/8Ms/UK0eTzfYv85qFmyuk3z+RmTZ898obiXgDZXg12bwnLt5i5XuArxYfZ'
'a42ED7PyXg25DzPyoR/LTsfvqZQ7l9ibo9R317gXhoc1xXs1Pkb5EntT+Pso'
'iDLVuqGkHN4C+YKP/0TlW8f9vUpOb28PmOJeGGqPA1yD33ryXTD+N5+4htu8'
'LE909OalMzPEMSsrYg8NKwvqPk6x7rN16Vs3/bMOfK1M+yX4+dURqe+X8NsP'
'CPnXof7I/Otwjsy/DrqP24rqfhR2Pwlrq6N/anueklNy6eXwvcz3lbpvTGar'
'ZPsE+ftVPq62gO8vbAu8MWHyK3hMd5J6n0R/v7LXqH6lfnmkrTwcKr+1lYdD'
'tZd3zZKHvy0a3RZ98NgsVPLyfTu7kraqGvJ9pw2rP+3t28rDofb3GCUPIJvv'
'2+e6Efm+B4zSNzbfd2am2NJHcZncRMyXxGXPpmK+NJQhs+f19OtVxii90i+P'
'QSsPh4rrbuXhUPlQjJIHm++7ZguxPYR837GtxHyQ7zuxtZgPoiumtpGs12C+'
'3prKtNcyv5Lw9ui983mGw+gY/Cvtunx1k367kzXQ7hTWdWvmits1zDvEdlBb'
'103Uqq3rpnasTHuiSvr1asQovdIvj/gmfh6BQZFHcT7Kfjyva/N9I3kkAq0f'
'xfKoNUwe/vPc0d3QV2Tn0fPc7H4J5xM0npif7ztsWP1pz19v5eFQcdKMkgeb'
'77t3J3H/D9p3385q84mjdWI+iAMU+4SYD/zOE7uI+WD3Umq+mA/GmdkF5env'
'6terBqP0Sl++bxfLweb79hhsvm9Esv28PfV067HzTcEifXF6Bv9ZDXF6hvcQ'
'4yC0m+TuZs8PlZv0x+lZ/EE1xOmp+Q/0wZG9UCmz1wN7W3sdZOLNJzTtpXc+'
'QV/8hac/qIb4C05EDSf69rI4QZL++Asnf2hS/AXufnC8r7V5fzWcyC6xOBFk'
'4uFEYn9TceKQf1UDTtQfqIYTQ/tbnCBJP07c869qwIkojmuQPFgNJ0KfsjgR'
'ZOLhRPpgM3CiON/3Tf9G7djsfN9Nh6jhxNjBFidI0p/vu+EjpF9m5/tOfRp9'
'cN+hiuOJz1icCDLxcGLw0KnxYwlavu8wvl/B7jRurIrxz2cVxz+uxTWS9I9/'
'BjeaNP6R4tXnLF4FmbjzX0eaOq7pwz4/Zo9r6j+vaP+PtPafpCkY13wBsZox'
'rkHkG18q23k8FX+KiP8liyfNxv9a5PrzUcfpp8bWlyn+lyxWHj+23ETjf0F8'
'KnxOJT6VLP5Xcv37rwjjf82+9Sl8XRZbsdT6U411x/s9XJfH/4L4lkCefL24'
'gUR8S3cmvua1IE4+eohv6Xjxvbw6m0fwcORLxGXHsS+7bsDy3VPyfah+3rxy'
'TgZfcP35eMe+93MeC/0DH8v0mx+XnZWvKJajJydPbnCOlC/IHst3krEc65nv'
'Yb/LN/ao46xZzei3TI5P/yd8p/91IJl94Mufp79gC3THZ42efSF5TNhYWfxL'
'sMETtal1zHUenyWTSDb+qz/Bjv+CTNzx33F6x39snvHop8XjGMgzPvyWOF45'
'fIfztpgvisvDJXHNYX9RskXMB3nG+9rU9t0PzVIb30Uk36FxHvUovEJOPc+0'
'cXS8WU3OtV+242iSpmAeFetXlnpupY6jpTh6isXRIBMPRzMn6cVRffa/7+iq'
'sP+nKtr/k639J0m//a//okn2n+cf0of7hYOtav4hTcssTgSZeDgx0moqToSO'
'qQac6F2uhhPuaRYnSNKPE6ljqgEnhvC8wOjpajiRaLc4EWTi4URNzAycKI5z'
'EFuK3tzsOAeZM9RwIhazOEGS/jgHj2D9MjvOwdiZ6CtqO0ic4MdBbvoq4nPx'
'sfsHvyhPlqqVePm1k3cjPeDmcX8AXR/7mpgv/Ci6Hv86bc94/ANYb4fjpP7y'
'82v3dSE+8B9PXoqO0/g49E3bz6lm4uXXrrlYhJ/lz69d2yvGa+g/Nf5YzAd6'
'HL9dzAcv0vtzs/sJ+uMWjnb7IVql4jobh723U1y/YVyOfl3Ml4+He56a/tWs'
'M1uvEE0+vzb4d5L+W+DfCUTmdyX95yC/KyYX16mqfyfr2+WTm3gtedy3xwnv'
'4WPIVUzdz+d4ovldZf5xda7/+WL5+uV3FeXP9ZOvJH+u48l3oYNyvWIqNdcx'
'4XuJjt0T1lHH6fMvVfTNnKzvMut7LePn59pFpO7rLPIV9fwcPR/Fgv0t+IoC'
'v58vrlf/8wo8VF2Bf66krgg/UtaPnObj1GUk5EC7UvKT9jlPXWf94ovyME+8'
'7vk5yFXsFvhNg1xJv2mvDgi/3kn5pR+w/7uk3Qof++X3FX1Qff18BzY/Z0zR'
'71ym66p+0yyV5vcPJMv7jUmGCzn+xQ5qF0Deffd0qDyHLm5HbhiXuzr8ecBa'
'pr9Q7nlA3nN7ND8XSH+/csyofqV+eThrrDyodU0rD4dqL3Gz5OE/P9V0JfqK'
'5vNRycuvHf0+aeeqIb+281Wz6k97+7bycEh51BglDyCbX9vnuhH5tWuN0jc2'
'v3Y4Kbb0UVxmzxfzJXE5eoGYLw3/ucjseb0pyK9llF5NQX4tKw+Hyq9l5eFQ'
'8SyMkgebXzt+idgeQn7twZSYD/JrD39DzAdRP0Yuk6zXYL6xyyvTXsv8SpJX'
'0+vyY9+h/Q/tunx1k3674xpodwrruvEetX2+g9eoresOX6u2rjtyXWXaE1XS'
'r1eNRumVfnkMnR9keUBrIeY58Lyuza+N5DEcaP0olkfCMHn4z3Nn0tgvuJee'
'52bX86KM35v5+bWThtWfbnmkrDwcUh49RsmDza899i1x/w/ad+h6tfnEpu+J'
'+SD+zaDEPxb8zodvEPPB7p6RG8V8MM50v1ue/q5+veo1Sq/05ddOYznY/Noe'
'g82vjUi233X0R3TrsfNNwSJ98Qnq8XgvST2v0u0Ru++04Ra1fafZH5o9P1Ru'
'0h+foA/rV5Z6buXivv94MY73NfX0kfv2+Pa69jZrr4NMvPmE/j698wn68kak'
'zkJvbnbeiOhP1HAidKvFCZL0543YdS1iNSNvBHffON7XOvBTNZxw+y1OBJl4'
'ODH8U1NxYkmiGnAidYcaTkR+ZnGCJP04MZSoBpzI4LgG2TvVcCL2C4sTQSYe'
'Tjh3mYETYXy/QjvOnl0N8079v1TDiea7LE6QpH/eKXpONcw7jQxgu3+34nji'
'1xYngkw8nKi/e2r8WIKWX3tvfD/CbxdHjKNXIE3DtdRvFMc/91hcI6ncuAbt'
'l/CjuBd7rhkx/kHE5upj49r4xoMqIV/nxOOeoFIpnpR7zxFU3Jv+WQe+phj3'
'RhZ/Tz2fJcSTAiLj3mCi4t4sdIrj3ixwUD8Dznm/nT/+94nCOTYfoyxGkTv9'
'qtdJGSSX7L4F/ia1mEXZ835M1r2TOfd66tg553J8rBp/ive8yf5eva78cmOK'
'YhRJ4kmxuTEHR6dNy5DvUoj7JItvVu4YW2xcqcJ9WJn4xRUS5buFuE0gQzJu'
'E/CrxBWC35NxhaAO/OIKwfs7PnbLufXCUuI9FeUwTa668F5/fvY+qnZD/Hs2'
'ttvdC99i6n2idquO+3u23r264bUFTFDvLhMvyuHIMdW64W1F+y3Lnxtx/c+X'
'pr/Md1D66xd3TKK/Lr0Dw0ySjVcHH7Lj1SCTvnWo4Uw1rEPVPKI2Dht40I7D'
'SNK/DtX9W5PWoWR2uPdxa4eDTGy+6cyA2J5AvumGDrX9NlEJXxSXo0+o7bep'
'/auYD/JNu1m1fegRSRwiaBc9ku/Qlx/oovvQk83OD5T9o5qcU0MWz0jSnx9o'
'BOuX2fmBap9EX9HwlNp6WebPFveCTLz1suanzPCrKF5/6v1dNaw/hf6ihhP9'
'T1mcIEn/+lPTepPWn3g44eL+Y/RpNZwYecbiRJCJhxPJp83ACVg7IuI83I8j'
'n1DPMw0nIs+p4cTw0xYnSCo3TkAuImL+9X41/076+sdFPJyI4fmD1N/UcKLm'
'RYsTQSYeTvT9zQycCOP7Fdpx8++rwU+7+SU1nHBesDhBkn4/7djfTfLTRpTz'
'I4gV8qoh/4QzN4A/G98fhiovXk0du6sh554sDxhcV815p+7Pw41/8DKOf/AK'
'iYP8+Mi9r8+gPiTxRWR16OhPlqqVeHm3BxuQHvD0LHsI9p45VML3Bcz3RfC2'
'oalzRUskEsHlvrjcD5f743IJLg/A5YG4PAiXDbgk/MybfZ82cUI4tUm+nzA2'
'Ju4YQn8g2T1dyJfPszwq5nNxOXSmGPdglNj4stq64Eibf73oIlke9dFRvC6A'
'jwf/hY7T+LhpOnpf2y+vTuLlUW/+gNTnieZNZ/kLJZtHPTpX3C6gffcsFPOB'
'Hg/sIebDL+Zk95na9lhu0h+fsn8WnCGpUvuhbLz97BtiuxzGZcNbYj7Q08a3'
'xXygf83vmD1e0q9XA0bplX55ZKw8HFIeg1YeDhXfwzB5cOJAzUB4m34Pr7Jw'
'8qj2zSNxWVceVfi9t2irPV+CYfWnPY/P6/SKAKLgyqPRKHkA2TyqPteNyKPa'
'ZJS+sXlUY++q+U3Xvy/mS+KyYUzMl8Zl4z9tvz5XcvWq2Si90i+PqJWHQ61r'
'WXk4pDziRsmDzaM68KHYHkIe1ZqPxHyQRzW8UcwHu8oijnjeDuar3WmVOb8n'
'Wycc3AS9N8wnuTuiY/CXsfPy1U367U7t++bZncK87sAscbvO7xMOSewELsOb'
'qa0rRDavTHuiSvr1KmyUXk1BXkij8F1/HtUklofNo4rH4Vv6eQBVqjwQhVxe'
'/C34Pm8u1osvRMQvyvXYvBgvs4lzmzhIE7Yv/NZl49x4ElzgoFhcmFTjbxXF'
'GMqcu5aOMUT7mkUv/f0z+Bh8xVRjALHnWV84WSwpiNXDu6/M962OuV7gU6kr'
'z/+QzLw3wbqCWD2OF6vHq/8FBG+JdVUcX+qWtdTxbte8qxhvipUdy8f6Jfqv'
'N7Dxovj8/Fho/s8r3IcXHwpIECsN6iJ3zavjuU6h7rzfeHW8U+HcZOoq9dBB'
'b5OyIOqujvk2Xswplui6jp7NxALLt1NZzKrc+eZFrz+rWFegO6XH+lJpVxON'
'QQj3IGMQwj1KjEHIxsMj6k4pviNbF6O39bxKyjbC+PtK78eXdb0rvi6zgert'
'yi8GoddeWBvo1c9s4vmb4FISgzB689KZGeKYtWUlxPCUxSyEOJKqPs+F+/DW'
'aOuPRVraeFzxWGcK/CDeM6u/7C/D0C5Idins18bd/8L4vZmfR33QsPrT7rdh'
'5eFQfhBGyYPNo+5uIZ7/gfbdNEfMl8Zl785iPojrVLO9mA/2EYR3EPPBbrRI'
'rZgP5plTO5Vnvku/XmWN0it9edRHsBxsHnWPweZRRyTbn92wgJ6FsutNwSJ9'
'8Wab8fym2fFmk3ViHIR2Ux82e32o3KQ/3uw7WL/MiDfLGy8O4H1Nw7uhUmav'
'o7tbex1k4s0njO2mdz5BH07M3LoacKKvXg0nmhZZnCBJP06kt64GnMjCvtbF'
'ajiR2sviRJCJhxPhxabiRP821YATQ3ur4URiscUJkvTjxNE11YAToQi26/up'
'4UT//hYngkw8nGjcz1ScOHl2NeDE2BI1nEjvZ3GCJP04MTa7GnAiciD6iqYG'
'xfHEJy1OBJl4OBFvmBo/lqDlUS/OdxTaDvEvp55nGq4NfUpx/HOwxTWS9Oc7'
'Won1y4x8R1K8ci1eBZm481+HmjGuCeP7Ef6En0H8Sep5xtn/zyva/0Ot/SeJ'
'9WPfzKH2h7jkXh1Pkzz/dNjvQe4P8fYIzCXuI9gf4pN3O+drnuwPb+GiY4j3'
'yRLt7x8+71LF/Tq+ccSJPSCy/Oyy/Tz8PR+8fQJA5J4OkBO5p8OTqbdnADdf'
'dp9ALt+Mt08E4r17RO6/wVTq/hvHOXUd3qMhi3Huvy+Oz8/uEWH5cuXQc5tS'
'ddWzZj677630/TWI+DHZVfbfTGBPR65+vfrYibgXuQcR6n2CexAFe0HYPYLU'
'fRzFumX35zjuCetoXZlwXcj2KPLtAFtX40Mx7n5RILKuQOa4rnJ7pTx5E7uI'
'ffeLQjtk6ir32/HhYa4/AO/gtcl9xv/2Iu6J69LF8z/uIlzuSXybU7zfJ9t5'
'/Au0rG+9sJS9hiXsD0LP/1Pb84p1rLbni9nrWMY9XrL9e+p7uPx0yG8fK+gQ'
'6IKfDoF++e1j9ep6AfEcT4c8vVlE3BProQt2Pewg/dqHuDdfh9RJf96RpsMQ'
'nmWp55o6/htaasd/QSbu+O+Lesd/bN740IHicQzkh+h/RRz/Cr5jWMIXxeU8'
'yT4k2F9U3yzmg7zxTaeqxd1JvKeWVyo1S8imcX0w8tlqWB+s/ZLaODpzjB1H'
'k6R/ffCRz5q0PijF0eMtjgaZeDgaO86MedRi+//04VVh/09QtP8+sRyCTPrt'
'f+IIk+w/zz+kCfcL4yeq+YeMnWRxIsjEw4meE83AiQPw/QrtOHUk4n+Sep5p'
'OOGeooYTIydanCCp3DixN74v4af6OWxhjcaJBJ4X6I2q4UT4NIsTQSYeTgxE'
'TcWJRxqrASdiy9RwoqbV4gRJ+nEisdwknEDku/7N+i4kr3jx9VL8QsZppuo6'
'ttL92Liudy98q0zr5Wz8zDp3cvdjY0EW+Hi4XLMCaU0kRuIyP+9E7EzEBw9q'
'fIjGaUvVTbz89L13IT3g6Vn/r9D1obslfA9gvgdp/ODxD2G9HT2D1F9+PvPM'
'KsQH/vq956PjND4OX2r7ldVMvHzm9etE/RWws6XmNy/kD2XzmUeuEfePoL8a'
'TYv5QI9TPxbzwYv03252v0x/nMjQaj9Eq9R+FJv3pn+luH7DuAytFfOBntYm'
'1PSv/myz9QqRqk9aoeTF4yd9zjgx3sHf2i1c8/e9PfYU8BWU+XfKclLs6fqf'
'Ly2vAOPXTPmNQ14BoDL7yibq3pxGfWOm6wYsKzUf8+Lz9HFB1rnj9FNj6+nf'
'f+tCBgvE95u4nyW/Lkvx8ffqY2uC16srz6ePzSsAfo7gr1iGfB2E//8il/0G'
'+tt8fcSd5HVU7pVHQheo4q/D4aP4M5e8HMqgY7X8Hvz35/ussnXlyYhtV+CD'
'Du1D4NdM+aCXuB+D3S8RnX3rUxx5y+RX59LH/vLKrIS2yc8xIPp9gfj5Uiay'
'3wWu+e138cuHAvwS+TrXTp+B33Oxy74n/f6ofax//5VD6e9j+djj1/+TOCb2'
'UEx2HoK/B6MM+Ja75u2TIXCB2u/iyX6eU9BjUr68cd/YY+gptY+Lx5NjT2K+'
'P4v7J/r7lTVG9SunIL+klYdD5UW08nCo9nKmWfLwtz+xFJ6V/xoq2fmHPsyf'
'uIG0Tzx7DfMJ3nwuspelZZsHOx9yp6B9G1Z/2tu3lYdDyqPeKHkArWrvaOmK'
'r2zvbulYvbytpav9nFxaCAcSUOTKFWs6lrVgfu+4a21797JY7ky8s215+7Ju'
'OukEOh9v7WxrWd2xsr2jrWVlW2tnR3vH6XDZwfkuxm8UaxlnXdm6rG1VWz4L'
'xfj17s7W7vYV7ctavfA3LavaumOrvZuPn27P3airrbtlWevKZeTLOMx14r2p'
'8/HO1fHVnd2FrBjM9bwUvPNtbWSCDKh3mE/ikX59ixilbzCvB/uDGs4R91+j'
'uBz7mpgvicvQuWK+NC5rzzN7Xk+/XjUYpVf65eFaeTikPBqtPBxq/7BR8kB2'
'eFY+n1MqKbaHw7jMXiDmG4XyQjFffnfAxZL1GsxXc0ll2muZX0nv5fS6fM23'
'aX9Puy5f3aTf7jQbaHcK67qpy9T2VWevUFvXHf2m2rquc1Vl2hNV0q9XUaP0'
'Sr88Rr4WZHkU59usx/O6RCQiJzjyKNaP0UDrR7E8egyTh/889/CN6Cuar6Hn'
'udn9GwnG7838fOa9htWfbnmkrTwcUh59RsmDzWde0yPu/0H7Dl+tNp8Y6xXz'
'QbyhrMQ/FvzOR68V8+V3U31LzAfjzObry9Pf1a9X/Ubplb585gNYDjafucdg'
'85kjku0vDn2Pbj12vilYVK59vthvmVz/wOM9QFDgq2x7xO7zbfq+2j7fsZvM'
'nh8qN5Ub98H/mog3gvUrST23cnHff7yYwvua+n5I7tvj2+vIzdZeB5l48wmD'
'P9Q7n6AvvlwojvjNji+XuEUNJ8I/sjhBkv74cjdj/TI7vlw/3tc61KeGE823'
'WZwIMvFwYrTPVJy486vVgBPpn6jhROOtFidI0o8TTZ3VgBPDOK7B2E/VcCLZ'
'b3EiyMTDidqfmYETYXw/wg+lS21eoLJxYvAONZyI/8ziBEn68/5ksX5lqeea'
'hhPO/+JWdZfieOKXFieCTDyccO+aGj+WoOUzD+P7FexOprsacC09oDj++YXF'
'NZL045q7xiRck+LVry1eBZm48193mzquCZ9VFfb/N4r2/x5r/0nSb//77zXJ'
'/iPyjd/Vv2lRPDKKnyBZnMDc+fAuiz7E1+tdAV++TK6BONYTj0fFy9kNcblk'
'eaAxlZwHGq4pxiF03J5rOTEa0fF3d+TnkKfPszm/F7n+fNRx/6wDX1OM/SWL'
'U8iPreb/+8J9Som95smPjUPoxQPz6g03zcnEtiPiQtZx35co2ViDo/dt3ICP'
'1fJfJw9ai+tflr9eFrNePXagF0OtlNiBmNjYgRB7DXQdns3G3WfjkBI5x9m4'
'+ey3yeKU8uPUoxJiearHU4TvF8Sjc8jYl8CvEPuSp3OsfIic7nXM95U7r4As'
'b4GsTcviMVqqVJKN/9KP2PFfkIk7/ntQ7/iPzWc+/HPxOAbymTfF1PYXJSR8'
'UVymz1LbXzQo4YN85mN/Vdt33yiJuwTtsU/yHfr8SHoy1eBHMvKompx7Hrbj'
'aJL0+5Es/q1JfiRSHP2jxdEgEw9HGx43dR71kPuqYR515AlF+/+4tf8k6Z9H'
'vec+k+ZRpfZ/2Nr/IBPX/v/ZDPu/I75foX3e9DvEv5x6nnH2/y+K9l+S7yFo'
'VG77j9dHCP1y1iOJQ2ZIRJVq/3n+geH/Q1/hPqPmHzj4rMWJIBMPJ6LPmIET'
'YXw/Yj18fTWME2qyajgx8IzFCZL0jxMi95s0TuDhROPz6CtiL6jhRPrvGAem'
'7tUtVRCxOJHP2/sCbX9WtLV2r+lsa4lEIszxvszxfrljwI0w57k8/Q29iPXx'
'72r6OzaC+OgoppaCQmzed9Dj5HtILzwdO2L56W08fXPHEF/0A1Qe3erlsGlv'
'7QivXhH+XOuarq7x/+dPLl0BpxCuz8jjdfYFMV7n4xf9XcwHdjj1QTDwX47r'
'6Hi7acinZhqBxx4R/vKvIont4pBUjN+9TiUT0qtN8vGV3LfU1qsj76v1K0de'
'FvOFcRl9R8wHOBF/V1Hv355aff5cZ+vy9raO7vAXW0/vaO9es7yNbND5EnPl'
'mfbP0RO5NHbk/w9vX7GirbOtY1lbwQR0yd7BkiVLlixZsjS1VEq/ckdH1K88'
'Ffcrd3RIMq1fOfn5M5581r5KrzQgqvR52YnPU/PksA7Lge7nFsuhR88HTZLK'
'L4/Gv5ssD6CO1lVtXpmLbz5edrWfvqrVO79m1Wltnd6YYr/l4WWx1o6OXFhy'
'4vz+1PnTO1eviRNxy5s5z5solWLvZjqiejtasd4q297J5g1HPrTzhkEm3rxh'
'fNqsXCmbN4zMQHxNs1BZmCAg5xm6yDkE3DzV2+lcR9ROE7idzqW+y7R2ys53'
'Rf6tNt8Vnj5LyAe9kWHJPGoY7rtRbb4r6oifC3rifjS1812l6BXaZ8jTq29i'
'vdqOur9peqWvv3uh7e96/NMut/1dh5RH8gOT5QEUvP7uRYr1Vtn2TtbfHd4E'
'4Zbt7waTeP3d6JZIL7y+ro7nltJOd3BE7bQft9MdqPub1k7Z/m44JO5PQn+3'
'Ziu1/u7gLDFfGJfu5mI+6O82baHW341sJuYrN5VvHHXVq9UwjtLX333gA9vf'
'9eTwB6P7d+WXxxNGywMoeP3dOEYIs/u7/x8BMXbk') | LeeKamentsky/CellProfiler | cellprofiler/modules/tests/test_classifypixels.py | Python | gpl-2.0 | 40,874 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MerchandiseTag'
db.create_table('merchandise_merchandisetag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('merchandise', ['MerchandiseTag'])
# Adding model 'Merchandise'
db.create_table('merchandise_merchandise', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('ordinary_price', self.gf('django.db.models.fields.PositiveIntegerField')()),
('internal_price', self.gf('django.db.models.fields.PositiveIntegerField')()),
('ean', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
))
db.send_create_signal('merchandise', ['Merchandise'])
# Adding M2M table for field tags on 'Merchandise'
db.create_table('merchandise_merchandise_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('merchandise', models.ForeignKey(orm['merchandise.merchandise'], null=False)),
('merchandisetag', models.ForeignKey(orm['merchandise.merchandisetag'], null=False))
))
        # SQLite workaround for unique constraint:
        # TODO: This workaround can be removed if ticket #144 for South is
        # solved:
if db.backend_name == 'sqlite3':
db.create_index('merchandise_merchandise_tags', ['merchandise_id',
'merchandisetag_id'], unique=True)
else:
db.create_unique('merchandise_merchandise_tags', ['merchandise_id',
'merchandisetag_id'])
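        # (SQLite's ALTER TABLE cannot add a UNIQUE constraint to an existing
        # table, so a unique index is used there to get the same guarantee.)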
# Adding model 'PurchasedItem'
db.create_table('merchandise_purchaseditem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('transaction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Transaction'])),
('merchandise', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['merchandise.Merchandise'])),
('price', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('merchandise', ['PurchasedItem'])
def backwards(self, orm):
# Deleting model 'MerchandiseTag'
db.delete_table('merchandise_merchandisetag')
# Deleting model 'Merchandise'
db.delete_table('merchandise_merchandise')
# Removing M2M table for field tags on 'Merchandise'
db.delete_table('merchandise_merchandise_tags')
# Deleting model 'PurchasedItem'
db.delete_table('merchandise_purchaseditem')
models = {
'accounts.account': {
'Meta': {'object_name': 'Account'},
'balance': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'color': ('django.db.models.fields.SmallIntegerField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limit_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.LimitGroup']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'phone_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp_grey': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'accounts.limitgroup': {
'Meta': {'object_name': 'LimitGroup'},
'black_limit': ('django_kikrit.accounts.fields.NegativeIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_price': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'max_grey_hours': ('django.db.models.fields.SmallIntegerField', [], {'default': '24'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'accounts.transaction': {
'Meta': {'object_name': 'Transaction'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
'amount': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'responsible': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'merchandise.merchandise': {
'Meta': {'object_name': 'Merchandise'},
'ean': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_price': ('django.db.models.fields.PositiveIntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'ordinary_price': ('django.db.models.fields.PositiveIntegerField', [], {}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['merchandise.MerchandiseTag']", 'null': 'True', 'blank': 'True'})
},
'merchandise.merchandisetag': {
'Meta': {'object_name': 'MerchandiseTag'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'merchandise.purchaseditem': {
'Meta': {'object_name': 'PurchasedItem'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merchandise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['merchandise.Merchandise']"}),
'price': ('django.db.models.fields.PositiveIntegerField', [], {}),
'transaction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Transaction']"})
}
}
complete_apps = ['merchandise']
| smyrman/kikrit | django_kikrit/merchandise/migrations/0001_initial.py | Python | gpl-3.0 | 9,159 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains the Apache Livy sensor."""
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Union
from airflow.providers.apache.livy.hooks.livy import LivyHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class LivySensor(BaseSensorOperator):
"""
Monitor a Livy sessions for termination.
:param livy_conn_id: reference to a pre-defined Livy connection
:type livy_conn_id: str
:param batch_id: identifier of the monitored batch
:type batch_id: Union[int, str]
    :param extra_options: A dictionary of options, where key is string and value
        depends on the option that's being modified.
    :type extra_options: Optional[Dict[str, Any]]
"""
template_fields: Sequence[str] = ('batch_id',)
def __init__(
self,
*,
batch_id: Union[int, str],
livy_conn_id: str = 'livy_default',
extra_options: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.batch_id = batch_id
self._livy_conn_id = livy_conn_id
self._livy_hook: Optional[LivyHook] = None
self._extra_options = extra_options or {}
def get_hook(self) -> LivyHook:
"""
Get valid hook.
:return: hook
:rtype: LivyHook
"""
if self._livy_hook is None or not isinstance(self._livy_hook, LivyHook):
self._livy_hook = LivyHook(livy_conn_id=self._livy_conn_id, extra_options=self._extra_options)
return self._livy_hook
def poke(self, context: "Context") -> bool:
batch_id = self.batch_id
status = self.get_hook().get_batch_state(batch_id)
return status in self.get_hook().TERMINAL_STATES
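# Minimal usage sketch (illustrative only; the task id, batch id and
# poke_interval are assumptions, not part of this module):
#
#   from airflow.providers.apache.livy.sensors.livy import LivySensor
#
#   wait_for_batch = LivySensor(
#       task_id='wait_for_livy_batch',
#       batch_id=42,
#       livy_conn_id='livy_default',
#       poke_interval=30,
#   )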
| mistercrunch/airflow | airflow/providers/apache/livy/sensors/livy.py | Python | apache-2.0 | 2,541 |
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
parent_name="layout.ternary.aaxis.title.font",
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/ternary/aaxis/title/font/_family.py | Python | mit | 553 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import os
import sys
import unittest
sys.path.append('..')
from hecatoncheir.QueryResult import QueryResult
from hecatoncheir.exception import DriverError, InternalError, QueryError, QueryTimeout
from hecatoncheir.bigquery import BigQueryDriver
class TestBigQueryDriver(unittest.TestCase):
dbname = None
dbuser = None
dbpass = None
def setUp(self):
self.dbname = os.environ.get('BQ_PROJECT', '')
self.dbuser = "dqwbuser"
self.dbpass = "dqwbuser"
def test_BigQueryDriver_001(self):
bq = BigQueryDriver.BigQueryDriver('a', 'b', 'c')
self.assertTrue(bq is not None)
self.assertEqual('a', bq.project)
self.assertEqual('b', bq.dbuser)
self.assertEqual('c', bq.dbpass)
def test_connect_001(self):
# connection success
bq = BigQueryDriver.BigQueryDriver(self.dbname, self.dbuser, self.dbpass)
try:
bq.connect()
except DriverError as e:
self.fail()
self.assertIsNotNone(bq.conn)
def test_connect_002(self):
# connection failure
# FIXME:
bq = BigQueryDriver.BigQueryDriver(self.dbname, "nosuchuser", '')
with self.assertRaises(DriverError) as cm:
bq.connect()
self.assertEqual('', cm.exception.value)
def test_query_to_resultset_001(self):
bq = BigQueryDriver.BigQueryDriver(self.dbname, self.dbuser, self.dbpass)
try:
bq.connect()
except DriverError as e:
self.fail()
self.assertIsNotNone(bq.conn)
# ok
rs = bq.query_to_resultset(u'select 1 as c')
self.assertEqual('c', rs.column_names[0])
self.assertEqual(1, rs.resultset[0][0])
# exception
with self.assertRaises(QueryError) as cm:
bq.query_to_resultset(u'select 1 as c from bar')
self.assertEqual('Could not execute a query: 400 Table name "bar" cannot be resolved: dataset name is missing.', cm.exception.value)
# query timeout (no timeout)
rs = bq.query_to_resultset(u'select l_orderkey from snagatest.lineitem order by l_orderkey limit 1')
self.assertEqual("QueryResult:{'column': (u'l_orderkey',), 'query': u'select l_orderkey from snagatest.lineitem order by l_orderkey limit 1', 'result': [[1]]}",
str(rs))
# query timeout
# FIXME:
with self.assertRaises(QueryTimeout) as cm:
bq.query_to_resultset(u'select * from snagatest.lineitem order by l_shipdate desc limit 1', timeout=1)
self.assertEqual('Query timeout: select * from snagatest.lineitem order by l_shipdate desc limit 1', cm.exception.value)
# ok
rs = bq.query_to_resultset(u'select * from snagatest.region order by r_regionkey')
self.assertEqual(5, len(rs.resultset))
# exception
with self.assertRaises(InternalError) as cm:
bq.query_to_resultset(u'select * from snagatest.region order by r_regionkey', max_rows=4)
self.assertEqual('Exceeded the record limit (4) for QueryResult.', cm.exception.value)
# q2rs ok
rs = bq.q2rs(u'select 1 as c')
self.assertEqual('c', rs.column_names[0])
self.assertEqual(1, rs.resultset[0][0])
def test_disconnect_001(self):
bq = BigQueryDriver.BigQueryDriver(self.dbname, self.dbuser, self.dbpass)
conn = bq.connect()
self.assertIsNotNone(conn)
self.assertTrue(bq.disconnect())
self.assertIsNone(bq.conn)
self.assertFalse(bq.disconnect())
if __name__ == '__main__':
unittest.main()
| snaga/Hecatoncheir | tests/testBigQueryDriver.py | Python | apache-2.0 | 3,669 |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.conf.urls import url
from . import views
urlpatterns = [
url('^header/?$', views.header, name='browser_header'),
url('^search/?$', views.search, name='browser_search'),
url('^mygraph/?$', views.myGraphLookup, name='browser_my_graph'),
url('^usergraph/?$', views.userGraphLookup, name='browser_usergraph'),
url('^$', views.browser, name='browser'),
]
| krux/graphite-web | webapp/graphite/browser/urls.py | Python | apache-2.0 | 961 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import traceback
import logging
import base64
import csv
import tempfile
from openerp import models, fields, api, exceptions
from openerp.tools.translate import _
from itertools import izip_longest
from datetime import datetime
_logger = logging.getLogger(__name__)
class AccountCresusImport(models.TransientModel):
_name = 'account.cresus.import'
_description = 'Export Accounting'
company_id = fields.Many2one('res.company', 'Company',
invisible=True)
period_id = fields.Many2one('account.period', 'Period',
required=True)
report = fields.Text(
'Report',
readonly=True
)
journal_id = fields.Many2one('account.journal', 'Journal',
required=True)
    state = fields.Char(string="Import state",
                        readonly=True,
                        default="draft"
                        )
file = fields.Binary(
'File',
required=True
)
imported_move_ids = fields.Many2many(
'account.move', 'import_cresus_move_rel',
string='Imported moves')
help_html = fields.Html('Import help', readonly=True,
default=_('''
In order to import your 'Cresus Salaires' .txt \
file you must complete the following requirements : </br>
* The accounts, analytical accounts used in the Cresus\
file must be previously created into Odoo </br>
* The date of the entry will determine the period used\
in Odoo, so please ensure the period is created already. </br>
* If the Cresus file uses VAT codes (i.e: IPI), \
please make sure you have indicated this code in the \
related Odoo tax (new field). \
Warning, the Odoo tax must be 'tax included'. \
If the tax does not exist you have to create it. </br>
* All PL accounts must have a deferral method = 'none'\
(meaning: no balance brought forward in the new fiscal year)\
and all
Balance sheet account must have a deferral \
method = 'balance'. </br>'''))
HEAD_CRESUS = ['date', 'debit', 'credit', 'pce',
'ref', 'amount', 'typtvat', 'currency_amount',
'analytic_account']
HEAD_ODOO = ['ref', 'date', 'period_id', 'journal_id',
'line_id/account_id', 'line_id/partner_id', 'line_id/name',
'line_id/debit', 'line_id/credit',
'line_id/account_tax_id',
'line_id/analytic_account_id']
@api.multi
def open_account_moves(self):
res = {
'domain': str([('id', 'in', self.imported_move_ids.ids)]),
'name': 'Account Move',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move',
'view_id': False,
'type': 'ir.actions.act_window',
}
return res
def format_messages(self, messages):
"""Format error messages generated by the BaseModel.load method
:param messages: return of BaseModel.load messages key
:returns: formatted string
"""
res = []
for msg in messages:
rows = msg.get('rows', {})
res.append(_("%s. -- Field: %s -- rows %s to %s") % (
msg.get('message', 'N/A'),
msg.get('field', 'N/A'),
rows.get('from', 'N/A'),
rows.get('to', 'N/A'))
)
return "\n \n".join(res)
def _parse_csv(self):
"""Parse stored CSV file in order to be usable by BaseModel.load method.
Manage base 64 decoding.
:param imp_id: current importer id
:returns: (head [list of first row], data [list of list])
"""
# We use tempfile in order to avoid memory error with large files
with tempfile.TemporaryFile() as src:
content = self.file
delimiter = '\t'
src.write(content)
with tempfile.TemporaryFile() as decoded:
src.seek(0)
base64.decode(src, decoded)
decoded.seek(0)
return self._prepare_csv_data(decoded, delimiter)
def _prepare_csv_data(self, csv_file, delimiter=","):
"""Parse a decoded CSV file and return head list and data list
:param csv_file: decoded CSV file
:param delimiter: CSV file delimiter char
:returns: (head [list of first row], data [list of list])
"""
try:
data = csv.DictReader(csv_file, fieldnames=self.HEAD_CRESUS,
delimiter=delimiter)
except csv.Error as error:
raise exceptions.Warning(
_('CSV file is malformed'),
_("Please choose the correct separator \n"
"the error detail is : \n %s") % repr(error)
)
# Generator does not work with orm.BaseModel.load
values = [x for x in data if x]
        return values
def _manage_load_results(self, result):
"""Manage the BaseModel.load function output and store exception.
Will generate success/failure report and store it into report field.
Manage commit and rollback even if load method uses PostgreSQL
Savepoints.
:param result: BaseModel.load returns
{ids: list(int)|False, messages: [Message]}
"""
        # Import successful
if not result['messages']:
self.state = 'done'
self.report = _("Lines imported")
self.imported_move_ids = result['ids']
else:
self.report = self.format_messages(result['messages'])
self.state = 'error'
@api.multi
def _standardise_data(self, data):
""" This function split one line of the CSV into multiple lines.
Cresus just write one line per move,
"""
new_openerp_data = []
tax_obj = self.env['account.tax']
account_obj = self.env['account.account']
cp = self.env.user.company_id
company_partner = cp.partner_id.name
standard_dict = dict(izip_longest(self.HEAD_ODOO, []))
previous_date = False
for line_cresus in data:
is_negative = False
current_date_french_format = datetime.strptime(line_cresus['date'],
'%d.%m.%Y')
current_openerp_date = fields.Date.to_string(
current_date_french_format)
default_value = standard_dict.copy()
if (not previous_date) or previous_date != current_openerp_date:
default_value.update({'date': current_openerp_date,
'ref': line_cresus['pce'],
'journal_id': self.journal_id.name,
'period_id': self.period_id.code
})
previous_date = current_openerp_date
else:
default_value.update({'date': None,
'ref': None,
'journal_id': None,
'period_id': None})
decimal_amount = float(
line_cresus['amount'].replace('\'', '').replace(' ', ''))
if decimal_amount < 0:
default_value.update({'line_id/credit': abs(decimal_amount),
'line_id/debit': 0.0,
'line_id/account_id':
line_cresus['debit']})
is_negative = True
else:
default_value.update({'line_id/debit': abs(decimal_amount),
'line_id/credit': 0.0,
'line_id/account_id':
line_cresus['debit']})
tax_code = None
analytic_code = None
tax_code_inverted = None
tax_current = None
analytic_code_inverted = None
if line_cresus['typtvat']:
tax_current = tax_obj.search([('tax_cresus_mapping',
'=',
line_cresus['typtvat']),
('price_include', '=', True)],
limit=1)
if tax_current or line_cresus['analytic_account']:
current_account = account_obj.search(
[('code', '=', default_value['line_id/account_id'])],
limit=1)
if current_account:
                # Search for accounts that have a deferral method
if current_account.user_type.close_method == 'none':
if tax_current:
tax_code = tax_current.name
analytic_code = line_cresus['analytic_account']
default_value.update({'line_id/account_tax_id': tax_code,
'line_id/partner_id': company_partner,
'line_id/name': line_cresus['ref'],
'line_id/analytic_account_id':
analytic_code})
new_openerp_data.append(default_value)
#
# Generated the second line inverted
#
inverted_default_value = default_value.copy()
inverted_default_value.update({'date': None,
'ref': None,
'journal_id': None,
'period_id': None})
if is_negative:
inverted_default_value.update({'line_id/debit':
abs(decimal_amount),
'line_id/credit': 0.0,
'line_id/account_id':
line_cresus['credit']})
else:
inverted_default_value.update({'line_id/debit': 0.0,
'line_id/credit':
abs(decimal_amount),
'line_id/account_id':
line_cresus['credit']})
            # Search for accounts that have a deferral method
if tax_current or line_cresus['analytic_account']:
current_account = account_obj.search([
('code', '=',
inverted_default_value.get('line_id/account_id'))])
if current_account:
if current_account.user_type.close_method == 'none':
if tax_current:
tax_code_inverted = tax_current['name']
analytic_code_inverted = line_cresus['analytic_account']
inverted_default_value.update({'line_id/account_tax_id':
tax_code_inverted,
'line_id/analytic_account_id':
analytic_code_inverted})
new_openerp_data.append(inverted_default_value)
return new_openerp_data
@api.multi
def _load_data(self, data):
"""Function that does the load of parsed CSV file.
If will log exception and susccess into the report fields.
:param data: CSV file content (list of data list)
"""
# Change data from dict to list of array
data_array = []
for data_item_dict in data:
data_item = []
for item in self.HEAD_ODOO:
data_item.append(data_item_dict[item])
data_array.append(data_item)
try:
res = self.env['account.move'].load(self.HEAD_ODOO,
data_array)
self._manage_load_results(res)
except Exception as exc:
ex_type, sys_exc, tb = sys.exc_info()
tb_msg = ''.join(traceback.format_tb(tb, 30))
_logger.error(tb_msg)
_logger.error(repr(exc))
self.report = _("Unexpected exception.\n %s \n %s" %
(repr(exc), tb_msg))
self.state = 'error'
finally:
if self.state == 'error':
self.env.cr.rollback()
self.write({'report': self.report, 'state': self.state})
return {}
@api.multi
def import_file(self):
data = self._parse_csv()
new_data = self._standardise_data(data)
return self._load_data(new_data)
| cgaspoz/l10n-switzerland | l10n_ch_import_cresus/wizard/l10n_ch_import_cresus.py | Python | agpl-3.0 | 14,106 |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 22 12:07:53 2014
@author: Gouthaman Balaraman
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
import re
import numpy as np
import os
#####################################################
# A bunch of constants used throughout the script.  #
#####################################################
_curdir= os.path.abspath(os.path.curdir)
_posdat = re.compile('(\w+):(\d+)px')
_topdat = re.compile('top:(\d+)px')
_leftdat = re.compile('left:(\d+)px')
# this is the full format with all columns; The numbers here bracket the columns
maptbl_long = [(0,75),(75,145),(145,212),(212,283),(283,350),(350,418),(418,486),
(486,554),(554,621),(621,688),(688,756),(756,823),(823,890),(890,958),
(958,1026),(1026,1094),(1094,1199)]
# This provides a mapping to the column with the text
mptbltxt = ['RD','MURDER','MANSLTR','FORCED_RAPE','ROBBERY','AGGRAV_ASSAULT',
'BURGLARY_RES','BURGLARY_COM','AUTO_BURG','GRAND_THEFT','PETTY_THEFT',
'BIKE_THEFT','AUTO_THEFT','ARSON','TOTAL_PART1','TOTAL_PART2','GRAND_TOTAL']
#this a truncate version I found for some months; The numbers here bracket the columns
maptbl_short=[(0,133),(133,194.5),(194.5,264),(264,329),(329,396),(396,466),(466,531),
(531,597),(597,667.5),(667.5,736),(736,803),(803,871),(871,938),(938,1004),(1004,1300)
]
def load_html(filename):
soup = BeautifulSoup(file(filename).read())
return soup
def grab_pages(soup):
return soup.body.find_all('div')
def cleanup_data(data):
# remove  
data = data.replace(u'\xa0','')
return data
def create_buckets(arr):
'''
Here we bin the rows based on 'top' value
'''
sarr = np.sort(arr)
# coarseness ; this is used to separate different rows
    crsns = 10  # np.mean(sdiff)
s = 0
prev = sarr[0]
buckets = []
for sa in sarr[1:]:
if sa-prev>crsns:
e = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
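# Worked example with hypothetical 'top' values: with sorted input
# [10, 12, 50, 52] and coarseness 10, the only row break falls between
# 12 and 50, so
#   create_buckets(np.array([10, 12, 50, 52]))
# returns [[(0, 31.0), (31.0, 71.0)], [0, 1]].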
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
'''
For a given page, here I use the position to tag it with a column number.
Then a data frame is created and the pivot_table option is construct back
a proper table to resemble the actual data set.
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
    dlst, dcnt = [], []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
This is the code that extracts the beautiful soup html document into
a bunch of nodes for easy processing
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
        if attrscls and attrscls[0] in ('ft01', 'ft03'):
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
dlist.append(ddict)
return dlist
def create_html(pdffile):
'''
Given a pdf file, this calls pdftohtml.exe to convert to html
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
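# The assembled command line looks like this (paths are illustrative only):
# pdftohtml.exe D:\files\PDF\January_2013.pdf -c -noframes D:\files\HTML\January_2013.html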
def convert_all_pdfs(pdfdir):
'''
Convenient method to loop over all the pdf files. Calls create_html
file in a loop.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
Does some clean-up, check sums to validate the data. This is a basic
check. Nothing is guaranteed!
'''
# do a checksum test
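    # The 13 Part-1 crime columns (mptbltxt[1:14]) must sum to TOTAL_PART1
    # row by row; squaring the differences makes any mismatch show up as a
    # nonzero checksum.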
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
ddf = ddf[mptbltxt]
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
def create_csv(htmlfile):
'''
This creates the csv file given a html file
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages = grab_pages(soup)
num_nodes = len(pages[0])
leftmrkr = 75 if num_nodes > 440 else 133 # to handle two pdf formats
mptbl = maptbl_long if num_nodes > 440 else maptbl_short
#filetype = 1 if num_nodes > 480 else 0 # 1 if long type else 0
pvts = []
for i,p in enumerate(pages):
print 'Page-'+str(i)
dlist = extract_nodes(p,leftmrkr)
#df = create_frame(dlist,mptbl0,mptbltxt,leftmrkr)
df = create_frame(dlist,mptbl,mptbltxt,leftmrkr)
pvts.append(df)
ddf = pd.concat(pvts)
        exclrows = set(['0' + str(i) for i in range(2000, 2020)]) | set(['%CHG'])
exclrows = exclrows & set(ddf.index)
ddf.drop(exclrows,inplace=True)
ddf.fillna(0,inplace=True)
#cleanup
ddf = _finalize_dataframe(ddf)
csvdir = os.path.join(_curdir,'files','CSV')
if not os.path.exists(csvdir):
os.makedirs(csvdir)
htmlfile = os.path.abspath(htmlfile)
fileprefix = os.path.split(htmlfile)[1].split('.html')[0]
csvfile = os.path.join(csvdir,fileprefix+".csv")
ddf.to_csv(csvfile)
except Exception as e:
print str(e)
def convert_all_htmls(htmldir):
'''
This is a top leve driver which calls create_csv in a loop
'''
for f in os.listdir(htmldir):
if f.endswith('.html'):
create_csv(os.path.join(htmldir,f))
#break
if __name__=='__main__':
'''
Here is a complete example to loop over all pdfs and create all csvs.
>>>pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
>>>convert_all_pdfs(pdfdir)
>>>htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
>>>convert_all_htmls(htmldir)
Or you can do individual file conversions:
>>>pdffile = os.path.join(pdfdir,'January_2013.pdf')
>>>create_html(pdffile)
'''
# Convert pdfs to html
pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
pdffile = os.path.join(pdfdir,'January_2013.pdf')
create_html(pdffile)
#convert_all_pdfs(pdfdir)
# Then convert html to csv
htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
html = os.path.join(htmldir,'January_2013.html')
create_csv(html)
#convert_all_htmls(htmldir) | gouthambs/OpenData | src/longbeach_crime_stats.py | Python | mit | 10,310 |
import random
from io import StringIO
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages import add_message, INFO
from django.urls import reverse
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.http import HttpResponse, JsonResponse
from django.utils import translation
from django.views.defaults import bad_request
from django.views.generic import View, TemplateView
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import UpdateView
from django.views.generic.edit import FormMixin, ProcessFormView
from django.db.models import functions
from nextgis_common.email_utils import send_templated_mail
from nextgis_common.supported_languages import SupportedLanguages
from qms_core.models import GeoService, TmsService, WmsService, WfsService, GeoJsonService
from qms_core.status_checker.service_checkers.geojson_checker import GeoJsonChecker
from qms_site.forms import (
TmsForm, WmsForm, WfsForm, GeoJsonForm,
AuthReportForm,
# NonAuthReportForm
)
from django.utils.translation import gettext_lazy as _
from qms_site.models import ReportType
from qms_site.model_telegram import ModelTelegram
from django.shortcuts import render
from django.template import RequestContext
class GeoserviceListView(TemplateView):
template_name = 'list.html'
class AboutView(TemplateView):
template_name = 'about.html'
class FAQView(TemplateView):
template_name = 'faq.html'
def handler404(request, exception):
template_name = '404.html'
response = render(request, template_name)
response.status_code = 404
return response
def handler500(request, *args, **argv):
template_name = '500.html'
response = render(request, template_name)
response.status_code = 500
return response
class ReportFormMixin(FormMixin, ProcessFormView):
"""
    Mixin for using the report popup form.
    Use only with simple views (such as TemplateView or other POST-less views).
"""
def get_success_url(self):
return reverse('site_geoservice_detail', kwargs={'pk': self.get_service_id()})
def get_service_id(self):
raise NotImplementedError
def get_context_data(self, **kwargs):
if 'report_form' not in kwargs:
kwargs['report_form'] = self.get_form()
if 'restore_problem_service' not in kwargs:
kwargs['restore_problem_service'] = None
return super(ReportFormMixin, self).get_context_data(**kwargs)
def get_initial(self):
if self.request.user.is_authenticated:
return {'reported_email': self.request.user.email}
else:
return {}
def get_form_class(self):
return AuthReportForm
# if self.request.user.is_authenticated:
# return AuthReportForm
# else:
# return NonAuthReportForm
def form_valid(self, form):
report_form = form
# get service
service = get_object_or_404(GeoService, id=self.get_service_id())
# save message
report = report_form.save(commit=False)
report.geo_service = service
if self.request.user.is_authenticated:
report.reported = self.request.user
report.save()
context = {
'reported_user': str(report.reported) if report.reported else None,
'reported_email': report.reported_email,
'service_url': self.request.build_absolute_uri(reverse('site_geoservice_detail', kwargs={'pk': service.id})),
'report_type': ReportType.choices[report.report_type],
'report_message': report.report_message,
}
# send email to service author
if service.submitter and service.submitter.email:
with translation.override(service.submitter.locale):
send_templated_mail('qms_site/email/user_report_for_author', service.submitter.email, context)
# send copy to message submitter
if report.reported_email:
send_templated_mail('qms_site/email/user_report_for_submitter', report.reported_email, context)
elif self.request.user.is_authenticated and self.request.user.email:
with translation.override(self.request.user.locale):
send_templated_mail('qms_site/email/user_report_for_submitter', self.request.user.email, context)
# send copy to admin TODO: TEMPORARY ADDRESS. MAKE ANY OPTIONS
if settings.DEFAULT_FROM_EMAIL:
with translation.override(SupportedLanguages.EN):
send_templated_mail('qms_site/email/user_report_for_admin', settings.DEFAULT_FROM_EMAIL, context)
# add message for user
add_message(self.request, INFO, _('Your message was sent to service author and QMS admins'))
redirect_url = self.get_success_url()
return redirect(redirect_url)
def form_invalid(self, form):
kwargs = self.kwargs
kwargs['report_form'] = form
kwargs['restore_problem_service'] = self.get_service_id() # for restore form
return self.render_to_response(self.get_context_data(**kwargs))
class GeoserviceDetailView(TemplateView, ReportFormMixin):
template_name = 'detail.html'
def get_context_data(self, **kwargs):
service = get_object_or_404(GeoService.objects.select_related('tmsservice')
.select_related('wmsservice')
.select_related('wfsservice'),
id=kwargs['pk'])
kwargs['service'] = service
kwargs['service_guid'] = str(service.guid)
kwargs['can_user_delete'] = False
user = self.request.user
if service.submitter == user:
kwargs['can_user_delete'] = True
if user.id:
if user.groups.filter(name='MODIFICATION_API_USERS').exists():
kwargs['can_user_delete'] = True
if service.type == TmsService.service_type:
tms_url_pattern, tms_subdomains = service.tmsservice.get_url_pattern_and_subdomains()
kwargs['leaflet_tms_url'] = tms_url_pattern % {'subdomain': '{s}'}
kwargs['leaflet_tms_subdomains'] = list(map(str, tms_subdomains))
# Remove this block when the leaflet map is fixed for use subdomain
if len(tms_subdomains) > 0:
random_subdomain_index = random.randint(0, len(tms_subdomains)-1)
kwargs['leaflet_tms_url'] = tms_url_pattern % {'subdomain': tms_subdomains[random_subdomain_index]}
kwargs['body_class'] = 'admin'
return super(GeoserviceDetailView, self).get_context_data(**kwargs)
def get_service_id(self):
return int(self.kwargs['pk'])
class LicenseErrorsMixin:
def has_license_error(self, form):
lic_fields = ('license_name', 'license_url', 'copyright_text', 'copyright_url', 'terms_of_use_url',)
return any([error for error in form.errors.keys() if error in lic_fields])
class CreateServiceView(LicenseErrorsMixin, LoginRequiredMixin, TemplateView):
template_name = 'create.html'
acceptable_forms = (TmsForm.__name__, WmsForm.__name__, WfsForm.__name__, GeoJsonForm.__name__)
def get_context_data(self, **kwargs):
context = super(CreateServiceView, self).get_context_data(**kwargs)
forms = {
TmsForm.__name__: TmsForm(initial={'z_min': 0, 'z_max': 19, 'epsg': 3857, 'y_origin_top': True}),
WmsForm.__name__: WmsForm(),
WfsForm.__name__: WfsForm(),
GeoJsonForm.__name__: GeoJsonForm()
}
if 'form' in kwargs and kwargs['form'].__class__.__name__ in self.acceptable_forms:
forms[kwargs['form'].__class__.__name__] = kwargs['form']
if 'error_form_type' in kwargs and kwargs['error_form_type'] in self.acceptable_forms:
context['error_form_type'] = kwargs['error_form_type']
context['forms'] = forms
return context
def post(self, request, *args, **kwargs):
form_class_name = request.POST.get('service_type', None)
if not form_class_name:
return bad_request(request, _('Invalid form param: service_type'))
if form_class_name not in self.acceptable_forms:
return bad_request(request, _('Invalid form param: service_type'))
form_class = globals()[form_class_name]
form = form_class(**self.get_form_kwargs())
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def get_form_kwargs(self):
kwargs = {}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.submitter = self.request.user
self.object.updated_at = functions.Now()
self.object.save()
o = self.object
ModelTelegram.on_create(o.type, o.id, o.name, o.submitter, o.url)
return HttpResponseRedirect(reverse('site_geoservice_detail', kwargs={'pk': self.object.id},))
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form, error_form_type=form.__class__.__name__, lic_error=self.has_license_error(form)))
class EditServiceView(LicenseErrorsMixin, LoginRequiredMixin, UpdateView):
template_name = 'edit.html'
queryset = GeoService.objects\
.select_related('tmsservice')\
.select_related('wmsservice')\
.select_related('wfsservice')\
.select_related('geojsonservice')
def get_object(self, queryset=None):
model_map = {
TmsService.service_type: lambda x: x.tmsservice,
WmsService.service_type: lambda x: x.wmsservice,
WfsService.service_type: lambda x: x.wfsservice,
GeoJsonService.service_type: lambda x: x.geojsonservice
}
obj = super(EditServiceView, self).get_object(queryset=queryset)
if obj:
return model_map[obj.type](obj)
return obj
def get_context_data(self, **kwargs):
context = super(EditServiceView, self).get_context_data(**kwargs)
context['form_name'] = context['form'].__class__.__name__
return context
def get_form_class(self):
form_map = {
TmsService.service_type: TmsForm,
WmsService.service_type: WmsForm,
WfsService.service_type: WfsForm,
GeoJsonService.service_type: GeoJsonForm
}
obj = self.get_object()
return form_map[obj.type]
def get_success_url(self):
return reverse('site_geoservice_detail', kwargs={'pk': self.object.id},)
def get(self, request, *args, **kwargs):
if not self.check_submitter(request):
return HttpResponseForbidden()
return super(EditServiceView, self).get(self, request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if not self.check_submitter(request):
return HttpResponseForbidden()
return super(EditServiceView, self).post(self, request, *args, **kwargs)
def check_submitter(self, request):
obj = self.get_object()
return obj.submitter == request.user
def form_valid(self, form):
self.object = form.save()
self.object.updated_at = functions.Now()
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form, lic_error=self.has_license_error(form)))
class GeoserviceBoundaryView(LoginRequiredMixin, View):
def get(self, request, pk, *args, **kwargs):
geoservice = get_object_or_404(GeoService, pk=pk)
        out = StringIO()
out.write(geoservice.boundary.geojson)
response = HttpResponse(out.getvalue(), content_type='application/txt')
response['Content-Disposition'] = 'attachment; filename=boundary.geojson'
return response
class GeoserviceDataView(View):
def get(self, request, pk, *args, **kwargs):
geoservice = get_object_or_404(GeoService, pk=pk)
response = {}
if geoservice.type == GeoJsonService.service_type:
service = geoservice.get_typed_instance()
checker = GeoJsonChecker(service=service)
service_check = checker.check()
if hasattr(service_check, "data"):
response["data"] = service_check.data
else:
response["cumulative_status"] = getattr(service_check, "cumulative_status")
response["error_text"] = getattr(service_check, "error_text")
return JsonResponse(response)
| nextgis/quickmapservices_server | qms_server/qms_site/views.py | Python | gpl-2.0 | 13,007 |
#!/usr/bin/env python3
import json
import sys
from librip.ctxmngrs import timer
from librip.decorators import print_result
from librip.gens import field, gen_random
from librip.iterators import Unique
# encoding=utf8
path = "/Users/bestK1ng/Code/Web/IU5/5 семестр/Разработка интернет-приложений/Lab4/data_light.json"
# Here the `path` variable should receive the path
# to the file that was passed at program start
with open(path, "r", encoding='utf8') as f:
data = json.load(f)
# Next, implement all the functions from the assignment,
# replacing `raise NotImplemented`
# Important!
# Functions 1 through 3 must each be implemented in a single line
# Function 4 may take up to 3 lines,
# and no line may be longer than 80 characters
@print_result
def f1(arg):
jobs = list(field(arg, "job-name"))
jobs = Unique(jobs, ignore_case=True)
jobs = sorted(jobs)
return jobs
@print_result
def f2(arg):
jobs = list(filter(lambda x: "программист" in x.lower(), arg))
return jobs
@print_result
def f3(arg):
    jobs = list(map(lambda x: x + " с опытом Python", arg))
return jobs
@print_result
def f4(arg):
jobs = list(arg)
salaries = list(gen_random(100000, 200000, len(jobs)))
salaries = list(map(lambda x: "зарплата " + str(x) + " руб.", salaries))
full_info = list(zip(jobs, salaries))
return full_info
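# Illustrative only (salaries are random): f4(["Программист"]) could return
# [("Программист", "зарплата 123456 руб.")]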
with timer():
f4(f3(f2(f1(data))))
| bestK1ngArthur/IU5 | Term 5/Development of Internet applications/Lab4/ex_6.py | Python | mit | 1,707 |
from pytest import fixture
from functional.core import (
builder,
PreparedImagesOutputChecker,
PDFDocumentChecker,
DjVuDocumentChecker)
@fixture()
def checker_classes():
""" Run all checkers in one test for optimization reason. """
return [
PreparedImagesOutputChecker,
PDFDocumentChecker,
DjVuDocumentChecker]
@fixture()
def toc_checker_classes():
return [PDFDocumentChecker, DjVuDocumentChecker]
def put_transform_contents(builder, directory):
builder.save_transform_ini(
directory,
"[transform]\n" +
"justconvert: yes\n")
def check_all_valid(builder, checkers):
for class_ in checkers:
assert builder.valid(class_)
def check_all_invalid(builder, checkers):
for class_ in checkers:
assert not builder.valid(class_)
def test_checker_valid_page(builder, checker_classes):
builder.create_unused_image("000-001", "0001.jpg")
builder.create_used_image("001-002", "0001.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
put_transform_contents(builder, "001-002")
builder.run_program()
check_all_valid(builder, checker_classes)
def test_checker_invalid_page(builder, checker_classes):
builder.create_used_image("000-001", "0001.jpg")
builder.create_unused_image("001-002", "0001.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
put_transform_contents(builder, "001-002")
builder.run_program()
check_all_invalid(builder, checker_classes)
def test_checker_valid_order(builder, checker_classes):
builder.create_used_image("000-001", "0000.jpg")
builder.create_used_image("000-001", "0001.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
builder.run_program()
check_all_valid(builder, checker_classes)
def test_checker_valid_reference_override(builder, checker_classes):
builder.create_used_image("000-001", "0000.jpg")
builder.override_reference_image()
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
builder.run_program()
check_all_valid(builder, checker_classes)
def test_checker_invalid_reference_override(builder, checker_classes):
(builder.create_used_image("000-001", "0000.jpg")
.add_border(20, 20, 20, 20, (0, 0, 0)))
(builder.override_reference_image()
.add_border(50, 50, 50, 50, (0, 0, 0)))
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
builder.run_program()
check_all_invalid(builder, checker_classes)
def test_checker_invalid_order(builder, checker_classes):
builder.create_used_image("000-001", "0001.jpg")
builder.create_used_image("000-001", "0000.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
builder.run_program()
check_all_invalid(builder, checker_classes)
def test_checker_invalid_count(builder, checker_classes):
builder.create_used_image("000-002", "0000.jpg")
builder.create_used_image("000-002", "0001.jpg")
builder.create_unused_image("000-002", "0002.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-002")
builder.run_program()
check_all_invalid(builder, checker_classes)
def prepare_three_images(builder):
for i in range(1, 4):
builder.create_used_image("001-003", "%04d.jpg" % i)
builder.save_images()
put_transform_contents(builder, "001-003")
def test_checker_valid_toc(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"],
[0, 3, "Page 3"]
])
builder.run_program()
check_all_valid(builder, toc_checker_classes)
def test_checker_invalid_level_toc(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"]
])
builder.run_program()
builder.save_toc([
[0, 1, "Page 1"],
[0, 2, "Page 2"]
])
check_all_invalid(builder, toc_checker_classes)
def test_checker_invalid_pagenum_toc(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"]
])
builder.run_program()
builder.save_toc([
[0, 1, "Page 1"],
[1, 3, "Page 2"]
])
check_all_invalid(builder, toc_checker_classes)
def test_checker_invalid_description_toc(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"]
])
builder.run_program()
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2 2 2"]
])
check_all_invalid(builder, toc_checker_classes)
def test_checker_invalid_toc_extra_line(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"]
])
builder.run_program()
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"],
[2, 3, "Page 3"]
])
check_all_invalid(builder, toc_checker_classes)
| atrosinenko/lecture-notes-compiler | tests/functional/core_test.py | Python | mit | 5,334 |
#!/usr/bin/env python
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Configment interface
>>> class TestCfg(Configment):
... CONFIGSPEC_SOURCE = '''
... [abc]
... x = integer(default=3)
... '''
>>> cfg = TestCfg()
>>> cfg["abc"]["x"]
3
>>>
"""
import os
import validate
import six
from .configobj_wrap import ConfigObjWrap
from .meta_configment import MetaConfigment
from .configment_validator import ConfigmentValidator
from .pathname import Pathname
from .environment import load_configspec
__author__ = "Simone Campagna"
__all__ = [
'create_configment_class',
'Configment',
'ConfigmentValidateError',
]
class ConfigmentValidateError(validate.ValidateError):
def __str__(self):
return "validation failed: {}".format(self.args[0])
class BaseConfigment(ConfigObjWrap):
CONFIGSPEC = None
DEFAULT_MODE_HIDE = "hide"
DEFAULT_MODE_SHOW = "show"
DEFAULT_MODES = [DEFAULT_MODE_HIDE, DEFAULT_MODE_SHOW]
DEFAULT_MODE = DEFAULT_MODE_HIDE
def __init__(self, filename=None, default_mode=None):
super(BaseConfigment, self).__init__(
infile=None,
configspec=self.__class__.CONFIGSPEC,
unrepr=True,
interpolation=False,
indent_type=" ",
stringify=True,
)
if default_mode is None:
default_mode = self.DEFAULT_MODE
self.default_mode = default_mode
self.set_filename(filename)
if self.filename is not None:
self.load_file(filename, throw_on_errors=True)
else:
self.initialize(throw_on_errors=False)
def set_filename(self, filename=None):
super(BaseConfigment, self).set_filename(filename)
if self.filename is None:
self._base_dir = os.getcwd()
else:
self._base_dir = os.path.dirname(os.path.abspath(filename))
def do_validation(self, base_dir=None, reset=False, throw_on_errors=False):
if base_dir is None:
base_dir = self._base_dir
validator = ConfigmentValidator()
copy = self.default_mode == self.DEFAULT_MODE_SHOW
result = super(BaseConfigment, self).validate(validator, preserve_errors=True, copy=copy)
result = self.filter_validation_result(result)
self.set_paths(base_dir, reset=reset)
if throw_on_errors and result:
raise ConfigmentValidateError(result)
c_result = ConfigObjWrap(
infile=result,
stringify=True,
unrepr=True,
indent_type=' ',
)
return c_result
@six.add_metaclass(MetaConfigment)
class Configment(BaseConfigment):
def __init__(self, filename=None, default_mode=None):
super(Configment, self).__init__(
filename=filename,
default_mode=default_mode,
)
def impl_initialize(self, throw_on_errors=False):
try:
return self.do_validation(reset=False, throw_on_errors=throw_on_errors)
except: # pylint: disable=bare-except
return False
def impl_load_file(self, filename, throw_on_errors=False):
default_base_dir = Pathname.get_default_base_dir()
Pathname.set_default_base_dir(self._base_dir)
self.set_filename(filename)
self.reload()
try:
result = self.do_validation(base_dir=self._base_dir, reset=True, throw_on_errors=throw_on_errors)
finally:
Pathname.set_default_base_dir(default_base_dir)
return result
def impl_dump_s(self, stream=None, filename=None, throw_on_errors=False):
default_base_dir = Pathname.get_default_base_dir()
try:
if filename is not None:
base_dir = os.path.dirname(os.path.normpath(os.path.abspath(filename)))
else:
base_dir = self._base_dir
Pathname.set_default_base_dir(base_dir)
self.do_validation(base_dir=base_dir, reset=False, throw_on_errors=throw_on_errors)
self.write(stream)
finally:
Pathname.set_default_base_dir(default_base_dir)
def create_configment_class(configspec_filename, class_name=None, dir_list=None):
if class_name is None:
class_name = os.path.splitext(os.path.basename(configspec_filename))[0]
class_bases = (Configment, )
class_dict = {
'CONFIGSPEC_SOURCE': load_configspec(configspec_filename, dir_list=dir_list),
}
return MetaConfigment(class_name, class_bases, class_dict)
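# A minimal usage sketch (file names are hypothetical):
#
# MyConfig = create_configment_class('myapp.configspec')
# cfg = MyConfig('myapp.cfg')  # validates against the loaded configspec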
| simone-campagna/py-configment | src/configment/configment.py | Python | apache-2.0 | 5,069 |
"""Views for Zinnia quick entry"""
from urllib import urlencode
from django import forms
from django.utils.html import linebreaks
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_str
from django.contrib.auth.decorators import permission_required
from zinnia.models import Entry
from zinnia.managers import DRAFT
from zinnia.managers import PUBLISHED
class QuickEntryForm(forms.Form):
"""Form for posting an entry quickly"""
title = forms.CharField(required=True, max_length=255)
content = forms.CharField(required=True)
tags = forms.CharField(required=False, max_length=255)
@permission_required('zinnia.add_entry')
def view_quick_entry(request):
"""View for quickly post an Entry"""
if request.POST:
form = QuickEntryForm(request.POST)
if form.is_valid():
entry_dict = form.cleaned_data
status = PUBLISHED
if 'save_draft' in request.POST:
status = DRAFT
entry_dict['content'] = linebreaks(entry_dict['content'])
entry_dict['slug'] = slugify(entry_dict['title'])
entry_dict['status'] = status
entry = Entry.objects.create(**entry_dict)
entry.sites.add(Site.objects.get_current())
entry.authors.add(request.user)
return redirect(entry)
data = {'title': smart_str(request.POST.get('title', '')),
'content': smart_str(linebreaks(request.POST.get(
'content', ''))),
'tags': smart_str(request.POST.get('tags', '')),
'slug': slugify(request.POST.get('title', '')),
'authors': request.user.pk,
'sites': Site.objects.get_current().pk}
return redirect('%s?%s' % (reverse('admin:zinnia_entry_add'),
urlencode(data)))
return redirect('admin:zinnia_entry_add')
| jfdsmit/django-blog-zinnia | zinnia/views/quick_entry.py | Python | bsd-3-clause | 2,059 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-17 02:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rii_Api', '0009_auto_20160916_2116'),
]
operations = [
migrations.AddField(
model_name='player',
name='imageInfo',
field=models.CharField(default='', max_length=90),
),
]
| SimonHerrera/rock-island-independents | api/rii_Api/migrations/0010_player_imageinfo.py | Python | mit | 462 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class MailingConfig(AppConfig):
name = 'mailing'
verbose_name = _("Mailing")
| Aladom/django-mailing | mailing/apps.py | Python | mit | 252 |
#***************************************************************************
#* *
#* Copyright (c) 2015 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__= "FreeCAD GbXml exporter"
__author__ = "Yorik van Havre"
__url__ = "http://www.freecadweb.org"
import os,FreeCAD,Draft
if FreeCAD.GuiUp:
from DraftTools import translate
else:
def translate(ctx,txt):
return txt
if open.__module__ == '__builtin__':
pyopen = open # because we'll redefine open below
def export(objectslist,filename):
if len(objectslist) != 1:
FreeCAD.Console.PrintError(translate("Arch","This exporter can currently only export one site object"))
return
site = objectslist[0]
if Draft.getType(site) != "Site":
FreeCAD.Console.PrintError(translate("Arch","This exporter can currently only export one site object"))
return
filestream = pyopen(filename,"wb")
# header
filestream.write( '<?xml version="1.0"?>\n' )
    filestream.write( '<!-- Exported by FreeCAD %s -->\n' % (FreeCAD.Version()[0]+FreeCAD.Version()[1]+FreeCAD.Version()[2]) )
filestream.write( '<gbXML xmlns="http://www.gbxml.org/schema" xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.gbxml.org/schema" temperatureUnit="C" lengthUnit="Millimeters" areaUnit="SquareMeters" volumeUnit="CubicMeters" useSIUnitsForResults="false">\n' )
filestream.write( '\n' )
# campus
filestream.write( '<Campus id="%s">\n' % site.Name )
filestream.write( '<Location>\n' )
filestream.write( ' <ZipcodeOrPostalCode>%s</ZipcodeOrPostalCode>\n' % site.PostalCode )
filestream.write( '</Location>\n' )
# building
for building in site.Group:
if Draft.getType(building) == "Building":
area = 10000.0 # TODO calculate
            filestream.write( '    <Building id="%s" buildingType="%s">\n' % (building.Name,building.BuildingType) )
            filestream.write( '    <Area>%f</Area>\n' % area )
# space
for space in Draft.getGroupContents(building):
if Draft.getType(space) == "Space":
zone = "BLABLA" # TODO build values
filestream.write( ' <Space id="%s" spaceType="%s" zoneIdRef="%s">\n' % (space.Name, space.SpaceType, zone) )
                    filestream.write( '        <Name>%s</Name>\n' % space.Label )
                    filestream.write( '        <Description>%s</Description>\n' % space.Description )
#filestream.write( ' <PeopleNumber unit="NumberOfPeople">1.00000</PeopleNumber>\n' )
#filestream.write( ' <LightPowerPerArea unit="WattPerSquareFoot">1.50000</LightPowerPerArea>\n' )
#filestream.write( ' <EquipPowerPerArea unit="WattPerSquareFoot">0.00000</EquipPowerPerArea>\n' )
                    filestream.write( '        <Area>%f</Area>\n' % space.Area )
                    filestream.write( '    </Space>\n' )
filestream.write( ' </Building>\n' )
filestream.write( '</Campus>\n' )
filestream.write( '</gbXML>' )
'''
<Area>18000.00000</Area>
<Space id="sp1_LabandCorridor_Labcorridor" spaceType="LaboratoryOffice" zoneIdRef="z1_LabandCorridor">
<Name>Lab corridor</Name>
<Description/>
<PeopleNumber unit="NumberOfPeople">1.00000</PeopleNumber>
<LightPowerPerArea unit="WattPerSquareFoot">1.50000</LightPowerPerArea>
<EquipPowerPerArea unit="WattPerSquareFoot">0.00000</EquipPowerPerArea>
<Area>800.00000</Area>
<Volume>6400.00000</Volume>
<ShellGeometry id="geo_sp1_LabandCorridor_Labcorridor">
<ClosedShell>
<PolyLoop>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>480.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>480.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
... repeat
</ClosedShell>
</ShellGeometry>
<CADObjectId>21E2</CADObjectId>
</Space>
... repeat
</Building>
<Surface id="su1_Floor" surfaceType="UndergroundSlab" constructionIdRef="construction-1">
<Name>Floor</Name>
<AdjacentSpaceId spaceIdRef="sp1_LabandCorridor_Labcorridor"/>
<RectangularGeometry>
<Azimuth>90.00</Azimuth>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1320.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<Tilt>180.00</Tilt>
<Height>480.00000</Height>
<Width>240.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1320.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>480.00000</Coordinate>
<Coordinate>1320.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>1320.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>480.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</Surface>
<Surface id="su44_Surface4" surfaceType="ExteriorWall" constructionIdRef="construction-3">
<Name>Surface 4</Name>
<AdjacentSpaceId spaceIdRef="sp7_Office_Office6"/>
<RectangularGeometry>
<Azimuth>180.00</Azimuth>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<Tilt>90.00</Tilt>
<Height>114.00000</Height>
<Width>480.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1440.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1440.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>114.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>114.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
<Opening id="su44-op1_Opening1" openingType="OperableWindow" windowTypeIdRef="windowType-1">
<Name>Opening1</Name>
<RectangularGeometry>
<CartesianPoint>
<Coordinate>96.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<Height>72.00000</Height>
<Width>48.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>1056.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1104.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1104.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1056.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</Opening>
<Opening id="su44-op2_Opening2" openingType="OperableWindow" windowTypeIdRef="windowType-1">
<Name>Opening2</Name>
<RectangularGeometry>
<CartesianPoint>
<Coordinate>216.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<Height>72.00000</Height>
<Width>48.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>1176.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1224.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1224.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1176.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</Opening>
<Opening id="su44-op3_Opening3" openingType="OperableWindow" windowTypeIdRef="windowType-1">
<Name>Opening3</Name>
<RectangularGeometry>
<CartesianPoint>
<Coordinate>336.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<Height>72.00000</Height>
<Width>48.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>1296.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1344.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1344.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1296.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</Opening>
</Surface>
... repeat
</Campus>
<Construction id="construction-1">
<Name>Standard</Name>
<Description/>
</Construction>
<Construction id="construction-2">
<Name>Standard</Name>
<Description/>
</Construction>
<Construction id="construction-3">
<Name>Standard</Name>
<Description/>
</Construction>
<WindowType id="windowType-1">
<Name>Standard</Name>
<Description/>
</WindowType>
<Zone id="z1_LabandCorridor">
<Name>Lab and Corridor</Name>
<Description/>
<AirChangesPerHour>0"</AirChangesPerHour>
<FlowPerArea unit="CFMPerSquareFoot">0.00000</FlowPerArea>
<FlowPerPerson unit="CFM">0.00000</FlowPerPerson>
<OAFlowPerArea unit="CFMPerSquareFoot">2.37037</OAFlowPerArea>
<OAFlowPerPerson unit="CFM">812.69841</OAFlowPerPerson>
<DesignHeatT>72.00000</DesignHeatT>
<DesignCoolT>75.00000</DesignCoolT>
</Zone>
<Zone id="z2_Office">
<Name>Office</Name>
<Description/>
<AirChangesPerHour>1"</AirChangesPerHour>
<FlowPerArea unit="CFMPerSquareFoot">0.13333</FlowPerArea>
<FlowPerPerson unit="CFM">20.00000</FlowPerPerson>
<OAFlowPerArea unit="CFMPerSquareFoot">0.05333</OAFlowPerArea>
<OAFlowPerPerson unit="CFM">8.00000</OAFlowPerPerson>
<DesignHeatT>72.00000</DesignHeatT>
<DesignCoolT>75.00000</DesignCoolT>
</Zone>
<Zone id="z3_Warehouse">
<Name>Warehouse</Name>
<Description/>
<AirChangesPerHour>5/32"</AirChangesPerHour>
<FlowPerArea unit="CFMPerSquareFoot">0.05000</FlowPerArea>
<FlowPerPerson unit="CFM">25.71429</FlowPerPerson>
<OAFlowPerArea unit="CFMPerSquareFoot">0.00000</OAFlowPerArea>
<OAFlowPerPerson unit="CFM">0.00000</OAFlowPerPerson>
<DesignHeatT>60.00000</DesignHeatT>
<DesignCoolT>80.00000</DesignCoolT>
</Zone>
<Results xmlns="" id="sp3_LabandCorridor_Lab1" objectIdRef="sp3_LabandCorridor_Lab1" resultsType="CoolingLoad" unit="BtuPerHour">
<ObjectId>sp3_LabandCorridor_Lab1</ObjectId>
<Value>5534.837890625</Value>
<Description>Space Cooling Roof Cond</Description>
<CADObjectId>21E3</CADObjectId>
</Results>
... repeat
</gbXML>'''
| cypsun/FreeCAD | src/Mod/Arch/importGBXML.py | Python | lgpl-2.1 | 17,885 |
# Replace all key events in JS files and HTML files
# with our standard key input event.
# For more details, see the DOC dir.
# Key events are replaced globally and handled uniformly. | lifeinoppo/littlefishlet-scode | SRC/Server/Components/input/python/keyInput.py | Python | gpl-2.0 | 175
# -*- coding: utf-8 -*-
###############################################################################
#
# GetTariff
# Returns an individual Tariff object with a given id.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetTariff(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetTariff Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetTariff, self).__init__(temboo_session, '/Library/Genability/TariffData/GetTariff')
def new_input_set(self):
return GetTariffInputSet()
def _make_result_set(self, result, path):
return GetTariffResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetTariffChoreographyExecution(session, exec_id, path)
class GetTariffInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetTariff
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AppID(self, value):
"""
Set the value of the AppID input for this Choreo. ((conditional, string) The App ID provided by Genability.)
"""
super(GetTariffInputSet, self)._set_input('AppID', value)
def set_AppKey(self, value):
"""
Set the value of the AppKey input for this Choreo. ((required, string) The App Key provided by Genability.)
"""
super(GetTariffInputSet, self)._set_input('AppKey', value)
def set_MasterTariffID(self, value):
"""
Set the value of the MasterTariffID input for this Choreo. ((required, integer) The master tariff id. This can be retrieved in the output of the GetTariffs Choreo.)
"""
super(GetTariffInputSet, self)._set_input('MasterTariffID', value)
def set_PopulateProperties(self, value):
"""
Set the value of the PopulateProperties input for this Choreo. ((optional, boolean) Set to "true" to populate the properties for the returned Tariffs.)
"""
super(GetTariffInputSet, self)._set_input('PopulateProperties', value)
def set_PopulateRates(self, value):
"""
Set the value of the PopulateRates input for this Choreo. ((optional, boolean) Set to "true" to populate the rate details for the returned Tariffs.)
"""
super(GetTariffInputSet, self)._set_input('PopulateRates', value)
class GetTariffResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetTariff Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Genability.)
"""
return self._output.get('Response', None)
class GetTariffChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetTariffResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Genability/TariffData/GetTariff.py | Python | apache-2.0 | 4,018 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
import report
import tests
| tvtsoft/odoo8 | addons/sale_contract/__init__.py | Python | agpl-3.0 | 141 |
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
import pytest
import odl
from odl.trafos.util.ft_utils import (
reciprocal_grid, dft_preprocess_data, dft_postprocess_data,
_interp_kernel_ft)
from odl.trafos.fourier import (
DiscreteFourierTransform, DiscreteFourierTransformInverse,
FourierTransform)
from odl.util import (all_almost_equal, never_skip, skip_if_no_pyfftw,
noise_element,
is_real_dtype, conj_exponent, complex_dtype)
from odl.util.testutils import simple_fixture
# --- pytest fixtures --- #
impl = simple_fixture('impl', [never_skip('numpy'),
skip_if_no_pyfftw('pyfftw')])
exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 1.5])
sign = simple_fixture('sign', ['-', '+'])
# --- helper functions --- #
def _params_from_dtype(dtype):
    halfcomplex = is_real_dtype(dtype)
return halfcomplex, complex_dtype(dtype)
def sinc(x):
# numpy.sinc scales by pi, we don't want that
return np.sinc(x / np.pi)
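# (With this unscaled convention, sinc(np.pi) == np.sinc(1.0) == 0 and sinc(0) == 1.)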
# ---- DiscreteFourierTransform ---- #
def test_dft_init(impl):
# Just check if the code runs at all
shape = (4, 5)
dom = odl.discr_sequence_space(shape)
dom_nonseq = odl.uniform_discr([0, 0], [1, 1], shape)
dom_f32 = odl.discr_sequence_space(shape, dtype='float32')
ran = odl.discr_sequence_space(shape, dtype='complex128')
ran_c64 = odl.discr_sequence_space(shape, dtype='complex64')
ran_hc = odl.discr_sequence_space((3, 5), dtype='complex128')
# Implicit range
DiscreteFourierTransform(dom, impl=impl)
DiscreteFourierTransform(dom_nonseq, impl=impl)
DiscreteFourierTransform(dom_f32, impl=impl)
DiscreteFourierTransform(dom, axes=(0,), impl=impl)
DiscreteFourierTransform(dom, axes=(0, -1), impl=impl)
DiscreteFourierTransform(dom, axes=(0,), halfcomplex=True, impl=impl)
DiscreteFourierTransform(dom, impl=impl, sign='+')
# Explicit range
DiscreteFourierTransform(dom, range=ran, impl=impl)
DiscreteFourierTransform(dom_f32, range=ran_c64, impl=impl)
DiscreteFourierTransform(dom, range=ran, axes=(0,), impl=impl)
DiscreteFourierTransform(dom, range=ran, axes=(0,), impl=impl, sign='+')
DiscreteFourierTransform(dom, range=ran, axes=(0, -1), impl=impl)
DiscreteFourierTransform(dom, range=ran_hc, axes=(0,), impl=impl,
halfcomplex=True)
def test_dft_init_raise():
# Test different error scenarios
shape = (4, 5)
dom = odl.discr_sequence_space(shape)
dom_f32 = odl.discr_sequence_space(shape, dtype='float32')
# Bad types
with pytest.raises(TypeError):
DiscreteFourierTransform(dom.dspace)
with pytest.raises(TypeError):
DiscreteFourierTransform(dom, dom.dspace)
# Illegal arguments
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, impl='fftw')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, axes=(1, 2))
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, axes=(1, -3))
# Badly shaped range
bad_ran = odl.discr_sequence_space((3, 5), dtype='complex128')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, bad_ran)
bad_ran = odl.discr_sequence_space((10, 10), dtype='complex128')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, bad_ran)
bad_ran = odl.discr_sequence_space((4, 5), dtype='complex128')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, bad_ran, halfcomplex=True)
bad_ran = odl.discr_sequence_space((4, 3), dtype='complex128')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, bad_ran, halfcomplex=True, axes=(0,))
# Bad data types
bad_ran = odl.discr_sequence_space(shape, dtype='complex64')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, bad_ran)
bad_ran = odl.discr_sequence_space(shape, dtype='float64')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, bad_ran)
bad_ran = odl.discr_sequence_space((4, 3), dtype='float64')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, bad_ran, halfcomplex=True)
bad_ran = odl.discr_sequence_space((4, 3), dtype='complex128')
with pytest.raises(ValueError):
DiscreteFourierTransform(dom_f32, bad_ran, halfcomplex=True)
# Bad sign
with pytest.raises(ValueError):
DiscreteFourierTransform(dom, sign=-1)
def test_dft_range():
# 1d
shape = 10
dom = odl.discr_sequence_space(shape, dtype='complex128')
fft = DiscreteFourierTransform(dom)
true_ran = odl.discr_sequence_space(shape, dtype='complex128')
assert fft.range == true_ran
# 3d
shape = (3, 4, 5)
ran = odl.discr_sequence_space(shape, dtype='complex64')
fft = DiscreteFourierTransform(ran)
true_ran = odl.discr_sequence_space(shape, dtype='complex64')
assert fft.range == true_ran
# 3d, with axes and halfcomplex
shape = (3, 4, 5)
axes = (-1, -2)
ran_shape = (3, 3, 5)
dom = odl.discr_sequence_space(shape, dtype='float32')
fft = DiscreteFourierTransform(dom, axes=axes, halfcomplex=True)
true_ran = odl.discr_sequence_space(ran_shape, dtype='complex64')
assert fft.range == true_ran
# ---- DiscreteFourierTransformInverse ---- #
def test_idft_init(impl):
# Just check if the code runs at all; this uses the init function of
# DiscreteFourierTransform, so we don't need exhaustive tests here
shape = (4, 5)
ran = odl.discr_sequence_space(shape, dtype='complex128')
ran_hc = odl.discr_sequence_space(shape, dtype='float64')
dom = odl.discr_sequence_space(shape, dtype='complex128')
dom_hc = odl.discr_sequence_space((3, 5), dtype='complex128')
# Implicit range
DiscreteFourierTransformInverse(dom, impl=impl)
# Explicit range
DiscreteFourierTransformInverse(ran, domain=dom, impl=impl)
DiscreteFourierTransformInverse(ran_hc, domain=dom_hc, axes=(0,),
impl=impl, halfcomplex=True)
def test_dft_call(impl):
# 2d, complex, all ones and random back & forth
shape = (4, 5)
dft_dom = odl.discr_sequence_space(shape, dtype='complex64')
dft = DiscreteFourierTransform(domain=dft_dom, impl=impl)
idft = DiscreteFourierTransformInverse(range=dft_dom, impl=impl)
assert dft.domain == idft.range
assert dft.range == idft.domain
one = dft.domain.one()
one_dft1 = dft(one, flags=('FFTW_ESTIMATE',))
one_dft2 = dft.inverse.inverse(one, flags=('FFTW_ESTIMATE',))
one_dft3 = dft.adjoint.adjoint(one, flags=('FFTW_ESTIMATE',))
true_dft = [[20, 0, 0, 0, 0], # along all axes by default
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
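    # (The all-ones input has 4 * 5 = 20 samples, so the zero-frequency bin is 20.)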
assert np.allclose(one_dft1, true_dft)
assert np.allclose(one_dft2, true_dft)
assert np.allclose(one_dft3, true_dft)
one_idft1 = idft(one_dft1, flags=('FFTW_ESTIMATE',))
one_idft2 = dft.inverse(one_dft1, flags=('FFTW_ESTIMATE',))
one_idft3 = dft.adjoint(one_dft1, flags=('FFTW_ESTIMATE',))
assert np.allclose(one_idft1, one)
assert np.allclose(one_idft2, one)
assert np.allclose(one_idft3, one)
rand_arr = noise_element(dft_dom)
rand_arr_dft = dft(rand_arr, flags=('FFTW_ESTIMATE',))
rand_arr_idft = idft(rand_arr_dft, flags=('FFTW_ESTIMATE',))
assert (rand_arr_idft - rand_arr).norm() < 1e-6
# 2d, halfcomplex, first axis
shape = (4, 5)
axes = 0
dft_dom = odl.discr_sequence_space(shape, dtype='float32')
dft = DiscreteFourierTransform(domain=dft_dom, impl=impl, halfcomplex=True,
axes=axes)
idft = DiscreteFourierTransformInverse(range=dft_dom, impl=impl,
halfcomplex=True, axes=axes)
assert dft.domain == idft.range
assert dft.range == idft.domain
one = dft.domain.one()
one_dft = dft(one, flags=('FFTW_ESTIMATE',))
true_dft = [[4, 4, 4, 4, 4], # transform axis shortened
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
assert np.allclose(one_dft, true_dft)
one_idft1 = idft(one_dft, flags=('FFTW_ESTIMATE',))
one_idft2 = dft.inverse(one_dft, flags=('FFTW_ESTIMATE',))
assert np.allclose(one_idft1, one)
assert np.allclose(one_idft2, one)
rand_arr = noise_element(dft_dom)
rand_arr_dft = dft(rand_arr, flags=('FFTW_ESTIMATE',))
rand_arr_idft = idft(rand_arr_dft, flags=('FFTW_ESTIMATE',))
assert (rand_arr_idft - rand_arr).norm() < 1e-6
def test_dft_sign(impl):
    # Test if the FT sign behaves as expected, i.e. that the FTs with signs
    # '+' and '-' have the same real parts and opposite imaginary parts.
# 2d, complex, all ones and random back & forth
shape = (4, 5)
dft_dom = odl.discr_sequence_space(shape, dtype='complex64')
dft_minus = DiscreteFourierTransform(domain=dft_dom, impl=impl, sign='-')
dft_plus = DiscreteFourierTransform(domain=dft_dom, impl=impl, sign='+')
arr = dft_dom.element([[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]])
arr_dft_minus = dft_minus(arr, flags=('FFTW_ESTIMATE',))
arr_dft_plus = dft_plus(arr, flags=('FFTW_ESTIMATE',))
assert all_almost_equal(arr_dft_minus.real, arr_dft_plus.real)
assert all_almost_equal(arr_dft_minus.imag, -arr_dft_plus.imag)
assert all_almost_equal(dft_minus.inverse(arr_dft_minus), arr)
assert all_almost_equal(dft_plus.inverse(arr_dft_plus), arr)
assert all_almost_equal(dft_minus.inverse.inverse(arr), dft_minus(arr))
assert all_almost_equal(dft_plus.inverse.inverse(arr), dft_plus(arr))
# 2d, halfcomplex, first axis
shape = (4, 5)
axes = (0,)
dft_dom = odl.discr_sequence_space(shape, dtype='float32')
arr = dft_dom.element([[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]])
dft = DiscreteFourierTransform(
domain=dft_dom, impl=impl, halfcomplex=True, sign='-', axes=axes)
arr_dft_minus = dft(arr, flags=('FFTW_ESTIMATE',))
arr_idft_minus = dft.inverse(arr_dft_minus, flags=('FFTW_ESTIMATE',))
assert all_almost_equal(arr_idft_minus, arr)
with pytest.raises(ValueError):
DiscreteFourierTransform(
domain=dft_dom, impl=impl, halfcomplex=True, sign='+', axes=axes)
def test_dft_init_plan(impl):
# 2d, halfcomplex, first axis
shape = (4, 5)
axes = 0
dft_dom = odl.discr_sequence_space(shape, dtype='float32')
dft = DiscreteFourierTransform(dft_dom, impl=impl, axes=axes,
halfcomplex=True)
if impl != 'pyfftw':
with pytest.raises(ValueError):
dft.init_fftw_plan()
with pytest.raises(ValueError):
dft.clear_fftw_plan()
else:
dft.init_fftw_plan()
# Make sure plan can be used
dft._fftw_plan(dft.domain.element().asarray(),
dft.range.element().asarray())
dft.clear_fftw_plan()
assert dft._fftw_plan is None
# ---- FourierTransform ---- #
def test_fourier_trafo_range(exponent, floating_dtype):
# Check if the range is initialized correctly. Encompasses the init test
# Testing R2C for real dtype, else C2C
# 1D
shape = 10
space_discr = odl.uniform_discr(0, 1, shape, exponent=exponent,
impl='numpy', dtype=floating_dtype)
dft = FourierTransform(space_discr, halfcomplex=True, shift=True)
assert dft.range.field == odl.ComplexNumbers()
    halfcomplex = is_real_dtype(floating_dtype)
assert dft.range.grid == reciprocal_grid(dft.domain.grid,
halfcomplex=halfcomplex,
shift=True)
assert dft.range.exponent == conj_exponent(exponent)
# 3D
shape = (3, 4, 5)
space_discr = odl.uniform_discr([0] * 3, [1] * 3, shape, exponent=exponent,
impl='numpy', dtype=floating_dtype)
dft = FourierTransform(space_discr, halfcomplex=True, shift=True)
assert dft.range.field == odl.ComplexNumbers()
    halfcomplex = is_real_dtype(floating_dtype)
assert dft.range.grid == reciprocal_grid(dft.domain.grid,
halfcomplex=halfcomplex,
shift=True)
assert dft.range.exponent == conj_exponent(exponent)
# shift must be True in the last axis
if halfcomplex:
with pytest.raises(ValueError):
FourierTransform(space_discr, shift=(True, True, False))
if exponent != 2.0:
with pytest.raises(NotImplementedError):
dft.adjoint
with pytest.raises(TypeError):
FourierTransform(dft.domain.partition)
def test_fourier_trafo_init_plan(impl, floating_dtype):
# Not supported, skip
if floating_dtype == np.dtype('float16') and impl == 'pyfftw':
return
shape = 10
halfcomplex, _ = _params_from_dtype(floating_dtype)
space_discr = odl.uniform_discr(0, 1, shape, dtype=floating_dtype)
ft = FourierTransform(space_discr, impl=impl, halfcomplex=halfcomplex)
if impl != 'pyfftw':
with pytest.raises(ValueError):
ft.init_fftw_plan()
with pytest.raises(ValueError):
ft.clear_fftw_plan()
else:
ft.init_fftw_plan()
# Make sure plan can be used
ft._fftw_plan(ft.domain.element().asarray(),
ft.range.element().asarray())
ft.clear_fftw_plan()
assert ft._fftw_plan is None
# With temporaries
ft.create_temporaries(r=True, f=False)
if impl != 'pyfftw':
with pytest.raises(ValueError):
ft.init_fftw_plan()
with pytest.raises(ValueError):
ft.clear_fftw_plan()
else:
ft.init_fftw_plan()
# Make sure plan can be used
ft._fftw_plan(ft.domain.element().asarray(),
ft.range.element().asarray())
ft.clear_fftw_plan()
assert ft._fftw_plan is None
ft.create_temporaries(r=False, f=True)
if impl != 'pyfftw':
with pytest.raises(ValueError):
ft.init_fftw_plan()
with pytest.raises(ValueError):
ft.clear_fftw_plan()
else:
ft.init_fftw_plan()
# Make sure plan can be used
ft._fftw_plan(ft.domain.element().asarray(),
ft.range.element().asarray())
ft.clear_fftw_plan()
assert ft._fftw_plan is None
def test_fourier_trafo_create_temp():
shape = 10
space_discr = odl.uniform_discr(0, 1, shape, dtype='complex64')
ft = FourierTransform(space_discr)
ft.create_temporaries()
assert ft._tmp_r is not None
assert ft._tmp_f is not None
ift = ft.inverse
assert ift._tmp_r is not None
assert ift._tmp_f is not None
ft.clear_temporaries()
assert ft._tmp_r is None
assert ft._tmp_f is None
def test_fourier_trafo_call(impl, floating_dtype):
# Test if all variants can be called without error
# Not supported, skip
if floating_dtype == np.dtype('float16') and impl == 'pyfftw':
return
shape = 10
halfcomplex, _ = _params_from_dtype(floating_dtype)
space_discr = odl.uniform_discr(0, 1, shape, dtype=floating_dtype)
ft = FourierTransform(space_discr, impl=impl, halfcomplex=halfcomplex)
ift = ft.inverse
one = space_discr.one()
assert np.allclose(ift(ft(one)), one)
# With temporaries
ft.create_temporaries()
ift = ft.inverse # shares temporaries
one = space_discr.one()
assert np.allclose(ift(ft(one)), one)
def test_fourier_trafo_charfun_1d():
# Characteristic function of [0, 1], its Fourier transform is
# given by exp(-1j * y / 2) * sinc(y/2)
def char_interval(x):
return (x >= 0) & (x <= 1)
def char_interval_ft(x):
return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi)
# Base version
discr = odl.uniform_discr(-2, 2, 40, impl='numpy')
dft_base = FourierTransform(discr)
# Complex version, should be as good
discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='complex64')
dft_complex = FourierTransform(discr)
# Without shift
discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='complex64')
dft_complex_shift = FourierTransform(discr, shift=False)
for dft in [dft_base, dft_complex, dft_complex_shift]:
func_true_ft = dft.range.element(char_interval_ft)
func_dft = dft(char_interval)
assert (func_dft - func_true_ft).norm() < 5e-6
def test_fourier_trafo_scaling():
# Test if the FT scales correctly
# Characteristic function of [0, 1], its Fourier transform is
# given by exp(-1j * y / 2) * sinc(y/2)
def char_interval(x):
return (x >= 0) & (x <= 1)
def char_interval_ft(x):
return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi)
fspace = odl.FunctionSpace(odl.IntervalProd(-2, 2),
field=odl.ComplexNumbers())
discr = odl.uniform_discr_fromspace(fspace, 40, impl='numpy')
dft = FourierTransform(discr)
for factor in (2, 1j, -2.5j, 1 - 4j):
func_true_ft = factor * dft.range.element(char_interval_ft)
func_dft = dft(factor * fspace.element(char_interval))
assert (func_dft - func_true_ft).norm() < 1e-6
def test_fourier_trafo_sign(impl):
    # Test if the FT sign behaves as expected, i.e. that the FTs with signs
    # '+' and '-' have the same real parts and opposite imaginary parts.
def char_interval(x):
return (x >= 0) & (x <= 1)
discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='complex64')
ft_minus = FourierTransform(discr, sign='-', impl=impl)
ft_plus = FourierTransform(discr, sign='+', impl=impl)
func_ft_minus = ft_minus(char_interval)
func_ft_plus = ft_plus(char_interval)
assert np.allclose(func_ft_minus.real, func_ft_plus.real)
assert np.allclose(func_ft_minus.imag, -func_ft_plus.imag)
assert np.allclose(ft_minus.inverse.inverse(char_interval),
ft_minus(char_interval))
assert np.allclose(ft_plus.inverse.inverse(char_interval),
ft_plus(char_interval))
discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='float32')
with pytest.raises(ValueError):
FourierTransform(discr, sign='+', impl=impl, halfcomplex=True)
with pytest.raises(ValueError):
FourierTransform(discr, sign=-1, impl=impl)
def test_fourier_trafo_inverse(impl, sign):
# Test if the inverse really is the inverse
def char_interval(x):
return (x >= 0) & (x <= 1)
# Complex-to-complex
discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='complex64')
discr_char = discr.element(char_interval)
ft = FourierTransform(discr, sign=sign, impl=impl)
assert all_almost_equal(ft.inverse(ft(char_interval)), discr_char)
assert all_almost_equal(ft.adjoint(ft(char_interval)), discr_char)
# Half-complex
discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='float32')
ft = FourierTransform(discr, impl=impl, halfcomplex=True)
assert all_almost_equal(ft.inverse(ft(char_interval)), discr_char)
def char_rect(x):
return (x[0] >= 0) & (x[0] <= 1) & (x[1] >= 0) & (x[1] <= 1)
# 2D with axes, C2C
discr = odl.uniform_discr([-2, -2], [2, 2], (20, 10), impl='numpy',
dtype='complex64')
discr_rect = discr.element(char_rect)
for axes in [(0,), 1]:
ft = FourierTransform(discr, sign=sign, impl=impl, axes=axes)
assert all_almost_equal(ft.inverse(ft(char_rect)), discr_rect)
assert all_almost_equal(ft.adjoint(ft(char_rect)), discr_rect)
# 2D with axes, halfcomplex
discr = odl.uniform_discr([-2, -2], [2, 2], (20, 10), impl='numpy',
dtype='float32')
discr_rect = discr.element(char_rect)
for halfcomplex in [False, True]:
if halfcomplex and sign == '+':
continue # cannot mix halfcomplex with sign
for axes in [(0,), (1,)]:
ft = FourierTransform(discr, sign=sign, impl=impl, axes=axes,
halfcomplex=halfcomplex)
assert all_almost_equal(ft.inverse(ft(char_rect)), discr_rect)
assert all_almost_equal(ft.adjoint(ft(char_rect)), discr_rect)
def test_fourier_trafo_hat_1d():
# Hat function as used in linear interpolation. It is not so
# well discretized by nearest neighbor interpolation, so a larger
# error is to be expected.
def hat_func(x):
out = np.where(x < 0, 1 + x, 1 - x)
out[x < -1] = 0
out[x > 1] = 0
return out
def hat_func_ft(x):
return sinc(x / 2) ** 2 / np.sqrt(2 * np.pi)
# Using a single-precision implementation, should be as good
# With linear interpolation in the discretization, should be better?
for interp in ['nearest', 'linear']:
discr = odl.uniform_discr(-2, 2, 101, impl='numpy', dtype='float32',
interp=interp)
dft = FourierTransform(discr)
func_true_ft = dft.range.element(hat_func_ft)
func_dft = dft(hat_func)
assert (func_dft - func_true_ft).norm() < 0.001
def test_fourier_trafo_complex_sum():
# Sum of characteristic function and hat function, both with
# known FT's.
def hat_func(x):
out = 1 - np.abs(x)
out[x < -1] = 0
out[x > 1] = 0
return out
def hat_func_ft(x):
return sinc(x / 2) ** 2 / np.sqrt(2 * np.pi)
def char_interval(x):
return (x >= 0) & (x <= 1)
def char_interval_ft(x):
return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi)
discr = odl.uniform_discr(-2, 2, 200, impl='numpy', dtype='complex128')
dft = FourierTransform(discr, shift=False)
func = discr.element(hat_func) + 1j * discr.element(char_interval)
func_true_ft = (dft.range.element(hat_func_ft) +
1j * dft.range.element(char_interval_ft))
func_dft = dft(func)
assert (func_dft - func_true_ft).norm() < 0.001
def test_fourier_trafo_gaussian_1d():
# Gaussian function, will be mapped to itself. Truncation error is
# relatively large, though, we need a large support.
def gaussian(x):
return np.exp(-x ** 2 / 2)
discr = odl.uniform_discr(-10, 10, 201, impl='numpy')
dft = FourierTransform(discr)
func_true_ft = dft.range.element(gaussian)
func_dft = dft(gaussian)
assert (func_dft - func_true_ft).norm() < 0.001
def test_fourier_trafo_freq_shifted_charfun_1d():
# Frequency-shifted characteristic function: mult. with
# exp(-1j * b * x) corresponds to shifting the FT by b.
def fshift_char_interval(x):
return np.exp(-1j * x * np.pi) * ((x >= -0.5) & (x <= 0.5))
def fshift_char_interval_ft(x):
return sinc((x + np.pi) / 2) / np.sqrt(2 * np.pi)
# Number of points is very important here (aliasing)
discr = odl.uniform_discr(-2, 2, 400, impl='numpy',
dtype='complex64')
dft = FourierTransform(discr)
func_true_ft = dft.range.element(fshift_char_interval_ft)
func_dft = dft(fshift_char_interval)
assert (func_dft - func_true_ft).norm() < 0.001
def test_dft_with_known_pairs_2d():
# Frequency-shifted product of characteristic functions
def fshift_char_rect(x):
# Characteristic function of the cuboid
# [-1, 1] x [1, 2]
return (x[0] >= -1) & (x[0] <= 1) & (x[1] >= 1) & (x[1] <= 2)
def fshift_char_rect_ft(x):
# FT is a product of shifted and frequency-shifted sinc functions
# 1st comp.: 2 * sinc(y)
# 2nd comp.: exp(-1j * y * 3/2) * sinc(y/2)
# Overall factor: (2 * pi)^(-1)
return (2 * sinc(x[0]) *
np.exp(-1j * x[1] * 3 / 2) * sinc(x[1] / 2) /
(2 * np.pi))
discr = odl.uniform_discr([-2] * 2, [2] * 2, (100, 400), impl='numpy',
dtype='complex64')
dft = FourierTransform(discr)
func_true_ft = dft.range.element(fshift_char_rect_ft)
func_dft = dft(fshift_char_rect)
assert (func_dft - func_true_ft).norm() < 0.001
def test_fourier_trafo_completely():
# Complete explicit test of all FT components on two small examples
# Discretization with 4 points
discr = odl.uniform_discr(-2, 2, 4, dtype='complex')
# Interval boundaries -2, -1, 0, 1, 2
assert np.allclose(discr.partition.cell_boundary_vecs[0],
[-2, -1, 0, 1, 2])
# Grid points -1.5, -0.5, 0.5, 1.5
assert np.allclose(discr.grid.coord_vectors[0],
[-1.5, -0.5, 0.5, 1.5])
# First test function, symmetric. Can be represented exactly in the
# discretization.
def f(x):
return (x >= -1) & (x <= 1)
def fhat(x):
return np.sqrt(2 / np.pi) * sinc(x)
# Discretize f, check values
f_discr = discr.element(f)
assert np.allclose(f_discr, [0, 1, 1, 0])
# "s" = shifted, "n" = not shifted
# Reciprocal grids
recip_s = reciprocal_grid(discr.grid, shift=True)
recip_n = reciprocal_grid(discr.grid, shift=False)
assert np.allclose(recip_s.coord_vectors[0],
np.linspace(-np.pi, np.pi / 2, 4))
assert np.allclose(recip_n.coord_vectors[0],
np.linspace(-3 * np.pi / 4, 3 * np.pi / 4, 4))
# Range
range_part_s = odl.uniform_partition_fromgrid(recip_s)
range_s = odl.uniform_discr_frompartition(range_part_s, dtype='complex')
range_part_n = odl.uniform_partition_fromgrid(recip_n)
range_n = odl.uniform_discr_frompartition(range_part_n, dtype='complex')
# Pre-processing
preproc_s = [1, -1, 1, -1]
preproc_n = [np.exp(1j * 3 / 4 * np.pi * k) for k in range(4)]
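    # (With shift=True the pre-processing reduces to multiplication by (-1)^k,
    # while shift=False uses the complex phase ramp above.)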
fpre_s = dft_preprocess_data(f_discr, shift=True)
fpre_n = dft_preprocess_data(f_discr, shift=False)
assert np.allclose(fpre_s, f_discr * discr.element(preproc_s))
assert np.allclose(fpre_n, f_discr * discr.element(preproc_n))
# FFT step, replicating the _call_numpy method
fft_s = np.fft.fftn(fpre_s, s=discr.shape, axes=[0])
fft_n = np.fft.fftn(fpre_n, s=discr.shape, axes=[0])
assert np.allclose(fft_s, [0, -1 + 1j, 2, -1 - 1j])
assert np.allclose(
fft_n, [np.exp(1j * np.pi * (3 - 2 * k) / 4) +
np.exp(1j * np.pi * (3 - 2 * k) / 2)
for k in range(4)])
# Interpolation kernel FT
interp_s = np.sinc(np.linspace(-1 / 2, 1 / 4, 4)) / np.sqrt(2 * np.pi)
interp_n = np.sinc(np.linspace(-3 / 8, 3 / 8, 4)) / np.sqrt(2 * np.pi)
assert np.allclose(interp_s,
_interp_kernel_ft(np.linspace(-1 / 2, 1 / 4, 4),
interp='nearest'))
assert np.allclose(interp_n,
_interp_kernel_ft(np.linspace(-3 / 8, 3 / 8, 4),
interp='nearest'))
# Post-processing
postproc_s = np.exp(1j * np.pi * np.linspace(-3 / 2, 3 / 4, 4))
postproc_n = np.exp(1j * np.pi * np.linspace(-9 / 8, 9 / 8, 4))
fpost_s = dft_postprocess_data(
range_s.element(fft_s), real_grid=discr.grid, recip_grid=recip_s,
shift=[True], axes=(0,), interp='nearest')
fpost_n = dft_postprocess_data(
range_n.element(fft_n), real_grid=discr.grid, recip_grid=recip_n,
shift=[False], axes=(0,), interp='nearest')
assert np.allclose(fpost_s, fft_s * postproc_s * interp_s)
assert np.allclose(fpost_n, fft_n * postproc_n * interp_n)
# Comparing to the known result sqrt(2/pi) * sinc(x)
assert np.allclose(fpost_s, fhat(recip_s.coord_vectors[0]))
assert np.allclose(fpost_n, fhat(recip_n.coord_vectors[0]))
# Doing the exact same with direct application of the FT operator
ft_op_s = FourierTransform(discr, shift=True)
ft_op_n = FourierTransform(discr, shift=False)
assert ft_op_s.range.grid == recip_s
assert ft_op_n.range.grid == recip_n
ft_f_s = ft_op_s(f)
ft_f_n = ft_op_n(f)
assert np.allclose(ft_f_s, fhat(recip_s.coord_vectors[0]))
assert np.allclose(ft_f_n, fhat(recip_n.coord_vectors[0]))
# Second test function, asymmetric. Can also be represented exactly in the
# discretization.
def f(x):
return (x >= 0) & (x <= 1)
def fhat(x):
return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi)
# Discretize f, check values
f_discr = discr.element(f)
assert np.allclose(f_discr, [0, 0, 1, 0])
# Pre-processing
fpre_s = dft_preprocess_data(f_discr, shift=True)
fpre_n = dft_preprocess_data(f_discr, shift=False)
assert np.allclose(fpre_s, [0, 0, 1, 0])
assert np.allclose(fpre_n, [0, 0, -1j, 0])
# FFT step
fft_s = np.fft.fftn(fpre_s, s=discr.shape, axes=[0])
fft_n = np.fft.fftn(fpre_n, s=discr.shape, axes=[0])
assert np.allclose(fft_s, [1, -1, 1, -1])
assert np.allclose(fft_n, [-1j, 1j, -1j, 1j])
fpost_s = dft_postprocess_data(
range_s.element(fft_s), real_grid=discr.grid, recip_grid=recip_s,
shift=[True], axes=(0,), interp='nearest')
fpost_n = dft_postprocess_data(
range_n.element(fft_n), real_grid=discr.grid, recip_grid=recip_n,
shift=[False], axes=(0,), interp='nearest')
assert np.allclose(fpost_s, fft_s * postproc_s * interp_s)
assert np.allclose(fpost_n, fft_n * postproc_n * interp_n)
# Comparing to the known result exp(-1j*x/2) * sinc(x/2) / sqrt(2*pi)
assert np.allclose(fpost_s, fhat(recip_s.coord_vectors[0]))
assert np.allclose(fpost_n, fhat(recip_n.coord_vectors[0]))
# Doing the exact same with direct application of the FT operator
ft_f_s = ft_op_s(f)
ft_f_n = ft_op_n(f)
assert np.allclose(ft_f_s, fhat(recip_s.coord_vectors[0]))
assert np.allclose(ft_f_n, fhat(recip_n.coord_vectors[0]))
if __name__ == '__main__':
pytest.main([str(__file__.replace('\\', '/')), '-v'])
| bgris/ODL_bgris | odl/test/trafos/fourier_test.py | Python | gpl-3.0 | 31,209 |
'''
Check Yahoo finance currency data helper.
Update log: (date / version / author : comments)
2017-12-10 / 1.0.0 / Du Jiang : Creation
2017-12-13 / 2.0.0 / Du Jiang : Use new API
'''
from com.djs.learn.financeapi import CheckFinanceDataRequests
__data_type = 1
__inventory_info_file_path = "../../../../etc/CurrencyInfo.csv"
__result_output_file_path = "../../../../Temp/CurrencyDataY.json"
argv = ["-d", __data_type, "-i", __inventory_info_file_path,
"-o", __result_output_file_path]
CheckFinanceDataRequests.main(argv)
'''
Or run:
python CheckFinanceDataRequests.py -d 1 -i "../../../../etc/CurrencyData.csv" -o "../../../../Temp/CurrencyDataY.json"
'''
if __name__ == '__main__':
pass
| djsilenceboy/LearnTest | Python_Test/PyFinanceApiSample/com/djs/learn/test/TestCheckYahooFinanceCurrencyData.py | Python | apache-2.0 | 709 |
from tiledata import GetOsmTileData
from OsmMerge import OsmMerge
def test(z, x, y):
filenames = []
for i in (0, 1):
for j in (0, 1):
lx = x * 2 + i
ly = y * 2 + j
lz = z + 1
#print("Downloading subtile %d,%d at %d" % (x,y,z))
# download (or otherwise obtain) each subtile
filenames.append(GetOsmTileData(lz, lx, ly))
# merge them together
OsmMerge("merged.osm", z, filenames)
if __name__ == "__main__":
#test(14,8009,5443) # swansea
test(13, 4070, 2682) # leicester
| ryfx/modrana | modules/pyrender/testmerge.py | Python | gpl-3.0 | 581 |
#!/usr/bin/python
from __future__ import division
import rospy
import rospkg
from sensor_msgs.msg import MagneticField
from ieee2016_xmega_connector_ported.srv import GetHeading
import numpy as np
import matplotlib.pyplot as plt
import yaml
import os
from scipy import optimize
from numpy.linalg import eig, inv
rospack = rospkg.RosPack()
CALIBRATION_FILE_URI = os.path.join(rospack.get_path('ieee2016_xmega_connector_ported'), 'scripts/')
# Shamelessly taken from online guide ===========================
def fitEllipse(x,y):
x = x[:,np.newaxis]
y = y[:,np.newaxis]
D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
S = np.dot(D.T,D)
C = np.zeros([6,6])
C[0,2] = C[2,0] = 2; C[1,1] = -1
E, V = eig(np.dot(inv(S), C))
n = np.argmax(np.abs(E))
a = V[:,n]
return a
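# fitEllipse returns conic coefficients [A, B, C, D, E, F] of
# A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0; the helpers below unpack them as
# b = B/2, d = D/2, f = E/2 to match the textbook ellipse formulas.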
def ellipse_center(a):
b,c,d,f,g,a = a[1]/2.0, a[2], a[3]/2.0, a[4]/2.0, a[5], a[0]
num = b*b-a*c
x0=(c*d-b*f)/num
y0=(a*f-b*d)/num
return np.array([x0,y0])
def ellipse_angle_of_rotation(a):
b,c,d,f,g,a = a[1]/2.0, a[2], a[3]/2.0, a[4]/2.0, a[5], a[0]
if b == 0:
if a > c:
return 0
else:
return np.pi/2
else:
if a > c:
return np.arctan(2*b/(a-c))/2
else:
return np.pi/2 + np.arctan(2*b/(a-c))/2
def ellipse_axis_length(a):
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
up = 2.0*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
down1=(b*b-a*c)*( (c-a)*np.sqrt(1.0+4*b*b/((a-c)*(a-c)))-(c+a))
down2=(b*b-a*c)*( (a-c)*np.sqrt(1.0+4*b*b/((a-c)*(a-c)))-(c+a))
res1=np.sqrt(up/down1)
res2=np.sqrt(up/down2)
return np.array([res1, res2])
# Back to non stolen code =======================================
class Calibrate():
def __init__(self, topic_name, cache_length, calibration_file_name = "calibration.yaml"):
self.mag_proxy = rospy.ServiceProxy('/robot/xmega_connector/get_heading', GetHeading)
self.mag_field = None
self.cache_length = cache_length
self.calibration_file_name = calibration_file_name
self.x = np.array([])
self.y = np.array([])
plt.ion()
self.get_mag_field()
def get_mag_field(self):
# Get the field then make it a reasonable number. Not sure of units right now.
print "Running"
while len(self.x) < self.cache_length:
print "Running..."
magnetic_field = self.mag_proxy()
self.mag_field = np.array([magnetic_field.xData,magnetic_field.yData,magnetic_field.zData])
print len(self.x),self.mag_field
self.x = np.append(self.x,self.mag_field[0])
self.y = np.append(self.y,self.mag_field[1])
plt.scatter(self.x, self.y)
plt.show()
plt.pause(.01)
# Once we have saved 'cache_length' many points, start calibrating
self.done_caching()
def done_caching(self):
# Once we are done caching, display the uncalibrated data and start the calibration.
plt.scatter(self.x, self.y)
centroid = [np.mean(self.x),np.mean(self.y)]
plt.plot(centroid[0], centroid[1], 'ro')
plt.show()
self.generate_correction_matrix()
def generate_correction_matrix(self):
'''
        Fit an ellipse through the given points (using the helper code above).
        The fitted ellipse has a center position, major and minor axes, and an offset theta of the major axis from 0 rad.
        To correct, we construct a transformation matrix that translates the raw ellipse to the origin, rotates it so that
        the angle between the major axis and the positive x axis is 0, then scales the x values so that the major axis
        length matches the minor axis length.
'''
a = fitEllipse(self.x,self.y)
xc,yc = ellipse_center(a)
theta = ellipse_angle_of_rotation(a)
axis_len = ellipse_axis_length(a)
s = axis_len[0]/axis_len[1]
x_deviation = np.sqrt(sum((self.x-xc)**2)/len(self.x))
y_deviation = np.sqrt(sum((self.y-yc)**2)/len(self.y))
print "Calibration Results:"
print "========================================================="
print " Old Center at:",xc,yc
print " Old Angle:",theta
print " Old Maj/Min Axis Length:",axis_len
print " Old X Deviation:",x_deviation
print " Old Y Deviation:",y_deviation
print "========================================================="
# Generate the transformation matrix. Translate -> Rotate -> Scale
iron_matrix = np.array([
[ s*np.cos(theta), s*np.sin(theta), -xc*s*np.cos(theta)-yc*s*np.sin(theta)],
[ -np.sin(theta), np.cos(theta), xc*np.sin(theta)-yc*np.cos(theta)],
[ 0, 0, 1]])
print "Corrective Matrix:"
print iron_matrix
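        # Applying the homogeneous transform to one raw reading (sketch):
        #   raw = np.array([x_raw, y_raw, 1.0])
        #   x_cal, y_cal, _ = iron_matrix.dot(raw)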
pre_corrected = {
'xc' : float(xc),
'yc' : float(yc),
'theta' : float(theta),
'major_len' : float(axis_len[0]),
'minor_len' : float(axis_len[1]),
'x_dev' : float(x_deviation),
'y_dev': float(y_deviation)
}
        # The rest is not necessary for calibration, but is used to display the new calibrated info.
points = np.vstack((self.x,self.y,np.full(len(self.x),1,dtype=np.int32)))
corrected_points = np.dot(iron_matrix,points)
a = fitEllipse(*corrected_points[:2])
center = ellipse_center(a)
theta = ellipse_angle_of_rotation(a)
axis_len = ellipse_axis_length(a)
x_deviation = np.sqrt(sum((corrected_points[0])**2)/len(corrected_points[0]))
y_deviation = np.sqrt(sum((corrected_points[1])**2)/len(corrected_points[1]))
        # Quick note: the center should be very close to the origin, the major and minor axis lengths should be
        # very similar, and the deviations should be very close to 1, but the angle doesn't need to be 0. This is
        # because when the major and minor axis lengths are equal we have a circle, and therefore no calculable
        # angle offset from the x-axis.
print "========================================================="
print " New Center at:",xc,yc
print " New Angle*:",theta
print " New Maj/Min Axis Length:",axis_len
print " New X Deviation:",x_deviation
print " New Y Deviation:",y_deviation
print "========================================================="
print
post_corrected = {
'xc' : float(center[0]),
'yc' : float(center[1]),
'theta' : float(theta),
'major_len' : float(axis_len[0]),
'minor_len' : float(axis_len[1]),
'x_dev' : float(x_deviation),
'y_dev': float(y_deviation)
}
        # Print points, mostly for troubleshooting.
print "Old points:"
for i in range(len(self.x)):
print "(%.4f,%.4f),"%(self.x[i],self.y[i]),
print
print "New points:"
for i in range(len(self.x)):
print "(%.4f,%.4f),"%(corrected_points[0][i],corrected_points[1][i]),
print
plt.scatter(corrected_points[0], corrected_points[1])
centroid = [np.mean(corrected_points[0]),np.mean(corrected_points[1])]
plt.plot(centroid[0], centroid[1], 'ro')
#plt.show()
# Write the calibration data, duh.
print "Writing calibration data..."
details = {'pre_corrected':pre_corrected,'post_corrected':post_corrected}
self.write_to_file(iron_matrix,details)
def write_to_file(self, matrix, details):
details['correction_matrix'] = matrix.tolist()
print details['correction_matrix']
file_name = str(CALIBRATION_FILE_URI + self.calibration_file_name)
with open(file_name, 'w') as outfile:
outfile.write(yaml.dump(details, default_flow_style=False))
print
print "Calibration file: %s saved!" % self.calibration_file_name
# Exit program when we are done
rospy.signal_shutdown("Finished Calibration.")
def test_load(self):
'''
Just for testing the output of the yaml file - this can be removed later.
'''
file_name = str(CALIBRATION_FILE_URI + self.calibration_file_name)
with open(file_name, 'r') as infile:
            data = yaml.safe_load(infile)
print np.matrix(data['correction_matrix'])
if __name__ == "__main__":
rospy.init_node("magnetic_calibrator")
c = Calibrate("mag",100)
rospy.spin()
| ufieeehw/IEEE2016 | ros/ieee2016_navigation/scripts/mag_calibrator.py | Python | mit | 8,777 |
from __future__ import absolute_import, print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image
import seaborn.apionly as sns
import timeit
import rnmu.nmu as nmu
dir_name = '../results/'
if not os.path.exists(dir_name):
os.mkdir(dir_name)
dir_name += 'digits/'
if not os.path.exists(dir_name):
os.mkdir(dir_name)
imgs = [Image.open('../digits/digit2.png'),
Image.open('../digits/digit3.png'),
Image.open('../digits/digit5.png'),
Image.open('../digits/digit6.png'),
Image.open('../digits/digit8.png'),
Image.open('../digits/digit9.png')
]
imgs = [np.array(im.convert('L'), dtype=np.float) / 255. for im in imgs]
img_size = imgs[0].shape
mat = 1 - np.stack([im.flatten() for im in imgs], axis=1)
t = timeit.default_timer()
factors = nmu.recursive_nmu(mat, r=10, init='svd')
t = timeit.default_timer() - t
print('time {:.2f}'.format(t))
recs = sum(u.dot(v) for u, v in factors)
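# Each left factor u is (n_pixels, 1) and each right factor v is (1, n_images)
# (the plotting below relies on this), so u.dot(v) -- and hence recs -- matches
# mat's shape.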
with sns.axes_style("whitegrid"):
plt.figure()
for i, im in enumerate(imgs):
plt.subplot(1, len(imgs), i+1)
plt.imshow(im, interpolation='nearest', cmap='gray')
plt.tick_params(
axis='both',
which='both',
bottom='off',
top='off',
labelbottom='off',
left='off',
right='off',
labelleft='off')
plt.grid(b=False)
plt.tight_layout()
plt.savefig(dir_name + 'digits_original.pdf',
dpi=150, bbox_inches='tight')
plt.figure()
for i, (u, v) in enumerate(factors):
plt.subplot(1, len(factors), i + 1)
plt.imshow(1 - u.reshape(img_size), interpolation='nearest',
cmap='gray')
plt.tick_params(
axis='both',
which='both',
bottom='off',
top='off',
labelbottom='off',
left='off',
right='off',
labelleft='off')
plt.grid(b=False)
plt.title('F{}'.format(i + 1))
plt.tight_layout()
plt.savefig(dir_name + 'digits_left_factors.pdf',
dpi=150, bbox_inches='tight')
plt.figure()
for i in range(len(imgs)):
plt.subplot2grid((1, len(imgs)), (0, i))
plt.imshow(1 - recs[:, i].reshape(img_size), vmin=0, vmax=1,
interpolation='nearest', cmap='gray')
plt.tick_params(
axis='both',
which='both',
bottom='off',
top='off',
labelbottom='off',
left='off',
right='off',
labelleft='off')
plt.grid(b=False)
plt.tight_layout()
plt.savefig(dir_name + 'digits_reconstruction.pdf',
dpi=300, bbox_inches='tight')
for i in range(len(imgs)):
plt.figure()
plt.imshow(1 - recs[:, i].reshape(img_size), vmin=0, vmax=1,
interpolation='nearest', cmap='gray')
plt.tick_params(
axis='both',
which='both',
bottom='off',
top='off',
labelbottom='off',
left='off',
right='off',
labelleft='off')
plt.grid(b=False)
plt.tight_layout()
plt.savefig(dir_name + 'digits_reconstruction{}.pdf'.format(i),
dpi=300, bbox_inches='tight')
plt.figure(figsize=(8, 2.5))
for i in range(len(imgs)):
plt.subplot2grid((1, len(imgs)), (0, i))
x_vals = [v[0, i] for _, v in factors]
y_vals = np.arange(1, len(factors) + 1)
plt.hlines(y_vals, 0, x_vals, color='#e41a1c', linewidth=4)
plt.scatter(x_vals, y_vals, color='#e41a1c', marker='o', linewidth=4)
plt.xlim(-0.1, 1.1)
plt.ylim(0.5, len(factors) + 0.5)
plt.xticks([0, 0.5, 1])
plt.yticks(np.arange(len(factors)) + 1,
['F{}'.format(k + 1) for k in range(len(factors))])
plt.tight_layout()
plt.savefig(dir_name + 'digits_right_factors.pdf',
dpi=300, bbox_inches='tight')
plt.show()
| marianotepper/nmu_rfit | rnmu/test/test_toy_digits.py | Python | bsd-3-clause | 4,081 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/annotations.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import http_pb2 as google_dot_api_dot_http__pb2
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x1cgoogle/api/annotations.proto\x12\ngoogle.api\x1a\x15google/api/http.proto\x1a google/protobuf/descriptor.proto:E\n\x04http\x12\x1e.google.protobuf.MethodOptions\x18\xb0\xca\xbc" \x01(\x0b\x32\x14.google.api.HttpRuleBn\n\x0e\x63om.google.apiB\x10\x41nnotationsProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xa2\x02\x04GAPIb\x06proto3'
)
HTTP_FIELD_NUMBER = 72295728
http = DESCRIPTOR.extensions_by_name["http"]
if _descriptor._USE_C_DESCRIPTORS == False:
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(http)
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b"\n\016com.google.apiB\020AnnotationsProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\242\002\004GAPI"
# @@protoc_insertion_point(module_scope)
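# Usage sketch (hedged; not generated code): reading this extension from a
# method's options, assuming `method_desc` is a protobuf MethodDescriptor:
#   rule = method_desc.GetOptions().Extensions[http]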
| googleapis/python-api-common-protos | google/api/annotations_pb2.py | Python | apache-2.0 | 2,133 |
#!/usr/bin/env python
"""Display exposure state and countdown timer
History:
2010-03-10 ROwen Fix ticket #631: paused timer has wrong "sign".
2010-03-12 ROwen Changed to use Models.getModel.
Fixed bug introduced 2010-03-10.
2010-06-28 ROwen Removed unused global variable (thanks to pychecker).
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import Tkinter
import RO.Wdg
import TUI.PlaySound
import TUI.Models
class ExposureStateWdg(Tkinter.Frame):
"""A widget that displays the name of the exposure state and a countdown timer if relevant
"""
def __init__(self, master, helpURL=None):
Tkinter.Frame.__init__(self, master)
bossModel = TUI.Models.getModel("boss")
self.wasExposing = None # True, False or None if unknown
stateKeys = bossModel.exposureState.key.typedValues.vtypes[0].enumValues.keys()
maxStateLen = max(len(stateKey) for stateKey in stateKeys)
self.exposureStateWdg = RO.Wdg.StrLabel(
master = self,
width = maxStateLen,
anchor = "w",
helpText = "Status of current exposure",
helpURL = helpURL,
)
self.exposureStateWdg.grid(row=0, column=0, sticky="w")
self.expTimer = RO.Wdg.TimeBar(
master = self,
valueFormat = "%3.1f sec",
isHorizontal = True,
autoStop = True,
helpText = "Status of current exposure",
helpURL = helpURL,
)
self.expTimer.grid(row=0, column=1, sticky="ew")
self.columnconfigure(1, weight=1)
bossModel.exposureState.addCallback(self._exposureStateCallback)
def _exposureStateCallback(self, keyVar):
"""Exposure state has changed.
Fields are (probably):
- exposure state
- total time (sec)
- elapsed time (sec)
"""
expState = keyVar[0]
if expState is None:
self.wasExposing = None
self.expTimer.grid_remove()
self.expTimer.clear()
return
netTime = keyVar[1] if keyVar[1] is not None else 0.0 # change None to 0.0
elapsedTime = keyVar[2] if keyVar[2] is not None else netTime # change None to no time left
remTime = netTime - elapsedTime
# print "netTime=%r; elapsedTime=%r; remTime=%r" % (netTime, elapsedTime, remTime)
expStateLow = expState.lower()
isPaused = (expStateLow == "paused")
isExposing = expStateLow in ("integrating", "resume")
# set text state
if isPaused:
severity = RO.Constants.sevWarning
else:
severity = RO.Constants.sevNormal
self.exposureStateWdg.set(expState.title(), severity = severity, isCurrent=keyVar.isCurrent)
if not keyVar.isCurrent:
# cancel countdown timer; don't play sounds
self.wasExposing = None
self.expTimer.grid_remove()
self.expTimer.clear()
return
if not keyVar.isGenuine:
# data is cached; don't mess with the countdown timer or sounds
self.wasExposing = isExposing
return
# handle exposure timer
# print "netTime=%r" % (netTime,)
if netTime > 0:
# print "starting a timer; remTime = %r, netTime = %r" % (remTime, netTime)
# handle a countdown timer
# it should be stationary if expStateStr = paused,
# else it should count down
if isPaused:
# pause an exposure with the specified time remaining
self.expTimer.pause(
value = remTime,
)
else:
# count down anything else
self.expTimer.start(
value = remTime,
newMax = netTime,
countUp = False,
)
self.expTimer.grid()
else:
# hide countdown timer
# print "hide timer"
self.expTimer.grid_remove()
self.expTimer.clear()
# play sound, if appropriate
if self.wasExposing is not None \
and self.wasExposing != isExposing \
and self.winfo_ismapped():
# play the appropriate sound
if isExposing:
TUI.PlaySound.exposureBegins()
else:
TUI.PlaySound.exposureEnds()
self.wasExposing = isExposing
if __name__ == '__main__':
root = RO.Wdg.PythonTk()
import TestData
tuiModel = TestData.tuiModel
testFrame = ExposureStateWdg(tuiModel.tkRoot)
testFrame.pack(side="top", expand="yes")
Tkinter.Button(text="Demo", command=TestData.exposeAnimate).pack(side="top")
TestData.exposeStart()
tuiModel.reactor.run()
| r-owen/stui | TUI/Inst/BOSS/ExposureStateWdg.py | Python | bsd-3-clause | 4,960 |
# Exercise 24: More Practice
# Printing text and using tabs and new lines
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
# Variable with tabs, a multiline string, and newlines
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
# Printing
print "--------------"
print poem
print "--------------"
# Variable with math and print it
five = 10 - 2 + 3 - 6
print "This should be five: %s" % five
# Create function to calculate and return result
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
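# Quick check of the formula above: secret_formula(10000) returns
# (5000000, 5000, 50) -- 500 beans per "started", /1000 jars, /100 crates.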
# Variables unpacked from the function's return values
start_point = 10000
beans, jars, crates = secret_formula(start_point)
# Printing
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
# Create variable
start_point = start_point / 10
# Printing
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
# Study Drills
# 1. Make sure to do your checks: read it backward, read it
# out loud, and put comments above confusing parts.
# 2. Break the file on purpose, then run it to see what kinds
# of errors you get. Make sure you can fix it.
# Drill 2
# Traceback (most recent call last):
# File "ex24.py", line 34, in <module>
# print "With a starting point of: %d" % start_point
#NameError: name 'start_point' is not defined | Valka7a/python-playground | python-the-hard-way/24-more-practice.py | Python | mit | 1,650 |
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from oioioi.base import admin
from oioioi.base.utils import make_html_link
from oioioi.problems.admin import ProblemAdmin
from oioioi.programs.models import Test
from oioioi.testspackages.forms import TestsPackageInlineFormSet
from oioioi.testspackages.models import TestsPackage
class TestsPackageInline(admin.TabularInline):
formset = TestsPackageInlineFormSet
model = TestsPackage
can_delete = True
extra = 0
readonly_fields = ['package_link']
fields = ['name', 'description', 'tests', 'publish_date', 'package_link']
problem = None
def get_formset(self, request, obj=None, **kwargs):
self.problem = obj
return super(TestsPackageInline, self).get_formset(
request, obj, **kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
# It should filter tests from main_problem_instance
if db_field.name == 'tests' and getattr(self, 'problem', None):
kwargs['queryset'] = Test.objects.filter(
problem_instance=self.problem.main_problem_instance)
return super(TestsPackageInline, self).formfield_for_manytomany(
db_field, request, **kwargs)
def has_add_permission(self, request):
return True
def has_change_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
return True
def package_link(self, instance):
if instance.id is not None:
href = reverse('oioioi.testspackages.views.test_view',
kwargs={'package_id': instance.id, 'contest_id':
instance.problem.contest.id})
return make_html_link(href, instance.package.file.name)
return None
package_link.short_description = _("Package file")
class TestsPackageAdminMixin(object):
def __init__(self, *args, **kwargs):
super(TestsPackageAdminMixin, self).__init__(*args, **kwargs)
self.inlines = self.inlines + [TestsPackageInline]
ProblemAdmin.mix_in(TestsPackageAdminMixin)
| papedaniel/oioioi | oioioi/testspackages/admin.py | Python | gpl-3.0 | 2,180 |
import logging
import inspect
import astroid
import operator
import re
from inspect import Parameter
from ._docstring_parser import DocstringParser, TypeHintParser
from ._base_node import NodeEntityBase, get_qualified_name
from ._argtype import ArgType
KW_ARG_NAME = "**kwargs"
VALIDATION_REQUIRED_DUNDER = ["__init__",]
KWARG_NOT_REQUIRED_METHODS = ["close",]
TYPEHINT_NOT_REQUIRED_METHODS = ["close", "__init__"]
REGEX_ITEM_PAGED = r"~[\w.]*\.([\w]*)\s?[\[\(][^\n]*[\]\)]"
PAGED_TYPES = ["ItemPaged", "AsyncItemPaged",]
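# For example (illustrative type names), REGEX_ITEM_PAGED applied to
# "~azure.core.paging.ItemPaged[~azure.mod.Thing]" captures "ItemPaged".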
# Methods that are implementations of a known interface should be excluded from
# the lint check, e.g. get, update, keys
LINT_EXCLUSION_METHODS = [
"get",
"has_key",
"items",
"keys",
"update",
"values",
"close",
]
# Find types like ~azure.core.paging.ItemPaged, where the group returns ItemPaged.
# The regex is used to find and shorten such instances inside complex types,
# e.g. ~azure.core.ItemPaged.ItemPaged[~azure.communication.chat.ChatThreadInfo] to ItemPaged[ChatThreadInfo]
REGEX_FIND_LONG_TYPE = r"((?:~?)[\w.]+\.+([\w]+))"
def is_kwarg_mandatory(func_name):
return not func_name.startswith("_") and func_name not in KWARG_NOT_REQUIRED_METHODS
def is_typehint_mandatory(func_name):
return not func_name.startswith("_") and func_name not in TYPEHINT_NOT_REQUIRED_METHODS
class FunctionNode(NodeEntityBase):
"""Function node class represents parsed function signature.
Keyword args will be parsed and added to signature if docstring is available.
:param str: namespace
:param NodeEntityBase: parent_node
:param function: obj
:param bool: is_module_level
"""
def __init__(self, namespace, parent_node, obj, is_module_level=False):
super().__init__(namespace, parent_node, obj)
self.annotations = []
self.args = []
self.return_type = None
self.namespace_id = self.generate_id()
# Set name space level ID as full name
# Name space ID will be later updated for async methods
self.full_name = self.namespace_id
self.is_class_method = False
self.is_module_level = is_module_level
        # Some of the methods won't be listed in the API review.
        # For example, ABC methods are hidden if the class implements all of them.
self.hidden = False
self._inspect()
def _inspect(self):
logging.debug("Processing function {0}".format(self.name))
code = inspect.getsource(self.obj).strip()
        # We cannot simply do a "startswith" check here because annotations or decorators may precede the def
self.is_async = "async def" in code
self.def_key = "async def" if self.is_async else "def"
# Update namespace ID to reflect async status. Otherwise ID will conflict between sync and async methods
if self.is_async:
self.namespace_id += ":async"
# Find decorators and any annotations
try:
node = astroid.extract_node(inspect.getsource(self.obj))
if node.decorators:
self.annotations = [
"@{}".format(x.name)
for x in node.decorators.nodes
if hasattr(x, "name")
]
except:
# todo Update exception details in error
error_message = "Error in parsing decorators for function {}".format(
self.name
)
self.add_error(error_message)
logging.error(error_message)
self.is_class_method = "@classmethod" in self.annotations
self._parse_function()
def _parse_function(self):
"""
        Find the positional and keyword arguments, their types and default values, and the return type of the method.
        Parsing follows the order below to identify this information:
        1. Identify args, types, defaults and return type using inspect
        2. Parse type annotations if inspect doesn't have complete info
        3. Parse docstring to find keyword arguments
        4. Parse type hints
"""
# Add cls as first arg for class methods in API review tool
if "@classmethod" in self.annotations:
self.args.append(ArgType("cls"))
# Find signature to find positional args and return type
sig = inspect.signature(self.obj)
params = sig.parameters
        # Add all keyword-only args here temporarily until the docstring is parsed.
        # This handles the case where the keyword arg typehint (py3 style) is present in the signature itself.
self.kw_args = []
for argname in params:
arg = ArgType(argname, get_qualified_name(params[argname].annotation, self.namespace), "", self)
# set default value if available
if params[argname].default != Parameter.empty:
arg.default = str(params[argname].default)
# Store handle to kwarg object to replace it later
if params[argname].kind == Parameter.VAR_KEYWORD:
arg.argname = KW_ARG_NAME
if params[argname].kind == Parameter.KEYWORD_ONLY:
self.kw_args.append(arg)
else:
self.args.append(arg)
if sig.return_annotation:
self.return_type = get_qualified_name(sig.return_annotation, self.namespace)
# parse docstring
self._parse_docstring()
# parse type hints
self._parse_typehint()
self._copy_kw_args()
if not self.return_type and is_typehint_mandatory(self.name):
self.add_error("Return type is missing in both typehint and docstring")
# Validate return type
self._validate_pageable_api()
def _copy_kw_args(self):
# Copy kw only args from signature and docstring
kwargs = [x for x in self.args if x.argname == KW_ARG_NAME]
# add keyword args
if self.kw_args:
# Add separator to differentiate pos_arg and keyword args
self.args.append(ArgType("*"))
self.kw_args.sort(key=operator.attrgetter("argname"))
# Add parsed keyword args to function signature after updating current function node as parent in arg
for arg in self.kw_args:
arg.set_function_node(self)
self.args.append(arg)
        # remove the arg named "**kwargs" and re-append it at the end
if kwargs:
kw_arg = kwargs[0]
self.args.remove(kw_arg)
self.args.append(kw_arg)
# API must have **kwargs for non async methods. Flag it as an error if it is missing for public API
if not kwargs and is_kwarg_mandatory(self.name):
self.errors.append("Keyword arg (**kwargs) is missing in method {}".format(self.name))
def _parse_docstring(self):
        # Parse docstring to get the list of keyword args, the types and default values for both positional and
        # kw args, and the return type (if not already found in the signature)
docstring = ""
if hasattr(self.obj, "__doc__"):
docstring = getattr(self.obj, "__doc__")
        # Fall back to the class docstring if this is the constructor and __init__ has no docstring
if (
not docstring
and self.name == "__init__"
and hasattr(self.parent_node.obj, "__doc__")
):
docstring = getattr(self.parent_node.obj, "__doc__")
if docstring:
# Parse doc string to find missing types, kwargs and return type
parsed_docstring = DocstringParser(docstring)
parsed_docstring.parse()
# Set return type if not already set
if not self.return_type and parsed_docstring.ret_type:
logging.debug(
"Setting return type from docstring for method {}".format(self.name)
)
self.return_type = parsed_docstring.ret_type
            # Update arg type from docstring if available and if argtype is missing from signature parsing
arg_type_dict = dict(
[(x.argname, x.argtype) for x in parsed_docstring.pos_args]
)
for pos_arg in self.args:
pos_arg.argtype = arg_type_dict.get(
pos_arg.argname, pos_arg.argtype
)
self.kw_args.extend(parsed_docstring.kw_args)
def _generate_short_type(self, long_type):
short_type = long_type
groups = re.findall(REGEX_FIND_LONG_TYPE, short_type)
for g in groups:
short_type = short_type.replace(g[0], g[1])
return short_type
def _parse_typehint(self):
        # Skip parsing the typehint for async methods, and when a typehint is not
        # required (e.g. dunders) and the return type is already found
        if (self.return_type and not is_typehint_mandatory(self.name)) or self.is_async:
return
# Parse type hint to get return type and types for positional args
typehint_parser = TypeHintParser(self.obj)
# Find return type from type hint if return type is not already set
type_hint_ret_type = typehint_parser.find_return_type()
# Type hint must be present for all APIs. Flag it as an error if typehint is missing
if not type_hint_ret_type:
if (is_typehint_mandatory(self.name)):
self.add_error("Typehint is missing for method {}".format(self.name))
return
if not self.return_type:
self.return_type = type_hint_ret_type
else:
# Verify return type is same in docstring and typehint if typehint is available
short_return_type = self._generate_short_type(self.return_type)
long_ret_type = self.return_type
if long_ret_type != type_hint_ret_type and short_return_type != type_hint_ret_type:
logging.info("Long type: {0}, Short type: {1}, Type hint return type: {2}".format(long_ret_type, short_return_type, type_hint_ret_type))
error_message = "Return type in type hint is not matching return type in docstring"
self.add_error(error_message)
def _generate_signature_token(self, apiview):
apiview.add_punctuation("(")
args_count = len(self.args)
use_multi_line = args_count > 2
        # Show each arg on its own line if the method has more than 2 args,
        # using two indent groups to properly align them
if use_multi_line:
apiview.begin_group()
apiview.begin_group()
# Generate token for each arg
for index in range(args_count):
            # Add a new line and indent if args are shown on separate lines
if use_multi_line:
apiview.add_new_line()
apiview.add_whitespace()
self.args[index].generate_tokens(
apiview, self.namespace_id, use_multi_line
)
# Add punctuation between types except for last one
if index < args_count - 1:
apiview.add_punctuation(",", False, True)
if use_multi_line:
apiview.add_new_line()
apiview.end_group()
apiview.add_whitespace()
apiview.add_punctuation(")")
apiview.end_group()
else:
apiview.add_punctuation(")")
def generate_tokens(self, apiview):
"""Generates token for function signature
:param ApiView: apiview
"""
logging.info("Processing method {0} in class {1}".format(self.name, self.parent_node.namespace_id))
# Add tokens for annotations
for annot in self.annotations:
apiview.add_whitespace()
apiview.add_keyword(annot)
apiview.add_new_line()
apiview.add_whitespace()
apiview.add_line_marker(self.namespace_id)
if self.is_async:
apiview.add_keyword("async", False, True)
apiview.add_keyword("def", False, True)
# Show fully qualified name for module level function and short name for instance functions
apiview.add_text(
self.namespace_id, self.full_name if self.is_module_level else self.name
)
# Add parameters
self._generate_signature_token(apiview)
if self.return_type:
apiview.add_punctuation("->", True, True)
            # Add line marker id if the signature is displayed across multiple lines
if len(self.args) > 2:
line_id = "{}.returntype".format(self.namespace_id)
apiview.add_line_marker(line_id)
apiview.add_type(self.return_type)
if self.errors:
for e in self.errors:
apiview.add_diagnostic(e, self.namespace_id)
def add_error(self, error_msg):
# Ignore errors for lint check excluded methods
if self.name in LINT_EXCLUSION_METHODS:
return
# Hide all diagnostics for now for dunder methods
        # These are well-known protocol implementations
if not self.name.startswith("_") or self.name in VALIDATION_REQUIRED_DUNDER:
self.errors.append(error_msg)
def _validate_pageable_api(self):
# If api name starts with "list" and if annotated with "@distributed_trace"
# then this method should return ItemPaged or AsyncItemPaged
if self.return_type and self.name.startswith("list") and "@distributed_trace" in self.annotations:
tokens = re.search(REGEX_ITEM_PAGED, self.return_type)
if tokens:
ret_short_type = tokens.groups()[-1]
if ret_short_type in PAGED_TYPES:
logging.debug("list API returns valid paged return type")
return
error_msg = "list API {0} should return ItemPaged or AsyncItemPaged instead of {1} and page type must be included in docstring rtype".format(self.name, self.return_type)
logging.error(error_msg)
self.add_error(error_msg)
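    # Hedged example of the rule above (method and model names invented):
    # a method "list_secrets" annotated with @distributed_trace whose
    # docstring rtype is "~azure.core.paging.ItemPaged[SecretProperties]"
    # would pass, assuming "ItemPaged" is one of the PAGED_TYPES.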
def print_errors(self):
if self.errors:
print(" method: {}".format(self.name))
for e in self.errors:
print(" {}".format(e))
| tg-msft/azure-sdk-tools | packages/python-packages/api-stub-generator/apistub/nodes/_function_node.py | Python | mit | 14,187 |
#!/usr/bin/python
"""
(C) Copyright 2018 ALBA-CELLS
Authors: Marc Rosanes, Carlos Falcon, Zbigniew Reszela, Carlos Pascual
The program is distributed under the terms of the
GNU General Public License (or the Lesser GPL).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from argparse import RawTextHelpFormatter
from txm2nexuslib.images.multiplenormalization import normalize_images
def main():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
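    # Registering str2bool below lets argparse accept human-friendly
    # booleans, e.g. "-a yes" or "-a 0" (illustrative values).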
description = ('Normalize images located in different hdf5 files\n'
'Each file containing one of the images to be normalized')
parser = argparse.ArgumentParser(description=description,
formatter_class=RawTextHelpFormatter)
parser.register('type', 'bool', str2bool)
parser.add_argument('file_index_fn', metavar='file_index_fn',
type=str, help='DB index json filename of hdf5 data '
'files to be normalized')
parser.add_argument('-d', '--date', type=int,
default=None,
help='Date of files to be normalized\n'
'If None, no filter is applied\n'
'(default: None)')
parser.add_argument('-s', '--sample', type=str,
default=None,
help='Sample name of files to be normalized\n'
'If None, all sample names are normalized\n'
'(default: None)')
parser.add_argument('-e', '--energy', type=float,
default=None,
help='Energy of files to be normalized\n'
'If None, no filter is applied\n'
'(default: None)')
parser.add_argument('-t', '--table_h5', type=str,
default="hdf5_proc",
help='DB table of hdf5 to be normalized\n'
'If None, default tinyDB table is used\n'
'(default: hdf5_proc)')
parser.add_argument('-a', '--average_ff', type='bool',
default=True,
help='Compute average FF and normalize using it\n'
'(default: True)')
parser.add_argument('-c', '--cores', type=int,
default=-1,
                        help='Number of cores used for the normalization\n'
'(default is max of available CPUs: -1)')
args = parser.parse_args()
normalize_images(args.file_index_fn, table_name=args.table_h5,
date=args.date, sample=args.sample, energy=args.energy,
average_ff=args.average_ff, cores=args.cores)
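# Hypothetical invocation (argument values invented for illustration):
#   python manynorm.py index_file.json -d 20180425 -s sampleA -e 520.0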
if __name__ == "__main__":
main()
| sagiss/txrm2nexus | txm2nexuslib/scripts/manynorm.py | Python | gpl-3.0 | 3,458 |
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from myuw.models import UserCourseDisplay
from myuw.dao.instructor_schedule import check_section_instructor,\
get_section_by_label
from myuw.dao.pws import get_person_of_current_user
from myuw.dao.user import get_user_model
def set_pin_on_teaching_page(request,
section_label,
pin=True):
"""
if pin=True, pin the section on teaching page
if pin=False, unpin the section from teaching page
@except InvalidSectionID
@except NotSectionInstructorException
@except UserCourseDisplay.DoesNotExist
"""
section = get_section_by_label(section_label)
check_section_instructor(section, get_person_of_current_user(request))
# not to pin a primary section
if section.is_primary_section:
return False
return UserCourseDisplay.set_pin(
get_user_model(request), section_label, pin)
| uw-it-aca/myuw | myuw/dao/instructor_mini_course_card.py | Python | apache-2.0 | 984 |
#-*- coding: utf-8 -*-
import json
import mock
import unittest
from nose.tools import eq_, ok_
from ..constants import CONST
from ..test_base import TestCase
from pywechat.services.basic import Basic
from pywechat.services.wechat_card import CardService
class CardServiceTest(TestCase):
'''Creates a TestCase for Card Service.'''
def setUp(self):
TestCase.setUp(self)
with mock.patch.object(Basic, 'access_token', autospec=True):
self.card_service = self.service.init_service('Card')
def test_get_colors(self):
with mock.patch.object(CardService, 'get_colors') as mock_method:
mock_method.return_value = {
"colors": [
{"name": "Color010", "value": "#55bd47"},
{"name": "Color020", "value": "#10ad61"},
{"name": "Color030", "value": "#35a4de"},
{"name": "Color040", "value": "#3d78da"},
{"name": "Color050", "value": "#9058cb"},
{"name": "Color060", "value": "#de9c33"},
{"name": "Color070", "value": "#ebac16"},
{"name": "Color080", "value": "#f9861f"},
{"name": "Color081", "value": "#f08500"},
{"name": "Color090", "value": "#e75735"},
{"name": "Color100", "value": "#d54036"},
{"name": "Color101", "value": "#cf3e36"}
],
"errcode": 0,
"errmsg": "success."
}
data = self.card_service.get_colors()
self.assertIsNotNone(data["colors"])
def test_upload_image(self):
image = CONST.STRING
pic_url = CONST.STRING
with mock.patch.object(CardService, 'upload_image') as mock_method:
mock_method.return_value = {
"url": pic_url
}
data = self.card_service.upload_image(image)
eq_(data["url"], pic_url)
    def test_unavailable_code(self):
code = CONST.STRING
card_id = CONST.STRING
openid = CONST.STRING
with mock.patch.object(CardService, 'unavailable_code') as mock_method:
mock_method.return_value = {
"errcode": 0,
"errmsg": "ok",
"card": {"card_id": card_id},
"openid": openid
}
data = self.card_service.unavailable_code(code, card_id)
eq_(data["card"]["card_id"], card_id)
eq_(data["openid"], openid)
def test_decrypt_code(self):
encrypt_code = CONST.STRING
card_id = CONST.STRING
openid = CONST.STRING
with mock.patch.object(CardService, 'decrypt_code') as mock_method:
mock_method.return_value = {
"errcode": 0,
"errmsg": "ok",
"card": {"card_id": card_id},
"openid": openid
}
data = self.card_service.decrypt_code(encrypt_code)
eq_(data["card"]["card_id"], card_id)
eq_(data["openid"], openid)
def test_get_code(self):
code = CONST.STRING
card_id = CONST.STRING
begin_time = CONST.NUMBER
end_time = CONST.NUMBER
openid = CONST.STRING
with mock.patch.object(CardService, 'get_code') as mock_method:
mock_method.return_value = {
"errcode": 0,
"errmsg": "ok",
"card": {
"card_id": card_id,
"begin_time": begin_time,
"end_time": end_time,
},
"openid": openid
}
card_data = {
"card_id": card_id,
"begin_time": begin_time,
"end_time": end_time,
}
data = self.card_service.get_code(code, card_id)
eq_(data["card"], card_data)
eq_(data["openid"], openid)
    def test_batchget_card(self):
card_id = CONST.STRING
with mock.patch.object(CardService, 'batchget_card') as mock_method:
mock_method.return_value = {
"errcode": 0,
"errmsg": "ok",
"card_id_list": [card_id],
"total_num": 1
}
data = self.card_service.batchget_card(0, 1)
eq_(data["card_id_list"][0], card_id)
eq_(data["total_num"], 1)
def test_modify_stock(self):
card_id = CONST.STRING
increase_stock_value = CONST.NUMBER
reduce_stock_value = CONST.NUMBER
with mock.patch.object(CardService, 'modify_stock') as mock_method:
mock_method.return_value = {
"errcode": 0,
"errmsg": "ok",
}
data = self.card_service.modify_stock(card_id,
increase_stock_value, reduce_stock_value)
self.assertIsNotNone(data)
def test_update_code(self):
code = CONST.STRING
new_code = CONST.STRING
card_id = CONST.STRING
with mock.patch.object(CardService, 'update_code') as mock_method:
mock_method.return_value = {
"errcode": 0,
"errmsg": "ok",
}
data = self.card_service.update_code(code, new_code, card_id)
self.assertIsNotNone(data)
def test_delete_card(self):
card_id = CONST.STRING
with mock.patch.object(CardService, 'delete_card') as mock_method:
mock_method.return_value = {
"errcode": 0,
"errmsg": "ok",
}
data = self.card_service.delete_card(card_id)
self.assertIsNotNone(data)
| OctavianLee/Pywechat | tests/test_service/test_card.py | Python | mit | 5,790 |
# -*- coding: utf-8 -*-
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
# https://github.com/QuantConnect/Lean/blob/master/LICENSE
with open('../LICENSE') as f:
license = f.read()
with open('README.rst') as f:
readme = f.read()
setup(
name='quantconnect',
version='0.1',
description = 'QuantConnect API',
long_description=readme,
author = 'QuantConnect Python Team',
author_email = 'support@quantconnect.com',
url='https://www.quantconnect.com/',
license=license,
packages = find_packages(exclude=('tests', 'docs')),
install_requires=['requests']
) | redmeros/Lean | ApiPython/setup.py | Python | apache-2.0 | 1,298 |
import sys, re
for fn in sys.argv[1:]:
with open(fn, 'r') as f:
s = f.read()
xx = re.findall(r'([^\n]+)\s+\'\'\'(.*?)\'\'\'', s, re.M|re.S)
for (obj, doc) in xx:
s = re.findall('[^:`]\B(([`*])[a-zA-Z_][a-zA-Z0-9_]*\\2)\B', doc)
if s:
print '-'*50
print fn, obj
print '.'*50
print doc
print '.'*50
print [ss[0] for ss in s]
# for vim:
# :s/\([^`:]\)\([`*]\)\([a-zA-Z0-9_]\+\)\2/\1``\3``/
| pyrocko/pyrocko | maintenance/docstring_cop.py | Python | gpl-3.0 | 543 |
"""Tests for DataStream objects."""
import pytest
from iotile.core.exceptions import InternalError
from iotile.sg import DataStream, DataStreamSelector
def test_stream_type_parsing():
"""Make sure we can parse each type of stream."""
# Make sure parsing stream type works
stream = DataStream.FromString('buffered 1')
assert stream.stream_type == stream.BufferedType
stream = DataStream.FromString(u'buffered 1')
assert stream.stream_type == stream.BufferedType
stream = DataStream.FromString('unbuffered 1')
assert stream.stream_type == stream.UnbufferedType
stream = DataStream.FromString(u'unbuffered 1')
assert stream.stream_type == stream.UnbufferedType
stream = DataStream.FromString('counter 1')
assert stream.stream_type == stream.CounterType
stream = DataStream.FromString(u'counter 1')
assert stream.stream_type == stream.CounterType
stream = DataStream.FromString('constant 1')
assert stream.stream_type == stream.ConstantType
stream = DataStream.FromString(u'constant 1')
assert stream.stream_type == stream.ConstantType
stream = DataStream.FromString('output 1')
assert stream.stream_type == stream.OutputType
stream = DataStream.FromString(u'output 1')
assert stream.stream_type == stream.OutputType
def test_stream_id_parsing():
"""Make sure we can parse stream ids."""
stream = DataStream.FromString('buffered 1')
assert stream.stream_id == 1
stream = DataStream.FromString('buffered 0x100')
assert stream.stream_id == 0x100
stream = DataStream.FromString(u'buffered 1')
assert stream.stream_id == 1
stream = DataStream.FromString(u'buffered 0x100')
assert stream.stream_id == 0x100
def test_system_parsing():
"""Make sure we can parse the system prefix."""
stream = DataStream.FromString('buffered 1')
assert stream.system is False
stream = DataStream.FromString(u'buffered 1')
assert stream.system is False
stream = DataStream.FromString('system buffered 1')
assert stream.system is True
stream = DataStream.FromString(u'system buffered 1')
assert stream.system is True
def test_stringification():
"""Make sure we can stringify DataStream objects."""
stream1 = DataStream.FromString('system buffered 1')
stream2 = DataStream.FromString('buffered 0xF')
assert str(stream1) == str('system buffered 1')
assert str(stream2) == str('buffered 15')
def test_selector_parsing():
"""Make sure we can parse DataStreamSelector strings."""
# Make sure parsing stream type works
stream = DataStreamSelector.FromString('buffered 1')
assert stream.match_type == DataStream.BufferedType
stream = DataStreamSelector.FromString(u'buffered 1')
assert stream.match_type == DataStream.BufferedType
stream = DataStreamSelector.FromString('unbuffered 1')
assert stream.match_type == DataStream.UnbufferedType
stream = DataStreamSelector.FromString(u'unbuffered 1')
assert stream.match_type == DataStream.UnbufferedType
stream = DataStreamSelector.FromString('counter 1')
assert stream.match_type == DataStream.CounterType
stream = DataStreamSelector.FromString(u'counter 1')
assert stream.match_type == DataStream.CounterType
stream = DataStreamSelector.FromString('constant 1')
assert stream.match_type == DataStream.ConstantType
stream = DataStreamSelector.FromString(u'constant 1')
assert stream.match_type == DataStream.ConstantType
stream = DataStreamSelector.FromString('output 1')
assert stream.match_type == DataStream.OutputType
stream = DataStreamSelector.FromString(u'output 1')
assert stream.match_type == DataStream.OutputType
def test_stream_selector_id_parsing():
"""Make sure we can parse stream ids."""
stream = DataStreamSelector.FromString('buffered 1')
assert stream.match_id == 1
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString('buffered 0x100')
assert stream.match_id == 0x100
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString(u'buffered 1')
assert stream.match_id == 1
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString(u'buffered 0x100')
assert stream.match_id == 0x100
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString(u'system buffered 0x100')
assert stream.match_id == 0x100
assert stream.match_spec == DataStreamSelector.MatchSystemOnly
stream = DataStreamSelector.FromString(u'all buffered')
assert stream.match_id is None
assert stream.match_spec == DataStreamSelector.MatchUserAndBreaks
stream = DataStreamSelector.FromString(u'all user buffered')
assert stream.match_id is None
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString(u'all combined buffered')
assert stream.match_id is None
assert stream.match_spec == DataStreamSelector.MatchCombined
stream = DataStreamSelector.FromString(u'all system buffered')
assert stream.match_id is None
assert stream.match_spec == DataStreamSelector.MatchSystemOnly
def test_matching():
"""Test selector stream matching."""
sel = DataStreamSelector.FromString(u'all system buffered')
assert sel.matches(DataStream.FromString('system buffered 1'))
assert not sel.matches(DataStream.FromString('buffered 1'))
assert not sel.matches(DataStream.FromString('counter 1'))
sel = DataStreamSelector.FromString(u'all user outputs')
assert sel.matches(DataStream.FromString('output 1'))
assert not sel.matches(DataStream.FromString('system output 1'))
assert not sel.matches(DataStream.FromString('counter 1'))
sel = DataStreamSelector.FromString(u'all combined outputs')
assert sel.matches(DataStream.FromString('output 1'))
assert sel.matches(DataStream.FromString('system output 1'))
assert not sel.matches(DataStream.FromString('counter 1'))
sel = DataStreamSelector.FromString(u'all outputs')
assert sel.matches(DataStream.FromString('output 1'))
assert sel.matches(DataStream.FromString('system output 1024'))
assert not sel.matches(DataStream.FromString('system output 1'))
assert not sel.matches(DataStream.FromString('counter 1'))
def test_encoding():
"""Test data stream and selector encoding."""
sel = DataStreamSelector.FromString(u'all system output')
assert sel.encode() == 0x5FFF
sel = DataStreamSelector.FromString(u'all user output')
assert sel.encode() == 0x57FF
sel = DataStreamSelector.FromString(u'all output')
assert sel.encode() == 0xD7FF
sel = DataStreamSelector.FromString(u'all combined output')
assert sel.encode() == 0xDFFF
stream = DataStream.FromString('output 1')
assert stream.encode() == 0x5001
stream = DataStream.FromString('unbuffered 10')
assert stream.encode() == 0x100a
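# Observation from the constants above (an inference, not a documented
# spec): the encoding appears to pack the stream type into the high bits
# and the id / selector match bits into the low bits, with 0x7FF acting
# as a wildcard id for the "all ..." selectors.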
def test_selector_from_encoded():
"""Make sure we can create a selector from an encoded value."""
sel = DataStreamSelector.FromEncoded(0x5FFF)
assert str(sel) == 'all system outputs'
sel = DataStreamSelector.FromEncoded(0xD7FF)
assert str(sel) == 'all outputs'
sel = DataStreamSelector.FromEncoded(0x100a)
assert str(sel) == 'unbuffered 10'
assert str(DataStreamSelector.FromEncoded(DataStreamSelector.FromString('all combined output').encode())) == 'all combined outputs'
def test_buffered_pluralization():
"""Make sure we don't incorrectly pluralize buffered streams."""
sel = DataStreamSelector.FromString('all buffered')
assert str(sel) == 'all buffered'
def test_important_inputs():
"""Make sure we support matching important inputs and outputs."""
imp_stream = DataStream.FromString('system input 1024')
imp_store_stream = DataStream.FromString('system input 1536')
assert imp_stream.important is True
assert imp_store_stream.important is True
assert imp_stream.associated_stream() == DataStream.FromString('system output 1024')
assert imp_store_stream.associated_stream() == DataStream.FromString('system buffered 1536')
random_stream = DataStream.FromString('unbuffered 1024')
assert random_stream.important is False
with pytest.raises(InternalError):
random_stream.associated_stream()
| iotile/coretools | iotilesensorgraph/test/test_datastream.py | Python | gpl-3.0 | 8,473 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import contextlib
import copy
import datetime
import hashlib
import inspect
import os
import pprint
import mock
from oslo_log import log
from oslo_utils import timeutils
import six
from testtools import matchers
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import rpc
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
from nova import utils
LOG = log.getLogger(__name__)
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {'baz': fields.Field(fields.Integer())}
class MyObj(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.Field(fields.Integer(), default=1),
'bar': fields.Field(fields.String()),
'missing': fields.Field(fields.String()),
'readonly': fields.Field(fields.Integer(), read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
'mutable_default': fields.ListOfStringsField(default=[]),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
self._context = context
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self):
return 'polo'
@base.remotable
def _update_test(self):
self.bar = 'updated'
@base.remotable
def save(self):
self.obj_reset_changes()
@base.remotable
def refresh(self):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
VERSION = '1.5'
@classmethod
def obj_name(cls):
return 'MyObj'
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.Field(fields.String())}
class TestMetaclass(test.NoDBTestCase):
def test_obj_tracking(self):
@six.add_metaclass(base.NovaObjectMetaclass)
class NewBaseClass(object):
VERSION = '1.0'
fields = {}
@classmethod
def obj_name(cls):
return cls.__name__
class Fake1TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake1'
class Fake1TestObj2(Fake1TestObj1):
pass
class Fake1TestObj3(Fake1TestObj1):
VERSION = '1.1'
class Fake2TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake2'
class Fake1TestObj4(Fake1TestObj3):
VERSION = '1.2'
class Fake2TestObj2(Fake2TestObj1):
VERSION = '1.1'
class Fake1TestObj5(Fake1TestObj1):
VERSION = '1.1'
# Newest versions first in the list. Duplicate versions take the
# newest object.
expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
'fake2': [Fake2TestObj2, Fake2TestObj1]}
self.assertEqual(expected, NewBaseClass._obj_classes)
# The following should work, also.
self.assertEqual(expected, Fake1TestObj1._obj_classes)
self.assertEqual(expected, Fake1TestObj2._obj_classes)
self.assertEqual(expected, Fake1TestObj3._obj_classes)
self.assertEqual(expected, Fake1TestObj4._obj_classes)
self.assertEqual(expected, Fake1TestObj5._obj_classes)
self.assertEqual(expected, Fake2TestObj1._obj_classes)
self.assertEqual(expected, Fake2TestObj2._obj_classes)
def test_field_checking(self):
def create_class(field):
class TestField(base.NovaObject):
VERSION = '1.5'
fields = {'foo': field()}
return TestField
create_class(fields.IPV4AndV6AddressField)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, fields.IPV4AndV6Address)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, int)
class TestObjToPrimitive(test.NoDBTestCase):
def test_obj_to_primitive_list(self):
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
class TestObjMakeList(test.NoDBTestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('MyObj'),
}
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
    :param test: The TestCase doing the comparison
    :param obj: The NovaObject to examine
    :param db_obj: The dict-like database object to use as reference
    :param subs: A dict of objkey=dbkey field substitutions
    :param allow_missing: A list of fields that may not be in db_obj
    :param comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = getattr(obj, key)
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
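# Minimal usage sketch (field and column names invented): for an object
# whose 'uuid' field maps to the 'instance_uuid' database column, a test
# could call:
#   compare_obj(self, obj, db_row, subs={'uuid': 'instance_uuid'},
#               allow_missing=['deleted_at'])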
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.remote_object_calls = list()
self.user_id = 'fake-user'
self.project_id = 'fake-project'
self.context = context.RequestContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def str_comparator(self, expected, obj_val):
"""Compare an object field to a string in the db by performing
a simple coercion on the object field value.
"""
self.assertEqual(expected, str(obj_val))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
try:
f = super(_BaseTestCase, self).assertNotIsInstance
except AttributeError:
self.assertThat(obj,
matchers.Not(matchers.IsInstance(cls)),
message=msg or '')
else:
f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
self.useFixture(nova_fixtures.IndirectionAPIFixture(None))
def assertRemotes(self):
self.assertEqual(self.remote_object_calls, [])
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
    try:
        yield
    finally:
        # Restore the original indirection API even if the body raises
        base.NovaObject.indirection_api = _api
class _RemoteTest(_BaseTestCase):
def _testable_conductor(self):
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.remote_object_calls = list()
orig_object_class_action = \
self.conductor_service.manager.object_class_action
orig_object_action = \
self.conductor_service.manager.object_action
def fake_object_class_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objname'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_class_action(*args, **kwargs)
return (base.NovaObject.obj_from_primitive(result, context=args[0])
if isinstance(result, base.NovaObject) else result)
self.stubs.Set(self.conductor_service.manager, 'object_class_action',
fake_object_class_action)
def fake_object_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objinst'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_action(*args, **kwargs)
return result
self.stubs.Set(self.conductor_service.manager, 'object_action',
fake_object_action)
# Things are remoted by default in this session
self.useFixture(nova_fixtures.IndirectionAPIFixture(
conductor_rpcapi.ConductorAPI()))
# To make sure local and remote contexts match
self.stubs.Set(rpc.RequestContextSerializer,
'serialize_context',
lambda s, c: c)
self.stubs.Set(rpc.RequestContextSerializer,
'deserialize_context',
lambda s, c: c)
def setUp(self):
super(_RemoteTest, self).setUp()
self._testable_conductor()
def assertRemotes(self):
self.assertNotEqual(self.remote_object_calls, [])
class _TestObject(object):
def test_object_attrs_in_init(self):
# Spot check a few
objects.Instance
objects.InstanceInfoCache
objects.SecurityGroup
# Now check the test one in this file. Should be newest version
self.assertEqual('1.6', objects.MyObj.VERSION)
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
class Foo(base.NovaObject):
fields = {'foobar': fields.Field(fields.Integer())}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_obj_class_from_name(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
self.assertEqual('1.5', obj.VERSION)
def test_obj_class_from_name_latest_compatible(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
self.assertEqual('1.6', obj.VERSION)
def test_unknown_objtype(self):
self.assertRaises(exception.UnsupportedObjectError,
base.NovaObject.obj_class_from_name, 'foo', '1.0')
def test_obj_class_from_name_supported_version(self):
error = None
try:
base.NovaObject.obj_class_from_name('MyObj', '1.25')
        except exception.IncompatibleObjectVersion as exc:
            # Rebind explicitly: on Python 3 the "as" target is unset when
            # the except block exits, which would break the asserts below
            error = exc
self.assertIsNotNone(error)
self.assertEqual('1.6', error.kwargs['supported'])
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(exception.OrphanedObjectError,
obj._update_test)
self.assertRemotes()
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
self.assertRemotes()
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
self.assertRemotes()
def test_changed_with_sub_object(self):
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
self.assertRemotes()
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
self.assertRemotes()
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': timeutils.isotime(dt),
'updated_at': timeutils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
actual = obj.obj_to_primitive()
self.assertJsonEqual(actual, expected)
def test_contains(self):
obj = MyObj()
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_obj_reset_changes_recursive(self):
obj = MyObj(rel_object=MyOwnedObject(baz=123),
rel_objects=[MyOwnedObject(baz=456)])
self.assertEqual(set(['rel_object', 'rel_objects']),
obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True, fields=['foo'])
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True)
self.assertEqual(set([]), obj.rel_object.obj_what_changed())
self.assertEqual(set([]), obj.obj_what_changed())
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = (['foo', 'bar', 'missing',
'readonly', 'rel_object',
'rel_objects', 'mutable_default'] +
base_fields)
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_obj_as_admin(self):
obj = MyObj(context=self.context)
def fake(*args, **kwargs):
self.assertTrue(obj._context.is_admin)
with mock.patch.object(obj, 'obj_reset_changes') as mock_fn:
mock_fn.side_effect = fake
with obj.obj_as_admin():
obj.save()
self.assertTrue(mock_fn.called)
self.assertFalse(obj._context.is_admin)
def test_obj_as_admin_orphaned(self):
def testme():
obj = MyObj()
with obj.obj_as_admin():
pass
self.assertRaises(exception.OrphanedObjectError, testme)
def test_obj_alternate_context(self):
obj = MyObj(context=self.context)
with obj.obj_alternate_context(mock.sentinel.alt_ctx):
self.assertEqual(mock.sentinel.alt_ctx,
obj._context)
self.assertEqual(self.context, obj._context)
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
class TestObj(base.NovaObject):
fields = {'foo': fields.Field(fields.Integer())}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_obj_read_only(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.readonly = 1
self.assertRaises(exception.ReadOnlyFieldError, setattr,
obj, 'readonly', 2)
def test_obj_mutable_default(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.mutable_default = None
obj.mutable_default.append('s1')
self.assertEqual(obj.mutable_default, ['s1'])
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.mutable_default = None
obj1.mutable_default.append('s2')
self.assertEqual(obj1.mutable_default, ['s2'])
def test_obj_mutable_default_set_default(self):
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.obj_set_defaults('mutable_default')
self.assertEqual(obj1.mutable_default, [])
obj1.mutable_default.append('s1')
self.assertEqual(obj1.mutable_default, ['s1'])
obj2 = MyObj(context=self.context, foo=123, bar='abc')
obj2.obj_set_defaults('mutable_default')
self.assertEqual(obj2.mutable_default, [])
obj2.mutable_default.append('s2')
self.assertEqual(obj2.mutable_default, ['s2'])
def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
'deleted_at=<?>,foo=123,missing=<?>,'
'mutable_default=<?>,readonly=<?>,rel_object=<?>,'
'rel_objects=<?>,updated_at=<?>)',
repr(obj))
def test_obj_make_obj_compatible(self):
subobj = MyOwnedObject(baz=1)
subobj.VERSION = '1.2'
obj = MyObj(rel_object=subobj)
obj.obj_relationships = {
'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
}
orig_primitive = obj.obj_to_primitive()['nova_object.data']
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.8', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.7', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.6', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.5', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.4', 'rel_object')
self.assertFalse(mock_compat.called)
self.assertNotIn('rel_object', primitive)
def test_obj_make_compatible_hits_sub_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
obj = MyObj(foo=123)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
self.assertFalse(mock_compat.called)
def test_obj_make_compatible_complains_about_missing_rules(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {}
self.assertRaises(exception.ObjectActionError,
obj.obj_make_compatible, {}, '1.0')
def test_obj_make_compatible_doesnt_skip_falsey_sub_objects(self):
class MyList(base.ObjectListBase, base.NovaObject):
VERSION = '1.2'
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList(objects=[])
class MyOwner(base.NovaObject):
VERSION = '1.2'
fields = {'mylist': fields.ObjectField('MyList')}
obj_relationships = {
'mylist': [('1.1', '1.1')],
}
myowner = MyOwner(mylist=mylist)
primitive = myowner.obj_to_primitive('1.1')
self.assertIn('mylist', primitive['nova_object.data'])
def test_obj_make_compatible_handles_list_of_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_objects=[subobj])
obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}
def fake_make_compat(primitive, version):
self.assertEqual('1.123', version)
self.assertIn('baz', primitive)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
mock_mc.side_effect = fake_make_compat
obj.obj_to_primitive('1.0')
self.assertTrue(mock_mc.called)
def test_delattr(self):
obj = MyObj(bar='foo')
del obj.bar
# Should appear unset now
self.assertFalse(obj.obj_attr_is_set('bar'))
# Make sure post-delete, references trigger lazy loads
self.assertEqual('loaded!', getattr(obj, 'bar'))
def test_delattr_unset(self):
obj = MyObj()
self.assertRaises(AttributeError, delattr, obj, 'bar')
class TestObject(_LocalTest, _TestObject):
def test_set_defaults(self):
obj = MyObj()
obj.obj_set_defaults('foo')
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertEqual(1, obj.foo)
def test_set_defaults_no_default(self):
obj = MyObj()
self.assertRaises(exception.ObjectActionError,
obj.obj_set_defaults, 'bar')
def test_set_all_defaults(self):
obj = MyObj()
obj.obj_set_defaults()
self.assertEqual(set(['deleted', 'foo', 'mutable_default']),
obj.obj_what_changed())
self.assertEqual(1, obj.foo)
def test_set_defaults_not_overwrite(self):
# NOTE(danms): deleted defaults to False, so verify that it does
# not get reset by obj_set_defaults()
obj = MyObj(deleted=True)
obj.obj_set_defaults()
self.assertEqual(1, obj.foo)
self.assertTrue(obj.deleted)
class TestRemoteObject(_RemoteTest, _TestObject):
def test_major_version_mismatch(self):
MyObj2.VERSION = '2.0'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_greater(self):
MyObj2.VERSION = '1.7'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_less(self):
MyObj2.VERSION = '1.2'
obj = MyObj2.query(self.context)
self.assertEqual(obj.bar, 'bar')
self.assertRemotes()
def test_compat(self):
MyObj2.VERSION = '1.1'
obj = MyObj2.query(self.context)
self.assertEqual('oldbar', obj.bar)
def test_revision_ignored(self):
MyObj2.VERSION = '1.1.456'
obj = MyObj2.query(self.context)
self.assertEqual('bar', obj.bar)
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_serialize_set_to_list(self):
ser = base.NovaObjectSerializer()
self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2])))
def _test_deserialize_entity_newer(self, obj_version, backported_to,
my_version='1.6'):
ser = base.NovaObjectSerializer()
ser._conductor = mock.Mock()
ser._conductor.object_backport.return_value = 'backported'
class MyTestObj(MyObj):
VERSION = my_version
obj = MyTestObj()
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(ser._conductor.object_backport.called)
else:
self.assertEqual('backported', result)
ser._conductor.object_backport.assert_called_with(self.context,
primitive,
backported_to)
def test_deserialize_entity_newer_version_backports(self):
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1')
def test_deserialize_dot_z_with_extra_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
ser = base.NovaObjectSerializer()
obj = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): The serializer is where the logic lives that
# avoids backports for cases where only a .z difference in
# the received object version is detected. As a result, we
# end up with a version of what we expected, effectively the
# .0 of the object.
self.assertEqual('1.6', obj.VERSION)
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('nova_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
# dict case
thing = {'key': obj}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in six.itervalues(primitive):
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in six.itervalues(thing2):
self.assertIsInstance(item, MyObj)
# object-action updates dict case
thing = {'foo': obj.obj_to_primitive()}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(thing, primitive)
thing2 = ser.deserialize_entity(self.context, thing)
self.assertIsInstance(thing2['foo'], base.NovaObject)
class TestArgsSerializer(test.NoDBTestCase):
def setUp(self):
super(TestArgsSerializer, self).setUp()
self.now = timeutils.utcnow()
self.str_now = timeutils.strtime(at=self.now)
@base.serialize_args
def _test_serialize_args(self, *args, **kwargs):
expected_args = ('untouched', self.str_now, self.str_now)
for index, val in enumerate(args):
self.assertEqual(expected_args[index], val)
expected_kwargs = {'a': 'untouched', 'b': self.str_now,
'c': self.str_now}
for key, val in six.iteritems(kwargs):
self.assertEqual(expected_kwargs[key], val)
def test_serialize_args(self):
self._test_serialize_args('untouched', self.now, self.now,
a='untouched', b=self.now, c=self.now)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
object_data = {
'Agent': '1.0-cf1b002f0e50f5333e0f33588f6c2d57',
'AgentList': '1.0-3c73cea65e7c938080184ec70a4ee1f7',
'Aggregate': '1.1-7b3f04af5342ba544955d01c9c954fa5',
'AggregateList': '1.2-13a2dfb67f9cb9aee815e233bc89f34c',
'BandwidthUsage': '1.2-e7d3b3a5c3950cc67c99bc26a1075a70',
'BandwidthUsageList': '1.2-fe73c30369dd23c41619c9c19f27a562',
'BlockDeviceMapping': '1.9-c87e9c7e5cfd6a402f32727aa74aca95',
'BlockDeviceMappingList': '1.10-44b9818d5e90a7396eb807540cbe42c0',
'CellMapping': '1.0-4b1616970814c3c819e10c7ef6b9c3d5',
'ComputeNode': '1.11-5f8cd6948ad98fcc0c39b79d49acc4b6',
'ComputeNodeList': '1.11-f09b7f64339350b4296ac85c07e3a573',
'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba',
'DNSDomainList': '1.0-bc58364180c693203ebcf5e5d5775736',
'EC2Ids': '1.0-8e193896fa01cec598b875aea94da608',
'EC2InstanceMapping': '1.0-e9c3257badcc3aa14089b0a62f163108',
'EC2SnapshotMapping': '1.0-a545acd0d1519d4316b9b00f30e59b4d',
'EC2VolumeMapping': '1.0-15710aa212b5cbfdb155fdc81cce4ede',
'FixedIP': '1.10-4e8060f91f6c94ae73d557708ec62f56',
'FixedIPList': '1.10-724a59f2446d917d0bd13d6aa33edf8a',
'Flavor': '1.1-01ed47361fbe76bf728edf667d3f45d3',
'FlavorList': '1.1-ab3f242e0db21db87285f2ac2ddc5c72',
'FloatingIP': '1.6-24c614d2c3d4887254a679be65c11de5',
'FloatingIPList': '1.7-e61a470ab21d7422f6bb703f86d99b53',
'HVSpec': '1.0-1f9806b94af42dd91e6db369cd10f114',
'Instance': '1.20-0991d6bd300ebf35ec19d7d68922e69b',
'InstanceAction': '1.1-866fb0235d45ab51cc299b8726303d9c',
'InstanceActionEvent': '1.1-538698f30974064543134784c5da6056',
'InstanceActionEventList': '1.0-3510dc5bc494bcf2468f54249366164f',
'InstanceActionList': '1.0-7f3f14a6c16fa16113c112a3b2ffffdd',
'InstanceExternalEvent': '1.0-2c5d816a6447594d9ba91cc44834f685',
'InstanceFault': '1.2-090c74b3833c715845ec2cf24a686aaf',
'InstanceFaultList': '1.1-94f71c64972f25ba5675704bf2087fdb',
'InstanceGroup': '1.9-a77a59735d62790dcaa413a21acfaa73',
'InstanceGroupList': '1.6-4642a730448b2336dfbf0f410f9c0cab',
'InstanceInfoCache': '1.5-ef7394dae46cff2dd560324555cb85cf',
'InstanceList': '1.17-d453df4d1e7e1ec3b5b8b089672a870f',
'InstanceMapping': '1.0-d7cfc251f16c93df612af2b9de59e5b7',
'InstanceMappingList': '1.0-1e388f466f8a306ab3c0a0bb26479435',
'InstanceNUMACell': '1.2-5d2dfa36e9ecca9b63f24bf3bc958ea4',
'InstanceNUMATopology': '1.1-b6fab68a3f0f1dfab4c98a236d29839a',
'InstancePCIRequest': '1.1-e082d174f4643e5756ba098c47c1510f',
'InstancePCIRequests': '1.1-4825b599f000538991fdc9972a92c2c6',
'KeyPair': '1.3-2d7c9ccade5532f7cd185110a9367e6a',
'KeyPairList': '1.2-41b7c9ab5fd2a216be4bbce011a55eff',
'Migration': '1.2-0554a9f061ec0e9fefe43773bc426fcf',
'MigrationList': '1.2-e772d7d6ae0581ec72042d50c6bdf6ec',
'MyObj': '1.6-fce707f79d6fee00f0ebbac98816a380',
'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298',
'NUMACell': '1.2-cb9c3b08cc1c418d021492f788d04173',
'NUMAPagesTopology': '1.0-97d93f70a68625b5f29ff63a40a4f612',
'NUMATopology': '1.2-790f6bdff85bf6e5677f409f3a4f1c6a',
'NUMATopologyLimits': '1.0-201845851897940c0a300e3d14ebf04a',
'Network': '1.2-141c797b794a4f8dbe251f929dc15268',
'NetworkList': '1.2-4997048844f38a920eed0f9e685360e3',
'NetworkRequest': '1.1-f31192f5a725017707f989585e12d7dc',
'NetworkRequestList': '1.1-46ff51f691dde5cf96b4c37b0953a516',
'PciDevice': '1.3-6d37f795ee934e7db75b5a6a1926def0',
'PciDeviceList': '1.1-0aedd5a49b4a9f30da37cf275cd98cf7',
'PciDevicePool': '1.1-2f352e08e128ec5bc84bc3007936cc6d',
'PciDevicePoolList': '1.1-46ff51f691dde5cf96b4c37b0953a516',
'Quotas': '1.2-615ed622082c92d938119fd49e6d84ee',
'QuotasNoOp': '1.2-164c628906b170fd946a7672e85e4935',
'S3ImageMapping': '1.0-56d23342db8131d826797c7229dc4050',
'SecurityGroup': '1.1-cd2f3c063640723b584634fa1075be77',
'SecurityGroupList': '1.0-29b93ebda887d1941ec10c8e34644356',
'SecurityGroupRule': '1.1-38290b6f9a35e416c2bcab5f18708967',
'SecurityGroupRuleList': '1.1-c98e038da57c3a9e47e62a588e5b3c23',
'Service': '1.12-1a34a387914f90aacc33c8c43d45d0b3',
'ServiceList': '1.10-653f472b965b6ed17235ebd683751be7',
'Tag': '1.0-521693d0515aa031dff2b8ae3f86c8e0',
'TagList': '1.0-698b4e8bd7d818db10b71a6d3c596760',
'TestSubclassedObject': '1.6-d0f7f126f87433003c4d2ced202d6c86',
'VirtCPUFeature': '1.0-ac0f6fa47089583a95c57131e46de052',
'VirtCPUModel': '1.0-aa6fd0df43edfd2f8cfa0f2151a06f20',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.0-d3d14066c99b8ae4d5204059fb147279',
'VirtualInterfaceList': '1.0-311365526cc6904e43ace844a794cb6b'
}
object_relationships = {
'BlockDeviceMapping': {'Instance': '1.20'},
'ComputeNode': {'HVSpec': '1.0', 'PciDevicePoolList': '1.1'},
'FixedIP': {'Instance': '1.20', 'Network': '1.2',
'VirtualInterface': '1.0',
'FloatingIPList': '1.7'},
'FloatingIP': {'FixedIP': '1.10'},
'Instance': {'InstanceFault': '1.2',
'InstanceInfoCache': '1.5',
'InstanceNUMATopology': '1.1',
'PciDeviceList': '1.1',
'TagList': '1.0',
'SecurityGroupList': '1.0',
'Flavor': '1.1',
'InstancePCIRequests': '1.1',
'VirtCPUModel': '1.0',
'EC2Ids': '1.0',
},
'InstanceNUMACell': {'VirtCPUTopology': '1.0'},
'InstanceNUMATopology': {'InstanceNUMACell': '1.2'},
'InstancePCIRequests': {'InstancePCIRequest': '1.1'},
'MyObj': {'MyOwnedObject': '1.0'},
'NUMACell': {'NUMAPagesTopology': '1.0'},
'NUMATopology': {'NUMACell': '1.2'},
'SecurityGroupRule': {'SecurityGroup': '1.1'},
'Service': {'ComputeNode': '1.11'},
'TestSubclassedObject': {'MyOwnedObject': '1.0'},
'VirtCPUModel': {'VirtCPUFeature': '1.0', 'VirtCPUTopology': '1.0'},
}
class TestObjectVersions(test.NoDBTestCase):
def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
"""Follow a chain of remotable things down to the original function."""
if isinstance(thing, classmethod):
return self._find_remotable_method(cls, thing.__get__(None, cls))
elif inspect.ismethod(thing) and hasattr(thing, 'remotable'):
return self._find_remotable_method(cls, thing.original_fn,
parent_was_remotable=True)
elif parent_was_remotable:
# We must be the first non-remotable thing underneath a stack of
# remotable things (i.e. the actual implementation method)
return thing
else:
# This means the top-level thing never hit a remotable layer
return None
def _get_fingerprint(self, obj_name):
obj_class = base.NovaObject._obj_classes[obj_name][0]
fields = obj_class.fields.items()
fields.sort()
methods = []
for name in dir(obj_class):
thing = getattr(obj_class, name)
if inspect.ismethod(thing) or isinstance(thing, classmethod):
method = self._find_remotable_method(obj_class, thing)
if method:
methods.append((name, inspect.getargspec(method)))
methods.sort()
# NOTE(danms): Things that need a version bump are any fields
# and their types, or the signatures of any remotable methods.
# Of course, these are just the mechanical changes we can detect,
# but many other things may require a version bump (method behavior
# and return value changes, for example).
if hasattr(obj_class, 'child_versions'):
relevant_data = (fields, methods,
OrderedDict(
sorted(obj_class.child_versions.items())))
else:
relevant_data = (fields, methods)
fingerprint = '%s-%s' % (obj_class.VERSION,
hashlib.md5(str(relevant_data)).hexdigest())
return fingerprint
def test_versions(self):
fingerprints = {}
for obj_name in base.NovaObject._obj_classes:
fingerprints[obj_name] = self._get_fingerprint(obj_name)
if os.getenv('GENERATE_HASHES'):
            with open('object_hashes.txt', 'w') as hashes_file:
                hashes_file.write(pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
stored = set(object_data.items())
computed = set(fingerprints.items())
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, hash in changed:
expected[name] = object_data.get(name)
actual[name] = fingerprints.get(name)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes here.')
def _get_object_field_name(self, field):
if isinstance(field._type, fields.Object):
return field._type._obj_name
if isinstance(field, fields.ListOfObjectsField):
return field._type._element_type._type._obj_name
return None
def _build_tree(self, tree, obj_class):
obj_name = obj_class.obj_name()
if obj_name in tree:
return
for name, field in obj_class.fields.items():
# Notes(yjiang5): ObjectListBase should be covered by
# child_versions test
if (issubclass(obj_class, base.ObjectListBase) and
name == 'objects'):
continue
sub_obj_name = self._get_object_field_name(field)
if sub_obj_name:
sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
self._build_tree(tree, sub_obj_class)
tree.setdefault(obj_name, {})
tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
def test_relationships(self):
tree = {}
for obj_name in base.NovaObject._obj_classes.keys():
self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])
stored = set([(x, str(y)) for x, y in object_relationships.items()])
computed = set([(x, str(y)) for x, y in tree.items()])
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, deps in changed:
expected[name] = object_relationships.get(name)
actual[name] = tree.get(name)
self.assertEqual(expected, actual,
'Some objects have changed dependencies. '
'Please make sure to bump the versions of '
'parent objects and provide a rule in their '
'obj_make_compatible() routines to backlevel '
'the child object.')
def test_obj_make_compatible(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
obj_class().obj_to_primitive(target_version=test_version)
def _get_obj_to_test(self, obj_class):
obj = obj_class()
for fname, ftype in obj.fields.items():
if isinstance(ftype, fields.ObjectField):
fobjname = ftype.AUTO_TYPE._obj_name
fobjcls = base.NovaObject._obj_classes[fobjname][0]
setattr(obj, fname, self._get_obj_to_test(fobjcls))
elif isinstance(ftype, fields.ListOfObjectsField):
# FIXME(danms): This will result in no tests for this
# field type...
setattr(obj, fname, [])
return obj
def _find_version_mapping(self, my_ver, versions):
closest = None
my_ver = utils.convert_version_to_tuple(my_ver)
for _my, _child in versions:
_my = utils.convert_version_to_tuple(_my)
_child = utils.convert_version_to_tuple(_child)
if _my == my_ver:
return '%s.%s' % _child
elif _my < my_ver:
closest = _child
if closest:
return '%s.%s' % closest
else:
return None
def _validate_object_fields(self, obj_class, primitive):
for fname, ftype in obj_class.fields.items():
if isinstance(ftype, fields.ObjectField):
exp_vers = obj_class.obj_relationships[fname]
exp_ver = self._find_version_mapping(
primitive['nova_object.version'], exp_vers)
if exp_ver is None:
self.assertNotIn(fname, primitive['nova_object.data'])
else:
child_p = primitive['nova_object.data'][fname]
self.assertEqual(exp_ver,
child_p['nova_object.version'])
def test_obj_make_compatible_with_data(self):
        # Iterate all object classes and, for every older target version,
        # verify that obj_to_primitive() emits child objects at the versions
        # declared in obj_relationships for that target.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
if 'tests.unit' in obj_class.__module__:
# NOTE(danms): Skip test objects. When we move to
# oslo.versionedobjects, we won't have to do this
continue
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
test_object = self._get_obj_to_test(obj_class)
obj_p = test_object.obj_to_primitive(
target_version=test_version)
self._validate_object_fields(obj_class, obj_p)
def test_obj_relationships_in_order(self):
        # Iterate all object classes and verify that the version mappings in
        # obj_relationships are listed in ascending order: parent versions
        # strictly increasing and child versions non-decreasing.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
for field, versions in obj_class.obj_relationships.items():
last_my_version = (0, 0)
last_child_version = (0, 0)
for my_version, child_version in versions:
_my_version = utils.convert_version_to_tuple(my_version)
_ch_version = utils.convert_version_to_tuple(child_version)
self.assertTrue((last_my_version < _my_version
and last_child_version <= _ch_version),
'Object %s relationship '
'%s->%s for field %s is out of order' % (
obj_name, my_version, child_version,
field))
last_my_version = _my_version
last_child_version = _ch_version
| adelina-t/nova | nova/tests/unit/objects/test_objects.py | Python | apache-2.0 | 58,349 |
import argparse
import flyfingers
def parse_args():
parser = argparse.ArgumentParser(description=flyfingers.__doc__)
sparsers = parser.add_subparsers(dest='subparser_name')
sub_parser = sparsers.add_parser('run', help='start flyfingers')
sub_parser.add_argument('run')
    sub_parser.set_defaults(func=flyfingers.run)
args = parser.parse_args()
args.func(args.run)
if __name__ == '__main__':
parse_args()
| swdream/flyfingers | flyfingers/cli.py | Python | mit | 451 |
from pyjade import runtime
class TestIteration(object):
def test_it_returns_mappings_unaltered(self):
mapping = {}
assert runtime.iteration(mapping, 1) is mapping
def test_it_returns_empty_list_on_empty_input(self):
l = iter([])
assert list(runtime.iteration(l, 1)) == []
def test_it_iterates_as_is_if_numkeys_is_same_as_cardinality(self):
l = [(1, 2), (3, 4)]
assert list(runtime.iteration(l, 2)) == l
def test_it_extends_with_index_if_items_are_iterable(self):
l = [('a',), ('b',)]
assert list(runtime.iteration(l, 2)) == [('a', 0), ('b', 1)]
def test_it_adds_index_if_items_are_strings(self):
l = ['a', 'b']
assert list(runtime.iteration(l, 2)) == [('a', 0), ('b', 1)]
def test_it_adds_index_if_items_are_non_iterable(self):
l = [1, 2]
assert list(runtime.iteration(l, 2)) == [(1, 0), (2, 1)]
def test_it_doesnt_swallow_first_item_of_iterators(self):
l = [1, 2]
iterator = iter(l)
assert list(runtime.iteration(iterator, 1)) == l
| syrusakbary/pyjade | pyjade/testsuite/test_runtime.py | Python | mit | 1,090 |
# Copyright (c) 2016, Matt Layman
import os
import unittest
from tap.adapter import Adapter
from tap.parser import Parser
from tap.rules import Rules
class Loader(object):
"""Load TAP lines into unittest-able objects."""
ignored_lines = set(['diagnostic', 'unknown'])
def __init__(self):
self._parser = Parser()
def load(self, files):
"""Load any files found into a suite.
Any directories are walked and their files are added as TAP files.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
for filepath in files:
if os.path.isdir(filepath):
self._find_tests_in_directory(filepath, suite)
else:
suite.addTest(self.load_suite_from_file(filepath))
return suite
def load_suite_from_file(self, filename):
"""Load a test suite with test lines from the provided TAP file.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
rules = Rules(filename, suite)
if not os.path.exists(filename):
rules.handle_file_does_not_exist()
return suite
line_generator = self._parser.parse_file(filename)
return self._load_lines(filename, line_generator, suite, rules)
def load_suite_from_stdin(self):
"""Load a test suite with test lines from the TAP stream on STDIN.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
rules = Rules('stream', suite)
line_generator = self._parser.parse_stdin()
return self._load_lines('stream', line_generator, suite, rules)
def _find_tests_in_directory(self, directory, suite):
"""Find test files in the directory and add them to the suite."""
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
suite.addTest(self.load_suite_from_file(filepath))
def _load_lines(self, filename, line_generator, suite, rules):
"""Load a suite with lines produced by the line generator."""
line_counter = 0
for line in line_generator:
line_counter += 1
if line.category in self.ignored_lines:
continue
if line.category == 'test':
suite.addTest(Adapter(filename, line))
rules.saw_test()
elif line.category == 'plan':
if line.skip:
rules.handle_skipping_plan(line)
return suite
rules.saw_plan(line, line_counter)
elif line.category == 'bail':
rules.handle_bail(line)
return suite
elif line.category == 'version':
rules.saw_version_at(line_counter)
rules.check(line_counter)
return suite
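if __name__ == '__main__':  # pragma: no cover
    # Minimal usage sketch (hypothetical invocation, not part of the public
    # API): build a suite from the TAP files named on the command line and
    # run it with the default text runner.
    import sys
    unittest.TextTestRunner().run(Loader().load(sys.argv[1:]))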
| Mark-E-Hamilton/tappy | tap/loader.py | Python | bsd-2-clause | 2,985 |
default_app_config = 'hs_tracking.apps.HSTrackingAppConfig'
| ResearchSoftwareInstitute/MyHPOM | hs_tracking/__init__.py | Python | bsd-3-clause | 60 |
"""
Views for manual refunds in the student support UI.
This interface is used by the support team to track refunds
entered manually in CyberSource (our payment gateway).
DEPRECATION WARNING:
We are currently in the process of replacing lms/djangoapps/shoppingcart
with an E-Commerce service that supports automatic refunds. Once that
transition is complete, we can remove this view.
"""
from __future__ import absolute_import
import logging
from django import forms
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic.edit import FormView
from openedx.core.lib.courses import clean_course_id
from student.models import CourseEnrollment
from support.decorators import require_support_permission
log = logging.getLogger(__name__)
class RefundForm(forms.Form):
"""
Form for manual refunds
"""
user = forms.EmailField(label=_("Email Address"), required=True)
course_id = forms.CharField(label=_("Course ID"), required=True)
confirmed = forms.CharField(widget=forms.HiddenInput, required=False)
def clean_user(self):
"""
validate user field
"""
user_email = self.cleaned_data['user']
try:
user = User.objects.get(email=user_email)
except User.DoesNotExist:
raise forms.ValidationError(_("User not found"))
return user
def clean_course_id(self):
"""
Validate the course id
"""
return clean_course_id(self)
def clean(self):
"""
clean form
"""
user, course_id = self.cleaned_data.get('user'), self.cleaned_data.get('course_id')
if user and course_id:
self.cleaned_data['enrollment'] = enrollment = CourseEnrollment.get_or_create_enrollment(user, course_id)
if enrollment.refundable():
msg = _(u"Course {course_id} not past the refund window.").format(course_id=course_id)
raise forms.ValidationError(msg)
try:
self.cleaned_data['cert'] = enrollment.certificateitem_set.filter(
mode='verified',
status='purchased'
)[0]
except IndexError:
msg = _(u"No order found for {user} in course {course_id}").format(user=user, course_id=course_id)
raise forms.ValidationError(msg)
return self.cleaned_data
def is_valid(self):
"""
returns whether form is valid
"""
is_valid = super(RefundForm, self).is_valid()
if is_valid and self.cleaned_data.get('confirmed') != 'true':
# this is a two-step form: first look up the data, then issue the refund.
# first time through, set the hidden "confirmed" field to true and then redisplay the form
# second time through, do the unenrollment/refund.
data = dict(list(self.data.items()))
self.cleaned_data['confirmed'] = data['confirmed'] = 'true'
self.data = data
is_valid = False
return is_valid
class RefundSupportView(FormView):
"""
Refund form view
"""
template_name = 'support/refund.html'
form_class = RefundForm
success_url = '/support/'
@method_decorator(require_support_permission)
def dispatch(self, *args, **kwargs):
return super(RefundSupportView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
"""
extra context data to add to page
"""
kwargs = super(RefundSupportView, self).get_context_data(**kwargs)
form = getattr(kwargs['form'], 'cleaned_data', {})
if form.get('confirmed') == 'true':
kwargs['cert'] = form.get('cert')
kwargs['enrollment'] = form.get('enrollment')
return kwargs
def form_valid(self, form):
"""
unenrolls student, issues refund
"""
user = form.cleaned_data['user']
course_id = form.cleaned_data['course_id']
enrollment = form.cleaned_data['enrollment']
cert = form.cleaned_data['cert']
enrollment.can_refund = True
enrollment.update_enrollment(is_active=False)
log.info(u"%s manually refunded %s %s", self.request.user, user, course_id)
messages.success(
self.request,
_(u"Unenrolled {user} from {course_id}").format(
user=user,
course_id=course_id
)
)
messages.success(
self.request,
_(u"Refunded {cost} for order id {order_id}").format(
cost=cert.unit_cost,
order_id=cert.order.id
)
)
return HttpResponseRedirect('/support/refund/')
| ESOedX/edx-platform | lms/djangoapps/support/views/refund.py | Python | agpl-3.0 | 4,934 |
from __pyosshell__ import *
QUEUE_OUT = 'out.info'
QUEUE_ERR = 'err.info'
QUEUE_MAIL = ['%s@mpip-mainz.mpg.de' % getpass.getuser(), 'a']  # -m flags: e(nd) a(bort) b(egin)
VOTCARC = '/people/thnfs/homes/poelking/VOTCA_SUSE_12/bin/VOTCARC.csh'
def retrieve_qid(logfile=QUEUE_OUT):
try:
intt = open(logfile,'r')
top = intt.readline()
intt.close()
qid = int(top.split('/')[-2].split('.')[0])
except IOError:
qid = '--qw--'
return qid
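def _demo_retrieve_qid():
    # Illustrative sketch with a fabricated log line: retrieve_qid() expects
    # the first line to contain a path whose second-to-last segment is
    # '<qid>.<host>', from which the numeric queue id is recovered.
    import os, tempfile
    tmp = tempfile.NamedTemporaryFile('w', suffix='.info', delete=False)
    tmp.write('/var/spool/sge/node01/123456.node01/output\n')
    tmp.close()
    assert retrieve_qid(tmp.name) == 123456
    os.remove(tmp.name)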
def is_qid_active(qid):
is_active = not os.system('qstat | grep %d' % qid)
return is_active
def are_jobs_waiting(username = getpass.getuser()):
sig = os.system('qstat -u %s | grep " qw " > /dev/null' % username)
if sig == 0:
return True
else:
return False
def write_qsub_sh_template(outfile = 'qsub.sh', username = getpass.getuser(),
queue = 'PE_8', procs = 8):
# Placeholder '_USERNAME' (will be replaced by <username>)
outt = open(outfile,'w')
outt.write('#!/bin/tcsh\n')
outt.write('#\n')
outt.write('#$ -pe %s %d\n' % (queue,procs))
outt.write('#$ -o %s\n' % (QUEUE_OUT))
outt.write('#$ -e %s\n' % (QUEUE_ERR))
outt.write('#$ -cwd\n')
outt.write('#$ -j y\n')
outt.write('#$ -m %s\n' % (QUEUE_MAIL[1]))
outt.write('#$ -M %s\n' % (QUEUE_MAIL[0]))
outt.write('#$ -N _DESCRIPTION\n')
outt.write('''
# USERNAME IS _USERNAME
source /sw/linux/intel/composerxe-2011.0.084/bin/compilervars.csh intel64
source /sw/linux/gromacs/4.5/shared/latest/bin/GMXRC
# WORKDIRECTORY
set workdir=`pwd`
echo "Workdir is $workdir"
# WORK SCRATCH
if ( ! -d /usr/scratch/_USERNAME ) then
mkdir /usr/scratch/_USERNAME
endif
set jno=0
while ( -d job_$jno )
set jno = `expr $jno + 1`
end
set jobdir="/usr/scratch/_USERNAME/job_$jno"
mkdir $jobdir
rm -rf $jobdir/*
mkdir $jobdir/temp
echo "Jobdir is $jobdir"
# COPY FOLDER
rsync -ar $workdir/* $jobdir
cd $jobdir
# EXECUTE HEAVY WORK
#_GROMPP_CMD
#_MDRUN_CMD
cd ..
# SYNCHRONIZE BACK & CLEAN
rsync -ar $jobdir/* $workdir
rm -rf $jobdir
''')
outt.close()
return
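# Example (illustrative): write_qsub_sh_template('qsub.sh', queue='PE_8',
# procs=8) emits a tcsh SGE job script; note that the _USERNAME and
# _DESCRIPTION placeholders are left in place for the caller to substitute.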
def WriteCtpBatch(command, tag, outfile = 'ctp_batch.sh',
username = getpass.getuser(), queue = 'PE_8', procs = 8):
outt = open(outfile,'w')
# QUEUE & BATCH INFO
outt.write('#!/bin/tcsh\n')
outt.write('#\n')
outt.write('#$ -pe %s %d\n' % (queue,procs))
outt.write('#$ -o %s\n' % (QUEUE_OUT))
outt.write('#$ -e %s\n' % (QUEUE_ERR))
outt.write('#$ -cwd\n')
outt.write('#$ -j y\n')
outt.write('#$ -m %s\n' % (QUEUE_MAIL[1]))
outt.write('#$ -M %s\n' % (QUEUE_MAIL[0]))
outt.write('#$ -N %s\n' % tag)
outt.write('\n')
# SOURCE VOTCA
outt.write('source %s\n\n' % VOTCARC)
# BASE DIRECTORY
outt.write('# BASE DIRECTORY\n')
outt.write('set basedir=`pwd`\n')
outt.write('if ( ! -d /usr/scratch/%s ) then\n' % username)
outt.write(' mkdir /usr/scratch/%s\n' % username)
outt.write('endif\n\n')
# JOB DIRECTORY
outt.write('# JOB DIRECTORY\n')
outt.write('set jno=0\n')
outt.write('while ( -d job_$jno )\n')
outt.write(' set jno = `expr $jno + 1`\n')
outt.write('end\n')
outt.write('set jobdir="/usr/scratch/%s/job_$jno"\n' % username)
outt.write('mkdir -p $jobdir\n')
outt.write('rm -rf $jobdir/*\n')
outt.write('rsync -ar $basedir/* $jobdir\n\n')
# EXECUTE HEAVY STUFF
outt.write('# EXECUTE HEAVY STUFF\n')
outt.write('cd $jobdir\n')
outt.write('%s\n' % command)
outt.write('cd ..\n\n')
# SYNC BACK
outt.write('# SYNC BACK\n')
outt.write('rsync -ar $jobdir/* $basedir\n')
outt.write('rm -rf $jobdir\n')
outt.close()
return
| 12AngryMen/votca-scripts | lib/Carlstuff/evaporation2/__cluster__.py | Python | apache-2.0 | 3,458 |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=subprocess-run-check,unused-argument,g-space-before-docstring-summary,unused-import,missing-function-docstring,g-inconsistent-quotes,redefined-outer-name,g-short-docstring-punctuation,g-importing-member,g-import-not-at-top
"""evaluator_ringmaster_wrapper: Plays some eval games between two players.
players are rows in the cloud bigtable (normally models_for_eval)
each row is a unique pair of (binary, flags) along with name.
The sh script has already:
1. Downloaded both players (binaries and models)
This script is responsible for:
2. Setup ringmaster control file
3. Call ringmaster
4. Record results into CBT
5. upload games, log, report to GCS
"""
from collections import namedtuple
import os
import shutil
import subprocess
import sys
import time
from bigtable_input import METADATA
import bigtable_output
from tensorflow.compat.v1 import gfile
from google.cloud import bigtable
CTL_NAME = "ringmaster_evals"
CTL_FILENAME = CTL_NAME + ".ctl"
CTL_GAME_DIR = CTL_NAME + ".games"
CTL_LOG = CTL_NAME + ".log"
CTL_REPORT = CTL_NAME + ".report"
MODEL_ROW_FMT = "m_eval_{}"
CTL_FILE = '''
competition_type = "playoff"
description = " Testing models_for_eval "
board_size = 19
komi = 7.5
record_games = True
stderr_to_log = True
def MinigoPlayer(path, model_pb, flags):
return Player(
"bin/bazel-bin/cc/gtp --model='tf,{{}}' {{}}".format(model_pb, flags),
cwd=path,
environ={{"LD_LIBRARY_PATH": "bin/cc/tensorflow/lib"}},
sgf_player_name_from_gtp=False)
p_a = "{m_a.name}_{m_a.hash}"
p_b = "{m_b.name}_{m_b.hash}"
players = {{
p_a: MinigoPlayer("{m_a.hash}", "{m_a.model_pb}", "{m_a.flags}"),
p_b: MinigoPlayer("{m_b.hash}", "{m_b.model_pb}", "{m_b.flags}"),
}}
matchups = [
Matchup(p_a, p_b, id="{m_a.hash}_vs_{m_b.hash}_t_{start_time}",
alternating=True, number_of_games={num_games})
]
'''
Model = namedtuple("Model", ["hash", "name", "model_pb", "flags"])
def setup_ringmaster(model_a, model_b, start_time, num_games):
with open(CTL_FILENAME, "w") as ctl_f:
ctl_f.write(
CTL_FILE.format(
m_a=model_a,
m_b=model_b,
start_time=start_time,
num_games=num_games))
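def _demo_setup_ringmaster():
    # Hedged sketch with fabricated hashes/flags: render the control-file
    # template the same way setup_ringmaster() does, without touching
    # Bigtable or the filesystem.
    m_a = Model("abc123", "model_a", "model_a.pb", "--example_flag=0")
    m_b = Model("def456", "model_b", "model_b.pb", "--example_flag=0")
    text = CTL_FILE.format(m_a=m_a, m_b=m_b, start_time=0, num_games=2)
    assert 'id="abc123_vs_def456_t_0"' in text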
def call_ringmaster(num_games):
process = subprocess.run(["ringmaster", CTL_FILENAME, "run"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
timeout=num_games * 20 * 60)
# log errors somewhere
if process.returncode != 0:
def print_err(*args):
            print(*args)  # pylint: disable=syntax-error
print_err("Error ringmaster return=", process.returncode)
print_err("Stdout:")
print_err(process.stdout)
print_err("Stderr:")
print_err(process.stderr)
return process.returncode == 0
def copy_to_gcs(src, dst):
assert gfile.Exists(src), src
assert not gfile.Exists(dst), dst
print("Saving to", dst)
with gfile.GFile(src, "rb") as src_f, gfile.GFile(dst, "wb") as dst_f:
shutil.copyfileobj(src_f, dst_f)
def record_results(bt_table, sgf_base, num_games, start_time):
games = os.listdir(CTL_GAME_DIR)
failed = len(games) != num_games
if failed:
# Upload something? log error somewhere?
assert False, (len(games), num_games)
# Upload .log and .report along side all .games
copy_to_gcs(CTL_LOG, os.path.join(sgf_base, CTL_LOG))
copy_to_gcs(CTL_REPORT, os.path.join(sgf_base, CTL_REPORT))
rows = []
for game_fn in games:
game_path = os.path.join(CTL_GAME_DIR, game_fn)
copy_to_gcs(game_path, os.path.join(sgf_base, game_fn))
metadata = bigtable_output.process_game(game_path)
metadata["sgf"] = game_fn
metadata["tool"] = "evaluator_ringmaster"
# game_fn, which contains timestamp and game number, is unique.
row = bt_table.row(game_fn)
for column, value in metadata.items():
row.set_cell(METADATA, column, value)
rows.append(row)
response = bt_table.mutate_rows(rows)
# validate that all rows were written successfully
all_good = True
for i, status in enumerate(response):
        if status.code != 0:
print("Row number {} failed to write {}".format(i, status))
all_good = False
return all_good
def get_cbt_model(bt_table, model_hash):
model_row = bt_table.read_row(MODEL_ROW_FMT.format(model_hash))
def get_cell(cell):
return model_row.cell_value(METADATA, cell.encode()).decode()
model_flags = get_cell("model_flags")
model_pb = get_cell("model_path")
model_name = get_cell("model")
return Model(
model_hash,
model_name,
os.path.basename(model_pb),
model_flags.replace("flags: ", ""),
)
if __name__ == "__main__":
ENV_VARS = [
"PROJECT",
"CBT_INSTANCE",
"CBT_TABLE",
"MODEL_A",
"MODEL_B",
"SGF_BUCKET_NAME",
]
ENV = {}
for env_var in ENV_VARS:
value = os.environ.get(env_var)
assert value, (env_var, os.environ.keys())
ENV[env_var] = value
print("bigtable: ", ENV["PROJECT"], ENV["CBT_INSTANCE"], ENV["CBT_TABLE"])
bt_table = (
bigtable.Client(ENV["PROJECT"], admin=True).instance(
ENV["CBT_INSTANCE"]).table(ENV["CBT_TABLE"]))
assert bt_table.exists(), "Table doesn't exist"
m_a_name = ENV["MODEL_A"]
m_b_name = ENV["MODEL_B"]
if m_a_name > m_b_name:
# Sort models so a <= b alphabetically
m_a_name, m_b_name = m_b_name, m_a_name
model_a = get_cbt_model(bt_table, m_a_name)
model_b = get_cbt_model(bt_table, m_b_name)
start_time = int(time.time())
print(model_a)
print(model_b)
# TODO(amj): Pass from dockerfile.
num_games = 4
setup_ringmaster(model_a, model_b, start_time, num_games)
success = call_ringmaster(num_games)
assert success
SGF_BASE = "gs://{}/eval_server/games/{}_vs_{}/{}/".format(
ENV["SGF_BUCKET_NAME"], m_a_name, m_b_name, start_time)
print("Saving to", SGF_BASE)
success = record_results(bt_table, SGF_BASE, num_games, start_time=start_time)
assert success
| mlperf/training_results_v0.7 | Google/benchmarks/minigo/implementations/minigo-research-TF-tpu-v4-128/cluster/evaluator/evaluator_ringmaster_wrapper.py | Python | apache-2.0 | 6,587 |
'''
mali_remove_gaps.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python mali_remove_gaps.py --help
Type::
python mali_remove_gaps.py --help
for command line help.
Command line options
--------------------
'''
import sys
import string
import re
import getopt
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.MaliIO as MaliIO
USAGE = """python %s [OPTIONS] < exonerate_output > filtered
Prune a nucelotide multiple alignment according to a master sequence.
1. Go in codon steps through the multiple alignment according
to the master sequence.
2. Remove all columns in other sequences, that
1. fall out of frame
2. are incomplete codons
Version = $Id: mali_remove_gaps.py 2782 2009-09-10 11:40:29Z andreas $
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-o, --file-output output
""" % sys.argv[0]
param_long_options = ["verbose=", "help", "file-output=", "version"]
param_short_options = "v:hm:e:p:c"
param_loglevel = 1
param_gap_char = "-"
param_mask_char = "x"
param_filename_output = None
# Defaults assumed for names referenced below; this script defines no
# command-line options that set them.
param_master = None
param_master_pattern = None
param_mark_codons = False
param_remove_stops = False
param_filename_translation = None
exons = {}
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
try:
optlist, args = getopt.getopt(
sys.argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("-o", "--file-output"):
param_filename_output = a
# 1. read multiple alignment in fasta format
mali, identifiers = MaliIO.readFasta(sys.stdin)
if param_loglevel >= 1:
print "# read mali with %i entries." % len(identifiers)
print E.GetHeader()
print E.GetParams()
# 1. remove gaps in multiple alignment
mali = MaliIO.removeGaps(mali)
if param_master:
frame_columns = GetFrameColumns(mali, param_master)
elif param_master_pattern:
columns = []
for id in identifiers:
if re.search(param_master_pattern, id):
columns += GetFrameColumns(mali, id)
if len(columns) == 0:
columns += GetFrameColumns(mali, identifiers[0])
# sort all columns by tuple. The "shortest" codon will be first (1,2,3)
# before (1,2,100)
columns.sort()
# select codons
frame_columns = []
last_codon = columns[0]
for codon in columns[1:]:
# skip identical codons
if codon == last_codon:
continue
# take first (shortest) codon in case of identical first residue
if codon[0] == last_codon[0]:
continue
# if not overlapping, keep
if codon[0] > last_codon[2]:
frame_columns.append(last_codon)
# if overlapping, but out of register: skip
last_codon = codon
frame_columns.append(last_codon)
# translate characters to upper/lower case according to exon info.
if exons:
for id in mali:
if id in exons:
mali[id] = AddExonInformation(
mali[id], exons[id], mask_char=param_mask_char)
if param_loglevel >= 1:
print "# found %i columns" % (len(frame_columns))
mask_chars = (string.upper(param_mask_char), string.lower(param_mask_char))
for id in mali.keys():
sequence = mali[id]
fragments = []
nstops, ncodons, naligned = 0, 0, 0
for a, b, c in frame_columns:
codon = sequence[a] + sequence[b] + sequence[c]
codon_is_aligned = False
codon_is_ok = True
for x in codon:
# a codon will be masked, if it either
# 1. contains a gap character
# 2. is an unaligned character, i.e.,
# exons and masked, or no exons and lowerwase
residue_is_unaligned = (x == param_gap_char) or \
(not exons and x in string.lowercase) or \
(exons and x in mask_chars)
codon_is_aligned = codon_is_aligned or not residue_is_unaligned
codon_is_ok = codon_is_ok and not residue_is_unaligned
if codon_is_aligned:
naligned += 1
if codon_is_ok:
ncodons += 1
if string.upper(codon) in ("TAG", "TAA", "TGA"):
if param_remove_stops:
fragments.append(param_gap_char * 3)
else:
fragments.append(codon)
nstops += 1
else:
fragments.append(codon)
else:
fragments.append(param_gap_char * 3)
mali[id] = string.join(fragments, "")
if param_loglevel >= 1:
print "# sequence: %s\tpositions: %i\taligned:%i\tcodons: %i\t stops: %i" % (id, len(fragments), naligned, ncodons, nstops)
sys.stdout.flush()
for id in mali.keys():
if param_mark_codons:
a = mali[id]
f = lambda x: a[x:x + 3]
s = string.join([f(x) for x in range(0, len(a), 3)], " ")
else:
s = mali[id]
print ">%s\n%s" % (id, s)
if param_filename_translation:
outfile = open(param_filename_translation, "w")
for id in mali.keys():
outfile.write(">%s\n%s\n" %
(id, Genomics.TranslateDNA2Protein(mali[id])))
outfile.close()
print E.GetFooter()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| CGATOxford/Optic | scripts/mali_remove_gaps.py | Python | mit | 6,154 |
#!/usr/bin/env python
# encoding: utf8
#
# An RPN calculator that supports numbers with SI scale factors and units.
# Description {{{1
"""
Engineering Calculator
A stack-based (RPN) engineering calculator with a text-based user interface that
is intended to be used interactively.
If run with no arguments, an interactive session is started. If arguments are
present, they are tested to see if they are filenames, and if so, the files are
opened and the contents are executed as a script. If they are not file names,
then the arguments themselves are treated as scripts and executed directly. The
scripts are run in the order they are specified. In this case an interactive
session would not normally be started, but if the interactive option is
specified, it would be started after all scripts have been run.
The contents of ~/.ecrc, ./.ecrc, and the start up file will be run upon start
up if they exist, and then the stack is cleared.
Usage: ec [options] [<script>...]
Options:
-i, --interactive Open an interactive session.
-s, --startup file Run commands from file to initialize calculator before
any script or interactive session is run, stack is
cleared after it is run.
-c, --nocolor Do not color the output.
-v, --verbose Narrate the execution of any scripts.
-V, --version Print the ec version information.
-h, --help Print usage information.
"""
# License {{{1
#
# Copyright (C) 2013-2022 Kenneth S. Kundert
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
# Imports {{{1
from .actions import (
actionsToUse,
defaultFormat,
defaultDigits,
defaultSpacer,
predefinedVariables,
)
from .calculator import Calculator, Display, CalculatorError, __version__, __released__
from docopt import docopt
from inform import Color, display, error, fatal, Inform, os_error, warn, terminate
from os.path import expanduser
import sys, os
# Read command line {{{1
def main():
cmdline = docopt(__doc__)
args = cmdline["<script>"]
colorscheme = None if cmdline["--nocolor"] else "dark"
startUpFile = [cmdline["--startup"]] if cmdline["--startup"] else []
interactiveSession = cmdline["--interactive"] or not args
verbose = cmdline["--verbose"]
Inform(prog_name=False, colorscheme=colorscheme)
if cmdline["--version"]:
display("ec version: {} ({})".format(__version__, __released__))
terminate()
# Define utility functions {{{1
highlight = Color("magenta", colorscheme)
def evaluateLine(calc, line, prompt):
try:
result = calc.evaluate(calc.split(line))
prompt = calc.format(result)
except CalculatorError as e:
if interactiveSession:
error(e.message)
prompt = calc.restoreStack()
else:
fatal(e.message)
return prompt
# Create calculator {{{1
calc = Calculator(
actions = actionsToUse,
formatter = Display(
formatter=defaultFormat, digits=defaultDigits, spacer=defaultSpacer
),
predefinedVariables = predefinedVariables,
backUpStack = interactiveSession,
warningPrinter = lambda warning: None,
# Disable the warning printer initially to suppress warnings from
# scripts. Will add true warning printer before starting
# interactive session. This allows users to override built in
# constants without seeing warnings.
)
prompt = "0"
# Run start up files {{{1
rcFiles = [f"{d}/.ecrc" for d in ["~", "."]]
for each in rcFiles + startUpFile:
lineno = None
try:
cmdFile = expanduser(each)
with open(cmdFile) as pFile:
for lineno, line in enumerate(pFile):
prompt = evaluateLine(calc, line, prompt)
if verbose:
display(
f"{cmdFile} {lineno}: {line.strip()} ==> {prompt}"
)
except OSError as e:
if each not in rcFiles:
fatal(os_error(e), culprit=(each, lineno))
calc.stack.clear()
prompt = "0"
# Run scripts {{{1
for arg in args:
        lineno = None
        try:
cmdFile = expanduser(arg)
if os.path.exists(cmdFile):
with open(cmdFile) as pFile:
for lineno, line in enumerate(pFile):
loc = f"{cmdFile}s.{lineno + 1}: "
prompt = evaluateLine(calc, line, prompt)
if verbose:
display(
f"{cmdFile} {lineno}: {line.strip()} ==> {prompt}"
)
else:
loc = ""
prompt = evaluateLine(calc, arg, prompt)
if verbose:
display(f"{arg} ==> {prompt}")
except OSError as e:
            fatal(os_error(e), culprit=(arg, lineno))
# Interact with user {{{1
if interactiveSession:
# turn on warnings
calc.warningPrinter = warn
while True:
try:
entered = input(f"{highlight(prompt)}: ")
except (EOFError, KeyboardInterrupt, SystemError):
display()
terminate()
prompt = evaluateLine(calc, entered, prompt)
display(prompt)
terminate()
| KenKundert/ec | engineering_calculator/main.py | Python | gpl-3.0 | 6,074 |
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
for aa in path_adds:
    if aa not in sys.path:
        sys.path.append(aa)
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["objtype"] = u'saved_question'
kwargs["name"] = u'Installed Applications'
print "...CALLING: handler.get with args: {}".format(kwargs)
response = handler.get(**kwargs)
print "...OUTPUT: Type of response: ", type(response)
print "...OUTPUT: print of response:"
print response
# call the export_obj() method to convert response to JSON and store it in out
export_kwargs = {}
export_kwargs['obj'] = response
export_kwargs['export_format'] = 'json'
print "...CALLING: handler.export_obj() with args {}".format(export_kwargs)
out = handler.export_obj(**export_kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print "...OUTPUT: print the objects returned in JSON format:"
print out
| tanium/pytan | BUILD/doc/source/examples/get_saved_question_by_name_code.py | Python | mit | 2,819 |
class node:
def __init__(self,key):
self.left = None
self.right = None
self.val = key
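# Breadth-first (level order) traversal: visit the root, then every node at
# depth 1, then depth 2, and so on, using a FIFO queue. Note that
# list.pop(0) is O(n); collections.deque.popleft() would make each dequeue O(1).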
def LevelOrder(root):
if root:
queue = []
queue.append(root)
        while len(queue) > 0:
            if queue[0].left is not None:
                queue.append(queue[0].left)
            if queue[0].right is not None:
                queue.append(queue[0].right)
            print(queue[0].val, end=" ")
            queue.pop(0)
root = node(1)
root.left = node(2)
root.right = node(3)
root.left.left = node(4)
root.left.right = node(5)
root.right.left = node(6)
root.right.right = node(7)
print("Level Order traversal of tree is ", end = "")
LevelOrder(root)
''' Output
Level Order traversal of tree is 1 2 3 4 5 6 7
'''
| KavyaSharma/Algo_Ds_Notes | Tree_Levelorder_Traversal/Tree_Levelorder_Traversal.py | Python | gpl-3.0 | 756 |
"""Utility functions and classes used by nose internally.
"""
import inspect
import itertools
import logging
import os
import re
import sys
import types
import unittest
from nose.pyversion import ClassType, TypeType, isgenerator
log = logging.getLogger('nose')
ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
class_types = (ClassType, TypeType)
skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)"
try:
set()
set = set # make from nose.util import set happy
except NameError:
try:
from sets import Set as set
except ImportError:
pass
def ls_tree(dir_path="",
skip_pattern=skip_pattern,
indent="|-- ", branch_indent="| ",
last_indent="`-- ", last_branch_indent=" "):
# TODO: empty directories look like non-directory files
return "\n".join(_ls_tree_lines(dir_path, skip_pattern,
indent, branch_indent,
last_indent, last_branch_indent))
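# Illustrative output shape of ls_tree('nose') (actual entries will vary):
# |-- __init__.py
# |-- loader.py
# `-- plugins
#     |-- __init__.py
#     `-- base.py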
def _ls_tree_lines(dir_path, skip_pattern,
indent, branch_indent, last_indent, last_branch_indent):
if dir_path == "":
dir_path = os.getcwd()
lines = []
names = os.listdir(dir_path)
names.sort()
dirs, nondirs = [], []
for name in names:
if re.match(skip_pattern, name):
continue
if os.path.isdir(os.path.join(dir_path, name)):
dirs.append(name)
else:
nondirs.append(name)
# list non-directories first
entries = list(itertools.chain([(name, False) for name in nondirs],
[(name, True) for name in dirs]))
def ls_entry(name, is_dir, ind, branch_ind):
if not is_dir:
yield ind + name
else:
path = os.path.join(dir_path, name)
if not os.path.islink(path):
yield ind + name
subtree = _ls_tree_lines(path, skip_pattern,
indent, branch_indent,
last_indent, last_branch_indent)
for x in subtree:
yield branch_ind + x
for name, is_dir in entries[:-1]:
for line in ls_entry(name, is_dir, indent, branch_indent):
yield line
if entries:
name, is_dir = entries[-1]
for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
yield line
def absdir(path):
"""Return absolute, normalized path to directory, if it exists; None
otherwise.
"""
if not os.path.isabs(path):
path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
path)))
if path is None or not os.path.isdir(path):
return None
return path
def absfile(path, where=None):
"""Return absolute, normalized path to file (optionally in directory
where), or None if the file can't be found either in where or the current
working directory.
"""
orig = path
if where is None:
where = os.getcwd()
if isinstance(where, list) or isinstance(where, tuple):
for maybe_path in where:
maybe_abs = absfile(path, maybe_path)
if maybe_abs is not None:
return maybe_abs
return None
if not os.path.isabs(path):
path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
if path is None or not os.path.exists(path):
if where != os.getcwd():
# try the cwd instead
path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
orig)))
if path is None or not os.path.exists(path):
return None
if os.path.isdir(path):
        # might want an __init__.py from package
init = os.path.join(path,'__init__.py')
if os.path.isfile(init):
return init
elif os.path.isfile(path):
return path
return None
def anyp(predicate, iterable):
for item in iterable:
if predicate(item):
return True
return False
def file_like(name):
"""A name is file-like if it is a path that exists, or it has a
directory part, or it ends in .py, or it isn't a legal python
identifier.
"""
return (os.path.exists(name)
or os.path.dirname(name)
or name.endswith('.py')
or not ident_re.match(os.path.splitext(name)[0]))
def func_lineno(func):
"""Get the line number of a function. First looks for
compat_co_firstlineno, then func_code.co_first_lineno.
"""
try:
return func.compat_co_firstlineno
except AttributeError:
try:
return func.func_code.co_firstlineno
except AttributeError:
return -1
def isclass(obj):
"""Is obj a class? Inspect's isclass is too liberal and returns True
for objects that can't be subclasses of anything.
"""
obj_type = type(obj)
return obj_type in class_types or issubclass(obj_type, type)
# backwards compat (issue #64)
is_generator = isgenerator
def ispackage(path):
"""
Is this path a package directory?
>>> ispackage('nose')
True
>>> ispackage('unit_tests')
False
>>> ispackage('nose/plugins')
True
>>> ispackage('nose/loader.py')
False
"""
if os.path.isdir(path):
# at least the end of the path must be a legal python identifier
# and __init__.py[co] must exist
end = os.path.basename(path)
if ident_re.match(end):
for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
if os.path.isfile(os.path.join(path, init)):
return True
if sys.platform.startswith('java') and \
os.path.isfile(os.path.join(path, '__init__$py.class')):
return True
return False
def isproperty(obj):
"""
Is this a property?
>>> class Foo:
... def got(self):
... return 2
... def get(self):
... return 1
... get = property(get)
>>> isproperty(Foo.got)
False
>>> isproperty(Foo.get)
True
"""
return type(obj) == property
def getfilename(package, relativeTo=None):
"""Find the python source file for a package, relative to a
particular directory (defaults to current working directory if not
given).
"""
if relativeTo is None:
relativeTo = os.getcwd()
path = os.path.join(relativeTo, os.sep.join(package.split('.')))
suffixes = ('/__init__.py', '.py')
for suffix in suffixes:
filename = path + suffix
if os.path.exists(filename):
return filename
return None
def getpackage(filename):
"""
Find the full dotted package name for a given python source file
name. Returns None if the file is not a python source file.
>>> getpackage('foo.py')
'foo'
>>> getpackage('biff/baf.py')
'baf'
>>> getpackage('nose/util.py')
'nose.util'
Works for directories too.
>>> getpackage('nose')
'nose'
>>> getpackage('nose/plugins')
'nose.plugins'
And __init__ files stuck onto directories
>>> getpackage('nose/plugins/__init__.py')
'nose.plugins'
Absolute paths also work.
>>> path = os.path.abspath(os.path.join('nose', 'plugins'))
>>> getpackage(path)
'nose.plugins'
"""
src_file = src(filename)
if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file):
return None
base, ext = os.path.splitext(os.path.basename(src_file))
if base == '__init__':
mod_parts = []
else:
mod_parts = [base]
path, part = os.path.split(os.path.split(src_file)[0])
while part:
if ispackage(os.path.join(path, part)):
mod_parts.append(part)
else:
break
path, part = os.path.split(path)
mod_parts.reverse()
return '.'.join(mod_parts)
def ln(label):
"""Draw a 70-char-wide divider, with label in the middle.
>>> ln('hello there')
'---------------------------- hello there -----------------------------'
"""
label_len = len(label) + 2
chunk = (70 - label_len) // 2
out = '%s %s %s' % ('-' * chunk, label, '-' * chunk)
pad = 70 - len(out)
if pad > 0:
out = out + ('-' * pad)
return out
def resolve_name(name, module=None):
"""Resolve a dotted name to a module and its parts. This is stolen
wholesale from unittest.TestLoader.loadTestByName.
>>> resolve_name('nose.util') #doctest: +ELLIPSIS
<module 'nose.util' from...>
>>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
<function resolve_name at...>
"""
parts = name.split('.')
parts_copy = parts[:]
if module is None:
while parts_copy:
try:
log.debug("__import__ %s", name)
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
for part in parts:
obj = getattr(obj, part)
return obj
def split_test_name(test):
"""Split a test name into a 3-tuple containing file, module, and callable
names, any of which (but not all) may be blank.
Test names are in the form:
file_or_module:callable
Either side of the : may be dotted. To change the splitting behavior, you
can alter nose.util.split_test_re.
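    For example, a dotted module plus callable splits as:
    >>> split_test_name('mod.submod:TestCase.test_method')
    (None, 'mod.submod', 'TestCase.test_method')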
"""
norm = os.path.normpath
file_or_mod = test
fn = None
if not ':' in test:
# only a file or mod part
if file_like(test):
return (norm(test), None, None)
else:
return (None, test, None)
# could be path|mod:callable, or a : in the file path someplace
head, tail = os.path.split(test)
if not head:
# this is a case like 'foo:bar' -- generally a module
# name followed by a callable, but also may be a windows
# drive letter followed by a path
try:
file_or_mod, fn = test.split(':')
if file_like(fn):
# must be a funny path
file_or_mod, fn = test, None
except ValueError:
# more than one : in the test
# this is a case like c:\some\path.py:a_test
parts = test.split(':')
if len(parts[0]) == 1:
file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
else:
# nonsense like foo:bar:baz
raise ValueError("Test name '%s' could not be parsed. Please "
"format test names as path:callable or "
"module:callable.")
elif not tail:
# this is a case like 'foo:bar/'
# : must be part of the file path, so ignore it
file_or_mod = test
else:
if ':' in tail:
file_part, fn = tail.split(':')
else:
file_part = tail
file_or_mod = os.sep.join([head, file_part])
if file_or_mod:
if file_like(file_or_mod):
return (norm(file_or_mod), None, fn)
else:
return (None, file_or_mod, fn)
else:
return (None, None, fn)
split_test_name.__test__ = False # do not collect
def test_address(test):
"""Find the test address for a test, which may be a module, filename,
class, method or function.
"""
if hasattr(test, "address"):
return test.address()
# type-based polymorphism sucks in general, but I believe is
# appropriate here
t = type(test)
file = module = call = None
if t == types.ModuleType:
file = getattr(test, '__file__', None)
module = getattr(test, '__name__', None)
return (src(file), module, call)
if t == types.FunctionType or issubclass(t, type) or t == types.ClassType:
module = getattr(test, '__module__', None)
if module is not None:
m = sys.modules[module]
file = getattr(m, '__file__', None)
if file is not None:
file = os.path.abspath(file)
call = getattr(test, '__name__', None)
return (src(file), module, call)
if t == types.MethodType:
cls_adr = test_address(test.im_class)
return (src(cls_adr[0]), cls_adr[1],
"%s.%s" % (cls_adr[2], test.__name__))
# handle unittest.TestCase instances
if isinstance(test, unittest.TestCase):
if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
or hasattr(test, '_testFunc')): # 2.7
# unittest FunctionTestCase
try:
return test_address(test._FunctionTestCase__testFunc)
except AttributeError:
return test_address(test._testFunc)
# regular unittest.TestCase
cls_adr = test_address(test.__class__)
# 2.5 compat: __testMethodName changed to _testMethodName
try:
method_name = test._TestCase__testMethodName
except AttributeError:
method_name = test._testMethodName
return (src(cls_adr[0]), cls_adr[1],
"%s.%s" % (cls_adr[2], method_name))
if (hasattr(test, '__class__') and
test.__class__.__module__ not in ('__builtin__', 'builtins')):
return test_address(test.__class__)
raise TypeError("I don't know what %s is (%s)" % (test, t))
test_address.__test__ = False # do not collect
def try_run(obj, names):
"""Given a list of possible method names, try to run them with the
provided object. Keep going until something works. Used to run
setup/teardown methods for module, package, and function tests.
"""
for name in names:
func = getattr(obj, name, None)
if func is not None:
if type(obj) == types.ModuleType:
# py.test compatibility
try:
args, varargs, varkw, defaults = inspect.getargspec(func)
except TypeError:
# Not a function. If it's callable, call it anyway
if hasattr(func, '__call__'):
func = func.__call__
try:
args, varargs, varkw, defaults = \
inspect.getargspec(func)
args.pop(0) # pop the self off
except TypeError:
raise TypeError("Attribute %s of %r is not a python "
"function. Only functions or callables"
" may be used as fixtures." %
(name, obj))
if len(args):
log.debug("call fixture %s.%s(%s)", obj, name, obj)
return func(obj)
log.debug("call fixture %s.%s", obj, name)
return func()
def src(filename):
"""Find the python source file for a .pyc, .pyo or $py.class file on
jython. Returns the filename provided if it is not a python source
file.
"""
if filename is None:
return filename
if sys.platform.startswith('java') and filename.endswith('$py.class'):
return '.'.join((filename[:-9], 'py'))
base, ext = os.path.splitext(filename)
if ext in ('.pyc', '.pyo', '.py'):
return '.'.join((base, 'py'))
return filename
def regex_last_key(regex):
"""Sort key function factory that puts items that match a
regular expression last.
>>> from nose.config import Config
>>> from nose.pyversion import sort_list
>>> c = Config()
>>> regex = c.testMatch
>>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
>>> sort_list(entries, regex_last_key(regex))
>>> entries
['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
"""
def k(obj):
if regex.search(obj):
return (1, obj)
return (0, obj)
return k
def tolist(val):
"""Convert a value that may be a list or a (possibly comma-separated)
string into a list. The exception: None is returned as None, not [None].
>>> tolist(["one", "two"])
['one', 'two']
>>> tolist("hello")
['hello']
>>> tolist("separate,values, with, commas, spaces , are ,ok")
['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
"""
if val is None:
return None
try:
# might already be a list
val.extend([])
return val
except AttributeError:
pass
# might be a string
try:
return re.split(r'\s*,\s*', val)
except TypeError:
# who knows...
return list(val)
class odict(dict):
"""Simple ordered dict implementation, based on:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
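    Keys come back in insertion order:
    >>> d = odict()
    >>> d['b'] = 1
    >>> d['a'] = 2
    >>> d.keys()
    ['b', 'a']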
"""
def __init__(self, *arg, **kw):
self._keys = []
super(odict, self).__init__(*arg, **kw)
def __delitem__(self, key):
super(odict, self).__delitem__(key)
self._keys.remove(key)
def __setitem__(self, key, item):
super(odict, self).__setitem__(key, item)
if key not in self._keys:
self._keys.append(key)
def __str__(self):
return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()])
def clear(self):
super(odict, self).clear()
self._keys = []
def copy(self):
d = super(odict, self).copy()
d._keys = self._keys[:]
return d
def items(self):
return zip(self._keys, self.values())
def keys(self):
return self._keys[:]
def setdefault(self, key, failobj=None):
item = super(odict, self).setdefault(key, failobj)
if key not in self._keys:
self._keys.append(key)
return item
def update(self, dict):
super(odict, self).update(dict)
for key in dict.keys():
if key not in self._keys:
self._keys.append(key)
def values(self):
return map(self.get, self._keys)
def transplant_func(func, module):
"""
Make a function imported from module A appear as if it is located
in module B.
>>> from pprint import pprint
>>> pprint.__module__
'pprint'
>>> pp = transplant_func(pprint, __name__)
>>> pp.__module__
'nose.util'
The original function is not modified.
>>> pprint.__module__
'pprint'
Calling the transplanted function calls the original.
>>> pp([1, 2])
[1, 2]
>>> pprint([1,2])
[1, 2]
"""
from nose.tools import make_decorator
def newfunc(*arg, **kw):
return func(*arg, **kw)
newfunc = make_decorator(func)(newfunc)
newfunc.__module__ = module
return newfunc
def transplant_class(cls, module):
"""
Make a class appear to reside in `module`, rather than the module in which
it is actually defined.
>>> from nose.failure import Failure
>>> Failure.__module__
'nose.failure'
>>> Nf = transplant_class(Failure, __name__)
>>> Nf.__module__
'nose.util'
>>> Nf.__name__
'Failure'
"""
class C(cls):
pass
C.__module__ = module
C.__name__ = cls.__name__
return C
def safe_str(val, encoding='utf-8'):
try:
return str(val)
except UnicodeEncodeError:
if isinstance(val, Exception):
return ' '.join([safe_str(arg, encoding)
for arg in val])
return unicode(val).encode(encoding)
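# Illustrative behaviour on Python 2 (editorial addition): plain values pass
# through str(), while unicode that str() cannot encode falls back to the
# given encoding, e.g. safe_str(u'caf\xe9') == 'caf\xc3\xa9' with utf-8.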
if __name__ == '__main__':
import doctest
doctest.testmod()
| jasrusable/fun | venv/lib/python2.7/site-packages/nose/util.py | Python | gpl-2.0 | 19,896 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import OrOp as OrOp_
from jx_elasticsearch.es52.painless import _utils
from jx_elasticsearch.es52.painless._utils import Painless
from jx_elasticsearch.es52.painless.es_script import EsScript
from mo_json import BOOLEAN
class OrOp(OrOp_):
def to_es_script(self, schema, not_null=False, boolean=False, many=True):
return EsScript(
type=BOOLEAN,
expr=" || ".join(
"(" + Painless[t].to_es_script(schema).expr + ")"
for t in self.terms
if t
),
frum=self,
schema=schema,
)
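# Illustrative output shape (editorial note, not in the original source): for
# two painless-renderable terms a and b, the script expression produced above
# is the string "(<script for a>) || (<script for b>)", typed as BOOLEAN.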
_utils.OrOp=OrOp
| klahnakoski/SpotManager | vendor/jx_elasticsearch/es52/painless/or_op.py | Python | mpl-2.0 | 984 |
from flask import Blueprint
bookmarks = Blueprint('bookmarks', __name__, template_folder='templates')
from . import views # noqa
| jhh/puka-server-flask | puka/bookmarks/__init__.py | Python | apache-2.0 | 132 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six import iteritems, string_types
import datetime
import frappe, sys
from frappe import _
from frappe.utils import (cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta,
sanitize_html, sanitize_email, cast_fieldtype)
from frappe.model import default_fields
from frappe.model.naming import set_new_name
from frappe.model.utils.link_count import notify_link_count
from frappe.modules import load_doctype_module
from frappe.model import display_fieldtypes
from frappe.model.db_schema import type_map, varchar_len
from frappe.utils.password import get_decrypted_password, set_encrypted_password
_classes = {}
def get_controller(doctype):
"""Returns the **class** object of the given DocType.
For `custom` type, returns `frappe.model.document.Document`.
:param doctype: DocType name as string."""
from frappe.model.document import Document
global _classes
	if doctype not in _classes:
module_name, custom = frappe.db.get_value("DocType", doctype, ("module", "custom"), cache=True) \
or ["Core", False]
if custom:
_class = Document
else:
module = load_doctype_module(doctype, module_name)
classname = doctype.replace(" ", "").replace("-", "")
			if hasattr(module, classname):
				_class = getattr(module, classname)
				if not issubclass(_class, BaseDocument):
					raise ImportError(doctype)
			else:
				raise ImportError(doctype)
_classes[doctype] = _class
return _classes[doctype]
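# Illustrative usage (editorial addition; assumes a running frappe site with
# the standard "ToDo" doctype):
#     controller = get_controller("ToDo")
#     doc = controller({"doctype": "ToDo", "description": "x"})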
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
if hasattr(self, "__setup__"):
self.__setup__()
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in iteritems(d):
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in iteritems(d):
# dont_update_if_missing is a list of fieldnames, for which, you don't want to set default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
		if value is None:
			value = {}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
# reference parent document
value.parent_doc = self
return value
else:
# metaclasses may have arbitrary lists
# which we can ignore
if (getattr(self, '_metaclass', None)
or self.__class__.__name__ in ('Meta', 'FormMeta', 'DocField')):
return value
raise ValueError(
'Document for field "{0}" attached to child table of "{1}" must be a dict or BaseDocument, not {2} ({3})'.format(key,
self.name, str(type(value))[1:-1], value)
)
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError(key)
value = get_controller(value["doctype"])(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if value.docstatus is None:
value.docstatus = 0
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self, sanitize=True, convert_dates_to_str=False):
d = frappe._dict()
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
# if no need for sanitization and value is None, continue
if not sanitize and d[fieldname] is None:
continue
df = self.meta.get_field(fieldname)
if df:
if df.fieldtype=="Check":
if d[fieldname]==None:
d[fieldname] = 0
elif (not isinstance(d[fieldname], int) or d[fieldname] > 1):
d[fieldname] = 1 if cint(d[fieldname]) else 0
elif df.fieldtype=="Int" and not isinstance(d[fieldname], int):
d[fieldname] = cint(d[fieldname])
elif df.fieldtype in ("Currency", "Float", "Percent") and not isinstance(d[fieldname], float):
d[fieldname] = flt(d[fieldname])
elif df.fieldtype in ("Datetime", "Date", "Time") and d[fieldname]=="":
d[fieldname] = None
elif df.get("unique") and cstr(d[fieldname]).strip()=="":
# unique empty field should be set to None
d[fieldname] = None
if isinstance(d[fieldname], list) and df.fieldtype != 'Table':
frappe.throw(_('Value for {0} cannot be a list').format(_(df.label)))
if convert_dates_to_str and isinstance(d[fieldname], (datetime.datetime, datetime.time, datetime.timedelta)):
d[fieldname] = str(d[fieldname])
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
if key in ("idx", "docstatus") and self.__dict__[key] is None:
self.__dict__[key] = 0
for key in self.get_valid_columns():
if key not in self.__dict__:
self.__dict__[key] = None
def get_valid_columns(self):
if self.doctype not in frappe.local.valid_columns:
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
frappe.local.valid_columns[self.doctype] = valid
return frappe.local.valid_columns[self.doctype]
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False, no_default_fields=False, convert_dates_to_str=False):
doc = self.get_valid_dict(convert_dates_to_str=convert_dates_to_str)
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in list(doc):
if doc[k] is None:
del doc[k]
if no_default_fields:
for k in list(doc):
if k in default_fields:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_liked_by", "__run_link_triggers"):
if self.get(key):
doc[key] = self.get(key)
return doc
def as_json(self):
return frappe.as_json(self.as_dict())
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
"""INSERT the document (with valid columns) in the database."""
if not self.name:
# name will be set by document class in most cases
set_new_name(self)
if not self.creation:
self.creation = self.modified = now()
			self.created_by = self.modified_by = frappe.session.user
d = self.get_valid_dict(convert_dates_to_str=True)
columns = list(d)
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), list(d.values()))
except Exception as e:
if e.args[0]==1062:
if "PRIMARY" in cstr(e.args[1]):
if self.meta.autoname=="hash":
# hash collision? try again
self.name = None
self.db_insert()
return
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.DuplicateEntryError(self.doctype, self.name, e)
elif "Duplicate" in cstr(e.args[1]):
# unique constraint
self.show_unique_validation_message(e)
else:
raise
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict(convert_dates_to_str=True)
# don't update name, as case might've been changed
name = d['name']
del d['name']
columns = list(d)
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), list(d.values()) + [name])
except Exception as e:
if e.args[0]==1062 and "Duplicate" in cstr(e.args[1]):
self.show_unique_validation_message(e)
else:
raise
def show_unique_validation_message(self, e):
type, value, traceback = sys.exc_info()
fieldname, label = str(e).split("'")[-2], None
# unique_first_fieldname_second_fieldname is the constraint name
# created using frappe.db.add_unique
if "unique_" in fieldname:
fieldname = fieldname.split("_", 1)[1]
df = self.meta.get_field(fieldname)
if df:
label = df.label
frappe.msgprint(_("{0} must be unique".format(label or fieldname)))
# this is used to preserve traceback
raise frappe.UniqueValidationError(self.doctype, self.name, e)
def update_modified(self):
'''Update modified timestamp'''
self.set("modified", now())
frappe.db.set_value(self.doctype, self.name, 'modified', self.modified, update_modified=False)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} {} #{}: {}: {}".format(_("Error"), frappe.bold(_(self.doctype)),
_("Row"), self.idx, _("Value missing for"), _(df.label))
else:
return _("Error: Value missing for {0}: {1}").format(_(df.parent), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": ('=', 1)}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
# check for missing parent and parenttype
if self.meta.istable:
for fieldname in ("parent", "parenttype"):
if not self.get(fieldname):
missing.append((fieldname, get_msg(frappe._dict(label=fieldname))))
return missing
def get_invalid_links(self, is_submittable=False):
'''Returns list of invalid links and also updates fetch values if not set'''
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in (self.meta.get_link_fields()
+ self.meta.get("fields", {"fieldtype": ('=', "Dynamic Link")})):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
				# get a map of values to fetch along with this link query
# that are mapped as link_fieldname.source_fieldname in Options of
# Readonly or Data or Text type fields
fields_to_fetch = [
_df for _df in self.meta.get_fields_to_fetch(df.fieldname)
if not self.get(_df.fieldname)
]
if not fields_to_fetch:
# cache a single value type
values = frappe._dict(name=frappe.db.get_value(doctype, docname,
'name', cache=True))
else:
values_to_fetch = ['name'] + [_df.fetch_from.split('.')[-1]
for _df in fields_to_fetch]
# don't cache if fetching other values too
values = frappe.db.get_value(doctype, docname,
values_to_fetch, as_dict=True)
if frappe.get_meta(doctype).issingle:
values.name = doctype
if values:
setattr(self, df.fieldname, values.name)
for _df in fields_to_fetch:
setattr(self, _df.fieldname, values[_df.fetch_from.split('.')[-1]])
notify_link_count(doctype, docname)
if not values.name:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
			# skip when there are only empty options (list(...) keeps this
			# working on Python 3, where filter() returns a lazy iterator)
			if not list(filter(None, options)):
				continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import or self.is_new() or self.flags.ignore_validate_constants:
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": ('=',1)})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
df = self.meta.get_field(fieldname)
# This conversion to string only when fieldtype is Date
if df.fieldtype == 'Date' or df.fieldtype == 'Datetime':
value = str(values.get(fieldname))
else:
value = values.get(fieldname)
if self.get(fieldname) != value:
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_length(self):
if frappe.flags.in_install:
return
if self.meta.issingle:
# single doctype value type is mediumtext
return
for fieldname, value in iteritems(self.get_valid_dict()):
df = self.meta.get_field(fieldname)
if df and df.fieldtype in type_map and type_map[df.fieldtype][0]=="varchar":
max_length = cint(df.get("length")) or cint(varchar_len)
if len(cstr(value)) > max_length:
if self.parentfield and self.idx:
reference = _("{0}, Row {1}").format(_(self.doctype), self.idx)
else:
reference = "{0} {1}".format(_(self.doctype), self.name)
frappe.throw(_("{0}: '{1}' ({3}) will get truncated, as max characters allowed is {2}")\
.format(reference, _(df.label), max_length, value), frappe.CharacterLengthExceededError, title=_('Value too big'))
def _validate_update_after_submit(self):
# get the full doc with children
db_values = frappe.get_doc(self.doctype, self.name).as_dict()
for key in self.as_dict():
df = self.meta.get_field(key)
db_value = db_values.get(key)
if df and not df.allow_on_submit and (self.get(key) or db_value):
if df.fieldtype=="Table":
# just check if the table size has changed
# individual fields will be checked in the loop for children
self_value = len(self.get(key))
db_value = len(db_value)
else:
self_value = self.get_value(key)
if self_value != db_value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def _sanitize_content(self):
"""Sanitize HTML and Email in field values. Used to prevent XSS.
- Ignore if 'Ignore XSS Filter' is checked or fieldtype is 'Code'
"""
if frappe.flags.in_install:
return
for fieldname, value in self.get_valid_dict().items():
if not value or not isinstance(value, string_types):
continue
value = frappe.as_unicode(value)
if (u"<" not in value and u">" not in value):
# doesn't look like html so no need
continue
elif "<!-- markdown -->" in value and not ("<script" in value or "javascript:" in value):
# should be handled separately via the markdown converter function
continue
df = self.meta.get_field(fieldname)
sanitized_value = value
if df and df.get("fieldtype") in ("Data", "Code", "Small Text") and df.get("options")=="Email":
sanitized_value = sanitize_email(value)
elif df and (df.get("ignore_xss_filter")
or (df.get("fieldtype")=="Code" and df.get("options")!="Email")
or df.get("fieldtype") in ("Attach", "Attach Image")
# cancelled and submit but not update after submit should be ignored
or self.docstatus==2
or (self.docstatus==1 and not df.get("allow_on_submit"))):
continue
else:
sanitized_value = sanitize_html(value, linkify=df.fieldtype=='Text Editor')
self.set(fieldname, sanitized_value)
def _save_passwords(self):
'''Save password field values in __Auth table'''
if self.flags.ignore_save_passwords is True:
return
for df in self.meta.get('fields', {'fieldtype': ('=', 'Password')}):
if self.flags.ignore_save_passwords and df.fieldname in self.flags.ignore_save_passwords: continue
new_password = self.get(df.fieldname)
if new_password and not self.is_dummy_password(new_password):
# is not a dummy password like '*****'
set_encrypted_password(self.doctype, self.name, new_password, df.fieldname)
# set dummy password like '*****'
self.set(df.fieldname, '*'*len(new_password))
def get_password(self, fieldname='password', raise_exception=True):
if self.get(fieldname) and not self.is_dummy_password(self.get(fieldname)):
return self.get(fieldname)
return get_decrypted_password(self.doctype, self.name, fieldname, raise_exception=raise_exception)
def is_dummy_password(self, pwd):
return ''.join(set(pwd))=='*'
def precision(self, fieldname, parentfield=None):
"""Returns float precision for a particular field (or get global default).
:param fieldname: Fieldname for which precision is required.
:param parentfield: If fieldname is in child table."""
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, string_types):
parentfield = parentfield.parentfield
cache_key = parentfield or "main"
if not hasattr(self, "_precision"):
self._precision = frappe._dict()
if cache_key not in self._precision:
self._precision[cache_key] = frappe._dict()
if fieldname not in self._precision[cache_key]:
self._precision[cache_key][fieldname] = None
doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype
df = frappe.get_meta(doctype).get_field(fieldname)
if df.fieldtype in ("Currency", "Float", "Percent"):
self._precision[cache_key][fieldname] = get_field_precision(df, self)
return self._precision[cache_key][fieldname]
def get_formatted(self, fieldname, doc=None, currency=None, absolute_value=False, translated=False):
from frappe.utils.formatters import format_value
df = self.meta.get_field(fieldname)
if not df and fieldname in default_fields:
from frappe.model.meta import get_default_df
df = get_default_df(fieldname)
val = self.get(fieldname)
if translated:
val = _(val)
if absolute_value and isinstance(val, (int, float)):
val = abs(self.get(fieldname))
if not doc:
doc = getattr(self, "parent_doc", None) or self
return format_value(val, df=df, doc=doc, currency=currency)
def is_print_hide(self, fieldname, df=None, for_print=True):
"""Returns true if fieldname is to be hidden for print.
Print Hide can be set via the Print Format Builder or in the controller as a list
of hidden fields. Example
class MyDoc(Document):
def __setup__(self):
self.print_hide = ["field1", "field2"]
:param fieldname: Fieldname to be checked if hidden.
"""
meta_df = self.meta.get_field(fieldname)
if meta_df and meta_df.get("__print_hide"):
return True
print_hide = 0
if self.get(fieldname)==0 and not self.meta.istable:
print_hide = ( df and df.print_hide_if_no_value ) or ( meta_df and meta_df.print_hide_if_no_value )
if not print_hide:
if df and df.print_hide is not None:
print_hide = df.print_hide
elif meta_df:
print_hide = meta_df.print_hide
return print_hide
def in_format_data(self, fieldname):
"""Returns True if shown via Print Format::`format_data` property.
Called from within standard print format."""
doc = getattr(self, "parent_doc", self)
if hasattr(doc, "format_data_map"):
return fieldname in doc.format_data_map
else:
return True
def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
to_reset = []
for df in high_permlevel_fields:
if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes:
to_reset.append(df)
if to_reset:
if self.is_new():
# if new, set default value
ref_doc = frappe.new_doc(self.doctype)
else:
# get values from old doc
if self.parent:
self.parent_doc.get_latest()
ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0]
else:
ref_doc = self.get_latest()
for df in to_reset:
self.set(df.fieldname, ref_doc.get(df.fieldname))
def get_value(self, fieldname):
df = self.meta.get_field(fieldname)
val = self.get(fieldname)
return self.cast(val, df)
def cast(self, value, df):
return cast_fieldtype(df.fieldtype, value)
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_doc
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype": ('=', "Text Editor")}):
extract_images_from_doc(self, df.fieldname)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out, _filters = [], {}
if not data:
return out
# setup filters as tuples
if filters:
for f in filters:
fval = filters[f]
if not isinstance(fval, (tuple, list)):
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif isinstance(fval, string_types) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
_filters[f] = fval
for d in data:
add = True
for f, fval in iteritems(_filters):
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
			# stop once exactly `limit` matches have been collected
			if limit and len(out) == limit:
break
return out
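# Illustrative examples of the accepted filter syntax (editorial addition,
# hypothetical data):
#     rows = [frappe._dict(status="Open"), frappe._dict(status="Closed")]
#     _filter(rows, {"status": "Open"})         # -> [rows[0]]
#     _filter(rows, {"status": ["!=", "Open"]}) # -> [rows[1]]
#     _filter(rows, {"status": "^Clo"})         # -> [rows[1]]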
| neilLasrado/frappe | frappe/model/base_document.py | Python | mit | 25,380 |
import serial
import struct
import time
ser = serial.Serial(3, 1000000) # open serial port index 3 (e.g. COM4) at 1 Mbaud
print ser.portstr # check which port was really used
ser.write("hello") # write a string
# Dynamixel-style PING packet: header 0xFF 0xFF, servo ID 1, length 2,
# instruction 0x01 (PING), checksum = ~(1 + 2 + 1) & 0xFF = 0xFB
snd = [0xFF, 0xFF, 0x1, 0x2, 0x1, 0xFB]
data = []
for element in snd:
data.append(struct.pack('B', element))
data = ''.join(data)
numRuns = 10000
runRange = range(numRuns)
t1 = time.time()
for i in runRange:
ser.write(data)
ser.read(6)
t2 = time.time()
ser.close() # close port
print "total runtime was", t2-t1
print "average RTT was", (t2-t1)/numRuns | robEllenberg/acesHubo | utils/testTools/robotis/sertest.py | Python | gpl-3.0 | 594 |
import frappe
def get_filters_config():
filters_config = {
"fiscal year": {
"label": "Fiscal Year",
"get_field": "erpnext.accounts.utils.get_fiscal_year_filter_field",
"valid_for_fieldtypes": ["Date", "Datetime", "DateRange"],
"depends_on": "company",
}
}
return filters_config | ESS-LLP/erpnext | erpnext/startup/filters.py | Python | gpl-3.0 | 299 |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bayesian NN using factorized VI (Bayes By Backprop. Blundell et al. 2014).
See https://arxiv.org/abs/1505.05424 for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from absl import flags
from bandits.core.bayesian_nn import BayesianNN
FLAGS = flags.FLAGS
def log_gaussian(x, mu, sigma, reduce_sum=True):
"""Returns log Gaussian pdf."""
res = (-0.5 * np.log(2 * np.pi) - tf.log(sigma) - tf.square(x - mu) /
(2 * tf.square(sigma)))
if reduce_sum:
return tf.reduce_sum(res)
else:
return res
def analytic_kl(mu_1, sigma_1, mu_2, sigma_2):
"""KL for two Gaussian distributions with diagonal covariance matrix."""
sigma_1_sq = tf.square(sigma_1)
sigma_2_sq = tf.square(sigma_2)
t1 = tf.square(mu_1 - mu_2) / (2. * sigma_2_sq)
t2 = (sigma_1_sq/sigma_2_sq - 1. - tf.log(sigma_1_sq) + tf.log(sigma_2_sq))/2.
return tf.reduce_sum(t1 + t2)
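# Editorial sanity check (not in the original source): for univariate
# Gaussians the expression above matches the closed form
#   KL(N(mu1, s1^2) || N(mu2, s2^2)) = log(s2/s1) + (s1^2 + (mu1-mu2)^2)/(2*s2^2) - 1/2
# e.g. mu1=0, s1=1, mu2=1, s2=2:
#   t1 = (0-1)^2 / (2*4) = 0.125
#   t2 = (1/4 - 1 - log(1) + log(4)) / 2 ~= 0.3181
#   t1 + t2 ~= 0.4431 == log(2) + 2/8 - 1/2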
class VariationalNeuralBanditModel(BayesianNN):
"""Implements an approximate Bayesian NN using Variational Inference."""
def __init__(self, hparams, name="BBBNN"):
self.name = name
self.hparams = hparams
self.n_in = self.hparams.context_dim
self.n_out = self.hparams.num_actions
self.layers = self.hparams.layer_sizes
self.init_scale = self.hparams.init_scale
self.f_num_points = None
if "f_num_points" in hparams:
self.f_num_points = self.hparams.f_num_points
self.cleared_times_trained = self.hparams.cleared_times_trained
self.initial_training_steps = self.hparams.initial_training_steps
self.training_schedule = np.linspace(self.initial_training_steps,
self.hparams.training_epochs,
self.cleared_times_trained)
self.verbose = getattr(self.hparams, "verbose", True)
self.weights_m = {}
self.weights_std = {}
self.biases_m = {}
self.biases_std = {}
self.times_trained = 0
if self.hparams.use_sigma_exp_transform:
self.sigma_transform = tf.exp
self.inverse_sigma_transform = np.log
else:
self.sigma_transform = tf.nn.softplus
self.inverse_sigma_transform = lambda y: y + np.log(1. - np.exp(-y))
# Whether to use the local reparameterization trick to compute the loss.
# See details in https://arxiv.org/abs/1506.02557
self.use_local_reparameterization = True
self.build_graph()
def build_mu_variable(self, shape):
"""Returns a mean variable initialized as N(0, 0.05)."""
return tf.Variable(tf.random_normal(shape, 0.0, 0.05))
def build_sigma_variable(self, shape, init=-5.):
"""Returns a sigma variable initialized as N(init, 0.05)."""
# Initialize sigma to be very small initially to encourage MAP opt first
return tf.Variable(tf.random_normal(shape, init, 0.05))
def build_layer(self, input_x, input_x_local, shape,
layer_id, activation_fn=tf.nn.relu):
"""Builds a variational layer, and computes KL term.
Args:
input_x: Input to the variational layer.
input_x_local: Input when the local reparameterization trick was applied.
shape: [number_inputs, number_outputs] for the layer.
layer_id: Number of layer in the architecture.
activation_fn: Activation function to apply.
Returns:
output_h: Output of the variational layer.
output_h_local: Output when local reparameterization trick was applied.
neg_kl: Negative KL term for the layer.
"""
w_mu = self.build_mu_variable(shape)
w_sigma = self.sigma_transform(self.build_sigma_variable(shape))
w_noise = tf.random_normal(shape)
w = w_mu + w_sigma * w_noise
b_mu = self.build_mu_variable([1, shape[1]])
b_sigma = self.sigma_transform(self.build_sigma_variable([1, shape[1]]))
b = b_mu
# Store means and stds
self.weights_m[layer_id] = w_mu
self.weights_std[layer_id] = w_sigma
self.biases_m[layer_id] = b_mu
self.biases_std[layer_id] = b_sigma
# Create outputs
output_h = activation_fn(tf.matmul(input_x, w) + b)
if self.use_local_reparameterization:
# Use analytic KL divergence wrt the prior
neg_kl = -analytic_kl(w_mu, w_sigma,
0., tf.to_float(np.sqrt(2./shape[0])))
else:
# Create empirical KL loss terms
log_p = log_gaussian(w, 0., tf.to_float(np.sqrt(2./shape[0])))
log_q = log_gaussian(w, tf.stop_gradient(w_mu), tf.stop_gradient(w_sigma))
neg_kl = log_p - log_q
# Apply local reparameterization trick: sample activations pre nonlinearity
m_h = tf.matmul(input_x_local, w_mu) + b
v_h = tf.matmul(tf.square(input_x_local), tf.square(w_sigma))
output_h_local = m_h + tf.sqrt(v_h + 1e-6) * tf.random_normal(tf.shape(v_h))
output_h_local = activation_fn(output_h_local)
return output_h, output_h_local, neg_kl
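  # Editorial note (not in the original source): the local reparameterization
  # trick works because, for a fixed input x and elementwise W ~ N(w_mu, w_sigma^2),
  # each pre-activation x.W + b is Gaussian with mean x.w_mu + b and variance
  # (x^2).(w_sigma^2) -- exactly the m_h and v_h computed above -- so sampling
  # activations directly gives lower-variance gradients than sampling W.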
def build_action_noise(self):
"""Defines a model for additive noise per action, and its KL term."""
# Define mean and std variables (log-normal dist) for each action.
noise_sigma_mu = (self.build_mu_variable([1, self.n_out])
+ self.inverse_sigma_transform(self.hparams.noise_sigma))
noise_sigma_sigma = self.sigma_transform(
self.build_sigma_variable([1, self.n_out]))
pre_noise_sigma = (noise_sigma_mu
+ tf.random_normal([1, self.n_out]) * noise_sigma_sigma)
self.noise_sigma = self.sigma_transform(pre_noise_sigma)
# Compute KL for additive noise sigma terms.
if getattr(self.hparams, "infer_noise_sigma", False):
neg_kl_term = log_gaussian(
pre_noise_sigma,
self.inverse_sigma_transform(self.hparams.noise_sigma),
self.hparams.prior_sigma
)
neg_kl_term -= log_gaussian(pre_noise_sigma,
noise_sigma_mu,
noise_sigma_sigma)
else:
neg_kl_term = 0.
return neg_kl_term
def build_model(self, activation_fn=tf.nn.relu):
"""Defines the actual NN model with fully connected layers.
The loss is computed for partial feedback settings (bandits), so only
the observed outcome is backpropagated (see weighted loss).
Selects the optimizer and, finally, it also initializes the graph.
Args:
activation_fn: the activation function used in the nn layers.
"""
if self.verbose:
print("Initializing model {}.".format(self.name))
neg_kl_term, l_number = 0, 0
use_local_reparameterization = self.use_local_reparameterization
# Compute model additive noise for each action with log-normal distribution
neg_kl_term += self.build_action_noise()
# Build network.
input_x = self.x
input_local = self.x
n_in = self.n_in
for l_number, n_nodes in enumerate(self.layers):
if n_nodes > 0:
h, h_local, neg_kl = self.build_layer(input_x, input_local,
[n_in, n_nodes], l_number)
neg_kl_term += neg_kl
input_x, input_local = h, h_local
n_in = n_nodes
# Create last linear layer
h, h_local, neg_kl = self.build_layer(input_x, input_local,
[n_in, self.n_out],
l_number + 1,
activation_fn=lambda x: x)
neg_kl_term += neg_kl
self.y_pred = h
self.y_pred_local = h_local
# Compute log likelihood (with learned or fixed noise level)
if getattr(self.hparams, "infer_noise_sigma", False):
log_likelihood = log_gaussian(
self.y, self.y_pred_local, self.noise_sigma, reduce_sum=False)
else:
y_hat = self.y_pred_local if use_local_reparameterization else self.y_pred
log_likelihood = log_gaussian(
self.y, y_hat, self.hparams.noise_sigma, reduce_sum=False)
# Only take into account observed outcomes (bandits setting)
batch_size = tf.to_float(tf.shape(self.x)[0])
weighted_log_likelihood = tf.reduce_sum(
log_likelihood * self.weights) / batch_size
# The objective is 1/n * (\sum_i log_like_i - KL); neg_kl_term estimates -KL
elbo = weighted_log_likelihood + (neg_kl_term / self.n)
self.loss = -elbo
self.global_step = tf.train.get_or_create_global_step()
self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize(
self.loss, global_step=self.global_step)
# Create tensorboard metrics
self.create_summaries()
self.summary_writer = tf.summary.FileWriter(
"{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph)
def build_graph(self):
"""Defines graph, session, placeholders, and model.
Placeholders are: n (size of the dataset), x and y (context and observed
reward for each action), and weights (one-hot encoding of selected action
for each context, i.e., only possibly non-zero element in each y).
"""
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf.Session()
self.n = tf.placeholder(shape=[], dtype=tf.float32)
self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float32)
self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32)
self.weights = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32)
self.build_model()
self.sess.run(tf.global_variables_initializer())
def create_summaries(self):
"""Defines summaries including mean loss, and global step."""
with self.graph.as_default():
with tf.name_scope(self.name + "_summaries"):
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("global_step", self.global_step)
self.summary_op = tf.summary.merge_all()
def assign_lr(self):
"""Resets the learning rate in dynamic schedules for subsequent trainings.
In bandits settings, we do expand our dataset over time. Then, we need to
re-train the network with the new data. The algorithms that do not keep
the step constant, can reset it at the start of each *training* process.
"""
decay_steps = 1
if self.hparams.activate_decay:
current_gs = self.sess.run(self.global_step)
with self.graph.as_default():
self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr,
self.global_step - current_gs,
decay_steps,
self.hparams.lr_decay_rate)
def train(self, data, num_steps):
"""Trains the BNN for num_steps, using the data in 'data'.
Args:
data: ContextualDataset object that provides the data.
num_steps: Number of minibatches to train the network for.
Returns:
losses: Loss history during training.
"""
if self.times_trained < self.cleared_times_trained:
num_steps = int(self.training_schedule[self.times_trained])
self.times_trained += 1
losses = []
with self.graph.as_default():
if self.verbose:
print("Training {} for {} steps...".format(self.name, num_steps))
for step in range(num_steps):
x, y, weights = data.get_batch_with_weights(self.hparams.batch_size)
_, summary, global_step, loss = self.sess.run(
[self.train_op, self.summary_op, self.global_step, self.loss],
feed_dict={
self.x: x,
self.y: y,
self.weights: weights,
self.n: data.num_points(self.f_num_points),
})
losses.append(loss)
if step % self.hparams.freq_summary == 0:
if self.hparams.show_training:
print("{} | step: {}, loss: {}".format(
self.name, global_step, loss))
self.summary_writer.add_summary(summary, global_step)
return losses
| tombstone/models | research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py | Python | apache-2.0 | 12,618 |
from pylons import c
from ming.orm import FieldProperty
from ming import schema as S
from allura.lib import helpers as h
from .session import project_orm_session
from .filesystem import File
class BaseAttachment(File):
thumbnail_size = (255, 255)
ArtifactType=None
class __mongometa__:
name = 'attachment'
polymorphic_on = 'attachment_type'
polymorphic_identity=None
session = project_orm_session
indexes = [ 'artifact_id', 'app_config_id' ]
artifact_id=FieldProperty(S.ObjectId)
app_config_id=FieldProperty(S.ObjectId)
type=FieldProperty(str)
attachment_type=FieldProperty(str)
@property
def artifact(self):
return self.ArtifactType.query.get(_id=self.artifact_id)
def url(self):
return self.artifact.url() + 'attachment/' + h.urlquote(self.filename)
def is_embedded(self):
from pylons import request
return self.filename in request.environ.get('allura.macro.att_embedded', [])
@classmethod
def metadata_for(cls, artifact):
return dict(
artifact_id=artifact._id,
app_config_id=artifact.app_config_id)
@classmethod
def save_attachment(cls, filename, fp, content_type=None, **kwargs):
thumbnail_meta = dict(type="thumbnail", app_config_id=c.app.config._id)
thumbnail_meta.update(kwargs)
original_meta = dict(type="attachment", app_config_id=c.app.config._id)
original_meta.update(kwargs)
# Try to save as image, with thumbnail
orig, thumbnail = cls.save_image(
filename, fp,
content_type=content_type,
square=True, thumbnail_size=cls.thumbnail_size,
thumbnail_meta=thumbnail_meta,
save_original=True,
original_meta=original_meta)
if orig is not None:
return orig, thumbnail
# No, generic attachment
return cls.from_stream(
filename, fp, content_type=content_type,
**original_meta)
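        # Editorial note (not in the original source): save_image is expected
        # to return (None, None) when the upload is not a valid image, which
        # is what routes non-image files to the generic from_stream() branch.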
| leotrubach/sourceforge-allura | Allura/allura/model/attachments.py | Python | apache-2.0 | 2,036 |
from board import Board
from brick import Brick
from monster import Monster
from user import User
class Context:
def __init__(self):
self.game_level = 1
self._reset_context(User.START_LIFE, User.START_POINTS, User.DEFAULT_BOMB_CLASS)
def level_up(self):
self.game_level += 1
self._reset_context(self.user.life, self.user.points, self.user.bomb_class)
def _reset_context(self, user_life, user_points, user_bomb_class):
self.board = Board()
self.user = User(self.board, user_life, user_points, user_bomb_class)
self.bricks = []
self.portal = None
self.bombs = []
        self.dead_list = []
self.game_over = False
self.artifacts = []
self.monsters = [
Monster(self.board, self.user, 1),
Monster(self.board, self.user, 1),
Monster(self.board, self.user, 1),
]
for _ in range(20):
self.bricks.append(Brick(self.board, self.user, self.bricks, self.monsters))
| angelinawawrzyniak/bomberman | context.py | Python | mit | 1,031 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ..person import SearchName
from ._memberbase import mother_base
#-------------------------------------------------------------------------
#
# HasNameOf
#
#-------------------------------------------------------------------------
class SearchMotherName(SearchName):
"""Rule that checks for full or partial name matches"""
name = _('Families with mother matching the <name>')
description = _("Matches families whose mother has a specified "
"(partial) name")
category = _('Mother filters')
base_class = SearchName
apply = mother_base
| Forage/Gramps | gramps/gen/filters/rules/family/_searchmothername.py | Python | gpl-2.0 | 1,843 |
from __future__ import absolute_import
import datetime
import os
from decimal import Decimal
from django import forms
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection
from django.forms.models import model_to_dict
from django.utils.unittest import skipUnless
from django.test import TestCase
from .models import (Article, ArticleStatus, BetterWriter, BigInt, Book,
Category, CommaSeparatedInteger, CustomFieldForExclusionModel, DerivedBook,
DerivedPost, ExplicitPK, FlexibleDatePost, ImprovedArticle,
ImprovedArticleWithParentLink, Inventory, PhoneNumber, Post, Price,
Product, TextFile, Writer, WriterProfile, test_images)
if test_images:
from .models import ImageFile, OptionalImageFile
class ImageFileForm(forms.ModelForm):
class Meta:
model = ImageFile
class OptionalImageFileForm(forms.ModelForm):
class Meta:
model = OptionalImageFile
class ProductForm(forms.ModelForm):
class Meta:
model = Product
class PriceForm(forms.ModelForm):
class Meta:
model = Price
class BookForm(forms.ModelForm):
class Meta:
model = Book
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
class Meta:
model = Post
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline','pub_date')
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
class TestArticleForm(forms.ModelForm):
class Meta:
model = Article
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields=('headline', 'slug', 'pub_date')
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
class SelectInventoryForm(forms.Form):
items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ['name', 'markup']
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
class BetterWriterForm(forms.ModelForm):
class Meta:
model = BetterWriter
class WriterProfileForm(forms.ModelForm):
class Meta:
model = WriterProfile
class PhoneNumberForm(forms.ModelForm):
class Meta:
model = PhoneNumber
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
class Meta:
model = PhoneNumber
class CommaSeparatedIntegerForm(forms.ModelForm):
class Meta:
model = CommaSeparatedInteger
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ('quantity',)
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(BaseCategoryForm.base_fields.keys(),
['name', 'slug', 'url'])
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(ExtraFields.base_fields.keys(),
['name', 'slug', 'url', 'some_extra_field'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
self.assertTrue(isinstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField))
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_fields(self):
class LimitFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url']
self.assertEqual(LimitFields.base_fields.keys(),
['url'])
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(ExcludeFields.base_fields.keys(),
['name', 'slug'])
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(ConfusedForm.base_fields.keys(),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
MixModelForm.base_fields.keys(),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
ArticleForm.base_fields.keys(),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
#First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
BadForm.base_fields.keys(),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(SubCategoryForm.base_fields.keys(),
['name', 'slug', 'url'])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
class SubclassMeta(SomeCategoryForm):
""" We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ['url']
self.assertHTMLEqual(
str(SubclassMeta()),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url', 'name']
self.assertEqual(OrderFields.base_fields.keys(),
['url', 'name'])
self.assertHTMLEqual(
str(OrderFields()),
"""<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(OrderFields2.base_fields.keys(),
['slug', 'name'])
class TestWidgetForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name', 'url', 'slug']
widgets = {
'name': forms.Textarea,
'url': forms.TextInput(attrs={'class': 'url'})
}
class TestWidgets(TestCase):
def test_base_widgets(self):
frm = TestWidgetForm()
self.assertHTMLEqual(
str(frm['name']),
'<textarea id="id_name" rows="10" cols="40" name="name"></textarea>'
)
self.assertHTMLEqual(
str(frm['url']),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" />'
)
self.assertHTMLEqual(
str(frm['slug']),
'<input id="id_slug" type="text" name="slug" maxlength="20" />'
)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
class ValidationTest(TestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
assert form.is_valid()
# unique/unique_together validation
class UniqueTest(TestCase):
def setUp(self):
self.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], [u'Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], [u'Price with this Price and Quantity already exists.'])
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], [u'Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': u'1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], [u'Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], [u'Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], [u'Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other',
'author': self.writer.pk,
'isbn': '9876',
'suffix1': u'0',
'suffix2': u'0'
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'],
[u'Derived book with this Suffix1 and Suffix2 already exists.'])
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({'key': u'', 'desc': u'' })
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': u'key1', 'desc': u''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({'key': u'key1', 'desc': u''})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], [u'Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], [u'Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], [u'Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], [u'Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], [u'Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], [u'Subtitle must be unique for Posted month.'])
form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], [u'This field is required.'])
def test_inherited_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], [u'Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], [u'Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], [u'Subtitle must be unique for Posted month.'])
form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = FlexDatePostForm({'title': "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'slug': "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0"}, instance=p)
self.assertTrue(form.is_valid())
class OldFormForXTests(TestCase):
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" />""")
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
)
def test_with_data(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
        # Testing whether the same object is returned from the
        # ORM... not the fastest way...
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
self.assertEqual(Category.objects.count(), 1)
f = BaseCategoryForm({'name': "It's a test",
'slug': 'its-test',
'url': 'test'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], "It's a test")
self.assertEqual(f.cleaned_data['slug'], 'its-test')
self.assertEqual(f.cleaned_data['url'], 'test')
c2 = f.save()
        # Testing whether the same object is returned from the
        # ORM... not the fastest way...
self.assertEqual(c2, Category.objects.get(pk=c2.pk))
self.assertEqual(c2.name, "It's a test")
self.assertEqual(Category.objects.count(), 2)
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['url'], u'third')
self.assertEqual(f.cleaned_data['name'], u'Third test')
self.assertEqual(f.cleaned_data['slug'], u'third-test')
c3 = f.save(commit=False)
self.assertEqual(c3.name, "Third test")
self.assertEqual(Category.objects.count(), 2)
c3.save()
self.assertEqual(Category.objects.count(), 3)
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], [u'This field is required.'])
self.assertEqual(f.errors['slug'], [u"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."])
with self.assertRaises(AttributeError):
f.cleaned_data
with self.assertRaises(ValueError):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
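        # The two ValueError cases above give the canonical guard pattern for
        # application code -- a hedged sketch, not part of this suite
        # (`request` is hypothetical):
        #
        #   form = BaseCategoryForm(request.POST)
        #   if form.is_valid():
        #       category = form.save()
        #   else:
        #       ...  # re-render the template with form.errors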
# Create a couple of Writers.
w_royko = Writer(name='Mike Royko')
w_royko.save()
w_woodward = Writer(name='Bob Woodward')
w_woodward.save()
# ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
# fields with the 'choices' attribute are represented by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(unicode(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select><br /><span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(unicode(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
w = Writer.objects.get(name='Mike Royko')
f = RoykoForm(auto_id=False, instance=w)
self.assertHTMLEqual(unicode(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br /><span class="helptext">Use both first and last names.</span></td></tr>''')
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=w,
article='Hello.'
)
art.save()
art_id_1 = art.id
self.assertEqual(art_id_1 is not None, True)
f = TestArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': u'Test headline',
'slug': 'test-headline',
'pub_date': u'1984-02-06',
'writer': unicode(w_royko.pk),
'article': 'Hello.'
}, instance=art)
self.assertEqual(f.errors, {})
self.assertEqual(f.is_valid(), True)
test_art = f.save()
self.assertEqual(test_art.id == art_id_1, True)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, u'Test headline')
# You can create a form over a subset of the available fields
# by specifying a 'fields' argument to form_for_instance.
f = PartialArticleFormWithSlug({
'headline': u'New headline',
'slug': 'new-headline',
'pub_date': u'1988-01-04'
}, auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>''')
self.assertEqual(f.is_valid(), True)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(new_art.headline, u'New headline')
# Add some categories and test the many-to-many form output.
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), [])
new_art.categories.add(Category.objects.get(name='Entertainment'))
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), ["Entertainment"])
f = TestArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# Initial values can be provided for model forms
f = TestArticleForm(
auto_id=False,
initial={
'headline': 'Your headline here',
'categories': [str(c1.id), str(c2.id)]
})
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': u'New headline',
'slug': u'new-headline',
'pub_date': u'1988-01-04',
'writer': unicode(w_royko.pk),
'article': u'Hello.',
'categories': [unicode(c1.id), unicode(c2.id)]
}, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(map(lambda o: o.name, new_art.categories.order_by('name')),
["Entertainment", "It's a test"])
# Now, submit form data with no categories. This deletes the existing categories.
f = TestArticleForm({'headline': u'New headline', 'slug': u'new-headline', 'pub_date': u'1988-01-04',
'writer': unicode(w_royko.pk), 'article': u'Hello.'}, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), [])
# Create a new article, with categories, via the form.
f = ArticleForm({'headline': u'The walrus was Paul', 'slug': u'walrus-was-paul', 'pub_date': u'1967-11-01',
'writer': unicode(w_royko.pk), 'article': u'Test.', 'categories': [unicode(c1.id), unicode(c2.id)]})
new_art = f.save()
art_id_2 = new_art.id
self.assertEqual(art_id_2 not in (None, art_id_1), True)
new_art = Article.objects.get(id=art_id_2)
self.assertEqual(map(lambda o: o.name, new_art.categories.order_by('name')), ["Entertainment", "It's a test"])
# Create a new article, with no categories, via the form.
f = ArticleForm({'headline': u'The walrus was Paul', 'slug': u'walrus-was-paul', 'pub_date': u'1967-11-01',
'writer': unicode(w_royko.pk), 'article': u'Test.'})
new_art = f.save()
art_id_3 = new_art.id
self.assertEqual(art_id_3 not in (None, art_id_1, art_id_2), True)
new_art = Article.objects.get(id=art_id_3)
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
f = ArticleForm({'headline': u'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': u'1967-11-01',
'writer': unicode(w_royko.pk), 'article': u'Test.', 'categories': [unicode(c1.id), unicode(c2.id)]})
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_4 = new_art.id
self.assertEqual(art_id_4 not in (None, art_id_1, art_id_2, art_id_3), True)
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_4)
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), [])
# Save the m2m data on the form
f.save_m2m()
self.assertEqual(map(lambda o: o.name, new_art.categories.order_by('name')), ["Entertainment", "It's a test"])
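        # The commit=False / save_m2m() pair demonstrated above is the standard
        # recipe for filling in extra fields before saving. A sketch of typical
        # calling code (names are illustrative, not from this suite):
        #
        #   article = form.save(commit=False)
        #   article.writer = some_writer   # set fields the form doesn't cover
        #   article.save()                 # the instance needs a pk first...
        #   form.save_m2m()                # ...before m2m rows can reference it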
# Here, we define a custom ModelForm. Because it happens to have the same fields as
# the Category model, we can just call the form's save() to apply its changes to an
# existing Category instance.
cat = Category.objects.get(name='Third test')
self.assertEqual(cat.name, "Third test")
self.assertEqual(cat.id == c3.id, True)
form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
self.assertEqual(form.save().name, 'Third')
self.assertEqual(Category.objects.get(id=c3.id).name, 'Third')
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
c4 = Category.objects.create(name='Fourth', url='4th')
self.assertEqual(c4.name, 'Fourth')
w_bernstein = Writer.objects.create(name='Carl Bernstein')
self.assertEqual(w_bernstein.name, 'Carl Bernstein')
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
<option value="%s">Fourth</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_bernstein.pk, w_royko.pk, c1.pk, c2.pk, c3.pk, c4.pk))
# ModelChoiceField ############################################################
f = forms.ModelChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(u'', u'---------'),
(c1.pk, u'Entertainment'),
(c2.pk, u"It's a test"),
(c3.pk, u'Third'),
(c4.pk, u'Fourth')])
with self.assertRaises(ValidationError):
f.clean('')
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean(0)
self.assertEqual(f.clean(c3.id).name, 'Third')
self.assertEqual(f.clean(c2.id).name, "It's a test")
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
c5 = Category.objects.create(name='Fifth', url='5th')
self.assertEqual(c5.name, 'Fifth')
self.assertEqual(f.clean(c5.id).name, 'Fifth')
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='5th').delete()
with self.assertRaises(ValidationError):
f.clean(c5.id)
f = forms.ModelChoiceField(Category.objects.filter(pk=c1.id), required=False)
self.assertEqual(f.clean(''), None)
f.clean('')
self.assertEqual(f.clean(str(c1.id)).name, "Entertainment")
with self.assertRaises(ValidationError):
f.clean('100')
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
(u'', u'---------'),
(c1.pk, u'Entertainment'),
(c2.pk, u"It's a test"),
(c3.pk, u'Third')])
self.assertEqual(f.clean(c3.id).name, 'Third')
with self.assertRaises(ValidationError):
f.clean(c4.id)
# check that we can safely iterate choices repeatedly
gen_one = list(f.choices)
gen_two = f.choices
self.assertEqual(gen_one[2], (c2.pk, u"It's a test"))
self.assertEqual(list(gen_two), [
(u'', u'---------'),
(c1.pk, u'Entertainment'),
(c2.pk, u"It's a test"),
(c3.pk, u'Third')])
# check that we can override the label_from_instance method to print custom labels (#4620)
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "category " + str(obj)
self.assertEqual(list(f.choices), [
(u'', u'---------'),
(c1.pk, 'category Entertainment'),
(c2.pk, "category It's a test"),
(c3.pk, 'category Third'),
(c4.pk, 'category Fourth')])
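        # The per-instance lambda above can equally be written as a subclass; a
        # minimal sketch, assuming nothing beyond what the test exercises:
        #
        #   class CategoryChoiceField(forms.ModelChoiceField):
        #       def label_from_instance(self, obj):
        #           return "category " + str(obj)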
# ModelMultipleChoiceField ####################################################
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(c1.pk, u'Entertainment'),
(c2.pk, u"It's a test"),
(c3.pk, u'Third'),
(c4.pk, u'Fourth')])
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertEqual(map(lambda o: o.name, f.clean([c1.id])), ["Entertainment"])
self.assertEqual(map(lambda o: o.name, f.clean([c2.id])), ["It's a test"])
self.assertEqual(map(lambda o: o.name, f.clean([str(c1.id)])), ["Entertainment"])
self.assertEqual(map(lambda o: o.name, f.clean([str(c1.id), str(c2.id)])), ["Entertainment", "It's a test"])
self.assertEqual(map(lambda o: o.name, f.clean([c1.id, str(c2.id)])), ["Entertainment", "It's a test"])
self.assertEqual(map(lambda o: o.name, f.clean((c1.id, str(c2.id)))), ["Entertainment", "It's a test"])
with self.assertRaises(ValidationError):
f.clean(['100'])
with self.assertRaises(ValidationError):
f.clean('hello')
with self.assertRaises(ValidationError):
f.clean(['fail'])
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
c6 = Category.objects.create(id=6, name='Sixth', url='6th')
self.assertEqual(c6.name, 'Sixth')
self.assertEqual(map(lambda o: o.name, f.clean([c6.id])), ["Sixth"])
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='6th').delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertEqual(f.clean([]), [])
self.assertEqual(f.clean(()), [])
with self.assertRaises(ValidationError):
f.clean(['10'])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), '10'])
with self.assertRaises(ValidationError):
f.clean([str(c1.id), '10'])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
(c1.pk, u'Entertainment'),
(c2.pk, u"It's a test"),
(c3.pk, u'Third')])
self.assertEqual(map(lambda o: o.name, f.clean([c3.id])), ["Third"])
with self.assertRaises(ValidationError):
f.clean([c4.id])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), str(c4.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertEqual(list(f.choices), [
(c1.pk, 'multicategory Entertainment'),
(c2.pk, "multicategory It's a test"),
(c3.pk, 'multicategory Third'),
(c4.pk, 'multicategory Fourth')])
# OneToOneField ###############################################################
self.assertEqual(ImprovedArticleForm.base_fields.keys(), ['article'])
self.assertEqual(ImprovedArticleWithParentLinkForm.base_fields.keys(), [])
bw = BetterWriter(name=u'Joe Better', score=10)
bw.save()
self.assertEqual(sorted(model_to_dict(bw).keys()),
['id', 'name', 'score', 'writer_ptr'])
form = BetterWriterForm({'name': 'Some Name', 'score': 12})
self.assertEqual(form.is_valid(), True)
bw2 = form.save()
bw2.delete()
form = WriterProfileForm()
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" id="id_age" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
data = {
'writer': unicode(w_woodward.pk),
'age': u'65',
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(unicode(instance), 'Bob Woodward is 65')
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" value="65" id="id_age" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
def test_phone_number_field(self):
f = PhoneNumberForm({'phone': '(312) 555-1212', 'description': 'Assistance'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['phone'], u'312-555-1212')
self.assertEqual(f.cleaned_data['description'], u'Assistance')
def test_file_field(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={'description': u'Assistance'})
self.assertEqual(f.is_valid(), False)
f = TextFileForm(data={'description': u'Assistance'}, files={})
self.assertEqual(f.is_valid(), False)
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={'description': u'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', 'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
instance.file.delete()
f = TextFileForm(
data={'description': u'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', 'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={'description': u'Assistance'},
files={'file': SimpleUploadedFile('test-maxlength.txt', 'hello world')})
self.assertEqual(f.is_valid(), False)
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm(
data={'description': u'Assistance'},
instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={'description': u'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', 'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
f = TextFileForm(
data={'description': u'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', 'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
# Test the non-required FileField
f = TextFileForm(data={'description': u'Assistance'})
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, '')
f = TextFileForm(
data={'description': u'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', 'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Instance can be edited w/out re-uploading the file and existing file should be preserved.
f = TextFileForm(
data={'description': u'New Description'},
instance=instance)
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, u'New Description')
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
f = TextFileForm(
data={'description': u'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', 'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
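        # Django never removes files from storage by itself, which is why every
        # branch above ends with instance.file.delete(). In application code the
        # same cleanup is often hooked into the model; an illustrative sketch
        # (this TextFile delete() override is hypothetical):
        #
        #   def delete(self, *args, **kwargs):
        #       self.file.delete(save=False)
        #       super(TextFile, self).delete(*args, **kwargs)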
def test_big_integer_field(self):
bif = BigIntForm({'biggie': '-9223372036854775808'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '-9223372036854775809'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': [u'Ensure this value is greater than or equal to -9223372036854775808.']})
bif = BigIntForm({'biggie': '9223372036854775807'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '9223372036854775808'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': [u'Ensure this value is less than or equal to 9223372036854775807.']})
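        # The boundary values above are exactly the signed 64-bit range:
        # -2**63 = -9223372036854775808 and 2**63 - 1 = 9223372036854775807,
        # which is why -9223372036854775809 and 9223372036854775808 are rejected.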
@skipUnless(test_images, "PIL not installed")
def test_image_field(self):
        # ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
image_data = open(os.path.join(os.path.dirname(__file__), "test.png"), 'rb').read()
image_data2 = open(os.path.join(os.path.dirname(__file__), "test2.png"), 'rb').read()
f = ImageFileForm(
data={'description': u'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={'description': u'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
f = ImageFileForm(data={'description': u'Look, it changed'}, instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={'description': u'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={'description': u'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return u'' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = u''
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={'description': u'Test'})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertEqual(instance.width, None)
self.assertEqual(instance.height, None)
f = OptionalImageFileForm(
data={'description': u'And a final one'},
files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect the image or its width/height properties
f = OptionalImageFileForm(
data={'description': u'New Description'},
instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, u'New Description')
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={'description': u'And a final one'},
files={'image': SimpleUploadedFile('test4.png', image_data2)}
)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test4.png')
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
f = ImageFileForm(
data={'description': u'And a final one', 'path': 'foo'},
files={'image': SimpleUploadedFile('test4.png', image_data)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'foo/test4.png')
instance.delete()
# Test image field when cStringIO is not available
from django.forms import fields
from StringIO import StringIO
old_StringIO = fields.StringIO
fields.StringIO = StringIO
try:
f = ImageFileForm(
data={'description': u'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
finally:
fields.StringIO = old_StringIO
def test_media_on_modelform(self):
# Similar to a regular Form class you can define custom media to be used on
# the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(unicode(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>''')
f = CommaSeparatedIntegerForm({'field': '1,2,3'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': u'1,2,3'})
f = CommaSeparatedIntegerForm({'field': '1a,2'})
self.assertEqual(f.errors, {'field': [u'Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': ',,,,'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': u',,,,'})
f = CommaSeparatedIntegerForm({'field': '1.2'})
self.assertEqual(f.errors, {'field': [u'Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,a,2'})
self.assertEqual(f.errors, {'field': [u'Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,,2'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': u'1,,2'})
f = CommaSeparatedIntegerForm({'field': '1'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': u'1'})
# This Price instance generated by this form is not valid because the quantity
# field is required, but the form is valid because the field is excluded from
# the form. This is for backwards compatibility.
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
price = form.save(commit=False)
with self.assertRaises(ValidationError):
price.full_clean()
        # The form should not validate fields that it doesn't contain even if they are
        # specified using 'fields', not 'exclude'.
        # (This local Meta class is inert -- it is never attached to a form class;
        # the behaviour itself is exercised via PriceFormWithoutQuantity below.)
        class Meta:
            model = Price
            fields = ('price',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal('6.00'))
self.assertEqual(form.instance.quantity is None, True)
self.assertEqual(form.instance.pk is None, True)
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('42')
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('z')
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name='Apple')
pear = Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), (
(u'', u'---------'),
(86, u'Apple'),
(87, u'Core'),
(22, u'Pear')))
form = InventoryForm(instance=core)
self.assertHTMLEqual(unicode(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
data = model_to_dict(core)
data['parent'] = '22'
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, 'Pear')
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ['description', 'url']
self.assertEqual(CategoryForm.base_fields.keys(),
['description', 'url'])
self.assertHTMLEqual(unicode(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>''')
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), ((86, u'Apple'), (87, u'Core'), (22, u'Pear')))
self.assertEqual(map(lambda o: o.name, field.clean([86])), ['Apple'])
form = SelectInventoryForm({'items': [87, 22]})
self.assertEqual(form.is_valid(), True)
self.assertEqual(len(form.cleaned_data), 1)
self.assertEqual(map(lambda o: o.name, form.cleaned_data['items']), ['Core', 'Pear'])
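        # to_field_name (exercised above) swaps the pk for another unique column
        # in both the rendered <option> values and in clean(); a minimal recap
        # sketch using only what the test shows:
        #
        #   field = forms.ModelChoiceField(Inventory.objects.all(),
        #                                  to_field_name='barcode')
        #   field.clean(86).name   # -> 'Apple', looked up by barcode, not pk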
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(CustomFieldForExclusionForm.base_fields.keys(), ['name'])
self.assertHTMLEqual(unicode(CustomFieldForExclusionForm()),
'''<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>''')
| chrishas35/django-travis-ci | tests/modeltests/model_forms/tests.py | Python | bsd-3-clause | 65,658 |
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404
from accounts.models import Employee, PhoneNo, Skill, HasSkill
from django.core.urlresolvers import reverse
from django.contrib.auth.hashers import make_password, check_password
from accounts.utils import *
from project.models import *
import datetime
import json as simplejson
from notification.utils import *
from django.db.models import Q, F
@login_required
def createhandler(request):
    if not isManagerAuthenticated(request):
return HttpResponseRedirect(reverse('accounts:index'))
else:
name = request.POST['name']
desc = request.POST['desc']
username = request.session.get('username')
emp = Employee.objects.get(username=username)
        p = Project(name=name, description=desc, manager=emp)
        p.save()
        project_id = p.pk
        tm = Teammember(project=p, employee=emp)
        tm.save()
        return HttpResponseRedirect(reverse('project:view', args=(project_id,)))
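# The reverse('project:view', ...) call above implies a URL pattern roughly like
# the following in this app's urls.py -- a hedged sketch, since the actual
# urlconf is not part of this file:
#
#   urlpatterns = patterns('project.views',
#       url(r'^(?P<project_id>\d+)/$', 'view', name='view'),
#   )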
@login_required
def proj_analytics(request,project_id):
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
taskCount = Task.objects.filter(project = project).count()
completedTaskCount = Task.objects.filter(project = project , approved = True).count()
delayedTaskCount = Task.objects.filter(project = project , deadline__lt = F('enddate')).count()
taskIssueCount = 0
taskUnresolvedIssueCount = 0
subtaskCount = 0
completedSubtaskCount = 0
delayedSubtaskCount = 0
subtaskIssueCount = 0
subtaskUnresolvedIssueCount = 0
for taskitem in Task.objects.filter(project = project):
subtaskCount = subtaskCount + Subtask.objects.filter(task = taskitem).count()
completedSubtaskCount = completedSubtaskCount + Subtask.objects.filter(task = taskitem , approved = True).count()
delayedSubtaskCount = delayedSubtaskCount + Subtask.objects.filter(task= taskitem , deadline__lt = F('enddate')).count()
taskUnresolvedIssueCount = taskUnresolvedIssueCount + TaskIssue.objects.filter(Q(task = taskitem) & (Q(resolvedate = datetime.date.today()) |Q(resolvedate = None))).count()
taskIssueCount = taskIssueCount + TaskIssue.objects.filter(task= taskitem).count()
for subtaskitem in Subtask.objects.filter(task = taskitem):
subtaskUnresolvedIssueCount = subtaskUnresolvedIssueCount + SubtaskIssue.objects.filter(Q(subtask = subtaskitem) & (Q(resolvedate = datetime.date.today()) |Q(resolvedate = None))).count()
subtaskIssueCount = subtaskIssueCount + SubtaskIssue.objects.filter(subtask= subtaskitem).count()
context['taskCount'] = taskCount
context['completedTaskCount'] = completedTaskCount
context['subtaskCount'] = subtaskCount
context['completedSubtaskCount'] = completedSubtaskCount
context['delayedTaskCount'] = delayedTaskCount
context['delayedSubtaskCount'] = delayedSubtaskCount
context['project'] = project
context['taskIssueCount'] = taskIssueCount
context['taskUnresolvedIssueCount'] = taskUnresolvedIssueCount
context['subtaskIssueCount'] = subtaskIssueCount
context['subtaskUnresolvedIssueCount'] = subtaskUnresolvedIssueCount
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/projectAnalytics.html', context)
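# The loop above issues several COUNT queries per task. With this schema the
# same totals could plausibly be collected in a fixed number of queries by
# filtering across the relation instead -- an illustrative, untested sketch:
#
#   subtaskCount = Subtask.objects.filter(task__project=project).count()
#   completedSubtaskCount = Subtask.objects.filter(
#       task__project=project, approved=True).count()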
@login_required
def view(request,project_id):
    # show the project detail page
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
try:
proj = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
projects = Teammember.objects.filter(employee=emp)
context['project'] = proj
    manager = proj.manager
#check if the project is edited
if request.GET.get('edit'):
context['edited'] = 1
else:
context['edited'] = 0
    # check if the person viewing is the manager who created the project
if(emp == manager):
context['edit'] = 1
else:
context['edit'] = 0
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/view.html', context)
@login_required
def members(request,project_id):
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
tms= Teammember.objects.filter(project = project)
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
context['project'] = project
context['members'] = tms
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/members.html', context)
@login_required
def tasks(request,project_id):
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
t = Task.objects.filter(project = project)
    # check if the person viewing is the manager who created the project
    manager = project.manager
if(emp == manager):
context['manager'] = 1
else:
context['manager'] = 0
context['project'] = project
context['tasks'] = t
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/task.html', context)
@login_required
def subtasks(request,project_id,task_id):
username = request.session.get('username')
emp = Employee.objects.get(username = username)
context = {}
context['user'] = emp
try:
project = Project.objects.get(pk = project_id)
task = Task.objects.get(project = project,taskid = task_id)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
st = Subtask.objects.filter(project = project,task = task)
t = Task.objects.filter(project = project)
    # check if the person viewing is the manager who created the project
manager = project.manager
taskmanager = task.manager
if(emp == manager or emp == taskmanager):
context['manager'] = 1
else:
context['manager'] = 0
context['task'] = task
context['subtasks'] = st
context['count'] = st.count()
context['tasks'] = t
context['today'] = datetime.date.today()
return render(request, 'project/subtask.html', context)
@login_required
def subtaskview(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['subtask']=subtask
st = Subtask.objects.filter(task = task, project = project)
context['today'] = datetime.date.today()
context['maxdate'] = datetime.date.max
context['subtasks'] = st
skills = SubtaskSkills.objects.filter(subtask=subtask)
context['skills'] = skills
    if subtask.enddate is None or subtask.enddate == datetime.date.max:
context['enddate'] = "Yet to be completed"
if emp == subtask.assignee:
context['close'] = 1
else:
context['close'] = 0
if datetime.date.today() > subtask.deadline:
context['status'] = "Deadline exceeded, Ongoing"
else:
context['status'] = "Ongoing"
else:
context['enddate'] = subtask.enddate
context['close'] = 0
if subtask.enddate > subtask.deadline:
if subtask.approved == 1:
context['status'] = "Approved, Deadline Exceeded, Complete"
else:
context['status'] = "Approval Pending, Deadline Exceeded, Complete"
else:
if subtask.approved == 1:
context['status'] = "Approved, Subtask Complete"
else:
context['status'] = "Approval Pending, Subtask Complete"
return render(request, 'project/viewsubtask.html', context)
@login_required
def closesubtask(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == subtask.assignee:
subtask.enddate = datetime.date.today()
subtask.save()
subtaskCompleteNotification(subtask)
return HttpResponseRedirect(reverse('project:subtaskview' ,args=(project_id,task_id,subtask_id,)))
else:
raise Http404
@login_required
def opensubtask(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == task.manager or emp == project.manager:
subtask.enddate = datetime.date.max
subtask.save()
return HttpResponseRedirect(reverse('project:subtaskview' ,args=(project_id,task_id,subtask_id,)))
else:
raise Http404
@login_required
def approvesubtask(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == task.manager or emp == project.manager:
review = request.POST['review']
rating = request.POST['rating']
subtask.approved = 1
subtask.review = review
subtask.rating = rating
subtask.save()
skills = SubtaskSkills.objects.filter(subtask=subtask)
for skill in skills:
emsk=HasSkill.objects.filter(employee=subtask.assignee,skill=skill.skill)
if not emsk:
                emsk = HasSkill(employee=subtask.assignee, skill=skill.skill)
emsk.save()
else:
emsk=emsk[0]
no=int(emsk.number)
newrating=(no*float(emsk.rating) + float(rating)) /(no+1)
emsk.number = no + 1
emsk.rating = newrating
emsk.save()
return HttpResponseRedirect(reverse('project:subtaskfeedback' ,args=(project_id,task_id,subtask_id,)))
else:
raise Http404
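# The rating update in approvesubtask() is an incremental (running) mean:
#
#   new_rating = (n * old_rating + rating) / (n + 1)
#
# e.g. three prior ratings averaging 4.0 plus a new rating of 8 give
# (3 * 4.0 + 8) / 4 = 5.0, and the stored count becomes 4.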
@login_required
def subtaskfeedback(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == task.manager or emp == project.manager:
        if subtask.enddate is None or subtask.enddate == datetime.date.max:
context['form'] = 0
context['complete'] = 0
context['approved'] = 0
else:
context['complete'] = 1
if subtask.approved == 1:
context['form'] = 0
context['approved'] = 1
else:
context['form'] = 1
context['approved'] = 0
else:
context['form'] = 0
        if subtask.enddate is None or subtask.enddate == datetime.date.max:
context['complete'] = 0
context['approved'] = 0
else:
context['complete'] = 1
if subtask.approved == 1:
context['approved'] = 1
else:
context['approved'] = 0
context['user'] = emp
context['subtask']=subtask
st = Subtask.objects.filter(task = task, project = project)
context['subtasks'] = st
context['range'] = range(10)
return render(request, 'project/subtaskfeedback.html', context)
@login_required
def taskissueview(request,project_id,task_id,issue_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
issue = TaskIssue.objects.get(pk=issue_id,task = task)
    except (Project.DoesNotExist, Task.DoesNotExist, TaskIssue.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['issue']=issue
ti = TaskIssue.objects.filter(task = task, project = project)
t = Task.objects.filter(project = project)
context['issues'] = ti
context['today'] = datetime.date.today()
context['tasks'] = t
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
context['member'] = 1
else:
context['member'] = 0
return render(request, 'project/viewtaskissue.html', context)
@login_required
def subtaskissueview(request,project_id,task_id,subtask_id,issue_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task,project=project)
issue = SubtaskIssue.objects.get(pk=issue_id,subtask = subtask)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist, SubtaskIssue.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['issue']=issue
sti = SubtaskIssue.objects.filter(subtask = subtask, task = task, project = project)
st = Subtask.objects.filter(project = project, task = task)
context['issues'] = sti
context['today'] = datetime.date.today()
context['subtasks'] = st
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
context['member'] = 1
else:
context['member'] = 0
return render(request, 'project/viewsubtaskissue.html', context)
@login_required
def closesubtaskissue(request,project_id,task_id,subtask_id,issue_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task,project=project)
issue = SubtaskIssue.objects.get(pk=issue_id,subtask = subtask)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist, SubtaskIssue.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['issue']=issue
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
issue.resolvedate=datetime.date.today()
issue.save()
return HttpResponseRedirect(reverse('project:subtaskissueview' ,args=(project_id,task_id,subtask_id,issue_id,)))
else:
raise Http404
@login_required
def closetaskissue(request,project_id,task_id,issue_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
issue = TaskIssue.objects.get(pk=issue_id,task = task)
    except (Project.DoesNotExist, Task.DoesNotExist, TaskIssue.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['issue']=issue
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
issue.resolvedate=datetime.date.today()
issue.save()
return HttpResponseRedirect(reverse('project:taskissueview' ,args=(project_id,task_id,issue_id,)))
else:
raise Http404
@login_required
def taskview(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['project']=project
context['task']=task
t = Task.objects.filter(project = project)
context['tasks'] = t
context['skills'] = TaskSkills.objects.filter(task=task)
return render(request, 'project/viewtask.html', context)
@login_required
def taskissues(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager
taskmanager = task.manager
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
context['manager'] = 1
else:
context['manager'] = 0
context['user'] = emp
context['project']=project
context['task']=task
issues = TaskIssue.objects.filter(task = task)
context['issues'] = issues
context['count'] = issues.count()
context['today'] = datetime.date.today()
t = Task.objects.filter(project = project)
context['tasks'] = t
return render(request, 'project/taskissues.html', context)
@login_required
def subtaskissues(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if(Teammember.objects.filter(project=project,employee=emp).count()==1):
context['manager'] = 1
else:
context['manager'] = 0
context['user'] = emp
context['project']=project
context['subtask']=subtask
issues = SubtaskIssue.objects.filter(subtask = subtask)
context['issues'] = issues
context['count'] = issues.count()
context['today'] = datetime.date.today()
st = Subtask.objects.filter(project = project,task=task)
context['subtasks'] = st
return render(request, 'project/subtaskissues.html', context)
@login_required
def taskfeedback(request,project_id,task_id):
try:
project = Project.objects.get(pk = project_id)
task = Task.objects.get(taskid = task_id,project = project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
context = {}
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context['user'] = emp
context['project']=project
context['task']=task
if(emp==task.manager or emp==project.manager):
context['manager']=1
else:
context['manager']=0
if(emp==project.manager):
context['pm'] = 1
else:
context['pm'] = 0
st = Subtask.objects.filter(task= task,project = project)
pending=[]
complete = 1
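    # a task counts as complete only when every subtask has been approved;
    # unapproved subtasks are collected as pending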
    for subtask in st:
        if subtask.approved != 1:
            complete = 0
            pending.append(subtask)
context['complete'] = complete
context['pending'] = pending
t = Task.objects.filter(project = project)
context['tasks'] = t
context['today'] = datetime.date.today()
context['range'] = range(10)
return render(request, 'project/taskfeedback.html', context)
@login_required
def closetask(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == task.manager:
st = Subtask.objects.filter(task= task,project = project)
complete = 1
for subtask in st:
if subtask.approved == 1 :
complete = complete*1
else:
complete = 0
if complete == 0 :
return HttpResponseRedirect(reverse('project:taskfeedback' ,args=(project_id,task_id,)))
else:
task.enddate = datetime.date.today()
task.save()
taskCompleteNotification(task)
return HttpResponseRedirect(reverse('project:taskfeedback' ,args=(project_id,task_id,)))
else:
raise Http404
@login_required
def approvetask(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid=task_id,project=project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
if emp == project.manager:
review = request.POST['review']
rating = request.POST['rating']
task.approved = 1
task.review = review
task.rating = rating
if emp == task.manager:
task.enddate = datetime.date.today()
task.save()
skills = TaskSkills.objects.filter(task=task)
for skill in skills:
emsk=HasSkill.objects.filter(employee=task.manager,skill=skill.skill)
if not emsk:
emsk=HasSkill(employee=task.manager,skill=skill.skill)
emsk.save()
else:
emsk=emsk[0]
no=int(emsk.number)
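                    # fold the new rating into the running average: (n*mean + r) / (n+1)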
newrating=(no*float(emsk.rating) + float(rating)) /(no+1)
emsk.number = no + 1
emsk.rating = newrating
emsk.save()
return HttpResponseRedirect(reverse('project:taskfeedback' ,args=(project_id,task_id,)))
else:
raise Http404
@login_required
def addsubtask(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
    manager = project.manager
if(emp != manager):
raise Http404
else:
context={}
context['user'] = emp
context['task'] = task
st = Subtask.objects.filter(task = task, project = project)
context['subtasks'] = st
users=Employee.objects.all()
names=[]
for user in users:
names.append(user.name)
context['users'] = simplejson.dumps(names)
context['today'] = datetime.date.today()
return render(request,'project/addsubtask.html',context)
@login_required
def addtaskissue(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
    manager = project.manager
    if Teammember.objects.filter(project=project, employee=emp).count() == 0:
        raise Http404
    else:
context={}
context['user'] = emp
context['task'] = task
issues = TaskIssue.objects.filter(task = task)
context['issues'] = issues
return render(request,'project/addtaskissue.html',context)
@login_required
def addsubtaskissue(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
    manager = project.manager
    # TODO: check team membership here instead of only the project manager
if(emp != manager):
raise Http404
else:
context={}
context['user'] = emp
context['subtask'] = subtask
issues = SubtaskIssue.objects.filter(subtask = subtask)
context['issues'] = issues
return render(request,'project/addsubtaskissue.html',context)
@login_required
def addtask(request,project_id):
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
    manager = project.manager
if(emp != manager):
raise Http404
else:
context={}
context['user'] = emp
context['project']=project
t = Task.objects.filter(project = project)
context['tasks'] = t
users=Employee.objects.all()
names=[]
for user in users:
names.append(user.name)
context['users'] = simplejson.dumps(names)
context['today'] = datetime.date.today()
return render(request,'project/addtask.html',context)
@login_required
def addtmanager(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = request.POST['manager']
try:
manage=Employee.objects.get(name=manager)
except Employee.DoesNotExist:
raise Http404
task.manager=manage
task.save()
taskCreateNotification(task)
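    # ensure the new task manager is also registered as a project team member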
tm=Teammember.objects.filter(project=project,employee=manage)
if not tm:
tm=Teammember(employee=manage,project=project)
tm.save()
return HttpResponseRedirect(reverse('project:taskview' , args=(project_id,task_id,)))
@login_required
def addstmanager(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
assignee = request.POST['assignee']
try:
manage=Employee.objects.get(name=assignee)
except Employee.DoesNotExist:
raise Http404
subtask.assignee=manage
subtask.save()
subTaskCreateNotification(subtask)
tm=Teammember.objects.filter(project=project,employee=manage)
if not tm:
tm=Teammember(employee=manage,project=project)
tm.save()
return HttpResponseRedirect(reverse('project:subtaskview' , args=(project_id,task_id,subtask_id,)))
@login_required
def createtaskhandler(request,project_id):
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
    manager = project.manager
if(emp != manager):
raise Http404
else:
name = request.POST['name']
desc = request.POST['desc']
start = request.POST['start']
deadline = request.POST['deadline']
priority = request.POST['priority']
skills = request.POST.getlist('skill[]')
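        # task ids are sequential within a project: current task count + 1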
t = Task.objects.all().filter(project=project).count()
tid=t+1
task=Task(manager=emp,taskid=tid,name=name,description=desc,priority=priority.strip(),startdate=start,deadline=deadline,project=project)
task.save()
for skill in skills:
sk=Skill.objects.filter(name=skill)
if not sk:
sk=Skill(name=skill)
sk.save()
else:
sk=sk[0]
tsk=TaskSkills(task=task,project=project,skill=sk)
tsk.save()
context={}
context['user'] = emp
context['project'] = project
context['task'] =task
skills=TaskSkills.objects.filter(task=task,project=project)
user=[]
users=Employee.objects.all()
for skill in skills:
hss=HasSkill.objects.filter(skill=skill.skill)
for hs in hss:
user.append(hs.employee.name)
context['users'] = simplejson.dumps(user)
return render(request,'project/addtm.html',context)
@login_required
def createsubtaskhandler(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager
taskmanager = task.manager
if(emp != manager and emp!= taskmanager):
raise Http404
else:
name = request.POST['name']
desc = request.POST['desc']
start = request.POST['start']
deadline = request.POST['deadline']
priority = request.POST['priority']
skills = request.POST.getlist('skill[]')
t = Subtask.objects.all().filter(project=project,task=task).count()
tid=t+1
subtask=Subtask(subtaskid=tid,name=name,description=desc,priority=priority.strip(),enddate=datetime.date.max,startdate=start,deadline=deadline,project=project,task=task,assignee=emp)
subtask.save()
for skill in skills:
sk=Skill.objects.filter(name=skill)
if not sk:
sk=Skill(name=skill)
sk.save()
else:
sk=sk[0]
tsk=SubtaskSkills(subtask=subtask,task=task,project=project,skill=sk)
tsk.save()
context={}
context['user'] = emp
context['project'] = project
context['task'] =task
context['subtask'] = subtask
skills=SubtaskSkills.objects.filter(task=task,project=project,subtask=subtask)
user=[]
for skill in skills:
hss=HasSkill.objects.filter(skill=skill.skill)
for hs in hss:
user.append(hs.employee.name)
context['users'] = simplejson.dumps(user)
return render(request,'project/addstm.html',context)
@login_required
def createtaskissuehandler(request,project_id,task_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
    except (Project.DoesNotExist, Task.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager
taskmanager = task.manager
if(emp != manager and emp!= taskmanager):
raise Http404
else:
desc = request.POST['desc']
createdate = request.POST['start']
priority = request.POST['priority']
resolvedate = datetime.date.max
ti = TaskIssue(description=desc,priority=priority.strip(),createdate=createdate,resolvedate=resolvedate,task=task,project=project)
ti.save()
return HttpResponseRedirect(reverse('project:taskissueview' , args=(project_id,task_id,ti.pk,)))
@login_required
def createsubtaskissuehandler(request,project_id,task_id,subtask_id):
try:
project = Project.objects.get(pk=project_id)
task = Task.objects.get(taskid = task_id,project = project)
subtask = Subtask.objects.get(subtaskid=subtask_id,task=task)
    except (Project.DoesNotExist, Task.DoesNotExist, Subtask.DoesNotExist):
raise Http404
username = request.session.get('username')
emp = Employee.objects.get(username=username)
manager = project.manager
taskmanager = task.manager
if(emp != manager and emp!= taskmanager):
raise Http404
else:
desc = request.POST['desc']
createdate = request.POST['start']
priority = request.POST['priority']
resolvedate = datetime.date.max
sti = SubtaskIssue(description=desc,priority=priority.strip(),createdate=createdate,resolvedate=resolvedate,task=task,project=project,subtask=subtask)
sti.save()
return HttpResponseRedirect(reverse('project:subtaskissueview' , args=(project_id,task_id,subtask_id,sti.pk,)))
@login_required
def editproject(request,project_id):
    # show the edit form for this project
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
try:
project = Project.objects.get(pk=project_id)
except Project.DoesNotExist:
raise Http404
context['project'] = project
    manager = project.manager
if(emp != manager):
raise Http404
else:
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/editproject.html', context)
@login_required
def create(request):
if not(isManagerAuthenticated(request)):
return HttpResponseRedirect(reverse('accounts:index'))
else:
username = request.session.get('username')
emp = Employee.objects.get(username=username)
context = {}
context['user'] = emp
p = Teammember.objects.filter(employee = emp)
context['projects'] = p
return render(request, 'project/create.html',context)
@login_required
def edithandler(request):
if not(isManagerAuthenticated(request)):
return HttpResponseRedirect(reverse('accounts:index'))
else:
name = request.POST['name']
desc = request.POST['desc']
pid = request.POST['id']
try:
project = Project.objects.get(pk=pid)
except Project.DoesNotExist:
raise Http404
project.name = name
project.description = desc
project.save()
return HttpResponseRedirect("%s?edit=1" %reverse('project:view' ,args=(pid,)))
@login_required
def addteammember(request):
projects = Project.objects.all()
for project in projects:
tms = Teammember.objects.filter(project = project)
e = project.manager
tm = Teammember(project = project, employee = e)
if tms.filter(employee = e).count() == 0:
tm.save()
tasks = Task.objects.filter(project=project)
for task in tasks:
e=task.manager
tm = Teammember(project = project, employee = e)
if tms.filter(employee = e).count() == 0:
tm.save()
subtasks = Subtask.objects.filter(project=project,task=task)
for subtask in subtasks:
e=subtask.assignee
tm = Teammember(project = project, employee = e)
if tms.filter(employee = e).count() == 0:
tm.save()
return HttpResponseRedirect(reverse('accounts:myprofile'))
| sushant-hiray/teamflowy | project/views.py | Python | mit | 35,934 |
# GNU Enterprise Common Library - Special database driver plugins
#
# Copyright 2000-2007 Free Software Foundation
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: __init__.py 7615 2005-06-17 15:24:00Z reinhard $
"""
Database driver plugins for special backends.
"""
| HarmonyEnterpriseSolutions/harmony-platform | src/gnue/common/datasources/drivers/other/__init__.py | Python | gpl-2.0 | 979 |
# -*- coding: utf-8 -*-
#
# nfcpy documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 19 18:10:55 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import codecs
import datetime
import os
import re
try:
import sphinx_rtd_theme
except ImportError:
sphinx_rtd_theme = None
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *parts), "rb", "utf-8") as f:
return f.read()
def find_version(*file_paths):
"""
Build a path from *file_paths* and search for a ``__version__``
string inside.
"""
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None)
}
autodoc_member_order = 'bysource'
autodoc_default_options = {
'members': True,
'show-inheritance': True
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ndeflib'
year = datetime.date.today().year
copyright = u'2016{0}, Stephen Tiedemann'.format(
u'-{0}'.format(year) if year > 2016 else u""
)
# A string of reStructuredText that will be included at the end of
# every source file that is read. This is the right place to add
# substitutions that should be available in every file.
rst_epilog = """
.. _NFC Forum: http://nfc-forum.org/
"""
# A string of reStructuredText that will be included at the beginning
# of every source file that is read.
rst_prolog = """
"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = find_version("../src/ndef/__init__.py")
# The short X.Y version.
version = release.rsplit(u".", 1)[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_rtd_theme:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = "default"
#html_title = ' '.join([project, version, "documentation"])
#html_short_title = ' '.join([project, version])
#html_last_updated_fmt = '%b %d, %Y'
#html_show_sourcelink = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/ndeflib.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "images/ndeflib.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ndeflibdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ndeflib.tex', u'ndeflib documentation',
u'Stephen Tiedemann', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ndeflib', u'ndeflib Documentation',
[u'Stephen Tiedemann'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ndeflib', u'ndeflib Documentation',
u'Stephen Tiedemann', 'ndeflib', 'Parse or generate NDEF messages.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| nfcpy/ndeflib | docs/conf.py | Python | isc | 10,172 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import glob
import os
import numpy as np
from matplotlib.testing.decorators import image_comparison
from matplotlib import pyplot as plt
import matplotlib.cm as cm
@image_comparison(baseline_images=['pngsuite'], extensions=['png'])
def test_pngsuite():
dirname = os.path.join(
os.path.dirname(__file__),
'baseline_images',
'pngsuite')
files = glob.glob(os.path.join(dirname, 'basn*.png'))
files.sort()
fig = plt.figure(figsize=(len(files), 2))
for i, fname in enumerate(files):
data = plt.imread(fname)
cmap = None # use default colormap
if data.ndim == 2:
# keep grayscale images gray
cmap = cm.gray
plt.imshow(data, extent=[i, i + 1, 0, 1], cmap=cmap)
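    # tint the axes background so transparency in the test images stays visible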
plt.gca().patch.set_facecolor("#ddffff")
plt.gca().set_xlim(0, len(files))
def test_imread_png_uint16():
from matplotlib import _png
img = _png.read_png_int(os.path.join(os.path.dirname(__file__),
'baseline_images/test_png/uint16.png'))
assert (img.dtype == np.uint16)
assert np.sum(img.flatten()) == 134184960
| Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/tests/test_png.py | Python | gpl-2.0 | 1,259 |
from monopyly import *
from testing_utils import *
class PlayerWhoGetsOutOfJail(DefaultPlayerAI):
'''
A player who performs the given action when in jail.
'''
def __init__(self, action):
self.action = action
def set_action(self, action):
self.action = action
def get_out_of_jail(self, game_state, player):
return self.action
def test_has_to_pay_on_third_turn():
'''
The player fails to roll doubles for the three turns inside
and must pay £50.
'''
# We create a game with a player, who we put in jail...
game = Game()
player = game.add_player(PlayerWhoGetsOutOfJail(PlayerAIBase.Action.STAY_IN_JAIL))
player.state.square = 10
player.state.is_in_jail = True
player.state.number_of_turns_in_jail = 0
# The player doesn't roll doubles in the next three turns...
game.dice = MockDice([(2, 3), (1, 4), (6, 4)])
# We play the first turn...
game.play_one_turn(player)
assert player.state.square == 10
assert player.state.is_in_jail is True
assert player.state.number_of_turns_in_jail == 1
# We play the second turn...
game.play_one_turn(player)
assert player.state.square == 10
assert player.state.is_in_jail is True
assert player.state.number_of_turns_in_jail == 2
# We play the third turn. The player should have been forced
# to pay their way out...
game.play_one_turn(player)
assert player.state.square == 20
assert player.state.is_in_jail is False
assert player.state.number_of_turns_in_jail == 0
assert player.state.cash == 1450
def test_roll_doubles_on_third_turn():
'''
Tests that rolling doubles on the third turn in jail gets you
out of jail without paying (and that you don't get a turn afterwards).
'''
# We create a game with a player, who we put in jail...
game = Game()
player = game.add_player(PlayerWhoGetsOutOfJail(PlayerAIBase.Action.STAY_IN_JAIL))
player.state.square = 10
player.state.is_in_jail = True
player.state.number_of_turns_in_jail = 0
# The player rolls double on the third turn...
game.dice = MockDice([(2, 3), (1, 4), (5, 5), (1, 2)])
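    # the queued fourth roll must go unused: doubles out of jail do not earn an extra turn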
# We play the first turn...
game.play_one_turn(player)
assert player.state.square == 10
assert player.state.is_in_jail is True
assert player.state.number_of_turns_in_jail == 1
# We play the second turn...
game.play_one_turn(player)
assert player.state.square == 10
assert player.state.is_in_jail is True
assert player.state.number_of_turns_in_jail == 2
# We play the third turn. The player rolls doubles to get out...
game.play_one_turn(player)
assert player.state.square == 20
assert player.state.is_in_jail is False
assert player.state.number_of_turns_in_jail == 0
assert player.state.cash == 1500
def test_buy_way_out():
'''
The player buys their way out on the first turn.
'''
# We create a game with a player, who we put in jail...
game = Game()
player = game.add_player(PlayerWhoGetsOutOfJail(PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL))
player.state.square = 10
player.state.is_in_jail = True
player.state.number_of_turns_in_jail = 0
# The player buys their way out then rolls 8...
game.dice = MockDice([(3, 5)])
# The player should have paid £50 and be on Marlborough Street...
game.play_one_turn(player)
assert player.state.square == 18
assert player.state.is_in_jail is False
assert player.state.number_of_turns_in_jail == 0
assert player.state.cash == 1450
def test_get_out_of_jail_free():
'''
The player has two Get Out Of Jail Free cards and plays one of them.
'''
# We create a game with a player, who we put in jail...
game = Game()
player = game.add_player(PlayerWhoGetsOutOfJail(PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD))
player.state.square = 10
player.state.is_in_jail = True
player.state.number_of_turns_in_jail = 0
# The player has two Get Out Of Jail Free cards...
player.state.get_out_of_jail_free_cards.append(GetOutOfJailFree())
player.state.get_out_of_jail_free_cards.append(GetOutOfJailFree())
# The player plays the card then rolls 8...
game.dice = MockDice([(3, 5)])
# The player should have used a card and be on Marlborough Street...
game.play_one_turn(player)
assert player.state.square == 18
assert player.state.is_in_jail is False
assert player.state.number_of_turns_in_jail == 0
assert player.state.cash == 1500
assert player.state.number_of_get_out_of_jail_free_cards == 1
def test_get_out_of_jail_free_no_card():
'''
The player tries to play a Get Out Of Jail Free card, but
doesn't actually have one.
'''
# We create a game with a player, who we put in jail...
game = Game()
player = game.add_player(PlayerWhoGetsOutOfJail(PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD))
player.state.square = 10
player.state.is_in_jail = True
player.state.number_of_turns_in_jail = 0
# The player tries to play the card then rolls 8...
game.dice = MockDice([(3, 5)])
# The player should still be in jail...
game.play_one_turn(player)
assert player.state.square == 10
assert player.state.is_in_jail is True
assert player.state.number_of_turns_in_jail == 1
assert player.state.cash == 1500
assert player.state.number_of_get_out_of_jail_free_cards == 0
def test_out_jail_and_straight_back_again():
'''
The player pays their way out, rolls a 7 and picks up a Community
Chest Go To Jail card.
'''
game = Game()
player = game.add_player(PlayerWhoGetsOutOfJail(PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL))
player.state.square = 10
player.state.is_in_jail = True
player.state.number_of_turns_in_jail = 0
# The player pays their way out then rolls 7 to land on Community Chest...
game.dice = MockDice([(3, 4)])
# The top Community Chest card is Go To Jail...
game.state.board.community_chest_deck = MockCardDeck(GoToJailCard())
# The player should be back in jail after paying £50...
game.play_one_turn(player)
assert player.state.square == 10
assert player.state.is_in_jail is True
assert player.state.number_of_turns_in_jail == 0
assert player.state.cash == 1450
def test_get_out_of_jail_free_card():
'''
Tests that you get a GOOJF card when you land on Chance or
Community Chest and it is the top card.
Also tests that it is removed from the deck, and replaced when
it is played.
'''
game = Game()
player = game.add_player(PlayerWhoGetsOutOfJail(PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD))
# We set up the chance deck with three cards, GOOJF on top...
mock_chance_deck = MockCardDeck()
mock_chance_deck.set_next_cards([GetOutOfJailFree(mock_chance_deck), RewardCard(100), FineCard(50)])
game.state.board.chance_deck = mock_chance_deck
# The player starts on Marlborough street and rolls four to land on
# Chance, where they pick up a GOOJF card...
player.state.square = 18
game.dice = MockDice([(1, 3)])
game.play_one_turn(player)
# We check that the player now has a GOOJF card, and that the
# Chance deck has one fewer card...
assert player.state.number_of_get_out_of_jail_free_cards == 1
assert game.state.board.chance_deck.number_of_cards == 2
# They now roll eight to land on Go To Jail, and in the turn after
# that they play the card...
game.dice = MockDice([(5, 3), (4, 6)])
game.play_one_turn(player)
# The player should be in jail and have not yet played the card...
assert player.state.is_in_jail is True
assert player.state.number_of_get_out_of_jail_free_cards == 1
assert game.state.board.chance_deck.number_of_cards == 2
game.play_one_turn(player)
# The player should be on Free Parking, not have the card and the
# card should be back in the deck...
assert player.state.is_in_jail is False
assert player.state.square == 20
assert player.state.number_of_get_out_of_jail_free_cards == 0
assert game.state.board.chance_deck.number_of_cards == 3
| richard-shepherd/monopyly | tests/test_get_out_of_jail.py | Python | mit | 8,227 |
from django.core.management.base import BaseCommand
from django.db.models import Q
from photos.models import Photo
from intrinsic.models import IntrinsicPointComparison
class Command(BaseCommand):
args = ''
    help = 'Fill in the in_iiw_dataset and in_iiw_dense_dataset fields on Photo'
def handle(self, *args, **options):
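        # clear the flag, then recompute which photos qualify for the IIW dataset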
Photo.objects.all() \
.update(in_iiw_dataset=False)
Photo.objects \
.filter(synthetic=False,
rotated=False,
nonperspective=False,
inappropriate=False,
stylized=False) \
.filter(num_intrinsic_comparisons__gt=0) \
.filter(Q(license__publishable=True) | Q(light_stack__isnull=False)) \
.update(in_iiw_dataset=True)
print 'iiw:', Photo.objects.filter(in_iiw_dataset=True).count()
Photo.objects.all() \
.update(in_iiw_dense_dataset=False)
dense_photo_ids = IntrinsicPointComparison.objects \
.filter(point1__min_separation__lt=0.05) \
.order_by('photo') \
.distinct('photo') \
.values_list('photo_id', flat=True)
Photo.objects \
.filter(in_iiw_dataset=True) \
.filter(id__in=dense_photo_ids) \
.update(in_iiw_dense_dataset=True)
print 'iiw dense:', Photo.objects.filter(in_iiw_dense_dataset=True).count()
| seanbell/opensurfaces | server/intrinsic/management/commands/intrinsic_fill_in_iiw.py | Python | mit | 1,401 |
import asyncio
import aioredis
def main():
loop = asyncio.get_event_loop()
@asyncio.coroutine
def reader(ch):
while (yield from ch.wait_message()):
msg = yield from ch.get_json()
print("Got Message:", msg)
@asyncio.coroutine
def go():
pub = yield from aioredis.create_redis(
('localhost', 6379))
sub = yield from aioredis.create_redis(
('localhost', 6379))
res = yield from sub.subscribe('chan:1')
ch1 = res[0]
tsk = asyncio.async(reader(ch1))
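        # run the reader concurrently; it exits once the channel is unsubscribed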
res = yield from pub.publish_json('chan:1', ["Hello", "world"])
assert res == 1
yield from sub.unsubscribe('chan:1')
yield from tsk
sub.close()
pub.close()
loop.run_until_complete(go())
if __name__ == '__main__':
main()
| iho/aioredis | examples/pubsub.py | Python | mit | 846 |
# -*- coding: utf-8 -*-
import re
import logging
from completor.utils import check_subseq
from .utils import parse_uri
word_pat = re.compile(r'([\d\w]+)', re.U)
word_ends = re.compile(r'[\d\w]+$', re.U)
logger = logging.getLogger("completor")
# [
# [{
# u'range': {
# u'start': {u'line': 273, u'character': 5},
# u'end': {u'line': 273, u'character': 12}
# },
# u'uri': u'file:///home/linuxbrew/.linuxbrew/Cellar/go/1.12.4/libexec/src/fmt/print.go' # noqa
# }]
# ]
def gen_jump_list(ft, name, data):
res = []
if not data:
return res
items = data[0]
if items is None:
return res
for item in items:
uri = parse_uri(item['uri'])
if ft == 'go':
uri = uri.replace('%21', '!')
start = item['range']['start']
res.append({
'filename': uri,
'lnum': start['line'] + 1,
'col': start['character'] + 1,
'name': name,
})
return res
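# e.g. for the payload sketched above, gen_jump_list('go', 'Println', data)
# would return [{'filename': '.../fmt/print.go', 'lnum': 274, 'col': 6, 'name': 'Println'}]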
# [
# [
# {
# u'newText': u'',
# u'range': {
# u'start': {u'line': 8, u'character': 0},
# u'end': {u'line': 9, u'character': 0}
# }
# }, {
# u'newText': u'',
# u'range': {
# u'start': {u'line': 9, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }, {
# u'newText': u'\tfmt.Println()\n',
# u'range': {
# u'start': {u'line': 10, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }, {
# u'newText': u'}\n',
# u'range': {
# u'start': {u'line': 10, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }
# ]
# ]
def format_text(data):
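    # NOTE: stub; the textEdit payload is iterated but no edits are applied yet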
if not data:
return
for item in data[0]:
pass
def get_completion_word(item, insert_text):
if insert_text != b'label':
try:
return item['textEdit']['newText'], \
item['textEdit']['range']['start']['character']
except KeyError:
pass
label = item['label'].strip()
match = word_pat.match(label)
return match.groups()[0] if match else '', -1
hiddenLines = ["on pkg.go.dev"]
escapes = re.compile(r'''\\([\\\x60*{}[\]()#+\-.!_>~|"$%&'\/:;<=?@^])''',
re.UNICODE)
escape_types = ['go', 'json']
def _shouldHidden(line):
for item in hiddenLines:
if item in line:
return True
return False
def gen_hover_doc(ft, value):
if ft not in escape_types:
return value
lines = []
for l in value.split("\n"):
if _shouldHidden(l):
continue
lines.append(escapes.sub(r"\1", l).replace(' ', ' '))
return "\n".join(lines)
def filter_items(items, input_data):
target = ''
match = word_ends.search(input_data)
if match:
target = match.group()
if not target:
return items
filtered = []
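    # score each candidate word against the typed word; drop non-matches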
for item in items:
score = check_subseq(target, item[1])
if score is None:
continue
filtered.append((item, score))
filtered.sort(key=lambda x: x[1])
return [e for e, _ in filtered]
| maralla/completor.vim | pythonx/completers/lsp/action.py | Python | mit | 3,344 |
import json, csv
import array, random, pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_predict, cross_val_score
from opt.genetic import GeneticOptimizer, GeneticConfiguration, LogHelper
from deap import creator, base, tools
class GeneticLogHelper(LogHelper):
def __init__(self, genlog, datalog, sep):
super().__init__()
self.genlog = genlog
self.datalog = datalog
self.sep = sep
def get_genlog(self):
return pd.read_csv(self.genlog, self.sep, index_col=0)
def get_datalog(self):
return pd.read_csv(self.datalog, self.sep, index_col=0)
def write_row_2file(self, row, csv_writer, file):
csv_writer.writerow(row)
file.flush()
def log(self, context, generation_no, results):
self.log_generation(context, generation_no, results)
self.log_configuration(context, generation_no, results)
def log_generation(self, context, generation_no, results):
gen_row = [generation_no]
gen_row.extend(results.fitness())
self.write_row_2file(gen_row, context['csv_gen'], context['csv_gen_file'])
def log_configuration(self, context, generation_no, results):
max_config = results.max()
row = [generation_no, max_config.value()]
row.extend(max_config.as_list())
self.write_row_2file(row, context['csv'], context['csv_file'])
def setup_genlog(self, context):
gencols = ['Generation']
gencols.extend(['#' + str(x) for x in range(0, context['settings']['n'])])
context['csv_gen_file'] = open(self.genlog, 'a+')
context['csv_gen'] = csv.writer(context['csv_gen_file'], delimiter=';', lineterminator='\n')
self.write_row_2file(gencols, context['csv_gen'], context['csv_gen_file'])
def setup_configuration_log(self, context):
cols = ['Generation', 'Max Fitness']
cols.extend(context['features'].columns.tolist())
context['csv_file'] = open(self.datalog, 'a+')
context['csv'] = csv.writer(context['csv_file'], delimiter=';', lineterminator='\n')
self.write_row_2file(cols, context['csv'], context['csv_file'])
def setup(self, context):
self.setup_configuration_log(context)
self.setup_genlog(context)
def close(self, context):
context['csv_file'].close()
context['csv_gen_file'].close()
class FeatureSelectionConfiguration(GeneticConfiguration):
def __init__(self, individual, all_columns):
super().__init__(individual)
self.all_columns = all_columns
def column_indices(self):
return [i for i, j in enumerate(self.individual) if j]
# As list of active and not active columns
def as_list(self):
return [v for v in self.individual]
def columns(self):
return self.all_columns[self.column_indices()]
def __str__(self):
cols = self.columns()
return json.dumps({
'fitness': self.value(),
'columns_length': len(cols),
'columns_str': str(cols.tolist()),
'indices_str': str(self.individual)
}, indent=4, sort_keys=True)
class CVGeneticFeatureSelection(GeneticOptimizer):
def __init__(self, clfs, features, labels, score_func=None, **settings):
self.clfs = clfs
self.features = features
self.labels = labels[labels.columns[0]].tolist()
self.score_func = score_func
super().__init__(**settings)
self.settings['n'] = min(self.settings['n'], self.settings['n_max'])
def configuration(self, individual):
return FeatureSelectionConfiguration(individual, self.features.columns)
def default_settings(self):
return {
**super().default_settings(),
"cv_fold": 3,
"str(clf)": str(self.clfs),
"n": self.features_len(),
"n_max": 1000
}
def log_helper(self):
        return GeneticLogHelper(self.settings['genlog'], self.settings['datalog'], self.settings['sep'])
def features_len(self):
return self.features.shape[1]
def individual(self, toolbox):
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax)
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, self.features_len())
def eval_on(self, clfs, features, labels):
fitness = [0]
cv = StratifiedKFold(n_splits=self.cv_fold, random_state=0)
for clf in clfs:
if self.score_func is not None:
y_proba = cross_val_predict(clf, features, labels, cv=cv, method='predict_proba')
fitness.append(self.score_func(labels, y_proba))
else:
fitness.append(cross_val_score(clf, features, labels, cv=cv).mean())
return max(fitness)
def eval(self, individual):
fitness = 0
columns = self.configuration(individual).columns()
if len(columns) > 0:
features_subset = self.features.as_matrix(columns=columns)
fitness = self.eval_on(self.clfs, features_subset, self.labels)
return fitness,
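# Illustrative usage sketch (names and file paths below are placeholders, not
# part of the module): `features` is a pandas DataFrame and `labels` a
# single-column DataFrame; the genlog/datalog/sep settings feed GeneticLogHelper.
#
#   from sklearn.ensemble import RandomForestClassifier
#   selector = CVGeneticFeatureSelection(
#       [RandomForestClassifier()], features, labels,
#       genlog='generations.csv', datalog='configurations.csv', sep=';')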
| piotrsobecki/opt | opt/feature_selection/genetic.py | Python | mit | 5,321 |
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api import http_pb2
from google.protobuf import descriptor_pb2
from google.api_core.protobuf_helpers import get_messages
from google.cloud.language_v1.proto import language_service_pb2
_shared_modules = [http_pb2, descriptor_pb2]
_local_modules = [language_service_pb2]
names = []
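# attach every protobuf message class to this module and collect the names for __all__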
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = "google.cloud.language_v1.types"
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
| tseaver/google-cloud-python | language/google/cloud/language_v1/types.py | Python | apache-2.0 | 1,375 |
""" Benchmarks for functions related to streamline
Run all benchmarks with::
import dipy.tracking as dipytracking
dipytracking.bench()
If you have doctests enabled by default in nose (with a noserc file or
environment variable), and you have a numpy version <= 1.6.1, this will also run
the doctests, let's hope they pass.
Run this benchmark with:
nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_streamline.py
"""
import numpy as np
from dipy.tracking.streamline import set_number_of_points, length
from dipy.tracking.tests.test_streamline import set_number_of_points_python, length_python
from numpy.testing import measure
def bench_resample():
repeat = 1000
nb_points = 42
streamline = np.random.rand(1000, 3)
print("Timing set_number_of_points() in Cython")
cython_time = measure("set_number_of_points(streamline, nb_points)", repeat)
print("Cython time: {0:.2}sec".format(cython_time))
python_time = measure("set_number_of_points_python(streamline, nb_points)", repeat)
print("Python time: {0:.2}sec".format(python_time))
print("Speed up of {0}x".format(python_time/cython_time))
def bench_length():
repeat = 1000
streamline = np.random.rand(1000, 3)
print("Timing length() in Cython")
cython_time = measure("length(streamline)", repeat)
print("Cython time: {0:.2}sec".format(cython_time))
python_time = measure("length_python(streamline)", repeat)
print("Python time: {0:.2}sec".format(python_time))
print("Speed up of {0}x".format(python_time/cython_time))
| samuelstjean/dipy | dipy/tracking/benchmarks/bench_streamline.py | Python | bsd-3-clause | 1,575 |
# -*- coding: utf-8 -*-
"""
Aligning two differently tokenized string representations of bracket
trees.
@author = Andreas Peldszus
@mail = <peldszus at uni dash potsdam dot de>
@version = 0.1.0
"""
class AlignmentError(Exception):
pass
def align_tokenized_tree(toks1, toks2, tree_pair_name="no-name-given",
delexicalize=False):
""" Aligns two tokenized string representations of bracket trees with
different tokenizations.
This function aligns two tokenized bracket tree strings. Brackets are
assumed to be round. Opening brackets and nodelabels are assumed to be
one token of the form '(X', closing brackets are assumed to be separate
tokens. The alignment function returns the tokenized strings, modified
such that the tokens align.
>>> align_tokenized_tree('(S a b c )'.split(), '(S a b c )'.split())
(0, ['(S', 'a', 'b', 'c', ')'], ['(S', 'a', 'b', 'c', ')'])
>>> align_tokenized_tree('(S (S (S a b ) ) )'.split(), '(S a b )'.split())
(0, ['(S', '(S', '(S', 'a', 'b', ')', ')', ')'], ['(S', 'a', 'b', ')'])
>>> align_tokenized_tree('(S a (X b (Y c ) ) )'.split(),
... '(S a b c )'.split())
(0, ['(S', 'a', '(X', 'b', '(Y', 'c', ')', ')', ')'], ['(S', 'a', 'b', 'c', ')'])
>>> align_tokenized_tree('(S a b c )'.split(), '(S ab c )'.split())
Aligning subtokens a and ab in text no-name-given ...
(0, ['(S', 'a', 'b', 'c', ')'], ['(S', 'a', 'b', 'c', ')'])
>>> align_tokenized_tree('(S ab c )'.split(), '(S a bc )'.split())
Aligning subtokens ab and a in text no-name-given ...
Aligning subtokens b and bc in text no-name-given ...
(0, ['(S', 'a', 'b', 'c', ')'], ['(S', 'a', 'b', 'c', ')'])
>>> align_tokenized_tree('(S a b c )'.split(), '(S a )'.split())
Traceback (most recent call last):
...
AlignmentError: Error: Overlap. remaining_a=['c', ')'], remaining_b=[]
>>> align_tokenized_tree('(S a b c )'.split(), '(S a c )'.split())
Traceback (most recent call last):
...
AlignmentError: Error: Trees don't align. current_a, current_b, a, b:
['(S', 'a'] b ['c', ')']
['(S', 'a'] c [')']
"""
# make sure we don't get empty strings
a = [i.strip() for i in toks1 if i.strip() != '']
b = [i.strip() for i in toks2 if i.strip() != '']
# initialize variables used while aligning the trees
new_a = []
new_b = []
current_a = None
current_b = None
buffer_a = ''
buffer_b = ''
error = 0
def add_to(new_list, token):
if delexicalize:
token = 'tok'
new_list.append(token)
while True:
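        # buffer_a / buffer_b carry the unconsumed remainder of a token that was
        # only partially aligned in the previous iteration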
# if nothing in buffer a, get next symbol in a, skipping treebrackets
while buffer_a == '':
if len(a) > 0:
current_a = a.pop(0)
if current_a.startswith('(') or current_a == ')':
new_a.append(current_a)
else:
break
else:
current_a = None
break
else:
current_a = buffer_a
buffer_a = ''
# if nothing in buffer b, get next symbol in b, skipping treebrackets
while buffer_b == '':
if len(b) > 0:
current_b = b.pop(0)
if current_b.startswith('(') or current_b == ')':
new_b.append(current_b)
else:
break
else:
current_b = None
break
else:
current_b = buffer_b
buffer_b = ''
# compare symbols
if current_a is None and current_b is None:
if [] == a == b:
# successfully finished
break
else:
# overlap match
raise AlignmentError(
"Error: Overlap. remaining_a=%s, remaining_b=%s" % (a, b))
elif current_a is None or current_b is None:
# one string is consumed, the other not,
# another form of overlap match
raise AlignmentError(
"Error: Overlap. remaining_a=%s, remaining_b=%s" % (a, b))
elif current_a == current_b:
# align tokens
add_to(new_a, current_a)
add_to(new_b, current_b)
elif current_a.startswith(current_b):
# align subtokens
add_to(new_a, current_b)
add_to(new_b, current_b)
buffer_a = current_a[len(current_b):]
print "Aligning subtokens %s and %s in text %s ..." % \
(current_a, current_b, tree_pair_name)
elif current_b.startswith(current_a):
# align subtokens
add_to(new_a, current_a)
add_to(new_b, current_a)
buffer_b = current_b[len(current_a):]
print "Aligning subtokens %s and %s in text %s ..." % \
(current_a, current_b, tree_pair_name)
else:
# cannot align
raise AlignmentError(
"Error: Trees don't align. current_a, current_b, a, b:\n" +
"%s %s %s\n" % (new_a[-5:], current_a, a[:5]) +
"%s %s %s" % (new_b[-5:], current_b, b[:5]))
return error, new_a, new_b
| peldszus/DiscourseSegmenter | dsegmenter/evaluation/align.py | Python | mit | 5,304 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models for storing campaign finance tables from the CAL-ACCESS database.
"""
# Models
from calaccess_raw import fields
from .base import CalAccessBaseModel
# Annotations
from calaccess_raw import annotations
from calaccess_raw.annotations import DocumentCloud
class CvrSoCd(CalAccessBaseModel):
"""
The cover page for statement-of-organization forms that create or terminate an entity.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"REC_TYPE",
"FORM_TYPE",
)
DOCUMENTCLOUD_PAGES = [
# CAL-ACCESS record layout
DocumentCloud(id='2711614', start_page=39, end_page=41),
# Mapping of .CAL format to CAL-ACCESS database table / fields
DocumentCloud(id='2711616', start_page=28, end_page=31),
# .CAL Format v1.05.02
DocumentCloud(id='2712033', start_page=46, end_page=47),
# .CAL Formate v2.01
DocumentCloud(id='2712034', start_page=59, end_page=61),
]
FILING_FORMS = [
annotations.get_form('F400').get_section('P1'),
annotations.get_form('F400').get_section('P2'),
annotations.get_form('F400').get_section('P4'),
annotations.get_form('F402').get_section('CVR'),
annotations.get_form('F410').get_section('P1'),
annotations.get_form('F410').get_section('P2'),
annotations.get_form('F410').get_section('P4'),
]
acct_opendt = fields.DateField(
db_column="ACCT_OPENDT",
verbose_name='account opened datetime',
null=True,
help_text='Date Account Opened'
)
ACTIVITY_LVL_CHOICES = (
("CI", "City"),
("CO", "County"),
("ST", "State"),
("St", "State"),
("st", "State"),
)
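    # the raw source data mixes upper/lower case for the state code, hence the
    # near-duplicate choices above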
actvty_lvl = fields.CharField(
max_length=2,
db_column="ACTVTY_LVL",
blank=True,
choices=ACTIVITY_LVL_CHOICES,
verbose_name="activity level",
help_text="Organization's level of activity",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=30),
DocumentCloud(id='2712033', start_page=47),
DocumentCloud(id='2712034', start_page=60),
]
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
verbose_name="amendment ID",
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
)
bank_adr1 = fields.CharField(
max_length=55,
db_column="BANK_ADR1",
blank=True,
verbose_name='bank address 1',
help_text='Street 1 of Financial Institution',
)
bank_adr2 = fields.CharField(
max_length=55,
db_column="BANK_ADR2",
blank=True,
verbose_name='bank address 2',
help_text='Street 2 of Financial Institution',
)
bank_city = fields.CharField(
max_length=30,
db_column="BANK_CITY",
blank=True,
verbose_name='bank city',
help_text='City of Financial Institution',
)
bank_nam = fields.CharField(
max_length=200,
db_column="BANK_NAM",
blank=True,
verbose_name='bank name',
help_text='Name of Financial Institution',
)
bank_phon = fields.CharField(
max_length=20,
db_column="BANK_PHON",
blank=True,
verbose_name='bank phone',
help_text='Phone of Financial Institution',
)
bank_st = fields.CharField(
max_length=2,
db_column="BANK_ST",
blank=True,
verbose_name='bank street',
help_text='State of Financial Institution',
)
bank_zip4 = fields.CharField(
max_length=10,
db_column="BANK_ZIP4",
blank=True,
verbose_name='bank zip4',
help_text='ZIP+4 of Financial Institution',
)
brdbase_cb = fields.CharField(
max_length=1,
db_column="BRDBASE_CB",
blank=True,
verbose_name='broad based committee check-box',
help_text='Broad Based Committee Check-box',
)
city = fields.CharField(
max_length=30,
db_column="CITY",
blank=True,
verbose_name='city',
help_text='City of Org / Committee / Candidate or Office holder',
)
cmte_email = fields.CharField(
max_length=60,
db_column="CMTE_EMAIL",
blank=True,
verbose_name='committee email',
help_text='Optional Committee EMAIL address',
)
cmte_fax = fields.CharField(
max_length=20,
db_column="CMTE_FAX",
blank=True,
verbose_name='committee fax',
help_text='Optional Committee FAX number',
)
com82013id = fields.CharField(
max_length=9,
db_column="COM82013ID",
blank=True,
verbose_name='committee 82013 id',
help_text='ID of 82013 Committee (if Com82013Nm is a RCP cmtte)',
)
com82013nm = fields.CharField(
max_length=200,
db_column="COM82013NM",
blank=True,
verbose_name='committee 82013 name',
help_text='Name of 82013 Committee (F400; when Com82013YN=Y)',
)
com82013yn = fields.CharField(
max_length=1,
db_column="COM82013YN",
blank=True,
verbose_name='committee 82013 yes/no',
help_text='Is this SMO a 82013 "Committee"? (Yes/No) (F400)',
)
control_cb = fields.CharField(
max_length=1,
db_column="CONTROL_CB",
blank=True,
verbose_name='controlled checkbox',
help_text='Controlled Committee Check-box',
)
county_act = fields.CharField(
max_length=20,
db_column="COUNTY_ACT",
blank=True,
verbose_name="county active",
help_text='County where Active (F410)',
)
county_res = fields.CharField(
max_length=20,
db_column="COUNTY_RES",
blank=True,
verbose_name='county residence',
help_text='County of Domicile, Residence, or Location',
)
ENTITY_CD_CHOICES = (
('BMC', annotations.choices.CAMPAIGN_ENTITY_CODES['BMC']),
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('CTL', annotations.choices.CAMPAIGN_ENTITY_CODES['CTL']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
('SMO', annotations.choices.CAMPAIGN_ENTITY_CODES['SMO']),
)
entity_cd = fields.CharField(
max_length=3,
db_column="ENTITY_CD",
blank=True,
choices=ENTITY_CD_CHOICES,
verbose_name="Entity code",
help_text="Entity Code of the Filer. Values: \
SMO - Slate Mailer Organization (F400,402) [COM|RCP] - Recipient Committee (F410)",
documentcloud_pages=annotations.choices.DOCS['entity_codes'] + [
DocumentCloud(id='2712033', start_page=46),
DocumentCloud(id='2712034', start_page=59),
],
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=9,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
filer_namf = fields.CharField(
max_length=45,
db_column="FILER_NAMF",
blank=True,
verbose_name="filer first name",
help_text="Filer first name",
)
filer_naml = fields.CharField(
max_length=200,
db_column="FILER_NAML",
blank=True,
verbose_name="filer last name",
help_text="Filer last name",
)
filer_nams = fields.CharField(
max_length=10,
db_column="FILER_NAMS",
blank=True,
verbose_name="filer name suffix",
help_text="Filer name suffix",
)
filer_namt = fields.CharField(
max_length=10,
db_column="FILER_NAMT",
blank=True,
verbose_name="filer name title",
help_text="Filer name title",
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing id',
help_text="Unique filing identification number"
)
FORM_TYPE_CHOICES = (
('F400', annotations.get_form('F400').full_title),
('F402', annotations.get_form('F402').full_title),
('F410', annotations.get_form('F410').full_title),
)
form_type = fields.CharField(
max_length=4,
db_column="FORM_TYPE",
verbose_name='form type',
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=46),
DocumentCloud(id='2712034', start_page=59),
]
)
genpurp_cb = fields.CharField(
max_length=1,
db_column="GENPURP_CB",
blank=True,
verbose_name='general purpose checkbox',
help_text='General Purpose Committee Check-box',
)
gpc_descr = fields.CharField(
max_length=300,
db_column="GPC_DESCR",
blank=True,
verbose_name='general purpose committee description',
help_text='Brief description of Activity of GPC',
)
mail_city = fields.CharField(
max_length=30,
db_column="MAIL_CITY",
blank=True,
verbose_name='mail city',
help_text='Mailing Address of Filing Committee - City',
)
mail_st = fields.CharField(
max_length=2,
db_column="MAIL_ST",
blank=True,
        verbose_name='mail state',
help_text='Mailing Address of Filing Committee - State',
)
mail_zip4 = fields.CharField(
max_length=10,
db_column="MAIL_ZIP4",
blank=True,
verbose_name='mail zip4',
help_text='Mailing Address of Filing Committee - ZIP+4',
)
phone = fields.CharField(
max_length=20,
db_column="PHONE",
blank=True,
verbose_name='phone',
help_text='Phone Number of Org / Committee / Candidate or Office holder',
)
primfc_cb = fields.CharField(
max_length=1,
db_column="PRIMFC_CB",
blank=True,
verbose_name='primarily formed committee check-box',
help_text='Primarily Formed Committee Check-box',
)
qualfy_dt = fields.DateField(
db_column="QUALFY_DT",
null=True,
verbose_name="qualified datetime",
help_text="Date qualified as an organization"
)
qual_cb = fields.CharField(
max_length=1,
db_column="QUAL_CB",
blank=True,
verbose_name='qualified checkbox',
help_text='Qualified Committee check-box (Req. if SMO)',
)
REC_TYPE_CHOICES = (
("CVR", "Cover Page for Stmt of Organization / Slate Mailer Org, Stmt of \
Termination / Slate Mailer Org or Stmt of Organization / Recipient Committee"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text="Record Type Value: CVR",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=28),
DocumentCloud(id='2712033', start_page=46),
DocumentCloud(id='2712034', start_page=59),
],
)
report_num = fields.CharField(
max_length=3,
db_column="REPORT_NUM",
blank=True,
verbose_name='report number',
        help_text='Report Number - Values: \
        000 - Original Report; 001 thru 999 - Amended Report #1-#999',
)
rpt_date = fields.DateField(
db_column="RPT_DATE",
null=True,
verbose_name='report date',
help_text='Date this report is filed',
)
smcont_qualdt = fields.DateField(
db_column="SMCONT_QUALDT",
null=True,
        verbose_name='small contributor qualified date',
help_text='Date Small Contributor Committee Qualified',
)
sponsor_cb = fields.CharField(
max_length=1,
db_column="SPONSOR_CB",
blank=True,
verbose_name='sponsored checkbox',
help_text='Sponsored Committee Check-box',
)
st = fields.CharField(
max_length=2,
db_column="ST",
blank=True,
        verbose_name='state',
help_text='State of Org / Committee / Candidate or Office holder',
)
surplusdsp = fields.CharField(
max_length=90,
db_column="SURPLUSDSP",
blank=True,
verbose_name='surplus disposition',
help_text='Disposition of Surplus Funds',
)
term_date = fields.DateField(
db_column="TERM_DATE",
null=True,
verbose_name='termination date',
help_text='Termination Effective Date (Req. if F402)',
)
tres_city = fields.CharField(
max_length=30,
db_column="TRES_CITY",
blank=True,
verbose_name="treasurer city",
help_text="Treasurer's city",
)
tres_namf = fields.CharField(
max_length=45,
db_column="TRES_NAMF",
blank=True,
verbose_name="treasurer first name",
help_text="Treasurer's first name",
)
tres_naml = fields.CharField(
max_length=200,
db_column="TRES_NAML",
blank=True,
verbose_name="treasurer last name",
help_text="Treasurer's last name",
)
tres_nams = fields.CharField(
max_length=10,
db_column="TRES_NAMS",
blank=True,
verbose_name="treasurer name suffix",
help_text="Treasurer's name suffix",
)
tres_namt = fields.CharField(
max_length=10,
db_column="TRES_NAMT",
blank=True,
verbose_name="treasurer name title",
help_text="Treasurer's name title",
)
tres_phon = fields.CharField(
max_length=20,
db_column="TRES_PHON",
blank=True,
verbose_name="treasurer phone number",
help_text="Treasurer's phone number",
)
tres_st = fields.CharField(
max_length=2,
db_column="TRES_ST",
blank=True,
verbose_name="treasurer street",
help_text="Treasurer's street",
)
tres_zip4 = fields.CharField(
max_length=10,
db_column="TRES_ZIP4",
blank=True,
verbose_name="treasurer zip code",
help_text="Treasurer's ZIP Code",
)
zip4 = fields.CharField(
max_length=10,
db_column="ZIP4",
blank=True,
verbose_name='zip4',
help_text='ZIP+4 for Org / Committee / Candidate or Office holder',
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = "CVR_SO_CD"
ordering = ("-rpt_date",)
def __str__(self):
return str(self.filing_id)
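
# A hedged usage sketch, not part of the schema: these cover-page tables are
# ordinary Django models, so once the raw data has been loaded they can be
# queried with the standard ORM. The import path and filter values below are
# illustrative assumptions.
#
#   from calaccess_raw.models import CvrSoCd
#
#   # The ten most recently filed statement-of-organization cover pages
#   recent = CvrSoCd.objects.filter(form_type="F410").order_by("-rpt_date")[:10]
#   for cvr in recent:
#       print(cvr.filing_id, cvr.filer_naml, cvr.rpt_date)
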
class Cvr2SoCd(CalAccessBaseModel):
"""
Extra information from a statement-of-organization form.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=8),
DocumentCloud(id='2711614', start_page=45, end_page=46),
DocumentCloud(id='2711616', start_page=38, end_page=40),
DocumentCloud(id='2712033', start_page=48, end_page=49),
DocumentCloud(id='2712034', start_page=62, end_page=64),
]
FILING_FORMS = [
annotations.get_form('F400').get_section('P3'),
annotations.get_form('F410').get_section('P4'),
]
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
verbose_name="amendment ID",
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
db_index=True,
verbose_name='line item',
help_text="Line item number of this record",
)
REC_TYPE_CHOICES = (
("CVR2", "Cover Page; Additional Names & Addresses"),
)
rec_type = fields.CharField(
choices=REC_TYPE_CHOICES,
db_column='REC_TYPE',
max_length=4,
db_index=True,
verbose_name='record type',
help_text='Type of record. This column will always contain "CVR2".',
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=38),
DocumentCloud(id='2711614', start_page=46),
DocumentCloud(id='2712033', start_page=45),
DocumentCloud(id='2712034', start_page=58),
]
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
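    # FORM_TYPE_CHOICES is built dynamically from FILING_FORMS above, pairing
    # each filing-form section's db_value with its full title. A sketch of the
    # resulting shape (the exact titles come from the annotations module at
    # runtime):
    #   (('F400', '... Part 3 ...'), ('F410', '... Part 4 ...'))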
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
db_column='FORM_TYPE',
max_length=4,
verbose_name='form type',
help_text="Form type of the filing the record is included in. This must \
equal the form_type of the parent filing's cover (CVR) record.",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=38),
DocumentCloud(id='2712033', start_page=45, end_page=46),
DocumentCloud(id='2712034', start_page=58, end_page=59),
]
)
tran_id = fields.CharField(
max_length=20,
db_column='TRAN_ID',
blank=True,
verbose_name='transaction ID',
help_text='Permanent value unique to this item',
)
ENTITY_CD_CHOICES = (
('ATH', annotations.choices.CAMPAIGN_ENTITY_CODES['ATH']),
('ATR', annotations.choices.CAMPAIGN_ENTITY_CODES['ATR']),
('BNM', annotations.choices.CAMPAIGN_ENTITY_CODES['BNM']),
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('CTL', annotations.choices.CAMPAIGN_ENTITY_CODES['CTL']),
('OFF', annotations.choices.CAMPAIGN_ENTITY_CODES['OFF']),
('POF', annotations.choices.CAMPAIGN_ENTITY_CODES['POF']),
('PRO', annotations.choices.CAMPAIGN_ENTITY_CODES['PRO']),
('SPO', annotations.choices.CAMPAIGN_ENTITY_CODES['SPO']),
('BMN', 'Unknown'), # Misspelling of 'BNM'?
)
entity_cd = fields.CharField(
choices=ENTITY_CD_CHOICES,
blank=True,
db_column='ENTITY_CD',
max_length=3,
verbose_name='entity code',
help_text='Entity code of the entity described by the record.',
documentcloud_pages=annotations.choices.DOCS['entity_codes'] + [
DocumentCloud(id='2711616', start_page=38),
DocumentCloud(id='2712033', start_page=48),
DocumentCloud(id='2712034', start_page=62),
]
)
enty_naml = fields.CharField(
db_column='ENTY_NAML',
max_length=200,
blank=True,
verbose_name='entity last name',
help_text="Entity's business name or last name if the entity is an \
individual"
)
enty_namf = fields.CharField(
db_column='ENTY_NAMF',
max_length=45,
blank=True,
verbose_name='entity first name',
help_text="Entity's first name if the entity is an individual"
)
enty_namt = fields.CharField(
db_column='ENTY_NAMT',
max_length=10,
blank=True,
verbose_name='entity name title',
help_text="Entity's name prefix or title if the entity is an \
individual"
)
enty_nams = fields.CharField(
db_column='ENTY_NAMS',
max_length=10,
blank=True,
verbose_name='entity name suffix',
help_text="Entity's name suffix if the entity is an individual"
)
ITEM_CD_CHOICES = (
('ATR', 'Assistant Treasurer (F410)'),
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('CTL', 'Controlled Committee (F410)'),
('P5B', 'Unknown'),
('PFC', 'Primarily Formed Committee Item (F410)'),
('Pfc', 'Primarily Formed Committee Item (F410)'),
        ('POF', 'Principal Officer (F400, F410)'),
('PRO', annotations.choices.CAMPAIGN_ENTITY_CODES['PRO']),
('SMA', 'Slate Mailer Authorizer (F400)'),
('SPO', 'Sponsored Committee Itemization (F410)'),
('n/a', 'Not Applicable'),
('CON', 'Unknown'),
('CST', 'Unknown'),
)
item_cd = fields.CharField(
db_column='ITEM_CD',
max_length=4,
blank=True,
choices=ITEM_CD_CHOICES,
verbose_name='item code',
help_text="Section of the Statement of Organization this itemization \
relates to. See CAL document for the definition of legal values for this column.",
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=8),
DocumentCloud(id='2712034', start_page=10),
DocumentCloud(id='2712033', start_page=48),
DocumentCloud(id='2712034', start_page=62),
],
)
mail_city = fields.CharField(
db_column='MAIL_CITY',
max_length=30,
blank=True,
verbose_name='mail city',
help_text="City portion of the entity's mailing address"
)
mail_st = fields.CharField(
db_column='MAIL_ST',
max_length=4,
blank=True,
        verbose_name='mail state',
help_text="State portion of the entity's mailing address"
)
mail_zip4 = fields.CharField(
db_column='MAIL_ZIP4',
max_length=10,
blank=True,
verbose_name='mail zip4',
help_text="Zipcode portion of the entity's mailing address",
)
day_phone = fields.CharField(
db_column='DAY_PHONE',
max_length=20,
blank=True,
verbose_name='day phone',
help_text="Entity's daytime phone number"
)
fax_phone = fields.CharField(
db_column='FAX_PHONE',
max_length=20,
blank=True,
verbose_name='fax phone number',
help_text="Entity's fax number"
)
email_adr = fields.CharField(
db_column='EMAIL_ADR',
max_length=60,
blank=True,
verbose_name='email address',
help_text="Email address. Not contained in current forms."
)
cmte_id = fields.IntegerField(
db_column='CMTE_ID',
blank=True,
null=True,
verbose_name="Committee ID",
help_text="Entity's identification number"
)
ind_group = fields.CharField(
db_column='IND_GROUP',
max_length=90,
blank=True,
verbose_name='industry group',
help_text="Industry group/affiliation description"
)
OFFICE_CD_CHOICES = annotations.sort_choices(annotations.choices.OFFICE_CODES) + (
('Asm', annotations.choices.OFFICE_CODES['ASM']),
('LEG', annotations.choices.OFFICE_CODES['ASM']),
('OF', annotations.choices.OFFICE_CODES['ASM']),
('REP', annotations.choices.OFFICE_CODES['ASM']),
('05', annotations.choices.OFFICE_CODES['ASM']),
# Only one record: http://cal-access.ss.ca.gov/PDFGen/pdfgen.prg?filingid=1388367&amendid=0
# Looks like this was corrected on a later amendment.
        # Don't think they actually mean to specify an office
('H', 'N/A'),
# Only one record: http://cal-access.ss.ca.gov/PDFGen/pdfgen.prg?filingid=1613541&amendid=1
# Seems like this committee is supporting a state measure, rather than any candidate
# Don't think they actually mean to specify an office. Was removed on later amendment
('PRO', 'N/A'),
# All over the board
('PAC', 'Unknown'),
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=3,
blank=True,
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CD_CHOICES,
documentcloud_pages=annotations.choices.DOCS['office_codes']
)
offic_dscr = fields.CharField(
db_column='OFFIC_DSCR',
max_length=40,
blank=True,
verbose_name='office description',
help_text="Office sought description used if the office sought code \
(OFFICE_CD) equals other (OTH)."
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES) + (
# Only one record: http://cal-access.ss.ca.gov/PDFGen/pdfgen.prg?filingid=1388367&amendid=0
# Looks like this was corrected on a later amendment.
# Don't think they actually mean to specify a jurisdiction
('FED', 'N/A'),
# Only one record: http://cal-access.ss.ca.gov/PDFGen/pdfgen.prg?filingid=1125823&amendid=0
('JR', 'N/A'),
)
juris_cd = fields.CharField(
choices=JURIS_CD_CHOICES,
db_column='JURIS_CD',
max_length=4,
blank=True,
verbose_name='jurisdiction code',
help_text="Office jurisdiction code. See CAL document for a \
list of legal values.",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=39),
DocumentCloud(id='2712033', start_page=49),
DocumentCloud(id='2712034', start_page=63),
],
)
juris_dscr = fields.CharField(
db_column='JURIS_DSCR',
max_length=40,
blank=True,
verbose_name='jurisdiction description',
help_text="Office jurisdiction description provided if the \
jurisdiction code (JURIS_CD) equals other (OTH)."
)
dist_no = fields.CharField(
db_column='DIST_NO',
max_length=4,
blank=True,
        verbose_name='district number',
help_text="Office district number for Senate, Assembly, and Board \
of Equalization districts."
)
OFF_S_H_CD_CHOICES = annotations.sort_choices(annotations.choices.OFF_S_H_CODES)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
verbose_name='office is sought or held code',
help_text='Office sought/held code. Legal values are "S" for \
sought and "H" for held',
choices=OFF_S_H_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711614', start_page=46),
DocumentCloud(id='2711616', start_page=39),
DocumentCloud(id='2712033', start_page=49),
DocumentCloud(id='2712034', start_page=63),
]
)
non_pty_cb = fields.CharField(
db_column='NON_PTY_CB',
max_length=4,
blank=True,
verbose_name='non-party checkbox',
help_text="Non-partisan check-box. Legal values are 'X' and null."
)
party_name = fields.CharField(
db_column='PARTY_NAME',
max_length=200,
blank=True,
verbose_name='party name',
help_text="Name of party (if partisan)"
)
bal_num = fields.CharField(
db_column='BAL_NUM',
max_length=7,
blank=True,
        verbose_name='ballot number',
help_text="Ballot measure number or letter"
)
bal_juris = fields.CharField(
db_column='BAL_JURIS',
max_length=40,
blank=True,
        verbose_name='ballot jurisdiction',
help_text="Jurisdiction of ballot measure"
)
SUP_OPP_CD_CHOICES = annotations.sort_choices(annotations.choices.SUP_OPP_CODES)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support or opposition code",
verbose_name='support or opposition code',
choices=SUP_OPP_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711614', start_page=46),
DocumentCloud(id='2711616', start_page=40),
DocumentCloud(id='2712033', start_page=49),
DocumentCloud(id='2712034', start_page=64),
]
)
year_elect = fields.CharField(
db_column='YEAR_ELECT',
max_length=4,
blank=True,
verbose_name="year of election",
help_text="Year of election",
)
pof_title = fields.CharField(
db_column='POF_TITLE',
max_length=45,
blank=True,
verbose_name='principal officer title',
help_text="Position/title of the principal officer",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'CVR2_SO_CD'
def __str__(self):
return str(self.filing_id)
class CvrCampaignDisclosureCd(CalAccessBaseModel):
"""
The cover page of campaign-disclosure forms.
"""
UNIQUE_KEY = ('FILING_ID', 'AMEND_ID',)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id="2711614", start_page=7),
DocumentCloud(id="2711614", start_page=25, end_page=29),
DocumentCloud(id="2711616", start_page=6, end_page=14),
DocumentCloud(id="2712033", start_page=18, end_page=22),
DocumentCloud(id="2712034", start_page=22, end_page=30),
]
FILING_FORMS = [
annotations.get_form('F401').get_section('CVR'),
annotations.get_form('F425').get_section('P1'),
annotations.get_form('F450').get_section('CVR'),
annotations.get_form('F460').get_section('CVR'),
annotations.get_form('F461').get_section('P1'),
annotations.get_form('F461').get_section('P2'),
annotations.get_form('F465').get_section('P1'),
annotations.get_form('F465').get_section('P2'),
annotations.get_form('F496').get_section('P1'),
annotations.get_form('F497'),
annotations.get_form('F498'),
annotations.get_form('F511'),
annotations.get_form('F900'),
]
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
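    # A hedged usage sketch, not part of the schema: every amendment is stored
    # as its own row, so deduplicating to the latest version of each filing is
    # a common first step (assumes the data has been loaded):
    #   from django.db.models import Max
    #   latest = (
    #       CvrCampaignDisclosureCd.objects
    #       .values('filing_id')
    #       .annotate(amend_id=Max('amend_id'))
    #   )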
amendexp_1 = fields.CharField(
max_length=100,
db_column='AMENDEXP_1',
blank=True,
help_text='Amendment explanation line 1'
)
amendexp_2 = fields.CharField(
max_length=100,
db_column='AMENDEXP_2',
blank=True,
help_text="Amendment explanation line 2"
)
amendexp_3 = fields.CharField(
max_length=100,
db_column='AMENDEXP_3',
blank=True,
help_text="Amendment explanation line 3"
)
assoc_cb = fields.CharField(
max_length=4,
db_column='ASSOC_CB',
blank=True,
help_text="Association Interests info included check-box. Legal \
values are 'X' and null."
)
assoc_int = fields.CharField(
max_length=90,
db_column='ASSOC_INT',
blank=True,
help_text="Description of association interests"
)
bal_id = fields.CharField(
max_length=9,
db_column='BAL_ID',
blank=True,
help_text='.CAL format to db tables doc says: "Not Used-AMS KDE"'
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=4,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
brdbase_yn = fields.CharField(
max_length=1,
db_column='BRDBASE_YN',
blank=True,
help_text="Broad Base Committee (yes/no) check box. Legal \
values are 'Y' or 'N'."
)
# these fields are described in the following docs:
# https://www.documentcloud.org/documents/2711614-CalAccessTablesWeb/pages/25.html
# but are not included on the .tsv file
# bus_adr1 = fields.CharField(
# max_length=55,
# db_column='BUS_ADR1',
# blank=True,
# help_text="First line of the employer/business street address. Applies to the form 461.",
# )
# bus_adr2 = fields.CharField(
# max_length=55,
# db_column='BUS_ADR2',
# blank=True,
# help_text="Second line of the employer/business street address. Applies to the form 461.",
# )
bus_city = fields.CharField(
max_length=30,
db_column='BUS_CITY',
blank=True,
help_text="Employer/business address city"
)
bus_inter = fields.CharField(
max_length=40,
db_column='BUS_INTER',
blank=True,
help_text="Employer/business interest description"
)
bus_name = fields.CharField(
max_length=200,
db_column='BUS_NAME',
blank=True,
help_text="Name of employer/business. Applies to the form 461."
)
bus_st = fields.CharField(
max_length=2,
db_column='BUS_ST',
blank=True,
help_text="Employer/business address state"
)
bus_zip4 = fields.CharField(
max_length=10,
db_column='BUS_ZIP4',
blank=True,
help_text="Employer/business address ZIP Code"
)
busact_cb = fields.CharField(
max_length=10,
db_column='BUSACT_CB',
blank=True,
help_text="Business activity info included check-box. Valid values \
are 'X' and null"
)
busactvity = fields.CharField(
max_length=90,
db_column='BUSACTVITY',
blank=True,
help_text="Business activity description"
)
# these fields are described in the following docs:
# https://www.documentcloud.org/documents/2711614-CalAccessTablesWeb/pages/25.html
# but are not included on the .tsv file
# cand_adr1 = fields.CharField(
# max_length=55,
# db_column='CAND_ADR1',
# blank=True,
# help_text="First line of the candidate/officeholder's street address. \
# Applies to form 460, 465, and 496.",
# )
# cand_adr2 = fields.CharField(
# max_length=55,
# db_column='CAND_ADR2',
# blank=True,
# help_text="Second line of the candidate/officeholder's street address.",
# )
cand_city = fields.CharField(
max_length=30,
db_column='CAND_CITY',
blank=True,
help_text='Candidate/officeholder city'
)
cand_email = fields.CharField(
max_length=60,
db_column='CAND_EMAIL',
blank=True,
help_text='Candidate/officeholder email. This field \
is not contained on the forms.'
)
cand_fax = fields.CharField(
max_length=20,
db_column='CAND_FAX',
blank=True,
help_text='Candidate/officeholder fax. This field \
is not contained on the forms.'
)
cand_id = fields.CharField(
max_length=9,
db_column='CAND_ID',
blank=True,
help_text='.CAL format to db tables doc says: "Not Used-AMS KDE"',
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text='Candidate/officeholder first name'
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name. Applies to forms \
460, 465, and 496."
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's name suffix"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's prefix or title"
)
cand_phon = fields.CharField(
max_length=20,
db_column='CAND_PHON',
blank=True,
help_text='Candidate/officeholder phone'
)
cand_st = fields.CharField(
max_length=4,
db_column='CAND_ST',
blank=True,
help_text="Candidate/officeholder's state"
)
cand_zip4 = fields.CharField(
max_length=10,
db_column='CAND_ZIP4',
blank=True,
help_text="Candidate/officeholder's ZIP Code"
)
cmtte_id = fields.CharField(
max_length=9,
db_column='CMTTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee ID (Filer_id) of recipient Committee who's \
campaign statement is attached. This field applies to the form 401."
)
CMTTE_TYPE_CHOICES = (
('C', 'Candidate or officeholder controlled committee'),
('P', 'Candidate or officeholder primarily formed committee'),
('B', 'Ballot-measure committee'),
('G', 'General-purpose committee'),
)
cmtte_type = fields.CharField(
max_length=1,
db_column='CMTTE_TYPE',
blank=True,
choices=CMTTE_TYPE_CHOICES,
verbose_name="Committee type",
help_text="Type of Recipient Committee. Applies to the 450/460.",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=10),
DocumentCloud(id='2712033', start_page=19),
DocumentCloud(id='2712034', start_page=24),
]
)
control_yn = fields.IntegerField(
null=True,
db_column='CONTROL_YN',
blank=True,
help_text="Controlled Committee (yes/no) check box. Legal values \
are 'Y' or 'N'."
)
dist_no = fields.CharField(
max_length=4,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
elect_date = fields.DateField(
null=True,
db_column='ELECT_DATE',
blank=True,
help_text="Date of the General Election"
)
emplbus_cb = fields.CharField(
max_length=4,
db_column='EMPLBUS_CB',
blank=True,
help_text="Employer/Business Info included check-box. Legal \
values are 'X' or null. Applies to the Form 461."
)
employer = fields.CharField(
max_length=200,
db_column='EMPLOYER',
blank=True,
help_text="Employer. This field is most likely unused."
)
ENTITY_CD_CHOICES = (
('BMC', annotations.choices.CAMPAIGN_ENTITY_CODES['BMC']),
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('CTL', annotations.choices.CAMPAIGN_ENTITY_CODES['CTL']),
('IND', annotations.choices.CAMPAIGN_ENTITY_CODES['IND']),
('MDI', annotations.choices.CAMPAIGN_ENTITY_CODES['MDI']),
('OTH', annotations.choices.CAMPAIGN_ENTITY_CODES['OTH']),
('PTY', annotations.choices.CAMPAIGN_ENTITY_CODES['PTY']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
('SCC', annotations.choices.CAMPAIGN_ENTITY_CODES['SCC']),
('SMO', annotations.choices.CAMPAIGN_ENTITY_CODES['SMO']),
)
entity_cd = fields.CharField(
max_length=4,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
help_text="The entity type of the filer. These codes vary by form type.",
choices=ENTITY_CD_CHOICES,
documentcloud_pages=annotations.choices.DOCS['entity_codes'] + [
DocumentCloud(id='2711616', start_page=6),
DocumentCloud(id='2712033', start_page=18),
DocumentCloud(id='2712034', start_page=22),
]
)
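    # Sketch: because choices are wired into the field, Django generates a
    # get_entity_cd_display() method on each instance, so the human-readable
    # label is available without consulting the CAL docs:
    #   rec = CvrCampaignDisclosureCd.objects.exclude(entity_cd='').first()
    #   print(rec.entity_cd, rec.get_entity_cd_display())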
file_email = fields.CharField(
max_length=60,
db_column='FILE_EMAIL',
blank=True,
help_text="Filer's email address"
)
# these fields are described in the following docs:
# https://www.documentcloud.org/documents/2711614-CalAccessTablesWeb/pages/26.html
# but are not included on the .tsv file
# filer_adr1 = fields.CharField(
# max_length=55,
# db_column='FILER_ADR1',
# blank=True,
# help_text="First line of the filer's address",
# )
# filer_adr2 = fields.CharField(
# max_length=55,
# db_column='FILER_ADR2',
# blank=True,
# help_text="Second line of the filer's address",
# )
filer_city = fields.CharField(
max_length=30,
db_column='FILER_CITY',
blank=True,
help_text="Filer's city"
)
filer_fax = fields.CharField(
max_length=20,
db_column='FILER_FAX',
blank=True,
help_text="Filer's fax"
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=15,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
filer_namf = fields.CharField(
max_length=45,
db_column='FILER_NAMF',
blank=True,
help_text="Filer's first name, if an individual"
)
filer_naml = fields.CharField(
max_length=200,
db_column='FILER_NAML',
help_text="The committee's or organization's name or if an \
individual the filer's last name."
)
filer_nams = fields.CharField(
max_length=10,
db_column='FILER_NAMS',
blank=True,
help_text="Filer's suffix, if an individual"
)
filer_namt = fields.CharField(
max_length=10,
db_column='FILER_NAMT',
blank=True,
help_text="Filer's title or prefix, if an individual"
)
filer_phon = fields.CharField(
max_length=20,
db_column='FILER_PHON',
blank=True,
help_text="Filer phone number"
)
filer_st = fields.CharField(
max_length=4,
db_column='FILER_ST',
blank=True,
help_text="Filer state"
)
filer_zip4 = fields.CharField(
max_length=10,
db_column='FILER_ZIP4',
blank=True,
help_text="Filer ZIP Code"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
FORM_TYPE_CHOICES = (
('F401', annotations.get_form('F401').full_title),
('F425', annotations.get_form('F425').full_title),
('F450', annotations.get_form('F450').full_title),
('F460', annotations.get_form('F460').full_title),
('F461', annotations.get_form('F461').full_title),
('F465', annotations.get_form('F465').full_title),
('F496', annotations.get_form('F496').full_title),
('F497', annotations.get_form('F497').full_title),
('F498', annotations.get_form('F498').full_title),
('F511', annotations.get_form('F511').full_title),
('F900', annotations.get_form('F900').full_title),
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=4,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=18),
DocumentCloud(id='2712034', start_page=22),
]
)
from_date = fields.DateField(
null=True,
db_column='FROM_DATE',
blank=True,
help_text="Reporting period from date"
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES) + (
# alt cases
('sen', annotations.choices.JURIS_CODES['SEN']),
('Gov', annotations.choices.JURIS_CODES['STW']),
# statewide office codes
('ATT', annotations.choices.JURIS_CODES['STW']),
('CON', annotations.choices.JURIS_CODES['STW']),
('GOV', annotations.choices.JURIS_CODES['STW']),
('SOS', annotations.choices.JURIS_CODES['STW']),
('SPM', annotations.choices.JURIS_CODES['STW']),
# assembly member districts
('46', annotations.choices.JURIS_CODES['ASM']),
('55', annotations.choices.JURIS_CODES['ASM']),
# county office codes
('BSU', annotations.choices.JURIS_CODES['CTY']),
('CSU', annotations.choices.JURIS_CODES['CTY']),
('DAT', annotations.choices.JURIS_CODES['CTY']),
('SHC', annotations.choices.JURIS_CODES['CTY']),
# city office codes
('MAY', annotations.choices.JURIS_CODES['CIT']),
('CCM', annotations.choices.JURIS_CODES['CIT']),
# other office codes
('APP', annotations.choices.JURIS_CODES['OTH']),
('BED', annotations.choices.JURIS_CODES['OTH']),
('SCJ', annotations.choices.JURIS_CODES['OTH']),
# probably means 'San Diego', rows with this value are all for Arlie Ricasa's
# Board of Education campaign
('SD', annotations.choices.JURIS_CODES['OTH']),
# probably means Orange County, rows with this value are all for Lou Correa's
# Board of Supervisor's campaign
('OC', annotations.choices.JURIS_CODES['CTY']),
# One record for Joaquin Arambula's state assembly run
('AD', annotations.choices.JURIS_CODES['ASM']),
# Often "State of California" but sometimes State Assembly, State Senate or other juris
('CA', 'Unknown'),
('F', 'Unknown'),
)
juris_cd = fields.CharField(
max_length=3,
choices=JURIS_CD_CHOICES,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=13),
DocumentCloud(id='2712033', start_page=21, end_page=22),
DocumentCloud(id='2712034', start_page=28, end_page=29),
]
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction description if the field JURIS_CD is \
set to city (CIT), county (CTY), local (LOC), or other \
(OTH)."
)
late_rptno = fields.CharField(
max_length=30,
db_column='LATE_RPTNO',
blank=True,
help_text="Identifying Report Number used to distinguish multiple \
reports filed during the same filing period. For example, \
this field allows for multiple form 497s to be filed on the \
same day."
)
# these fields are described in the following docs:
# https://www.documentcloud.org/documents/2711614-CalAccessTablesWeb/pages/27.html
# but are not included on the .tsv file
# mail_adr1 = fields.CharField(
# max_length=55,
# db_column='MAIL_ADR1',
# blank=True,
# help_text="First line of the filer's mailing address. Required if \
# different than the filer's street address.",
# )
# mail_adr2 = fields.CharField(
# max_length=55,
# db_column='MAIL_ADR2',
# blank=True,
# help_text="Second line of the filer's mailing address.",
# )
mail_city = fields.CharField(
max_length=30,
db_column='MAIL_CITY',
blank=True,
help_text="Filer mailing address city"
)
mail_st = fields.CharField(
max_length=4,
db_column='MAIL_ST',
blank=True,
help_text="Filer mailing address state"
)
mail_zip4 = fields.CharField(
max_length=10,
db_column='MAIL_ZIP4',
blank=True,
help_text="Filer mailing address ZIP Code"
)
occupation = fields.CharField(
max_length=60,
db_column='OCCUPATION',
blank=True,
help_text="Occupation. This field is most likely unused."
)
OFF_S_H_CD_CHOICES = (
('S', annotations.choices.OFF_S_H_CODES['S']),
('H', annotations.choices.OFF_S_H_CODES['H']),
('s', annotations.choices.OFF_S_H_CODES['S']),
('h', annotations.choices.OFF_S_H_CODES['H']),
# The codes below appear in the database but are undocumented
('F', 'UNKNOWN'),
('O', 'UNKNOWN'),
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office is sought or held code',
choices=OFF_S_H_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=21),
DocumentCloud(id='2712034', start_page=28),
]
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description if the field OFFICE_CD is set \
to other (OTH)"
)
OFFICE_CD_CHOICES = annotations.sort_choices(annotations.choices.OFFICE_CODES) + (
('Gov', annotations.choices.OFFICE_CODES['GOV']),
('Sen', annotations.choices.OFFICE_CODES['SEN']),
('LOC', annotations.choices.OFFICE_CODES['CCB']),
('LEG', annotations.choices.OFFICE_CODES['SEN']),
('REP', annotations.choices.OFFICE_CODES['ASM']),
        # Rows with this value are all for Local Fire Board campaigns, which we
        # usually categorize as "Other"
('Mem', annotations.choices.OFFICE_CODES['OTH']),
# Looks like a mis-write by Richard Alarcon for Assembly campaign
('CIT', annotations.choices.OFFICE_CODES['ASM']),
        # Rows with these values could be any number of offices
('PAC', 'Unknown'),
('F', 'Unknown'),
# No idea on this one
('COM', 'Unknown'),
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=3,
blank=True,
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=10),
DocumentCloud(id='2712034', start_page=12),
]
)
other_cb = fields.CharField(
max_length=1,
db_column='OTHER_CB',
blank=True,
help_text="Other entity interests info included check-box. Legal \
values are 'X' and null."
)
other_int = fields.CharField(
max_length=90,
db_column='OTHER_INT',
blank=True,
help_text="Other entity interests description"
)
primfrm_yn = fields.CharField(
max_length=1,
db_column='PRIMFRM_YN',
blank=True,
help_text="Primarily Formed Committee (yes/no) checkbox. Legal \
values are 'Y' or 'N'."
)
REC_TYPE_CHOICES = (
("CVR", "Cover Page"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text='Record Type Value: CVR',
documentcloud_pages=[
DocumentCloud(id='2711614', start_page=25),
DocumentCloud(id='2711616', start_page=6),
DocumentCloud(id="2712033", start_page=18),
DocumentCloud(id="2712034", start_page=22),
]
)
report_num = fields.CharField(
max_length=3,
db_column='REPORT_NUM',
help_text="Amendment number, as reported by the filer \
Report Number 000 represents an original filing. 001-999 are amendments."
)
REPORTNAME_CHOICES = (
('450', annotations.get_form('F450').full_title),
('460', annotations.get_form('F460').full_title),
('461', annotations.get_form('F461').full_title),
)
reportname = fields.CharField(
max_length=3,
db_column='REPORTNAME',
blank=True,
choices=REPORTNAME_CHOICES,
help_text="Attached campaign disclosure statement type. Legal \
values are 450, 460, and 461.",
documentcloud_pages=(
DocumentCloud(id='2712033', start_page=15),
DocumentCloud(id='2712033', start_page=20),
DocumentCloud(id='2712034', start_page=19),
DocumentCloud(id='2712034', start_page=26),
)
)
rpt_att_cb = fields.CharField(
max_length=4,
db_column='RPT_ATT_CB',
blank=True,
help_text="Committee Report Attached check-box. Legal values \
are 'X' or null. This field applies to the form 401."
)
rpt_date = fields.DateField(
db_column='RPT_DATE',
null=True,
help_text="Date this report was filed, according to the filer"
)
rptfromdt = fields.DateField(
null=True,
db_column='RPTFROMDT',
blank=True,
help_text="Attached campaign disclosure statement - Period from \
date."
)
rptthrudt = fields.DateField(
null=True,
db_column='RPTTHRUDT',
blank=True,
help_text="Attached campaign disclosure statement - Period \
through date."
)
selfemp_cb = fields.CharField(
max_length=1,
db_column='SELFEMP_CB',
blank=True,
help_text='Self employed check-box. CAL format to db tables doc says: \
"Not Used-AMS KDE"',
)
sponsor_yn = fields.IntegerField(
null=True,
db_column='SPONSOR_YN',
blank=True,
help_text="Sponsored Committee (yes/no) checkbox. Legal values \
are 'Y' or 'N'."
)
STMT_TYPE_CHOICES = (
('PE', annotations.choices.STMT_TYPES['PE']),
('QT', annotations.choices.STMT_TYPES['QT']),
('SA', annotations.choices.STMT_TYPES['SA']),
('SE', annotations.choices.STMT_TYPES['SE']),
('SY', annotations.choices.STMT_TYPES['SY']),
('S1', annotations.choices.STMT_TYPES['S1']),
('S2', annotations.choices.STMT_TYPES['S2']),
('TS', annotations.choices.STMT_TYPES['TS']),
('pe', annotations.choices.STMT_TYPES['PE']),
('qt', annotations.choices.STMT_TYPES['QT']),
('sa', annotations.choices.STMT_TYPES['SA']),
('se', annotations.choices.STMT_TYPES['SE']),
('sy', annotations.choices.STMT_TYPES['SY']),
('ts', annotations.choices.STMT_TYPES['TS']),
("**", "Amendment"),
("1", "Unknown"),
("2", "Unknown"),
("CA", "Unknown"),
("MD", "Unknown"),
("NA", "Unknown"),
("PR", "Unknown"),
("QS", "Unknown"),
("S", "Unknown"),
("x", "Unknown"),
("YE", "Unknown"),
)
stmt_type = fields.CharField(
max_length=2,
db_column='STMT_TYPE',
blank=True,
help_text='Type of statement',
choices=STMT_TYPE_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=7),
DocumentCloud(id='2712033', start_page=18),
DocumentCloud(id='2712034', start_page=23),
]
)
SUP_OPP_CD_CHOICES = (
('S', annotations.choices.SUP_OPP_CODES['S']),
('O', annotations.choices.SUP_OPP_CODES['O']),
('s', annotations.choices.SUP_OPP_CODES['S']),
('o', annotations.choices.SUP_OPP_CODES['O']),
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support or opposition code",
choices=SUP_OPP_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711614', start_page=28),
DocumentCloud(id='2711616', start_page=14),
DocumentCloud(id='2712033', start_page=21),
DocumentCloud(id='2712034', start_page=28),
]
)
thru_date = fields.DateField(
null=True,
db_column='THRU_DATE',
blank=True,
help_text='Reporting period through date'
)
# these fields are described in the following docs:
# https://www.documentcloud.org/documents/2711614-CalAccessTablesWeb/pages/28.html
# but are not included on the .tsv file
# tres_adr1 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR1',
# blank=True,
# help_text="First line of the treasurer or responsible officer's street address."
# )
# tres_adr2 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR2',
# blank=True,
# help_text="Second line of the treasurer or responsible officer's street address."
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="City portion of the treasurer or responsible \
officer's street address."
)
tres_email = fields.CharField(
max_length=60,
db_column='TRES_EMAIL',
blank=True,
help_text="Treasurer or responsible officer's email"
)
tres_fax = fields.CharField(
max_length=20,
db_column='TRES_FAX',
blank=True,
help_text="Treasurer or responsible officer's fax number"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
tres_phon = fields.CharField(
max_length=20,
db_column='TRES_PHON',
blank=True,
help_text="Treasurer or responsible officer's phone number"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer or responsible officer's state"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer or responsible officer's ZIP Code"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'CVR_CAMPAIGN_DISCLOSURE_CD'
ordering = ("-rpt_date",)
def __str__(self):
return str(self.filing_id)
class Cvr2CampaignDisclosureCd(CalAccessBaseModel):
"""
Extra information from campaign-disclosure forms.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=8),
DocumentCloud(id='2711614', start_page=41, end_page=43),
DocumentCloud(id='2711616', start_page=32, end_page=35),
DocumentCloud(id='2712033', start_page=23, end_page=24),
DocumentCloud(id='2712034', start_page=31, end_page=34),
]
FILING_FORMS = [
annotations.get_form('F425').get_section('P1'),
annotations.get_form('F450').get_section('P3'),
annotations.get_form('F460').get_section('CVR2'),
annotations.get_form('F465').get_section('P5'),
]
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Committee identification number, when the entity \
is a committee"
)
control_yn = fields.IntegerField(
null=True,
db_column='CONTROL_YN',
blank=True,
help_text='Controlled Committee (yes/no) checkbox. Legal values \
are "Y" or "N".'
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
ENTITY_CD_CHOICES = (
('ATR', annotations.choices.CAMPAIGN_ENTITY_CODES['ATR']),
('BNM', annotations.choices.CAMPAIGN_ENTITY_CODES['BNM']),
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('CTL', annotations.choices.CAMPAIGN_ENTITY_CODES['CTL']),
('OFF', annotations.choices.CAMPAIGN_ENTITY_CODES['OFF']),
('POF', annotations.choices.CAMPAIGN_ENTITY_CODES['POF']),
('PRO', annotations.choices.CAMPAIGN_ENTITY_CODES['PRO']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
# Values observed in this field but not found in docs
('FIL', 'Unknown'),
('PEX', 'Unknown'),
('RDP', 'Unknown'), # Misspelling of RCP?
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CD_CHOICES,
help_text="Entity code used to identify the type of entity being described \
with in the record.",
documentcloud_pages=annotations.choices.DOCS['entity_codes'] + [
DocumentCloud(id='2711616', start_page=32),
DocumentCloud(id='2712033', start_page=23, end_page=24),
DocumentCloud(id='2712034', start_page=32),
]
)
# enty_adr1 = fields.CharField(
# max_length=55, db_column='ENTY_ADR1', blank=True
# )
# enty_adr2 = fields.CharField(
# max_length=55, db_column='ENTY_ADR2', blank=True
# )
enty_city = fields.CharField(
max_length=30,
db_column='ENTY_CITY',
blank=True,
help_text="Entity city"
)
enty_email = fields.CharField(
max_length=60,
db_column='ENTY_EMAIL',
blank=True,
help_text="Entity email address"
)
enty_fax = fields.CharField(
max_length=20,
db_column='ENTY_FAX',
blank=True,
help_text="Entity fax number"
)
enty_namf = fields.CharField(
max_length=45,
db_column='ENTY_NAMF',
blank=True,
help_text="Entity first name, if an individual"
)
enty_naml = fields.CharField(
max_length=200,
db_column='ENTY_NAML',
blank=True,
help_text="Entity name, or last name if an individual"
)
enty_nams = fields.CharField(
max_length=10,
db_column='ENTY_NAMS',
blank=True,
help_text="Entity suffix, if an individual"
)
enty_namt = fields.CharField(
max_length=10,
db_column='ENTY_NAMT',
blank=True,
help_text="Entity prefix or title, if an individual"
)
enty_phon = fields.CharField(
max_length=20,
db_column='ENTY_PHON',
blank=True,
help_text="Entity phone number"
)
enty_st = fields.CharField(
max_length=2,
db_column='ENTY_ST',
blank=True,
help_text="Entity state"
)
enty_zip4 = fields.CharField(
max_length=10,
db_column='ENTY_ZIP4',
blank=True,
help_text="Entity ZIP code"
)
F460_PART_CHOICES = (
('3', 'Part 3: Committee Information'),
# Part 4 became Part 5 somewhere between 1999 and 2001
# Seems like the use of the old and new versions of the forms overlapped slightly
# https://gist.github.com/gordonje/fb858960bc249cf9a2a581212eccbb8b
('4a', 'Part 4a: Officeholder or Candidate Controlled Committee'),
('4A', 'Part 4a: Officeholder or Candidate Controlled Committee'),
('4b', 'Part 4b: Related Committees Not Included in this Statement'),
('4B', 'Part 4b: Related Committees Not Included in this Statement'),
('5a', 'Part 5a: Officeholder or Candidate Controlled Committee'),
('5A', 'Part 5a: Officeholder or Candidate Controlled Committee'),
('5b', 'Part 5b: Related Committees Not Included in this Statement'),
('5B', 'Part 5b: Related Committees Not Included in this Statement'),
# On the 1999 Form...
('6', 'Part 6: Primarily Formed Committee'),
# On 2001 form...
('6a', 'Part 6a: Name of Ballot Measure'),
('6A', 'Part 6a: Name of Ballot Measure'),
('6b', 'Part 6b: Name of Officeholder, Candidate, or Proponent'),
('6B', 'Part 6b: Name of Officeholder, Candidate, or Proponent'),
('7', 'Part 7: Primarily Formed Committee'),
)
f460_part = fields.CharField(
max_length=2,
db_column='F460_PART',
blank=True,
choices=F460_PART_CHOICES,
help_text="Part of 460 cover page coded on ths cvr2 record",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=32),
DocumentCloud(id='2712033', start_page=24),
DocumentCloud(id='2712034', start_page=32),
]
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
FORM_TYPE_CHOICES = tuple([(f.form.id, f.full_title) for f in FILING_FORMS])
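    # Unlike the comprehensions elsewhere in this module that key choices by a
    # section's db_value, this one keys by the parent form's id (f.form.id).
    # Each section in FILING_FORMS above belongs to a distinct form, so the
    # resulting choice keys are still unique.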
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=4,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=23),
DocumentCloud(id='2712034', start_page=31),
]
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES) + (
('sen', annotations.choices.JURIS_CODES['SEN']),
# looks like the Arlie Ricasa Campaign was consistently making this mis-write
('SD', annotations.choices.JURIS_CODES['ASM']),
# ditto for Kevin de Leon Believing in A Better California
('se', annotations.choices.JURIS_CODES['SEN']),
# ditto for Friends of Bob Dutton
('F', annotations.choices.JURIS_CODES['ASM']),
# ditto for Friends To Re-Elect Tonia For 7th District
('LBC', annotations.choices.JURIS_CODES['CIT']),
# Several different filers have made this mis-write
# Usually they mean Statewide, but sometimes they mean State Assembly or City
('CA', annotations.choices.JURIS_CODES['STW']),
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code",
choices=JURIS_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=24),
DocumentCloud(id='2712034', start_page=33),
DocumentCloud(id='2711616', start_page=35),
]
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
# mail_adr1 = fields.CharField(
# max_length=55, db_column='MAIL_ADR1', blank=True
# )
# mail_adr2 = fields.CharField(
# max_length=55, db_column='MAIL_ADR2', blank=True
# )
mail_city = fields.CharField(
max_length=30,
db_column='MAIL_CITY',
blank=True,
help_text="Filer's mailing city"
)
mail_st = fields.CharField(
max_length=2,
db_column='MAIL_ST',
blank=True,
help_text="Filer's mailing state"
)
mail_zip4 = fields.CharField(
max_length=10,
db_column='MAIL_ZIP4',
blank=True,
help_text="Filer's mailing ZIP Code"
)
OFF_S_H_CD_CHOICES = (
('S', annotations.choices.OFF_S_H_CODES['S']),
('H', annotations.choices.OFF_S_H_CODES['H']),
('s', annotations.choices.OFF_S_H_CODES['S']),
# Bob Dutton meant 'Sought'
("F", annotations.choices.OFF_S_H_CODES['S']),
# This one actually says 'Held'. Maybe a mis-read?
("T", annotations.choices.OFF_S_H_CODES['H']),
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office is sought or held code',
choices=OFF_S_H_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=35),
DocumentCloud(id='2712033', start_page=24),
DocumentCloud(id='2712034', start_page=34),
]
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
OFFICE_CD_CHOICES = annotations.sort_choices(annotations.choices.OFFICE_CODES) + (
# looks like the Richard Alarcon for Assembly made this mis-write
('CIT', annotations.choices.OFFICE_CODES['ASM']),
# ditto for Isadore Hall for Assembly
('CTL', annotations.choices.OFFICE_CODES['ASM']),
# ditto for Friends of Bob Dutton
('F', annotations.choices.OFFICE_CODES['ASM']),
# ditto for Henry Perea
('ST', annotations.choices.OFFICE_CODES['ASM']),
# This one is all over the board
('PAC', 'Unknown'),
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=3,
blank=True,
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CD_CHOICES,
documentcloud_pages=annotations.choices.DOCS['office_codes'],
)
REC_TYPE_CHOICES = (
("CVR2", "Cover, Page 2"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text="Record Type Value: CVR2",
documentcloud_pages=[
DocumentCloud(id='2711614', start_page=41),
DocumentCloud(id='2711616', start_page=32),
DocumentCloud(id='2712033', start_page=23),
DocumentCloud(id='2712034', start_page=31),
]
)
SUP_OPP_CD_CHOICES = (
('S', annotations.choices.SUP_OPP_CODES['S']),
('O', annotations.choices.SUP_OPP_CODES['O']),
('s', annotations.choices.SUP_OPP_CODES['S']),
('o', annotations.choices.SUP_OPP_CODES['O']),
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support or opposition code",
choices=SUP_OPP_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711614', start_page=41),
DocumentCloud(id='2711616', start_page=35),
]
)
title = fields.CharField(
max_length=90,
db_column='TITLE',
blank=True,
help_text="Official title of filing officer. Applies to the form 465."
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'CVR2_CAMPAIGN_DISCLOSURE_CD'
def __str__(self):
return str(self.filing_id)
class Cvr3VerificationInfoCd(CalAccessBaseModel):
"""
Verification information from campaign-disclosure forms.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=46, end_page=47),
DocumentCloud(id='2711616', start_page=41, end_page=42),
DocumentCloud(id='2712033', start_page=25),
DocumentCloud(id='2712033', start_page=50),
DocumentCloud(id='2712034', start_page=34),
DocumentCloud(id='2712034', start_page=64),
]
FILING_FORMS = [
annotations.get_form('F400').get_section('P5'),
annotations.get_form('F401').get_section('CVR'),
annotations.get_form('F402').get_section('VER'),
annotations.get_form('F410').get_section('P3'),
annotations.get_form('F425').get_section('P3'),
annotations.get_form('F450').get_section('P4'),
annotations.get_form('F460').get_section('CVR'),
annotations.get_form('F461').get_section('P4'),
annotations.get_form('F465').get_section('P6'),
annotations.get_form('F511'),
annotations.get_form('F900'),
]
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("CVR3", "Cover Page 3, Verification Information"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text="Record Type Value: CVR3",
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=25),
DocumentCloud(id='2712033', start_page=50),
DocumentCloud(id='2712034', start_page=34),
DocumentCloud(id='2712034', start_page=64),
]
)
FORM_TYPE_CHOICES = (
('F400', annotations.get_form('F400').get_section('P5').full_title),
('F401', annotations.get_form('F401').get_section('CVR').full_title),
('F402', annotations.get_form('F402').get_section('VER').full_title),
('F410', annotations.get_form('F410').get_section('P3').full_title),
('F425', annotations.get_form('F425').get_section('P3').full_title),
('F450', annotations.get_form('F450').get_section('P4').full_title),
('F460', annotations.get_form('F460').get_section('CVR').full_title),
('F461', annotations.get_form('F461').get_section('P4').full_title),
('F465', annotations.get_form('F465').get_section('P6').full_title),
('F511', annotations.get_form('F511').full_title),
('F900', annotations.get_form('F900').full_title),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
help_text='Name of the source filing form or schedule',
db_index=True,
choices=FORM_TYPE_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=50),
DocumentCloud(id='2712034', start_page=64),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CD_CHOICES = (
# Codes explicitly allowed for this field, according to documentation
('ATR', annotations.choices.CAMPAIGN_ENTITY_CODES['ATR']),
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('TRE', annotations.choices.CAMPAIGN_ENTITY_CODES['TRE']),
('OFF', annotations.choices.CAMPAIGN_ENTITY_CODES['OFF']),
('PRO', annotations.choices.CAMPAIGN_ENTITY_CODES['PRO']),
('SPO', annotations.choices.CAMPAIGN_ENTITY_CODES['SPO']),
# Lower case versions of valid codes
        ('atr', annotations.choices.CAMPAIGN_ENTITY_CODES['ATR']),
        ('tre', annotations.choices.CAMPAIGN_ENTITY_CODES['TRE']),
('cao', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
# Other known codes observed in this field
('MDI', annotations.choices.CAMPAIGN_ENTITY_CODES['MDI']),
('POF', annotations.choices.CAMPAIGN_ENTITY_CODES['POF']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
# Misspelling of 'CAO', 'Candidate/officeholder'
('COA', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
# Other unknown values observed
('0', 'Unknown'),
('BBB', 'Unknown'),
('CON', 'Unknown'),
('MAI', 'Unknown'),
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
max_length=3,
blank=True,
verbose_name='entity code',
choices=ENTITY_CD_CHOICES,
help_text='Entity Code', # describing verifier?
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=9),
DocumentCloud(id='2712033', start_page=25),
DocumentCloud(id='2712034', start_page=11),
DocumentCloud(id='2712034', start_page=34),
]
)
sig_date = fields.DateField(
verbose_name='signed date',
db_column='SIG_DATE',
blank=True,
null=True,
help_text='date when signed',
)
sig_loc = fields.CharField(
verbose_name='signed location',
db_column='SIG_LOC',
max_length=39,
blank=True,
help_text='city and state where signed',
)
sig_naml = fields.CharField(
verbose_name='last name',
db_column='SIG_NAML',
max_length=500,
blank=True,
help_text='last name of the signer',
)
sig_namf = fields.CharField(
verbose_name='first name',
db_column='SIG_NAMF',
max_length=45,
blank=True,
help_text='first name of the signer',
)
sig_namt = fields.CharField(
verbose_name='title',
db_column='SIG_NAMT',
max_length=10,
blank=True,
help_text='title of the signer',
)
sig_nams = fields.CharField(
verbose_name='suffix',
db_column='SIG_NAMS',
max_length=8,
blank=True,
help_text='suffix of the signer',
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'CVR3_VERIFICATION_INFO_CD'
ordering = ("-sig_date",)
def __str__(self):
return str(self.filing_id)
class DebtCd(CalAccessBaseModel):
"""
Itemized campaign debts.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=47, end_page=49),
        DocumentCloud(id='2711616', start_page=48, end_page=49),
DocumentCloud(id='2712033', start_page=33, end_page=34),
DocumentCloud(id='2712034', start_page=45, end_page=46),
]
FILING_FORMS = [
annotations.get_form('F460').get_section('F'),
]
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amt_incur = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMT_INCUR',
help_text='Amount incurred this period',
)
amt_paid = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMT_PAID',
help_text='Amount paid this period.'
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text='Back reference to a transaction identifier \
of a parent record.'
)
beg_bal = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='BEG_BAL',
help_text='Outstanding balance at beginning of period',
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text='Committee identification number',
)
end_bal = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='END_BAL',
help_text='Outstanding balance at close of this period',
)
ENTITY_CD_CHOICES = (
('BNM', annotations.choices.CAMPAIGN_ENTITY_CODES['BNM']),
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('IND', annotations.choices.CAMPAIGN_ENTITY_CODES['IND']),
('OTH', annotations.choices.CAMPAIGN_ENTITY_CODES['OTH']),
('PTY', annotations.choices.CAMPAIGN_ENTITY_CODES['PTY']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
('SCC', annotations.choices.CAMPAIGN_ENTITY_CODES['SCC']),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CD_CHOICES,
help_text='Entity code describing the payee',
documentcloud_pages=annotations.choices.DOCS['entity_codes'] + [
DocumentCloud(id='2712033', start_page=33),
DocumentCloud(id='2712034', start_page=45),
]
)
EXPN_CODE_CHOICES = annotations.sort_choices(annotations.choices.EXPENSE_CODES) + (
# alt cases of valid codes
('Fnd', annotations.choices.EXPENSE_CODES['FND']),
('ofc', annotations.choices.EXPENSE_CODES['OFC']),
# printed this way on the pdfs, but probably meant consultant code
("'CN", annotations.choices.EXPENSE_CODES['CNS']),
# Other codes observed in the table that are not documented by the state
("*", "Unknown"),
("AIR", "Unknown"),
("BUS", "Unknown"),
("C", "Unknown"),
("CAM", "Unknown"),
("CC", "Unknown"),
("COM", "Unknown"),
("CON", "Unknown"),
("CSN", "Unknown"),
("DEP", "Unknown"),
("EVE", "Unknown"),
("F", "Unknown"),
("FED", "Unknown"),
("fns", "Unknown"),
("G", "Unknown"),
("GGG", "Unknown"),
("HOT", "Unknown"),
("L", "Unknown"),
("LDF", "Unknown"),
("MEE", "Unknown"),
("N", "Unknown"),
("O", "Unknown"),
("OTH", "Unknown"), # Other?
("P", "Unknown"),
("PEN", "Unknown"),
("S", "Unknown"),
("SPE", "Unknown"),
("STA", "Unknown"),
("T", "Unknown"),
("TAX", "Unknown"),
("TRA", "Unknown"),
("V", "Unknown"),
("X", "Unknown"),
)
expn_code = fields.CharField(
max_length=4,
db_column='EXPN_CODE',
blank=True,
verbose_name='expense code',
help_text="Expense Code",
choices=EXPN_CODE_CHOICES,
documentcloud_pages=annotations.choices.DOCS['expense_codes']
)
expn_dscr = fields.CharField(
max_length=400,
db_column='EXPN_DSCR',
blank=True,
verbose_name="expense description",
help_text='Purpose of expense and/or description/explanation',
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number of the parent filing",
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
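    # Illustrative only: with the single Schedule F section above, the
    # comprehension yields a one-element choices tuple along the lines of
    # (('F', 'Form 460 ... Schedule F'),); the exact db_value and full_title
    # strings come from the annotations module.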
form_type = fields.CharField(
max_length=1,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Schedule Name/ID: (F - Sched F / Accrued Expenses)',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=33),
DocumentCloud(id='2712034', start_page=45),
]
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Record line item number",
db_index=True,
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text='Memo amount flag',
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text='Reference to text contained in a TEXT record.'
)
# payee_adr1 = fields.CharField(
# max_length=55, db_column='PAYEE_ADR1', blank=True
# )
# payee_adr2 = fields.CharField(
# max_length=55, db_column='PAYEE_ADR2', blank=True
# )
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
        help_text='Payee\'s city',
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text='Payee\'s first name if the payee is an individual',
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
help_text="Payee's business name or last name if the payee is an \
individual."
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text='Payee\'s name suffix if the payee is an individual',
)
payee_namt = fields.CharField(
max_length=100,
db_column='PAYEE_NAMT',
blank=True,
help_text='Payee\'s prefix or title if the payee is an individual',
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text='Payee\'s state',
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text='Payee\'s ZIP Code',
)
REC_TYPE_CHOICES = (
("DEBT", "DEBT"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
        help_text='Record Type Value: DEBT',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=33, end_page=34),
DocumentCloud(id='2712034', start_page=45, end_page=46),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Transaction identifier - permanent value unique to \
this item',
)
# tres_adr1 = fields.CharField(
# max_length=55, db_column='TRES_ADR1', blank=True
# )
# tres_adr2 = fields.CharField(
# max_length=55, db_column='TRES_ADR2', blank=True
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text='City portion of the treasurer or responsible \
officer\'s street address',
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text='Treasurer or responsible officer\'s first name'
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text='Treasurer or responsible officer\'s last name'
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text='Treasurer or responsible officer\'s suffix',
)
tres_namt = fields.CharField(
max_length=100,
db_column='TRES_NAMT',
blank=True,
help_text='Treasurer or responsible officer\'s prefix or title',
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text='State portion of the treasurer or responsible \
officer\'s address',
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text='ZIP Code portion of the treasurer or responsible \
officer\'s address',
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text='Related item on other schedule has same \
transaction identifier. "X" indicates this condition is true'
)
    xref_schnm = fields.CharField(
        max_length=2,
        db_column='XREF_SCHNM',
        blank=True,
        help_text='Related record is included on Schedule C.'
    )
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'DEBT_CD'
def __str__(self):
return str(self.filing_id)
class ExpnCd(CalAccessBaseModel):
"""
Itemized campaign expenditures.
"""
UNIQUE_KEY = (
'FILING_ID',
'AMEND_ID',
'LINE_ITEM',
'REC_TYPE',
'FORM_TYPE'
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=8),
DocumentCloud(id='2711614', start_page=53, end_page=56),
DocumentCloud(id='2711616', start_page=45, end_page=48),
DocumentCloud(id='2712033', start_page=31, end_page=32),
DocumentCloud(id='2712034', start_page=42, end_page=44),
]
FILING_FORMS = [
annotations.get_form('F450').get_section('P5'),
annotations.get_form('F460').get_section('D'),
annotations.get_form('F460').get_section('E'),
annotations.get_form('F460').get_section('G'),
annotations.get_form('F461').get_section('P5'),
annotations.get_form('F465').get_section('P3'),
annotations.get_form('F900'),
]
agent_namf = fields.CharField(
max_length=45,
db_column='AGENT_NAMF',
blank=True,
help_text="Agent of Ind. Contractor's First name"
)
agent_naml = fields.CharField(
max_length=200,
db_column='AGENT_NAML',
blank=True,
help_text="Agent of Ind. Contractor's Last name (Sched G)"
)
agent_nams = fields.CharField(
max_length=10,
db_column='AGENT_NAMS',
blank=True,
help_text="Agent of Ind. Contractor's Suffix"
)
agent_namt = fields.CharField(
max_length=10,
db_column='AGENT_NAMT',
blank=True,
help_text="Agent of Ind. Contractor's Prefix or Title"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amount = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMOUNT',
help_text="Amount of Payment"
)
bakref_tid = fields.CharField(
verbose_name='back reference transaction id',
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to a Tran_ID of a 'parent' record"
)
bal_juris = fields.CharField(
verbose_name='ballot measure jurisdiction',
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure's jurisdiction"
)
bal_name = fields.CharField(
verbose_name='ballot measure name',
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot Measure Name"
)
bal_num = fields.CharField(
verbose_name='ballot measure number',
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot Number or Letter"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate's First name"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate's Last name"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate's Suffix"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate's Prefix or Title"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Committee ID (If [COM|RCP] & no ID#, Treas info Req.)"
)
cum_oth = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_OTH',
blank=True,
help_text="Cumulative / 'Other' (No Cumulative on Sched E & G)"
)
cum_ytd = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_YTD',
blank=True,
help_text="Cumulative / Year-to-date amount \
(No Cumulative on Sched E & G)"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="Office District Number (Req. if Juris_Cd=[SEN|ASM|BOE]"
)
ENTITY_CD_CHOICES = (
# Codes explicitly allowed for this field, according to documentation
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('IND', annotations.choices.CAMPAIGN_ENTITY_CODES['IND']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
('OTH', annotations.choices.CAMPAIGN_ENTITY_CODES['OTH']),
('PTY', annotations.choices.CAMPAIGN_ENTITY_CODES['PTY']),
('SCC', annotations.choices.CAMPAIGN_ENTITY_CODES['SCC']),
# Other known codes observed in this field
('BNM', annotations.choices.CAMPAIGN_ENTITY_CODES['BNM']),
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('MBR', annotations.choices.LOBBYING_ENTITY_CODES['MBR']),
('OFF', annotations.choices.CAMPAIGN_ENTITY_CODES['OFF']),
# Unknown codes observed in this field
('0', 'Unknown'),
('PTH', 'Unknown'),
('RFD', 'Unknown'), # 'RFD' from EXPN_CD? Request For Development?
)
entity_cd = fields.CharField(
choices=ENTITY_CD_CHOICES,
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
help_text='Entity Code describing payee',
documentcloud_pages=annotations.choices.DOCS['entity_codes'] + [
DocumentCloud(id='2712033', start_page=31),
DocumentCloud(id='2712034', start_page=42),
]
)
expn_chkno = fields.CharField(
max_length=20,
db_column='EXPN_CHKNO',
blank=True,
help_text="Check Number (Optional)"
)
EXPN_CODE_CHOICES = annotations.sort_choices(annotations.choices.EXPENSE_CODES) + (
('ctb', annotations.choices.EXPENSE_CODES['CTB']),
('ikd', annotations.choices.EXPENSE_CODES['IKD']),
('Mon', annotations.choices.EXPENSE_CODES['MON']),
('ofc', annotations.choices.EXPENSE_CODES['OFC']),
('OFc', annotations.choices.EXPENSE_CODES['OFC']),
('Ofc', annotations.choices.EXPENSE_CODES['OFC']),
# Codes observed in this field, but not found in docs
("", "Unknown"),
("*", "Unknown"),
("0", "Unknown"),
("001", "Unknown"),
("011", "Unknown"),
("200", "Unknown"),
("401", "Unknown"),
("ADV", "Unknown"),
("ANN", "Unknown"),
("APR", "Unknown"),
("AUG", "Unknown"),
("AUT", "Unknown"),
("Ban", "Unknown"),
("BAN", "Unknown"),
("BOO", "Unknown"),
("BOX", "Unknown"),
("C", "Unknown"),
("CAT", "Unknown"),
("CC", "Unknown"),
("CHE", "Unknown"),
("CIV", "Unknown"),
("CNT", "Unknown"),
("CON", "Unknown"),
("COP", "Unknown"),
("CRE", "Unknown"),
("CSN", "Unknown"),
("CT", "Unknown"),
(",CT", "Unknown"),
(".CT", "Unknown"),
("CTN", "Unknown"),
("CVD", "Unknown"),
("DAT", "Unknown"),
("DEC", "Unknown"),
("Dem", "Unknown"),
("DIN", "Unknown"),
("Don", "Unknown"),
("DON", "Unknown"),
("Ear", "Unknown"),
("EIM", "Unknown"),
("EMP", "Unknown"),
("F", "Unknown"),
("FAX", "Unknown"),
("FDN", "Unknown"),
("FED", "Unknown"),
("FEE", "Unknown"),
("FIN", "Unknown"),
("Fun", "Unknown"),
("FUN", "Unknown"),
("G", "Unknown"),
("GEN", "Unknown"),
("GGG", "Unknown"),
("GOT", "Unknown"),
("IEs", "Unknown"),
("IN-", "Unknown"),
("Ina", "Unknown"),
("INK", "Unknown"), # Misspelling of 'IKD' ('In-kind')?
("INS", "Unknown"),
("ITE", "Unknown"),
("JAN", "Unknown"),
("JUL", "Unknown"),
("JUN", "Unknown"),
("KIC", "Unknown"),
("L", "Unknown"),
("LEV", "Unknown"),
("Lit", "Unknown"),
("LN#", "Unknown"),
("LOG", "Unknown"),
("M", "Unknown"),
("MAI", "Unknown"),
("Mar", "Unknown"),
("MAR", "Unknown"),
("MAY", "Unknown"),
("MED", "Unknown"),
("MEE", "Unknown"),
("MGT", "Unknown"),
("Mis", "Unknown"),
("MRB", "Unknown"),
("NGP", "Unknown"), # Nathaniel G. Pearlman?
("NON", "Unknown"),
("NOT", "Unknown"),
("NOV", "Unknown"),
("O", "Unknown"),
("OCT", "Unknown"),
(".OF", "Unknown"),
("OFF", "Unknown"), # Misspelling 'OFC' ('Office expenses')?
("OPE", "Unknown"),
("OTH", "Unknown"), # Other?
("P", "Unknown"),
("Pac", "Unknown"),
("PAI", "Unknown"),
("PAR", "Unknown"),
("PAY", "Unknown"),
("PEN", "Unknown"),
("PMT", "Unknown"),
(".PO", "Unknown"),
("Pos", "Unknown"),
("PRE", "Unknown"),
("PRI", "Unknown"),
("PRP", "Unknown"),
("R", "Unknown"),
(".Re", "Unknown"),
(".RE", "Unknown"),
("REF", "Unknown"),
("REI", "Unknown"),
("RFP", "Unknown"),
("S", "Unknown"),
("S-A", "Unknown"),
("SA", "Unknown"),
("Sal", "Unknown"),
("S C", "Unknown"),
("S.C", "Unknown"),
("SCU", "Unknown"),
("SEE", "Unknown"),
("SEN", "Unknown"),
("SEP", "Unknown"),
("S.M.", "Unknown"),
("SOF", "Unknown"),
("SWI", "Unknown"),
("T", "Unknown"),
("TAX", "Unknown"),
("TB", "Unknown"),
("TB,", "Unknown"),
("TIC", "Unknown"),
("Tor", "Unknown"),
("TRA", "Unknown"),
("TRF", "Unknown"),
("TRV", "Unknown"),
("UN", "Unknown"),
("UTI", "Unknown"),
("V", "Unknown"),
("VEN", "Unknown"),
("-VO", "Unknown"),
("VOI", "Unknown"),
("VOY", "Unknown"),
("WI", "Unknown"),
("x", "Unknown"),
("X", "Unknown"),
('S-6', 'Unknown'),
('S.M', 'Unknown'),
('S-4', 'Unknown'),
('SA:', 'Unknown'),
('100', 'Unknown'),
('RFN', 'Unknown'),
('REN', 'Unknown'),
('003', 'Unknown'),
('S-1', 'Unknown'),
('08', 'Unknown'),
)
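    # Sketch: the choices double as a cleaning table. Building a dict lets
    # an analysis script label raw codes and flag the undocumented ones:
    #
    #   EXPN_CODE_LOOKUP = dict(EXPN_CODE_CHOICES)
    #   label = EXPN_CODE_LOOKUP.get(raw_code, 'Unknown')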
expn_code = fields.CharField(
max_length=4,
db_column='EXPN_CODE',
blank=True,
choices=EXPN_CODE_CHOICES,
verbose_name="expense code",
help_text="The type of expenditure",
documentcloud_pages=annotations.choices.DOCS['expense_codes']
)
expn_date = fields.DateField(
null=True,
db_column='EXPN_DATE',
blank=True,
verbose_name="expense date",
help_text="Date of Expenditure (Note: Date not on Sched E & G)"
)
expn_dscr = fields.CharField(
max_length=400,
db_column='EXPN_DSCR',
verbose_name="expense description",
blank=True,
help_text="Purpose of expense and/or description/explanation"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=6,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=31),
DocumentCloud(id='2712034', start_page=42),
]
)
g_from_e_f = fields.CharField(
max_length=1,
db_column='G_FROM_E_F',
blank=True,
help_text="Back Reference from Sched G to Sched 'E' or 'F'?"
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES) + (
# alt casing of valid values
('Cit', annotations.choices.JURIS_CODES['CIT']),
('sen', annotations.choices.JURIS_CODES['SEN']),
('Sen', annotations.choices.JURIS_CODES['SEN']),
('stw', annotations.choices.JURIS_CODES['STW']),
# statewide office codes
('APP', annotations.choices.JURIS_CODES['STW']),
('ASR', annotations.choices.JURIS_CODES['CTY']),
('ATT', annotations.choices.JURIS_CODES['STW']),
('GOV', annotations.choices.JURIS_CODES['STW']),
('LTG', annotations.choices.JURIS_CODES['STW']),
('SOS', annotations.choices.JURIS_CODES['STW']),
('SUP', annotations.choices.JURIS_CODES['STW']),
('TRE', annotations.choices.JURIS_CODES['STW']),
# county office codes
('BSU', annotations.choices.JURIS_CODES['CTY']),
('CSU', annotations.choices.JURIS_CODES['CTY']),
# city office codes
('ES', annotations.choices.JURIS_CODES['CIT']),
('SM', annotations.choices.JURIS_CODES['CIT']),
# "other" office codes
('BED', annotations.choices.JURIS_CODES['OTH']),
('CCB', annotations.choices.JURIS_CODES['OTH']),
('CCM', annotations.choices.JURIS_CODES['OTH']),
('PDR', annotations.choices.JURIS_CODES['OTH']),
# state senate districts
('12', annotations.choices.JURIS_CODES['SEN']),
# Ballot Propositions
('4', annotations.choices.JURIS_CODES['STW']),
('8', annotations.choices.JURIS_CODES['STW']),
('27', annotations.choices.JURIS_CODES['STW']),
('93', annotations.choices.JURIS_CODES['STW']),
('98', annotations.choices.JURIS_CODES['STW']),
# Community College Board, except that one time where it's City Council
('CLB', 'Unknown'),
# Sometimes these are Assembly Members, sometimes State Senators,
# sometimes Public Employees Retirement System
('PER', 'Unknown'),
# Misprint
('Boa', annotations.choices.JURIS_CODES['BOE']),
# Usually Assembly Member except for those two times when it's governor and attorney general
('Sta', 'Unknown'),
('STA', 'Unknown'),
# All over the board
('CA', 'Unknown'),
('SAN', 'Unknown'),
('ES ', 'Unknown'),
('CON', 'Unknown'),
('LA', 'Unknown'),
('LBC', 'Unknown'),
('OR', 'Unknown'),
('SB', 'Unknown'),
('WES', 'Unknown'),
('BM', 'Unknown'),
('(Lo', 'Unknown'),
('(Ci', 'Unknown'),
('vty', 'Unknown'),
('OC', 'Unknown'),
('SM ', 'Unknown'),
('ASS', 'Unknown'),
('JR', 'Unknown'),
('O', 'Unknown'),
('ADM', 'Unknown'),
('SAC', 'Unknown'),
('US', 'Unknown'),
('J', 'Unknown'),
('LOS', 'Unknown'),
('IRV', 'Unknown'),
('CO', 'Unknown'),
('JRS', 'Unknown'),
('NEV', 'Unknown'),
('IB', 'Unknown'),
('A', 'Unknown'),
('Ass', 'Unknown'),
('SD', 'Unknown'),
('D', 'Unknown'),
('SEC', 'Unknown'),
('SC', 'Unknown'),
('RB', 'Unknown'),
('GEN', 'Unknown'),
('CC', 'Unknown'),
('FED', 'Unknown'),
('FM', 'Unknown'),
('R', 'Unknown'),
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
choices=JURIS_CD_CHOICES,
help_text="Office Jurisdiction Code",
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=32),
DocumentCloud(id='2712034', start_page=44),
]
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction Description \
(Req. if Juris_Cd=[CIT|CTY|LOC|OTH])"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo Amount? (Date/Amount are informational only). For Form"
" 460 filings, this indicates the record is a sub-item and "
"its amount is included in another item reported on the "
"filing."
)
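    # Sketch (assumes the conventional 'X' flag value): excluding memo
    # sub-items avoids double-counting when totaling Form 460 expenditures:
    #
    #   from django.db.models import Sum
    #   ExpnCd.objects.exclude(memo_code='X').aggregate(total=Sum('amount'))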
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record."
)
OFF_S_H_CD_CHOICES = annotations.sort_choices(annotations.choices.OFF_S_H_CODES) + (
('s', annotations.choices.OFF_S_H_CODES['S']),
('h', annotations.choices.OFF_S_H_CODES['H']),
# The codes below appear in the database but are undocumented
        ('A', 'Unknown'),
        ('a', 'Unknown'),
        ('8', 'Unknown'),
        ('O', 'Unknown'),
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office is sought or held code',
choices=OFF_S_H_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=32),
DocumentCloud(id='2712034', start_page=44),
]
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office Sought Description (Req. if Office_Cd=OTH)"
)
OFFICE_CD_CHOICES = annotations.sort_choices(annotations.choices.OFFICE_CODES) + (
# alt cases for valid codes
('Cou', annotations.choices.OFFICE_CODES['COU']),
('sen', annotations.choices.OFFICE_CODES['SEN']),
('AtT', annotations.choices.OFFICE_CODES['ATT']),
('May', annotations.choices.OFFICE_CODES['MAY']),
('Sen', annotations.choices.OFFICE_CODES['SEN']),
('asm', annotations.choices.OFFICE_CODES['ASM']),
('gov', annotations.choices.OFFICE_CODES['GOV']),
('Gov', annotations.choices.OFFICE_CODES['GOV']),
# unknown codes
('LA', 'Unknown'),
('HOU', 'Unknown'),
('LAD', 'Unknown'),
('11A', 'Unknown'),
('001', 'Unknown'),
('BM', 'Unknown'),
('AS1', 'Unknown'),
('ASS', 'Unknown'),
('73', 'Unknown'),
('CIT', 'Unknown'),
('HSE', 'Unknown'),
('LT', 'Unknown'),
('CTY', 'Unknown'),
('STA', 'Unknown'),
('GO', 'Unknown'),
('CO', 'Unknown'),
('A', 'Unknown'),
('PAC', 'Unknown'),
('REP', 'Unknown'),
('OFF', 'Unknown'),
('SE', 'Unknown'),
('031', 'Unknown'),
('COM', 'Unknown'),
('ASB', 'Unknown'),
('OT', 'Unknown'),
('NAT', 'Unknown'),
('CC', 'Unknown'),
('SWE', 'Unknown'),
('FED', 'Unknown'),
('STE', 'Unknown'),
('H', 'Unknown'),
('DA', 'Unknown'),
('S', 'Unknown'),
('AS', 'Unknown'),
('OF', 'Unknown'),
('LEG', 'Unknown'),
('STW', 'Unknown'),
('ST', 'Unknown'),
('PRE', 'Unknown'),
('/S', 'Unknown'),
('U S', 'Unknown'),
('O', 'Unknown'),
('8', 'Unknown'),
('C:S', 'Unknown'),
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=3,
blank=True,
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CD_CHOICES,
documentcloud_pages=annotations.choices.DOCS['office_codes']
)
# payee_adr1 = fields.CharField(
# max_length=55,
# db_column='PAYEE_ADR1',
# blank=True,
# help_text="Address of Payee"
# )
# payee_adr2 = fields.CharField(
# max_length=55,
# db_column='PAYEE_ADR2',
# blank=True,
# help_text="Optional 2nd line of Address"
# )
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
help_text="Payee City"
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text="Payee's First name"
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
blank=True,
help_text="Payee's Last name"
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text="Payee's Suffix"
)
payee_namt = fields.CharField(
max_length=10,
db_column='PAYEE_NAMT',
blank=True,
help_text="Payee's Prefix or Title"
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text="State code"
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text="Zip+4"
)
REC_TYPE_CHOICES = (
("EXPN", "Expense"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text='Record Type Value: EXPN',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=31),
DocumentCloud(id='2712034', start_page=42),
]
)
SUP_OPP_CD_CHOICES = annotations.sort_choices(annotations.choices.SUP_OPP_CODES) + (
('s', annotations.choices.SUP_OPP_CODES['S']),
('o', annotations.choices.SUP_OPP_CODES['O']),
        ('H', 'Unknown'),
        ('N', 'Unknown'),
        ('X', 'Unknown'),
        ('Y', 'Unknown'),
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support or opposition code",
choices=SUP_OPP_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=32),
DocumentCloud(id='2712034', start_page=44),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
# tres_adr1 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR1',
# blank=True,
# help_text="Treasurer Street 1(Req if [COM|RCP] & no ID#)"
# )
# tres_adr2 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR2',
# blank=True,
# help_text="Treasurer Street 2"
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="Treasurer City"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer's First name (Req if [COM|RCP] & no ID#)"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer's Last name (Req if [COM|RCP] & no ID#)"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer's Suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer's Prefix or Title"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer State"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer ZIP+4"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text="X = Related item on other Sched has same Tran_ID"
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related item is included on Sched 'C' or 'H2'"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'EXPN_CD'
ordering = ("-expn_date",)
def __str__(self):
return str(self.filing_id)
class LoanCd(CalAccessBaseModel):
"""
Itemized campaign loans.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=87, end_page=90),
DocumentCloud(id='2711616', start_page=60, end_page=63),
DocumentCloud(id='2712033', start_page=35, end_page=39),
DocumentCloud(id='2712034', start_page=47, end_page=50),
]
FILING_FORMS = [
annotations.get_form('F460').get_section('B1'),
annotations.get_form('F460').get_section('B2'),
annotations.get_form('F460').get_section('B3'),
annotations.get_form('F460').get_section('H'),
annotations.get_form('F460').get_section('H1'),
annotations.get_form('F460').get_section('H2'),
annotations.get_form('F460').get_section('H3'),
]
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to transaction identifier of parent record"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
ENTITY_CD_CHOICES = (
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('IND', annotations.choices.CAMPAIGN_ENTITY_CODES['IND']),
('OTH', annotations.choices.CAMPAIGN_ENTITY_CODES['OTH']),
('PTY', annotations.choices.CAMPAIGN_ENTITY_CODES['PTY']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
('SCC', annotations.choices.CAMPAIGN_ENTITY_CODES['SCC']),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name="entity code",
help_text="Entity code describing the lender",
choices=ENTITY_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=35),
DocumentCloud(id='2712034', start_page=47),
]
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
max_length=2,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=35),
DocumentCloud(id='2712034', start_page=47),
]
)
# intr_adr1 = fields.CharField(
# max_length=55, db_column='INTR_ADR1', blank=True
# )
# intr_adr2 = fields.CharField(
# max_length=55, db_column='INTR_ADR2', blank=True
# )
intr_city = fields.CharField(
max_length=30,
db_column='INTR_CITY',
blank=True,
help_text="Intermediary's city"
)
intr_namf = fields.CharField(
max_length=45,
db_column='INTR_NAMF',
blank=True,
help_text="Intermediary's first name"
)
intr_naml = fields.CharField(
max_length=200,
db_column='INTR_NAML',
blank=True,
help_text="Intermediary's last name"
)
intr_nams = fields.CharField(
max_length=10,
db_column='INTR_NAMS',
blank=True,
help_text="Intermediary's suffix"
)
intr_namt = fields.CharField(
max_length=10,
db_column='INTR_NAMT',
blank=True,
help_text="Intermediary's title or prefix"
)
intr_st = fields.CharField(
max_length=2,
db_column='INTR_ST',
blank=True,
help_text="Intermediary's state"
)
intr_zip4 = fields.CharField(
max_length=10,
db_column='INTR_ZIP4',
blank=True,
help_text="Intermediary's ZIP Code"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
lndr_namf = fields.CharField(
max_length=45,
db_column='LNDR_NAMF',
blank=True,
help_text="Lender's first name"
)
lndr_naml = fields.CharField(
max_length=200,
db_column='LNDR_NAML',
help_text="Lender's last name or business name"
)
lndr_nams = fields.CharField(
max_length=10,
db_column='LNDR_NAMS',
blank=True,
help_text="Lender's suffix"
)
lndr_namt = fields.CharField(
max_length=10,
db_column='LNDR_NAMT',
blank=True,
help_text="Lender's title or prefix"
)
# loan_adr1 = fields.CharField(
# max_length=55, db_column='LOAN_ADR1', blank=True
# )
# loan_adr2 = fields.CharField(
# max_length=55, db_column='LOAN_ADR2', blank=True
# )
loan_amt1 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT1',
blank=True,
help_text="Repaid or forgiven amount; Original loan amount. The \
content of this column varies based on the \
schedule/part that the record applies to. See the CAL \
document for a description of the value of this field."
)
loan_amt2 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT2',
blank=True,
help_text="Outstanding Principal; unpaid balance. The content of \
this column varies based on the schedule/part that the \
record applies to. See the CAL document for a \
description of the value of this field."
)
loan_amt3 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT3',
blank=True,
help_text="Interest Paid; Unpaid interest; Interest received. The \
content of this column varies based on the \
schedule/part that the record applies to. See the CAL \
document for a description of the value of this field."
)
loan_amt4 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT4',
blank=True,
help_text="Cumulative Amount/Other. The content of this column \
varies based on the schedule/part that the record \
applies to. See the CAL document for a description of the \
value of this field."
)
loan_amt5 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT5',
blank=True,
help_text="This field is undocumented"
)
loan_amt6 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT6',
blank=True,
help_text="This field is undocumented"
)
loan_amt7 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT7',
blank=True,
help_text="This field is undocumented"
)
loan_amt8 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT8',
blank=True,
help_text="This field is undocumented"
)
loan_city = fields.CharField(
max_length=30,
db_column='LOAN_CITY',
blank=True,
help_text="Lender's city"
)
loan_date1 = fields.DateField(
db_column='LOAN_DATE1',
null=True,
help_text="Date the loan was made or received. The content of this \
column varies based on the schedule/part that the \
record applies to. See the CAL document for a description of the value."
)
loan_date2 = fields.DateField(
null=True,
db_column='LOAN_DATE2',
blank=True,
help_text="Date repaid/forgiven; date loan due. The content of this \
column varies based on the schedule/part that the \
record applies to. See the CAL document for a \
description of the value of this field."
)
loan_emp = fields.CharField(
max_length=200,
db_column='LOAN_EMP',
blank=True,
help_text="Loan employer. Applies to the Form 460 Schedule B \
Part 1."
)
loan_occ = fields.CharField(
max_length=60,
db_column='LOAN_OCC',
blank=True,
help_text="Loan occupation. Applies to the Form 460 Schedule B \
Part 1."
)
loan_rate = fields.CharField(
max_length=30,
db_column='LOAN_RATE',
blank=True,
help_text="Interest Rate. The content of this column varies based \
on the schedule/part that the record applies to. See the \
CAL document for a description of the value of this field."
)
loan_self = fields.CharField(
max_length=1,
db_column='LOAN_SELF',
blank=True,
help_text="Self-employed checkbox"
)
loan_st = fields.CharField(
max_length=2,
db_column='LOAN_ST',
blank=True,
help_text="Lender's state"
)
LOAN_TYPE_CHOICES = (
("H2T", "Third party payment"),
("H2F", "Forgiven"),
("H2R", "Repay"),
("B2T", "Third party payment"),
("B2F", "Forgiven"),
("B2R", "Repay"),
("B1G", "Guarantor"),
("B1L", "Lender"),
)
loan_type = fields.CharField(
max_length=3,
db_column='LOAN_TYPE',
blank=True,
choices=LOAN_TYPE_CHOICES,
help_text="Type of loan",
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=35),
# this field may no longer be in use, CAL format v2 instructs to leave NULL
DocumentCloud(id='2712034', start_page=47),
]
)
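    # Sketch: the loan_type prefix encodes the schedule and part, so Form
    # 460 Schedule B Part 1 records can be split into lenders and guarantors:
    #
    #   lenders = LoanCd.objects.filter(loan_type='B1L')
    #   guarantors = LoanCd.objects.filter(loan_type='B1G')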
loan_zip4 = fields.CharField(
max_length=10,
db_column='LOAN_ZIP4',
blank=True,
help_text="Lender's ZIP Code"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
REC_TYPE_CHOICES = (
("LOAN", "LOAN"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text='Record Type Value: LOAN',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=35),
DocumentCloud(id='2712034', start_page=47),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
# tres_adr1 = fields.CharField(
# max_length=55, db_column='TRES_ADR1', blank=True
# )
# tres_adr2 = fields.CharField(
# max_length=55, db_column='TRES_ADR2', blank=True
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="Treasurer or responsible officer's city"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's title or prefix"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer or responsible officer's street address"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer or responsible officer's ZIP Code"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text='Related item on other schedule has same transaction \
identifier. "X" indicates this condition is true.'
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related record is included on Form 460 Schedule 'A' or 'E'"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOAN_CD'
ordering = ("-loan_date1",)
def __str__(self):
return str(self.filing_id)
class RcptCd(CalAccessBaseModel):
"""
Itemized campaign contributions.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=13),
DocumentCloud(id='2711614', start_page=118, end_page=121),
DocumentCloud(id='2711616', start_page=71, end_page=75),
DocumentCloud(id='2712033', start_page=29, end_page=30),
DocumentCloud(id='2712034', start_page=37, end_page=41),
]
FILING_FORMS = [
annotations.get_form('E530'),
annotations.get_form('F900'),
annotations.get_form('F401').get_section('A'),
annotations.get_form('F460').get_section('A'),
annotations.get_form('F460').get_section('A-1'),
annotations.get_form('F460').get_section('C'),
annotations.get_form('F460').get_section('I'),
annotations.get_form('F496').get_section('P3'),
]
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amount = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMOUNT',
help_text="Amount Received (Monetary, In-kind, Promise)"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to a transaction identifier of a parent \
record"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Jurisdiction of ballot measure. Used on the Form 401 \
Schedule A"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name. Used on the Form 401 Schedule A"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter. Used on the Form 401 \
Schedule A"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder's first name. Used on the Form \
401 Schedule A"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name. Used on the Form \
401 Schedule A"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's name suffix. Used on the Form \
401 Schedule A"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's name prefix or title. Used on \
the Form 401 Schedule A"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Committee Identification number"
)
# ctrib_adr1 = fields.CharField(
# max_length=55,
# db_column='CTRIB_ADR1',
# blank=True,
# default="",
# help_text="First line of the contributor's street address"
# )
# ctrib_adr2 = fields.CharField(
# max_length=55,
# db_column='CTRIB_ADR2',
# blank=True,
# help_text="Second line of the contributor's street address"
# )
ctrib_city = fields.CharField(
max_length=30,
db_column='CTRIB_CITY',
blank=True,
help_text="Contributor's City"
)
ctrib_dscr = fields.CharField(
max_length=90,
db_column='CTRIB_DSCR',
blank=True,
help_text="Description of goods/services received"
)
ctrib_emp = fields.CharField(
max_length=200,
db_column='CTRIB_EMP',
blank=True,
help_text="Employer"
)
ctrib_namf = fields.CharField(
max_length=45,
db_column='CTRIB_NAMF',
blank=True,
help_text="Contributor's First Name"
)
ctrib_naml = fields.CharField(
max_length=200,
db_column='CTRIB_NAML',
help_text="Contributor's last name or business name"
)
ctrib_nams = fields.CharField(
max_length=10,
db_column='CTRIB_NAMS',
blank=True,
help_text="Contributor's Suffix"
)
ctrib_namt = fields.CharField(
max_length=10,
db_column='CTRIB_NAMT',
blank=True,
help_text="Contributor's Prefix or Title"
)
ctrib_occ = fields.CharField(
max_length=60,
db_column='CTRIB_OCC',
blank=True,
help_text="Occupation"
)
ctrib_self = fields.CharField(
max_length=1,
db_column='CTRIB_SELF',
blank=True,
help_text="Self Employed Check-box"
)
ctrib_st = fields.CharField(
max_length=2,
db_column='CTRIB_ST',
blank=True,
help_text="Contributor's State"
)
ctrib_zip4 = fields.CharField(
max_length=10,
db_column='CTRIB_ZIP4',
blank=True,
help_text="Contributor's ZIP+4"
)
cum_oth = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_OTH',
blank=True,
help_text="Cumulative Other (Sched A, A-1)"
)
cum_ytd = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_YTD',
blank=True,
help_text="Cumulative year to date amount (Form 460 Schedule A \
and Form 401 Schedule A, A-1)"
)
date_thru = fields.DateField(
null=True,
db_column='DATE_THRU',
blank=True,
help_text="End of date range for items received"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="Office District Number (used on F401A)"
)
ENTITY_CD_CHOICES = (
# Codes explicitly allowed for this field, according to documentation
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('IND', annotations.choices.CAMPAIGN_ENTITY_CODES['IND']),
('PTY', annotations.choices.CAMPAIGN_ENTITY_CODES['PTY']),
('OTH', annotations.choices.CAMPAIGN_ENTITY_CODES['OTH']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
('SCC', annotations.choices.CAMPAIGN_ENTITY_CODES['SCC']),
('Com', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
# Other known codes observed in this field
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('BNM', annotations.choices.CAMPAIGN_ENTITY_CODES['BNM']),
('OFF', annotations.choices.CAMPAIGN_ENTITY_CODES['OFF']),
# Other unknown values observed
('0', "Unknown"),
('PTH', 'Unknown'),
('RFD', 'Unknown'), # Request for Development?
('MBR', 'Unknown'), # Member?
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
help_text="Entity Code describing the contributor",
choices=ENTITY_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=71),
DocumentCloud(id='2712033', start_page=29),
DocumentCloud(id='2712034', start_page=37),
] + annotations.choices.DOCS['entity_codes']
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=9,
db_index=True,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=29),
DocumentCloud(id='2712034', start_page=37),
]
)
int_rate = fields.CharField(
max_length=9,
db_column='INT_RATE',
blank=True,
help_text="This field is undocumented. The observed values look like "
"filer_ids taken from section 5, cover page 2 of Form 460 "
"(Related Committees Not Included in this Statement)."
)
# intr_adr1 = fields.CharField(
# max_length=55,
# db_column='INTR_ADR1',
# blank=True,
# help_text="First line of the intermediary's street address."
# )
# intr_adr2 = fields.CharField(
# max_length=55,
# db_column='INTR_ADR2',
# blank=True,
# help_text="Second line of the Intermediary's street address."
# )
intr_city = fields.CharField(
max_length=30,
db_column='INTR_CITY',
blank=True,
help_text="Intermediary's City"
)
intr_cmteid = fields.CharField(
max_length=9,
db_column='INTR_CMTEID',
blank=True,
help_text="This field is undocumented"
)
intr_emp = fields.CharField(
max_length=200,
db_column='INTR_EMP',
blank=True,
help_text="Intermediary's Employer"
)
intr_namf = fields.CharField(
max_length=45,
db_column='INTR_NAMF',
blank=True,
help_text="Intermediary's First Name"
)
intr_naml = fields.CharField(
max_length=200,
db_column='INTR_NAML',
blank=True,
help_text="Intermediary's Last Name"
)
intr_nams = fields.CharField(
max_length=10,
db_column='INTR_NAMS',
blank=True,
help_text="Intermediary's Suffix"
)
intr_namt = fields.CharField(
max_length=10,
db_column='INTR_NAMT',
blank=True,
help_text="Intermediary's Prefix or Title"
)
intr_occ = fields.CharField(
max_length=60,
db_column='INTR_OCC',
blank=True,
help_text="Intermediary's Occupation"
)
intr_self = fields.CharField(
max_length=1,
db_column='INTR_SELF',
blank=True,
help_text="Intermediary's self employed check box"
)
intr_st = fields.CharField(
max_length=2,
db_column='INTR_ST',
blank=True,
help_text="Intermediary's state"
)
intr_zip4 = fields.CharField(
max_length=10,
db_column='INTR_ZIP4',
blank=True,
help_text="Intermediary's zip code"
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES) + (
# "other" office codes
('BED', annotations.choices.JURIS_CODES['OTH']),
('CLB', annotations.choices.JURIS_CODES['OTH']),
# misprint
('COU', annotations.choices.JURIS_CODES['CTY']),
# Office Code for this one record is Superior Court Judge
('CO', annotations.choices.JURIS_CODES['OTH']),
('SAC', 'Unknown'),
('PER', 'Unknown'),
('SF', 'Unknown'),
('OR', 'Unknown'),
('AL', 'Unknown'),
('4', 'Unknown'),
('CA', 'Unknown'),
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
choices=JURIS_CD_CHOICES,
help_text="Office jurisdiction code. See the CAL document for the \
list of legal values. Used on Form 401 Schedule A",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=74),
DocumentCloud(id='2712033', start_page=30),
DocumentCloud(id='2712034', start_page=40),
]
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction Description (used on F401A)"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag (Date/Amount are informational only)"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
OFF_S_H_CD_CHOICES = (
('S', annotations.choices.OFF_S_H_CODES['S']),
('H', annotations.choices.OFF_S_H_CODES['H']),
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office is sought or held code',
choices=OFF_S_H_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=75),
DocumentCloud(id='2712033', start_page=30),
DocumentCloud(id='2712034', start_page=40),
]
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office Sought Description (used on F401A)"
)
OFFICE_CD_CHOICES = annotations.sort_choices(annotations.choices.OFFICE_CODES) + (
# alt cases for valid codes
('asm', annotations.choices.OFFICE_CODES['ASM']),
('gov', annotations.choices.OFFICE_CODES['GOV']),
('OTh', annotations.choices.OFFICE_CODES['OTH']),
('oth', annotations.choices.OFFICE_CODES['OTH']),
('csu', annotations.choices.OFFICE_CODES['CSU']),
# invalid codes
('H', 'Unknown'),
('HOU', 'Unknown'),
('ASS', 'Unknown'),
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=3,
blank=True,
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=10),
DocumentCloud(id='2712034', start_page=12),
DocumentCloud(id='2712032', start_page=2),
],
)
rcpt_date = fields.DateField(
db_column='RCPT_DATE',
null=True,
help_text="Date item received"
)
REC_TYPE_CHOICES = (
('E530', annotations.get_form('E530').full_title),
("RCPT", "Receipt"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text="Record Type Value: CVR",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=71),
            DocumentCloud(id='2712033', start_page=29),
            DocumentCloud(id='2712034', start_page=37),
]
)
SUP_OPP_CD_CHOICES = (
# Codes explicitly allowed for this field, according to documentation
('S', annotations.choices.SUP_OPP_CODES['S']),
('O', annotations.choices.SUP_OPP_CODES['O']),
# Other unknown values observed
('F', 'Unknown'),
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support or opposition code",
choices=SUP_OPP_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=74),
DocumentCloud(id='2712033', start_page=30),
DocumentCloud(id='2712034', start_page=40),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
    TRAN_TYPE_CHOICES = (
# Codes explicitly allowed for this field, according to documentation
('F', 'Forgiven Loan'),
('I', 'Intermediary'),
('R', 'Returned'),
('T', 'Third Party Repayment'),
('X', 'Transfer'),
# Other unknown values observed
('0', 'Unknown'),
('M', 'Unknown'),
('N', 'Unknown'),
    )
tran_type = fields.CharField(
verbose_name="transaction type",
max_length=1,
db_column='TRAN_TYPE',
blank=True,
choices=TRAN_TYPE_CHOICES,
help_text="Transaction Type",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=72),
DocumentCloud(id='2712033', start_page=29),
DocumentCloud(id='2712034', start_page=38),
]
)
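    # Sketch (values per TRAN_TYPE_CHOICES above): isolate returned
    # contributions, e.g. to net them out of a contributor's totals:
    #
    #   returned = RcptCd.objects.filter(tran_type='R')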
# tres_adr1 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR1',
# blank=True,
# help_text="First line of the treasurer or responsible officer's \
# street address"
# )
# tres_adr2 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR2',
# blank=True,
# help_text="Second line of the treasurer or responsible officer's \
# street address"
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="City portion of the treasurer or responsible officer's \
street address"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="State portion of the treasurer or responsible officer's \
address"
)
tres_zip4 = fields.CharField(
null=True,
max_length=10,
blank=True,
db_column='TRES_ZIP4',
help_text="Zip code portion of the treasurer or responsible officer's \
address"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text="Related item on other schedule has same transaction \
identifier. 'X' indicates this condition is true"
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related record is included on Sched 'B2' or 'F'"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'RCPT_CD'
ordering = ("-rcpt_date",)
def __str__(self):
return str(self.filing_id)
class S401Cd(CalAccessBaseModel):
"""
Payments and other disclosures made by slate-mailer organizations.
"""
UNIQUE_KEY = (
'FILING_ID',
'AMEND_ID',
'LINE_ITEM',
'REC_TYPE',
'FORM_TYPE'
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=123, end_page=124),
DocumentCloud(id='2711616', start_page=76, end_page=78),
DocumentCloud(id='2712033', start_page=39),
DocumentCloud(id='2712034', start_page=51, end_page=52),
]
FILING_FORMS = [
annotations.get_form('F401').get_section('B'),
annotations.get_form('F401').get_section('B-1'),
annotations.get_form('F401').get_section('C'),
annotations.get_form('F401').get_section('D'),
]
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S401", "S401"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
help_text="Record Type Value: S401",
choices=REC_TYPE_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=39),
DocumentCloud(id='2712034', start_page=51),
]
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
max_length=7,
db_column='FORM_TYPE',
blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=39),
DocumentCloud(id='2712034', start_page=51),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
agent_naml = fields.CharField(
max_length=200,
db_column='AGENT_NAML',
blank=True,
help_text="Agent or independent contractor's last name"
)
agent_namf = fields.CharField(
max_length=45,
db_column='AGENT_NAMF',
blank=True,
help_text="Agent or independent contractor's first name"
)
agent_namt = fields.CharField(
max_length=200,
db_column='AGENT_NAMT',
blank=True,
help_text="Agent or independent contractor's title or prefix"
)
agent_nams = fields.CharField(
max_length=10,
db_column='AGENT_NAMS',
blank=True,
help_text="Agent or independent contractor's suffix"
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
blank=True,
help_text="Payee's business name or last name if the payee is an \
individual"
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text="Payee's first name if the payee is an individual"
)
payee_namt = fields.CharField(
max_length=10,
db_column='PAYEE_NAMT',
blank=True,
help_text="Payee's title or prefix if the payee is an individual"
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text="Payee's suffix if the payee is an individual"
)
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
help_text="Payee's city address"
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text="Payee state address"
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text="Payee ZIP Code"
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Amount (Sched F401B, 401B-1, 401C)"
)
aggregate = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AGGREGATE',
help_text="Aggregate year-to-date amount (Sched 401C)"
)
expn_dscr = fields.CharField(
max_length=90,
db_column='EXPN_DSCR',
blank=True,
help_text="Purpose of expense and/or description/explanation"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder last name"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder first name"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder title or prefix"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder suffix"
)
OFFICE_CD_CHOICES = annotations.sort_choices(annotations.choices.OFFICE_CODES) + (
# alt cases for valid codes
('asm', annotations.choices.OFFICE_CODES['ASM']),
('ltg', annotations.choices.OFFICE_CODES['LTG']),
('OTh', annotations.choices.OFFICE_CODES['OTH']),
('att', annotations.choices.OFFICE_CODES['ATT']),
('oth', annotations.choices.OFFICE_CODES['OTH']),
('tre', annotations.choices.OFFICE_CODES['TRE']),
('con', annotations.choices.OFFICE_CODES['CON']),
('boe', annotations.choices.OFFICE_CODES['BOE']),
('sos', annotations.choices.OFFICE_CODES['SOS']),
('sup', annotations.choices.OFFICE_CODES['SUP']),
# invalid codes
('H', 'Unknown'),
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=3,
blank=True,
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CD_CHOICES,
documentcloud_pages=annotations.choices.DOCS['office_codes'],
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES) + (
('SAC', 'Unknown'),
('CT', 'Unknown'),
('ca', 'Unknown'),
('CAL', 'Unknown'),
('OR', 'Unknown'),
('AL', 'Unknown'),
('CA', 'Unknown'),
('10', 'Unknown'),
)
juris_cd = fields.CharField(
max_length=3,
choices=JURIS_CD_CHOICES,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code",
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=77),
DocumentCloud(id='2712033', start_page=39),
DocumentCloud(id='2712034', start_page=52),
]
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
OFF_S_H_CD_CHOICES = (
('S', annotations.choices.OFF_S_H_CODES['S']),
('H', annotations.choices.OFF_S_H_CODES['H']),
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office is sought or held code',
choices=OFF_S_H_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=39),
DocumentCloud(id='2712034', start_page=52),
]
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
SUP_OPP_CD_CHOICES = (
('S', annotations.choices.SUP_OPP_CODES['S']),
('O', annotations.choices.SUP_OPP_CODES['O']),
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support or opposition code",
choices=SUP_OPP_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=39),
DocumentCloud(id='2712034', start_page=52),
]
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in the TEXT record"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back reference to transaction identifier of parent record"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'S401_CD'
def __str__(self):
return str(self.filing_id)
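# A hypothetical usage sketch for the Form 401 schedule model above, kept as
# comments so the module's import-time behavior is unchanged. It assumes the
# S401_CD table has been loaded with the calaccess_raw management commands and
# that the model is re-exported from calaccess_raw.models.
#
#   from calaccess_raw.models import S401Cd
#
#   # Ten largest itemized payments, with the payee's name
#   for rec in S401Cd.objects.order_by('-amount')[:10]:
#       print(rec.filing_id, rec.payee_naml, rec.amount)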
class F495P2Cd(CalAccessBaseModel):
"""
Supplemental pre-election campaign statements.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=56, end_page=57),
DocumentCloud(id='2711616', start_page=49),
DocumentCloud(id='2712033', start_page=26),
DocumentCloud(id='2712034', start_page=35),
]
FILING_FORMS = [
annotations.get_form('F450'),
annotations.get_form('F460'),
annotations.get_form('F495'),
]
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
('F495', 'F495'),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
help_text='Record Type Value: F495',
choices=REC_TYPE_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=26),
DocumentCloud(id='2712034', start_page=35),
]
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
choices=FORM_TYPE_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=26),
DocumentCloud(id='2712034', start_page=35),
],
help_text='Name of the source filing form to which the Form 495 is \
attached (must equal Form_Type in CVR record)',
)
elect_date = fields.DateField(
db_column='ELECT_DATE',
blank=True,
null=True,
help_text="Date of the General Election This date will be the same \
as on the filing's cover (CVR) record."
)
electjuris = fields.CharField(
db_column='ELECTJURIS',
max_length=40,
help_text="Jurisdiction of the election"
)
contribamt = fields.FloatField(
db_column='CONTRIBAMT',
help_text="Contribution amount (For the period of 6 months prior to \
17 days before the election)"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'F495P2_CD'
ordering = ("-elect_date",)
def __str__(self):
return str(self.filing_id)
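# Hypothetical query sketch (comments only; names follow the model above),
# assuming the F495P2_CD table has been loaded.
#
#   from django.db.models import Sum
#   from calaccess_raw.models import F495P2Cd
#
#   # Total contributions reported on Form 495 filings, grouped by election date
#   totals = F495P2Cd.objects.values('elect_date').annotate(total=Sum('contribamt'))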
class S496Cd(CalAccessBaseModel):
"""
Itemized independent expenditures made in the 90 days before an election.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=12),
DocumentCloud(id='2711614', start_page=124, end_page=125),
DocumentCloud(id='2711616', start_page=79),
DocumentCloud(id='2712033', start_page=40),
DocumentCloud(id='2712034', start_page=53),
]
FILING_FORMS = [
annotations.get_form('F496')
]
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
('S496', 'S496'),
)
rec_type = fields.CharField(
verbose_name='record type',
max_length=4,
db_column='REC_TYPE',
db_index=True,
choices=REC_TYPE_CHOICES,
help_text="Record Type Value: S496",
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=40),
DocumentCloud(id='2712034', start_page=53),
]
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
max_length=4,
db_column='FORM_TYPE',
blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=40),
DocumentCloud(id='2712034', start_page=53),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Expenditure amount"
)
exp_date = fields.DateField(
db_column='EXP_DATE',
null=True,
help_text="Expenditure dates"
)
expn_dscr = fields.CharField(
max_length=90,
db_column='EXPN_DSCR',
blank=True,
help_text="Purpose of expense and/or description/explanation"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
date_thru = fields.DateField(
db_column='DATE_THRU',
null=True,
help_text="End of date range for items paid"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'S496_CD'
ordering = ("-exp_date",)
def __str__(self):
return "{} Filing {}, Amendment {}".format(
self.form_type,
self.filing_id,
self.amend_id
)
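# Hypothetical usage sketch (comments only), assuming the S496_CD table is
# loaded. The Meta above orders records by most recent expenditure first, so
# a simple date filter yields the late independent expenditures for a window.
#
#   import datetime
#   from calaccess_raw.models import S496Cd
#
#   window_start = datetime.date(2016, 8, 10)  # hypothetical 90-day window start
#   recent = S496Cd.objects.filter(exp_date__gte=window_start)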
class S497Cd(CalAccessBaseModel):
"""
Campaign contributions made or received in the 90 days before an election.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=12),
DocumentCloud(id='2711614', start_page=125, end_page=127),
DocumentCloud(id='2711616', start_page=80, end_page=82),
DocumentCloud(id='2712033', start_page=41, end_page=42),
DocumentCloud(id='2712034', start_page=54, end_page=55),
]
FILING_FORMS = [
annotations.get_form('F497').get_section('P1'),
annotations.get_form('F497').get_section('P2'),
]
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S497", "S497"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
help_text="Record Type Value: S497",
choices=REC_TYPE_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=41),
DocumentCloud(id='2712034', start_page=54),
]
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
max_length=6,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=41),
DocumentCloud(id='2712034', start_page=54),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CD_CHOICES = (
('BNM', annotations.choices.CAMPAIGN_ENTITY_CODES['BNM']),
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('CTL', annotations.choices.CAMPAIGN_ENTITY_CODES['CTL']),
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('com', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('IND', annotations.choices.CAMPAIGN_ENTITY_CODES['IND']),
('OFF', annotations.choices.CAMPAIGN_ENTITY_CODES['OFF']),
('OTH', annotations.choices.CAMPAIGN_ENTITY_CODES['OTH']),
('PTY', annotations.choices.CAMPAIGN_ENTITY_CODES['PTY']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
('SCC', annotations.choices.CAMPAIGN_ENTITY_CODES['SCC']),
('0', 'Unknown'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
help_text='Entity Code describing the Contributor/Recipient',
choices=ENTITY_CD_CHOICES,
documentcloud_pages=annotations.choices.DOCS['entity_codes'] + [
DocumentCloud(id='2712033', start_page=41),
DocumentCloud(id='2712034', start_page=54),
]
)
enty_naml = fields.CharField(
max_length=200,
db_column='ENTY_NAML',
blank=True,
help_text="Last name of Contributor/Recipient",
)
enty_namf = fields.CharField(
max_length=45,
db_column='ENTY_NAMF',
blank=True,
help_text="First name of Contributor/Recipient",
)
enty_namt = fields.CharField(
max_length=10,
db_column='ENTY_NAMT',
blank=True,
help_text="Name title or prefix of Contributor/Recipient",
)
enty_nams = fields.CharField(
max_length=10,
db_column='ENTY_NAMS',
blank=True,
help_text="Name suffix of Contributor/Recipient",
)
enty_city = fields.CharField(
max_length=30,
db_column='ENTY_CITY',
blank=True,
help_text="City address of Contributor/Recipient",
)
enty_st = fields.CharField(
max_length=2,
db_column='ENTY_ST',
blank=True,
help_text="State address of Contributor/Recipient",
)
enty_zip4 = fields.CharField(
max_length=10,
db_column='ENTY_ZIP4',
blank=True,
help_text="ZIP Code of Contributor/Recipient",
)
ctrib_emp = fields.CharField(
max_length=200,
db_column='CTRIB_EMP',
blank=True,
help_text="Employer of Contributor (populated for some Recipients as well)",
)
ctrib_occ = fields.CharField(
max_length=60,
db_column='CTRIB_OCC',
blank=True,
help_text="Occupation of Contributor (populated for some Recipients as well)"
)
ctrib_self = fields.CharField(
max_length=1,
db_column='CTRIB_SELF',
blank=True,
verbose_name="Contributor self-employed checkbox",
help_text='Contributor self-employed checkbox. "X" indicates the contributor is \
self-employed.'
)
elec_date = fields.DateField(
db_column='ELEC_DATE',
null=True,
help_text="Date of election"
)
ctrib_date = fields.DateField(
db_column='CTRIB_DATE',
null=True,
help_text="Date item received/made"
)
date_thru = fields.DateField(
db_column='DATE_THRU',
null=True,
help_text="End of date range for items received"
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Amount received/made"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder's first name"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's title or prefix"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's suffix"
)
OFFICE_CD_CHOICES = annotations.sort_choices(annotations.choices.OFFICE_CODES) + (
# alt cases of valid codes
('asm', annotations.choices.OFFICE_CODES['ASM']),
('sen', annotations.choices.OFFICE_CODES['SEN']),
('Asm', annotations.choices.OFFICE_CODES['ASM']),
('May', annotations.choices.OFFICE_CODES['MAY']),
('ASm', annotations.choices.OFFICE_CODES['ASM']),
('oth', annotations.choices.OFFICE_CODES['OTH']),
('csu', annotations.choices.OFFICE_CODES['CSU']),
('Oth', annotations.choices.OFFICE_CODES['OTH']),
# invalid codes
('H', 'Unknown'),
('S', 'Unknown'),
('OF', 'Unknown'),
('HOU', 'Unknown'),
('LOC', 'Unknown'),
('LEG', 'Unknown'),
('STW', 'Unknown'),
('P', 'Unknown'),
('LTV', 'Unknown'),
('LT', 'Unknown'),
('CTY', 'Unknown'),
('OFF', 'Unknown'),
('REP', 'Unknown'),
('COM', 'Unknown'),
('N/A', 'Unknown'),
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=3,
blank=True,
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CD_CHOICES,
documentcloud_pages=annotations.choices.DOCS['office_codes']
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES) + (
('asm', annotations.choices.JURIS_CODES['ASM']),
('sen', annotations.choices.JURIS_CODES['SEN']),
('cit', annotations.choices.JURIS_CODES['CIT']),
('GOV', annotations.choices.JURIS_CODES['STW']),
# city office codes
('MAY', annotations.choices.JURIS_CODES['CIT']),
# county office codes
('BSU', annotations.choices.JURIS_CODES['CTY']),
('CSU', annotations.choices.JURIS_CODES['CTY']),
# statewide office codes
('SUP', annotations.choices.JURIS_CODES['STW']),
# other office codes
('BED', annotations.choices.JURIS_CODES['OTH']),
('CCB', annotations.choices.JURIS_CODES['OTH']),
('CCM', annotations.choices.JURIS_CODES['OTH']),
('CLB', annotations.choices.JURIS_CODES['OTH']),
# These are all for City Council Member offices
('IRV', annotations.choices.JURIS_CODES['CIT']),
('Fon', annotations.choices.JURIS_CODES['CIT']),
# For Arnold's Gov campaign
('JRS', annotations.choices.JURIS_CODES['STW']),
# County Supervisor office
('CO', annotations.choices.JURIS_CODES['CTY']),
('Riv', annotations.choices.JURIS_CODES['CTY']),
# misspelling
('SNE', annotations.choices.JURIS_CODES['SEN']),
# This is for Prop 83
('83', annotations.choices.JURIS_CODES['STW']),
# Sometimes Assembly Member, sometimes Public Employees Retirement System
('PER', 'Unknown'),
# These look like contributions to federal campaigns (e.g., President, Congress)
('FED', 'Unknown'),
# Sometimes for Assembly Members, sometimes Statewide offices, kinda all over
('CA', 'Unknown'),
('JR', 'Unknown'),
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
verbose_name="jurisdiction code",
help_text="Jurisdiction code describing the office being sought",
choices=JURIS_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=42),
DocumentCloud(id='2712034', start_page=55),
]
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
OFF_S_H_CD_CHOICES = annotations.sort_choices(annotations.choices.OFF_S_H_CODES) + (
('s', annotations.choices.OFF_S_H_CODES['S']),
('h', annotations.choices.OFF_S_H_CODES['H']),
# The codes below appear in the database but are undocumented
        ('F', 'Unknown'),
        ('T', 'Unknown'),
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office is sought or held code',
choices=OFF_S_H_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=42),
DocumentCloud(id='2712034', start_page=55),
]
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in TEXT code"
)
bal_id = fields.CharField(
max_length=9,
db_column='BAL_ID',
blank=True,
help_text="This field is undocumented"
)
cand_id = fields.CharField(
max_length=9,
db_column='CAND_ID',
blank=True,
help_text="This field is undocumented"
)
sup_off_cd = fields.CharField(
max_length=1,
db_column='SUP_OFF_CD',
blank=True,
help_text="This field is undocumented"
)
SUP_OPP_CD_CHOICES = annotations.sort_choices(annotations.choices.SUP_OPP_CODES)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support or opposition code",
choices=SUP_OPP_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2711616', start_page=82),
]
)
def __str__(self):
return "{} Filing {}, Amendment {}".format(
self.get_form_type_display(),
self.filing_id,
self.amend_id
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'S497_CD'
ordering = ("-ctrib_date",)
class S498Cd(CalAccessBaseModel):
"""
Payments received by slate-mailer organizations in the 90 days before an election.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE",
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=12),
DocumentCloud(id='2711614', start_page=127, end_page=129),
DocumentCloud(id='2711616', start_page=83, end_page=85),
DocumentCloud(id='2712033', start_page=43, end_page=44),
DocumentCloud(id='2712034', start_page=56, end_page=57),
]
FILING_FORMS = [
annotations.get_form('F498').get_section('A'),
annotations.get_form('F498').get_section('R'),
]
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S498", "S498"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text="Record Type Value: S498",
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=43),
DocumentCloud(id='2712034', start_page=56),
]
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
max_length=9,
db_column='FORM_TYPE',
blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=43),
DocumentCloud(id='2712034', start_page=56),
]
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CD_CHOICES = (
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
('COM', annotations.choices.CAMPAIGN_ENTITY_CODES['COM']),
('IND', annotations.choices.CAMPAIGN_ENTITY_CODES['IND']),
('OTH', annotations.choices.CAMPAIGN_ENTITY_CODES['OTH']),
('RCP', annotations.choices.CAMPAIGN_ENTITY_CODES['RCP']),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
help_text="Entity code",
choices=ENTITY_CD_CHOICES,
documentcloud_pages=annotations.choices.DOCS['entity_codes'] + [
DocumentCloud(id='2712033', start_page=43),
DocumentCloud(id='2712034', start_page=56),
]
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
payor_naml = fields.CharField(
max_length=200,
db_column='PAYOR_NAML',
blank=True,
help_text="Payor's last name or business name"
)
    payor_namf = fields.CharField(
        max_length=45,
        db_column='PAYOR_NAMF',
        blank=True,
        help_text="Payor's first name"
    )
    payor_namt = fields.CharField(
        max_length=10,
        db_column='PAYOR_NAMT',
        blank=True,
        help_text="Payor's prefix or title"
    )
    payor_nams = fields.CharField(
        max_length=10,
        db_column='PAYOR_NAMS',
        blank=True,
        help_text="Payor's suffix"
    )
    payor_city = fields.CharField(
        max_length=30,
        db_column='PAYOR_CITY',
        blank=True,
        help_text="Payor's city"
    )
    payor_st = fields.CharField(
        max_length=2,
        db_column='PAYOR_ST',
        blank=True,
        help_text="Payor's state"
    )
    payor_zip4 = fields.CharField(
        max_length=10,
        db_column='PAYOR_ZIP4',
        blank=True,
        help_text="Payor's ZIP Code"
    )
date_rcvd = fields.DateField(
db_column='DATE_RCVD',
null=True,
help_text="Date received"
)
amt_rcvd = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMT_RCVD',
help_text="Amount received"
)
    cand_naml = fields.CharField(
        max_length=200,
        db_column='CAND_NAML',
        blank=True,
        help_text="Candidate/officeholder last name"
    )
    cand_namf = fields.CharField(
        max_length=45,
        db_column='CAND_NAMF',
        blank=True,
        help_text="Candidate/officeholder first name"
    )
    cand_namt = fields.CharField(
        max_length=10,
        db_column='CAND_NAMT',
        blank=True,
        help_text="Candidate/officeholder title or prefix"
    )
    cand_nams = fields.CharField(
        max_length=10,
        db_column='CAND_NAMS',
        blank=True,
        help_text="Candidate/officeholder suffix"
    )
OFFICE_CD_CHOICES = annotations.sort_choices(annotations.choices.OFFICE_CODES) + (
('gov', annotations.choices.OFFICE_CODES['GOV']),
('oth', annotations.choices.OFFICE_CODES['OTH']),
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=4,
blank=True,
verbose_name="office code",
choices=OFFICE_CD_CHOICES,
help_text="Identifies the office being sought",
documentcloud_pages=annotations.choices.DOCS['office_codes'],
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Description of office sought"
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES) + (
('GOV', annotations.choices.JURIS_CODES['STW']),
('COU', annotations.choices.JURIS_CODES['CTY']),
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
choices=JURIS_CD_CHOICES,
help_text="Office jurisdiction code",
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=43),
DocumentCloud(id='2712034', start_page=57),
]
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. \
Populated for Senate, Assembly, or Board of Equalization races."
)
OFF_S_H_CD_CHOICES = annotations.sort_choices(annotations.choices.OFF_S_H_CODES)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office is sought or held code',
choices=OFF_S_H_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=44),
DocumentCloud(id='2712034', start_page=57),
]
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter."
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Jurisdiction of ballot measure"
)
SUP_OPP_CD_CHOICES = annotations.sort_choices(annotations.choices.SUP_OPP_CODES)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support or opposition code",
choices=SUP_OPP_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2712033', start_page=43),
DocumentCloud(id='2712034', start_page=57),
]
)
amt_attrib = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMT_ATTRIB',
help_text="Amount attributed (only if Form_type = 'F498-A')"
)
    memo_code = fields.CharField(
        max_length=1,
        db_column='MEMO_CODE',
        blank=True,
        help_text="Memo amount flag"
    )
    memo_refno = fields.CharField(
        max_length=20,
        db_column='MEMO_REFNO',
        blank=True,
        help_text='Reference to text contained in a TEXT record'
    )
employer = fields.CharField(
max_length=200,
db_column='EMPLOYER',
blank=True,
help_text="This field is undocumented"
)
occupation = fields.CharField(
max_length=60,
db_column='OCCUPATION',
blank=True,
help_text='This field is undocumented'
)
selfemp_cb = fields.CharField(
max_length=1,
db_column='SELFEMP_CB',
blank=True,
help_text='Self-employed checkbox'
)
def __str__(self):
return str(self.filing_id)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'S498_CD'
ordering = ("-date_rcvd",)
class F501502Cd(CalAccessBaseModel):
"""
Candidate intention statements.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614', start_page=8),
DocumentCloud(id='2711614', start_page=57, end_page=59),
]
FILING_FORMS = [
annotations.get_form('F501'),
annotations.get_form('F502'),
]
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
REC_TYPE_CHOICES = (
("CVR", "CVR"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text="Record Type",
documentcloud_pages=[
DocumentCloud(id='2711614', start_page=58),
]
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule',
documentcloud_pages=[
DocumentCloud(id='2711614', start_page=58),
]
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=9,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
committee_id = fields.CharField(
db_column='COMMITTEE_ID',
max_length=9,
blank=True,
verbose_name="Committee ID",
help_text='Committee identification number'
)
ENTITY_CD_CHOICES = annotations.sort_choices(annotations.choices.CAMPAIGN_ENTITY_CODES) + (
('8', 'Unknown'),
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
choices=ENTITY_CD_CHOICES,
blank=True,
max_length=9,
help_text='Entity code', # Not clear which values are valid
documentcloud_pages=annotations.choices.DOCS['entity_codes'],
)
report_num = fields.CharField(
db_column='REPORT_NUM',
blank=True,
null=True,
max_length=3,
help_text='Report Number; 000 Original; 001-999 Amended'
)
rpt_date = fields.DateField(
db_column='RPT_DATE',
blank=True,
null=True,
        help_text='Date this report is filed'
)
STMT_TYPE_CHOICES = (
(10001, 'ORIGINAL/INITIAL'),
(10002, 'AMENDMENT'),
(10003, 'TERMINATION'),
(10004, 'REDESIGNATE THE ACCOUNT FOR FUTURE ELECTION TO THE SAME OFFICE'),
(10005, 'LOG'),
(10006, 'LOG/AMENDMENT'),
(10007, 'AS FILED BY COMMITTEE')
)
stmt_type = fields.IntegerField(
db_column='STMT_TYPE',
verbose_name="statement type",
choices=STMT_TYPE_CHOICES,
help_text='Type of statement',
documentcloud_pages=[
DocumentCloud(id='2774529', start_page=6)
],
)
from_date = fields.CharField(
db_column='FROM_DATE',
max_length=32,
blank=True,
help_text='Reporting period from date'
)
thru_date = fields.CharField(
db_column='THRU_DATE',
max_length=32,
blank=True,
help_text="Reporting period through date"
)
elect_date = fields.CharField(
db_column='ELECT_DATE',
max_length=32,
blank=True,
help_text='Date of election'
)
    cand_naml = fields.CharField(
        db_column='CAND_NAML',
        max_length=200,
        blank=True,
        help_text="Candidate/officeholder last name"
    )
    cand_namf = fields.CharField(
        db_column='CAND_NAMF',
        max_length=45,
        blank=True,
        help_text="Candidate/officeholder first name"
    )
    can_namm = fields.CharField(
        db_column='CAN_NAMM',
        max_length=20,
        blank=True,
        help_text='Candidate/officeholder middle name'
    )
    cand_namt = fields.CharField(
        db_column='CAND_NAMT',
        max_length=100,
        blank=True,
        help_text="Candidate/officeholder title or prefix"
    )
cand_nams = fields.CharField(
db_column='CAND_NAMS',
max_length=10,
blank=True,
help_text="Candidate/officeholder suffix"
)
moniker_pos = fields.CharField(
db_column='MONIKER_POS',
max_length=32,
blank=True,
help_text="Location of the candidate/officeholder's moniker"
)
moniker = fields.CharField(
db_column='MONIKER',
max_length=20,
blank=True,
help_text="Candidate/officeholder's moniker"
)
cand_city = fields.CharField(
db_column='CAND_CITY',
max_length=30,
blank=True,
help_text="Candidate/officerholder city"
)
cand_st = fields.CharField(
db_column='CAND_ST',
max_length=4,
blank=True,
help_text='Candidate/officeholder state'
)
cand_zip4 = fields.CharField(
db_column='CAND_ZIP4',
max_length=10,
blank=True,
help_text='Candidate/officeholder zip +4'
)
cand_phon = fields.CharField(
db_column='CAND_PHON',
max_length=20,
blank=True,
help_text='Candidate/officeholder phone number'
)
cand_fax = fields.CharField(
db_column='CAND_FAX',
max_length=20,
blank=True,
help_text="Candidate/officerholder fax"
)
cand_email = fields.CharField(
db_column='CAND_EMAIL',
max_length=60,
blank=True,
help_text='Candidate/officeholder email address'
)
fin_naml = fields.CharField(
db_column='FIN_NAML',
max_length=200,
blank=True,
help_text="Financial institution's business name"
)
fin_namf = fields.CharField(
db_column='FIN_NAMF',
max_length=45,
blank=True,
help_text="Unused. Financial institution's first name."
)
fin_namt = fields.CharField(
db_column='FIN_NAMT',
max_length=100,
blank=True,
help_text="Unused. Financial institution's title."
)
fin_nams = fields.CharField(
db_column='FIN_NAMS',
max_length=32,
blank=True,
help_text="Unused. Financial institution's suffix."
)
fin_city = fields.CharField(
db_column='FIN_CITY',
max_length=30,
blank=True,
help_text="Financial institution's city."
)
fin_st = fields.CharField(
db_column='FIN_ST',
max_length=4,
blank=True,
help_text="Financial institution's state."
)
fin_zip4 = fields.CharField(
db_column='FIN_ZIP4',
max_length=10,
blank=True,
help_text="Financial institution's zip code."
)
fin_phon = fields.CharField(
db_column='FIN_PHON',
max_length=20,
blank=True,
help_text="Financial institution's phone number."
)
fin_fax = fields.CharField(
db_column='FIN_FAX',
max_length=20,
blank=True,
help_text="Financial institution's FAX Number."
)
fin_email = fields.CharField(
db_column='FIN_EMAIL',
max_length=60,
blank=True,
help_text="Financial institution's e-mail address."
)
OFFICE_CD_CHOICES = (
(0, "N/A"),
(30001, "PRESIDENT"),
(30002, "GOVERNOR"),
(30003, "LIEUTENANT GOVERNOR"),
(30004, "SECRETARY OF STATE"),
(30005, "CONTROLLER"),
(30006, "TREASURER"),
(30007, "ATTORNEY GENERAL"),
(30008, "SUPERINTENDENT OF PUBLIC INSTRUCTION"),
(30009, "MEMBER BOARD OF EQUALIZATION"),
(30010, "OXNARD HARBOR COMMISSIONER"),
(30011, "CITY CONTROLLER"),
(30012, "STATE SENATE"),
(30013, "ASSEMBLY"),
(30014, "INSURANCE COMMISSIONER"),
(30015, "JUDGE"),
(30016, "BOARD MEMBER"),
(30017, "TAX COLLECTOR"),
(30018, "TRUSTEE"),
(30019, "SUPERVISOR"),
(30020, "SHERIFF"),
(30021, "CORONER"),
(30022, "MARSHALL"),
(30023, "CITY CLERK"),
(30024, "SCHOOL BOARD"),
(30025, "HARBOR COMMISSIONER"),
(30026, "DISTRICT ATTORNEY"),
(30027, "COUNTY CLERK"),
(30028, "AUDITOR"),
(30029, "MAYOR"),
(30030, "CITY ATTORNEY"),
(30031, "DEMOCRATIC COUNTY CENTRAL COMMITTEE"),
(30032, "TOWN COUNCIL"),
(30033, "ASSESSOR"),
(30034, "CITY TREASURER"),
(30035, "CITY COUNCIL"),
(30036, "COMMISSIONER"),
(30037, "REPUBLICAN COUNTY CENTRAL COMMITTEE"),
(30038, "DIRECTOR"),
(30039, "DIRECTOR OF ZONE 7"),
(30040, "COMMUNITY COLLEGE BOARD"),
(30041, "POLICE CHIEF"),
(30042, "CHIEF OF POLICE"),
(30043, "CENTRAL COMMITTEE"),
(30044, "BOARD OF EDUCATION"),
(30045, "BOARD OF DIRECTORS"),
(30046, "COLLEGE BOARD"),
(30047, "BART BOARD DIRECTOR"),
(30048, "BOARD OF TRUSTEES"),
(30049, "IRRIGATION"),
(30050, "WATER BOARD"),
(30051, "COMMUNITY PLANNING GROUP"),
(30052, "BOARD OF SUPERVISORS"),
(30053, "SUPERIOR COURT JUDGE"),
(30054, "DISTRICT ATTORNEY/PUBLIC DEFENDER"),
(30055, "MEASURE"),
(30056, "CITY PROSECUTOR"),
(30057, "SUPREME COURT JUDGE"),
(30058, "PUBLIC EMPLOYEES RETIREMENT BOARD"),
(30059, "APPELLATE COURT JUDGE"),
(50001, "Ag"),
(50002, "Assembly"),
(50003, "Assessor"),
(50004, "Assessor/Clerk/Recorder"),
(50005, "Assessor/County Clerk/Recorder"),
(50006, "Assessor/Recorder"),
(50007, "Associate Justice"),
(50008, "Auditor"),
(50009, "Auditor/Controller"),
(50010, "Auditor/Controller/Clerk/Recorder"),
(50011, "Auditor/Controller/Recorder"),
(50012, "Auditor/Controller/Treasurer/Tax Collector"),
(50013, "Auditor/Recorder"),
(50014, "Board Member"),
(50015, "Board Of Director"),
(50016, "Board Of Supervisor"),
(50017, "Boe"),
(50018, "Chief Justice"),
(50019, "City"),
(50020, "City Attorney"),
(50021, "City Auditor"),
(50022, "City Clerk"),
(50023, "City Council"),
(50024, "City Of Los Angeles"),
(50025, "City Of South El Monte"),
(50026, "City Prosecutor"),
(50027, "City Treasurer"),
(50028, "Clerk/Auditor"),
(50029, "Clerk/Record/Public Admin"),
(50030, "Clerk/Recorder"),
(50031, "Clerk/Recorder/Registar"),
(50032, "Clerk/Recorder/Registrar"),
(50033, "Commissioner"),
(50034, "Controller"),
(50035, "Costa Mesa"),
(50036, "Council Member"),
(50037, "County Clerk"),
(50038, "County Clerk/Auditor"),
(50039, "County Clerk/Auditor/Controller"),
(50040, "County Clerk/Recorder"),
(50041, "County Clerk/Recorder/Assessor"),
(50042, "County Clerk/Recorder/Public Admin"),
(50043, "Democratic County Central Committee"),
(50044, "Director"),
(50045, "District Attorney"),
(50046, "District Attorney/Public Administrator"),
(50047, "Gccc"),
(50048, "Governor"),
(50049, "Harbor Commissioner"),
(50050, "Ic"),
(50051, "Irrigation Dist"),
(50052, "Judge"),
(50053, "Justice"),
(50054, "Legislature"),
(50055, "Lieutenant Governor"),
(50056, "Mayor"),
(50057, "N/A"),
(50058, "Placentia"),
(50059, "Public Administrator"),
(50060, "Public Administrator/Guardian"),
(50061, "Rent Stabilization Board"),
(50062, "Republican Central Committee"),
(50063, "San Francisco Dccc"),
(50064, "Sanger"),
(50065, "School Board"),
(50066, "Secretary Of State"),
(50067, "Senator"),
(50068, "Sheriff"),
(50069, "Sheriff/Coroner"),
(50070, "Sheriff/Coroner/Marshall"),
(50071, "Sheriff/Coroner/Public Administrator"),
(50072, "Solana Beach"),
(50073, "Superintendent"),
(50074, "Supervisor"),
(50075, "Supt Of Schools"),
(50076, "Tax Collector"),
(50077, "Town Council"),
(50078, "Treasurer"),
(50079, "Treasurer/Tax Collector"),
(50080, "Treasurer/Tax Collector/Clerk"),
(50081, "Treasurer/Tax Collector/Public Administrator"),
(50082, "Treasurer/Tax Collector/Public Administrator/County Clerk"),
(50083, "Treasurer/Tax Collector/Recorder"),
(50084, "Trustee"),
(50085, "Weed Recreation Board Member"),
)
office_cd = fields.IntegerField(
db_column='OFFICE_CD',
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CD_CHOICES,
documentcloud_pages=[
DocumentCloud(id='2774529', start_page=20, end_page=22),
]
)
offic_dscr = fields.CharField(
db_column='OFFIC_DSCR',
max_length=80,
blank=True,
help_text="Office sought description"
)
agency_nam = fields.CharField(
db_column='AGENCY_NAM',
max_length=200,
blank=True,
help_text="Agency name"
)
JURIS_CD_CHOICES = (
(0, "N/A"),
(40501, "LOCAL"),
(40502, "STATE"),
(40503, "COUNTY"),
(40504, "MULTI-COUNTY"),
(40505, "CITY"),
(40507, "SUPERIOR COURT JUDGE"),
)
juris_cd = fields.IntegerField(
db_column='JURIS_CD',
blank=True,
null=True,
choices=JURIS_CD_CHOICES,
help_text='Office jurisdiction code',
documentcloud_pages=[
DocumentCloud(id='2774529', start_page=19, end_page=20),
]
)
    juris_dscr = fields.CharField(
        db_column='JURIS_DSCR',
        max_length=30,
        blank=True,
        help_text='Office jurisdiction description'
    )
    dist_no = fields.CharField(
        db_column='DIST_NO',
        max_length=4,
        blank=True,
        help_text='District number for the office being sought. \
Populated for Senate, Assembly, or Board of Equalization races.'
    )
party = fields.CharField(
db_column='PARTY',
max_length=200,
blank=True,
help_text="Political party"
)
yr_of_elec = fields.IntegerField(
db_column='YR_OF_ELEC',
blank=True,
null=True,
help_text='Year of election'
)
ELEC_TYPE_CHOICES = (
(0, 'N/A'),
(3001, "GENERAL"),
(3002, "PRIMARY"),
(3003, "RECALL"),
(3004, "SPECIAL ELECTION"),
(3005, "OFFICEHOLDER"),
(3006, "SPECIAL RUNOFF"),
# Observed in this field, but not documented
(3007, "UNKNOWN"),
)
elec_type = fields.IntegerField(
db_column='ELEC_TYPE',
blank=True,
null=True,
verbose_name="Election type",
choices=ELEC_TYPE_CHOICES,
help_text="Election type",
documentcloud_pages=[
DocumentCloud(id='2774529', start_page=3, end_page=4),
]
)
execute_dt = fields.DateField(
db_column='EXECUTE_DT',
blank=True,
null=True,
help_text='Execution date'
)
can_sig = fields.CharField(
db_column='CAN_SIG',
max_length=200,
blank=True,
help_text='Candidate signature'
)
account_no = fields.CharField(
db_column='ACCOUNT_NO',
max_length=32,
blank=True,
help_text='Account number'
)
acct_op_dt = fields.DateField(
db_column='ACCT_OP_DT',
blank=True,
null=True,
help_text='Account open date'
)
PARTY_CD_CHOICES = (
(0, 'N/A'),
(16001, 'DEMOCRATIC'),
(16002, 'REPUBLICAN'),
(16003, 'GREEN PARTY'),
(16004, 'REFORM PARTY'),
(16005, 'AMERICAN INDEPENDENT PARTY'),
(16006, 'PEACE AND FREEDOM'),
(16007, 'INDEPENDENT'),
(16008, 'LIBERTARIAN'),
(16009, 'NON PARTISAN'),
(16010, 'NATURAL LAW'),
(16011, 'UNKNOWN'),
(16012, 'NO PARTY PREFERENCE'),
(16013, 'AMERICANS ELECT'),
(16014, 'UNKNOWN'),
(16020, 'PEACE AND FREEDOM'),
)
party_cd = fields.IntegerField(
db_column='PARTY_CD',
blank=True,
null=True,
choices=PARTY_CD_CHOICES,
help_text="Party code",
documentcloud_pages=[
DocumentCloud(id='2774529', start_page=10, end_page=11),
]
)
DISTRICT_CD_CHOICES = (
(0, 'N/A'),
(17001, '01'),
(17002, '13'),
(17003, '24'),
(17004, '35'),
(17005, '46'),
(17006, '57'),
(17007, '68'),
(17008, '79'),
(17009, '02'),
(17010, '05'),
(17011, '04'),
(17013, '06'),
(17014, '07'),
(17015, '08'),
(17016, '19'),
(17017, '10'),
(17018, '11'),
(17019, '12'),
(17020, '14'),
(17021, '15'),
(17022, '16'),
(17023, '17'),
(17024, '18'),
(17026, '20'),
(17027, '21'),
(17028, '22'),
(17029, '23'),
(17030, '25'),
(17031, '26'),
(17032, '27'),
(17033, '28'),
(17034, '29'),
(17035, '30'),
(17036, '31'),
(17037, '32'),
(17038, '33'),
(17039, '34'),
(17040, '36'),
(17041, '37'),
(17042, '38'),
(17043, '39'),
(17044, '40'),
(17045, '41'),
(17046, '42'),
(17047, '43'),
(17048, '44'),
(17049, '45'),
(17050, '47'),
(17051, '48'),
(17052, '49'),
(17053, '50'),
(17054, '51'),
(17055, '52'),
(17056, '53'),
(17057, '54'),
(17058, '55'),
(17059, '56'),
(17060, '03'),
(17061, '59'),
(17062, '60'),
(17063, '61'),
(17064, '62'),
(17065, '63'),
(17066, '64'),
(17067, '65'),
(17068, '66'),
(17069, '67'),
(17070, '69'),
(17071, '70'),
(17072, '71'),
(17073, '72'),
(17074, '73'),
(17075, '74'),
(17076, '75'),
(17077, '76'),
(17078, '77'),
(17079, '78'),
(17080, '80'),
(17081, '09'),
(17090, '58'),
(17012, 'Unknown'),
(17082, 'Unknown'),
(17025, 'Unknown'),
)
district_cd = fields.IntegerField(
db_column='DISTRICT_CD',
blank=True,
null=True,
choices=DISTRICT_CD_CHOICES,
help_text='District number for the office being sought. \
Populated for Senate, Assembly, or Board of Equalization races.',
documentcloud_pages=[
DocumentCloud(id='2774529', start_page=11, end_page=13),
]
)
accept_limit_yn = fields.IntegerField(
db_column='ACCEPT_LIMIT_YN',
blank=True,
null=True,
help_text='This field is undocumented'
)
did_exceed_dt = fields.DateField(
db_column='DID_EXCEED_DT',
blank=True,
null=True,
help_text='This field is undocumented'
)
cntrb_prsnl_fnds_dt = fields.DateField(
db_column='CNTRB_PRSNL_FNDS_DT',
blank=True,
null=True,
help_text="This field is undocumented"
)
def __str__(self):
return str(self.filing_id)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'F501_502_CD'
ordering = ("-rpt_date",)
| california-civic-data-coalition/django-calaccess-raw-data | calaccess_raw/models/campaign.py | Python | mit | 207,009 |